use super::btf_offsets::BpfMapOffsets;
use super::idr::{translate_any_kva, xa_load};
use super::reader::GuestMem;
use super::symbols::text_kva_to_pa;
use super::{Cr3Pa, Kva, PageOffset};
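/// Everything needed to read or write guest kernel memory in one place: the
/// guest memory view, the CR3 root used for page-table walks, the direct-map
/// page offset, the kernel struct-offset table, and whether 5-level paging is
/// in use.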
pub(crate) struct AccessorCtx<'a> {
pub mem: &'a GuestMem,
pub cr3_pa: Cr3Pa,
pub page_offset: PageOffset,
pub offsets: &'a BpfMapOffsets,
pub l5: bool,
}
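// Map type IDs, matching the kernel's `enum bpf_map_type`.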
#[allow(dead_code)]
pub const BPF_MAP_TYPE_HASH: u32 = 1;
pub const BPF_MAP_TYPE_ARRAY: u32 = 2;
#[allow(dead_code)]
pub const BPF_MAP_TYPE_PERCPU_ARRAY: u32 = 6;
const BPF_OBJ_NAME_LEN: usize = 16;
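/// Metadata for one `struct bpf_map` discovered in the guest, including the
/// physical and virtual addresses needed to access it afterwards.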
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct BpfMapInfo {
pub map_pa: u64,
pub map_kva: u64,
pub name: String,
pub map_type: u32,
pub map_flags: u32,
pub key_size: u32,
pub value_size: u32,
pub max_entries: u32,
pub value_kva: Option<u64>,
pub btf_kva: u64,
pub btf_value_type_id: u32,
}
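/// Walks the kernel's `map_idr` xarray and returns every BPF map that can be
/// located and translated, in ID order. Entries that fail translation are
/// skipped rather than aborting the walk.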
pub(crate) fn find_all_bpf_maps(ctx: &AccessorCtx<'_>, map_idr_kva: u64) -> Vec<BpfMapInfo> {
let idr_pa = text_kva_to_pa(map_idr_kva);
let offsets = ctx.offsets;
let xa_head = ctx.mem.read_u64(idr_pa, offsets.idr_xa_head);
if xa_head == 0 {
return Vec::new();
}
let idr_next = ctx.mem.read_u32(idr_pa, offsets.idr_next);
let mut maps = Vec::new();
for id in 0..idr_next {
let Some(entry) = xa_load(
ctx.mem,
ctx.page_offset.0,
xa_head,
id as u64,
offsets.xa_node_slots,
offsets.xa_node_shift,
) else {
continue;
};
if entry == 0 {
continue;
}
let Some(map_pa) =
translate_any_kva(ctx.mem, ctx.cr3_pa.0, ctx.page_offset.0, entry, ctx.l5)
else {
continue;
};
let mut name_buf = [0u8; BPF_OBJ_NAME_LEN];
ctx.mem
.read_bytes(map_pa + offsets.map_name as u64, &mut name_buf);
let name_len = name_buf
.iter()
.position(|&b| b == 0)
.unwrap_or(BPF_OBJ_NAME_LEN);
let name = String::from_utf8_lossy(&name_buf[..name_len]).to_string();
let map_type = ctx.mem.read_u32(map_pa, offsets.map_type);
let map_flags = ctx.mem.read_u32(map_pa, offsets.map_flags);
let key_size = ctx.mem.read_u32(map_pa, offsets.key_size);
let value_size = ctx.mem.read_u32(map_pa, offsets.value_size);
let max_entries = ctx.mem.read_u32(map_pa, offsets.max_entries);
let value_kva = if map_type == BPF_MAP_TYPE_ARRAY {
Some(entry + offsets.array_value as u64)
} else {
None
};
let btf_kva = ctx.mem.read_u64(map_pa, offsets.map_btf);
let btf_value_type_id = ctx.mem.read_u32(map_pa, offsets.map_btf_value_type_id);
maps.push(BpfMapInfo {
map_pa,
map_kva: entry,
name,
map_type,
map_flags,
key_size,
value_size,
max_entries,
value_kva,
btf_kva,
btf_value_type_id,
});
}
maps
}
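/// Convenience wrapper: returns the first ARRAY map whose name ends with
/// `name_suffix` (e.g. ".bss"), if any.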
pub(crate) fn find_bpf_map(
ctx: &AccessorCtx<'_>,
map_idr_kva: u64,
name_suffix: &str,
) -> Option<BpfMapInfo> {
find_all_bpf_maps(ctx, map_idr_kva)
.into_iter()
.find(|m| m.map_type == BPF_MAP_TYPE_ARRAY && m.name.ends_with(name_suffix))
}
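// Guest page size; KVA ranges are split on these boundaries before translation.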
const BPF_MAP_PAGE_CHUNK: u64 = 4096;
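/// Splits `[target_kva, target_kva + len)` into page-sized chunks, translates
/// each chunk's KVA to a physical address, and hands `(pa, offset, chunk_len)`
/// to `chunk_fn`. Returns false if any chunk fails to translate.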
fn chunked_kva_io<F>(ctx: &AccessorCtx<'_>, target_kva: u64, len: usize, mut chunk_fn: F) -> bool
where
F: FnMut(u64, u64, usize),
{
let mut consumed: u64 = 0;
let total = len as u64;
while consumed < total {
let kva = target_kva + consumed;
let Some(pa) = ctx.mem.translate_kva(ctx.cr3_pa.0, Kva(kva), ctx.l5) else {
return false;
};
let page_end = (kva & !(BPF_MAP_PAGE_CHUNK - 1)) + BPF_MAP_PAGE_CHUNK;
let chunk_len = (page_end - kva).min(total - consumed) as usize;
chunk_fn(pa, consumed, chunk_len);
consumed += chunk_len as u64;
}
true
}
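/// Writes `data` into an ARRAY map's value area at `offset`, handling values
/// that straddle page boundaries. Returns false if the map has no value area,
/// the write would exceed `value_size`, or a page cannot be translated.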
pub(crate) fn write_bpf_map_value(
ctx: &AccessorCtx<'_>,
map_info: &BpfMapInfo,
offset: usize,
data: &[u8],
) -> bool {
let Some(base_kva) = map_info.value_kva else {
return false;
};
if offset + data.len() > map_info.value_size as usize {
return false;
}
let target_kva = base_kva + offset as u64;
chunked_kva_io(ctx, target_kva, data.len(), |pa, src_off, chunk_len| {
let src_off = src_off as usize;
for (i, &byte) in data[src_off..src_off + chunk_len].iter().enumerate() {
ctx.mem.write_u8(pa, i, byte);
}
})
}
pub(crate) fn write_bpf_map_value_u32(
ctx: &AccessorCtx<'_>,
map_info: &BpfMapInfo,
offset: usize,
val: u32,
) -> bool {
write_bpf_map_value(ctx, map_info, offset, &val.to_ne_bytes())
}
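/// Reads `len` bytes from an ARRAY map's value area at `offset`, mirroring
/// `write_bpf_map_value`. Returns None on a bounds or translation failure.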
pub(crate) fn read_bpf_map_value(
ctx: &AccessorCtx<'_>,
map_info: &BpfMapInfo,
offset: usize,
len: usize,
) -> Option<Vec<u8>> {
let base_kva = map_info.value_kva?;
if offset + len > map_info.value_size as usize {
return None;
}
let target_kva = base_kva + offset as u64;
let mut buf = vec![0u8; len];
let ok = chunked_kva_io(ctx, target_kva, len, |pa, dst_off, chunk_len| {
let dst_off = dst_off as usize;
let _ = ctx.mem.read_bytes(pa, &mut buf[dst_off..dst_off + chunk_len]);
});
if !ok {
return None;
}
Some(buf)
}
pub(crate) fn read_bpf_map_value_u32(
ctx: &AccessorCtx<'_>,
map_info: &BpfMapInfo,
offset: usize,
) -> Option<u32> {
let bytes = read_bpf_map_value(ctx, map_info, offset, 4)?;
Some(u32::from_ne_bytes(bytes.try_into().unwrap()))
}
#[allow(dead_code)]
const HTAB_ITER_MAX: usize = 1_000_000;
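/// Walks every bucket of a HASH map and collects (key, value) byte pairs.
/// Returns an empty vector when the map is not a HASH map or htab offsets are
/// unavailable; `HTAB_ITER_MAX` bounds the total elements visited so a corrupt
/// hlist cannot loop forever.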
#[allow(dead_code)]
fn iter_htab_entries(ctx: &AccessorCtx<'_>, map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)> {
if map.map_type != BPF_MAP_TYPE_HASH {
return Vec::new();
}
let Some(htab) = &ctx.offsets.htab_offsets else {
return Vec::new();
};
let htab_kva = map.map_kva;
let Some(htab_pa) =
translate_any_kva(ctx.mem, ctx.cr3_pa.0, ctx.page_offset.0, htab_kva, ctx.l5)
else {
return Vec::new();
};
let n_buckets = ctx.mem.read_u32(htab_pa, htab.htab_n_buckets);
let buckets_kva = ctx.mem.read_u64(htab_pa, htab.htab_buckets);
if n_buckets == 0 || buckets_kva == 0 {
return Vec::new();
}
let key_size = map.key_size as usize;
let value_size = map.value_size as usize;
let value_off_in_elem = htab.htab_elem_size_base + ((key_size + 7) & !7);
let key_off_in_elem = htab.htab_elem_size_base;
let mut entries = Vec::new();
let mut total_visited = 0usize;
for i in 0..n_buckets {
let bucket_kva = buckets_kva + (i as u64) * (htab.bucket_size as u64);
let Some(bucket_pa) =
translate_any_kva(ctx.mem, ctx.cr3_pa.0, ctx.page_offset.0, bucket_kva, ctx.l5)
else {
continue;
};
let first_ptr = ctx
.mem
.read_u64(bucket_pa, htab.bucket_head + htab.hlist_nulls_head_first);
let mut node_ptr = first_ptr;
loop {
if node_ptr & 1 != 0 || node_ptr == 0 {
break;
}
total_visited += 1;
if total_visited > HTAB_ITER_MAX {
return entries;
}
let elem_kva = node_ptr;
let Some(elem_pa) =
translate_any_kva(ctx.mem, ctx.cr3_pa.0, ctx.page_offset.0, elem_kva, ctx.l5)
else {
break;
};
let mut key_buf = vec![0u8; key_size];
ctx.mem
.read_bytes(elem_pa + key_off_in_elem as u64, &mut key_buf);
let mut val_buf = vec![0u8; value_size];
ctx.mem
.read_bytes(elem_pa + value_off_in_elem as u64, &mut val_buf);
entries.push((key_buf, val_buf));
node_ptr = ctx.mem.read_u64(elem_pa, htab.hlist_nulls_node_next);
}
}
entries
}
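/// Reads one slot of a PERCPU_ARRAY map for every CPU. `per_cpu_offsets` are
/// the guest's `__per_cpu_offset` values; a slot is None when the computed
/// physical address falls outside guest memory.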
#[allow(dead_code)]
fn read_percpu_array_value(
ctx: &AccessorCtx<'_>,
map: &BpfMapInfo,
key: u32,
per_cpu_offsets: &[u64],
) -> Vec<Option<Vec<u8>>> {
if map.map_type != BPF_MAP_TYPE_PERCPU_ARRAY {
return Vec::new();
}
if key >= map.max_entries {
return Vec::new();
}
let pptrs_kva = map.map_kva + ctx.offsets.array_value as u64;
let pptr_kva = pptrs_kva + (key as u64) * 8;
let Some(pptr_pa) =
translate_any_kva(ctx.mem, ctx.cr3_pa.0, ctx.page_offset.0, pptr_kva, ctx.l5)
else {
return Vec::new();
};
let percpu_base = ctx.mem.read_u64(pptr_pa, 0);
if percpu_base == 0 {
return Vec::new();
}
let value_size = map.value_size as usize;
let mut result = Vec::with_capacity(per_cpu_offsets.len());
for &cpu_off in per_cpu_offsets {
let cpu_kva = percpu_base.wrapping_add(cpu_off);
let cpu_pa = super::symbols::kva_to_pa(cpu_kva, ctx.page_offset.0);
if cpu_pa + value_size as u64 <= ctx.mem.size() {
let mut buf = vec![0u8; value_size];
ctx.mem.read_bytes(cpu_pa, &mut buf);
result.push(Some(buf));
} else {
result.push(None);
}
}
result
}
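/// Follows pointers, typedefs, and qualifiers (bounded to 20 hops) until a
/// struct or union is reached, returning None for any other terminal type.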
pub(crate) fn resolve_to_struct(btf: &btf_rs::Btf, type_id: u32) -> Option<btf_rs::Struct> {
let mut t = btf.resolve_type_by_id(type_id).ok()?;
for _ in 0..20 {
match t {
btf_rs::Type::Struct(s) | btf_rs::Type::Union(s) => return Some(s),
btf_rs::Type::Ptr(_)
| btf_rs::Type::Volatile(_)
| btf_rs::Type::Const(_)
| btf_rs::Type::Typedef(_)
| btf_rs::Type::TypeTag(_)
| btf_rs::Type::Restrict(_) => {
t = btf.resolve_chained_type(t.as_btf_type()?).ok()?;
}
_ => return None,
}
}
None
}
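/// High-level handle that resolves `map_idr` once from vmlinux symbols and
/// exposes map discovery plus value read/write helpers over a borrowed
/// `GuestKernel`.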
pub struct BpfMapAccessor<'a> {
kernel: &'a super::guest::GuestKernel<'a>,
map_idr_kva: u64,
offsets: &'a BpfMapOffsets,
}
#[allow(dead_code)]
impl<'a> BpfMapAccessor<'a> {
pub fn from_guest_kernel(
kernel: &'a super::guest::GuestKernel<'a>,
offsets: &'a BpfMapOffsets,
) -> anyhow::Result<Self> {
let map_idr_kva = kernel
.symbol_kva("map_idr")
.ok_or_else(|| anyhow::anyhow!("map_idr symbol not found in vmlinux"))?;
Ok(Self {
kernel,
map_idr_kva,
offsets,
})
}
fn ctx(&self) -> AccessorCtx<'_> {
AccessorCtx {
mem: self.kernel.mem(),
cr3_pa: Cr3Pa(self.kernel.cr3_pa()),
page_offset: PageOffset(self.kernel.page_offset()),
offsets: self.offsets,
l5: self.kernel.l5(),
}
}
pub fn maps(&self) -> Vec<BpfMapInfo> {
find_all_bpf_maps(&self.ctx(), self.map_idr_kva)
}
pub fn find_map(&self, name_suffix: &str) -> Option<BpfMapInfo> {
find_bpf_map(&self.ctx(), self.map_idr_kva, name_suffix)
}
pub fn read_value(&self, map: &BpfMapInfo, offset: usize, len: usize) -> Option<Vec<u8>> {
read_bpf_map_value(&self.ctx(), map, offset, len)
}
pub fn write_value(&self, map: &BpfMapInfo, offset: usize, data: &[u8]) -> bool {
write_bpf_map_value(&self.ctx(), map, offset, data)
}
pub fn write_value_u32(&self, map: &BpfMapInfo, offset: usize, val: u32) -> bool {
write_bpf_map_value_u32(&self.ctx(), map, offset, val)
}
pub fn read_value_u32(&self, map: &BpfMapInfo, offset: usize) -> Option<u32> {
read_bpf_map_value_u32(&self.ctx(), map, offset)
}
pub fn iter_hash_map(&self, map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)> {
iter_htab_entries(&self.ctx(), map)
}
pub fn read_percpu_array(
&self,
map: &BpfMapInfo,
key: u32,
num_cpus: u32,
) -> Vec<Option<Vec<u8>>> {
let Some(pco_kva) = self.kernel.symbol_kva("__per_cpu_offset") else {
return Vec::new();
};
let pco_pa = super::symbols::text_kva_to_pa(pco_kva);
let per_cpu_offsets =
super::symbols::read_per_cpu_offsets(self.kernel.mem(), pco_pa, num_cpus);
read_percpu_array_value(&self.ctx(), map, key, &per_cpu_offsets)
}
}
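/// Owning variant that constructs the `GuestKernel` and `BpfMapOffsets` itself
/// from a vmlinux path; use `as_accessor()` to get the borrowing handle.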
pub struct BpfMapAccessorOwned<'a> {
kernel: super::guest::GuestKernel<'a>,
map_idr_kva: u64,
offsets: BpfMapOffsets,
}
#[allow(dead_code)]
impl<'a> BpfMapAccessorOwned<'a> {
pub fn new(mem: &'a GuestMem, vmlinux: &std::path::Path) -> anyhow::Result<Self> {
let kernel = super::guest::GuestKernel::new(mem, vmlinux)?;
let offsets = BpfMapOffsets::from_vmlinux(vmlinux)?;
let map_idr_kva = kernel
.symbol_kva("map_idr")
.ok_or_else(|| anyhow::anyhow!("map_idr symbol not found in vmlinux"))?;
Ok(Self {
kernel,
map_idr_kva,
offsets,
})
}
pub fn as_accessor(&self) -> BpfMapAccessor<'_> {
BpfMapAccessor {
kernel: &self.kernel,
map_idr_kva: self.map_idr_kva,
offsets: &self.offsets,
}
}
pub fn guest_kernel(&self) -> &super::guest::GuestKernel<'a> {
&self.kernel
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::monitor::idr::{XA_CHUNK_SIZE, xa_node_shift};
use crate::monitor::symbols::START_KERNEL_MAP;
#[cfg(target_arch = "x86_64")]
fn value_ctx<'a>(mem: &'a GuestMem, cr3_pa: u64, l5: bool) -> AccessorCtx<'a> {
AccessorCtx {
mem,
cr3_pa: Cr3Pa(cr3_pa),
page_offset: PageOffset(0),
offsets: &BpfMapOffsets::EMPTY,
l5,
}
}
fn lookup_ctx<'a>(
mem: &'a GuestMem,
cr3_pa: u64,
page_offset: u64,
offsets: &'a BpfMapOffsets,
l5: bool,
) -> AccessorCtx<'a> {
AccessorCtx {
mem,
cr3_pa: Cr3Pa(cr3_pa),
page_offset: PageOffset(page_offset),
offsets,
l5,
}
}
#[cfg(target_arch = "x86_64")]
const PTE_BASE: u64 = 0;
#[cfg(target_arch = "aarch64")]
const PTE_BASE: u64 = crate::vmm::kvm::DRAM_START;
#[cfg(target_arch = "x86_64")]
const BLOCK_FLAGS: u64 = 0xE3;
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
const BLOCK_FLAGS: u64 = 0x01;
#[cfg(target_arch = "x86_64")]
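/// Builds a minimal 4-level page table in a flat buffer that maps one 4 KiB
/// page at a fixed KVA; returns (buffer, pgd_pa, kva, data_pa).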
fn setup_page_table() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0xDEAD_BEEF_CAFE_1234u64.to_ne_bytes());
(buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_basic() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva), false);
assert_eq!(pa, Some(data_pa));
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0xDEAD_BEEF_CAFE_1234);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_with_offset() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva + 0x100), false);
assert_eq!(pa, Some(data_pa + 0x100));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_unmapped() {
let (buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(0xFFFF_FFFF_8000_0000), false);
assert_eq!(pa, None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_unmapped_pte() {
let (buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let unmapped_kva = kva + 0x1000;
let pa = mem.translate_kva(cr3_pa, Kva(unmapped_kva), false);
assert_eq!(pa, None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_2mb_huge_page() {
let kva: u64 = 0xFFFF_8880_0020_0000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let huge_page_pa: u64 = 0x20_0000;
let size = (huge_page_pa + 0x20_0000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(
&mut buf,
pmd_pa,
pmd_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
buf[huge_page_pa as usize..huge_page_pa as usize + 8]
.copy_from_slice(&0xCAFE_BABE_1234_5678u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pgd_pa, Kva(kva), false);
assert_eq!(pa, Some(huge_page_pa));
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0xCAFE_BABE_1234_5678);
let pa_off = mem.translate_kva(pgd_pa, Kva(kva + 0x1000), false);
assert_eq!(pa_off, Some(huge_page_pa + 0x1000));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_1gb_huge_page() {
let kva: u64 = 0xFFFF_8880_4000_0000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let huge_page_pa: u64 = 0x4000_0000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(
&mut buf,
pud_pa,
pud_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pgd_pa, Kva(kva), false);
assert_eq!(pa, Some(huge_page_pa));
let pa_off = mem.translate_kva(pgd_pa, Kva(kva + 0x1234_5678), false);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234_5678));
}
#[test]
fn translate_kva_pgd_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let size = (pgd_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let off = (pgd_pa + pgd_idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&0x2000u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false), None);
}
#[test]
fn translate_kva_pud_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, 0x3000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false), None);
}
#[test]
fn translate_kva_pmd_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let size = (pmd_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, 0x4000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false), None);
}
#[test]
fn translate_kva_pte_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let size = (pte_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, 0x5000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false), None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_u32_roundtrip() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
4,
0xABCD_1234,
));
assert_eq!(mem.read_u32(data_pa, 4), 0xABCD_1234);
}
#[test]
fn read_bytes_basic() {
let buf = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let mut out = [0u8; 4];
let n = mem.read_bytes(2, &mut out);
assert_eq!(n, 4);
assert_eq!(out, [3, 4, 5, 6]);
}
#[test]
fn read_bytes_past_end() {
let buf = [1u8, 2, 3, 4];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let mut out = [0u8; 8];
let n = mem.read_bytes(2, &mut out);
assert_eq!(n, 2);
assert_eq!(out[..2], [3, 4]);
}
#[test]
fn read_bytes_at_boundary() {
let buf = [0xFFu8; 8];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let mut out = [0u8; 8];
let n = mem.read_bytes(8, &mut out);
assert_eq!(n, 0);
}
#[test]
fn write_u32_roundtrip() {
let mut buf = [0u8; 16];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
mem.write_u32(4, 0, 0xDEAD_BEEF);
assert_eq!(mem.read_u32(4, 0), 0xDEAD_BEEF);
assert_eq!(
u32::from_ne_bytes(buf[4..8].try_into().unwrap()),
0xDEAD_BEEF
);
}
#[test]
fn xa_load_zero_head() {
let buf = [0u8; 64];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(xa_load(&mem, 0, 0, 0, 0, 0), Some(0));
assert_eq!(xa_load(&mem, 0, 0, 5, 0, 0), Some(0));
}
#[test]
fn xa_load_single_entry_index_zero() {
let xa_head: u64 = 0xFFFF_8880_0001_0000;
assert_eq!(xa_head & 2, 0);
let buf = [0u8; 8];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(xa_load(&mem, 0, xa_head, 0, 0, 0), Some(xa_head));
}
#[test]
fn xa_load_single_entry_index_nonzero() {
let xa_head: u64 = 0xFFFF_8880_0001_0000;
let buf = [0u8; 8];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(xa_load(&mem, 0, xa_head, 1, 0, 0), Some(0));
assert_eq!(xa_load(&mem, 0, xa_head, 63, 0, 0), Some(0));
}
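/// Builds a single xa_node (shift 0) with the given (slot, entry) pairs and
/// returns (buffer, tagged xa_head, page_offset).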
fn setup_xa_node(slots: &[(u64, u64)], slots_off: usize) -> (Vec<u8>, u64, u64) {
let node_pa: u64 = 0x1000;
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
let node_kva = page_offset.wrapping_add(node_pa);
let size = (node_pa as usize) + slots_off + XA_CHUNK_SIZE as usize * 8 + 8;
let mut buf = vec![0u8; size];
buf[node_pa as usize] = 0;
for &(idx, entry) in slots {
let slot_pa = node_pa + slots_off as u64 + idx * 8;
buf[slot_pa as usize..slot_pa as usize + 8].copy_from_slice(&entry.to_ne_bytes());
}
let xa_head = node_kva | 2;
(buf, xa_head, page_offset)
}
#[test]
fn xa_load_multi_entry_populated_slot() {
let slots_off = 16;
let entry_ptr: u64 = 0xDEAD_0000;
let (buf, xa_head, page_offset) = setup_xa_node(&[(3, entry_ptr)], slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(
xa_load(&mem, page_offset, xa_head, 3, slots_off, 0),
Some(entry_ptr)
);
}
#[test]
fn xa_load_multi_entry_empty_slot() {
let slots_off = 16;
let (buf, xa_head, page_offset) = setup_xa_node(&[], slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(
xa_load(&mem, page_offset, xa_head, 0, slots_off, 0),
Some(0)
);
assert_eq!(
xa_load(&mem, page_offset, xa_head, 5, slots_off, 0),
Some(0)
);
}
#[test]
fn xa_load_multi_entry_multiple_slots() {
let slots_off = 16;
let entries = [
(0, 0xAAAA_0000u64),
(7, 0xBBBB_0000u64),
(63, 0xCCCC_0000u64),
];
let (buf, xa_head, page_offset) = setup_xa_node(&entries, slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(
xa_load(&mem, page_offset, xa_head, 0, slots_off, 0),
Some(0xAAAA_0000)
);
assert_eq!(
xa_load(&mem, page_offset, xa_head, 7, slots_off, 0),
Some(0xBBBB_0000)
);
assert_eq!(
xa_load(&mem, page_offset, xa_head, 63, slots_off, 0),
Some(0xCCCC_0000)
);
assert_eq!(
xa_load(&mem, page_offset, xa_head, 1, slots_off, 0),
Some(0)
);
}
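/// Lays out page tables, an IDR whose xa_head is a single direct entry, and
/// one bpf_map with the given name/type/value_size; returns
/// (buffer, pgd_pa, idr_kva, offsets).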
#[cfg(target_arch = "x86_64")]
fn setup_find_bpf_map(
map_name: &str,
map_type: u32,
value_size: u32,
) -> (Vec<u8>, u64, u64, BpfMapOffsets) {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
let size = 0x16000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, idr_pa + offsets.idr_xa_head as u64, map_kva);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 1);
write_u32(&mut buf, map_pa + offsets.map_type as u64, map_type);
write_u32(&mut buf, map_pa + offsets.value_size as u64, value_size);
let name_bytes = map_name.as_bytes();
let name_pa = map_pa + offsets.map_name as u64;
buf[name_pa as usize..name_pa as usize + name_bytes.len()].copy_from_slice(name_bytes);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
(buf, pgd_pa, idr_kva, offsets)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_discovers_matching_map() {
let (buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("mitosis.bss", BPF_MAP_TYPE_ARRAY, 64);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
let info = result.expect("should find the map");
assert_eq!(info.name, "mitosis.bss");
assert_eq!(info.map_type, BPF_MAP_TYPE_ARRAY);
assert_eq!(info.value_size, 64);
assert_eq!(info.map_pa, 0x14000);
let map_kva: u64 = 0xFFFF_C900_0000_0000;
assert_eq!(info.value_kva, Some(map_kva + offsets.array_value as u64));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_no_match_wrong_suffix() {
let (buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("mitosis.bss", BPF_MAP_TYPE_ARRAY, 64);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".data",
);
assert!(result.is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_non_array_type() {
let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map("test.bss", 1, 64);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(result.is_none());
}
#[test]
fn find_bpf_map_empty_idr() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let idr_pa: u64 = 0x1000;
let size = 0x2000;
let buf = vec![0u8; size];
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, 0x10000, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(result.is_none());
}
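/// Same as `setup_page_table` but with a 5-level (LA57) walk starting at a
/// PML5 table.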
#[cfg(target_arch = "x86_64")]
fn setup_5level_page_table() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFF11_8880_0000_5000;
let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0x5555_AAAA_1234_5678u64.to_ne_bytes());
(buf, pml5_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_basic() {
let (buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva), true);
assert_eq!(pa, Some(data_pa));
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0x5555_AAAA_1234_5678);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_with_offset() {
let (buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva + 0x100), true);
assert_eq!(pa, Some(data_pa + 0x100));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_unmapped_pml5() {
let (buf, cr3_pa, _, _) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let unmapped_kva: u64 = 0xFF22_8880_0000_5000;
assert_eq!(mem.translate_kva(cr3_pa, Kva(unmapped_kva), true), None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_vs_4level_same_buffer() {
let (buf, cr3_pa, kva, _) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa_4level = mem.translate_kva(cr3_pa, Kva(kva), false);
let pa_5level = mem.translate_kva(cr3_pa, Kva(kva), true);
assert_ne!(pa_4level, pa_5level);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_bytes_roundtrip() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 16,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let payload = [0xDE, 0xAD, 0xBE, 0xEF];
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&payload
));
for (i, &expected) in payload.iter().enumerate() {
assert_eq!(buf[data_pa as usize + i], expected);
}
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_fails_on_unmapped_kva() {
let (mut buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 16,
max_entries: 0,
value_kva: Some(0xFFFF_FFFF_8000_0000),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(!write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[0xFF]
));
}
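/// Builds a two-level xarray: a root node (shift 6) whose `child_slot` points
/// at a leaf node (shift 0) holding `leaf_entry` at `leaf_slot`.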
fn setup_two_level_xarray(
child_slot: u64,
leaf_slot: u64,
leaf_entry: u64,
slots_off: usize,
) -> (Vec<u8>, u64, u64) {
let root_pa: u64 = 0x1000;
let child_pa: u64 = 0x2000;
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
let root_kva = page_offset.wrapping_add(root_pa);
let child_kva = page_offset.wrapping_add(child_pa);
let size = (child_pa as usize) + slots_off + XA_CHUNK_SIZE as usize * 8 + 8;
let mut buf = vec![0u8; size];
buf[root_pa as usize] = 6;
let root_slot_pa = root_pa + slots_off as u64 + child_slot * 8;
buf[root_slot_pa as usize..root_slot_pa as usize + 8]
.copy_from_slice(&(child_kva | 2).to_ne_bytes());
buf[child_pa as usize] = 0;
let child_slot_pa = child_pa + slots_off as u64 + leaf_slot * 8;
buf[child_slot_pa as usize..child_slot_pa as usize + 8]
.copy_from_slice(&leaf_entry.to_ne_bytes());
let xa_head = root_kva | 2;
(buf, xa_head, page_offset)
}
#[test]
fn xa_load_two_level_finds_leaf() {
let slots_off = 16;
let child_slot = 1u64;
let leaf_slot = 5u64;
let leaf_entry: u64 = 0xBEEF_0000;
let index = (child_slot << 6) | leaf_slot;
let (buf, xa_head, page_offset) =
setup_two_level_xarray(child_slot, leaf_slot, leaf_entry, slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(
xa_load(&mem, page_offset, xa_head, index, slots_off, 0),
Some(leaf_entry)
);
}
#[test]
fn xa_load_two_level_empty_child_slot() {
let slots_off = 16;
let child_slot = 2u64;
let leaf_slot = 10u64;
let leaf_entry: u64 = 0xAAAA_0000;
let (buf, xa_head, page_offset) =
setup_two_level_xarray(child_slot, leaf_slot, leaf_entry, slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let populated_idx = (child_slot << 6) | leaf_slot;
assert_eq!(
xa_load(&mem, page_offset, xa_head, populated_idx, slots_off, 0),
Some(leaf_entry)
);
let empty_child_idx = child_slot << 6;
assert_eq!(
xa_load(&mem, page_offset, xa_head, empty_child_idx, slots_off, 0),
Some(0)
);
}
#[test]
fn xa_load_two_level_empty_root_slot() {
let slots_off = 16;
let (buf, xa_head, page_offset) = setup_two_level_xarray(3, 0, 0xDEAD_0000, slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let empty_root_idx = 5u64;
assert_eq!(
xa_load(&mem, page_offset, xa_head, empty_root_idx, slots_off, 0),
Some(0)
);
}
#[test]
fn xa_load_two_level_high_index() {
let slots_off = 16;
let (buf, xa_head, page_offset) = setup_two_level_xarray(63, 63, 0xFFFF_0000, slots_off);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let max_index = (63 << 6) | 63;
assert_eq!(
xa_load(&mem, page_offset, xa_head, max_index, slots_off, 0),
Some(0xFFFF_0000)
);
}
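/// Like `setup_find_bpf_map`, but the IDR head points at an xa_node whose first
/// two slots hold two maps ("other.data" and "mitosis.bss") so multi-entry
/// iteration can be exercised.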
#[cfg(target_arch = "x86_64")]
fn setup_find_bpf_map_multi() -> (Vec<u8>, u64, u64, BpfMapOffsets) {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map1_pa: u64 = 0x14000;
let map2_pa: u64 = 0x15000;
let idr_pa: u64 = 0x16000;
let xa_node_pa: u64 = 0x17000;
let map1_kva: u64 = 0xFFFF_C900_0000_0000;
let map2_kva: u64 = 0xFFFF_C900_0000_1000;
let pgd_idx = (map1_kva >> 39) & 0x1FF;
let pud_idx = (map1_kva >> 30) & 0x1FF;
let pmd_idx = (map1_kva >> 21) & 0x1FF;
let pte1_idx = (map1_kva >> 12) & 0x1FF;
let pte2_idx = (map2_kva >> 12) & 0x1FF;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x18000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte1_idx * 8, (map1_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
buf[xa_node_pa as usize] = 0;
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64,
map1_kva,
);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map2_kva,
);
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map1_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map1_pa + offsets.value_size as u64, 32);
let name1 = b"other.data";
let name1_pa = map1_pa + offsets.map_name as u64;
buf[name1_pa as usize..name1_pa as usize + name1.len()].copy_from_slice(name1);
write_u32(
&mut buf,
map2_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map2_pa + offsets.value_size as u64, 128);
let name2 = b"mitosis.bss";
let name2_pa = map2_pa + offsets.map_name as u64;
buf[name2_pa as usize..name2_pa as usize + name2.len()].copy_from_slice(name2);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
(buf, pgd_pa, idr_kva, offsets)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_wrong_name_finds_second() {
let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map_multi();
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
idr_kva,
".bss",
);
let info = result.expect("should find second map");
assert_eq!(info.name, "mitosis.bss");
assert_eq!(info.map_pa, 0x15000);
assert_eq!(info.value_size, 128);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_full_length_name() {
let full_name = "0123456789a.bss"; let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map(full_name, BPF_MAP_TYPE_ARRAY, 64);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
let info = result.expect("should find map with 15-char name");
assert_eq!(info.name, full_name);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_max_length_name_no_null() {
let max_name = "0123456789a.bss!"; assert_eq!(max_name.len(), BPF_OBJ_NAME_LEN);
let (mut buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("placeholder.bss", BPF_MAP_TYPE_ARRAY, 64);
let map_pa: u64 = 0x14000;
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + 16].copy_from_slice(max_name.as_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(
result.is_none(),
"16-byte name ending with '!' should not match .bss suffix"
);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_nonzero_offset() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
let original_first_byte = buf[data_pa as usize];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let payload = [0x11, 0x22, 0x33, 0x44];
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
8,
&payload
));
for (i, &expected) in payload.iter().enumerate() {
assert_eq!(buf[data_pa as usize + 8 + i], expected);
}
assert_eq!(buf[data_pa as usize], original_first_byte);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_empty_data() {
let (mut buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[]
));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_u32_5level() {
let (mut buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, true),
&info,
0,
0xCAFE_BABE,
));
assert_eq!(mem.read_u32(data_pa, 0), 0xCAFE_BABE);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_p4d_not_present() {
let kva: u64 = 0xFF11_8880_0000_5000;
let pml5_idx = (kva >> 48) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let size = (p4d_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let off = (pml5_pa + pml5_idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&((p4d_pa + PTE_BASE) | 0x63).to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pml5_pa, Kva(kva), true), None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_2mb_huge_page() {
let kva: u64 = 0xFF11_8880_0020_0000;
let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let huge_page_pa: u64 = 0x20_0000;
let size = (huge_page_pa + 0x20_0000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(
&mut buf,
pmd_pa,
pmd_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pml5_pa, Kva(kva), true);
assert_eq!(pa, Some(huge_page_pa));
let pa_off = mem.translate_kva(pml5_pa, Kva(kva + 0x1234), true);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_1gb_huge_page() {
let kva: u64 = 0xFF11_8880_4000_0000;
let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
let huge_page_pa: u64 = 0x4000_0000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(
&mut buf,
pud_pa,
pud_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pml5_pa, Kva(kva), true);
assert_eq!(pa, Some(huge_page_pa));
let pa_off = mem.translate_kva(pml5_pa, Kva(kva + 0x1234_5678), true);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234_5678));
}
#[test]
fn find_bpf_map_skips_untranslatable_entry() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let idr_pa: u64 = 0x1000;
let pgd_pa: u64 = 0x10000;
let size = 0x12000;
let mut buf = vec![0u8; size];
let unmappable_kva: u64 = 0xFFFF_C900_DEAD_0000;
assert_eq!(unmappable_kva & 2, 0);
let off = (idr_pa + offsets.idr_xa_head as u64) as usize;
buf[off..off + 8].copy_from_slice(&unmappable_kva.to_ne_bytes());
let off_next = (idr_pa + offsets.idr_next as u64) as usize;
buf[off_next..off_next + 4].copy_from_slice(&1u32.to_ne_bytes());
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pgd_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(result.is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_5level() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let map_kva: u64 = 0xFF11_C900_0000_0000;
let pml5_idx = (map_kva >> 48) & 0x1FF;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = 0x11000;
let pud_pa: u64 = 0x12000;
let pmd_pa: u64 = 0x13000;
let pte_pa: u64 = 0x14000;
let map_pa: u64 = 0x15000;
let idr_pa: u64 = 0x16000;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pml5_pa + pml5_idx * 8, (p4d_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, p4d_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, idr_pa + offsets.idr_xa_head as u64, map_kva);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 1);
write_u32(
&mut buf,
map_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map_pa + offsets.value_size as u64, 96);
let name = b"test.bss";
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pml5_pa, 0xFFFF_8880_0000_0000, &offsets, true),
idr_kva,
".bss",
);
let info = result.expect("should find map via 5-level walk");
assert_eq!(info.name, "test.bss");
assert_eq!(info.map_pa, map_pa);
assert_eq!(info.value_size, 96);
assert_eq!(info.value_kva, Some(map_kva + offsets.array_value as u64));
}
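/// Maps two consecutive 4 KiB pages so reads and writes that cross a page
/// boundary can be exercised; returns (buffer, pgd_pa, kva, page1_pa, page2_pa).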
#[cfg(target_arch = "x86_64")]
fn setup_two_page_table() -> (Vec<u8>, u64, u64, u64, u64) {
let kva: u64 = 0xFFFF_8880_0000_5000;
let kva2: u64 = kva + 0x1000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte1_idx = (kva >> 12) & 0x1FF;
let pte2_idx = (kva2 >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let page1_pa: u64 = pte_pa + 0x1000;
let page2_pa: u64 = page1_pa + 0x1000;
let size = (page2_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte1_idx, (page1_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte2_idx, (page2_pa + PTE_BASE) | 0x63);
(buf, pgd_pa, kva, page1_pa, page2_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_across_page_boundary() {
let (mut buf, cr3_pa, kva, page1_pa, page2_pa) = setup_two_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let val: u32 = 0xAABB_CCDD;
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
0xFFE,
val,
));
let b = val.to_ne_bytes();
assert_eq!(buf[page1_pa as usize + 0xFFE], b[0]);
assert_eq!(buf[page1_pa as usize + 0xFFF], b[1]);
assert_eq!(buf[page2_pa as usize], b[2]);
assert_eq!(buf[page2_pa as usize + 1], b[3]);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_single_byte_on_second_page() {
let (mut buf, cr3_pa, kva, _, page2_pa) = setup_two_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0x1000,
&[0x42],
));
assert_eq!(buf[page2_pa as usize], 0x42);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_untranslatable_finds_translatable() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map2_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
let xa_node_pa: u64 = 0x16000;
let map1_kva: u64 = 0xFFFF_C900_0000_0000;
let map2_kva: u64 = 0xFFFF_C900_0000_1000;
let pgd_idx = (map2_kva >> 39) & 0x1FF;
let pud_idx = (map2_kva >> 30) & 0x1FF;
let pmd_idx = (map2_kva >> 21) & 0x1FF;
let pte2_idx = (map2_kva >> 12) & 0x1FF;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
buf[xa_node_pa as usize] = 0;
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64,
map1_kva,
);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map2_kva,
);
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map2_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map2_pa + offsets.value_size as u64, 200);
let name = b"target.bss";
let name_pa = (map2_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
idr_kva,
".bss",
);
let info = result.expect("should skip untranslatable entry and find the second");
assert_eq!(info.name, "target.bss");
assert_eq!(info.map_pa, map2_pa);
assert_eq!(info.value_size, 200);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_u32_roundtrip() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
buf[data_pa as usize + 4..data_pa as usize + 8]
.copy_from_slice(&0xCAFE_BABEu32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let val = read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 4);
assert_eq!(val, Some(0xCAFE_BABE));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_bytes() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
buf[data_pa as usize..data_pa as usize + 4].copy_from_slice(&[0xAA, 0xBB, 0xCC, 0xDD]);
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let bytes = read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0, 4);
assert_eq!(bytes, Some(vec![0xAA, 0xBB, 0xCC, 0xDD]));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_empty() {
let (buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let bytes = read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0, 0);
assert_eq!(bytes, Some(vec![]));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_unmapped_returns_none() {
let (buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 16,
max_entries: 0,
value_kva: Some(0xFFFF_FFFF_8000_0000),
btf_kva: 0,
btf_value_type_id: 0,
};
assert_eq!(
read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0, 4),
None
);
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 0),
None
);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_then_read_bpf_map_value_roundtrip() {
let (mut buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
8,
0x1234_5678,
));
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 8),
Some(0x1234_5678)
);
let payload = [0x11, 0x22, 0x33, 0x44, 0x55];
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
16,
&payload,
));
assert_eq!(
read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 16, 5),
Some(payload.to_vec()),
);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_across_page_boundary() {
let (mut buf, cr3_pa, kva, page1_pa, page2_pa) = setup_two_page_table();
buf[page1_pa as usize + 0xFFE] = 0xAA;
buf[page1_pa as usize + 0xFFF] = 0xBB;
buf[page2_pa as usize] = 0xCC;
buf[page2_pa as usize + 1] = 0xDD;
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
let bytes = read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0xFFE, 4);
assert_eq!(bytes, Some(vec![0xAA, 0xBB, 0xCC, 0xDD]));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_u32_5level() {
let (mut buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
buf[data_pa as usize..data_pa as usize + 4].copy_from_slice(&0xDEAD_BEEFu32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, true), &info, 0),
Some(0xDEAD_BEEF)
);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_returns_both_types() {
let mut setup = setup_find_bpf_map_multi();
let map1_pa: u64 = 0x14000;
let map_type_off = setup.3.map_type;
let off = (map1_pa + map_type_off as u64) as usize;
setup.0[off..off + 4].copy_from_slice(&1u32.to_ne_bytes());
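// Rewrite this map's map_type to BPF_MAP_TYPE_HASH (1) so the fixture yields one HASH and one ARRAY map.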
let (buf, cr3_pa, idr_kva, offsets) = setup;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 2);
let hash_map = maps.iter().find(|m| m.name == "other.data");
let array_map = maps.iter().find(|m| m.name == "mitosis.bss");
assert!(hash_map.is_some(), "HASH map should be in results");
assert!(array_map.is_some(), "ARRAY map should be in results");
assert_eq!(hash_map.unwrap().map_type, BPF_MAP_TYPE_HASH);
assert!(hash_map.unwrap().value_kva.is_none());
assert_eq!(array_map.unwrap().map_type, BPF_MAP_TYPE_ARRAY);
assert!(array_map.unwrap().value_kva.is_some());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_single_entry() {
let (buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("test.bss", BPF_MAP_TYPE_ARRAY, 64);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 1);
assert_eq!(maps[0].name, "test.bss");
}
#[test]
fn find_all_bpf_maps_empty_idr() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let buf = vec![0u8; 0x2000];
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = 0x1000 + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, 0x10000, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert!(maps.is_empty());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_value_returns_none_for_non_array_map() {
let (buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "hash.map".into(),
map_type: BPF_MAP_TYPE_HASH,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0, 4).is_none());
assert!(read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 0).is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_value_returns_false_for_non_array_map() {
let (mut buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "hash.map".into(),
map_type: BPF_MAP_TYPE_HASH,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(!write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[1, 2, 3, 4],
));
assert!(!write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
42
));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_reads_map_flags() {
let (mut buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("flagged.bss", BPF_MAP_TYPE_ARRAY, 64);
let map_pa: u64 = 0x14000;
let flags_pa = (map_pa + offsets.map_flags as u64) as usize;
buf[flags_pa..flags_pa + 4].copy_from_slice(&0x0400u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 1);
assert_eq!(maps[0].map_flags, 0x0400);
}
#[test]
fn xa_node_shift_nonzero_offset() {
let node_pa: u64 = 0x1000;
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
let node_kva = page_offset.wrapping_add(node_pa);
let shift_off: usize = 8;
let mut buf = vec![0u8; 0x2000];
buf[node_pa as usize + shift_off] = 6;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(xa_node_shift(&mem, page_offset, node_kva, shift_off), 6);
assert_eq!(xa_node_shift(&mem, page_offset, node_kva, 0), 0);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_continues_past_untranslatable_entry() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
let xa_node_pa: u64 = 0x16000;
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
let bad_kva: u64 = 0xFFFF_C900_8000_0000;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
buf[xa_node_pa as usize] = 0; // xa_node shift byte (offset 0) = 0: leaf node, one entry per slot
write_u64(&mut buf, xa_node_pa + offsets.xa_node_slots as u64, bad_kva); // slot 0: KVA with no page-table mapping
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map_kva,
);
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map_pa + offsets.value_size as u64, 64);
let name = b"good.bss";
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
idr_kva,
);
let good = maps.iter().find(|m| m.name == "good.bss");
assert!(
good.is_some(),
"good.bss should be found despite bad entry at slot 0"
);
assert_eq!(good.unwrap().map_type, BPF_MAP_TYPE_ARRAY);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_value_rejects_out_of_bounds() {
let (buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 8,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 4, 4).is_some());
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 4, 5).is_none());
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 9, 1).is_none());
assert!(read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 6).is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_value_rejects_out_of_bounds() {
let (mut buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 8,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
};
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[0u8; 8],
));
assert!(!write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[0u8; 9],
));
assert!(!write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
6,
42
));
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
4,
42
));
}
#[test]
fn bpf_map_info_btf_fields_default_zero() {
let info = BpfMapInfo {
map_pa: 0x1000,
map_kva: 0,
name: "test".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 32,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
assert_eq!(info.btf_kva, 0);
assert_eq!(info.btf_value_type_id, 0);
}
#[test]
fn bpf_map_info_btf_fields_populated() {
let info = BpfMapInfo {
map_pa: 0x1000,
map_kva: 0,
name: "test".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 32,
max_entries: 0,
value_kva: None,
btf_kva: 0xFFFF_8880_0001_0000,
btf_value_type_id: 42,
};
assert_eq!(info.btf_kva, 0xFFFF_8880_0001_0000);
assert_eq!(info.btf_value_type_id, 42);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_populates_btf_fields() {
let (mut buf, cr3_pa, idr_kva, mut offsets) =
setup_find_bpf_map("test.bss", BPF_MAP_TYPE_ARRAY, 64);
offsets.map_btf = 56;
offsets.map_btf_value_type_id = 64;
let map_pa: u64 = 0x14000;
let btf_off = (map_pa + offsets.map_btf as u64) as usize;
let btf_tid_off = (map_pa + offsets.map_btf_value_type_id as u64) as usize;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 1);
assert_eq!(maps[0].btf_kva, 0);
assert_eq!(maps[0].btf_value_type_id, 0);
let btf_kva_val: u64 = 0xFFFF_8880_DEAD_0000;
buf[btf_off..btf_off + 8].copy_from_slice(&btf_kva_val.to_ne_bytes());
buf[btf_tid_off..btf_tid_off + 4].copy_from_slice(&7u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert_eq!(maps[0].btf_kva, btf_kva_val);
assert_eq!(maps[0].btf_value_type_id, 7);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_respects_idr_next_bound() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map_pa: u64 = 0x14000;
let map2_pa: u64 = 0x15000;
let map3_pa: u64 = 0x16000;
let idr_pa: u64 = 0x17000;
let xa_node_pa: u64 = 0x18000;
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let map2_kva: u64 = 0xFFFF_C900_0000_1000;
let map3_kva: u64 = 0xFFFF_C900_0000_2000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte1_idx = (map_kva >> 12) & 0x1FF;
let pte2_idx = (map2_kva >> 12) & 0x1FF;
let pte3_idx = (map3_kva >> 12) & 0x1FF;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x19000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte1_idx * 8, (map_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte3_idx * 8, (map3_pa + PTE_BASE) | 0x63);
buf[xa_node_pa as usize] = 0; // xa_node shift byte (offset 0) = 0: leaf node, one entry per slot
write_u64(&mut buf, xa_node_pa + offsets.xa_node_slots as u64, map_kva);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map2_kva,
);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 2 * 8,
map3_kva,
);
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
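// idr_next = 2 bounds the id scan, so the entry in slot 2 ("third.bss") must never be visited.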
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map_pa + offsets.value_size as u64, 32);
let name = b"first.bss";
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
write_u32(
&mut buf,
map2_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map2_pa + offsets.value_size as u64, 64);
let name = b"second.bss";
let name_pa = (map2_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
write_u32(
&mut buf,
map3_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map3_pa + offsets.value_size as u64, 128);
let name = b"third.bss";
let name_pa = (map3_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 2);
assert!(maps.iter().any(|m| m.name == "first.bss"));
assert!(maps.iter().any(|m| m.name == "second.bss"));
assert!(!maps.iter().any(|m| m.name == "third.bss"));
}
#[cfg(target_arch = "x86_64")]
fn setup_page_table_vmalloc() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFFFF_8000_8400_5000;
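// This KVA resolves to PGD (level-0) index 256, the first kernel-half slot: (kva >> 39) & 0x1FF == 256.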
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0x1234_5678_ABCD_EF00u64.to_ne_bytes());
(buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva), false);
assert_eq!(
pa,
Some(data_pa),
"L0[256] walk should resolve to data page"
);
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0x1234_5678_ABCD_EF00);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256_with_offset() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva + 0x100), false);
assert_eq!(pa, Some(data_pa + 0x100));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256_unmapped_neighbor() {
let (buf, cr3_pa, kva, _) = setup_page_table_vmalloc();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let kva_257 = kva + (1u64 << 39);
assert_eq!(mem.translate_kva(cr3_pa, Kva(kva_257), false), None);
}
#[cfg(target_arch = "aarch64")]
fn setup_page_table_vmalloc_64k() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFFFF_8000_8400_0000;
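// aarch64 64KB granule, 48-bit VA, 3-level walk: PGD index from VA[47:42], PMD from VA[41:29], PTE from VA[28:16].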
let pgd_idx = (kva >> 42) & 0x3F;
let pmd_idx = (kva >> 29) & 0x1FFF;
let pte_idx = (kva >> 16) & 0x1FFF;
let pgd_pa: u64 = 0x10000;
let pmd_pa: u64 = 0x20000;
let pte_pa: u64 = 0x30000;
let data_pa: u64 = 0x40000;
let size = (data_pa + 0x10000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pmd_pa + PTE_BASE) | 0x03);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x03);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x03);
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0x1234_5678_ABCD_EF00u64.to_ne_bytes());
(buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc_64k();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva), false);
assert_eq!(pa, Some(data_pa), "64KB vmalloc walk should resolve");
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0x1234_5678_ABCD_EF00);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k_with_offset() {
let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc_64k();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(cr3_pa, Kva(kva + 0x100), false);
assert_eq!(pa, Some(data_pa + 0x100));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k_unmapped_neighbor() {
let (buf, cr3_pa, kva, _) = setup_page_table_vmalloc_64k();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let unmapped = kva + (1u64 << 42);
assert_eq!(mem.translate_kva(cr3_pa, Kva(unmapped), false), None);
}
use crate::monitor::btf_offsets::HtabOffsets;
fn test_htab_offsets() -> HtabOffsets {
HtabOffsets {
htab_buckets: 200,
htab_n_buckets: 208,
bucket_size: 16,
bucket_head: 0,
hlist_nulls_head_first: 0,
hlist_nulls_node_next: 0,
htab_elem_size_base: 32,
}
}
fn test_htab_map_offsets() -> BpfMapOffsets {
BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: Some(test_htab_offsets()),
}
}
#[test]
fn iter_htab_entries_non_hash_map_returns_empty() {
let buf = [0u8; 256];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let offsets = test_htab_map_offsets();
let map = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test.bss".into(),
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 4,
value_size: 8,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, 0, &offsets, false), &map);
assert!(entries.is_empty());
}
#[test]
fn iter_htab_entries_no_htab_offsets_returns_empty() {
let buf = [0u8; 256];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let mut offsets = test_htab_map_offsets();
offsets.htab_offsets = None;
let map = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name: "test".into(),
map_type: BPF_MAP_TYPE_HASH,
map_flags: 0,
key_size: 4,
value_size: 8,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, 0, &offsets, false), &map);
assert!(entries.is_empty());
}
fn setup_htab_direct(
key_size: u32,
value_size: u32,
entries: &[(&[u8], &[u8])],
n_buckets: u32,
) -> (Vec<u8>, u64, BpfMapInfo, BpfMapOffsets) {
let htab = test_htab_offsets();
let offsets = test_htab_map_offsets();
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
let pa_to_kva = |pa: u64| -> u64 { page_offset.wrapping_add(pa) };
let htab_pa: u64 = 0x0000;
let buckets_pa: u64 = 0x1000;
let elems_start: u64 = 0x2000;
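// Simulated layout: bpf_htab struct at PA 0x0, bucket array at PA 0x1000, element structs from PA 0x2000; KVAs are modelled as page_offset + PA (direct map).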
let elem_data_size = htab.htab_elem_size_base
+ ((key_size as usize + 7) & !7)
+ ((value_size as usize + 7) & !7);
let elem_stride = elem_data_size.max(64);
let buf_size = elems_start as usize + entries.len() * elem_stride + 0x1000;
let mut buf = vec![0u8; buf_size];
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_u32(
&mut buf,
htab_pa + offsets.map_type as u64,
BPF_MAP_TYPE_HASH,
);
write_u32(&mut buf, htab_pa + offsets.key_size as u64, key_size);
write_u32(&mut buf, htab_pa + offsets.value_size as u64, value_size);
write_u64(
&mut buf,
htab_pa + htab.htab_buckets as u64,
pa_to_kva(buckets_pa),
);
write_u32(&mut buf, htab_pa + htab.htab_n_buckets as u64, n_buckets);
for i in 0..n_buckets {
let bucket_pa = buckets_pa + (i as u64) * (htab.bucket_size as u64);
write_u64(
&mut buf,
bucket_pa + htab.bucket_head as u64 + htab.hlist_nulls_head_first as u64,
(i as u64) << 1 | 1, // hlist_nulls "empty" marker for bucket i
);
}
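// Link elements back-to-front: each element's next holds the KVA of the following element (nulls marker 1 for the tail), and element 0 becomes the head of bucket 0.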
let mut prev_node_pa: Option<u64> = None;
for (idx, (key, val)) in entries.iter().enumerate().rev() {
let elem_pa = elems_start + (idx as u64) * (elem_stride as u64);
let elem_kva = pa_to_kva(elem_pa);
let key_off = elem_pa + htab.htab_elem_size_base as u64;
buf[key_off as usize..key_off as usize + key.len()].copy_from_slice(key);
let val_off = elem_pa + htab.htab_elem_size_base as u64 + ((key_size as u64 + 7) & !7);
buf[val_off as usize..val_off as usize + val.len()].copy_from_slice(val);
let next = match prev_node_pa {
Some(prev_pa) => pa_to_kva(prev_pa),
None => 1u64, // nulls marker terminates the chain
};
write_u64(&mut buf, elem_pa + htab.hlist_nulls_node_next as u64, next);
prev_node_pa = Some(elem_pa);
if idx == 0 {
write_u64(
&mut buf,
buckets_pa + htab.bucket_head as u64 + htab.hlist_nulls_head_first as u64,
elem_kva,
);
}
}
let map = BpfMapInfo {
map_pa: htab_pa,
map_kva: pa_to_kva(htab_pa),
name: "test_hash".into(),
map_type: BPF_MAP_TYPE_HASH,
map_flags: 0,
key_size,
value_size,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
(buf, page_offset, map, offsets)
}
#[test]
fn iter_htab_entries_empty_map() {
let (buf, page_offset, map, offsets) = setup_htab_direct(4, 8, &[], 4);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert!(entries.is_empty());
}
#[test]
fn iter_htab_entries_single_entry() {
let key = 42u32.to_ne_bytes();
let val = 0xDEAD_BEEF_CAFE_1234u64.to_ne_bytes();
let (buf, page_offset, map, offsets) = setup_htab_direct(4, 8, &[(&key, &val)], 4);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].0, key);
assert_eq!(entries[0].1, val);
}
#[test]
fn iter_htab_entries_multiple_entries() {
let k1 = 1u32.to_ne_bytes();
let v1 = 100u64.to_ne_bytes();
let k2 = 2u32.to_ne_bytes();
let v2 = 200u64.to_ne_bytes();
let k3 = 3u32.to_ne_bytes();
let v3 = 300u64.to_ne_bytes();
let (buf, page_offset, map, offsets) =
setup_htab_direct(4, 8, &[(&k1, &v1), (&k2, &v2), (&k3, &v3)], 4);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].0, k1);
assert_eq!(entries[0].1, v1);
assert_eq!(entries[1].0, k2);
assert_eq!(entries[1].1, v2);
assert_eq!(entries[2].0, k3);
assert_eq!(entries[2].1, v3);
}
#[test]
fn iter_htab_entries_zero_buckets() {
let key = 1u32.to_ne_bytes();
let val = 1u64.to_ne_bytes();
let (mut buf, page_offset, map, offsets) = setup_htab_direct(4, 8, &[(&key, &val)], 4);
let htab = test_htab_offsets();
buf[htab.htab_n_buckets..htab.htab_n_buckets + 4].copy_from_slice(&0u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert!(entries.is_empty());
}
#[test]
fn iter_htab_entries_larger_key_and_value() {
let key = 0xAAAA_BBBB_CCCC_DDDDu64.to_ne_bytes();
let val = [
0x11u8, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE,
0xFF, 0x00,
];
let (buf, page_offset, map, offsets) = setup_htab_direct(8, 16, &[(&key, &val)], 2);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].0, key);
assert_eq!(entries[0].1, val);
}
#[test]
fn iter_htab_entries_multi_bucket() {
let htab = test_htab_offsets();
let offsets = test_htab_map_offsets();
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
let pa_to_kva = |pa: u64| -> u64 { page_offset.wrapping_add(pa) };
let key_size: u32 = 4;
let value_size: u32 = 8;
let htab_pa: u64 = 0x0000;
let buckets_pa: u64 = 0x1000;
let elem_pa: u64 = 0x2000;
let n_buckets: u32 = 4;
let buf_size = 0x3000;
let mut buf = vec![0u8; buf_size];
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_u32(
&mut buf,
htab_pa + offsets.map_type as u64,
BPF_MAP_TYPE_HASH,
);
write_u32(&mut buf, htab_pa + offsets.key_size as u64, key_size);
write_u32(&mut buf, htab_pa + offsets.value_size as u64, value_size);
write_u64(
&mut buf,
htab_pa + htab.htab_buckets as u64,
pa_to_kva(buckets_pa),
);
write_u32(&mut buf, htab_pa + htab.htab_n_buckets as u64, n_buckets);
for i in 0..n_buckets {
let bp = buckets_pa + (i as u64) * (htab.bucket_size as u64);
write_u64(&mut buf, bp, (i as u64) << 1 | 1);
}
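// All bucket heads were initialised to empty nulls markers above; only bucket index 2 receives the single element.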
let bucket2_pa = buckets_pa + 2 * (htab.bucket_size as u64);
let elem_kva = pa_to_kva(elem_pa);
write_u64(&mut buf, bucket2_pa, elem_kva);
write_u64(&mut buf, elem_pa + htab.hlist_nulls_node_next as u64, 1);
let key_bytes = 99u32.to_ne_bytes();
let key_off = elem_pa + htab.htab_elem_size_base as u64;
buf[key_off as usize..key_off as usize + 4].copy_from_slice(&key_bytes);
let val_bytes = 0xBEEF_CAFEu64.to_ne_bytes();
let val_off = elem_pa + htab.htab_elem_size_base as u64 + ((key_size as u64 + 7) & !7);
buf[val_off as usize..val_off as usize + 8].copy_from_slice(&val_bytes);
let map = BpfMapInfo {
map_pa: htab_pa,
map_kva: pa_to_kva(htab_pa),
name: "multi_bucket".into(),
map_type: BPF_MAP_TYPE_HASH,
map_flags: 0,
key_size,
value_size,
max_entries: 0,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let entries = iter_htab_entries(&lookup_ctx(&mem, 0, page_offset, &offsets, false), &map);
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].0, key_bytes);
assert_eq!(entries[0].1, val_bytes);
}
#[cfg(target_arch = "x86_64")]
fn setup_percpu_array(
num_cpus: u32,
max_entries: u32,
value_size: u32,
) -> (Vec<u8>, u64, u64, BpfMapInfo, BpfMapOffsets, Vec<u64>) {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let array_pa: u64 = 0x14000;
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
let percpu_base_pa: u64 = 0x20000;
let percpu_stride: u64 = 0x1000;
let elem_size = ((value_size as u64 + 7) & !7) * max_entries as u64;
let total_size = (percpu_base_pa + percpu_stride * num_cpus as u64 + elem_size) as usize;
let mut buf = vec![0u8; total_size.max(0x30000)];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
if off + 8 <= buf.len() {
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
}
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (array_pa + PTE_BASE) | 0x63);
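// Per-CPU layout: the pptrs array sits at array_pa + array_value; each entry's pptr is a direct-map KVA of that entry's per-CPU base, and per_cpu_offsets[cpu] (one 0x1000 page per CPU) selects a given CPU's copy.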
let percpu_base_kva = percpu_base_pa + page_offset;
let per_cpu_offsets: Vec<u64> = (0..num_cpus)
.map(|cpu| cpu as u64 * percpu_stride)
.collect();
let pptrs_pa = array_pa + offsets.array_value as u64;
for entry in 0..max_entries {
let pptr_value = percpu_base_kva + entry as u64 * ((value_size as u64 + 7) & !7);
write_u64(&mut buf, pptrs_pa + entry as u64 * 8, pptr_value);
}
let info = BpfMapInfo {
map_pa: array_pa,
map_kva,
name: "test_percpu".into(),
map_type: BPF_MAP_TYPE_PERCPU_ARRAY,
map_flags: 0,
key_size: 4,
value_size,
max_entries,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
(buf, pgd_pa, page_offset, info, offsets, per_cpu_offsets)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_basic() {
let num_cpus = 4u32;
let value_size = 8u32;
let (mut buf, cr3_pa, page_offset, info, offsets, per_cpu_offsets) =
setup_percpu_array(num_cpus, 1, value_size);
let percpu_base_pa: u64 = 0x20000;
let stride: u64 = 0x1000;
for cpu in 0..num_cpus {
let pa = percpu_base_pa + cpu as u64 * stride;
buf[pa as usize..pa as usize + 8]
.copy_from_slice(&((cpu as u64 + 1) * 0x1111).to_ne_bytes());
}
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert_eq!(result.len(), num_cpus as usize);
for (cpu, entry) in result.iter().enumerate() {
let bytes = entry.as_ref().expect("CPU value should be Some");
let val = u64::from_ne_bytes(bytes[..8].try_into().unwrap());
assert_eq!(val, (cpu as u64 + 1) * 0x1111);
}
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_key_out_of_bounds() {
let (buf, cr3_pa, page_offset, info, offsets, per_cpu_offsets) =
setup_percpu_array(2, 1, 8);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
1,
&per_cpu_offsets,
);
assert!(result.is_empty());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_wrong_map_type() {
let (buf, cr3_pa, page_offset, mut info, offsets, per_cpu_offsets) =
setup_percpu_array(2, 1, 8);
info.map_type = BPF_MAP_TYPE_ARRAY;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert!(result.is_empty());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_zero_pptr() {
let (mut buf, cr3_pa, page_offset, info, offsets, per_cpu_offsets) =
setup_percpu_array(2, 1, 8);
let pptrs_pa = (0x14000 + offsets.array_value as u64) as usize;
buf[pptrs_pa..pptrs_pa + 8].copy_from_slice(&0u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert!(result.is_empty());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_multiple_entries() {
let num_cpus = 2u32;
let value_size = 4u32;
let max_entries = 3u32;
let (mut buf, cr3_pa, page_offset, info, offsets, per_cpu_offsets) =
setup_percpu_array(num_cpus, max_entries, value_size);
let percpu_base_pa: u64 = 0x20000;
let stride: u64 = 0x1000;
let elem_size = 8u64; // value_size (4) rounded up to 8, matching the pptr stride in setup_percpu_array
for key in 0..max_entries {
for cpu in 0..num_cpus {
let pa = percpu_base_pa + cpu as u64 * stride + key as u64 * elem_size;
let val: u32 = key * 100 + cpu;
buf[pa as usize..pa as usize + 4].copy_from_slice(&val.to_ne_bytes());
}
}
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
for key in 0..max_entries {
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
key,
&per_cpu_offsets,
);
assert_eq!(result.len(), num_cpus as usize);
for (cpu, entry) in result.iter().enumerate() {
let bytes = entry.as_ref().expect("CPU value should be Some");
let val = u32::from_ne_bytes(bytes[..4].try_into().unwrap());
assert_eq!(val, key * 100 + cpu as u32);
}
}
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_cpu_out_of_guest_memory() {
let (buf, cr3_pa, page_offset, info, offsets, _) = setup_percpu_array(2, 1, 8);
let bad_offset = buf.len() as u64 + 0x10000;
let per_cpu_offsets = vec![0u64, bad_offset];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert_eq!(result.len(), 2);
assert!(result[0].is_some(), "CPU 0 should be readable");
assert!(
result[1].is_none(),
"CPU 1 should be None (out of guest memory)"
);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_zero_cpus() {
let (buf, cr3_pa, page_offset, info, offsets, per_cpu_offsets) =
setup_percpu_array(0, 1, 8);
assert!(per_cpu_offsets.is_empty());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert!(result.is_empty(), "zero CPUs should produce empty result");
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_percpu_array_mixed_translatable() {
let num_cpus = 4u32;
let value_size = 8u32;
let (mut buf, cr3_pa, page_offset, info, offsets, _) =
setup_percpu_array(num_cpus, 1, value_size);
let percpu_base_pa: u64 = 0x20000;
let stride: u64 = 0x1000;
buf[percpu_base_pa as usize..percpu_base_pa as usize + 8]
.copy_from_slice(&0xAAAAu64.to_ne_bytes());
let cpu2_pa = percpu_base_pa + 2 * stride;
buf[cpu2_pa as usize..cpu2_pa as usize + 8].copy_from_slice(&0xCCCCu64.to_ne_bytes());
let bad = buf.len() as u64 + 0x10000;
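// CPUs 1 and 3 get offsets past the end of guest memory, so their reads must fail.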
let per_cpu_offsets = vec![0, bad, 2 * stride, bad + stride];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = read_percpu_array_value(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert_eq!(result.len(), 4);
let v0 = result[0].as_ref().expect("CPU 0 should be Some");
assert_eq!(u64::from_ne_bytes(v0[..8].try_into().unwrap()), 0xAAAA);
assert!(result[1].is_none(), "CPU 1 should be None");
let v2 = result[2].as_ref().expect("CPU 2 should be Some");
assert_eq!(u64::from_ne_bytes(v2[..8].try_into().unwrap()), 0xCCCC);
assert!(result[3].is_none(), "CPU 3 should be None");
}
#[test]
fn read_percpu_array_unmapped_bpf_array() {
let buf = vec![0u8; 0x20000];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
btf_data: 0,
btf_data_size: 0,
htab_offsets: None,
};
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0xFFFF_C900_DEAD_0000,
name: "test_percpu".into(),
map_type: BPF_MAP_TYPE_PERCPU_ARRAY,
map_flags: 0,
key_size: 4,
value_size: 8,
max_entries: 1,
value_kva: None,
btf_kva: 0,
btf_value_type_id: 0,
};
let per_cpu_offsets = vec![0u64, 0x1000];
let result = read_percpu_array_value(
&lookup_ctx(&mem, 0, 0xFFFF_8880_0000_0000, &offsets, false),
&info,
0,
&per_cpu_offsets,
);
assert!(
result.is_empty(),
"unmapped bpf_array should return empty vec"
);
}
}