use super::*;
use crate::monitor::idr::{XA_CHUNK_SIZE, xa_node_shift};
use crate::monitor::symbols::START_KERNEL_MAP;
pub(super) use crate::monitor::test_util::name_from_str;
#[cfg(target_arch = "x86_64")]
/// Builds an `AccessorCtx` for map-value read/write tests: identity
/// page offset, empty layout offsets, and zeroed arch-specific fields.
fn value_ctx<'a>(mem: &'a GuestMem, cr3_pa: u64, l5: bool) -> AccessorCtx<'a> {
    AccessorCtx {
        offsets: &BpfMapOffsets::EMPTY,
        page_offset: PageOffset(0),
        cr3_pa: Cr3Pa(cr3_pa),
        start_kernel_map: START_KERNEL_MAP,
        tcr_el1: 0,
        phys_base: 0,
        l5,
        mem,
    }
}
/// Builds an `AccessorCtx` for BPF-map lookup tests with an explicit
/// direct-map `page_offset` and caller-supplied struct-field offsets.
pub(super) fn lookup_ctx<'a>(
    mem: &'a GuestMem,
    cr3_pa: u64,
    page_offset: u64,
    offsets: &'a BpfMapOffsets,
    l5: bool,
) -> AccessorCtx<'a> {
    AccessorCtx {
        offsets,
        page_offset: PageOffset(page_offset),
        cr3_pa: Cr3Pa(cr3_pa),
        start_kernel_map: START_KERNEL_MAP,
        tcr_el1: 0,
        phys_base: 0,
        l5,
        mem,
    }
}
// Base added to every page-table entry's physical pointer; on x86 guest
// RAM starts at 0 so no bias is needed.
#[cfg(target_arch = "x86_64")]
pub(super) const PTE_BASE: u64 = 0;
// On aarch64 guest RAM begins at DRAM_START, so table pointers carry it.
#[cfg(target_arch = "aarch64")]
pub(super) const PTE_BASE: u64 = crate::vmm::kvm::DRAM_START;
// x86 large-page leaf entry flags: 0xE3 = P|RW|A|D plus PS (bit 7), which
// marks the entry as a 2 MiB / 1 GiB mapping rather than a table pointer.
#[cfg(target_arch = "x86_64")]
const BLOCK_FLAGS: u64 = 0xE3;
// aarch64 descriptor: valid bit set, bit 1 clear — a block, not a table.
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)] const BLOCK_FLAGS: u64 = 0x01;
#[cfg(target_arch = "x86_64")]
// Builds a 4-level x86-64 page table in a flat buffer mapping one 4 KiB
// page at `kva`. Returns (buffer, PGD physical address — the CR3 value,
// the mapped KVA, and the data page PA). The data page begins with a known
// u64 marker so callers can verify reads through the translation.
fn setup_page_table() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFFFF_8880_0000_5000;
// 9-bit table indices per level, from bit 39 (PGD) down to bit 12 (PTE).
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
// One 4 KiB frame per level, laid out consecutively from 0x10000.
let pgd_pa: u64 = 0x10000; let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
// Link the four levels; 0x63 sets present/writable/accessed/dirty bits.
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
// Marker value at the start of the data page for read-back checks.
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0xDEAD_BEEF_CAFE_1234u64.to_ne_bytes());
(buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_basic() {
    // A fully populated 4-level walk resolves the KVA to the data page,
    // whose marker value is then readable through the returned PA.
    let (backing, cr3, vaddr, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let pa = mem.translate_kva(cr3, Kva(vaddr), false, 0);
    assert_eq!(pa, Some(data_pa));
    assert_eq!(mem.read_u64(pa.unwrap(), 0), 0xDEAD_BEEF_CAFE_1234);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_with_offset() {
    // The low 12 bits of the address pass through translation untouched.
    let (backing, cr3, vaddr, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(
        mem.translate_kva(cr3, Kva(vaddr + 0x100), false, 0),
        Some(data_pa + 0x100)
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_unmapped() {
    // An address in a region the table never maps must fail to translate.
    let (backing, cr3, _, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(
        mem.translate_kva(cr3, Kva(0xFFFF_FFFF_8000_0000), false, 0),
        None
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_unmapped_pte() {
    // The neighbouring page shares the PGD/PUD/PMD entries but has no PTE,
    // so the walk must fail at the last level.
    let (backing, cr3, vaddr, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(mem.translate_kva(cr3, Kva(vaddr + 0x1000), false, 0), None);
}
#[test]
#[cfg(target_arch = "x86_64")]
// A PMD entry carrying BLOCK_FLAGS maps a 2 MiB region directly: the walk
// must stop at the PMD level and fold the low 21 bits of the KVA into the
// returned physical address.
fn translate_kva_2mb_huge_page() {
let kva: u64 = 0xFFFF_8880_0020_0000; let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
// 2 MiB-aligned frame backing the huge mapping.
let huge_page_pa: u64 = 0x20_0000;
let size = (huge_page_pa + 0x20_0000) as usize; let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
// Leaf entry at the PMD level: BLOCK_FLAGS marks it as a huge page.
write_entry(
&mut buf,
pmd_pa,
pmd_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
// Marker at the start of the huge page for read-back verification.
buf[huge_page_pa as usize..huge_page_pa as usize + 8]
.copy_from_slice(&0xCAFE_BABE_1234_5678u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pgd_pa, Kva(kva), false, 0);
assert_eq!(pa, Some(huge_page_pa));
assert_eq!(mem.read_u64(pa.unwrap(), 0), 0xCAFE_BABE_1234_5678);
// An offset beyond 4 KiB stays inside the same 2 MiB mapping.
let pa_off = mem.translate_kva(pgd_pa, Kva(kva + 0x1000), false, 0);
assert_eq!(pa_off, Some(huge_page_pa + 0x1000));
}
#[test]
#[cfg(target_arch = "x86_64")]
// A PUD entry carrying BLOCK_FLAGS maps a 1 GiB region: the walk stops at
// the PUD level and the low 30 bits of the KVA carry through to the PA.
fn translate_kva_1gb_huge_page() {
let kva: u64 = 0xFFFF_8880_4000_0000; let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
// 1 GiB-aligned frame; the buffer need not cover it since no data is
// read through the mapping in this test — only the translation result.
let huge_page_pa: u64 = 0x4000_0000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
// Leaf entry at the PUD level marks the 1 GiB block mapping.
write_entry(
&mut buf,
pud_pa,
pud_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pgd_pa, Kva(kva), false, 0);
assert_eq!(pa, Some(huge_page_pa));
// A large in-block offset is preserved verbatim.
let pa_off = mem.translate_kva(pgd_pa, Kva(kva + 0x1234_5678), false, 0);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234_5678));
}
#[test]
// The walk must stop when the top-level (PGD) entry's present/valid bit
// (bit 0) is clear.
// NOTE(review): unlike the sibling translate_kva tests this one is not
// gated on #[cfg(target_arch = "x86_64")] although it builds an x86-style
// table — confirm it is intended to run on all architectures.
fn translate_kva_pgd_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let size = (pgd_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let off = (pgd_pa + pgd_idx * 8) as usize;
// 0x2000 has bit 0 clear: a non-present entry.
buf[off..off + 8].copy_from_slice(&0x2000u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false, 0), None);
}
#[test]
// A present PGD entry pointing at a PUD whose entry is non-present
// (bit 0 clear) must make the walk fail at the second level.
// NOTE(review): not gated on x86_64 like the sibling tests — confirm.
fn translate_kva_pud_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
// PUD entry 0x3000: address bits set but present bit clear.
write_entry(&mut buf, pud_pa, pud_idx, 0x3000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false, 0), None);
}
#[test]
// Present PGD and PUD entries but a non-present PMD entry must make the
// walk fail at the third level.
// NOTE(review): not gated on x86_64 like the sibling tests — confirm.
fn translate_kva_pmd_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let size = (pmd_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
// PMD entry 0x4000: present bit (bit 0) clear.
write_entry(&mut buf, pmd_pa, pmd_idx, 0x4000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false, 0), None);
}
#[test]
// All intermediate levels are present but the final PTE is non-present, so
// the walk must fail at the very last step.
// NOTE(review): not gated on x86_64 like the sibling tests — confirm.
fn translate_kva_pte_not_present() {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let size = (pte_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
// PTE 0x5000: present bit (bit 0) clear.
write_entry(&mut buf, pte_pa, pte_idx, 0x5000);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pgd_pa, Kva(kva), false, 0), None);
}
#[cfg(target_arch = "x86_64")]
// Builds two independent 4-level page tables in one buffer, both mapping
// the same KVA but to different data pages (A holds 0xAAAA.., B holds
// 0xBBBB..). Returns (buffer, pgd_a, pgd_b, kva, data_a, data_b); used to
// verify that cached translations are keyed by the paging root (CR3).
fn setup_two_page_tables() -> (Vec<u8>, u64, u64, u64, u64, u64) {
let kva: u64 = 0xFFFF_8880_0000_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
// Table A occupies 0x10000..; table B follows right after A's data page.
let pgd_a: u64 = 0x10000;
let pud_a: u64 = pgd_a + 0x1000;
let pmd_a: u64 = pud_a + 0x1000;
let pte_a: u64 = pmd_a + 0x1000;
let data_a: u64 = pte_a + 0x1000;
let pgd_b: u64 = data_a + 0x1000;
let pud_b: u64 = pgd_b + 0x1000;
let pmd_b: u64 = pud_b + 0x1000;
let pte_b: u64 = pmd_b + 0x1000;
let data_b: u64 = pte_b + 0x1000;
let size = (data_b + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_a, pgd_idx, (pud_a + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_a, pud_idx, (pmd_a + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_a, pmd_idx, (pte_a + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_a, pte_idx, (data_a + PTE_BASE) | 0x63);
write_entry(&mut buf, pgd_b, pgd_idx, (pud_b + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_b, pud_idx, (pmd_b + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_b, pmd_idx, (pte_b + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_b, pte_idx, (data_b + PTE_BASE) | 0x63);
// Distinct markers let callers tell which table served a translation.
buf[data_a as usize..data_a as usize + 8]
.copy_from_slice(&0xAAAA_AAAA_AAAA_AAAAu64.to_ne_bytes());
buf[data_b as usize..data_b as usize + 8]
.copy_from_slice(&0xBBBB_BBBB_BBBB_BBBBu64.to_ne_bytes());
(buf, pgd_a, pgd_b, kva, data_a, data_b)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_tlb_hit_same_page_returns_consistent_pa() {
    // Repeated translations of the same page (and an offset within it)
    // must agree whether served by a fresh walk or a cached entry.
    let (backing, cr3, vaddr, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for (probe, want) in [
        (vaddr, data_pa),
        (vaddr, data_pa),
        (vaddr + 0x100, data_pa + 0x100),
    ] {
        assert_eq!(mem.translate_kva(cr3, Kva(probe), false, 0), Some(want));
    }
}
#[test]
#[cfg(target_arch = "x86_64")]
// Two adjacent virtual pages share all upper table levels but have
// distinct PTEs; translating one after the other must not let a cached
// entry for page A leak into the result for page B (or vice versa).
fn translate_kva_tlb_miss_different_page_does_not_alias() {
let kva_a: u64 = 0xFFFF_8880_0000_5000;
// kva_b is the next 4 KiB page: same PGD/PUD/PMD indices, different PTE.
let kva_b: u64 = 0xFFFF_8880_0000_6000; let pgd_idx = (kva_a >> 39) & 0x1FF;
let pud_idx = (kva_a >> 30) & 0x1FF;
let pmd_idx = (kva_a >> 21) & 0x1FF;
let pte_idx_a = (kva_a >> 12) & 0x1FF;
let pte_idx_b = (kva_b >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_a: u64 = pte_pa + 0x1000;
let data_b: u64 = data_a + 0x1000;
let size = (data_b + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
// Both PTEs live in the same table, pointing at different data frames.
write_entry(&mut buf, pte_pa, pte_idx_a, (data_a + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx_b, (data_b + PTE_BASE) | 0x63);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa_a1 = mem.translate_kva(pgd_pa, Kva(kva_a), false, 0);
assert_eq!(pa_a1, Some(data_a));
let pa_b = mem.translate_kva(pgd_pa, Kva(kva_b), false, 0);
assert_eq!(pa_b, Some(data_b));
// Translating A again after B must still yield A's frame.
let pa_a2 = mem.translate_kva(pgd_pa, Kva(kva_a), false, 0);
assert_eq!(pa_a2, Some(data_a));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_tlb_cr3_mismatch_invalidates() {
    // The same KVA under two different roots maps to two different pages;
    // switching CR3 back and forth must never serve a stale translation.
    let (backing, cr3_a, cr3_b, vaddr, data_a, data_b) = setup_two_page_tables();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for (cr3, want_pa, want_marker) in [
        (cr3_a, data_a, 0xAAAA_AAAA_AAAA_AAAAu64),
        (cr3_b, data_b, 0xBBBB_BBBB_BBBB_BBBB),
        (cr3_a, data_a, 0xAAAA_AAAA_AAAA_AAAA),
    ] {
        let got = mem.translate_kva(cr3, Kva(vaddr), false, 0);
        assert_eq!(got, Some(want_pa));
        assert_eq!(mem.read_u64(got.unwrap(), 0), want_marker);
    }
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_tlb_l5_mismatch_invalidates() {
    // An intervening 5-level lookup of the same KVA must not poison the
    // cached 4-level translation.
    let (backing, cr3, vaddr, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(mem.translate_kva(cr3, Kva(vaddr), false, 0), Some(data_pa));
    let _ = mem.translate_kva(cr3, Kva(vaddr), true, 0);
    assert_eq!(mem.translate_kva(cr3, Kva(vaddr), false, 0), Some(data_pa));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_u32_roundtrip() {
    // Writing a u32 through the map accessor at offset 4 must land at
    // data_pa + 4 (the page backing value_kva) and read back identically.
    let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    // Call name_from_str once and reuse both halves instead of calling it
    // twice (the original invoked it separately for bytes and length).
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    assert!(write_bpf_map_value_u32(
        &value_ctx(&mem, cr3_pa, false),
        &info,
        4,
        0xABCD_1234,
    ));
    assert_eq!(mem.read_u32(data_pa, 4), 0xABCD_1234);
}
#[test]
fn read_bytes_basic() {
    // A read fully inside the region fills the whole output buffer and
    // reports the full length copied.
    let backing: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let mut dst = [0u8; 4];
    assert_eq!(mem.read_bytes(2, &mut dst), 4);
    assert_eq!(dst, [3, 4, 5, 6]);
}
#[test]
fn read_bytes_past_end() {
    // Reads are truncated at the end of guest memory: only the available
    // bytes are copied and the returned count reflects that.
    let backing = [1u8, 2, 3, 4];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let mut dst = [0u8; 8];
    let copied = mem.read_bytes(2, &mut dst);
    assert_eq!(copied, 2);
    assert_eq!(&dst[..copied as usize], &[3, 4]);
}
#[test]
fn read_bytes_at_boundary() {
    // A read starting exactly at the end of the region copies nothing.
    let backing = [0xFFu8; 8];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let mut dst = [0u8; 8];
    assert_eq!(mem.read_bytes(8, &mut dst), 0);
}
#[test]
fn write_u32_roundtrip() {
    // write_u32 must be observable both through read_u32 and through the
    // raw backing bytes (native endianness).
    let mut backing = [0u8; 16];
    let mem = unsafe { GuestMem::new(backing.as_mut_ptr(), backing.len() as u64) };
    mem.write_u32(4, 0, 0xDEAD_BEEF);
    assert_eq!(mem.read_u32(4, 0), 0xDEAD_BEEF);
    let raw = u32::from_ne_bytes(backing[4..8].try_into().unwrap());
    assert_eq!(raw, 0xDEAD_BEEF);
}
#[test]
fn xa_load_zero_head() {
    // A NULL xa_head means an empty XArray: every index resolves to 0.
    let backing = [0u8; 64];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for idx in [0u64, 5] {
        assert_eq!(xa_load(&mem, 0, 0, idx, 0, 0), Some(0));
    }
}
#[test]
fn xa_load_single_entry_index_zero() {
    // When the head is a plain pointer (tag bit 1 clear) the XArray holds
    // exactly one entry at index 0, returned without any node walk.
    let head: u64 = 0xFFFF_8880_0001_0000;
    assert_eq!(head & 2, 0);
    let backing = [0u8; 8];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(xa_load(&mem, 0, head, 0, 0, 0), Some(head));
}
#[test]
fn xa_load_single_entry_index_nonzero() {
    // A single-entry XArray only covers index 0; other indices are empty.
    let head: u64 = 0xFFFF_8880_0001_0000;
    let backing = [0u8; 8];
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for idx in [1u64, 63] {
        assert_eq!(xa_load(&mem, 0, head, idx, 0, 0), Some(0));
    }
}
// Builds a single XArray node in a flat buffer. `slots` lists
// (slot index, entry value) pairs to place at `slots_off` within the node.
// Returns (buffer, xa_head with the internal-node tag bit set, and the
// direct-map page_offset used to convert the node's KVA back to a PA).
fn setup_xa_node(slots: &[(u64, u64)], slots_off: usize) -> (Vec<u8>, u64, u64) {
let node_pa: u64 = 0x1000;
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
// Direct-map convention in these tests: kva = page_offset + pa.
let node_kva = page_offset.wrapping_add(node_pa);
let size = (node_pa as usize) + slots_off + XA_CHUNK_SIZE as usize * 8 + 8;
let mut buf = vec![0u8; size];
// Byte at the node base is its shift (tests use xa_node_shift offset 0);
// shift 0 means the slots hold leaf entries directly.
buf[node_pa as usize] = 0;
for &(idx, entry) in slots {
let slot_pa = node_pa + slots_off as u64 + idx * 8;
buf[slot_pa as usize..slot_pa as usize + 8].copy_from_slice(&entry.to_ne_bytes());
}
// Bit 1 tags the head as an internal node pointer rather than a value.
let xa_head = node_kva | 2;
(buf, xa_head, page_offset)
}
#[test]
fn xa_load_multi_entry_populated_slot() {
    // A node-tagged head walks into the slot array; slot 3 is populated.
    let slots_off = 16;
    let stored: u64 = 0xDEAD_0000;
    let (backing, head, page_offset) = setup_xa_node(&[(3, stored)], slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(xa_load(&mem, page_offset, head, 3, slots_off, 0), Some(stored));
}
#[test]
fn xa_load_multi_entry_empty_slot() {
    // A node with no populated slots yields 0 for every probed index.
    let slots_off = 16;
    let (backing, head, page_offset) = setup_xa_node(&[], slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for idx in [0u64, 5] {
        assert_eq!(xa_load(&mem, page_offset, head, idx, slots_off, 0), Some(0));
    }
}
#[test]
fn xa_load_multi_entry_multiple_slots() {
    // First, middle, and last slots of a node all resolve independently,
    // and an untouched slot in between reads back as empty.
    let slots_off = 16;
    let entries = [
        (0u64, 0xAAAA_0000u64),
        (7, 0xBBBB_0000),
        (63, 0xCCCC_0000),
    ];
    let (backing, head, page_offset) = setup_xa_node(&entries, slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    for &(idx, want) in &entries {
        assert_eq!(xa_load(&mem, page_offset, head, idx, slots_off, 0), Some(want));
    }
    // Slot 1 was never written.
    assert_eq!(xa_load(&mem, page_offset, head, 1, slots_off, 0), Some(0));
}
#[cfg(target_arch = "x86_64")]
// Builds a guest-memory image with one bpf_map reachable through an IDR
// whose xa_head is a plain pointer (single-entry XArray, no node). A
// 4-level page table maps the map's vmalloc-range KVA to its physical
// page. Returns (buffer, PGD PA to use as CR3, the IDR's kernel virtual
// address, and the struct-field offsets used to lay the data out).
fn setup_find_bpf_map(
map_name: &str,
map_type: u32,
value_size: u32,
) -> (Vec<u8>, u64, u64, BpfMapOffsets) {
// Hand-picked field offsets; only the fields exercised here are non-zero.
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
// Physical layout: page-table frames, then the map object and the IDR.
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
// vmalloc-style KVA for the map; resolved through the page table above.
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
let size = 0x16000;
let mut buf = vec![0u8; size];
// Native-endian u64/u32 pokes at raw physical offsets.
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
// Four-level table mapping map_kva -> map_pa (0x63 = present|rw|a|d).
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
// xa_head holds the map's KVA directly (untagged: single-entry XArray).
write_u64(&mut buf, idr_pa + offsets.idr_xa_head as u64, map_kva);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 1);
// Populate the map object's type, value_size, and name fields.
write_u32(&mut buf, map_pa + offsets.map_type as u64, map_type);
write_u32(&mut buf, map_pa + offsets.value_size as u64, value_size);
let name_bytes = map_name.as_bytes();
let name_pa = map_pa + offsets.map_name as u64;
buf[name_pa as usize..name_pa as usize + name_bytes.len()].copy_from_slice(name_bytes);
// The IDR itself is addressed through the kernel image mapping.
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
(buf, pgd_pa, idr_kva, offsets)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_discovers_matching_map() {
    // A single ARRAY map whose name ends in ".bss" must be found, with the
    // metadata and the derived value address filled in.
    let (backing, cr3, idr_kva, offsets) =
        setup_find_bpf_map("mitosis.bss", BPF_MAP_TYPE_ARRAY, 64);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3, 0xFFFF_8880_0000_0000, &offsets, false);
    let info = find_bpf_map(&ctx, idr_kva, ".bss").expect("should find the map");
    assert_eq!(info.name(), "mitosis.bss");
    assert_eq!(info.map_type, BPF_MAP_TYPE_ARRAY);
    assert_eq!(info.value_size, 64);
    assert_eq!(info.map_pa, 0x14000);
    let expected_value_kva = 0xFFFF_C900_0000_0000u64 + offsets.array_value as u64;
    assert_eq!(info.value_kva, Some(expected_value_kva));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_no_match_wrong_suffix() {
    // The only map ends in ".bss", so a ".data" lookup must come up empty.
    let (backing, cr3, idr_kva, offsets) =
        setup_find_bpf_map("mitosis.bss", BPF_MAP_TYPE_ARRAY, 64);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3, 0xFFFF_8880_0000_0000, &offsets, false);
    assert!(find_bpf_map(&ctx, idr_kva, ".data").is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_non_array_type() {
    // The name matches but the map type is not ARRAY (type 1), so the
    // lookup must reject it.
    let (backing, cr3, idr_kva, offsets) = setup_find_bpf_map("test.bss", 1, 64);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3, 0xFFFF_8880_0000_0000, &offsets, false);
    assert!(find_bpf_map(&ctx, idr_kva, ".bss").is_none());
}
#[test]
// With an all-zero IDR (xa_head == 0, idr_next == 0) there is nothing to
// iterate, so the lookup must return None.
fn find_bpf_map_empty_idr() {
// Same offsets as setup_find_bpf_map; duplicated here because that helper
// is compiled only on x86_64 while this test is architecture-neutral.
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
let idr_pa: u64 = 0x1000;
let size = 0x2000;
// The buffer stays zeroed, so the IDR's xa_head reads back as 0.
let buf = vec![0u8; size];
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, 0x10000, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(result.is_none());
}
#[cfg(target_arch = "x86_64")]
// Builds a 5-level (la57) page table mapping one 4 KiB page. Returns
// (buffer, PML5 physical address for CR3, the mapped KVA, and the data
// page PA, which begins with a known u64 marker).
fn setup_5level_page_table() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFF11_8880_0000_5000;
// Five 9-bit indices, from bit 48 (PML5) down to bit 12 (PTE).
let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
// One 4 KiB frame per level, laid out consecutively from 0x10000.
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Stores a native-endian 8-byte table entry at `base + idx * 8`.
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
// Link all five levels; 0x63 sets present/writable/accessed/dirty bits.
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
// Marker value for read-back verification.
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0x5555_AAAA_1234_5678u64.to_ne_bytes());
(buf, pml5_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_basic() {
    // With la57 enabled (l5 = true) the walk starts at the PML5 table and
    // must reach the data page, whose marker is then readable.
    let (backing, cr3, vaddr, data_pa) = setup_5level_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let pa = mem.translate_kva(cr3, Kva(vaddr), true, 0);
    assert_eq!(pa, Some(data_pa));
    assert_eq!(mem.read_u64(pa.unwrap(), 0), 0x5555_AAAA_1234_5678);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_with_offset() {
    // The page-internal offset must survive a 5-level translation.
    let (backing, cr3, vaddr, data_pa) = setup_5level_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(
        mem.translate_kva(cr3, Kva(vaddr + 0x100), true, 0),
        Some(data_pa + 0x100)
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_unmapped_pml5() {
    // A KVA selecting a different PML5 entry hits an empty top-level slot
    // and must fail to translate.
    let (backing, cr3, _, _) = setup_5level_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(
        mem.translate_kva(cr3, Kva(0xFF22_8880_0000_5000), true, 0),
        None
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_vs_4level_same_buffer() {
    // Interpreting the same root as a 4-level PGD versus a 5-level PML5
    // must produce different results for a la57-range KVA.
    let (backing, cr3, vaddr, _) = setup_5level_page_table();
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let four_level = mem.translate_kva(cr3, Kva(vaddr), false, 0);
    let five_level = mem.translate_kva(cr3, Kva(vaddr), true, 0);
    assert_ne!(four_level, five_level);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_bytes_roundtrip() {
    // A byte-slice write through the map accessor must land verbatim at
    // the physical page backing value_kva.
    let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    // Call name_from_str once and reuse both halves instead of calling it
    // twice (the original invoked it separately for bytes and length).
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 16,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let payload = [0xDE, 0xAD, 0xBE, 0xEF];
    assert!(write_bpf_map_value(
        &value_ctx(&mem, cr3_pa, false),
        &info,
        0,
        &payload
    ));
    // Verify the bytes landed in the raw backing buffer at data_pa.
    for (i, &expected) in payload.iter().enumerate() {
        assert_eq!(buf[data_pa as usize + i], expected);
    }
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_fails_on_unmapped_kva() {
    // When value_kva has no mapping in the page table, the write must be
    // rejected (return false) rather than scribbling anywhere.
    let (mut buf, cr3_pa, _, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    // Call name_from_str once and reuse both halves instead of calling it
    // twice (the original invoked it separately for bytes and length).
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 16,
        max_entries: 0,
        // Deliberately absent from the table built by setup_page_table.
        value_kva: Some(0xFFFF_FFFF_8000_0000),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    assert!(!write_bpf_map_value(
        &value_ctx(&mem, cr3_pa, false),
        &info,
        0,
        &[0xFF]
    ));
}
// Builds a two-level XArray: a root node (shift 6) whose `child_slot`
// points at a shift-0 child node, which in turn holds `leaf_entry` at
// `leaf_slot`. Returns (buffer, node-tagged xa_head, page_offset).
fn setup_two_level_xarray(
child_slot: u64,
leaf_slot: u64,
leaf_entry: u64,
slots_off: usize,
) -> (Vec<u8>, u64, u64) {
let root_pa: u64 = 0x1000;
let child_pa: u64 = 0x2000;
let page_offset: u64 = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
// Direct-map convention: kva = page_offset + pa.
let root_kva = page_offset.wrapping_add(root_pa);
let child_kva = page_offset.wrapping_add(child_pa);
let size = (child_pa as usize) + slots_off + XA_CHUNK_SIZE as usize * 8 + 8;
let mut buf = vec![0u8; size];
// Root node shift = 6: each root slot covers 64 indices.
buf[root_pa as usize] = 6;
let root_slot_pa = root_pa + slots_off as u64 + child_slot * 8;
// The root slot stores the child node pointer tagged with bit 1
// (internal-node marker).
buf[root_slot_pa as usize..root_slot_pa as usize + 8]
.copy_from_slice(&(child_kva | 2).to_ne_bytes());
// Child node shift = 0: its slots hold leaf entries directly.
buf[child_pa as usize] = 0;
let child_slot_pa = child_pa + slots_off as u64 + leaf_slot * 8;
buf[child_slot_pa as usize..child_slot_pa as usize + 8]
.copy_from_slice(&leaf_entry.to_ne_bytes());
let xa_head = root_kva | 2;
(buf, xa_head, page_offset)
}
#[test]
fn xa_load_two_level_finds_leaf() {
    // With a shift-6 root node the index splits into a root slot (high
    // bits) and a leaf slot (low 6 bits); both levels must be traversed.
    let slots_off = 16;
    let (child_slot, leaf_slot) = (1u64, 5u64);
    let stored: u64 = 0xBEEF_0000;
    let (backing, head, page_offset) =
        setup_two_level_xarray(child_slot, leaf_slot, stored, slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let index = (child_slot << 6) | leaf_slot;
    assert_eq!(xa_load(&mem, page_offset, head, index, slots_off, 0), Some(stored));
}
#[test]
fn xa_load_two_level_empty_child_slot() {
    // Only one leaf slot of the child node is populated; a different leaf
    // index under the same child must read back as empty.
    let slots_off = 16;
    let (child_slot, leaf_slot) = (2u64, 10u64);
    let stored: u64 = 0xAAAA_0000;
    let (backing, head, page_offset) =
        setup_two_level_xarray(child_slot, leaf_slot, stored, slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let hit = (child_slot << 6) | leaf_slot;
    assert_eq!(xa_load(&mem, page_offset, head, hit, slots_off, 0), Some(stored));
    // Leaf slot 0 under the same child was never written.
    let miss = child_slot << 6;
    assert_eq!(xa_load(&mem, page_offset, head, miss, slots_off, 0), Some(0));
}
#[test]
fn xa_load_two_level_empty_root_slot() {
    // Root slot 3 points at the child node; index 5 falls under root
    // slot 0, which is empty, so the lookup must resolve to nothing.
    let slots_off = 16;
    let (backing, head, page_offset) = setup_two_level_xarray(3, 0, 0xDEAD_0000, slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(xa_load(&mem, page_offset, head, 5, slots_off, 0), Some(0));
}
#[test]
fn xa_load_two_level_high_index() {
    // The last representable index (root slot 63, leaf slot 63) must still
    // resolve through both levels.
    let slots_off = 16;
    let (backing, head, page_offset) = setup_two_level_xarray(63, 63, 0xFFFF_0000, slots_off);
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    let top_index = (63u64 << 6) | 63;
    assert_eq!(
        xa_load(&mem, page_offset, head, top_index, slots_off, 0),
        Some(0xFFFF_0000)
    );
}
#[cfg(target_arch = "x86_64")]
// Builds a guest-memory image with TWO bpf_maps reachable through an IDR
// whose xa_head points at a node-tagged XArray node holding both map KVAs
// in slots 0 and 1. Map 1 is named "other.data" (value_size 32) and map 2
// "mitosis.bss" (value_size 128); both are ARRAY maps. Returns
// (buffer, PGD PA for CR3, the IDR's kernel virtual address, offsets).
fn setup_find_bpf_map_multi() -> (Vec<u8>, u64, u64, BpfMapOffsets) {
// Same hand-picked field offsets as setup_find_bpf_map.
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
// Physical layout: page-table frames, two map objects, IDR, XArray node.
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map1_pa: u64 = 0x14000;
let map2_pa: u64 = 0x15000;
let idr_pa: u64 = 0x16000;
let xa_node_pa: u64 = 0x17000;
// Adjacent vmalloc-range KVAs: both share PGD/PUD/PMD, differ in PTE.
let map1_kva: u64 = 0xFFFF_C900_0000_0000;
let map2_kva: u64 = 0xFFFF_C900_0000_1000;
let pgd_idx = (map1_kva >> 39) & 0x1FF;
let pud_idx = (map1_kva >> 30) & 0x1FF;
let pmd_idx = (map1_kva >> 21) & 0x1FF;
let pte1_idx = (map1_kva >> 12) & 0x1FF;
let pte2_idx = (map2_kva >> 12) & 0x1FF;
// The XArray node is addressed through the direct map.
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x18000;
let mut buf = vec![0u8; size];
// Native-endian u64/u32 pokes at raw physical offsets.
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
// Four-level table mapping both map KVAs (0x63 = present|rw|a|d).
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte1_idx * 8, (map1_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
// Node shift byte = 0: slots hold leaf entries (the map KVAs) directly.
buf[xa_node_pa as usize] = 0; write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64,
map1_kva,
);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map2_kva,
);
// xa_head carries the internal-node tag (bit 1) to force a node walk.
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
// Map 1: ARRAY type, value_size 32, name "other.data".
write_u32(
&mut buf,
map1_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map1_pa + offsets.value_size as u64, 32);
let name1 = b"other.data";
let name1_pa = map1_pa + offsets.map_name as u64;
buf[name1_pa as usize..name1_pa as usize + name1.len()].copy_from_slice(name1);
// Map 2: ARRAY type, value_size 128, name "mitosis.bss".
write_u32(
&mut buf,
map2_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map2_pa + offsets.value_size as u64, 128);
let name2 = b"mitosis.bss";
let name2_pa = map2_pa + offsets.map_name as u64;
buf[name2_pa as usize..name2_pa as usize + name2.len()].copy_from_slice(name2);
// The IDR itself is addressed through the kernel image mapping.
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
(buf, pgd_pa, idr_kva, offsets)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_wrong_name_finds_second() {
    // The multi-map fixture holds "other.data" (wrong suffix) followed by
    // "mitosis.bss"; the lookup must skip the first and report the second.
    let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map_multi();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false);
    let info = find_bpf_map(&ctx, idr_kva, ".bss").expect("should find second map");
    assert_eq!(info.name(), "mitosis.bss");
    assert_eq!(info.map_pa, 0x15000);
    assert_eq!(info.value_size, 128);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_full_length_name() {
    // A 15-character name (one short of BPF_OBJ_NAME_LEN, leaving room for
    // the trailing NUL) must still match the ".bss" suffix and round-trip.
    let full_name = "0123456789a.bss";
    let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map(full_name, BPF_MAP_TYPE_ARRAY, 64);
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false);
    let info = find_bpf_map(&ctx, idr_kva, ".bss").expect("should find map with 15-char name");
    assert_eq!(info.name(), full_name);
}
// A name occupying all BPF_OBJ_NAME_LEN bytes with no terminating NUL and a
// non-matching final byte ('!') must NOT match the ".bss" suffix: the matcher
// has to consider the full 16-byte buffer, not stop at byte 15.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_max_length_name_no_null() {
let max_name = "0123456789a.bss!"; assert_eq!(max_name.len(), BPF_OBJ_NAME_LEN);
let (mut buf, cr3_pa, idr_kva, offsets) =
setup_find_bpf_map("placeholder.bss", BPF_MAP_TYPE_ARRAY, 64);
// 0x14000 is the map_pa chosen by setup_find_bpf_map's fixed layout —
// TODO confirm against the fixture if its layout constants ever change.
let map_pa: u64 = 0x14000;
let name_pa = (map_pa + offsets.map_name as u64) as usize;
// Overwrite the placeholder name in place with the 16-byte, NUL-free name.
buf[name_pa..name_pa + 16].copy_from_slice(max_name.as_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(
result.is_none(),
"16-byte name ending with '!' should not match .bss suffix"
);
}
// Writing at a non-zero value offset must touch exactly bytes
// [8, 8 + payload.len()) of the backing page and leave byte 0 untouched.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_nonzero_offset() {
let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
// Remember the first data byte so we can prove it was not clobbered.
let original_first_byte = buf[data_pa as usize];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
let payload = [0x11, 0x22, 0x33, 0x44];
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
8,
&payload
));
// Payload landed at data_pa + 8 ...
for (i, &expected) in payload.iter().enumerate() {
assert_eq!(buf[data_pa as usize + 8 + i], expected);
}
// ... and the byte before the written range is unchanged.
assert_eq!(buf[data_pa as usize], original_first_byte);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_empty_data() {
    // A zero-length payload at offset 0 is trivially in bounds and must
    // report success.
    let (mut buf, cr3_pa, kva, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert!(write_bpf_map_value(&ctx, &info, 0, &[]));
}
// u32 write through the 5-level (`l5 = true`) page-walk path; the value
// must land at offset 0 of the data page resolved by the walk.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_u32_5level() {
let (mut buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
// `true` selects the 5-level walk in the accessor context.
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, true),
&info,
0,
0xCAFE_BABE,
));
assert_eq!(mem.read_u32(data_pa, 0), 0xCAFE_BABE);
}
// 5-level walk where the PML5 entry is present but the P4D table it points
// to is all zeroes (entry not present): translation must fail with None.
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_p4d_not_present() {
let kva: u64 = 0xFF11_8880_0000_5000;
let pml5_idx = (kva >> 48) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let size = (p4d_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
// Only the top-level entry is populated; the p4d page stays zeroed.
let off = (pml5_pa + pml5_idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&((p4d_pa + PTE_BASE) | 0x63).to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
assert_eq!(mem.translate_kva(pml5_pa, Kva(kva), true, 0), None);
}
// 5-level walk terminating in a 2 MiB huge page at the PMD level.
// BLOCK_FLAGS (0xE3 on x86_64) has bit 7 set, which marks the entry as a
// leaf (PS bit) at this level. Both the page base and an interior offset
// must translate.
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_2mb_huge_page() {
let kva: u64 = 0xFF11_8880_0020_0000; let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
// 2 MiB-aligned target so the whole huge page fits in the buffer.
let huge_page_pa: u64 = 0x20_0000;
let size = (huge_page_pa + 0x20_0000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(
&mut buf,
pmd_pa,
pmd_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pml5_pa, Kva(kva), true, 0);
assert_eq!(pa, Some(huge_page_pa));
// An offset inside the 2 MiB region must be carried through unchanged.
let pa_off = mem.translate_kva(pml5_pa, Kva(kva + 0x1234), true, 0);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234));
}
// 5-level walk terminating in a 1 GiB huge page at the PUD level.
// The buffer only needs to cover the page tables; the huge-page target
// itself lies beyond the buffer and is never dereferenced here.
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_5level_1gb_huge_page() {
let kva: u64 = 0xFF11_8880_4000_0000; let pml5_idx = (kva >> 48) & 0x1FF;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = pml5_pa + 0x1000;
let pud_pa: u64 = p4d_pa + 0x1000;
// 1 GiB-aligned leaf target.
let huge_page_pa: u64 = 0x4000_0000;
let size = (pud_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pml5_pa, pml5_idx, (p4d_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, p4d_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
// Leaf entry: BLOCK_FLAGS marks the PUD entry as a huge-page mapping.
write_entry(
&mut buf,
pud_pa,
pud_idx,
(huge_page_pa + PTE_BASE) | BLOCK_FLAGS,
);
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let pa = mem.translate_kva(pml5_pa, Kva(kva), true, 0);
assert_eq!(pa, Some(huge_page_pa));
// A large interior offset (< 1 GiB) must be preserved in the result.
let pa_off = mem.translate_kva(pml5_pa, Kva(kva + 0x1234_5678), true, 0);
assert_eq!(pa_off, Some(huge_page_pa + 0x1234_5678));
}
// A single direct IDR entry whose map KVA has no page-table mapping must be
// skipped, leaving no match. No #[cfg] gate: only the direct-map read of the
// IDR itself is exercised, which works on both architectures.
#[test]
fn find_bpf_map_skips_untranslatable_entry() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
let idr_pa: u64 = 0x1000;
let pgd_pa: u64 = 0x10000;
let size = 0x12000;
let mut buf = vec![0u8; size];
let unmappable_kva: u64 = 0xFFFF_C900_DEAD_0000;
// Bit 1 clear: presumably this means "direct map pointer", not an XArray
// internal node entry — confirm against the xa tagging scheme.
assert_eq!(unmappable_kva & 2, 0);
let off = (idr_pa + offsets.idr_xa_head as u64) as usize;
buf[off..off + 8].copy_from_slice(&unmappable_kva.to_ne_bytes());
let off_next = (idr_pa + offsets.idr_next as u64) as usize;
buf[off_next..off_next + 4].copy_from_slice(&1u32.to_ne_bytes());
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pgd_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
".bss",
);
assert!(result.is_none());
}
// End-to-end find_bpf_map through a 5-level page walk: the IDR holds one
// direct map pointer, and resolving the map struct requires the full
// PML5 -> P4D -> PUD -> PMD -> PTE chain built below.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_5level() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
// KVA outside the 4-level canonical range so the 5-level path is required.
let map_kva: u64 = 0xFF11_C900_0000_0000;
let pml5_idx = (map_kva >> 48) & 0x1FF;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
// One 4 KiB page per table level, then the map struct and the IDR page.
let pml5_pa: u64 = 0x10000;
let p4d_pa: u64 = 0x11000;
let pud_pa: u64 = 0x12000;
let pmd_pa: u64 = 0x13000;
let pte_pa: u64 = 0x14000;
let map_pa: u64 = 0x15000;
let idr_pa: u64 = 0x16000;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pml5_pa + pml5_idx * 8, (p4d_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, p4d_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
// Single direct entry in the IDR head; idr_next = 1 bounds the scan.
write_u64(&mut buf, idr_pa + offsets.idr_xa_head as u64, map_kva);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 1);
write_u32(
&mut buf,
map_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map_pa + offsets.value_size as u64, 96);
let name = b"test.bss";
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pml5_pa, 0xFFFF_8880_0000_0000, &offsets, true),
idr_kva,
".bss",
);
let info = result.expect("should find map via 5-level walk");
assert_eq!(info.name(), "test.bss");
assert_eq!(info.map_pa, map_pa);
assert_eq!(info.value_size, 96);
// For ARRAY maps the value area starts at map + array_value.
assert_eq!(info.value_kva, Some(map_kva + offsets.array_value as u64));
}
#[cfg(target_arch = "x86_64")]
// Builds a 4-level page table mapping two consecutive KVAs to two separate
// physical pages, for tests that cross a page boundary.
// Returns (buffer, pgd_pa, first kva, first page pa, second page pa).
fn setup_two_page_table() -> (Vec<u8>, u64, u64, u64, u64) {
let kva: u64 = 0xFFFF_8880_0000_5000;
let kva2: u64 = kva + 0x1000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte1_idx = (kva >> 12) & 0x1FF;
let pte2_idx = (kva2 >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let page1_pa: u64 = pte_pa + 0x1000;
let page2_pa: u64 = page1_pa + 0x1000;
let size = (page2_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
// Adjacent KVAs deliberately map to two distinct data pages.
write_entry(&mut buf, pte_pa, pte1_idx, (page1_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte2_idx, (page2_pa + PTE_BASE) | 0x63);
(buf, pgd_pa, kva, page1_pa, page2_pa)
}
// A 4-byte write starting 2 bytes before a page boundary must be split:
// two bytes land at the end of page 1 and two at the start of page 2.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_across_page_boundary() {
let (mut buf, cr3_pa, kva, page1_pa, page2_pa) = setup_two_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
// Large enough that offset 0xFFE + 4 bytes is still in bounds.
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
let val: u32 = 0xAABB_CCDD;
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
0xFFE,
val,
));
// Verify the split placement byte by byte (native endianness).
let b = val.to_ne_bytes();
assert_eq!(buf[page1_pa as usize + 0xFFE], b[0]);
assert_eq!(buf[page1_pa as usize + 0xFFF], b[1]);
assert_eq!(buf[page2_pa as usize], b[2]);
assert_eq!(buf[page2_pa as usize + 1], b[3]);
}
// A 1-byte write at exactly the page boundary (offset 0x1000) must resolve
// through the second PTE and land at the start of the second page.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_bpf_map_value_single_byte_on_second_page() {
let (mut buf, cr3_pa, kva, _, page2_pa) = setup_two_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0x1000,
&[0x42],
));
assert_eq!(buf[page2_pa as usize], 0x42);
}
// xa_node with two slots: slot 0 holds a KVA with no page-table mapping,
// slot 1 holds a mapped map. The lookup must skip the dead slot and still
// find the second map.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_bpf_map_skips_untranslatable_finds_translatable() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map2_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
let xa_node_pa: u64 = 0x16000;
// map1_kva gets NO page-table entries — deliberately untranslatable.
let map1_kva: u64 = 0xFFFF_C900_0000_0000;
let map2_kva: u64 = 0xFFFF_C900_0000_1000;
// Page-table indices are derived from map2_kva only.
let pgd_idx = (map2_kva >> 39) & 0x1FF;
let pud_idx = (map2_kva >> 30) & 0x1FF;
let pmd_idx = (map2_kva >> 21) & 0x1FF;
let pte2_idx = (map2_kva >> 12) & 0x1FF;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
// The xa_node is reached through the direct map (page_offset).
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
// xa_node shift byte (offset 0) = 0, then two slot pointers.
buf[xa_node_pa as usize] = 0; write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64,
map1_kva,
);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map2_kva,
);
// Bit 1 set tags the head pointer as an internal xa_node entry.
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map2_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map2_pa + offsets.value_size as u64, 200);
let name = b"target.bss";
let name_pa = (map2_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let result = find_bpf_map(
&lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
idr_kva,
".bss",
);
let info = result.expect("should skip untranslatable entry and find the second");
assert_eq!(info.name(), "target.bss");
assert_eq!(info.map_pa, map2_pa);
assert_eq!(info.value_size, 200);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_u32_roundtrip() {
    // Seed a u32 at value offset 4 in guest memory, then read it back
    // through the 4-level translation path.
    let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
    let seeded: u32 = 0xCAFE_BABE;
    let start = data_pa as usize + 4;
    buf[start..start + 4].copy_from_slice(&seeded.to_ne_bytes());
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert_eq!(read_bpf_map_value_u32(&ctx, &info, 4), Some(seeded));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_bytes() {
    // A raw byte read at offset 0 returns exactly the bytes placed in the
    // backing data page.
    let (mut buf, cr3_pa, kva, data_pa) = setup_page_table();
    let seeded = [0xAA, 0xBB, 0xCC, 0xDD];
    let start = data_pa as usize;
    buf[start..start + seeded.len()].copy_from_slice(&seeded);
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert_eq!(read_bpf_map_value(&ctx, &info, 0, 4), Some(seeded.to_vec()));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_empty() {
    // A zero-length read is valid and yields Some of an empty vector.
    let (buf, cr3_pa, kva, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("test.bss");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: Some(kva),
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert_eq!(read_bpf_map_value(&ctx, &info, 0, 0), Some(Vec::new()));
}
// A value_kva that has no entry in the fixture's page tables must make both
// read paths fail with None rather than fabricate data.
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_unmapped_returns_none() {
let (buf, cr3_pa, _, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 16,
max_entries: 0,
// Deliberately points outside everything the fixture mapped.
value_kva: Some(0xFFFF_FFFF_8000_0000), btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
assert_eq!(
read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0, 4),
None
);
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 0),
None
);
}
// Round-trip through the accessor pair: a u32 written at offset 8 and a
// byte slice written at offset 16 must read back identically.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_then_read_bpf_map_value_roundtrip() {
let (mut buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
// u32 round-trip at offset 8.
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
8,
0x1234_5678,
));
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 8),
Some(0x1234_5678)
);
// Odd-length byte-slice round-trip at offset 16.
let payload = [0x11, 0x22, 0x33, 0x44, 0x55];
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
16,
&payload,
));
assert_eq!(
read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 16, 5),
Some(payload.to_vec()),
);
}
// A 4-byte read starting 2 bytes before a page boundary must stitch bytes
// from the end of page 1 and the start of page 2 in order.
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_across_page_boundary() {
let (mut buf, cr3_pa, kva, page1_pa, page2_pa) = setup_two_page_table();
// Seed two bytes at the end of page 1 and two at the start of page 2.
buf[page1_pa as usize + 0xFFE] = 0xAA;
buf[page1_pa as usize + 0xFFF] = 0xBB;
buf[page2_pa as usize] = 0xCC;
buf[page2_pa as usize + 1] = 0xDD;
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 0x2000,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
let bytes = read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 0xFFE, 4);
assert_eq!(bytes, Some(vec![0xAA, 0xBB, 0xCC, 0xDD]));
}
// u32 read through the 5-level (`l5 = true`) translation path.
#[test]
#[cfg(target_arch = "x86_64")]
fn read_bpf_map_value_u32_5level() {
let (mut buf, cr3_pa, kva, data_pa) = setup_5level_page_table();
// Seed the expected value at offset 0 of the resolved data page.
buf[data_pa as usize..data_pa as usize + 4].copy_from_slice(&0xDEAD_BEEFu32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 64,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
assert_eq!(
read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, true), &info, 0),
Some(0xDEAD_BEEF)
);
}
// find_all_bpf_maps must enumerate maps of every type, not just ARRAY:
// the first fixture map is retyped to 1 (HASH) and both must be listed.
// Only ARRAY maps are expected to carry a value_kva.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_returns_both_types() {
let mut setup = setup_find_bpf_map_multi();
// Patch the first map's type field (map_pa 0x14000 per the fixture) to 1.
let map1_pa: u64 = 0x14000;
let map_type_off = setup.3.map_type;
let off = (map1_pa + map_type_off as u64) as usize;
setup.0[off..off + 4].copy_from_slice(&1u32.to_ne_bytes());
let (buf, cr3_pa, idr_kva, offsets) = setup;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, page_offset, &offsets, false),
idr_kva,
);
assert_eq!(maps.len(), 2);
let hash_map = maps.iter().find(|m| m.name() == "other.data");
let array_map = maps.iter().find(|m| m.name() == "mitosis.bss");
assert!(hash_map.is_some(), "HASH map should be in results");
assert!(array_map.is_some(), "ARRAY map should be in results");
assert_eq!(hash_map.unwrap().map_type, 1); assert!(hash_map.unwrap().value_kva.is_none());
assert_eq!(array_map.unwrap().map_type, BPF_MAP_TYPE_ARRAY);
assert!(array_map.unwrap().value_kva.is_some());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_single_entry() {
    // An IDR holding exactly one ARRAY map yields a one-element listing.
    let (buf, cr3_pa, idr_kva, offsets) = setup_find_bpf_map("test.bss", BPF_MAP_TYPE_ARRAY, 64);
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false);
    let maps = find_all_bpf_maps(&ctx, idr_kva);
    assert_eq!(maps.len(), 1);
    assert_eq!(maps[0].name(), "test.bss");
}
// An IDR whose xa_head and next counter are both zero yields an empty
// listing. No #[cfg] gate: no page-table translation is needed here.
#[test]
fn find_all_bpf_maps_empty_idr() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
// Entirely zeroed guest memory: the IDR at 0x1000 is empty.
let buf = vec![0u8; 0x2000];
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = 0x1000 + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, 0x10000, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert!(maps.is_empty());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn read_value_returns_none_for_non_array_map() {
    // A non-ARRAY map carries no value_kva, so both read paths must
    // refuse with None.
    let (buf, cr3_pa, _, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("hash.map");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: 1, // BPF_MAP_TYPE_HASH
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert!(read_bpf_map_value(&ctx, &info, 0, 4).is_none());
    assert!(read_bpf_map_value_u32(&ctx, &info, 0).is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn write_value_returns_false_for_non_array_map() {
    // A non-ARRAY map carries no value_kva, so both write paths must
    // refuse and report failure.
    let (mut buf, cr3_pa, _, _) = setup_page_table();
    let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
    let (name_bytes, name_len) = name_from_str("hash.map");
    let info = BpfMapInfo {
        map_pa: 0,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: 1, // BPF_MAP_TYPE_HASH
        map_flags: 0,
        key_size: 0,
        value_size: 64,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    let ctx = value_ctx(&mem, cr3_pa, false);
    assert!(!write_bpf_map_value(&ctx, &info, 0, &[1, 2, 3, 4]));
    assert!(!write_bpf_map_value_u32(&ctx, &info, 0, 42));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_reads_map_flags() {
    // map_flags must be copied out of the guest bpf_map struct verbatim.
    let (mut buf, cr3_pa, idr_kva, offsets) =
        setup_find_bpf_map("flagged.bss", BPF_MAP_TYPE_ARRAY, 64);
    // 0x14000 is the fixture's map_pa; patch its flags field in place.
    let flags_pa = (0x14000u64 + offsets.map_flags as u64) as usize;
    buf[flags_pa..flags_pa + 4].copy_from_slice(&0x0400u32.to_ne_bytes());
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let ctx = lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false);
    let maps = find_all_bpf_maps(&ctx, idr_kva);
    assert_eq!(maps.len(), 1);
    assert_eq!(maps[0].map_flags, 0x0400);
}
#[test]
fn xa_node_shift_nonzero_offset() {
    // xa_node_shift reads one byte at node + shift_off through the direct
    // map; with shift_off == 0 it lands on the zeroed node base byte.
    let node_pa: u64 = 0x1000;
    let page_offset = crate::monitor::symbols::DEFAULT_PAGE_OFFSET;
    let node_kva = page_offset.wrapping_add(node_pa);
    let shift_off: usize = 8;
    let mut backing = vec![0u8; 0x2000];
    backing[node_pa as usize + shift_off] = 6;
    let mem = unsafe { GuestMem::new(backing.as_ptr() as *mut u8, backing.len() as u64) };
    assert_eq!(xa_node_shift(&mem, page_offset, node_kva, shift_off), 6);
    assert_eq!(xa_node_shift(&mem, page_offset, node_kva, 0), 0);
}
// Enumeration variant of the skip test: slot 0 of the xa_node holds a KVA
// with no mapping, slot 1 a valid map. find_all_bpf_maps must not abort on
// the bad slot and must still list the good map.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_continues_past_untranslatable_entry() {
let offsets = BpfMapOffsets {
map_name: 32,
map_type: 24,
map_flags: 28,
key_size: 44,
value_size: 48,
max_entries: 52,
array_value: 256,
xa_node_slots: 16,
xa_node_shift: 0,
idr_xa_head: 8,
idr_next: 20,
map_btf: 0,
map_btf_value_type_id: 0,
map_btf_vmlinux_value_type_id: 0,
map_btf_key_type_id: 0,
btf_data: 0,
btf_data_size: 0,
btf_base_btf: 0,
htab_offsets: None,
task_storage_offsets: None,
struct_ops_offsets: None,
ringbuf_offsets: None,
stackmap_offsets: None,
};
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = 0x11000;
let pmd_pa: u64 = 0x12000;
let pte_pa: u64 = 0x13000;
let map_pa: u64 = 0x14000;
let idr_pa: u64 = 0x15000;
let xa_node_pa: u64 = 0x16000;
let map_kva: u64 = 0xFFFF_C900_0000_0000;
let pgd_idx = (map_kva >> 39) & 0x1FF;
let pud_idx = (map_kva >> 30) & 0x1FF;
let pmd_idx = (map_kva >> 21) & 0x1FF;
let pte_idx = (map_kva >> 12) & 0x1FF;
// bad_kva gets no page-table entries and must be skipped during the scan.
let bad_kva: u64 = 0xFFFF_C900_8000_0000;
let page_offset: u64 = 0xFFFF_8880_0000_0000;
let xa_node_kva = xa_node_pa + page_offset;
let size = 0x17000;
let mut buf = vec![0u8; size];
let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
let off = pa as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
let off = pa as usize;
buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
};
// 4-level chain resolving map_kva to map_pa.
write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
write_u64(&mut buf, pte_pa + pte_idx * 8, (map_pa + PTE_BASE) | 0x63);
// xa_node shift byte = 0; slot 0 = bad_kva, slot 1 = good map.
buf[xa_node_pa as usize] = 0; write_u64(&mut buf, xa_node_pa + offsets.xa_node_slots as u64, bad_kva);
write_u64(
&mut buf,
xa_node_pa + offsets.xa_node_slots as u64 + 8,
map_kva,
);
// Bit 1 set tags the head pointer as an internal xa_node entry.
write_u64(
&mut buf,
idr_pa + offsets.idr_xa_head as u64,
xa_node_kva | 2,
);
write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
write_u32(
&mut buf,
map_pa + offsets.map_type as u64,
BPF_MAP_TYPE_ARRAY,
);
write_u32(&mut buf, map_pa + offsets.value_size as u64, 64);
let name = b"good.bss";
let name_pa = (map_pa + offsets.map_name as u64) as usize;
buf[name_pa..name_pa + name.len()].copy_from_slice(name);
let start_kernel_map: u64 = START_KERNEL_MAP;
let idr_kva = idr_pa + start_kernel_map;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
idr_kva,
);
let good = maps.iter().find(|m| m.name() == "good.bss");
assert!(
good.is_some(),
"good.bss should be found despite bad entry at slot 0"
);
assert_eq!(good.unwrap().map_type, BPF_MAP_TYPE_ARRAY);
}
// Bounds checks on reads against value_size = 8: offset+len at the limit is
// allowed; anything extending or starting past the limit is rejected.
#[test]
#[cfg(target_arch = "x86_64")]
fn read_value_rejects_out_of_bounds() {
let (buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 8,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
// 4 + 4 == 8: exactly at the boundary, allowed.
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 4, 4).is_some());
// 4 + 5 > 8: overruns, rejected.
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 4, 5).is_none());
// Offset itself past the end.
assert!(read_bpf_map_value(&value_ctx(&mem, cr3_pa, false), &info, 9, 1).is_none());
// 6 + 4 > 8 for a u32 read.
assert!(read_bpf_map_value_u32(&value_ctx(&mem, cr3_pa, false), &info, 6).is_none());
}
// Bounds checks on writes against value_size = 8, mirroring the read test:
// exactly-at-limit succeeds, any overrun fails.
#[test]
#[cfg(target_arch = "x86_64")]
fn write_value_rejects_out_of_bounds() {
let (mut buf, cr3_pa, kva, _) = setup_page_table();
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let info = BpfMapInfo {
map_pa: 0,
map_kva: 0,
name_bytes: name_from_str("test.bss").0,
name_len: name_from_str("test.bss").1,
map_type: BPF_MAP_TYPE_ARRAY,
map_flags: 0,
key_size: 0,
value_size: 8,
max_entries: 0,
value_kva: Some(kva),
btf_kva: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
btf_key_type_id: 0,
};
// Exactly fills the value area: allowed.
assert!(write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[0u8; 8],
));
// One byte too many: rejected.
assert!(!write_bpf_map_value(
&value_ctx(&mem, cr3_pa, false),
&info,
0,
&[0u8; 9],
));
// u32 at offset 6 would overrun (6 + 4 > 8): rejected.
assert!(!write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
6,
42
));
// u32 at offset 4 fits exactly (4 + 4 == 8): allowed.
assert!(write_bpf_map_value_u32(
&value_ctx(&mem, cr3_pa, false),
&info,
4,
42
));
}
#[test]
fn bpf_map_info_btf_fields_default_zero() {
    // A BpfMapInfo constructed with zeroed BTF metadata reports those
    // fields back as zero.
    let (name_bytes, name_len) = name_from_str("test");
    let info = BpfMapInfo {
        map_pa: 0x1000,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 32,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0,
        btf_value_type_id: 0,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    assert_eq!(info.btf_kva, 0);
    assert_eq!(info.btf_value_type_id, 0);
}
#[test]
fn bpf_map_info_btf_fields_populated() {
    // Non-zero BTF metadata round-trips through the struct unchanged.
    let (name_bytes, name_len) = name_from_str("test");
    let info = BpfMapInfo {
        map_pa: 0x1000,
        map_kva: 0,
        name_bytes,
        name_len,
        map_type: BPF_MAP_TYPE_ARRAY,
        map_flags: 0,
        key_size: 0,
        value_size: 32,
        max_entries: 0,
        value_kva: None,
        btf_kva: 0xFFFF_8880_0001_0000,
        btf_value_type_id: 42,
        btf_vmlinux_value_type_id: 0,
        btf_key_type_id: 0,
    };
    assert_eq!(info.btf_kva, 0xFFFF_8880_0001_0000);
    assert_eq!(info.btf_value_type_id, 42);
}
// With non-zero map_btf / map_btf_value_type_id offsets configured, the
// enumerator must read those fields: first scan sees zeroes, a second scan
// after patching the guest struct sees the populated values.
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_populates_btf_fields() {
let (mut buf, cr3_pa, idr_kva, mut offsets) =
setup_find_bpf_map("test.bss", BPF_MAP_TYPE_ARRAY, 64);
// Enable BTF extraction by giving the offsets non-zero values.
offsets.map_btf = 56;
offsets.map_btf_value_type_id = 64;
// 0x14000 is the fixture's map_pa.
let map_pa: u64 = 0x14000;
let btf_off = (map_pa + offsets.map_btf as u64) as usize;
let btf_tid_off = (map_pa + offsets.map_btf_value_type_id as u64) as usize;
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
// First pass: fields in guest memory are still zero.
assert_eq!(maps.len(), 1);
assert_eq!(maps[0].btf_kva, 0);
assert_eq!(maps[0].btf_value_type_id, 0);
// Patch the guest struct, rebuild the view, and re-scan.
let btf_kva_val: u64 = 0xFFFF_8880_DEAD_0000;
buf[btf_off..btf_off + 8].copy_from_slice(&btf_kva_val.to_ne_bytes());
buf[btf_tid_off..btf_tid_off + 4].copy_from_slice(&7u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let maps = find_all_bpf_maps(
&lookup_ctx(&mem, cr3_pa, 0xFFFF_8880_0000_0000, &offsets, false),
idr_kva,
);
assert_eq!(maps[0].btf_kva, btf_kva_val);
assert_eq!(maps[0].btf_value_type_id, 7);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn find_all_bpf_maps_respects_idr_next_bound() {
    // Three maps are wired into the xarray node's first three slots, but the
    // idr's `idr_next` field is written as 2, so the walk must stop after the
    // first two slots and never surface "third.bss".
    let offsets = BpfMapOffsets {
        map_name: 32,
        map_type: 24,
        map_flags: 28,
        key_size: 44,
        value_size: 48,
        max_entries: 52,
        array_value: 256,
        xa_node_slots: 16,
        xa_node_shift: 0,
        idr_xa_head: 8,
        idr_next: 20,
        map_btf: 0,
        map_btf_value_type_id: 0,
        map_btf_vmlinux_value_type_id: 0,
        map_btf_key_type_id: 0,
        btf_data: 0,
        btf_data_size: 0,
        btf_base_btf: 0,
        htab_offsets: None,
        task_storage_offsets: None,
        struct_ops_offsets: None,
        ringbuf_offsets: None,
        stackmap_offsets: None,
    };
    // Fixture physical layout: 4-level page tables, three bpf_map structs,
    // the idr struct, and one xa_node, all inside a 0x19000-byte buffer.
    let pgd_pa: u64 = 0x10000;
    let pud_pa: u64 = 0x11000;
    let pmd_pa: u64 = 0x12000;
    let pte_pa: u64 = 0x13000;
    let map_pa: u64 = 0x14000;
    let map2_pa: u64 = 0x15000;
    let map3_pa: u64 = 0x16000;
    let idr_pa: u64 = 0x17000;
    let xa_node_pa: u64 = 0x18000;
    // Maps live in the vmalloc range; the xa_node is reached through the
    // direct map (page_offset).
    let map_kva: u64 = 0xFFFF_C900_0000_0000;
    let map2_kva: u64 = 0xFFFF_C900_0000_1000;
    let map3_kva: u64 = 0xFFFF_C900_0000_2000;
    let pgd_idx = (map_kva >> 39) & 0x1FF;
    let pud_idx = (map_kva >> 30) & 0x1FF;
    let pmd_idx = (map_kva >> 21) & 0x1FF;
    let pte1_idx = (map_kva >> 12) & 0x1FF;
    let pte2_idx = (map2_kva >> 12) & 0x1FF;
    let pte3_idx = (map3_kva >> 12) & 0x1FF;
    let page_offset: u64 = 0xFFFF_8880_0000_0000;
    let xa_node_kva = xa_node_pa + page_offset;
    let size = 0x19000;
    let mut buf = vec![0u8; size];
    let write_u64 = |buf: &mut Vec<u8>, pa: u64, val: u64| {
        let off = pa as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    let write_u32 = |buf: &mut Vec<u8>, pa: u64, val: u32| {
        let off = pa as usize;
        buf[off..off + 4].copy_from_slice(&val.to_ne_bytes());
    };
    // Map the three vmalloc KVAs; all three share PGD/PUD/PMD and differ only
    // in their PTE slot (entries flagged 0x63, matching the other fixtures).
    write_u64(&mut buf, pgd_pa + pgd_idx * 8, (pud_pa + PTE_BASE) | 0x63);
    write_u64(&mut buf, pud_pa + pud_idx * 8, (pmd_pa + PTE_BASE) | 0x63);
    write_u64(&mut buf, pmd_pa + pmd_idx * 8, (pte_pa + PTE_BASE) | 0x63);
    write_u64(&mut buf, pte_pa + pte1_idx * 8, (map_pa + PTE_BASE) | 0x63);
    write_u64(&mut buf, pte_pa + pte2_idx * 8, (map2_pa + PTE_BASE) | 0x63);
    write_u64(&mut buf, pte_pa + pte3_idx * 8, (map3_pa + PTE_BASE) | 0x63);
    // xa_node: shift byte zeroed (presumably marks a leaf node — matches the
    // xa_node_shift=0 offset above), then three consecutive slot pointers.
    buf[xa_node_pa as usize] = 0; write_u64(&mut buf, xa_node_pa + offsets.xa_node_slots as u64, map_kva);
    write_u64(
        &mut buf,
        xa_node_pa + offsets.xa_node_slots as u64 + 8,
        map2_kva,
    );
    write_u64(
        &mut buf,
        xa_node_pa + offsets.xa_node_slots as u64 + 2 * 8,
        map3_kva,
    );
    // idr.xa_head points to the xa_node; the `| 2` tag is what the walker
    // expects for an internal-node pointer (NOTE(review): xarray tagging).
    write_u64(
        &mut buf,
        idr_pa + offsets.idr_xa_head as u64,
        xa_node_kva | 2,
    );
    // The bound under test: idr_next = 2, so only slots 0 and 1 are live.
    write_u32(&mut buf, idr_pa + offsets.idr_next as u64, 2);
    write_u32(
        &mut buf,
        map_pa + offsets.map_type as u64,
        BPF_MAP_TYPE_ARRAY,
    );
    write_u32(&mut buf, map_pa + offsets.value_size as u64, 32);
    let name = b"first.bss";
    let name_pa = (map_pa + offsets.map_name as u64) as usize;
    buf[name_pa..name_pa + name.len()].copy_from_slice(name);
    write_u32(
        &mut buf,
        map2_pa + offsets.map_type as u64,
        BPF_MAP_TYPE_ARRAY,
    );
    write_u32(&mut buf, map2_pa + offsets.value_size as u64, 64);
    let name = b"second.bss";
    let name_pa = (map2_pa + offsets.map_name as u64) as usize;
    buf[name_pa..name_pa + name.len()].copy_from_slice(name);
    write_u32(
        &mut buf,
        map3_pa + offsets.map_type as u64,
        BPF_MAP_TYPE_ARRAY,
    );
    write_u32(&mut buf, map3_pa + offsets.value_size as u64, 128);
    let name = b"third.bss";
    let name_pa = (map3_pa + offsets.map_name as u64) as usize;
    buf[name_pa..name_pa + name.len()].copy_from_slice(name);
    // The idr struct itself is addressed through the kernel text mapping.
    let start_kernel_map: u64 = START_KERNEL_MAP;
    let idr_kva = idr_pa + start_kernel_map;
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let maps = find_all_bpf_maps(
        &lookup_ctx(&mem, pgd_pa, page_offset, &offsets, false),
        idr_kva,
    );
    // Only the first two slots fall below idr_next; the third must be skipped.
    assert_eq!(maps.len(), 2);
    assert!(maps.iter().any(|m| m.name() == "first.bss"));
    assert!(maps.iter().any(|m| m.name() == "second.bss"));
    assert!(!maps.iter().any(|m| m.name() == "third.bss"));
}
#[cfg(target_arch = "x86_64")]
fn setup_page_table_vmalloc() -> (Vec<u8>, u64, u64, u64) {
let kva: u64 = 0xFFFF_8000_8400_5000;
let pgd_idx = (kva >> 39) & 0x1FF;
let pud_idx = (kva >> 30) & 0x1FF;
let pmd_idx = (kva >> 21) & 0x1FF;
let pte_idx = (kva >> 12) & 0x1FF;
let pgd_pa: u64 = 0x10000;
let pud_pa: u64 = pgd_pa + 0x1000;
let pmd_pa: u64 = pud_pa + 0x1000;
let pte_pa: u64 = pmd_pa + 0x1000;
let data_pa: u64 = pte_pa + 0x1000;
let size = (data_pa + 0x1000) as usize;
let mut buf = vec![0u8; size];
let write_entry = |buf: &mut Vec<u8>, base: u64, idx: u64, val: u64| {
let off = (base + idx * 8) as usize;
buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
};
write_entry(&mut buf, pgd_pa, pgd_idx, (pud_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pud_pa, pud_idx, (pmd_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pmd_pa, pmd_idx, (pte_pa + PTE_BASE) | 0x63);
write_entry(&mut buf, pte_pa, pte_idx, (data_pa + PTE_BASE) | 0x63);
buf[data_pa as usize..data_pa as usize + 8]
.copy_from_slice(&0x1234_5678_ABCD_EF00u64.to_ne_bytes());
(buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256() {
    // A KVA in the upper half with L0 index 256 must walk to the data page,
    // and the sentinel written by the fixture must be readable there.
    let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let pa = mem.translate_kva(cr3_pa, Kva(kva), false, 0);
    assert_eq!(pa, Some(data_pa), "L0[256] walk should resolve to data page");
    assert_eq!(mem.read_u64(data_pa, 0), 0x1234_5678_ABCD_EF00);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256_with_offset() {
    // The low in-page bits of the KVA must carry through to the PA unchanged.
    let (buf, cr3_pa, kva, data_pa) = setup_page_table_vmalloc();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let translated = mem.translate_kva(cr3_pa, Kva(kva + 0x100), false, 0);
    assert_eq!(translated, Some(data_pa + 0x100));
}
#[test]
#[cfg(target_arch = "x86_64")]
fn translate_kva_l0_index_256_unmapped_neighbor() {
    // The adjacent L0 slot (index 257) has no PGD entry, so the walk must fail.
    let (buf, cr3_pa, kva, _) = setup_page_table_vmalloc();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let neighbor = kva + (1u64 << 39);
    assert_eq!(mem.translate_kva(cr3_pa, Kva(neighbor), false, 0), None);
}
// TCR_EL1 fixtures for the aarch64 walk tests. Bits [31:30] are TG1 (the
// kernel-side granule: 0b01 = 16 KB, 0b10 = 4 KB, 0b11 = 64 KB, as exercised
// by the granule tests below) and bits [21:16] are T1SZ (64 - VA width).
#[cfg(target_arch = "aarch64")]
const TCR_EL1_64K_48BIT: u64 = (0b11_u64 << 30) | (16u64 << 16);
#[cfg(target_arch = "aarch64")]
const TCR_EL1_4K_48BIT: u64 = (0b10_u64 << 30) | (16u64 << 16);
#[cfg(target_arch = "aarch64")]
const TCR_EL1_16K_47BIT: u64 = (0b01_u64 << 30) | (17u64 << 16);
#[cfg(target_arch = "aarch64")]
fn setup_page_table_vmalloc_64k() -> (Vec<u8>, u64, u64, u64) {
    // Three-level 64 KB-granule table: a 6-bit L1 index at bit 42, then
    // 13-bit indices at bits 29 and 16. A sentinel u64 sits in the data page.
    // Returns (buffer, root-table PA, mapped KVA, data-page PA).
    let kva: u64 = 0xFFFF_8000_8400_0000;
    let pgd_pa: u64 = 0x10000;
    let pmd_pa: u64 = 0x20000;
    let pte_pa: u64 = 0x30000;
    let data_pa: u64 = 0x40000;
    let mut buf = vec![0u8; (data_pa + 0x10000) as usize];
    // Write one table descriptor (low bits 0b11) into the slot selected by
    // the KVA bits at `shift`, masked to the level's index width.
    let mut link = |table_pa: u64, shift: u32, mask: u64, next_pa: u64| {
        let idx = (kva >> shift) & mask;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&((next_pa + PTE_BASE) | 0x03).to_ne_bytes());
    };
    link(pgd_pa, 42, 0x3F, pmd_pa);
    link(pmd_pa, 29, 0x1FFF, pte_pa);
    link(pte_pa, 16, 0x1FFF, data_pa);
    buf[data_pa as usize..data_pa as usize + 8]
        .copy_from_slice(&0x1234_5678_ABCD_EF00u64.to_ne_bytes());
    (buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k() {
    // Walk the 64 KB-granule table and verify both the PA and the sentinel.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_vmalloc_64k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let pa = mem.translate_kva(ttbr1_pa, Kva(kva), false, TCR_EL1_64K_48BIT);
    assert_eq!(pa, Some(data_pa), "64KB vmalloc walk should resolve");
    assert_eq!(mem.read_u64(data_pa, 0), 0x1234_5678_ABCD_EF00);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k_with_offset() {
    // The in-page offset must survive translation under 64 KB pages.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_vmalloc_64k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva + 0x100), false, TCR_EL1_64K_48BIT),
        Some(data_pa + 0x100)
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_vmalloc_64k_unmapped_neighbor() {
    // The neighboring L1 slot (one step at bit 42) has no entry → no mapping.
    let (buf, ttbr1_pa, kva, _) = setup_page_table_vmalloc_64k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let neighbor = kva + (1u64 << 42);
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(neighbor), false, TCR_EL1_64K_48BIT),
        None
    );
}
#[cfg(target_arch = "aarch64")]
fn setup_page_table_4k() -> (Vec<u8>, u64, u64, u64) {
    // Four-level 4 KB-granule table (9-bit index per level) mapping `kva`
    // to a data page holding a known sentinel value.
    // Returns (buffer, root-table PA, mapped KVA, data-page PA).
    let kva: u64 = 0xFFFF_8880_0000_5000;
    let pgd_pa: u64 = 0x10000;
    let pud_pa: u64 = 0x11000;
    let pmd_pa: u64 = 0x12000;
    let pte_pa: u64 = 0x13000;
    let data_pa: u64 = 0x14000;
    let mut buf = vec![0u8; (data_pa + 0x1000) as usize];
    // Write one table descriptor (low bits 0b11) into the slot selected by
    // the KVA bits at `shift`.
    let mut link = |table_pa: u64, shift: u32, next_pa: u64| {
        let idx = (kva >> shift) & 0x1FF;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&((next_pa + PTE_BASE) | 0x03).to_ne_bytes());
    };
    link(pgd_pa, 39, pud_pa);
    link(pud_pa, 30, pmd_pa);
    link(pmd_pa, 21, pte_pa);
    link(pte_pa, 12, data_pa);
    buf[data_pa as usize..data_pa as usize + 8]
        .copy_from_slice(&0xDEAD_BEEF_CAFE_1234u64.to_ne_bytes());
    (buf, pgd_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_aarch64_4k_4level() {
    // A full 4-level walk with the 4 KB granule must land on the data page.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let pa = mem.translate_kva(ttbr1_pa, Kva(kva), false, TCR_EL1_4K_48BIT);
    assert_eq!(pa, Some(data_pa), "4 KB 4-level walk should resolve");
    assert_eq!(mem.read_u64(data_pa, 0), 0xDEAD_BEEF_CAFE_1234);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_aarch64_4k_4level_offset() {
    // The 12-bit in-page offset must be preserved across translation.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva + 0x123), false, TCR_EL1_4K_48BIT),
        Some(data_pa + 0x123)
    );
}
#[cfg(target_arch = "aarch64")]
fn setup_page_table_16k() -> (Vec<u8>, u64, u64, u64) {
    // Three-level 16 KB-granule table: 11-bit index per level at bits 36/25/14,
    // with a sentinel u64 in the final data page.
    // Returns (buffer, root-table PA, mapped KVA, data-page PA).
    let kva: u64 = 0xFFFF_8000_0000_4000;
    let l1_pa: u64 = 0x10000;
    let l2_pa: u64 = 0x14000;
    let l3_pa: u64 = 0x18000;
    let data_pa: u64 = 0x1C000;
    let mut buf = vec![0u8; (data_pa + 0x4000) as usize];
    // Write one table descriptor (low bits 0b11) into the slot selected by
    // the KVA bits at `shift`.
    let mut link = |table_pa: u64, shift: u32, next_pa: u64| {
        let idx = (kva >> shift) & 0x7FF;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&((next_pa + PTE_BASE) | 0x03).to_ne_bytes());
    };
    link(l1_pa, 36, l2_pa);
    link(l2_pa, 25, l3_pa);
    link(l3_pa, 14, data_pa);
    buf[data_pa as usize..data_pa as usize + 8]
        .copy_from_slice(&0xFEED_FACE_C0DE_BABEu64.to_ne_bytes());
    (buf, l1_pa, kva, data_pa)
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_aarch64_16k_granule() {
    // The 16 KB-granule walk must resolve and the sentinel must be readable.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_16k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let pa = mem.translate_kva(ttbr1_pa, Kva(kva), false, TCR_EL1_16K_47BIT);
    assert_eq!(pa, Some(data_pa), "16 KB granule walk should resolve");
    assert_eq!(mem.read_u64(data_pa, 0), 0xFEED_FACE_C0DE_BABE);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_aarch64_tg1_decode_distinct_from_tg0() {
    // TG1 (TCR_EL1 bits 31:30) has its own granule encoding; the walker must
    // decode it on its own terms, not via the TG0 table. Only TG1=0b10 (4 KB)
    // may resolve a table laid out with 4 KB indices.
    let (buf, ttbr1_pa, kva, data_pa) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let tcr_for = |tg1: u64| (tg1 << 30) | (16u64 << 16);
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, tcr_for(0b10)),
        Some(data_pa),
        "TG1=0b10 must decode as 4 KB granule"
    );
    assert_ne!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, tcr_for(0b11)),
        Some(data_pa),
        "TG1=0b11 must NOT resolve a 4 KB-laid-out table to the same PA"
    );
    assert_ne!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, tcr_for(0b01)),
        Some(data_pa),
        "TG1=0b01 must NOT resolve a 4 KB-laid-out table to the same PA"
    );
}
#[cfg(target_arch = "aarch64")]
fn setup_page_table_4k_huge_pmd() -> (Vec<u8>, u64, u64, u64) {
    // 4 KB-granule table that terminates at the PMD level with a block
    // descriptor (low bits 0b01) covering a 2 MB-aligned region.
    // Returns (buffer, root-table PA, mapped KVA, huge-page base PA).
    let kva: u64 = 0xFFFF_8880_0020_0000;
    let pgd_pa: u64 = 0x10000;
    let pud_pa: u64 = 0x11000;
    let pmd_pa: u64 = 0x12000;
    let huge_page_pa: u64 = 0x20_0000;
    let mut buf = vec![0u8; (huge_page_pa + 0x20_0000) as usize];
    // Write a raw descriptor into the slot selected by the KVA bits at `shift`.
    let mut entry = |table_pa: u64, shift: u32, val: u64| {
        let idx = (kva >> shift) & 0x1FF;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    entry(pgd_pa, 39, (pud_pa + PTE_BASE) | 0x03);
    entry(pud_pa, 30, (pmd_pa + PTE_BASE) | 0x03);
    // Block descriptor: valid bit set, table bit clear → leaf at PMD level.
    entry(pmd_pa, 21, (huge_page_pa + PTE_BASE) | 0x01);
    buf[huge_page_pa as usize..huge_page_pa as usize + 8]
        .copy_from_slice(&0xCAFE_BABE_1234_5678u64.to_ne_bytes());
    (buf, pgd_pa, kva, huge_page_pa)
}
#[test]
#[cfg(target_arch = "aarch64")]
fn translate_kva_aarch64_4k_pmd_block() {
    // A PMD-level block descriptor must end the walk at 2 MB granularity.
    let (buf, ttbr1_pa, kva, huge_page_pa) = setup_page_table_4k_huge_pmd();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let base = mem.translate_kva(ttbr1_pa, Kva(kva), false, TCR_EL1_4K_48BIT);
    assert_eq!(
        base,
        Some(huge_page_pa),
        "PMD block descriptor must terminate the walk and resolve to the 2 MB base"
    );
    assert_eq!(mem.read_u64(base.unwrap(), 0), 0xCAFE_BABE_1234_5678);
    // An address inside the block keeps its 21-bit in-block offset.
    let inner = mem.translate_kva(ttbr1_pa, Kva(kva + 0x12_3456), false, TCR_EL1_4K_48BIT);
    assert_eq!(
        inner,
        Some(huge_page_pa + 0x12_3456),
        "in-block offset must compose with the 2 MB-aligned base"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn walk_aarch64_rejects_t1sz_underflow() {
    // T1SZ=61 leaves only a 3-bit VA width; the walker must refuse it.
    let (buf, ttbr1_pa, kva, _) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let bad_tcr = (0b10_u64 << 30) | (61u64 << 16);
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, bad_tcr),
        None,
        "T1SZ=61 (va_width=3) must trip the va_width<4 guard"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn walk_aarch64_rejects_levels_overflow() {
    // T1SZ=15 would require more table levels than supported; must be refused.
    let (buf, ttbr1_pa, kva, _) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let bad_tcr = (0b10_u64 << 30) | (15u64 << 16);
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, bad_tcr),
        None,
        "levels_below=5 must trip the levels_below>4 guard"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn walk_aarch64_rejects_tg1_reserved_zero() {
    // TG1=0b00 is a reserved granule encoding and must not translate.
    let (buf, ttbr1_pa, kva, _) = setup_page_table_4k();
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let reserved_tcr = (0b00_u64 << 30) | (16u64 << 16);
    assert_eq!(
        mem.translate_kva(ttbr1_pa, Kva(kva), false, reserved_tcr),
        None,
        "TG1=0b00 must be rejected as reserved"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn walk_aarch64_rejects_level3_reserved_descriptor() {
    // At level 3 a descriptor with low bits 0b01 is reserved (a block encoding
    // is illegal at the last level), so the walk must abort.
    let kva: u64 = 0xFFFF_8880_0000_5000;
    let pgd_pa: u64 = 0x10000;
    let pud_pa: u64 = 0x11000;
    let pmd_pa: u64 = 0x12000;
    let pte_pa: u64 = 0x13000;
    let data_pa: u64 = 0x14000;
    let mut buf = vec![0u8; (data_pa + 0x1000) as usize];
    let mut entry = |table_pa: u64, shift: u32, val: u64| {
        let idx = (kva >> shift) & 0x1FF;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    entry(pgd_pa, 39, (pud_pa + PTE_BASE) | 0x03);
    entry(pud_pa, 30, (pmd_pa + PTE_BASE) | 0x03);
    entry(pmd_pa, 21, (pte_pa + PTE_BASE) | 0x03);
    // Reserved at level 3: valid bit set, table/page bit clear.
    entry(pte_pa, 12, (data_pa + PTE_BASE) | 0x01);
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    assert_eq!(
        mem.translate_kva(pgd_pa, Kva(kva), false, TCR_EL1_4K_48BIT),
        None,
        "level 3 descriptor 0b01 (reserved) must be rejected"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn walk_aarch64_rejects_sub_dram_start_descriptor() {
    use crate::vmm::aarch64::kvm::DRAM_START;
    // A leaf whose output address lies below DRAM_START cannot be rebased
    // into the guest buffer; the walker's checked_sub guard must return None.
    let kva: u64 = 0xFFFF_8880_0000_5000;
    let pgd_pa: u64 = 0x10000;
    let pud_pa: u64 = 0x11000;
    let pmd_pa: u64 = 0x12000;
    let pte_pa: u64 = 0x13000;
    let bad_oa: u64 = 0x1000;
    assert!(
        bad_oa < DRAM_START,
        "test requires OA below DRAM_START to exercise checked_sub guard"
    );
    let mut buf = vec![0u8; (pte_pa + 0x1000) as usize];
    let mut entry = |table_pa: u64, shift: u32, val: u64| {
        let idx = (kva >> shift) & 0x1FF;
        let off = (table_pa + idx * 8) as usize;
        buf[off..off + 8].copy_from_slice(&val.to_ne_bytes());
    };
    entry(pgd_pa, 39, (pud_pa + PTE_BASE) | 0x03);
    entry(pud_pa, 30, (pmd_pa + PTE_BASE) | 0x03);
    entry(pmd_pa, 21, (pte_pa + PTE_BASE) | 0x03);
    // The leaf OA is deliberately NOT rebased by PTE_BASE.
    entry(pte_pa, 12, bad_oa | 0x03);
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    assert_eq!(
        mem.translate_kva(pgd_pa, Kva(kva), false, TCR_EL1_4K_48BIT),
        None,
        "leaf OA below DRAM_START must be rejected by checked_sub guard"
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_va_bits_47() {
    use crate::monitor::symbols::start_kernel_map_for_tcr;
    // TG1=16 KB with T1SZ=17 → 47-bit VA space.
    let tcr_47bit = (0b01_u64 << 30) | (17u64 << 16);
    assert_eq!(
        start_kernel_map_for_tcr(tcr_47bit),
        Some(0xFFFF_C000_8000_0000)
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_va_bits_48() {
    use crate::monitor::symbols::start_kernel_map_for_tcr;
    // TG1=4 KB with T1SZ=16 → 48-bit VA space.
    let tcr_48bit = (0b10_u64 << 30) | (16u64 << 16);
    assert_eq!(
        start_kernel_map_for_tcr(tcr_48bit),
        Some(0xFFFF_8000_8000_0000)
    );
}
#[test]
#[cfg(target_arch = "aarch64")]
fn va_bits_from_tcr_decode() {
    use crate::monitor::symbols::start_kernel_map_for_tcr;
    // Each (TG1, T1SZ) pair maps to a fixed kernel linear-map base.
    let cases: [(u64, u64); 3] = [
        ((0b10_u64 << 30) | (16u64 << 16), 0xFFFF_8000_8000_0000), // 48-bit VA
        ((0b01_u64 << 30) | (17u64 << 16), 0xFFFF_C000_8000_0000), // 47-bit VA
        ((0b10_u64 << 30) | (25u64 << 16), 0xFFFF_FFC0_8000_0000), // 39-bit VA
    ];
    for (tcr, expected) in cases {
        assert_eq!(start_kernel_map_for_tcr(tcr), Some(expected));
    }
    // T1SZ of zero and an all-zero TCR are both invalid configurations.
    assert_eq!(start_kernel_map_for_tcr(0b10_u64 << 30), None);
    assert_eq!(start_kernel_map_for_tcr(0), None);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn aarch64_walk_params_rejects_feat_lpa2() {
    use crate::monitor::reader::Aarch64WalkParams;
    // TCR_EL1.DS (bit 59) enables LPA2 addressing, which the walker does not
    // implement; such configurations must be refused outright.
    let ds_bit = 1u64 << 59;
    let tcr_16k = (0b01u64 << 30) | (12u64 << 16);
    assert!(
        Aarch64WalkParams::from_tcr_el1(ds_bit | tcr_16k).is_none(),
        "LPA2 (TCR_EL1.DS=1) must be rejected"
    );
    assert!(
        Aarch64WalkParams::from_tcr_el1(tcr_16k).is_some(),
        "DS=0 with otherwise identical TCR must accept"
    );
    let tcr_4k = (0b10u64 << 30) | (12u64 << 16);
    assert!(Aarch64WalkParams::from_tcr_el1(ds_bit | tcr_4k).is_none());
}
#[test]
#[cfg(target_arch = "x86_64")]
fn map_metadata_short_copy_u32_at_falls_through_to_scalar() {
    // When map_pa is close enough to end-of-DRAM that the bulk metadata copy
    // truncates, in-range u32_at reads must be served from the cached buffer
    // while past-copied offsets fall back to scalar guest reads (which return
    // 0 when out of bounds).
    let total: usize = 256;
    let map_pa: u64 = 100;
    // copied is bounded by the bytes remaining after map_pa: 156 here.
    let expected_copied = total - map_pa as usize;
    assert!(
        expected_copied < MAP_METADATA_SPAN,
        "test premise: bulk read must truncate"
    );
    let mut buf = vec![0u8; total];
    let in_range_off: usize = 8;
    let in_range_val: u32 = 0xDEAD_BEEF;
    buf[map_pa as usize + in_range_off..map_pa as usize + in_range_off + 4]
        .copy_from_slice(&in_range_val.to_ne_bytes());
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let meta = MapMetadata::read(&mem, map_pa, &BpfMapOffsets::EMPTY);
    assert_eq!(
        meta.copied, expected_copied,
        "bulk read must short-return when map_pa is near end-of-DRAM"
    );
    assert_eq!(
        meta.u32_at(in_range_off),
        in_range_val,
        "in-range u32_at must read from the cached buffer"
    );
    // 200 + 4 > copied (156): the scalar path runs, and its target PA is also
    // past the end of the guest buffer, so it yields 0.
    let past_off: usize = 200;
    assert!(past_off + 4 > meta.copied, "test premise: must trip scalar");
    assert_eq!(
        meta.u32_at(past_off),
        0,
        "past-copied u32_at must fall through to scalar; OOB returns 0"
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn map_metadata_short_copy_u64_at_falls_through_to_scalar() {
    // Same truncated-bulk-copy scenario as the u32 test above, but for u64_at.
    let total: usize = 200;
    let map_pa: u64 = 50;
    // copied is bounded by the bytes remaining after map_pa: 150 here.
    let expected_copied = total - map_pa as usize;
    assert!(
        expected_copied < MAP_METADATA_SPAN,
        "test premise: bulk read must truncate"
    );
    let mut buf = vec![0u8; total];
    let in_range_off: usize = 16;
    let in_range_val: u64 = 0xCAFE_BABE_DEAD_F00D;
    buf[map_pa as usize + in_range_off..map_pa as usize + in_range_off + 8]
        .copy_from_slice(&in_range_val.to_ne_bytes());
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let meta = MapMetadata::read(&mem, map_pa, &BpfMapOffsets::EMPTY);
    assert_eq!(meta.copied, expected_copied);
    assert_eq!(
        meta.u64_at(in_range_off),
        in_range_val,
        "in-range u64_at must read from the cached buffer"
    );
    // 152 + 8 > copied (150): scalar path; its target PA is also out of the
    // guest buffer, so it yields 0.
    let past_off: usize = 152;
    assert!(past_off + 8 > meta.copied);
    assert_eq!(
        meta.u64_at(past_off),
        0,
        "past-copied u64_at must fall through to scalar; OOB returns 0"
    );
}
#[test]
#[cfg(target_arch = "x86_64")]
fn map_metadata_short_copy_u32_at_boundary() {
    // Exercises the exact boundary: an access with off + 4 == copied must
    // still be served from the cached buffer (the gate is `<=`), while one
    // byte further must fall through to the scalar path.
    let total: usize = 100;
    let map_pa: u64 = 20;
    // copied = 80 here; boundary_off = 76.
    let expected_copied = total - map_pa as usize;
    let mut buf = vec![0u8; total];
    let boundary_off = expected_copied - 4;
    let boundary_val: u32 = 0x1234_5678;
    buf[map_pa as usize + boundary_off..map_pa as usize + boundary_off + 4]
        .copy_from_slice(&boundary_val.to_ne_bytes());
    let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
    let meta = MapMetadata::read(&mem, map_pa, &BpfMapOffsets::EMPTY);
    assert_eq!(meta.copied, expected_copied);
    assert_eq!(
        meta.u32_at(boundary_off),
        boundary_val,
        "boundary off+4==copied must read from buffer (gate is `<=`)"
    );
    assert_eq!(
        meta.u32_at(boundary_off + 1),
        0,
        "one byte past the boundary must fall through to scalar OOB",
    );
}
#[test]
fn per_cpu_offsets_cache_hit_returns_same_arc() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    // A second lookup with the same CPU count must reuse the cached Arc
    // without re-running the init closure.
    let cache = PerCpuOffsetsCache::new();
    let init_runs = AtomicUsize::new(0);
    let init = || {
        init_runs.fetch_add(1, Ordering::SeqCst);
        vec![0u64, 0x1000, 0x2000, 0x3000]
    };
    let first = cache.get_or_init(4, init);
    assert_eq!(init_runs.load(Ordering::SeqCst), 1);
    assert_eq!(first.as_slice(), &[0u64, 0x1000, 0x2000, 0x3000]);
    let second = cache.get_or_init(4, init);
    assert_eq!(
        init_runs.load(Ordering::SeqCst),
        1,
        "init closure must not run on cache hit",
    );
    assert!(
        std::sync::Arc::ptr_eq(&first, &second),
        "repeat call with same num_cpus must return the cached Arc, not a fresh allocation",
    );
}
#[test]
fn per_cpu_offsets_cache_num_cpus_mismatch_refreshes() {
    // The cache holds a single slot keyed by num_cpus: a different CPU count
    // forces re-init, a matching one reuses the stored Arc, and returning to
    // an earlier count re-inits again because the old slot was overwritten.
    let cache = PerCpuOffsetsCache::new();
    let calls = std::sync::atomic::AtomicUsize::new(0);
    // Factory producing an init closure for `n` CPUs that also counts runs.
    let init = |n: u32| {
        let calls_ref = &calls;
        move || {
            calls_ref.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
            (0..n).map(|i| i as u64 * 0x100).collect::<Vec<u64>>()
        }
    };
    let first = cache.get_or_init(2, init(2));
    assert_eq!(calls.load(std::sync::atomic::Ordering::SeqCst), 1);
    assert_eq!(first.as_slice(), &[0u64, 0x100]);
    let second = cache.get_or_init(4, init(4));
    assert_eq!(
        calls.load(std::sync::atomic::Ordering::SeqCst),
        2,
        "num_cpus change must trigger a fresh init",
    );
    assert_eq!(second.as_slice(), &[0u64, 0x100, 0x200, 0x300]);
    let third = cache.get_or_init(4, init(4));
    assert_eq!(
        calls.load(std::sync::atomic::Ordering::SeqCst),
        2,
        "cache must store the refreshed slot so a subsequent same-num_cpus call hits",
    );
    assert!(
        std::sync::Arc::ptr_eq(&second, &third),
        "subsequent hit on the refreshed num_cpus must return the same Arc",
    );
    let fourth = cache.get_or_init(2, init(2));
    assert_eq!(
        calls.load(std::sync::atomic::Ordering::SeqCst),
        3,
        "going back to the prior num_cpus must re-init (single-slot cache)",
    );
    assert_eq!(fourth.as_slice(), &[0u64, 0x100]);
    assert!(
        !std::sync::Arc::ptr_eq(&first, &fourth),
        "single-slot cache cannot resurrect the original Arc; the slot was overwritten by `n=4`",
    );
}
mod htab_tests;
mod local_storage_tests;