use anyhow::Context;
use super::btf_offsets::BpfMapOffsets;
use super::idr::{translate_any_kva, xa_load};
use super::reader::GuestMem;
use super::symbols::text_kva_to_pa_with_base;
use super::{Cr3Pa, Kva, PageOffset};
mod htab;
mod local_storage;
#[cfg(test)]
mod tests;
use htab::{iter_htab_entries, iter_percpu_htab_entries};
use local_storage::iter_local_storage_entries;
/// One entry per key of a per-CPU hash map: the key bytes paired with one
/// optional value buffer per CPU (`None` when that CPU's copy was unreadable).
pub(crate) type PerCpuHashEntries = Vec<(Vec<u8>, Vec<Option<Vec<u8>>>)>;
/// Everything the free functions below need to walk BPF map structures in
/// guest memory: the raw memory view, paging state for KVA translation, and
/// the BTF-derived struct-field offsets.
pub(crate) struct AccessorCtx<'a> {
    pub mem: &'a GuestMem,
    /// Physical address of the page-table root (CR3 / TTBR equivalent).
    pub cr3_pa: Cr3Pa,
    /// Base of the kernel's direct (linear) mapping.
    pub page_offset: PageOffset,
    pub offsets: &'a BpfMapOffsets,
    /// Whether 5-level paging is in use (x86).
    pub l5: bool,
    /// TCR_EL1 value for arm64 translation; presumably 0/ignored on x86 — see translate_any_kva.
    pub tcr_el1: u64,
    /// KVA where the kernel text mapping starts.
    pub start_kernel_map: u64,
    /// Physical load base of the kernel.
    pub phys_base: u64,
}
// BPF map type identifiers, mirroring the kernel's `enum bpf_map_type`
// (include/uapi/linux/bpf.h). Values are kernel ABI and must not change.
pub const BPF_MAP_TYPE_HASH: u32 = 1;
pub const BPF_MAP_TYPE_ARRAY: u32 = 2;
pub const BPF_MAP_TYPE_PROG_ARRAY: u32 = 3;
pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: u32 = 4;
pub const BPF_MAP_TYPE_PERCPU_HASH: u32 = 5;
pub const BPF_MAP_TYPE_PERCPU_ARRAY: u32 = 6;
pub const BPF_MAP_TYPE_STACK_TRACE: u32 = 7;
pub const BPF_MAP_TYPE_CGROUP_ARRAY: u32 = 8;
pub const BPF_MAP_TYPE_LRU_HASH: u32 = 9;
pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: u32 = 10;
pub const BPF_MAP_TYPE_LPM_TRIE: u32 = 11;
pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: u32 = 12;
pub const BPF_MAP_TYPE_HASH_OF_MAPS: u32 = 13;
pub const BPF_MAP_TYPE_DEVMAP: u32 = 14;
pub const BPF_MAP_TYPE_SOCKMAP: u32 = 15;
pub const BPF_MAP_TYPE_CPUMAP: u32 = 16;
pub const BPF_MAP_TYPE_XSKMAP: u32 = 17;
pub const BPF_MAP_TYPE_SOCKHASH: u32 = 18;
pub const BPF_MAP_TYPE_CGROUP_STORAGE: u32 = 19;
pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: u32 = 20;
pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: u32 = 21;
pub const BPF_MAP_TYPE_QUEUE: u32 = 22;
pub const BPF_MAP_TYPE_STACK: u32 = 23;
pub const BPF_MAP_TYPE_SK_STORAGE: u32 = 24;
pub const BPF_MAP_TYPE_DEVMAP_HASH: u32 = 25;
pub const BPF_MAP_TYPE_STRUCT_OPS: u32 = 26;
pub const BPF_MAP_TYPE_RINGBUF: u32 = 27;
pub const BPF_MAP_TYPE_INODE_STORAGE: u32 = 28;
pub const BPF_MAP_TYPE_TASK_STORAGE: u32 = 29;
pub const BPF_MAP_TYPE_BLOOM_FILTER: u32 = 30;
pub const BPF_MAP_TYPE_USER_RINGBUF: u32 = 31;
pub const BPF_MAP_TYPE_CGRP_STORAGE: u32 = 32;
pub const BPF_MAP_TYPE_ARENA: u32 = 33;
pub const BPF_MAP_TYPE_INSN_ARRAY: u32 = 34;
/// Size of the fixed name field in `struct bpf_map` (kernel BPF_OBJ_NAME_LEN).
pub const BPF_OBJ_NAME_LEN: usize = 16;
/// Decoded metadata for one guest `struct bpf_map`, as harvested by
/// `find_all_bpf_maps` / `find_bpf_map`.
#[derive(Debug, Clone, Default)]
#[allow(dead_code)]
pub struct BpfMapInfo {
    /// Guest-physical address of the `struct bpf_map`.
    pub map_pa: u64,
    /// Kernel-virtual address of the `struct bpf_map`.
    pub map_kva: u64,
    /// Raw, NUL-padded map name bytes.
    pub name_bytes: [u8; BPF_OBJ_NAME_LEN],
    /// Number of bytes of `name_bytes` before the first NUL.
    pub name_len: u8,
    /// One of the `BPF_MAP_TYPE_*` constants above.
    pub map_type: u32,
    pub map_flags: u32,
    pub key_size: u32,
    pub value_size: u32,
    pub max_entries: u32,
    /// KVA of the inline value area, when computable (ARRAY and STRUCT_OPS maps).
    pub value_kva: Option<u64>,
    /// KVA of the map's attached `struct btf`, or 0 when none.
    pub btf_kva: u64,
    pub btf_value_type_id: u32,
    pub btf_vmlinux_value_type_id: u32,
    pub btf_key_type_id: u32,
}
impl BpfMapInfo {
    /// The populated prefix of the fixed-size name buffer (up to the NUL).
    pub fn name_bytes_active(&self) -> &[u8] {
        let len = usize::from(self.name_len);
        &self.name_bytes[..len]
    }

    /// The map name decoded as UTF-8, replacing invalid bytes lossily.
    pub fn name(&self) -> std::borrow::Cow<'_, str> {
        String::from_utf8_lossy(self.name_bytes_active())
    }
}
impl std::fmt::Display for BpfMapInfo {
    /// Renders just the (lossily decoded) map name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.name())
    }
}
// How many leading bytes of `struct bpf_map` to snapshot in a single guest
// read; presumably sized to cover every field offset this module consults —
// offsets past the snapshot fall back to direct guest reads.
const MAP_METADATA_SPAN: usize = 384;
/// A cached prefix of one guest `struct bpf_map`. Field accessors serve
/// reads from the local buffer when the snapshot covers them, and fall back
/// to per-field guest-memory reads otherwise.
struct MapMetadata<'a> {
    mem: &'a GuestMem,
    /// Guest-physical address the snapshot was taken from.
    map_pa: u64,
    /// Snapshot of the first `MAP_METADATA_SPAN` bytes of the struct.
    buf: [u8; MAP_METADATA_SPAN],
    /// How many bytes were actually copied into `buf` (may be short).
    copied: usize,
}
impl<'a> MapMetadata<'a> {
    /// Snapshot up to `MAP_METADATA_SPAN` bytes of the map struct at `map_pa`.
    fn read(mem: &'a GuestMem, map_pa: u64, _offsets: &BpfMapOffsets) -> Self {
        let mut buf = [0u8; MAP_METADATA_SPAN];
        let copied = mem.read_bytes(map_pa, &mut buf);
        Self {
            mem,
            map_pa,
            buf,
            copied,
        }
    }

    /// Native-endian u32 at byte offset `off`, served from the snapshot when
    /// fully covered, otherwise read directly from guest memory.
    fn u32_at(&self, off: usize) -> u32 {
        let end = off + 4;
        if end > self.copied {
            return self.mem.read_u32(self.map_pa, off);
        }
        let mut raw = [0u8; 4];
        raw.copy_from_slice(&self.buf[off..end]);
        u32::from_ne_bytes(raw)
    }

    /// Native-endian u64 at byte offset `off`; same fallback rule as `u32_at`.
    fn u64_at(&self, off: usize) -> u64 {
        let end = off + 8;
        if end > self.copied {
            return self.mem.read_u64(self.map_pa, off);
        }
        let mut raw = [0u8; 8];
        raw.copy_from_slice(&self.buf[off..end]);
        u64::from_ne_bytes(raw)
    }

    /// The `BPF_OBJ_NAME_LEN` name bytes at `name_off`: borrowed from the
    /// snapshot when covered, otherwise freshly read from guest memory.
    fn name_bytes(&self, name_off: usize) -> std::borrow::Cow<'_, [u8]> {
        let end = name_off + BPF_OBJ_NAME_LEN;
        if end <= self.copied {
            std::borrow::Cow::Borrowed(&self.buf[name_off..end])
        } else {
            let mut out = vec![0u8; BPF_OBJ_NAME_LEN];
            self.mem
                .read_bytes(self.map_pa + name_off as u64, &mut out);
            std::borrow::Cow::Owned(out)
        }
    }
}
/// Enumerate every BPF map registered in the kernel's global `map_idr`.
///
/// Walks the idr's backing xarray for ids `0..idr->next` (clamped to 65536
/// as a sanity bound), translates each stored `struct bpf_map *` to a
/// physical address, and decodes the fields this module cares about. Slots
/// that are empty or fail address translation are silently skipped.
pub(crate) fn find_all_bpf_maps(ctx: &AccessorCtx<'_>, map_idr_kva: u64) -> Vec<BpfMapInfo> {
    // map_idr is a kernel static, so the fixed text/data mapping applies.
    let idr_pa = text_kva_to_pa_with_base(map_idr_kva, ctx.start_kernel_map, ctx.phys_base);
    let offsets = ctx.offsets;
    let xa_head = ctx.mem.read_u64(idr_pa, offsets.idr_xa_head);
    if xa_head == 0 {
        // Empty xarray: no maps have ever been allocated.
        return Vec::new();
    }
    let idr_next = ctx.mem.read_u32(idr_pa, offsets.idr_next).min(65536);
    let mut maps = Vec::new();
    for id in 0..idr_next {
        let Some(entry) = xa_load(
            ctx.mem,
            ctx.page_offset.0,
            xa_head,
            id as u64,
            offsets.xa_node_slots,
            offsets.xa_node_shift,
        ) else {
            continue;
        };
        if entry == 0 {
            // Unoccupied id slot.
            continue;
        }
        let Some(map_pa) = translate_any_kva(
            ctx.mem,
            ctx.cr3_pa.0,
            ctx.page_offset.0,
            entry,
            ctx.l5,
            ctx.tcr_el1,
        ) else {
            continue;
        };
        // Snapshot the struct prefix once; subsequent field reads are cheap.
        let meta = MapMetadata::read(ctx.mem, map_pa, offsets);
        let map_type = meta.u32_at(offsets.map_type);
        let map_flags = meta.u32_at(offsets.map_flags);
        let key_size = meta.u32_at(offsets.key_size);
        let value_size = meta.u32_at(offsets.value_size);
        let max_entries = meta.u32_at(offsets.max_entries);
        let mut name_bytes = [0u8; BPF_OBJ_NAME_LEN];
        name_bytes.copy_from_slice(&meta.name_bytes(offsets.map_name));
        // Active name length = bytes before the first NUL (or the full field).
        let name_len = name_bytes
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(BPF_OBJ_NAME_LEN) as u8;
        // Only map types whose value area sits inline after the struct get a
        // precomputed value KVA here; other types are handled by iterators.
        let value_kva = match map_type {
            BPF_MAP_TYPE_ARRAY => Some(entry + offsets.array_value as u64),
            BPF_MAP_TYPE_STRUCT_OPS => offsets
                .struct_ops_offsets
                .as_ref()
                .map(|so| entry + so.kvalue as u64 + so.value_data as u64),
            _ => None,
        };
        let btf_kva = meta.u64_at(offsets.map_btf);
        let btf_value_type_id = meta.u32_at(offsets.map_btf_value_type_id);
        // Offset 0 means the field does not exist on this kernel version.
        let btf_vmlinux_value_type_id = if offsets.map_btf_vmlinux_value_type_id != 0 {
            meta.u32_at(offsets.map_btf_vmlinux_value_type_id)
        } else {
            0
        };
        let btf_key_type_id = meta.u32_at(offsets.map_btf_key_type_id);
        maps.push(BpfMapInfo {
            map_pa,
            map_kva: entry,
            name_bytes,
            name_len,
            map_type,
            map_flags,
            key_size,
            value_size,
            max_entries,
            value_kva,
            btf_kva,
            btf_value_type_id,
            btf_vmlinux_value_type_id,
            btf_key_type_id,
        });
    }
    maps
}
#[allow(dead_code)]
pub(crate) fn find_bpf_map(
ctx: &AccessorCtx<'_>,
map_idr_kva: u64,
name_suffix: &str,
) -> Option<BpfMapInfo> {
let idr_pa = text_kva_to_pa_with_base(map_idr_kva, ctx.start_kernel_map, ctx.phys_base);
let offsets = ctx.offsets;
let xa_head = ctx.mem.read_u64(idr_pa, offsets.idr_xa_head);
if xa_head == 0 {
return None;
}
let idr_next = ctx.mem.read_u32(idr_pa, offsets.idr_next).min(65536);
for id in 0..idr_next {
let Some(entry) = xa_load(
ctx.mem,
ctx.page_offset.0,
xa_head,
id as u64,
offsets.xa_node_slots,
offsets.xa_node_shift,
) else {
continue;
};
if entry == 0 {
continue;
}
let Some(map_pa) = translate_any_kva(
ctx.mem,
ctx.cr3_pa.0,
ctx.page_offset.0,
entry,
ctx.l5,
ctx.tcr_el1,
) else {
continue;
};
let map_type = ctx.mem.read_u32(map_pa, offsets.map_type);
if map_type != BPF_MAP_TYPE_ARRAY {
continue;
}
let mut name_buf = [0u8; BPF_OBJ_NAME_LEN];
ctx.mem
.read_bytes(map_pa + offsets.map_name as u64, &mut name_buf);
let name_len = name_buf
.iter()
.position(|&b| b == 0)
.unwrap_or(BPF_OBJ_NAME_LEN);
let name_str = std::str::from_utf8(&name_buf[..name_len]).unwrap_or("");
if !name_str.ends_with(name_suffix) {
continue;
}
let meta = MapMetadata::read(ctx.mem, map_pa, offsets);
let name_bytes = name_buf;
let value_kva = Some(entry + offsets.array_value as u64);
let btf_vmlinux_value_type_id = if offsets.map_btf_vmlinux_value_type_id != 0 {
meta.u32_at(offsets.map_btf_vmlinux_value_type_id)
} else {
0
};
return Some(BpfMapInfo {
map_pa,
map_kva: entry,
name_bytes,
name_len: name_len as u8,
map_type,
map_flags: meta.u32_at(offsets.map_flags),
key_size: meta.u32_at(offsets.key_size),
value_size: meta.u32_at(offsets.value_size),
max_entries: meta.u32_at(offsets.max_entries),
value_kva,
btf_kva: meta.u64_at(offsets.map_btf),
btf_value_type_id: meta.u32_at(offsets.map_btf_value_type_id),
btf_vmlinux_value_type_id,
btf_key_type_id: meta.u32_at(offsets.map_btf_key_type_id),
});
}
None
}
// Page granularity used to split map-value I/O across translations.
const BPF_MAP_PAGE_CHUNK: u64 = 4096;
// Sanity cap on a single map-value read (16 MiB).
const MAX_VALUE_SIZE: usize = 16 * 1024 * 1024;
/// Walk `len` bytes of kernel-virtual address space from `target_kva`,
/// invoking `chunk_fn(pa, byte_offset, chunk_len)` once per page-bounded
/// chunk. Returns `false` as soon as any page fails to translate.
fn chunked_kva_io<F>(ctx: &AccessorCtx<'_>, target_kva: u64, len: usize, mut chunk_fn: F) -> bool
where
    F: FnMut(u64, u64, usize),
{
    let total = len as u64;
    let mut done: u64 = 0;
    loop {
        if done >= total {
            return true;
        }
        let kva = target_kva + done;
        let pa = match ctx
            .mem
            .translate_kva(ctx.cr3_pa.0, Kva(kva), ctx.l5, ctx.tcr_el1)
        {
            Some(pa) => pa,
            None => return false,
        };
        // Clamp the chunk to both the end of the current page and the
        // remaining byte count.
        let next_page = (kva & !(BPF_MAP_PAGE_CHUNK - 1)) + BPF_MAP_PAGE_CHUNK;
        let chunk = (next_page - kva).min(total - done) as usize;
        chunk_fn(pa, done, chunk);
        done += chunk as u64;
    }
}
/// Write `data` into a map's value area starting at byte `offset`.
///
/// Returns `false` when the map has no resolved value address, the range
/// would exceed `value_size` (or overflows), any page fails to translate,
/// or fewer bytes than requested were committed.
pub(crate) fn write_bpf_map_value(
    ctx: &AccessorCtx<'_>,
    map_info: &BpfMapInfo,
    offset: usize,
    data: &[u8],
) -> bool {
    let base_kva = match map_info.value_kva {
        Some(kva) => kva,
        None => return false,
    };
    let in_bounds = offset
        .checked_add(data.len())
        .is_some_and(|end| end <= map_info.value_size as usize);
    if !in_bounds {
        return false;
    }
    let target_kva = base_kva + offset as u64;
    let mut written = 0usize;
    let walked = chunked_kva_io(ctx, target_kva, data.len(), |pa, src_off, n| {
        let start = src_off as usize;
        let committed = ctx.mem.write_bytes_at(pa, 0, &data[start..start + n]);
        written = written.saturating_add(committed);
    });
    walked && written == data.len()
}
/// Convenience wrapper: write one native-endian u32 at `offset`.
pub(crate) fn write_bpf_map_value_u32(
    ctx: &AccessorCtx<'_>,
    map_info: &BpfMapInfo,
    offset: usize,
    val: u32,
) -> bool {
    let bytes = val.to_ne_bytes();
    write_bpf_map_value(ctx, map_info, offset, &bytes)
}
/// Read `len` bytes from a map's value area starting at byte `offset`.
///
/// Returns `None` when the map has no resolved value address, the range
/// exceeds `value_size` or `MAX_VALUE_SIZE` (or overflows), any page fails
/// to translate, or fewer bytes than requested could be read.
pub(crate) fn read_bpf_map_value(
    ctx: &AccessorCtx<'_>,
    map_info: &BpfMapInfo,
    offset: usize,
    len: usize,
) -> Option<Vec<u8>> {
    let base_kva = map_info.value_kva?;
    let end = offset.checked_add(len)?;
    if end > map_info.value_size as usize {
        return None;
    }
    if len > MAX_VALUE_SIZE {
        return None;
    }
    let target_kva = base_kva + offset as u64;
    // Zero-initialize instead of handing out raw-pointer slices into an
    // uninitialized Vec: forming a `&mut [u8]` over uninitialized memory is
    // undefined behavior, and zeroing is cheap next to guest-memory I/O.
    let mut buf = vec![0u8; len];
    let mut bytes_filled: usize = 0;
    let ok = chunked_kva_io(ctx, target_kva, len, |pa, dst_off, chunk_len| {
        let dst_off = dst_off as usize;
        let n = ctx.mem.read_bytes(pa, &mut buf[dst_off..dst_off + chunk_len]);
        bytes_filled = bytes_filled.saturating_add(n);
    });
    if !ok || bytes_filled != len {
        return None;
    }
    Some(buf)
}
/// Read one native-endian u32 from the map's value area at `offset`.
pub(crate) fn read_bpf_map_value_u32(
    ctx: &AccessorCtx<'_>,
    map_info: &BpfMapInfo,
    offset: usize,
) -> Option<u32> {
    let bytes = read_bpf_map_value(ctx, map_info, offset, 4)?;
    // The read above is pinned to exactly 4 bytes, so this cannot fail.
    Some(u32::from_ne_bytes(bytes.try_into().expect("length pinned to 4")))
}
/// Read the value for array slot `key` of a PERCPU_ARRAY map, once per CPU.
///
/// `per_cpu_offsets` is the `__per_cpu_offset` table; each CPU's copy lives
/// at `percpu_base + per_cpu_offsets[cpu]`. A CPU whose copy cannot be
/// translated or fully read yields `None` in its slot. Returns an empty
/// vector for wrong map types, out-of-range keys, or an unreadable pointer.
fn read_percpu_array_value(
    ctx: &AccessorCtx<'_>,
    map: &BpfMapInfo,
    key: u32,
    per_cpu_offsets: &[u64],
) -> Vec<Option<Vec<u8>>> {
    if map.map_type != BPF_MAP_TYPE_PERCPU_ARRAY {
        return Vec::new();
    }
    if key >= map.max_entries {
        return Vec::new();
    }
    // The array's value area holds an 8-byte percpu pointer per slot.
    let pptrs_kva = map.map_kva + ctx.offsets.array_value as u64;
    let pptr_kva = pptrs_kva + (key as u64) * 8;
    let Some(pptr_pa) = translate_any_kva(
        ctx.mem,
        ctx.cr3_pa.0,
        ctx.page_offset.0,
        pptr_kva,
        ctx.l5,
        ctx.tcr_el1,
    ) else {
        return Vec::new();
    };
    let percpu_base = ctx.mem.read_u64(pptr_pa, 0);
    if percpu_base == 0 {
        return Vec::new();
    }
    let value_size = map.value_size as usize;
    let mut result = Vec::with_capacity(per_cpu_offsets.len());
    for (cpu_index, &cpu_off) in per_cpu_offsets.iter().enumerate() {
        // A zero offset for a non-boot CPU presumably marks an unpopulated
        // table entry — skip it rather than read a bogus address.
        if cpu_off == 0 && cpu_index > 0 {
            result.push(None);
            continue;
        }
        let cpu_kva = percpu_base.wrapping_add(cpu_off);
        match translate_any_kva(
            ctx.mem,
            ctx.cr3_pa.0,
            ctx.page_offset.0,
            cpu_kva,
            ctx.l5,
            ctx.tcr_el1,
        ) {
            Some(cpu_pa)
                if cpu_pa
                    .checked_add(value_size as u64)
                    .is_some_and(|end| end <= ctx.mem.size()) =>
            {
                // Zero-initialize rather than building a `&mut [u8]` over an
                // uninitialized Vec (that is undefined behavior).
                let mut buf = vec![0u8; value_size];
                if ctx.mem.read_bytes(cpu_pa, &mut buf) == value_size {
                    result.push(Some(buf));
                } else {
                    result.push(None);
                }
            }
            _ => result.push(None),
        }
    }
    result
}
/// Resolve `type_id` through transparent wrappers to the underlying
/// struct/union, discarding the resolved id.
pub(crate) fn resolve_to_struct(btf: &btf_rs::Btf, type_id: u32) -> Option<btf_rs::Struct> {
    let (s, _) = resolve_to_struct_with_id(btf, type_id)?;
    Some(s)
}
/// Like `resolve_to_struct`, but yield the resolved type id instead.
pub(crate) fn resolve_to_struct_id(btf: &btf_rs::Btf, type_id: u32) -> Option<u32> {
    let (_, tid) = resolve_to_struct_with_id(btf, type_id)?;
    Some(tid)
}
/// Follow pointer/modifier/typedef chains from `type_id` until a struct or
/// union is reached, returning it together with its resolved type id.
///
/// Bails out after 32 hops to guard against cyclic or degenerate BTF, and
/// returns `None` for any terminal type that is not a struct/union.
fn resolve_to_struct_with_id(btf: &btf_rs::Btf, type_id: u32) -> Option<(btf_rs::Struct, u32)> {
    let mut tid = type_id;
    for _ in 0..32 {
        let t = btf.resolve_type_by_id(tid).ok()?;
        match t {
            // btf_rs models unions with the same `Struct` payload.
            btf_rs::Type::Struct(s) | btf_rs::Type::Union(s) => return Some((s, tid)),
            // Transparent wrappers: hop to the referenced type id.
            btf_rs::Type::Ptr(_)
            | btf_rs::Type::Volatile(_)
            | btf_rs::Type::Const(_)
            | btf_rs::Type::Typedef(_)
            | btf_rs::Type::TypeTag(_)
            | btf_rs::Type::Restrict(_)
            | btf_rs::Type::DeclTag(_) => {
                tid = t.as_btf_type()?.get_type_id().ok()?;
            }
            // Ints, arrays, enums, funcs, etc. can never resolve to a struct.
            _ => return None,
        }
    }
    None
}
/// Read-oriented interface over a set of guest BPF maps.
///
/// Default method bodies return empty results so a backend may opt into
/// only the map kinds it supports.
#[allow(dead_code)]
pub trait BpfMapAccessor {
    /// All maps currently registered in the guest.
    fn maps(&self) -> Vec<BpfMapInfo>;
    /// First map (of any type) whose name ends with `name_suffix`.
    fn find_map(&self, name_suffix: &str) -> Option<BpfMapInfo> {
        self.maps()
            .into_iter()
            .find(|m| m.name().ends_with(name_suffix))
    }
    /// Read `len` bytes at `offset` from the map's value area.
    fn read_value(&self, map: &BpfMapInfo, offset: usize, len: usize) -> Option<Vec<u8>>;
    /// All key/value pairs of a hash-table map.
    fn iter_hash_map(&self, map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)>;
    /// Per-CPU hash entries; default backend support is "none".
    fn iter_percpu_hash_map(&self, _map: &BpfMapInfo, _num_cpus: u32) -> PerCpuHashEntries {
        Vec::new()
    }
    /// Task-local-storage entries; default backend support is "none".
    fn iter_task_storage(&self, _map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)> {
        Vec::new()
    }
    /// Per-CPU value copies for one array slot `key`.
    fn read_percpu_array(&self, map: &BpfMapInfo, key: u32, num_cpus: u32) -> Vec<Option<Vec<u8>>>;
    /// Snapshot of an arena map's pages; default is an empty snapshot.
    fn read_arena_pages(
        &self,
        _map: &BpfMapInfo,
        _arena_offsets: &super::arena::BpfArenaOffsets,
    ) -> super::arena::ArenaSnapshot {
        super::arena::ArenaSnapshot::default()
    }
    /// Program-attached BTF for `map`, if the backend can load it.
    fn load_program_btf(&self, _map: &BpfMapInfo, _base_btf: &btf_rs::Btf) -> Option<btf_rs::Btf> {
        None
    }
}
/// `BpfMapAccessor` backed by raw guest memory plus a vmlinux symbol table.
pub struct GuestMemMapAccessor<'a> {
    kernel: &'a super::guest::GuestKernel,
    /// KVA of the kernel's global `map_idr`.
    map_idr_kva: u64,
    offsets: &'a BpfMapOffsets,
    /// Shared `__per_cpu_offset` cache; `None` when built standalone.
    per_cpu_offsets_cache: Option<&'a PerCpuOffsetsCache>,
    /// Memoized result of the idr scan, filled on first use.
    maps_cache: std::sync::Mutex<Option<std::sync::Arc<Vec<BpfMapInfo>>>>,
}
/// Caches the `__per_cpu_offset` table keyed by the CPU count it was read
/// for, so repeated per-CPU lookups avoid rereading guest memory.
#[allow(dead_code)]
pub(crate) struct PerCpuOffsetsCache {
    inner: std::sync::Mutex<Option<(u32, std::sync::Arc<Vec<u64>>)>>,
}
#[allow(dead_code)]
impl PerCpuOffsetsCache {
pub(crate) fn new() -> Self {
Self {
inner: std::sync::Mutex::new(None),
}
}
pub(crate) fn get_or_init<F>(&self, num_cpus: u32, init: F) -> std::sync::Arc<Vec<u64>>
where
F: FnOnce() -> Vec<u64>,
{
let mut guard = self.inner.lock().unwrap_or_else(|e| e.into_inner());
if let Some((cached_n, cached)) = guard.as_ref()
&& *cached_n == num_cpus
{
return cached.clone();
}
let arc = std::sync::Arc::new(init());
*guard = Some((num_cpus, arc.clone()));
arc
}
}
#[allow(dead_code)]
impl<'a> GuestMemMapAccessor<'a> {
    /// Build an accessor for `kernel`, locating the `map_idr` symbol.
    ///
    /// # Errors
    /// Fails when `map_idr` is not present in the vmlinux symbol table.
    pub fn from_guest_kernel(
        kernel: &'a super::guest::GuestKernel,
        offsets: &'a BpfMapOffsets,
    ) -> anyhow::Result<Self> {
        let map_idr_kva = kernel
            .symbol_kva("map_idr")
            .ok_or_else(|| anyhow::anyhow!("map_idr symbol not found in vmlinux"))?;
        Ok(Self {
            kernel,
            map_idr_kva,
            offsets,
            per_cpu_offsets_cache: None,
            maps_cache: std::sync::Mutex::new(None),
        })
    }

    /// Test-only constructor taking an explicit `map_idr` address.
    #[cfg(test)]
    pub(crate) fn new_for_test(
        kernel: &'a super::guest::GuestKernel,
        offsets: &'a BpfMapOffsets,
        map_idr_kva: u64,
    ) -> Self {
        Self {
            kernel,
            map_idr_kva,
            offsets,
            per_cpu_offsets_cache: None,
            maps_cache: std::sync::Mutex::new(None),
        }
    }

    /// Assemble the free-function context from the kernel handle.
    fn ctx(&self) -> AccessorCtx<'_> {
        AccessorCtx {
            mem: self.kernel.mem(),
            cr3_pa: Cr3Pa(self.kernel.cr3_pa()),
            page_offset: PageOffset(self.kernel.page_offset()),
            offsets: self.offsets,
            l5: self.kernel.l5(),
            tcr_el1: self.kernel.tcr_el1(),
            start_kernel_map: self.kernel.start_kernel_map(),
            phys_base: self.kernel.phys_base(),
        }
    }

    /// Scan the idr once and memoize the result for this accessor's
    /// lifetime. Deduplicates the fill-on-miss logic that was previously
    /// inlined into `find_map`. A poisoned cache mutex is recovered: the
    /// worst case is a redundant rescan, never corrupt data.
    fn cached_maps(&self) -> std::sync::Arc<Vec<BpfMapInfo>> {
        let mut guard = self.maps_cache.lock().unwrap_or_else(|e| e.into_inner());
        if let Some(cached) = guard.as_ref() {
            return cached.clone();
        }
        let arc = std::sync::Arc::new(find_all_bpf_maps(&self.ctx(), self.map_idr_kva));
        *guard = Some(arc.clone());
        arc
    }

    /// The struct-field offsets this accessor was built with.
    pub fn offsets(&self) -> &BpfMapOffsets {
        self.offsets
    }

    /// The underlying guest-kernel handle.
    pub fn kernel(&self) -> &'a super::guest::GuestKernel {
        self.kernel
    }

    /// Find the first ARRAY-type map whose name ends with `name_suffix`.
    /// (The trait-level `find_map` matches any map type.)
    pub fn find_map(&self, name_suffix: &str) -> Option<BpfMapInfo> {
        self.cached_maps()
            .iter()
            .find(|m| m.map_type == BPF_MAP_TYPE_ARRAY && m.name().ends_with(name_suffix))
            .cloned()
    }

    /// Write `data` into the map's value area at `offset`.
    pub fn write_value(&self, map: &BpfMapInfo, offset: usize, data: &[u8]) -> bool {
        write_bpf_map_value(&self.ctx(), map, offset, data)
    }

    /// Write one native-endian u32 into the map's value area at `offset`.
    pub fn write_value_u32(&self, map: &BpfMapInfo, offset: usize, val: u32) -> bool {
        write_bpf_map_value_u32(&self.ctx(), map, offset, val)
    }

    /// Read one native-endian u32 from the map's value area at `offset`.
    pub fn read_value_u32(&self, map: &BpfMapInfo, offset: usize) -> Option<u32> {
        read_bpf_map_value_u32(&self.ctx(), map, offset)
    }

    /// Resolve `__per_cpu_offset[0..num_cpus]`, using the shared cache when
    /// one was provided (owned accessors) and reading fresh otherwise.
    /// Returns `None` when the symbol is absent from the vmlinux.
    pub(crate) fn resolve_per_cpu_offsets(
        &self,
        num_cpus: u32,
    ) -> Option<std::sync::Arc<Vec<u64>>> {
        let pco_kva = self.kernel.symbol_kva("__per_cpu_offset")?;
        let pco_pa = self.kernel.text_kva_to_pa(pco_kva);
        let mem = self.kernel.mem();
        match self.per_cpu_offsets_cache {
            Some(cache) => Some(cache.get_or_init(num_cpus, || {
                super::symbols::read_per_cpu_offsets(mem, pco_pa, num_cpus)
            })),
            None => Some(std::sync::Arc::new(super::symbols::read_per_cpu_offsets(
                mem, pco_pa, num_cpus,
            ))),
        }
    }
}
impl BpfMapAccessor for GuestMemMapAccessor<'_> {
    /// Enumerate maps, serving repeat calls from the per-accessor cache.
    fn maps(&self) -> Vec<BpfMapInfo> {
        // A poisoned mutex only means a panic elsewhere mid-fill; the cached
        // data is still usable, so recover the guard instead of panicking.
        let mut guard = self.maps_cache.lock().unwrap_or_else(|e| e.into_inner());
        if let Some(cached) = guard.as_ref() {
            return (**cached).clone();
        }
        let maps = find_all_bpf_maps(&self.ctx(), self.map_idr_kva);
        let arc = std::sync::Arc::new(maps);
        let out = (*arc).clone();
        *guard = Some(arc);
        out
    }
    /// Trait-level lookup: matches any map type by name suffix (unlike the
    /// inherent `find_map`, which restricts itself to ARRAY maps).
    fn find_map(&self, name_suffix: &str) -> Option<BpfMapInfo> {
        let mut guard = self.maps_cache.lock().unwrap_or_else(|e| e.into_inner());
        if guard.is_none() {
            *guard = Some(std::sync::Arc::new(find_all_bpf_maps(
                &self.ctx(),
                self.map_idr_kva,
            )));
        }
        guard
            .as_ref()
            .unwrap()
            .iter()
            .find(|m| m.name().ends_with(name_suffix))
            .cloned()
    }
    /// Read `len` bytes at `offset` from the map's value area.
    fn read_value(&self, map: &BpfMapInfo, offset: usize, len: usize) -> Option<Vec<u8>> {
        read_bpf_map_value(&self.ctx(), map, offset, len)
    }
    /// All key/value pairs of a hash-table map.
    fn iter_hash_map(&self, map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)> {
        iter_htab_entries(&self.ctx(), map)
    }
    /// Per-CPU value copies for one array slot; empty when the
    /// `__per_cpu_offset` symbol cannot be resolved.
    fn read_percpu_array(&self, map: &BpfMapInfo, key: u32, num_cpus: u32) -> Vec<Option<Vec<u8>>> {
        let Some(per_cpu_offsets) = self.resolve_per_cpu_offsets(num_cpus) else {
            return Vec::new();
        };
        read_percpu_array_value(&self.ctx(), map, key, per_cpu_offsets.as_slice())
    }
    /// Per-CPU hash entries; empty when `__per_cpu_offset` is unresolved.
    fn iter_percpu_hash_map(&self, map: &BpfMapInfo, num_cpus: u32) -> PerCpuHashEntries {
        let Some(per_cpu_offsets) = self.resolve_per_cpu_offsets(num_cpus) else {
            return Vec::new();
        };
        iter_percpu_htab_entries(&self.ctx(), map, per_cpu_offsets.as_slice())
    }
    /// Snapshot of an arena map's pages.
    fn read_arena_pages(
        &self,
        map: &BpfMapInfo,
        arena_offsets: &super::arena::BpfArenaOffsets,
    ) -> super::arena::ArenaSnapshot {
        super::arena::snapshot_arena(self.kernel, map, arena_offsets)
    }
    /// Task-local-storage entries for `map`.
    fn iter_task_storage(&self, map: &BpfMapInfo) -> Vec<(Vec<u8>, Vec<u8>)> {
        iter_local_storage_entries(&self.ctx(), map)
    }
    /// Program-attached BTF, when the map carries a btf pointer.
    fn load_program_btf(&self, map: &BpfMapInfo, base_btf: &btf_rs::Btf) -> Option<btf_rs::Btf> {
        if map.btf_kva == 0 {
            return None;
        }
        super::dump::load_program_btf_kva(self, map.btf_kva, base_btf)
    }
}
/// Owning variant of `GuestMemMapAccessor`: bundles the kernel handle, the
/// derived offsets, and a shared per-CPU offsets cache in one value.
pub struct GuestMemMapAccessorOwned {
    kernel: super::guest::GuestKernel,
    /// KVA of the kernel's global `map_idr`.
    map_idr_kva: u64,
    offsets: BpfMapOffsets,
    /// Shared across every accessor borrowed via `as_accessor`.
    per_cpu_offsets_cache: PerCpuOffsetsCache,
}
#[allow(dead_code)]
impl GuestMemMapAccessorOwned {
    /// Build from a vmlinux path: reads the file, parses the ELF, derives
    /// the struct offsets, and resolves the `map_idr` symbol.
    ///
    /// # Errors
    /// Fails on file I/O, ELF parsing, offset derivation, or a missing
    /// `map_idr` symbol.
    pub fn new(
        mem: std::sync::Arc<GuestMem>,
        vmlinux: &std::path::Path,
        tcr_el1: u64,
        cr3_pa: u64,
    ) -> anyhow::Result<Self> {
        let data = std::fs::read(vmlinux)
            .with_context(|| format!("read vmlinux: {}", vmlinux.display()))?;
        let elf = goblin::elf::Elf::parse(&data).context("parse vmlinux ELF")?;
        let kernel = super::guest::GuestKernel::from_elf(mem, &elf, tcr_el1, cr3_pa)?;
        let offsets = BpfMapOffsets::from_elf(&elf, &data, vmlinux)?;
        let map_idr_kva = kernel
            .symbol_kva("map_idr")
            .ok_or_else(|| anyhow::anyhow!("map_idr symbol not found in vmlinux"))?;
        Ok(Self {
            kernel,
            map_idr_kva,
            offsets,
            per_cpu_offsets_cache: PerCpuOffsetsCache::new(),
        })
    }
    /// Build from an already-parsed ELF, with no phys_base hint.
    pub fn from_elf(
        mem: std::sync::Arc<GuestMem>,
        elf: &goblin::elf::Elf<'_>,
        data: &[u8],
        vmlinux: &std::path::Path,
        tcr_el1: u64,
        cr3_pa: u64,
    ) -> anyhow::Result<Self> {
        Self::from_elf_inner(mem, elf, data, vmlinux, tcr_el1, cr3_pa, 0)
    }
    /// Build from an already-parsed ELF with an explicit phys_base hint.
    pub fn from_elf_with_hint(
        mem: std::sync::Arc<GuestMem>,
        elf: &goblin::elf::Elf<'_>,
        data: &[u8],
        vmlinux: &std::path::Path,
        tcr_el1: u64,
        cr3_pa: u64,
        phys_base_hint: u64,
    ) -> anyhow::Result<Self> {
        Self::from_elf_inner(mem, elf, data, vmlinux, tcr_el1, cr3_pa, phys_base_hint)
    }
    /// Shared constructor body for the `from_elf*` variants.
    fn from_elf_inner(
        mem: std::sync::Arc<GuestMem>,
        elf: &goblin::elf::Elf<'_>,
        data: &[u8],
        vmlinux: &std::path::Path,
        tcr_el1: u64,
        cr3_pa: u64,
        phys_base_hint: u64,
    ) -> anyhow::Result<Self> {
        let kernel = super::guest::GuestKernel::from_elf_with_hint(
            mem,
            elf,
            tcr_el1,
            cr3_pa,
            phys_base_hint,
        )?;
        let offsets = BpfMapOffsets::from_elf(elf, data, vmlinux)?;
        let map_idr_kva = kernel
            .symbol_kva("map_idr")
            .ok_or_else(|| anyhow::anyhow!("map_idr symbol not found in vmlinux"))?;
        Ok(Self {
            kernel,
            map_idr_kva,
            offsets,
            per_cpu_offsets_cache: PerCpuOffsetsCache::new(),
        })
    }
    /// Borrow a lightweight accessor that shares this owner's per-CPU cache
    /// (each borrowed accessor starts with its own empty maps cache).
    pub fn as_accessor(&self) -> GuestMemMapAccessor<'_> {
        GuestMemMapAccessor {
            kernel: &self.kernel,
            map_idr_kva: self.map_idr_kva,
            offsets: &self.offsets,
            per_cpu_offsets_cache: Some(&self.per_cpu_offsets_cache),
            maps_cache: std::sync::Mutex::new(None),
        }
    }
    /// The owned guest-kernel handle.
    pub fn guest_kernel(&self) -> &super::guest::GuestKernel {
        &self.kernel
    }
}