use std::{
borrow::Borrow,
ffi::CString,
io,
marker::PhantomData,
ops::Deref,
os::fd::{AsFd, BorrowedFd, OwnedFd},
path::Path,
ptr,
};
use aya_obj::{EbpfSectionKind, InvalidTypeBinding, generated::bpf_map_type, parse_map_info};
use thiserror::Error;
use crate::{
PinningType, Pod,
pin::PinError,
sys::{
SyscallError, bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_fd_by_id,
bpf_map_get_next_key, bpf_map_update_elem_ptr, bpf_pin_object,
},
util::nr_cpus,
};
pub mod array;
pub mod bloom_filter;
pub mod hash_map;
mod info;
pub mod lpm_trie;
pub mod of_maps;
pub mod perf;
pub mod queue;
pub mod ring_buf;
pub mod sk_storage;
pub mod sock;
pub mod stack;
pub mod stack_trace;
pub mod xdp;
pub use array::{Array, PerCpuArray, ProgramArray};
pub use bloom_filter::BloomFilter;
pub use hash_map::{HashMap, PerCpuHashMap};
pub use info::{MapInfo, MapType, loaded_maps};
pub use lpm_trie::LpmTrie;
pub use of_maps::{ArrayOfMaps, HashOfMaps};
pub use perf::PerfEventArray;
pub use queue::Queue;
pub use ring_buf::RingBuf;
pub use sk_storage::SkStorage;
pub use sock::{SockHash, SockMap};
pub use stack::Stack;
pub use stack_trace::StackTraceMap;
pub use xdp::{CpuMap, DevMap, DevMapHash, XskMap};
/// Conversion from raw [`MapData`] into a typed map wrapper.
///
/// Sealed: the real conversion lives in `sealed::FromMapData`; external crates
/// can name this trait but cannot implement it.
pub trait FromMapData: Sized + sealed::FromMapData {}
impl<T: sealed::FromMapData> FromMapData for T {}

/// Maps that can be stored inside a map-of-maps (they expose an fd).
///
/// Sealed: implemented via `sealed::InnerMap` only within this crate.
pub trait InnerMap: sealed::InnerMap {}
impl<T: sealed::InnerMap> InnerMap for T {}

/// Maps that can be created standalone (without loading an object file).
///
/// Sealed: the actual creation logic is provided by `sealed::CreatableMap`;
/// this facade only re-exports it as a callable method.
pub trait CreatableMap: sealed::CreatableMap {
    /// Creates a new map in the kernel with the given capacity and flags.
    fn create(max_entries: u32, flags: u32) -> Result<Self, MapError> {
        <Self as sealed::CreatableMap>::create(max_entries, flags)
    }
}
impl<T: sealed::CreatableMap> CreatableMap for T {}
/// Private backing traits for the public `FromMapData`/`InnerMap`/
/// `CreatableMap` facades; kept in a private module so downstream crates
/// cannot implement them (sealed-trait pattern).
mod sealed {
    use super::{MapData, MapError, MapFd};

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait FromMapData: Sized {
        /// Builds `Self` by taking ownership of `map_data`.
        fn from_map_data(map_data: MapData) -> Result<Self, MapError>;
    }

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait InnerMap {
        /// Returns the map's file descriptor.
        fn fd(&self) -> &MapFd;
    }

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait CreatableMap: Sized {
        /// Creates the map in the kernel with the given capacity and flags.
        fn create(max_entries: u32, flags: u32) -> Result<Self, MapError>;
    }
}
/// Errors occurring while working with eBPF maps.
#[derive(Error, Debug)]
pub enum MapError {
    /// A map-of-maps was declared without an inner map definition.
    #[error(
        "map `{outer_name}` is a map-of-maps but has no inner map definition; \
        use #[btf_map] with a BTF-typed map-of-maps that includes an inner map type"
    )]
    MissingInnerMapDefinition {
        /// Name of the outer (map-of-maps) map.
        outer_name: String,
    },
    /// The raw map type value is unknown or cannot be handled.
    #[error("invalid map type {map_type}")]
    InvalidMapType {
        /// The raw `bpf_map_type` value.
        map_type: u32,
    },
    /// The map name contains an interior NUL or is otherwise invalid.
    #[error("invalid map name `{name}`")]
    InvalidName {
        /// The offending name.
        name: String,
    },
    /// The `BPF_MAP_CREATE` syscall failed.
    #[error("failed to create map `{name}`")]
    CreateError {
        /// Name of the map that could not be created.
        name: String,
        /// Underlying OS error.
        #[source]
        io_error: io::Error,
    },
    /// The key type's size does not match the map definition.
    #[error("invalid key size {size}, expected {expected}")]
    InvalidKeySize {
        /// Size of the Rust key type.
        size: usize,
        /// Key size declared by the map.
        expected: usize,
    },
    /// The value type's size does not match the map definition.
    #[error("invalid value size {size}, expected {expected}")]
    InvalidValueSize {
        /// Size of the Rust value type.
        size: usize,
        /// Value size declared by the map.
        expected: usize,
    },
    /// An index beyond the map's capacity was accessed.
    #[error("the index is {index} but `max_entries` is {max_entries}")]
    OutOfBounds {
        /// The requested index.
        index: u32,
        /// The map's capacity.
        max_entries: u32,
    },
    /// Lookup failed: the key is not present.
    #[error("key not found")]
    KeyNotFound,
    /// The requested element is not present.
    #[error("element not found")]
    ElementNotFound,
    /// Operation requires a loaded program.
    #[error("the program is not loaded")]
    ProgramNotLoaded,
    /// A generic I/O error.
    #[error(transparent)]
    IoError(#[from] io::Error),
    /// A bpf syscall failed.
    #[error(transparent)]
    SyscallError(#[from] SyscallError),
    /// Pinning the map to the BPF filesystem failed.
    #[error("map `{name:?}` requested pinning. pinning failed")]
    PinError {
        /// Name of the map, if known.
        name: Option<String>,
        /// Underlying pinning error.
        #[source]
        error: PinError,
    },
    /// The running kernel does not support program ids.
    #[error("program ids are not supported by the current kernel")]
    ProgIdNotSupported,
    /// The map type has no high-level wrapper in aya.
    #[error(
        "type of {name} ({map_type:?}) is unsupported; see `EbpfLoader::allow_unsupported_maps`"
    )]
    Unsupported {
        /// Name of the unsupported map.
        name: String,
        /// Its kernel map type.
        map_type: bpf_map_type,
    },
}
impl From<InvalidTypeBinding<u32>> for MapError {
    /// An unknown raw map type value maps directly onto `InvalidMapType`.
    fn from(InvalidTypeBinding { value }: InvalidTypeBinding<u32>) -> Self {
        Self::InvalidMapType { map_type: value }
    }
}
/// A file descriptor referring to an eBPF map.
#[derive(Debug)]
pub struct MapFd {
    // Mockable so unit tests can override syscalls without real fds.
    fd: crate::MockableFd,
}

impl MapFd {
    /// Wraps an already-owned descriptor.
    const fn from_fd(fd: crate::MockableFd) -> Self {
        Self { fd }
    }

    /// Attempts to duplicate the descriptor via `MockableFd::try_clone`,
    /// returning an independent handle to the same map.
    pub fn try_clone(&self) -> io::Result<Self> {
        let Self { fd } = self;
        let fd = fd.try_clone()?;
        Ok(Self { fd })
    }
}

impl AsFd for MapFd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        let Self { fd } = self;
        fd.as_fd()
    }
}
/// A loaded eBPF map, tagged by its kernel map type.
///
/// Every variant wraps the same [`MapData`]; the variant records which typed
/// wrapper (`HashMap`, `Array`, ...) the map can be converted into via the
/// `TryFrom` impls below. Kernel map types aya has no wrapper for are stored
/// as [`Map::Unsupported`].
#[derive(Debug)]
pub enum Map {
    Array(MapData),
    ArrayOfMaps(MapData),
    BloomFilter(MapData),
    CpuMap(MapData),
    DevMap(MapData),
    DevMapHash(MapData),
    HashMap(MapData),
    HashOfMaps(MapData),
    LpmTrie(MapData),
    LruHashMap(MapData),
    PerCpuArray(MapData),
    PerCpuHashMap(MapData),
    PerCpuLruHashMap(MapData),
    PerfEventArray(MapData),
    ProgramArray(MapData),
    Queue(MapData),
    RingBuf(MapData),
    SockHash(MapData),
    SockMap(MapData),
    SkStorage(MapData),
    Stack(MapData),
    StackTraceMap(MapData),
    // Kernel map type recognized but not wrapped by aya.
    Unsupported(MapData),
    XskMap(MapData),
}
impl Map {
    /// Returns the [`MapData`] common to every variant.
    ///
    /// All variants wrap a `MapData`, so the accessors below are implemented
    /// once in terms of this helper instead of each repeating a 24-arm
    /// `match`. Adding a new variant now requires updating exactly one match.
    const fn data(&self) -> &MapData {
        match self {
            Self::Array(map)
            | Self::ArrayOfMaps(map)
            | Self::BloomFilter(map)
            | Self::CpuMap(map)
            | Self::DevMap(map)
            | Self::DevMapHash(map)
            | Self::HashMap(map)
            | Self::HashOfMaps(map)
            | Self::LpmTrie(map)
            | Self::LruHashMap(map)
            | Self::PerCpuArray(map)
            | Self::PerCpuHashMap(map)
            | Self::PerCpuLruHashMap(map)
            | Self::PerfEventArray(map)
            | Self::ProgramArray(map)
            | Self::Queue(map)
            | Self::RingBuf(map)
            | Self::SockHash(map)
            | Self::SockMap(map)
            | Self::SkStorage(map)
            | Self::Stack(map)
            | Self::StackTraceMap(map)
            | Self::Unsupported(map)
            | Self::XskMap(map) => map,
        }
    }

    /// Returns the raw kernel map type (`bpf_map_type` as `u32`).
    const fn map_type(&self) -> u32 {
        self.data().obj.map_type()
    }

    /// Pins the map to `path` on a BPF filesystem.
    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
        self.data().pin(path)
    }

    /// Wraps `map_data` in the `Map` variant matching its kernel map type.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidMapType`] when the raw type value does not
    /// correspond to a known `bpf_map_type`, or is a sentinel
    /// (`BPF_MAP_TYPE_UNSPEC` / `__MAX_BPF_MAP_TYPE`).
    pub fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
        let map_type = map_data.obj.map_type();
        let map = match bpf_map_type::try_from(map_type)? {
            bpf_map_type::BPF_MAP_TYPE_HASH => Self::HashMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_ARRAY => Self::Array(map_data),
            bpf_map_type::BPF_MAP_TYPE_PROG_ARRAY => Self::ProgramArray(map_data),
            bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY => Self::PerfEventArray(map_data),
            bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH => Self::PerCpuHashMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY => Self::PerCpuArray(map_data),
            bpf_map_type::BPF_MAP_TYPE_STACK_TRACE => Self::StackTraceMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_LRU_HASH => Self::LruHashMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH => Self::PerCpuLruHashMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_LPM_TRIE => Self::LpmTrie(map_data),
            bpf_map_type::BPF_MAP_TYPE_DEVMAP => Self::DevMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_SOCKMAP => Self::SockMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_CPUMAP => Self::CpuMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_XSKMAP => Self::XskMap(map_data),
            bpf_map_type::BPF_MAP_TYPE_SOCKHASH => Self::SockHash(map_data),
            bpf_map_type::BPF_MAP_TYPE_QUEUE => Self::Queue(map_data),
            bpf_map_type::BPF_MAP_TYPE_STACK => Self::Stack(map_data),
            bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH => Self::DevMapHash(map_data),
            bpf_map_type::BPF_MAP_TYPE_RINGBUF => Self::RingBuf(map_data),
            bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER => Self::BloomFilter(map_data),
            bpf_map_type::BPF_MAP_TYPE_CGROUP_ARRAY => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_ARRAY_OF_MAPS => Self::ArrayOfMaps(map_data),
            bpf_map_type::BPF_MAP_TYPE_HASH_OF_MAPS => Self::HashOfMaps(map_data),
            bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_SK_STORAGE => Self::SkStorage(map_data),
            bpf_map_type::BPF_MAP_TYPE_STRUCT_OPS => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_INODE_STORAGE => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_TASK_STORAGE => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_USER_RINGBUF => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_CGRP_STORAGE => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_ARENA => Self::Unsupported(map_data),
            bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED => {
                Self::Unsupported(map_data)
            }
            bpf_map_type::BPF_MAP_TYPE_UNSPEC => return Err(MapError::InvalidMapType { map_type }),
            bpf_map_type::__MAX_BPF_MAP_TYPE => return Err(MapError::InvalidMapType { map_type }),
        };
        Ok(map)
    }

    /// Returns the map's file descriptor.
    pub const fn fd(&self) -> &MapFd {
        self.data().fd()
    }
}
/// Generates an inherent `pin` method for a typed map wrapper.
///
/// Entry form takes a tuple of generic value-type parameters (possibly empty)
/// and a braced list of wrapper type names; each expands to an impl whose
/// `pin` delegates to the wrapped `MapData::pin`.
macro_rules! impl_map_pin {
    ($ty_param:tt {
        $($ty:ident),+ $(,)?
    }) => {
        $(impl_map_pin!(<$ty_param> $ty);)+
    };
    (
        <($($ty_param:ident),*)>
        $ty:ident
    ) => {
        impl<T: Borrow<MapData>, $($ty_param: Pod),*> $ty<T, $($ty_param),*>
        {
            /// Pins the map to `path` on a BPF filesystem.
            pub fn pin<P: AsRef<Path>>(self, path: P) -> Result<(), PinError> {
                let data = self.inner.borrow();
                data.pin(path)
            }
        }
    };
}

// Wrappers with no value-type parameter.
impl_map_pin!(() {
    ProgramArray,
    SockMap,
    StackTraceMap,
    CpuMap,
    DevMap,
    DevMapHash,
    XskMap,
});
// Wrappers generic over a value type V.
impl_map_pin!((V) {
    Array,
    PerCpuArray,
    SockHash,
    BloomFilter,
    Queue,
    SkStorage,
    Stack,
});
// Wrappers generic over a key type K and a value type V.
impl_map_pin!((K, V) {
    HashMap,
    PerCpuHashMap,
    LpmTrie,
});
/// Generates `TryFrom<Map>` / `TryFrom<&Map>` / `TryFrom<&mut Map>` impls for
/// a typed map wrapper.
///
/// The optional `from Variant|Variant` clause lists which `Map` enum variants
/// convert into the wrapper (defaults to the variant named like the wrapper
/// itself); any other variant yields `MapError::InvalidMapType`.
macro_rules! impl_try_from_map {
    ($ty_param:tt {
        $($(#[$meta:meta])* $ty:ident $(from $($variant:ident)|+)?),+ $(,)?
    }) => {
        $(impl_try_from_map!($(#[$meta])* <$ty_param> $ty $(from $($variant)|+)?);)+
    };
    // No `from` clause: the accepted variant defaults to the wrapper's name.
    ($(#[$meta:meta])* <$ty_param:tt> $ty:ident) => {
        impl_try_from_map!($(#[$meta])* <$ty_param> $ty from $ty);
    };
    // Expand to the three reference flavors: &Map, &mut Map, and owned Map.
    (
        $(#[$meta:meta])* <($($ty_param:ident),*)> $ty:ident from $($variant:ident)|+
    ) => {
        impl_try_from_map!(@impl $(#[$meta])* <'a> ($($ty_param: Pod),*) $ty from $($variant)|+);
        impl_try_from_map!(@impl $(#[$meta])* <'a mut> ($($ty_param: Pod),*) $ty from $($variant)|+);
        impl_try_from_map!(@impl $(#[$meta])* <> ($($ty_param: Pod),*) $ty from $($variant)|+);
    };
    // Internal rule that writes a single TryFrom impl.
    (@impl
        $(#[$meta:meta])*
        <$($l:lifetime $($m:ident)?)?>
        ($($ty_param:ident $(: $bound:path)?),*)
        $ty:ident from $($variant:ident)|+
    ) => {
        $(#[$meta])*
        impl<$($l,)? $($ty_param $(: $bound)?),*> TryFrom<$(&$l $($m)?)? Map>
            for $ty<$(&$l $($m)?)? MapData, $($ty_param),*>
        {
            type Error = MapError;
            fn try_from(map: $(&$l $($m)?)? Map) -> Result<Self, Self::Error> {
                match map {
                    $(Map::$variant(map_data) => Self::new(map_data),)+
                    map => Err(MapError::InvalidMapType {
                        map_type: map.map_type()
                    }),
                }
            }
        }
    };
}

impl_try_from_map!(() {
    CpuMap,
    DevMap,
    DevMapHash,
    PerfEventArray,
    ProgramArray,
    RingBuf,
    SockMap,
    StackTraceMap,
    XskMap,
});
impl_try_from_map!((V) {
    Array,
    BloomFilter,
    PerCpuArray,
    Queue,
    SockHash,
    SkStorage,
    Stack,
});
// HashMap and PerCpuHashMap also accept their LRU counterparts, which share
// key/value semantics with the non-LRU variants.
impl_try_from_map!((K, V) {
    HashMap from HashMap|LruHashMap,
    LpmTrie,
    PerCpuHashMap from PerCpuHashMap|PerCpuLruHashMap,
});
/// Like `impl_try_from_map!`, but for map-of-maps wrappers, whose value type
/// is bounded by `InnerMap` rather than `Pod`.
macro_rules! impl_try_from_map_of_maps {
    ($ty:ident) => {
        impl_try_from_map_of_maps!($ty <>);
    };
    ($ty:ident <$($pre:ident : $pre_bound:path),*>) => {
        impl_try_from_map!(@impl <'a> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
        impl_try_from_map!(@impl <'a mut> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
        impl_try_from_map!(@impl <> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
    };
}

impl_try_from_map_of_maps!(ArrayOfMaps);
impl_try_from_map_of_maps!(HashOfMaps<K: Pod>);
/// Implements the sealed `FromMapData` and `InnerMap` traits for a typed map
/// wrapper over owned `MapData`.
///
/// The `via $accessor` form is for wrappers (PerfEventArray, RingBuf) that
/// reach their `MapData` through an accessor method instead of an `inner`
/// field.
macro_rules! impl_from_map_data {
    ($ty_param:tt { $($ty:ident),+ $(,)? }) => {
        $(impl_from_map_data!(<$ty_param> $ty);)+
    };
    (<($($ty_param:ident),*)> $ty:ident via $accessor:ident) => {
        impl<$($ty_param: Pod),*> sealed::FromMapData for $ty<MapData, $($ty_param),*> {
            fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
                Self::new(map_data)
            }
        }
        impl<$($ty_param: Pod),*> sealed::InnerMap for $ty<MapData, $($ty_param),*> {
            fn fd(&self) -> &MapFd {
                self.$accessor().fd()
            }
        }
    };
    (<($($ty_param:ident),*)> $ty:ident) => {
        impl<$($ty_param: Pod),*> sealed::FromMapData for $ty<MapData, $($ty_param),*> {
            fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
                Self::new(map_data)
            }
        }
        impl<$($ty_param: Pod),*> sealed::InnerMap for $ty<MapData, $($ty_param),*> {
            fn fd(&self) -> &MapFd {
                self.inner.fd()
            }
        }
    };
}

impl_from_map_data!(() {
    CpuMap, DevMap, DevMapHash,
    SockMap, StackTraceMap, XskMap,
});
// These two store their MapData behind an accessor method, not `inner`.
impl_from_map_data!(<()> PerfEventArray via map_data);
impl_from_map_data!(<()> RingBuf via map_data);
impl_from_map_data!((V) {
    Array, BloomFilter, PerCpuArray,
    Queue, SockHash, SkStorage, Stack,
});
impl_from_map_data!((K, V) {
    HashMap, LpmTrie, PerCpuHashMap,
});

// Raw MapData and MapFd also satisfy the sealed traits, so they can be used
// directly where a typed wrapper is not needed.
impl sealed::FromMapData for MapData {
    fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
        Ok(map_data)
    }
}
impl sealed::InnerMap for MapData {
    fn fd(&self) -> &MapFd {
        self.fd()
    }
}
impl sealed::InnerMap for MapFd {
    fn fd(&self) -> &MapFd {
        self
    }
}
/// Implements the sealed `CreatableMap` trait for a standalone map wrapper:
/// builds an `aya_obj::Map` from raw parameters and creates it in the kernel
/// under a fixed placeholder name.
macro_rules! impl_creatable_map {
    ($ty:ident<MapData $(, $p:ident: Pod)*>, $map_type:expr, $key_size:expr, $value_size:expr, $name:expr) => {
        impl<$($p: Pod),*> sealed::CreatableMap for $ty<MapData, $($p),*> {
            fn create(max_entries: u32, flags: u32) -> Result<Self, MapError> {
                let obj = aya_obj::Map::new_from_params(
                    $map_type as u32, $key_size, $value_size, max_entries, flags,
                );
                Self::new(MapData::create(obj, $name, None)?)
            }
        }
    };
}

// Key-size conventions: array-style maps are indexed by u32; queue, stack and
// bloom filter maps are keyless (key_size 0); hash-style maps use K's size;
// LPM tries use the lpm_trie::Key<K> wrapper (prefix length + key data).
impl_creatable_map!(Array<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_ARRAY, size_of::<u32>() as u32, size_of::<V>() as u32, "standalone_array");
impl_creatable_map!(PerCpuArray<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY, size_of::<u32>() as u32, size_of::<V>() as u32, "standalone_percpu_array");
impl_creatable_map!(BloomFilter<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER, 0, size_of::<V>() as u32, "standalone_bloom_filter");
impl_creatable_map!(Queue<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_QUEUE, 0, size_of::<V>() as u32, "standalone_queue");
impl_creatable_map!(Stack<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_STACK, 0, size_of::<V>() as u32, "standalone_stack");
impl_creatable_map!(HashMap<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_HASH, size_of::<K>() as u32, size_of::<V>() as u32, "standalone_hash");
impl_creatable_map!(PerCpuHashMap<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH, size_of::<K>() as u32, size_of::<V>() as u32, "standalone_percpu_hash");
impl_creatable_map!(LpmTrie<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_LPM_TRIE, size_of::<lpm_trie::Key<K>>() as u32, size_of::<V>() as u32, "standalone_lpm_trie");
/// Verifies that `index` is within the map's capacity.
///
/// Returns `MapError::OutOfBounds` when `index >= max_entries`.
pub(crate) const fn check_bounds(map: &MapData, index: u32) -> Result<(), MapError> {
    let max_entries = map.obj.max_entries();
    if index < max_entries {
        Ok(())
    } else {
        Err(MapError::OutOfBounds { index, max_entries })
    }
}
pub(crate) const fn check_kv_size<K, V>(map: &MapData) -> Result<(), MapError> {
let size = size_of::<K>();
let expected = map.obj.key_size() as usize;
if size != expected {
return Err(MapError::InvalidKeySize { size, expected });
}
let size = size_of::<V>();
let expected = map.obj.value_size() as usize;
if size != expected {
return Err(MapError::InvalidValueSize { size, expected });
}
Ok(())
}
/// Verifies that the size of `V` matches the map's declared value size,
/// returning `InvalidValueSize` on mismatch.
pub(crate) const fn check_v_size<V>(map: &MapData) -> Result<(), MapError> {
    let size = size_of::<V>();
    let expected = map.obj.value_size() as usize;
    if size == expected {
        Ok(())
    } else {
        Err(MapError::InvalidValueSize { size, expected })
    }
}
/// The data stored in an eBPF map: its parsed object-file definition plus the
/// file descriptor of the map created in the kernel.
#[derive(Debug)]
pub struct MapData {
    // Parsed map definition (type, key/value sizes, max_entries, init data).
    obj: aya_obj::Map,
    // Kernel handle for the created/opened map.
    fd: MapFd,
}
impl MapData {
    /// Creates a new map in the kernel from `obj` under the given `name`.
    ///
    /// `btf_fd` is forwarded to the `BPF_MAP_CREATE` syscall for BTF-annotated
    /// maps.
    pub fn create(
        obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
    ) -> Result<Self, MapError> {
        Self::create_with_inner_map_fd(obj, name, btf_fd, None)
    }

    /// Like [`Self::create`], but additionally passes `inner_map_fd`, which
    /// map-of-maps types need so the kernel can learn the inner map's shape.
    pub(crate) fn create_with_inner_map_fd(
        mut obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
        inner_map_fd: Option<BorrowedFd<'_>>,
    ) -> Result<Self, MapError> {
        // The syscall takes a C string; an interior NUL makes the name invalid.
        let c_name = CString::new(name)
            .map_err(|std::ffi::NulError { .. }| MapError::InvalidName { name: name.into() })?;
        // Perf event arrays are sized per possible CPU: an unset (0) or
        // too-large max_entries is replaced with nr_cpus.
        if obj.map_type() == bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32 {
            let nr_cpus = nr_cpus().map_err(|(_, error)| MapError::IoError(error))? as u32;
            if obj.max_entries() == 0 || obj.max_entries() > nr_cpus {
                obj.set_max_entries(nr_cpus);
            }
        }
        let fd = bpf_create_map(&c_name, &obj, btf_fd, inner_map_fd).map_err(|io_error| {
            MapError::CreateError {
                name: name.into(),
                io_error,
            }
        })?;
        Ok(Self {
            obj,
            fd: MapFd::from_fd(fd),
        })
    }

    /// Opens the map pinned at `path` if one exists; otherwise creates the
    /// map (first creating `inner_map_obj` when given, for map-of-maps) and
    /// pins it at `path`.
    pub(crate) fn create_pinned_by_name<P: AsRef<Path>>(
        path: P,
        obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
        inner_map_obj: Option<aya_obj::Map>,
    ) -> Result<Self, MapError> {
        use std::os::unix::ffi::OsStrExt as _;

        let path = path.as_ref();
        let path_string = match CString::new(path.as_os_str().as_bytes()) {
            Ok(path) => path,
            Err(error) => {
                return Err(MapError::PinError {
                    name: Some(name.into()),
                    error: PinError::InvalidPinPath {
                        path: path.to_path_buf(),
                        error,
                    },
                });
            }
        };
        // Reuse an existing pin when present; note the pinned map's actual
        // definition is not re-validated against `obj` here.
        if let Ok(fd) = bpf_get_object(&path_string) {
            Ok(Self {
                obj,
                fd: MapFd::from_fd(fd),
            })
        } else {
            // `inner_map` must outlive the borrow of its fd passed below.
            let inner_map;
            let inner_map_fd = if let Some(inner) = inner_map_obj {
                inner_map = Self::create(inner, &format!("{name}.inner"), btf_fd)?;
                Some(inner_map.fd().as_fd())
            } else {
                None
            };
            let map = Self::create_with_inner_map_fd(obj, name, btf_fd, inner_map_fd)?;
            map.pin(path).map_err(|error| MapError::PinError {
                name: Some(name.into()),
                error,
            })?;
            Ok(map)
        }
    }

    /// Writes the map's initial data (if any) into element 0 and freezes
    /// read-only (`.rodata`) maps so userspace can no longer modify them.
    pub(crate) fn finalize(&mut self) -> Result<(), MapError> {
        let Self { obj, fd } = self;
        if !obj.data().is_empty() {
            bpf_map_update_elem_ptr(fd.as_fd(), &0, obj.data_mut().as_mut_ptr(), 0)
                .map_err(|io_error| SyscallError {
                    call: "bpf_map_update_elem",
                    io_error,
                })
                .map_err(MapError::from)?;
        }
        if obj.section_kind() == EbpfSectionKind::Rodata {
            bpf_map_freeze(fd.as_fd())
                .map_err(|io_error| SyscallError {
                    call: "bpf_map_freeze",
                    io_error,
                })
                .map_err(MapError::from)?;
        }
        Ok(())
    }

    /// Loads a map that was pinned at `path` on a BPF filesystem.
    pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, MapError> {
        use std::os::unix::ffi::OsStrExt as _;

        let path = path.as_ref();
        let path_string =
            CString::new(path.as_os_str().as_bytes()).map_err(|error| MapError::PinError {
                name: None,
                error: PinError::InvalidPinPath {
                    path: path.into(),
                    error,
                },
            })?;
        let fd = bpf_get_object(&path_string).map_err(|io_error| SyscallError {
            call: "BPF_OBJ_GET",
            io_error,
        })?;
        Self::from_fd_inner(fd)
    }

    /// Loads a map from its kernel-assigned id.
    pub fn from_id(id: u32) -> Result<Self, MapError> {
        let fd = bpf_map_get_fd_by_id(id)?;
        Self::from_fd_inner(fd)
    }

    /// Builds a `MapData` by querying the kernel for the map's info and
    /// reconstructing its definition from it.
    fn from_fd_inner(fd: crate::MockableFd) -> Result<Self, MapError> {
        let MapInfo(info) = MapInfo::new_from_fd(fd.as_fd())?;
        Ok(Self {
            obj: parse_map_info(info, PinningType::None),
            fd: MapFd::from_fd(fd),
        })
    }

    /// Loads a map from an already-open file descriptor, taking ownership.
    pub fn from_fd(fd: OwnedFd) -> Result<Self, MapError> {
        let fd = crate::MockableFd::from_fd(fd);
        Self::from_fd_inner(fd)
    }

    /// Pins the map to `path` on a BPF filesystem via `BPF_OBJ_PIN`.
    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
        use std::os::unix::ffi::OsStrExt as _;

        let Self { fd, obj: _ } = self;
        let path = path.as_ref();
        let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
            PinError::InvalidPinPath {
                path: path.to_path_buf(),
                error,
            }
        })?;
        bpf_pin_object(fd.as_fd(), &path_string).map_err(|io_error| SyscallError {
            call: "BPF_OBJ_PIN",
            io_error,
        })?;
        Ok(())
    }

    /// Returns the map's file descriptor.
    pub const fn fd(&self) -> &MapFd {
        let Self { obj: _, fd } = self;
        fd
    }

    /// Returns the map's parsed object-file definition.
    pub(crate) const fn obj(&self) -> &aya_obj::Map {
        let Self { obj, fd: _ } = self;
        obj
    }

    /// Queries the kernel for the map's current [`MapInfo`].
    pub fn info(&self) -> Result<MapInfo, MapError> {
        MapInfo::new_from_fd(self.fd.as_fd())
    }
}
/// A map whose entries can be iterated via [`MapIter`].
pub trait IterableMap<K: Pod, V> {
    /// Returns the underlying map data (used to enumerate keys).
    fn map(&self) -> &MapData;
    /// Looks up the value for `key`.
    fn get(&self, key: &K) -> Result<V, MapError>;
}
/// Iterator over the keys of a map, driven by `BPF_MAP_GET_NEXT_KEY`.
pub struct MapKeys<'coll, K: Pod> {
    map: &'coll MapData,
    // Latches after the first syscall error so iteration stops permanently.
    err: bool,
    // The last key returned; `None` means start from the first key.
    key: Option<K>,
}

impl<'coll, K: Pod> MapKeys<'coll, K> {
    /// Starts iteration from the beginning of `map`.
    const fn new(map: &'coll MapData) -> Self {
        Self {
            map,
            err: false,
            key: None,
        }
    }
}
impl<K: Pod> Iterator for MapKeys<'_, K> {
    type Item = Result<K, MapError>;

    fn next(&mut self) -> Option<Result<K, MapError>> {
        // After an error the iterator is permanently exhausted.
        if self.err {
            return None;
        }
        let fd = self.map.fd().as_fd();
        // Ask the kernel for the key after `self.key` (or the first key when
        // `self.key` is None).
        let key = bpf_map_get_next_key(fd, self.key.as_ref()).map_err(|io_error| SyscallError {
            call: "bpf_map_get_next_key",
            io_error,
        });
        match key {
            Err(err) => {
                self.err = true;
                Some(Err(err.into()))
            }
            Ok(key) => {
                // `None` here means the kernel reported the end of the map.
                self.key = key;
                key.map(Ok)
            }
        }
    }
}
/// Iterator over the `(key, value)` entries of an [`IterableMap`].
pub struct MapIter<'coll, K: Pod, V, I: IterableMap<K, V>> {
    // Key enumeration; values are fetched per key via `map.get`.
    keys: MapKeys<'coll, K>,
    map: &'coll I,
    _v: PhantomData<V>,
}

impl<'coll, K: Pod, V, I: IterableMap<K, V>> MapIter<'coll, K, V, I> {
    /// Starts iteration from the beginning of `map`.
    fn new(map: &'coll I) -> Self {
        Self {
            keys: MapKeys::new(map.map()),
            map,
            _v: PhantomData,
        }
    }
}
impl<K: Pod, V, I: IterableMap<K, V>> Iterator for MapIter<'_, K, V, I> {
    type Item = Result<(K, V), MapError>;

    fn next(&mut self) -> Option<Self::Item> {
        // Walk the key iterator, fetching the value for each key. A key that
        // vanished between enumeration and lookup (KeyNotFound) is skipped
        // rather than reported as an error.
        while let Some(key_result) = self.keys.next() {
            let key = match key_result {
                Ok(key) => key,
                Err(e) => return Some(Err(e)),
            };
            match self.map.get(&key) {
                Ok(value) => return Some(Ok((key, value))),
                Err(MapError::KeyNotFound) => continue,
                Err(e) => return Some(Err(e)),
            }
        }
        None
    }
}
/// Raw byte buffer used to exchange per-CPU values with the kernel.
pub(crate) struct PerCpuKernelMem {
    bytes: Vec<u8>,
}

impl PerCpuKernelMem {
    /// Returns a mutable pointer to the buffer, for passing to bpf syscalls.
    pub(crate) const fn as_mut_ptr(&mut self) -> *mut u8 {
        self.bytes.as_mut_ptr()
    }
}
/// A slice of values, one per possible CPU, used with per-CPU map types.
#[derive(Debug)]
pub struct PerCpuValues<T: Pod> {
    // Indexed by CPU number; length equals nr_cpus (enforced by TryFrom).
    values: Box<[T]>,
}
impl<T: Pod> TryFrom<Vec<T>> for PerCpuValues<T> {
    type Error = io::Error;

    /// Builds a per-CPU value set from `values`.
    ///
    /// # Errors
    ///
    /// Returns `InvalidInput` unless `values` contains exactly one value per
    /// possible CPU. (The previous message claimed "not enough values" even
    /// when too many were supplied; the check is for an exact count.)
    fn try_from(values: Vec<T>) -> Result<Self, Self::Error> {
        let nr_cpus = nr_cpus().map_err(|(_, error)| error)?;
        if values.len() != nr_cpus {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!(
                    "expected exactly {} values (one per possible CPU), got {}",
                    nr_cpus,
                    values.len()
                ),
            ));
        }
        Ok(Self {
            values: values.into_boxed_slice(),
        })
    }
}
impl<T: Pod> PerCpuValues<T> {
    /// Allocates a zeroed exchange buffer with one slot per possible CPU.
    ///
    /// Each slot is `size_of::<T>()` rounded up to a multiple of 8 —
    /// presumably the kernel's per-CPU element stride; confirm against the
    /// bpf syscall documentation.
    pub(crate) fn alloc_kernel_mem() -> Result<PerCpuKernelMem, io::Error> {
        let value_size = size_of::<T>().next_multiple_of(8);
        let nr_cpus = nr_cpus().map_err(|(_, error)| error)?;
        Ok(PerCpuKernelMem {
            bytes: vec![0u8; nr_cpus * value_size],
        })
    }

    /// Decodes an exchange buffer into one `T` per slot.
    ///
    /// # Safety
    ///
    /// The caller must ensure `mem` contains validly initialized `T` values
    /// laid out with the same 8-byte-rounded stride used by
    /// [`Self::alloc_kernel_mem`].
    pub(crate) unsafe fn from_kernel_mem(mem: PerCpuKernelMem) -> Self {
        let stride = size_of::<T>().next_multiple_of(8);
        let mut values = Vec::new();
        let mut offset = 0;
        while offset < mem.bytes.len() {
            // SAFETY: the loop condition keeps `offset` within `mem.bytes`;
            // `read_unaligned` tolerates any alignment. Validity of the bytes
            // as `T` is the caller's obligation (see # Safety above).
            values.push(unsafe { ptr::read_unaligned(mem.bytes.as_ptr().add(offset).cast()) });
            offset += stride;
        }
        Self {
            values: values.into_boxed_slice(),
        }
    }

    /// Serializes `self` into a fresh exchange buffer (inverse of
    /// [`Self::from_kernel_mem`]).
    pub(crate) fn build_kernel_mem(&self) -> Result<PerCpuKernelMem, io::Error> {
        let mut mem = Self::alloc_kernel_mem()?;
        let mem_ptr = mem.as_mut_ptr();
        let value_size = size_of::<T>().next_multiple_of(8);
        for (i, value) in self.values.iter().enumerate() {
            // SAFETY: `mem` was sized as nr_cpus * value_size and
            // `self.values` holds nr_cpus entries (enforced by TryFrom), so
            // every write stays in bounds.
            unsafe { ptr::write_unaligned(mem_ptr.byte_add(i * value_size).cast(), *value) }
        }
        Ok(mem)
    }
}
impl<T: Pod> Deref for PerCpuValues<T> {
    type Target = Box<[T]>;

    /// Exposes the per-CPU values as a slice indexed by CPU number.
    fn deref(&self) -> &Self::Target {
        let Self { values } = self;
        values
    }
}
#[cfg(test)]
mod test_utils {
    //! Helpers shared by the map unit tests: syscall-mocked map creation and
    //! legacy map object construction.

    use aya_obj::{
        EbpfSectionKind,
        generated::{bpf_cmd, bpf_map_type},
        maps::LegacyMap,
    };

    use crate::{
        bpf_map_def,
        maps::MapData,
        sys::{Syscall, override_syscall},
    };

    /// Creates a `MapData` from `obj` with `BPF_MAP_CREATE` mocked to return
    /// a fake fd; any other syscall panics.
    pub(super) fn new_map(obj: aya_obj::Map) -> MapData {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            call => panic!("unexpected syscall {call:?}"),
        });
        MapData::create(obj, "foo", None).unwrap()
    }

    /// Builds a legacy map object of the given type with a K-sized key,
    /// 4-byte values and 1024 entries.
    pub(super) fn new_obj_map<K>(map_type: bpf_map_type) -> aya_obj::Map {
        aya_obj::Map::Legacy(LegacyMap {
            def: bpf_map_def {
                map_type: map_type as u32,
                key_size: size_of::<K>() as u32,
                value_size: 4,
                max_entries: 1024,
                ..Default::default()
            },
            inner_def: None,
            section_index: 0,
            section_kind: EbpfSectionKind::Maps,
            data: Vec::new(),
            symbol_index: None,
        })
    }

    /// Like [`new_obj_map`], but with a caller-chosen `max_entries` (used by
    /// the perf event array sizing tests).
    pub(super) fn new_obj_map_with_max_entries<K>(
        map_type: bpf_map_type,
        max_entries: u32,
    ) -> aya_obj::Map {
        aya_obj::Map::Legacy(LegacyMap {
            def: bpf_map_def {
                map_type: map_type as u32,
                key_size: size_of::<K>() as u32,
                value_size: 4,
                max_entries,
                ..Default::default()
            },
            inner_def: None,
            section_index: 0,
            section_kind: EbpfSectionKind::Maps,
            data: Vec::new(),
            symbol_index: None,
        })
    }
}
#[cfg(test)]
mod tests {
    use std::{ffi::c_char, os::fd::AsRawFd as _};

    use assert_matches::assert_matches;
    use aya_obj::generated::{bpf_cmd, bpf_map_info};
    use libc::EFAULT;

    use super::*;
    use crate::sys::{Syscall, override_syscall};

    /// Plain HASH map object used by most tests.
    fn new_obj_map() -> aya_obj::Map {
        test_utils::new_obj_map::<u32>(bpf_map_type::BPF_MAP_TYPE_HASH)
    }

    /// `MapData::from_id` forwards the id to the kernel and wraps the
    /// returned fd.
    #[test]
    fn test_from_map_id() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_FD_BY_ID,
                attr,
            } => {
                // The requested id must reach the kernel verbatim.
                assert_eq!(
                    unsafe { attr.__bindgen_anon_6.__bindgen_anon_1.map_id },
                    1234
                );
                Ok(crate::MockableFd::mock_signed_fd().into())
            }
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                assert_eq!(
                    unsafe { attr.info.bpf_fd },
                    crate::MockableFd::mock_unsigned_fd(),
                );
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });
        assert_matches!(
            MapData::from_id(1234),
            Ok(MapData {
                obj: _,
                fd,
            }) => assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd())
        );
    }

    /// `MapData::create` returns the fd produced by `BPF_MAP_CREATE`.
    #[test]
    fn test_create() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });
        assert_matches!(
            MapData::create(new_obj_map(), "foo", None),
            Ok(MapData {
                obj: _,
                fd,
            }) => assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd())
        );
    }

    /// Perf event arrays get their `max_entries` clamped to nr_cpus when it
    /// is 0 or larger than nr_cpus, and kept as-is otherwise.
    #[test]
    fn test_create_perf_event_array() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });
        let nr_cpus = nr_cpus().unwrap();
        // Too large: clamped down to nr_cpus.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                65535,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), nr_cpus as u32)
            }
        );
        // Unset (0): expanded to nr_cpus.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                0,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), nr_cpus as u32)
            }
        );
        // Within range: left untouched.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                1,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), 1)
            }
        );
    }

    /// `MapData::info` reports the name written into the kernel's
    /// `bpf_map_info` struct.
    #[test]
    fn test_name() {
        const TEST_NAME: &str = "foo";

        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                assert_eq!(
                    unsafe { attr.info.info_len },
                    size_of::<bpf_map_info>() as u32
                );
                // Write TEST_NAME into the info struct the caller supplied,
                // as the kernel would.
                unsafe {
                    let name_bytes = std::mem::transmute::<&[u8], &[c_char]>(TEST_NAME.as_bytes());
                    let map_info = attr.info.info as *mut bpf_map_info;
                    map_info.write({
                        let mut map_info = map_info.read();
                        map_info.name[..name_bytes.len()].copy_from_slice(name_bytes);
                        map_info
                    })
                }
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });
        let map_data = MapData::create(new_obj_map(), TEST_NAME, None).unwrap();
        assert_eq!(TEST_NAME, map_data.info().unwrap().name_as_str().unwrap());
    }

    /// `loaded_maps` walks ids 1..=5 via GET_NEXT_ID and reads each map's
    /// info through a mocked fd (fd = mock base + id).
    #[test]
    fn test_loaded_maps() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_NEXT_ID,
                attr,
            } => unsafe {
                // Enumerate ids 1 through 5, then report ENOENT (end).
                let id = attr.__bindgen_anon_6.__bindgen_anon_1.start_id;
                if id < 5 {
                    attr.__bindgen_anon_6.next_id = id + 1;
                    Ok(0)
                } else {
                    Err((-1, io::Error::from_raw_os_error(libc::ENOENT)))
                }
            },
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_FD_BY_ID,
                attr,
            } => Ok((unsafe { attr.__bindgen_anon_6.__bindgen_anon_1.map_id }
                + crate::MockableFd::mock_unsigned_fd())
            .into()),
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                // Recover the id from the fd and fill in fixed test values.
                unsafe {
                    let info = attr.info;
                    let map_info = info.info as *mut bpf_map_info;
                    map_info.write({
                        let mut map_info = map_info.read();
                        map_info.id = info.bpf_fd - crate::MockableFd::mock_unsigned_fd();
                        map_info.key_size = 32;
                        map_info.value_size = 64;
                        map_info.map_flags = 1234;
                        map_info.max_entries = 99;
                        map_info
                    });
                }
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });
        assert_eq!(
            loaded_maps()
                .map(|map_info| {
                    let map_info = map_info.unwrap();
                    (
                        map_info.id(),
                        map_info.key_size(),
                        map_info.value_size(),
                        map_info.map_flags(),
                        map_info.max_entries(),
                        map_info.fd().unwrap().as_fd().as_raw_fd(),
                    )
                })
                .collect::<Vec<_>>(),
            (1..6)
                .map(|i: u8| (
                    i.into(),
                    32,
                    64,
                    1234,
                    99,
                    crate::MockableFd::mock_signed_fd() + i32::from(i)
                ))
                .collect::<Vec<_>>(),
        );
    }

    /// A failing `BPF_MAP_CREATE` surfaces as `MapError::CreateError` with
    /// the map name and OS error attached.
    #[test]
    fn test_create_failed() {
        override_syscall(|_| Err((-1, io::Error::from_raw_os_error(EFAULT))));
        assert_matches!(
            MapData::create(new_obj_map(), "foo", None),
            Err(MapError::CreateError { name, io_error }) => {
                assert_eq!(name, "foo");
                assert_eq!(io_error.raw_os_error(), Some(EFAULT));
            }
        );
    }
}