mod array;
mod flags;
mod hash;
mod lru;
mod queue;
pub(crate) mod stream;
use alloc::{
boxed::Box,
string::{String, ToString},
sync::Arc,
vec,
vec::Vec,
};
use core::{any::Any, ffi::CStr, fmt::Debug, ops::Range};
use crate::{
BpfError, KernelAuxiliaryOps, PollWaker, Result,
linux_bpf::{BpfMapType, bpf_attr},
map::flags::BpfMapCreateFlags,
};
/// Round `x` up to the next multiple of `align`.
///
/// `align` must be a non-zero power of two: the bit-mask trick below is only
/// correct under that assumption (e.g. `round_up(5, 3)` would yield 7, not 6).
/// The `debug_assert!` documents and checks that precondition in debug builds.
#[inline]
fn round_up(x: usize, align: usize) -> usize {
    debug_assert!(
        align.is_power_of_two(),
        "round_up requires a power-of-two alignment"
    );
    (x + align - 1) & !(align - 1)
}
pub type BpfCallBackFn = fn(key: &[u8], value: &[u8], ctx: *const u8) -> i32;
/// Operations shared by every BPF map implementation.
///
/// Each method has a default body returning [`BpfError::EPERM`], so a concrete
/// map only overrides the operations it actually supports; anything else is
/// reported to the caller as "operation not permitted".
pub trait BpfMapCommonOps: Send + Sync + Debug + Any {
    /// Look up the value stored under `key`; `Ok(None)` when absent.
    fn lookup_elem(&mut self, _key: &[u8]) -> Result<Option<&[u8]>> {
        Err(BpfError::EPERM)
    }
    /// Insert or update the element under `key` subject to `flags`
    /// (see [`BpfMapUpdateElemFlags`]).
    fn update_elem(&mut self, _key: &[u8], _value: &[u8], _flags: u64) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Remove the element under `key`.
    fn delete_elem(&mut self, _key: &[u8]) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Invoke `cb` for every element, passing `ctx` through unchanged;
    /// returns the number of elements visited.
    fn for_each_elem(&mut self, _cb: BpfCallBackFn, _ctx: *const u8, _flags: u64) -> Result<u32> {
        Err(BpfError::EPERM)
    }
    /// Look up `key`, copy its value into `value`, then remove the element.
    fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Per-CPU variant of [`Self::lookup_elem`]: read the slot belonging to `cpu`.
    fn lookup_percpu_elem(&mut self, _key: &[u8], _cpu: u32) -> Result<Option<&[u8]>> {
        Err(BpfError::EPERM)
    }
    /// Write the key following `key` into `next_key`; `None` asks for the first key.
    fn get_next_key(&self, _key: Option<&[u8]>, _next_key: &mut [u8]) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Push `value` onto a queue/stack-style map.
    fn push_elem(&mut self, _value: &[u8], _flags: u64) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Pop the next element of a queue/stack-style map into `value`.
    fn pop_elem(&mut self, _value: &mut [u8]) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Copy the next element of a queue/stack-style map into `value` without removing it.
    fn peek_elem(&self, _value: &mut [u8]) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Freeze the map so user space can no longer modify it.
    fn freeze(&self) -> Result<()> {
        Err(BpfError::EPERM)
    }
    /// Address range of the map's value storage, for pointer validation.
    fn map_values_ptr_range(&self) -> Result<Range<usize>> {
        Err(BpfError::EPERM)
    }
    /// Memory usage of this map in bytes. Mandatory for every implementation.
    fn map_mem_usage(&self) -> Result<usize>;
    /// Return the physical/frame addresses backing `[offset, offset + size)`
    /// for mmap into user space.
    // Parameters are underscore-prefixed like the other default methods so the
    // unused-variable lint stays quiet in this default body.
    fn map_mmap(&self, _offset: usize, _size: usize, _read: bool, _write: bool) -> Result<Vec<usize>> {
        Err(BpfError::EPERM)
    }
    /// Whether the map currently has data available to read (poll support).
    fn readable(&self) -> bool {
        false
    }
    /// Whether the map can currently accept writes (poll support).
    fn writable(&self) -> bool {
        false
    }
    /// Downcast support for accessing the concrete map type.
    fn as_any(&self) -> &dyn Any;
    /// Mutable downcast support for accessing the concrete map type.
    fn as_any_mut(&mut self) -> &mut dyn Any;
}
/// Kernel-provided factory for per-CPU storage used by the per-CPU map types.
pub trait PerCpuVariantsOps: Sync + Send + Debug + 'static {
    /// Create one clone of `value` per CPU; `None` when allocation fails.
    fn create<T: Clone + Sync + Send + 'static>(value: T) -> Option<Box<dyn PerCpuVariants<T>>>;
    /// Number of CPUs the per-CPU storage must cover.
    fn num_cpus() -> u32;
}
/// One value per CPU.
// `get_mut` hands out `&mut T` from `&self`; soundness is expected to come from
// each CPU only touching its own slot — hence the clippy allowance.
#[allow(clippy::mut_from_ref)]
pub trait PerCpuVariants<T: Clone + Sync + Send>: Sync + Send + Debug {
    /// Value belonging to the current CPU.
    fn get(&self) -> &T;
    /// Mutable value belonging to the current CPU.
    fn get_mut(&self) -> &mut T;
    /// Read the slot of an arbitrary CPU.
    ///
    /// # Safety
    /// `cpu` must be a valid CPU index and no mutable access to that slot may
    /// be active at the same time.
    unsafe fn force_get(&self, cpu: u32) -> &T;
    /// Mutably access the slot of an arbitrary CPU.
    ///
    /// # Safety
    /// `cpu` must be a valid CPU index and the returned reference must not be
    /// aliased by any other access to the same slot.
    unsafe fn force_get_mut(&self, cpu: u32) -> &mut T;
}
bitflags::bitflags! {
    /// Flags accepted by element-update operations.
    ///
    /// Values mirror the Linux uapi constants. NOTE(review): upstream spells the
    /// third flag `BPF_EXIST` (no trailing `S`); renaming the constant here would
    /// break external users, so the spelling is kept.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct BpfMapUpdateElemFlags: u64 {
        /// Create a new element or update an existing one.
        const BPF_ANY = 0;
        /// Create a new element only if it does not already exist.
        const BPF_NOEXIST = 1;
        /// Update an existing element only.
        const BPF_EXISTS = 2;
        /// Perform the update under the spin lock embedded in the value.
        const BPF_F_LOCK = 4;
    }
}
/// Creation-time attributes of a BPF map, decoded from the `bpf(2)` attr union.
#[derive(Debug, Clone, Default)]
pub struct BpfMapMeta {
    // Which map implementation to instantiate.
    pub map_type: BpfMapType,
    // Size of a key in bytes.
    pub key_size: u32,
    // Size of a value in bytes.
    pub value_size: u32,
    // Maximum number of elements the map may hold.
    pub max_entries: u32,
    // Creation flags (validated in the TryFrom impl below).
    pub map_flags: BpfMapCreateFlags,
    // Map name as supplied by user space; currently informational only.
    pub _map_name: String,
}
impl TryFrom<&bpf_attr> for BpfMapMeta {
    type Error = BpfError;
    /// Decode the map-create fields out of the `bpf_attr` union.
    ///
    /// Returns `EINVAL` when the name is not NUL-terminated valid UTF-8, the
    /// map type is unknown, or the flags contain unrecognized bits.
    fn try_from(attr: &bpf_attr) -> Result<Self> {
        // SAFETY: the caller invokes this only for the map-create command, so
        // the `__bindgen_anon_1` variant of the attr union is the active one.
        let u = unsafe { &attr.__bindgen_anon_1 };
        // Reinterpret the (possibly i8) name array as raw bytes for CStr parsing.
        let map_name_slice = unsafe {
            core::slice::from_raw_parts(u.map_name.as_ptr() as *const u8, u.map_name.len())
        };
        // Name must contain a NUL terminator and decode as UTF-8.
        let map_name = CStr::from_bytes_until_nul(map_name_slice)
            .map_err(|_| BpfError::EINVAL)?
            .to_str()
            .map_err(|_| BpfError::EINVAL)?
            .to_string();
        let map_type = BpfMapType::try_from(u.map_type).map_err(|_| BpfError::EINVAL)?;
        // Reject any flag bits we do not know about.
        let map_flags = BpfMapCreateFlags::from_bits(u.map_flags).ok_or(BpfError::EINVAL)?;
        Ok(BpfMapMeta {
            map_type,
            key_size: u.key_size,
            value_size: u.value_size,
            max_entries: u.max_entries,
            map_flags,
            _map_name: map_name,
        })
    }
}
/// A type-erased BPF map paired with the metadata it was created with.
#[derive(Debug)]
pub struct UnifiedMap {
    // Concrete map implementation behind the common trait object.
    inner_map: Box<dyn BpfMapCommonOps>,
    // Attributes captured at creation time (key/value sizes, type, flags).
    map_meta: BpfMapMeta,
}
impl UnifiedMap {
fn new(map_meta: BpfMapMeta, map: Box<dyn BpfMapCommonOps>) -> Self {
Self {
inner_map: map,
map_meta,
}
}
pub fn map(&self) -> &dyn BpfMapCommonOps {
self.inner_map.as_ref()
}
pub fn map_mut(&mut self) -> &mut dyn BpfMapCommonOps {
self.inner_map.as_mut()
}
pub fn map_meta(&self) -> &BpfMapMeta {
&self.map_meta
}
}
pub fn bpf_map_create<F: KernelAuxiliaryOps, T: PerCpuVariantsOps + 'static>(
map_meta: BpfMapMeta,
poll_waker: Option<Arc<dyn PollWaker>>,
) -> Result<UnifiedMap> {
log::trace!("The map attr is {:#?}", map_meta);
let map: Box<dyn BpfMapCommonOps> = match map_meta.map_type {
BpfMapType::BPF_MAP_TYPE_ARRAY => {
let array_map = array::ArrayMap::new(&map_meta)?;
Box::new(array_map)
}
BpfMapType::BPF_MAP_TYPE_PERCPU_ARRAY => {
let per_cpu_array_map = array::PerCpuArrayMap::<T>::new(&map_meta)?;
Box::new(per_cpu_array_map)
}
BpfMapType::BPF_MAP_TYPE_PERF_EVENT_ARRAY => {
let perf_event_array_map = array::PerfEventArrayMap::new(&map_meta, T::num_cpus())?;
Box::new(perf_event_array_map)
}
BpfMapType::BPF_MAP_TYPE_CPUMAP
| BpfMapType::BPF_MAP_TYPE_DEVMAP
| BpfMapType::BPF_MAP_TYPE_DEVMAP_HASH => {
log::error!("bpf map type {:?} not implemented", map_meta.map_type);
Err(BpfError::EPERM)?
}
BpfMapType::BPF_MAP_TYPE_HASH => {
let hash_map = hash::BpfHashMap::new(&map_meta)?;
Box::new(hash_map)
}
BpfMapType::BPF_MAP_TYPE_PERCPU_HASH => {
let per_cpu_hash_map = hash::PerCpuHashMap::<T>::new(&map_meta)?;
Box::new(per_cpu_hash_map)
}
BpfMapType::BPF_MAP_TYPE_QUEUE => {
let queue_map = queue::QueueMap::new(&map_meta)?;
Box::new(queue_map)
}
BpfMapType::BPF_MAP_TYPE_STACK => {
let stack_map = queue::StackMap::new(&map_meta)?;
Box::new(stack_map)
}
BpfMapType::BPF_MAP_TYPE_LRU_HASH => {
let lru_hash_map = lru::LruMap::new(&map_meta)?;
Box::new(lru_hash_map)
}
BpfMapType::BPF_MAP_TYPE_LRU_PERCPU_HASH => {
let lru_per_cpu_hash_map = lru::PerCpuLruMap::<T>::new(&map_meta)?;
Box::new(lru_per_cpu_hash_map)
}
BpfMapType::BPF_MAP_TYPE_RINGBUF => {
let poll_waker = poll_waker.ok_or(BpfError::EINVAL)?;
let ringbuf_map = stream::RingBufMap::<F>::new(&map_meta, poll_waker)?;
Box::new(ringbuf_map)
}
_ => {
log::error!("bpf map type {:?} not implemented", map_meta.map_type);
Err(BpfError::EPERM)?
}
};
let unified_map = UnifiedMap::new(map_meta, map);
Ok(unified_map)
}
/// Decoded arguments for the element-level map commands
/// (update/lookup/delete/lookup-and-delete all share this shape).
#[derive(Debug, Clone, Copy)]
pub struct BpfMapUpdateArg {
    // File descriptor identifying the target map.
    pub map_fd: u32,
    // User-space address of the key buffer.
    pub key: u64,
    // User-space address of the value buffer.
    pub value: u64,
    // Operation flags (see BpfMapUpdateElemFlags).
    pub flags: u64,
}
impl From<&bpf_attr> for BpfMapUpdateArg {
    /// Pull the element-operation fields out of the `bpf_attr` union.
    fn from(attr: &bpf_attr) -> Self {
        // SAFETY: callers only convert attrs belonging to element-level map
        // commands, for which `__bindgen_anon_2` is the active union variant.
        unsafe {
            let u = &attr.__bindgen_anon_2;
            BpfMapUpdateArg {
                map_fd: u.map_fd,
                key: u.key,
                value: u.__bindgen_anon_1.value,
                flags: u.flags,
            }
        }
    }
}
/// Decoded arguments for the `BPF_MAP_GET_NEXT_KEY` command.
#[derive(Debug, Clone, Copy)]
pub struct BpfMapGetNextKeyArg {
    // File descriptor identifying the target map.
    pub map_fd: u32,
    // User-space address of the current key; `None` means "fetch the first key".
    pub key: Option<u64>,
    // User-space address the next key is written to.
    pub next_key: u64,
}
impl From<&bpf_attr> for BpfMapGetNextKeyArg {
    /// Pull the get-next-key fields out of the `bpf_attr` union.
    fn from(attr: &bpf_attr) -> Self {
        // SAFETY: callers only convert attrs belonging to the get-next-key
        // command, for which `__bindgen_anon_2` is the active union variant.
        unsafe {
            let u = &attr.__bindgen_anon_2;
            BpfMapGetNextKeyArg {
                map_fd: u.map_fd,
                // A null key pointer means "start from the first key".
                key: (u.key != 0).then_some(u.key),
                next_key: u.__bindgen_anon_1.next_key,
            }
        }
    }
}
/// Handle `BPF_MAP_UPDATE_ELEM`: copy key and value from user space and
/// store them in the map identified by `arg.map_fd`.
pub fn bpf_map_update_elem<F: KernelAuxiliaryOps>(arg: BpfMapUpdateArg) -> Result<()> {
    F::get_unified_map_from_fd(arg.map_fd, |unified_map| {
        let (key_size, value_size) = {
            let meta = unified_map.map_meta();
            (meta.key_size as usize, meta.value_size as usize)
        };
        // Copy the key, then the value, out of user space.
        let mut key_buf = vec![0u8; key_size];
        F::copy_from_user(arg.key as *const u8, key_size, &mut key_buf)?;
        let mut value_buf = vec![0u8; value_size];
        F::copy_from_user(arg.value as *const u8, value_size, &mut value_buf)?;
        unified_map
            .map_mut()
            .update_elem(&key_buf, &value_buf, arg.flags)
    })
}
/// Handle `BPF_MAP_FREEZE`: ask the map identified by `map_fd` to reject
/// further modification from user space.
pub fn bpf_map_freeze<F: KernelAuxiliaryOps>(map_fd: u32) -> Result<()> {
    F::get_unified_map_from_fd(map_fd, |unified_map| {
        let map = unified_map.map();
        map.freeze()
    })
}
/// Handle `BPF_MAP_LOOKUP_ELEM`: read the key from user space, look it up,
/// and copy the value back out. Returns `ENOENT` when the key is absent.
pub fn bpf_lookup_elem<F: KernelAuxiliaryOps>(arg: BpfMapUpdateArg) -> Result<()> {
    F::get_unified_map_from_fd(arg.map_fd, |unified_map| {
        let key_size = unified_map.map_meta().key_size as usize;
        let value_size = unified_map.map_meta().value_size as usize;
        let mut key_buf = vec![0u8; key_size];
        F::copy_from_user(arg.key as *const u8, key_size, &mut key_buf)?;
        // Absent key becomes ENOENT; a present value is copied to user space.
        let found = unified_map
            .map_mut()
            .lookup_elem(&key_buf)?
            .ok_or(BpfError::ENOENT)?;
        F::copy_to_user(arg.value as *mut u8, value_size, found)?;
        Ok(())
    })
}
/// Handle `BPF_MAP_GET_NEXT_KEY`: fetch the key following `arg.key`
/// (or the first key when none is supplied) and write it back to user space.
pub fn bpf_map_get_next_key<F: KernelAuxiliaryOps>(arg: BpfMapGetNextKeyArg) -> Result<()> {
    F::get_unified_map_from_fd(arg.map_fd, |unified_map| {
        let key_size = unified_map.map_meta().key_size as usize;
        // Copy the caller's current key in, if one was supplied.
        let current = match arg.key {
            Some(user_ptr) => {
                let mut buf = vec![0u8; key_size];
                F::copy_from_user(user_ptr as *const u8, key_size, &mut buf)?;
                Some(buf)
            }
            None => None,
        };
        let mut next_key = vec![0u8; key_size];
        unified_map
            .map_mut()
            .get_next_key(current.as_deref(), &mut next_key)?;
        F::copy_to_user(arg.next_key as *mut u8, key_size, &next_key)?;
        Ok(())
    })
}
/// Handle `BPF_MAP_DELETE_ELEM`: read the key from user space and remove
/// the corresponding element.
pub fn bpf_map_delete_elem<F: KernelAuxiliaryOps>(arg: BpfMapUpdateArg) -> Result<()> {
    F::get_unified_map_from_fd(arg.map_fd, |unified_map| {
        let key_size = unified_map.map_meta().key_size as usize;
        let mut key_buf = vec![0u8; key_size];
        F::copy_from_user(arg.key as *const u8, key_size, &mut key_buf)?;
        unified_map.map_mut().delete_elem(&key_buf)
    })
}
/// Handle `BPF_MAP_LOOKUP_BATCH`. Not implemented yet; always returns `EPERM`.
pub fn bpf_map_lookup_batch<F: KernelAuxiliaryOps>(_arg: BpfMapUpdateArg) -> Result<usize> {
    Err(BpfError::EPERM)
}
/// Handle `BPF_MAP_LOOKUP_AND_DELETE_ELEM`: remove the element under the
/// user-supplied key and copy its (former) value back to user space.
pub fn bpf_map_lookup_and_delete_elem<F: KernelAuxiliaryOps>(arg: BpfMapUpdateArg) -> Result<()> {
    F::get_unified_map_from_fd(arg.map_fd, |unified_map| {
        let (key_size, value_size) = {
            let meta = unified_map.map_meta();
            (meta.key_size as usize, meta.value_size as usize)
        };
        let mut key_buf = vec![0u8; key_size];
        F::copy_from_user(arg.key as *const u8, key_size, &mut key_buf)?;
        // The map fills `value_buf` with the removed element's value.
        let mut value_buf = vec![0u8; value_size];
        unified_map
            .map_mut()
            .lookup_and_delete_elem(&key_buf, &mut value_buf)?;
        F::copy_to_user(arg.value as *mut u8, value_size, &value_buf)?;
        Ok(())
    })
}
#[cfg(test)]
mod tests {
    use alloc::{boxed::Box, vec::Vec};
    use core::fmt::Debug;
    use super::{PerCpuVariants, PerCpuVariantsOps};
    // Per-CPU factory that reports one CPU and always succeeds.
    #[derive(Debug)]
    pub struct DummyPerCpuCreator;
    // Per-CPU factory that reports zero CPUs and always fails creation,
    // for exercising the allocation-failure path.
    #[derive(Debug)]
    pub struct DummyPerCpuCreatorFalse;
    // Test stand-in for per-CPU storage: a plain Vec with one slot per "CPU".
    pub struct DummyPerCpuVariants<T>(Vec<T>);
    impl<T> Debug for DummyPerCpuVariants<T> {
        // Manual impl so T itself does not need Debug.
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            f.debug_tuple("DummyPerCpuVariants").finish()
        }
    }
    impl<T: Clone + Sync + Send> PerCpuVariants<T> for DummyPerCpuVariants<T> {
        // "Current CPU" is always slot 0 in this single-CPU dummy.
        fn get(&self) -> &T {
            &self.0[0]
        }
        // NOTE(review): casts a shared pointer to `*mut T` to hand out `&mut T`
        // from `&self`. Test-only shortcut; aliased use would be UB — consider
        // UnsafeCell if this dummy ever grows beyond single-threaded tests.
        fn get_mut(&self) -> &mut T {
            unsafe { &mut *(self.0.as_ptr() as *mut T) }
        }
        unsafe fn force_get(&self, cpu: u32) -> &T {
            &self.0[cpu as usize]
        }
        // SAFETY contract (from the trait): caller guarantees `cpu` is in
        // bounds and the returned reference is not aliased.
        unsafe fn force_get_mut(&self, cpu: u32) -> &mut T {
            let ptr = self.0.as_ptr();
            let ptr = unsafe { ptr.add(cpu as usize) } as *mut T;
            unsafe { &mut *ptr }
        }
    }
    impl PerCpuVariantsOps for DummyPerCpuCreator {
        // Clone `value` once per reported CPU (a single clone here).
        fn create<T: Clone + Sync + Send + 'static>(
            value: T,
        ) -> Option<Box<dyn PerCpuVariants<T>>> {
            let mut vec = Vec::new();
            for _ in 0..Self::num_cpus() {
                vec.push(value.clone());
            }
            Some(Box::new(DummyPerCpuVariants(vec)))
        }
        fn num_cpus() -> u32 {
            1
        }
    }
    impl PerCpuVariantsOps for DummyPerCpuCreatorFalse {
        // Always fails, simulating per-CPU allocation failure.
        fn create<T: Clone + Sync + Send + 'static>(
            _value: T,
        ) -> Option<Box<dyn PerCpuVariants<T>>> {
            None
        }
        fn num_cpus() -> u32 {
            0
        }
    }
}