use core::ffi::c_void;
use std::convert::TryFrom;
use std::ffi::CStr;
use std::ffi::CString;
use std::fmt::Debug;
use std::mem;
use std::os::unix::prelude::AsRawFd;
use std::os::unix::prelude::BorrowedFd;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::ptr;
use std::ptr::null;
use std::ptr::NonNull;
use std::slice::from_raw_parts;
use bitflags::bitflags;
use libbpf_sys::bpf_map_info;
use libbpf_sys::bpf_obj_get_info_by_fd;
use nix::errno;
use nix::unistd;
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use strum_macros::Display;
use crate::util;
use crate::util::parse_ret_i32;
use crate::Error;
use crate::Link;
use crate::Result;
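/// Represents a parsed but not yet loaded BPF map.
///
/// This object exposes operations that need to happen before the map is
/// created in the kernel, such as overriding its type, sizes, or pin path.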
#[derive(Debug)]
pub struct OpenMap {
ptr: NonNull<libbpf_sys::bpf_map>,
}
#[allow(missing_docs)]
impl OpenMap {
pub(crate) unsafe fn new(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
Self { ptr }
}
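/// Retrieve the map's name.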
pub fn name(&self) -> Result<&str> {
let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
name_c_str
.to_str()
.map_err(|e| Error::Internal(e.to_string()))
}
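/// Retrieve the map's type, falling back to [`MapType::Unknown`] for types
/// this crate does not know about.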
pub fn map_type(&self) -> MapType {
let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
MapType::try_from(ty).unwrap_or(MapType::Unknown)
}
pub fn set_map_ifindex(&mut self, idx: u32) {
unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
}
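/// Set the initial value of the map.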
pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
let ret = unsafe {
libbpf_sys::bpf_map__set_initial_value(
self.ptr.as_ptr(),
data.as_ptr() as *const std::ffi::c_void,
data.len() as libbpf_sys::size_t,
)
};
util::parse_ret(ret)
}
pub fn set_type(&mut self, ty: MapType) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
util::parse_ret(ret)
}
pub fn set_key_size(&mut self, size: u32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
util::parse_ret(ret)
}
pub fn set_value_size(&mut self, size: u32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
util::parse_ret(ret)
}
pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
util::parse_ret(ret)
}
pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
util::parse_ret(ret)
}
pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
util::parse_ret(ret)
}
pub fn set_inner_map_fd(&mut self, inner: &Map) {
unsafe { libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner.fd()) };
}
pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
util::parse_ret(ret)
}
pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
util::parse_ret(ret)
}
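/// Set the path at which this map will be pinned once the object is loaded.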
pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
let path_c = util::path_to_cstring(path)?;
let path_ptr = path_c.as_ptr();
let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
util::parse_ret(ret)
}
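/// Reuse an existing map file descriptor for this map instead of creating a
/// new map at load time.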
pub fn reuse_fd(&self, fd: i32) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd) };
util::parse_ret(ret)
}
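/// Reuse the map pinned at `path` by opening it and handing its file
/// descriptor to [`OpenMap::reuse_fd`].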
pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
let cstring = util::path_to_cstring(path)?;
let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
if fd < 0 {
return Err(Error::System(errno::errno()));
}
let reuse_result = self.reuse_fd(fd);
let _ = unistd::close(fd);
reuse_result
}
}
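/// Represents a created BPF map, either owned by a loaded object or opened
/// via a bare file descriptor (see [`Map::from_pinned_path`] and
/// [`Map::from_map_id`]).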
#[derive(Debug)]
pub struct Map {
fd: i32,
name: String,
ty: MapType,
key_size: u32,
value_size: u32,
ptr: Option<NonNull<libbpf_sys::bpf_map>>,
}
impl Map {
pub(crate) unsafe fn new(ptr: NonNull<libbpf_sys::bpf_map>) -> Result<Self> {
let name = unsafe { libbpf_sys::bpf_map__name(ptr.as_ptr()) };
let name = util::c_ptr_to_string(name)?;
let fd = unsafe { libbpf_sys::bpf_map__fd(ptr.as_ptr()) };
if fd < 0 {
return Err(Error::System(-fd));
}
let ty = MapType::try_from(unsafe { libbpf_sys::bpf_map__type(ptr.as_ptr()) })
.unwrap_or(MapType::Unknown);
let key_size = unsafe { libbpf_sys::bpf_map__key_size(ptr.as_ptr()) };
let value_size = unsafe { libbpf_sys::bpf_map__value_size(ptr.as_ptr()) };
Ok(Map {
fd,
name,
ty,
key_size,
value_size,
ptr: Some(ptr),
})
}
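/// Open a previously pinned map from its path on bpffs.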
pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
fn inner(path: &Path) -> Result<Map> {
let mut p = path.as_os_str().as_bytes().to_vec();
if p.last() != Some(&0) {
p.push(0);
}
let p = CString::from_vec_with_nul(p).expect("path contained null bytes");
let fd = parse_ret_i32(unsafe {
libbpf_sys::bpf_obj_get(p.as_ptr())
})?;
Map::from_fd(fd)
}
inner(path.as_ref())
}
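/// Open a map by its kernel-assigned map id.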
pub fn from_map_id(id: u32) -> Result<Self> {
parse_ret_i32(unsafe {
libbpf_sys::bpf_map_get_fd_by_id(id)
})
.and_then(Self::from_fd)
}
fn from_fd(fd: i32) -> Result<Self> {
let info = MapInfo::new(unsafe {
BorrowedFd::borrow_raw(fd)
})?;
Ok(Self {
fd,
name: info.name()?.into(),
ty: info.map_type(),
key_size: info.info.key_size,
value_size: info.info.value_size,
ptr: None,
})
}
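/// Query the kernel for extended information about this map.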
pub fn info(&self) -> Result<MapInfo> {
MapInfo::new(unsafe {
BorrowedFd::borrow_raw(self.fd)
})
}
pub fn name(&self) -> &str {
&self.name
}
pub fn fd(&self) -> i32 {
self.fd
}
pub fn map_type(&self) -> MapType {
self.ty
}
pub fn key_size(&self) -> u32 {
self.key_size
}
pub fn value_size(&self) -> u32 {
self.value_size
}
fn percpu_aligned_value_size(&self) -> usize {
let val_size = self.value_size() as usize;
util::roundup(val_size, 8)
}
fn percpu_buffer_size(&self) -> Result<usize> {
let aligned_val_size = self.percpu_aligned_value_size();
let ncpu = crate::num_possible_cpus()?;
Ok(ncpu * aligned_val_size)
}
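/// Pin this map to bpffs at `path`, so it outlives the process that created
/// it.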
pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
let path_c = util::path_to_cstring(path)?;
let path_ptr = path_c.as_ptr();
let ret = match self.ptr {
Some(ptr) => unsafe { libbpf_sys::bpf_map__pin(ptr.as_ptr(), path_ptr) },
None => unsafe { libbpf_sys::bpf_obj_pin(self.fd, path_ptr) },
};
util::parse_ret(ret)
}
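/// Remove the pin for this map at `path`.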
pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
match self.ptr {
Some(ptr) => {
let path_c = util::path_to_cstring(path)?;
let path_ptr = path_c.as_ptr();
let ret = unsafe { libbpf_sys::bpf_map__unpin(ptr.as_ptr(), path_ptr) };
util::parse_ret(ret)
}
None => match std::fs::remove_file(path) {
Ok(_) => Ok(()),
Err(e) => Err(Error::Internal(format!("failed to remove pinned map: {e}"))),
},
}
}
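/// Returns the map value corresponding to `key` as a `Vec<u8>`, or `None` if
/// no entry exists.
///
/// `key` must have exactly [`Map::key_size`] elements. For per-cpu maps,
/// [`Map::lookup_percpu`] must be used instead.
///
/// # Example
///
/// A minimal sketch, assuming a hash map with 4-byte keys pinned at the
/// hypothetical path `/sys/fs/bpf/some_map`:
///
/// ```no_run
/// # use libbpf_rs::{Map, MapFlags};
/// let map = Map::from_pinned_path("/sys/fs/bpf/some_map")?;
/// let key = 0u32.to_ne_bytes();
/// if let Some(value) = map.lookup(&key, MapFlags::ANY)? {
///     println!("value: {value:?}");
/// }
/// # Ok::<(), libbpf_rs::Error>(())
/// ```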
pub fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
if self.map_type().is_percpu() {
return Err(Error::InvalidInput(format!(
"lookup_percpu() must be used for per-cpu maps (type of the map is {})",
self.map_type(),
)));
}
let out_size = self.value_size() as usize;
self.lookup_raw(key, flags, out_size)
}
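/// Look up `key` in a per-cpu map, returning one value per possible CPU.
///
/// `key` must have exactly [`Map::key_size`] elements. For non-per-cpu maps,
/// [`Map::lookup`] must be used instead.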
pub fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
return Err(Error::InvalidInput(format!(
"lookup() must be used for maps that are not per-cpu (type of the map is {})",
self.map_type(),
)));
}
let val_size = self.value_size() as usize;
let aligned_val_size = self.percpu_aligned_value_size();
let out_size = self.percpu_buffer_size()?;
let raw_res = self.lookup_raw(key, flags, out_size)?;
if let Some(raw_vals) = raw_res {
let mut out = Vec::new();
for chunk in raw_vals.chunks_exact(aligned_val_size) {
out.push(chunk[..val_size].to_vec());
}
Ok(Some(out))
} else {
Ok(None)
}
}
fn lookup_raw(&self, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>> {
if key.len() != self.key_size() as usize {
return Err(Error::InvalidInput(format!(
"key_size {} != {}",
key.len(),
self.key_size()
)));
};
let mut out: Vec<u8> = Vec::with_capacity(out_size);
let ret = unsafe {
libbpf_sys::bpf_map_lookup_elem_flags(
self.fd,
key.as_ptr() as *const c_void,
out.as_mut_ptr() as *mut c_void,
flags.bits,
)
};
if ret == 0 {
unsafe {
out.set_len(out_size);
}
Ok(Some(out))
} else {
let errno = errno::errno();
if errno::Errno::from_i32(errno) == errno::Errno::ENOENT {
Ok(None)
} else {
Err(Error::System(errno))
}
}
}
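/// Delete the entry for `key` from the map.
///
/// `key` must have exactly [`Map::key_size`] elements.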
pub fn delete(&self, key: &[u8]) -> Result<()> {
if key.len() != self.key_size() as usize {
return Err(Error::InvalidInput(format!(
"key_size {} != {}",
key.len(),
self.key_size()
)));
};
let ret =
unsafe { libbpf_sys::bpf_map_delete_elem(self.fd, key.as_ptr() as *const c_void) };
util::parse_ret(ret)
}
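/// Same as [`Map::lookup`], except the entry is also removed from the map.
///
/// `key` must have exactly [`Map::key_size`] elements.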
pub fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
if key.len() != self.key_size() as usize {
return Err(Error::InvalidInput(format!(
"key_size {} != {}",
key.len(),
self.key_size()
)));
};
let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
let ret = unsafe {
libbpf_sys::bpf_map_lookup_and_delete_elem(
self.fd,
key.as_ptr() as *const c_void,
out.as_mut_ptr() as *mut c_void,
)
};
if ret == 0 {
unsafe {
out.set_len(self.value_size() as usize);
}
Ok(Some(out))
} else {
let errno = errno::errno();
if errno::Errno::from_i32(errno) == errno::Errno::ENOENT {
Ok(None)
} else {
Err(Error::System(errno))
}
}
}
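/// Update the entry for `key` with `value`.
///
/// `key` must have exactly [`Map::key_size`] elements and `value` must have
/// exactly [`Map::value_size`] elements. For per-cpu maps,
/// [`Map::update_percpu`] must be used instead.
///
/// # Example
///
/// A minimal sketch, assuming a map with 4-byte keys and 8-byte values
/// pinned at the hypothetical path `/sys/fs/bpf/some_map`:
///
/// ```no_run
/// # use libbpf_rs::{Map, MapFlags};
/// let map = Map::from_pinned_path("/sys/fs/bpf/some_map")?;
/// let key = 1u32.to_ne_bytes();
/// let value = 42u64.to_ne_bytes();
/// map.update(&key, &value, MapFlags::ANY)?;
/// # Ok::<(), libbpf_rs::Error>(())
/// ```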
pub fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
if self.map_type().is_percpu() {
return Err(Error::InvalidInput(format!(
"update_percpu() must be used for per-cpu maps (type of the map is {})",
self.map_type(),
)));
}
if value.len() != self.value_size() as usize {
return Err(Error::InvalidInput(format!(
"value_size {} != {}",
value.len(),
self.value_size()
)));
};
self.update_raw(key, value, flags)
}
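/// Update the entry for `key` in a per-cpu map, providing one value per
/// possible CPU. Each value must have exactly [`Map::value_size`] elements.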
pub fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
return Err(Error::InvalidInput(format!(
"update() must be used for maps that are not per-cpu (type of the map is {})",
self.map_type(),
)));
}
if values.len() != crate::num_possible_cpus()? {
return Err(Error::InvalidInput(format!(
"number of values {} != number of cpus {}",
values.len(),
crate::num_possible_cpus()?
)));
};
let val_size = self.value_size() as usize;
let aligned_val_size = self.percpu_aligned_value_size();
let buf_size = self.percpu_buffer_size()?;
let mut value_buf = vec![0; buf_size];
for (i, val) in values.iter().enumerate() {
if val.len() != val_size {
return Err(Error::InvalidInput(format!(
"value size for cpu {} is {} != {}",
i,
val.len(),
val_size
)));
}
value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
.copy_from_slice(val);
}
self.update_raw(key, &value_buf, flags)
}
fn update_raw(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
if key.len() != self.key_size() as usize {
return Err(Error::InvalidInput(format!(
"key_size {} != {}",
key.len(),
self.key_size()
)));
};
let ret = unsafe {
libbpf_sys::bpf_map_update_elem(
self.fd,
key.as_ptr() as *const c_void,
value.as_ptr() as *const c_void,
flags.bits,
)
};
util::parse_ret(ret)
}
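/// Freeze the map as read-only from user space. Once frozen, the map can no
/// longer be updated or deleted via the `bpf()` syscall, while BPF program
/// access is unaffected. The operation is not reversible.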
pub fn freeze(&self) -> Result<()> {
let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd) };
util::parse_ret(ret)
}
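/// Returns an iterator over the keys in this map.
///
/// Note that if the map is modified during iteration, the iterator may skip
/// keys, return duplicates, or restart from the beginning; this mirrors the
/// semantics of `bpf_map_get_next_key`.
///
/// # Example
///
/// A sketch that walks a pinned map and prints each entry (the pin path is
/// illustrative):
///
/// ```no_run
/// # use libbpf_rs::{Map, MapFlags};
/// let map = Map::from_pinned_path("/sys/fs/bpf/some_map")?;
/// for key in map.keys() {
///     if let Some(value) = map.lookup(&key, MapFlags::ANY)? {
///         println!("{key:?} -> {value:?}");
///     }
/// }
/// # Ok::<(), libbpf_rs::Error>(())
/// ```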
pub fn keys(&self) -> MapKeyIter {
MapKeyIter::new(self, self.key_size())
}
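/// Create a new map from scratch via `bpf_map_create`, outside of any BPF
/// object. The returned [`Map`] owns only the file descriptor; it is not
/// backed by a `libbpf_sys::bpf_map`.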
pub fn create<T: AsRef<str>>(
map_type: MapType,
name: Option<T>,
key_size: u32,
value_size: u32,
max_entries: u32,
opts: &libbpf_sys::bpf_map_create_opts,
) -> Result<Map> {
let (map_name_str, map_name) = match name {
Some(name) => (
util::str_to_cstring(name.as_ref())?,
name.as_ref().to_string(),
),
None => (util::str_to_cstring("")?, "".to_string()),
};
let map_name_ptr = if map_name_str.as_bytes().is_empty() {
null()
} else {
map_name_str.as_ptr()
};
let fd = unsafe {
libbpf_sys::bpf_map_create(
map_type.into(),
map_name_ptr,
key_size,
value_size,
max_entries,
opts,
)
};
if fd < 0 {
return Err(Error::System(-fd));
}
Ok(Map {
fd,
name: map_name,
ty: map_type,
key_size,
value_size,
ptr: None,
})
}
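/// Register a struct_ops map with the kernel and return the resulting
/// [`Link`]. Only works for maps of type [`MapType::StructOps`] that are
/// backed by a loaded BPF object.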
pub fn attach_struct_ops(&self) -> Result<Link> {
if self.map_type() != MapType::StructOps {
return Err(Error::InvalidInput(format!(
"Invalid map type ({}) for attach_struct_ops()",
self.map_type(),
)));
}
let ptr = match self.ptr {
Some(ptr) => ptr,
None => {
return Err(Error::InvalidInput(
"Cannot attach a user-created struct_ops map".to_string(),
))
}
};
util::create_bpf_entity_checked(|| unsafe {
libbpf_sys::bpf_map__attach_struct_ops(ptr.as_ptr())
})
.map(|ptr| unsafe {
Link::new(ptr)
})
}
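/// Returns the raw `libbpf_sys::bpf_map` pointer backing this map, if any.
/// Maps created from a bare file descriptor return `None`.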
pub fn as_libbpf_bpf_map_ptr(&self) -> Option<NonNull<libbpf_sys::bpf_map>> {
self.ptr
}
}
bitflags! {
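/// Flags used with [`Map`] lookup and update operations.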
pub struct MapFlags: u64 {
const ANY = libbpf_sys::BPF_ANY as _;
const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
const EXIST = libbpf_sys::BPF_EXIST as _;
const LOCK = libbpf_sys::BPF_F_LOCK as _;
}
}
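/// Type of a [`Map`]. Mirrors `enum bpf_map_type` from the kernel UAPI, with
/// `Unknown` covering values this crate does not know about.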
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, TryFromPrimitive, IntoPrimitive, PartialEq, Eq, Display, Debug)]
#[allow(missing_docs)]
pub enum MapType {
Unspec = 0,
Hash,
Array,
ProgArray,
PerfEventArray,
PercpuHash,
PercpuArray,
StackTrace,
CgroupArray,
LruHash,
LruPercpuHash,
LpmTrie,
ArrayOfMaps,
HashOfMaps,
Devmap,
Sockmap,
Cpumap,
Xskmap,
Sockhash,
CgroupStorage,
ReuseportSockarray,
PercpuCgroupStorage,
Queue,
Stack,
SkStorage,
DevmapHash,
StructOps,
RingBuf,
InodeStorage,
TaskStorage,
BloomFilter,
Unknown = u32::MAX,
}
impl MapType {
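/// Returns `true` if the map type stores one value per possible CPU.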
pub fn is_percpu(&self) -> bool {
matches!(
self,
MapType::PercpuArray
| MapType::PercpuHash
| MapType::LruPercpuHash
| MapType::PercpuCgroupStorage
)
}
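/// Detects whether the running kernel supports this map type by probing it
/// via `libbpf_probe_bpf_map_type`. The probe may require CAP_BPF (or root)
/// to succeed.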
pub fn is_supported(&self) -> Result<bool> {
let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, std::ptr::null()) };
match ret {
0 => Ok(false),
1 => Ok(true),
_ => Err(Error::System(-ret)),
}
}
}
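/// An iterator over the keys of a [`Map`], backed by `bpf_map_get_next_key`.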
#[derive(Debug)]
pub struct MapKeyIter<'a> {
map: &'a Map,
prev: Option<Vec<u8>>,
next: Vec<u8>,
}
impl<'a> MapKeyIter<'a> {
fn new(map: &'a Map, key_size: u32) -> Self {
Self {
map,
prev: None,
next: vec![0; key_size as usize],
}
}
}
impl<'a> Iterator for MapKeyIter<'a> {
type Item = Vec<u8>;
fn next(&mut self) -> Option<Self::Item> {
let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr());
let ret = unsafe {
libbpf_sys::bpf_map_get_next_key(self.map.fd(), prev as _, self.next.as_mut_ptr() as _)
};
if ret != 0 {
None
} else {
self.prev = Some(self.next.clone());
Some(self.next.clone())
}
}
}
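/// A convenience wrapper around the kernel's `bpf_map_info`, as filled in by
/// `bpf_obj_get_info_by_fd`.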
#[derive(Debug)]
pub struct MapInfo {
pub info: bpf_map_info,
}
impl MapInfo {
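/// Create a `MapInfo` by querying the kernel about the map behind `fd`.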
pub fn new(fd: BorrowedFd) -> Result<Self> {
let mut map_info = unsafe { mem::zeroed::<bpf_map_info>() };
let mut size = mem::size_of_val(&map_info) as u32;
let () = util::parse_ret(unsafe {
bpf_obj_get_info_by_fd(
fd.as_raw_fd(),
&mut map_info as *mut bpf_map_info as *mut c_void,
&mut size as *mut u32,
)
})?;
Ok(Self { info: map_info })
}
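/// Get the map type, falling back to [`MapType::Unknown`] for types this
/// crate does not know about.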
#[inline]
pub fn map_type(&self) -> MapType {
MapType::try_from(self.info.type_).unwrap_or(MapType::Unknown)
}
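/// Get the name of this map.
///
/// Returns an error if the name in `bpf_map_info` is not valid utf-8.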
pub fn name(&self) -> Result<&str> {
let char_slice = unsafe {
from_raw_parts(
self.info.name[..].as_ptr() as *const u8,
self.info.name.len(),
)
};
let zero_idx = char_slice
.iter()
.position(|b| *b == 0)
.ok_or_else(|| Error::Internal("No nul found in `bpf_map_info::name`".to_string()))?;
CStr::from_bytes_with_nul(&char_slice[..=zero_idx])
.map_err(|e| Error::Internal(format!("Failed to cast name to CStr: {e}")))?
.to_str()
.map_err(|e| Error::Internal(format!("Failed to cast name to str: {e}")))
}
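/// Get the flags the map was created with.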
#[inline]
pub fn flags(&self) -> MapFlags {
MapFlags::from_bits_truncate(self.info.map_flags as u64)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Map;
use crate::MapFlags;
use crate::MapInfo;
use crate::MapType;
#[test]
pub fn test_map_info() {
let opts = libbpf_sys::bpf_map_create_opts {
sz: mem::size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
map_flags: libbpf_sys::BPF_ANY,
btf_fd: 0,
btf_key_type_id: 0,
btf_value_type_id: 0,
btf_vmlinux_value_type_id: 0,
inner_map_fd: 0,
map_extra: 0,
numa_node: 0,
map_ifindex: 0,
};
let map = Map::create(MapType::Hash, Some("simple_map"), 8, 64, 1024, &opts).unwrap();
let fd = unsafe { BorrowedFd::borrow_raw(map.fd()) };
let map_info = MapInfo::new(fd).unwrap();
let name_received = map_info.name().unwrap();
assert_eq!(name_received, "simple_map");
assert_eq!(map_info.map_type(), MapType::Hash);
assert_eq!(map_info.flags() & MapFlags::ANY, MapFlags::ANY);
let map_info = &map_info.info;
assert_eq!(map_info.key_size, 8);
assert_eq!(map_info.value_size, 64);
assert_eq!(map_info.max_entries, 1024);
assert_eq!(map_info.btf_id, 0);
assert_eq!(map_info.btf_key_type_id, 0);
assert_eq!(map_info.btf_value_type_id, 0);
assert_eq!(map_info.btf_vmlinux_value_type_id, 0);
assert_eq!(map_info.map_extra, 0);
assert_eq!(map_info.ifindex, 0);
}
}