//! libbpf-rs `map.rs` — safe wrappers around libbpf BPF map objects.
1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::io;
9use std::marker::PhantomData;
10use std::mem;
11use std::mem::transmute;
12use std::ops::Deref;
13use std::os::unix::ffi::OsStrExt;
14use std::os::unix::io::AsFd;
15use std::os::unix::io::AsRawFd;
16use std::os::unix::io::BorrowedFd;
17use std::os::unix::io::FromRawFd;
18use std::os::unix::io::OwnedFd;
19use std::os::unix::io::RawFd;
20use std::path::Path;
21use std::ptr;
22use std::ptr::NonNull;
23use std::slice;
24use std::slice::from_raw_parts;
25
26use bitflags::bitflags;
27use libbpf_sys::bpf_map_info;
28use libbpf_sys::bpf_obj_get_info_by_fd;
29
30use crate::error;
31use crate::util;
32use crate::util::parse_ret_i32;
33use crate::util::validate_bpf_ret;
34use crate::AsRawLibbpf;
35use crate::Error;
36use crate::ErrorExt as _;
37use crate::Link;
38use crate::Mut;
39use crate::Result;
40
41/// An immutable parsed but not yet loaded BPF map.
42pub type OpenMap<'obj> = OpenMapImpl<'obj>;
43/// A mutable parsed but not yet loaded BPF map.
44pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;
45
46/// Represents a parsed but not yet loaded BPF map.
47///
48/// This object exposes operations that need to happen before the map is created.
49///
50/// Some methods require working with raw bytes. You may find libraries such as
51/// [`plain`](https://crates.io/crates/plain) helpful.
52#[derive(Debug)]
53#[repr(transparent)]
54pub struct OpenMapImpl<'obj, T = ()> {
55    ptr: NonNull<libbpf_sys::bpf_map>,
56    _phantom: PhantomData<&'obj T>,
57}
58
impl<'obj> OpenMap<'obj> {
    /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`.
    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid and non-NULL.
        Self {
            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the [`OpenMap`]'s name.
    pub fn name(&self) -> &'obj OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        //          NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve type of the map.
    pub fn map_type(&self) -> MapType {
        // SAFETY: `ptr` is valid for the lifetime of `self` by construction.
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    // Query libbpf for the map's initial value buffer and its size.
    // Returns a NULL pointer if the map has no initial value.
    fn initial_value_raw(&self) -> (*mut u8, usize) {
        // NOTE(review): the cast assumes `libbpf_sys::size_t` has the same
        // width as `u64` — confirm on 32-bit targets.
        let mut size = 0u64;
        let ptr = unsafe {
            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
        };
        (ptr.cast(), size as _)
    }

    /// Retrieve the initial value of the map.
    pub fn initial_value(&self) -> Option<&[u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `bpf_map__initial_value` returned a non-NULL pointer
            //         to a buffer of `size` bytes owned by the map.
            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Retrieve the maximum number of entries of the map.
    pub fn max_entries(&self) -> u32 {
        // SAFETY: `ptr` is valid for the lifetime of `self` by construction.
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }

    /// Return `true` if the map is set to be auto-created during load, `false` otherwise.
    pub fn autocreate(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self` by construction.
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
115
116impl<'obj> OpenMapMut<'obj> {
117    /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`.
118    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
119        Self {
120            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
121            _phantom: PhantomData,
122        }
123    }
124
125    /// Retrieve the initial value of the map.
126    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
127        let (ptr, size) = self.initial_value_raw();
128        if ptr.is_null() {
129            None
130        } else {
131            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
132            Some(data)
133        }
134    }
135
136    /// Bind map to a particular network device.
137    ///
138    /// Used for offloading maps to hardware.
139    pub fn set_map_ifindex(&mut self, idx: u32) {
140        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
141    }
142
143    /// Set the initial value of the map.
144    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
145        let ret = unsafe {
146            libbpf_sys::bpf_map__set_initial_value(
147                self.ptr.as_ptr(),
148                data.as_ptr() as *const c_void,
149                data.len() as libbpf_sys::size_t,
150            )
151        };
152
153        util::parse_ret(ret)
154    }
155
156    /// Set the type of the map.
157    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
158        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
159        util::parse_ret(ret)
160    }
161
162    /// Set the key size of the map in bytes.
163    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
164        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
165        util::parse_ret(ret)
166    }
167
168    /// Set the value size of the map in bytes.
169    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
170        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
171        util::parse_ret(ret)
172    }
173
174    /// Set the maximum number of entries this map can have.
175    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
176        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
177        util::parse_ret(ret)
178    }
179
180    /// Set flags on this map.
181    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
182        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
183        util::parse_ret(ret)
184    }
185
186    /// Set the NUMA node for this map.
187    ///
188    /// This can be used to ensure that the map is allocated on a particular
189    /// NUMA node, which can be useful for performance-critical applications.
190    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
191        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
192        util::parse_ret(ret)
193    }
194
195    /// Set the inner map FD.
196    ///
197    /// This is used for nested maps, where the value type of the outer map is a pointer to the
198    /// inner map.
199    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
200        let ret = unsafe {
201            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
202        };
203        util::parse_ret(ret)
204    }
205
206    /// Set the `map_extra` field for this map.
207    ///
208    /// Allows users to pass additional data to the
209    /// kernel when loading the map. The kernel will store this value in the
210    /// `bpf_map_info` struct associated with the map.
211    ///
212    /// This can be used to pass data to the kernel that is not otherwise
213    /// representable via the existing `bpf_map_def` fields.
214    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
215        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
216        util::parse_ret(ret)
217    }
218
219    /// Set whether or not libbpf should automatically create this map during load phase.
220    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
221        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
222        util::parse_ret(ret)
223    }
224
225    /// Set where the map should be pinned.
226    ///
227    /// Note this does not actually create the pin.
228    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
229        let path_c = util::path_to_cstring(path)?;
230        let path_ptr = path_c.as_ptr();
231
232        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
233        util::parse_ret(ret)
234    }
235
236    /// Reuse an fd for a BPF map
237    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
238        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
239        util::parse_ret(ret)
240    }
241
242    /// Reuse an already-pinned map for `self`.
243    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
244        let cstring = util::path_to_cstring(path)?;
245
246        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
247        if fd < 0 {
248            return Err(Error::from(io::Error::last_os_error()));
249        }
250
251        let fd = unsafe { OwnedFd::from_raw_fd(fd) };
252
253        let reuse_result = self.reuse_fd(fd.as_fd());
254
255        reuse_result
256    }
257}
258
// Allow all shared-access methods of `OpenMap` to be called on an
// `OpenMapMut` without duplicating them.
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory
        //         representation of both types is the same.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
268
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        // The pointer is non-NULL by construction; no ownership is
        // transferred to the caller.
        self.ptr
    }
}
277
278pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
279    let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
280    let fd = util::parse_ret_i32(fd).ok();
281    fd
282}
283
284/// Return the size of one value including padding for interacting with per-cpu
285/// maps. The values are aligned to 8 bytes.
286fn percpu_aligned_value_size<M>(map: &M) -> usize
287where
288    M: MapCore + ?Sized,
289{
290    let val_size = map.value_size() as usize;
291    util::roundup(val_size, 8)
292}
293
294/// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
295fn percpu_buffer_size<M>(map: &M) -> Result<usize>
296where
297    M: MapCore + ?Sized,
298{
299    let aligned_val_size = percpu_aligned_value_size(map);
300    let ncpu = crate::num_possible_cpus()?;
301    Ok(ncpu * aligned_val_size)
302}
303
304/// Apply a key check and return a null pointer in case of dealing with queue/stack/bloom-filter
305/// map, before passing the key to the bpf functions that support the map of type
306/// queue/stack/bloom-filter.
307fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
308where
309    M: MapCore + ?Sized,
310{
311    // For all they keyless maps we null out the key per documentation of libbpf
312    if map.key_size() == 0 && map.map_type().is_keyless() {
313        return ptr::null();
314    }
315
316    key.as_ptr() as *const c_void
317}
318
319/// Internal function to perform a map lookup and write the value into raw pointer.
320/// Returns `Ok(true)` if the key was found, `Ok(false)` if not found, or an error.
321fn lookup_raw<M>(
322    map: &M,
323    key: &[u8],
324    value: &mut [mem::MaybeUninit<u8>],
325    flags: MapFlags,
326) -> Result<bool>
327where
328    M: MapCore + ?Sized,
329{
330    if key.len() != map.key_size() as usize {
331        return Err(Error::with_invalid_data(format!(
332            "key_size {} != {}",
333            key.len(),
334            map.key_size()
335        )));
336    }
337
338    // Make sure the internal users of this function pass the expected buffer size
339    debug_assert_eq!(
340        value.len(),
341        if map.map_type().is_percpu() {
342            percpu_buffer_size(map).unwrap()
343        } else {
344            map.value_size() as usize
345        }
346    );
347
348    let ret = unsafe {
349        libbpf_sys::bpf_map_lookup_elem_flags(
350            map.as_fd().as_raw_fd(),
351            map_key(map, key),
352            // TODO: Use `MaybeUninit::slice_as_mut_ptr` once stable.
353            value.as_mut_ptr().cast(),
354            flags.bits(),
355        )
356    };
357
358    if ret == 0 {
359        Ok(true)
360    } else {
361        let err = io::Error::last_os_error();
362        if err.kind() == io::ErrorKind::NotFound {
363            Ok(false)
364        } else {
365            Err(Error::from(err))
366        }
367    }
368}
369
370/// Internal function to return a value from a map into a buffer of the given size.
371fn lookup_raw_vec<M>(
372    map: &M,
373    key: &[u8],
374    flags: MapFlags,
375    out_size: usize,
376) -> Result<Option<Vec<u8>>>
377where
378    M: MapCore + ?Sized,
379{
380    // Allocate without initializing (avoiding memset)
381    let mut out = Vec::with_capacity(out_size);
382
383    match lookup_raw(map, key, out.spare_capacity_mut(), flags)? {
384        true => {
385            // SAFETY: `lookup_raw` successfully filled the buffer
386            unsafe {
387                out.set_len(out_size);
388            }
389            Ok(Some(out))
390        }
391        false => Ok(None),
392    }
393}
394
395/// Internal function to update a map. This does not check the length of the
396/// supplied value.
397fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
398where
399    M: MapCore + ?Sized,
400{
401    if key.len() != map.key_size() as usize {
402        return Err(Error::with_invalid_data(format!(
403            "key_size {} != {}",
404            key.len(),
405            map.key_size()
406        )));
407    };
408
409    let ret = unsafe {
410        libbpf_sys::bpf_map_update_elem(
411            map.as_fd().as_raw_fd(),
412            map_key(map, key),
413            value.as_ptr() as *const c_void,
414            flags.bits(),
415        )
416    };
417
418    util::parse_ret(ret)
419}
420
421/// Internal function to batch lookup (and delete) elements from a map.
422fn lookup_batch_raw<M>(
423    map: &M,
424    count: u32,
425    elem_flags: MapFlags,
426    flags: MapFlags,
427    delete: bool,
428) -> BatchedMapIter<'_>
429where
430    M: MapCore + ?Sized,
431{
432    #[allow(clippy::needless_update)]
433    let opts = libbpf_sys::bpf_map_batch_opts {
434        sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
435        elem_flags: elem_flags.bits(),
436        flags: flags.bits(),
437        // bpf_map_batch_opts might have padding fields on some platform
438        ..Default::default()
439    };
440
441    // for maps of type BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, LRU_PERCPU_HASH}
442    // the key size must be at least 4 bytes
443    let key_size = if map.map_type().is_hash_map() {
444        map.key_size().max(4)
445    } else {
446        map.key_size()
447    };
448
449    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
450}
451
452/// Intneral function that returns an error for per-cpu and bloom filter maps.
453fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
454where
455    M: MapCore + ?Sized,
456{
457    if map.map_type().is_bloom_filter() {
458        return Err(Error::with_invalid_data(
459            "lookup_bloom_filter() must be used for bloom filter maps",
460        ));
461    }
462    if map.map_type().is_percpu() {
463        return Err(Error::with_invalid_data(format!(
464            "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
465            map.map_type(),
466        )));
467    }
468
469    Ok(())
470}
471
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    /// Sealing trait: restricts implementations of [`MapCore`] to the types
    /// listed below, keeping the trait closed to downstream crates.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
481
482/// A trait representing core functionality common to fully initialized maps.
483pub trait MapCore: Debug + AsFd + private::Sealed {
484    /// Retrieve the map's name.
485    fn name(&self) -> &OsStr;
486
487    /// Retrieve type of the map.
488    fn map_type(&self) -> MapType;
489
490    /// Retrieve the size of the map's keys.
491    fn key_size(&self) -> u32;
492
493    /// Retrieve the size of the map's values.
494    fn value_size(&self) -> u32;
495
496    /// Retrieve `max_entries` of the map.
497    fn max_entries(&self) -> u32;
498
499    /// Fetch extra map information
500    #[inline]
501    fn info(&self) -> Result<MapInfo> {
502        MapInfo::new(self.as_fd())
503    }
504
505    /// Returns an iterator over keys in this map
506    ///
507    /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
508    /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
509    /// iteration becomes unpredictable.
510    fn keys(&self) -> MapKeyIter<'_> {
511        MapKeyIter::new(self.as_fd(), self.key_size())
512    }
513
514    /// Returns map value as `Vec` of `u8`.
515    ///
516    /// `key` must have exactly [`Self::key_size()`] elements.
517    ///
518    /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
519    /// must be used.
520    /// If the map is of type `bloom_filter` the function [`Self::lookup_bloom_filter()`] must be
521    /// used
522    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
523        check_not_bloom_or_percpu(self)?;
524        let out_size = self.value_size() as usize;
525        lookup_raw_vec(self, key, flags, out_size)
526    }
527
528    /// Looks up a map value into a pre-allocated buffer, avoiding allocation.
529    ///
530    /// This method provides a zero-allocation alternative to [`Self::lookup()`].
531    ///
532    /// `key` must have exactly [`Self::key_size()`] elements.
533    /// `value` must have exactly [`Self::value_size()`] elements.
534    ///
535    /// Returns `Ok(true)` if the key was found and the buffer was filled,
536    /// `Ok(false)` if the key was not found, or an error.
537    ///
538    /// If the map is one of the per-cpu data structures, this function cannot be used.
539    /// If the map is of type `bloom_filter`, this function cannot be used.
540    fn lookup_into(&self, key: &[u8], value: &mut [u8], flags: MapFlags) -> Result<bool> {
541        check_not_bloom_or_percpu(self)?;
542
543        if value.len() != self.value_size() as usize {
544            return Err(Error::with_invalid_data(format!(
545                "value buffer size {} != {}",
546                value.len(),
547                self.value_size()
548            )));
549        }
550
551        // SAFETY: `u8` and `MaybeUninit<u8>` have the same in-memory representation.
552        let value = unsafe {
553            slice::from_raw_parts_mut::<mem::MaybeUninit<u8>>(
554                value.as_mut_ptr().cast(),
555                value.len(),
556            )
557        };
558        lookup_raw(self, key, value, flags)
559    }
560
561    /// Returns many elements in batch mode from the map.
562    ///
563    /// `count` specifies the batch size.
564    fn lookup_batch(
565        &self,
566        count: u32,
567        elem_flags: MapFlags,
568        flags: MapFlags,
569    ) -> Result<BatchedMapIter<'_>> {
570        check_not_bloom_or_percpu(self)?;
571        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
572    }
573
574    /// Returns many elements in batch mode from the map.
575    ///
576    /// `count` specifies the batch size.
577    fn lookup_and_delete_batch(
578        &self,
579        count: u32,
580        elem_flags: MapFlags,
581        flags: MapFlags,
582    ) -> Result<BatchedMapIter<'_>> {
583        check_not_bloom_or_percpu(self)?;
584        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
585    }
586
587    /// Returns if the given value is likely present in `bloom_filter` as `bool`.
588    ///
589    /// `value` must have exactly [`Self::value_size()`] elements.
590    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
591        let ret = unsafe {
592            libbpf_sys::bpf_map_lookup_elem(
593                self.as_fd().as_raw_fd(),
594                ptr::null(),
595                value.to_vec().as_mut_ptr() as *mut c_void,
596            )
597        };
598
599        if ret == 0 {
600            Ok(true)
601        } else {
602            let err = io::Error::last_os_error();
603            if err.kind() == io::ErrorKind::NotFound {
604                Ok(false)
605            } else {
606                Err(Error::from(err))
607            }
608        }
609    }
610
611    /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per per-cpu maps.
612    ///
613    /// For normal maps, [`Self::lookup()`] must be used.
614    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
615        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
616            return Err(Error::with_invalid_data(format!(
617                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
618                self.map_type(),
619            )));
620        }
621
622        let val_size = self.value_size() as usize;
623        let aligned_val_size = percpu_aligned_value_size(self);
624        let out_size = percpu_buffer_size(self)?;
625
626        let raw_res = lookup_raw_vec(self, key, flags, out_size)?;
627        if let Some(raw_vals) = raw_res {
628            let mut out = Vec::new();
629            for chunk in raw_vals.chunks_exact(aligned_val_size) {
630                out.push(chunk[..val_size].to_vec());
631            }
632            Ok(Some(out))
633        } else {
634            Ok(None)
635        }
636    }
637
638    /// Deletes an element from the map.
639    ///
640    /// `key` must have exactly [`Self::key_size()`] elements.
641    fn delete(&self, key: &[u8]) -> Result<()> {
642        if key.len() != self.key_size() as usize {
643            return Err(Error::with_invalid_data(format!(
644                "key_size {} != {}",
645                key.len(),
646                self.key_size()
647            )));
648        };
649
650        let ret = unsafe {
651            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
652        };
653        util::parse_ret(ret)
654    }
655
656    /// Deletes many elements in batch mode from the map.
657    ///
658    /// `keys` must have exactly `Self::key_size() * count` elements.
659    fn delete_batch(
660        &self,
661        keys: &[u8],
662        count: u32,
663        elem_flags: MapFlags,
664        flags: MapFlags,
665    ) -> Result<()> {
666        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
667            return Err(Error::with_invalid_data(format!(
668                "batch key_size {} != {} * {}",
669                keys.len(),
670                self.key_size(),
671                count
672            )));
673        };
674
675        #[allow(clippy::needless_update)]
676        let opts = libbpf_sys::bpf_map_batch_opts {
677            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
678            elem_flags: elem_flags.bits(),
679            flags: flags.bits(),
680            // bpf_map_batch_opts might have padding fields on some platform
681            ..Default::default()
682        };
683
684        let mut count = count;
685        let ret = unsafe {
686            libbpf_sys::bpf_map_delete_batch(
687                self.as_fd().as_raw_fd(),
688                keys.as_ptr() as *const c_void,
689                &mut count,
690                &opts as *const libbpf_sys::bpf_map_batch_opts,
691            )
692        };
693        util::parse_ret(ret)
694    }
695
696    /// Same as [`Self::lookup()`] except this also deletes the key from the map.
697    ///
698    /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
699    /// and [`MapType::Stack`].
700    ///
701    /// `key` must have exactly [`Self::key_size()`] elements.
702    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
703        if key.len() != self.key_size() as usize {
704            return Err(Error::with_invalid_data(format!(
705                "key_size {} != {}",
706                key.len(),
707                self.key_size()
708            )));
709        };
710
711        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
712
713        let ret = unsafe {
714            libbpf_sys::bpf_map_lookup_and_delete_elem(
715                self.as_fd().as_raw_fd(),
716                map_key(self, key),
717                out.as_mut_ptr() as *mut c_void,
718            )
719        };
720
721        if ret == 0 {
722            unsafe {
723                out.set_len(self.value_size() as usize);
724            }
725            Ok(Some(out))
726        } else {
727            let err = io::Error::last_os_error();
728            if err.kind() == io::ErrorKind::NotFound {
729                Ok(None)
730            } else {
731                Err(Error::from(err))
732            }
733        }
734    }
735
736    /// Update an element.
737    ///
738    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
739    /// [`Self::value_size()`] elements.
740    ///
741    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
742    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
743        if self.map_type().is_percpu() {
744            return Err(Error::with_invalid_data(format!(
745                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
746                self.map_type(),
747            )));
748        }
749
750        if value.len() != self.value_size() as usize {
751            return Err(Error::with_invalid_data(format!(
752                "value_size {} != {}",
753                value.len(),
754                self.value_size()
755            )));
756        };
757
758        update_raw(self, key, value, flags)
759    }
760
761    /// Updates many elements in batch mode in the map
762    ///
763    /// `keys` must have exactly `Self::key_size() * count` elements. `values` must have exactly
764    /// `Self::key_size() * count` elements.
765    fn update_batch(
766        &self,
767        keys: &[u8],
768        values: &[u8],
769        count: u32,
770        elem_flags: MapFlags,
771        flags: MapFlags,
772    ) -> Result<()> {
773        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
774            return Err(Error::with_invalid_data(format!(
775                "batch key_size {} != {} * {}",
776                keys.len(),
777                self.key_size(),
778                count
779            )));
780        };
781
782        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
783            return Err(Error::with_invalid_data(format!(
784                "batch value_size {} != {} * {}",
785                values.len(),
786                self.value_size(),
787                count
788            )));
789        }
790
791        #[allow(clippy::needless_update)]
792        let opts = libbpf_sys::bpf_map_batch_opts {
793            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
794            elem_flags: elem_flags.bits(),
795            flags: flags.bits(),
796            // bpf_map_batch_opts might have padding fields on some platform
797            ..Default::default()
798        };
799
800        let mut count = count;
801        let ret = unsafe {
802            libbpf_sys::bpf_map_update_batch(
803                self.as_fd().as_raw_fd(),
804                keys.as_ptr() as *const c_void,
805                values.as_ptr() as *const c_void,
806                &mut count,
807                &opts as *const libbpf_sys::bpf_map_batch_opts,
808            )
809        };
810
811        util::parse_ret(ret)
812    }
813
814    /// Update an element in an per-cpu map with one value per cpu.
815    ///
816    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one
817    /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
818    /// with exactly [`Self::value_size()`] elements each.
819    ///
820    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
821    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
822        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
823            return Err(Error::with_invalid_data(format!(
824                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
825                self.map_type(),
826            )));
827        }
828
829        if values.len() != crate::num_possible_cpus()? {
830            return Err(Error::with_invalid_data(format!(
831                "number of values {} != number of cpus {}",
832                values.len(),
833                crate::num_possible_cpus()?
834            )));
835        };
836
837        let val_size = self.value_size() as usize;
838        let aligned_val_size = percpu_aligned_value_size(self);
839        let buf_size = percpu_buffer_size(self)?;
840
841        let mut value_buf = vec![0; buf_size];
842
843        for (i, val) in values.iter().enumerate() {
844            if val.len() != val_size {
845                return Err(Error::with_invalid_data(format!(
846                    "value size for cpu {} is {} != {}",
847                    i,
848                    val.len(),
849                    val_size
850                )));
851            }
852
853            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
854                .copy_from_slice(val);
855        }
856
857        update_raw(self, key, &value_buf, flags)
858    }
859}
860
/// An immutable loaded BPF map.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable loaded BPF map.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;

/// Represents a libbpf-created map.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapImpl<'obj, T = ()> {
    // Non-NULL pointer to the underlying libbpf map object.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper to the lifetime of the owning BPF object. `T = ()`
    // marks shared access, `T = Mut` exclusive access.
    _phantom: PhantomData<&'obj T>,
}
875
impl<'obj> Map<'obj> {
    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the provided map does not have a file descriptor, i.e., has
    /// not been loaded into the kernel.
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
    /// file descriptor.
    ///
    /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
    /// will be the result.
    ///
    /// # Safety
    ///
    /// The pointer must point to a loaded map.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Returns whether map is pinned or not flag
    pub fn is_pinned(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self` by construction.
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// Returns the `pin_path` if the map is pinned, otherwise, `None`
    /// is returned.
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // means map is not pinned
            return None;
        }
        // SAFETY: `path_ptr` is non-NULL and points to a NUL-terminated
        //         string owned by the map.
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }

    /// Return `true` if the map was set to be auto-created during load, `false` otherwise.
    pub fn autocreate(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self` by construction.
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
932
impl<'obj> MapMut<'obj> {
    /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the provided map has no file descriptor associated with it
    /// (i.e., if it has not been loaded).
    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        // SAFETY: `ptr` is valid by construction and `path_ptr` points into
        //         `path_c`, which is live for the duration of the call.
        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();
        // SAFETY: `ptr` is valid by construction and `path_ptr` points into
        //         `path_c`, which is live for the duration of the call.
        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Attach a struct ops map
    ///
    /// Returns an error if this map is not of type [`MapType::StructOps`] or
    /// if the attachment itself fails.
    pub fn attach_struct_ops(&mut self) -> Result<Link> {
        if self.map_type() != MapType::StructOps {
            return Err(Error::with_invalid_data(format!(
                "Invalid map type ({:?}) for attach_struct_ops()",
                self.map_type(),
            )));
        }

        // SAFETY: `ptr` is valid by construction.
        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
}
985
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapMut` and `Map` are both instantiations of `MapImpl`
        //         whose only difference is the zero-sized `PhantomData`
        //         marker type, so the two references refer to identically
        //         laid out data.
        // NOTE(review): layout identity of two `repr(Rust)` generic
        //         instantiations is not formally guaranteed by the language;
        //         consider a `repr(transparent)`-backed conversion — confirm.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
993
994impl<T> AsFd for MapImpl<'_, T> {
995    #[inline]
996    fn as_fd(&self) -> BorrowedFd<'_> {
997        // SANITY: Our map must always have a file descriptor associated with
998        //         it.
999        let fd = map_fd(self.ptr).unwrap();
1000        // SAFETY: `fd` is guaranteed to be valid for the lifetime of
1001        //         the created object.
1002        let fd = unsafe { BorrowedFd::borrow_raw(fd) };
1003        fd
1004    }
1005}
1006
impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    fn name(&self) -> &OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        //          NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        // SAFETY: `ptr` is valid by construction.
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        // Unrecognized kernel values map to `MapType::Unknown`.
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        // SAFETY: `ptr` is valid by construction.
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        // SAFETY: `ptr` is valid by construction.
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        // SAFETY: `ptr` is valid by construction.
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }
}
1041
impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        // The pointer was checked to be non-NULL at construction time.
        self.ptr
    }
}
1051
/// A handle to a map. Handles can be duplicated and dropped.
///
/// While possible to [create directly][MapHandle::create], in many cases it is
/// useful to create such a handle from an existing [`Map`]:
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapHandle;
/// # let get_map = || -> &Map { todo!() };
/// let map: &Map = get_map();
/// let map_handle = MapHandle::try_from(map).unwrap();
/// ```
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapHandle {
    // Owned file descriptor referring to the map.
    fd: OwnedFd,
    // The attributes below are cached copies captured when the handle was
    // created.
    name: OsString,
    ty: MapType,
    key_size: u32,
    value_size: u32,
    max_entries: u32,
}
1075
impl MapHandle {
    /// Create a bpf map whose data is not managed by libbpf.
    ///
    /// Returns an error if `name` contains NUL bytes or if the kernel rejects
    /// the map creation request.
    pub fn create<T: AsRef<OsStr>>(
        map_type: MapType,
        name: Option<T>,
        key_size: u32,
        value_size: u32,
        max_entries: u32,
        opts: &libbpf_sys::bpf_map_create_opts,
    ) -> Result<Self> {
        let name = match name {
            Some(name) => name.as_ref().to_os_string(),
            // The old version kernel don't support specifying map name.
            None => OsString::new(),
        };
        let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
            Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
        })?;
        // Pass NULL when no name was given so that old kernels without
        // map-name support still accept the call.
        let name_c_ptr = if name.is_empty() {
            ptr::null()
        } else {
            name_c_str.as_bytes_with_nul().as_ptr()
        };

        // SAFETY: `name_c_ptr` is either NULL or points into `name_c_str`,
        //         which outlives this call; `opts` is a valid reference.
        let fd = unsafe {
            libbpf_sys::bpf_map_create(
                map_type.into(),
                name_c_ptr.cast(),
                key_size,
                value_size,
                max_entries,
                opts,
            )
        };
        let () = util::parse_ret(fd)?;

        Ok(Self {
            // SAFETY: A file descriptor coming from the `bpf_map_create`
            //         function is always suitable for ownership and can be
            //         cleaned up with close.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
            name,
            ty: map_type,
            key_size,
            value_size,
            max_entries,
        })
    }

    /// Open a previously pinned map from its path.
    ///
    /// # Panics
    /// If the path contains null bytes.
    pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        // Non-generic inner function keeps the monomorphized wrapper small.
        fn inner(path: &Path) -> Result<MapHandle> {
            let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
            let fd = parse_ret_i32(unsafe {
                // SAFETY
                // p is never null since we allocated ourselves.
                libbpf_sys::bpf_obj_get(p.as_ptr())
            })?;
            MapHandle::from_fd(unsafe {
                // SAFETY
                // A file descriptor coming from the bpf_obj_get function is always suitable for
                // ownership and can be cleaned up with close.
                OwnedFd::from_raw_fd(fd)
            })
        }

        inner(path.as_ref())
    }

    /// Open a loaded map from its map id.
    pub fn from_map_id(id: u32) -> Result<Self> {
        parse_ret_i32(unsafe {
            // SAFETY
            // This function is always safe to call.
            libbpf_sys::bpf_map_get_fd_by_id(id)
        })
        .map(|fd| unsafe {
            // SAFETY
            // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
            // for ownership and can be cleaned up with close.
            OwnedFd::from_raw_fd(fd)
        })
        .and_then(Self::from_fd)
    }

    // Build a handle from an owned fd by querying the kernel for the map's
    // attributes and caching them.
    fn from_fd(fd: OwnedFd) -> Result<Self> {
        let info = MapInfo::new(fd.as_fd())?;
        Ok(Self {
            fd,
            name: info.name()?.into(),
            ty: info.map_type(),
            key_size: info.info.key_size,
            value_size: info.info.value_size,
            max_entries: info.info.max_entries,
        })
    }

    /// Freeze the map as read-only from user space.
    ///
    /// Entries from a frozen map can no longer be updated or deleted with the
    /// `bpf()` system call. This operation is not reversible, and the map remains
    /// immutable from user space until its destruction. However, read and write
    /// permissions for BPF programs to the map remain unchanged.
    pub fn freeze(&self) -> Result<()> {
        // SAFETY: `bpf_map_freeze` is safe to call with any valid fd.
        let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };

        util::parse_ret(ret)
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        // SAFETY: `path_ptr` points into `path_c`, which is live for the
        //         duration of the call.
        let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        // Unpinning a BPF object boils down to removing its bpffs file.
        remove_file(path).context("failed to remove pin map")
    }
}
1204
impl MapCore for MapHandle {
    // All attributes below are served from the copies cached at handle
    // creation time; they do not re-query the kernel.
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1231
impl AsFd for MapHandle {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // Borrow the descriptor owned by this handle.
        self.fd.as_fd()
    }
}
1238
1239impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
1240where
1241    T: Debug,
1242{
1243    type Error = Error;
1244
1245    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
1246        Ok(Self {
1247            fd: other
1248                .as_fd()
1249                .try_clone_to_owned()
1250                .context("failed to duplicate map file descriptor")?,
1251            name: other.name().to_os_string(),
1252            ty: other.map_type(),
1253            key_size: other.key_size(),
1254            value_size: other.value_size(),
1255            max_entries: other.max_entries(),
1256        })
1257    }
1258}
1259
1260impl TryFrom<&Self> for MapHandle {
1261    type Error = Error;
1262
1263    fn try_from(other: &Self) -> Result<Self> {
1264        Ok(Self {
1265            fd: other
1266                .as_fd()
1267                .try_clone_to_owned()
1268                .context("failed to duplicate map file descriptor")?,
1269            name: other.name().to_os_string(),
1270            ty: other.map_type(),
1271            key_size: other.key_size(),
1272            value_size: other.value_size(),
1273            max_entries: other.max_entries(),
1274        })
1275    }
1276}
1277
bitflags! {
    /// Flags to configure [`Map`] operations.
    ///
    /// These mirror the `BPF_*` flag constants accepted by the `bpf(2)`
    /// map update/lookup commands.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY      = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST    = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK     = libbpf_sys::BPF_F_LOCK as _;
    }
}
1292
/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
// If you add a new per-cpu map, also update `is_percpu`.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    /// An unspecified map type.
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    /// A general purpose Hash map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html) for more details.
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    /// An Array map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    /// A program array map which holds only the file descriptors to other eBPF programs. Used for
    /// tail-calls.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PROG_ARRAY/) for more details.
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    /// An array map which holds only the file descriptors to perf events.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERF_EVENT_ARRAY/) for more details.
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    /// A Hash map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    /// An Array map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    /// See [`libbpf_sys::BPF_MAP_TYPE_STACK_TRACE`].
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    /// See [`libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY`].
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    /// A Hash map with least recently used (LRU) eviction policy.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#bpf-map-type-lru-hash-and-variants) for more details.
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    /// A Hash map with least recently used (LRU) eviction policy with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    /// A Longest Prefix Match (LPM) algorithm based map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_lpm_trie.html) for more details.
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    /// An array map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    /// An array map holds references to a socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    /// A map that redirects raw XDP frames to another CPU.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cpumap.html) for more details.
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    /// A map that redirects raw XDP frames to `AF_XDP` sockets (XSKs), a new type of address
    /// family in the kernel that allows redirection of frames from a driver to user space
    /// without having to traverse the full network stack.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_xskmap.html) for more details.
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    /// A Hash map that holds references to sockets via their socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    /// Deprecated. Use `CGrpStorage` instead.
    ///
    /// A Local storage for cgroups.
    /// Only available with `CONFIG_CGROUP_BPF` and to programs that attach to cgroups.
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgroup_storage.html) for more details.
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    /// A Local storage for cgroups. Only available with `CONFIG_CGROUPS`.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgrp_storage.html) for more details.
    /// See also [Difference between cgrp_storage and cgroup_storage](https://docs.kernel.org/bpf/map_cgrp_storage.html#difference-between-bpf-map-type-cgrp-storage-and-bpf-map-type-cgroup-storage)
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    /// A map that holds references to sockets with `SO_REUSEPORT` option set.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_REUSEPORT_SOCKARRAY/) for more details.
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    /// A per-CPU variant of [`BPF_MAP_TYPE_CGROUP_STORAGE`][`MapType::CgroupStorage`].
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) for more details.
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    /// A FIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    /// A LIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    /// A socket-local storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sk_storage.html) for more details.
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    /// A Hash map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    /// A specialized map that act as implementations of "struct ops" structures defined in the
    /// kernel.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_STRUCT_OPS/) for more details.
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    /// A ring buffer map to efficiently send large amount of data.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_RINGBUF/) for more details.
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    /// A storage map that holds data keyed on inodes.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_INODE_STORAGE/) for more details.
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    /// A storage map that holds data keyed on tasks.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_TASK_STORAGE/) for more details.
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    /// Bloom filters are a space-efficient probabilistic data structure used to quickly test
    /// whether an element exists in a set. In a bloom filter, false positives are possible
    /// whereas false negatives are not.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_bloom_filter.html) for more details.
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    /// See [`libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF`].
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// We choose to specify our own "unknown" type here b/c it's really up to the kernel
    /// to decide if it wants to reject the map. If it accepts it, it just means whoever
    /// using this library is a bit out of date.
    Unknown = u32::MAX,
}
1443
1444impl MapType {
1445    /// Returns if the map is of one of the per-cpu types.
1446    pub fn is_percpu(&self) -> bool {
1447        matches!(
1448            self,
1449            Self::PercpuArray | Self::PercpuHash | Self::LruPercpuHash | Self::PercpuCgroupStorage
1450        )
1451    }
1452
1453    /// Returns if the map is of one of the hashmap types.
1454    pub fn is_hash_map(&self) -> bool {
1455        matches!(
1456            self,
1457            Self::Hash | Self::PercpuHash | Self::LruHash | Self::LruPercpuHash
1458        )
1459    }
1460
1461    /// Returns if the map is keyless map type as per documentation of libbpf
1462    /// Keyless map types are: Queues, Stacks and Bloom Filters
1463    fn is_keyless(&self) -> bool {
1464        matches!(self, Self::Queue | Self::Stack | Self::BloomFilter)
1465    }
1466
1467    /// Returns if the map is of bloom filter type
1468    pub fn is_bloom_filter(&self) -> bool {
1469        Self::BloomFilter.eq(self)
1470    }
1471
1472    /// Detects if host kernel supports this BPF map type.
1473    ///
1474    /// Make sure the process has required set of CAP_* permissions (or runs as
1475    /// root) when performing feature checking.
1476    pub fn is_supported(&self) -> Result<bool> {
1477        let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1478        match ret {
1479            0 => Ok(false),
1480            1 => Ok(true),
1481            _ => Err(Error::from_raw_os_error(-ret)),
1482        }
1483    }
1484}
1485
1486impl From<u32> for MapType {
1487    fn from(value: u32) -> Self {
1488        use MapType::*;
1489
1490        match value {
1491            x if x == Unspec as u32 => Unspec,
1492            x if x == Hash as u32 => Hash,
1493            x if x == Array as u32 => Array,
1494            x if x == ProgArray as u32 => ProgArray,
1495            x if x == PerfEventArray as u32 => PerfEventArray,
1496            x if x == PercpuHash as u32 => PercpuHash,
1497            x if x == PercpuArray as u32 => PercpuArray,
1498            x if x == StackTrace as u32 => StackTrace,
1499            x if x == CgroupArray as u32 => CgroupArray,
1500            x if x == LruHash as u32 => LruHash,
1501            x if x == LruPercpuHash as u32 => LruPercpuHash,
1502            x if x == LpmTrie as u32 => LpmTrie,
1503            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
1504            x if x == HashOfMaps as u32 => HashOfMaps,
1505            x if x == Devmap as u32 => Devmap,
1506            x if x == Sockmap as u32 => Sockmap,
1507            x if x == Cpumap as u32 => Cpumap,
1508            x if x == Xskmap as u32 => Xskmap,
1509            x if x == Sockhash as u32 => Sockhash,
1510            x if x == CgroupStorage as u32 => CgroupStorage,
1511            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
1512            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
1513            x if x == Queue as u32 => Queue,
1514            x if x == Stack as u32 => Stack,
1515            x if x == SkStorage as u32 => SkStorage,
1516            x if x == DevmapHash as u32 => DevmapHash,
1517            x if x == StructOps as u32 => StructOps,
1518            x if x == RingBuf as u32 => RingBuf,
1519            x if x == InodeStorage as u32 => InodeStorage,
1520            x if x == TaskStorage as u32 => TaskStorage,
1521            x if x == BloomFilter as u32 => BloomFilter,
1522            x if x == UserRingBuf as u32 => UserRingBuf,
1523            _ => Unknown,
1524        }
1525    }
1526}
1527
1528impl From<MapType> for u32 {
1529    fn from(value: MapType) -> Self {
1530        value as Self
1531    }
1532}
1533
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // The key returned by the previous iteration, if any; `None` means the
    // next call starts from the beginning of the map.
    prev: Option<Vec<u8>>,
    // Scratch buffer the kernel writes the next key into.
    next: Vec<u8>,
}

impl<'map> MapKeyIter<'map> {
    /// Create an iterator over `map_fd` whose keys are `key_size` bytes each.
    fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
        Self {
            map_fd,
            prev: None,
            next: vec![0; key_size as usize],
        }
    }
}
1551
impl Iterator for MapKeyIter<'_> {
    type Item = Vec<u8>;

    fn next(&mut self) -> Option<Self::Item> {
        // Pass NULL as the "previous key" on the first call so the kernel
        // returns the first key in the map.
        let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);

        // SAFETY: `next` was sized to hold a full key at construction time
        //         and both pointers stay valid for the duration of the call.
        let ret = unsafe {
            libbpf_sys::bpf_map_get_next_key(
                self.map_fd.as_raw_fd(),
                prev as _,
                self.next.as_mut_ptr() as _,
            )
        };
        if ret != 0 {
            // Any non-zero return (including end-of-map) ends iteration.
            None
        } else {
            self.prev = Some(self.next.clone());
            Some(self.next.clone())
        }
    }
}
1573
1574/// An iterator over batches of key value pairs of a BPF map.
1575#[derive(Debug)]
1576pub struct BatchedMapIter<'map> {
1577    map_fd: BorrowedFd<'map>,
1578    delete: bool,
1579    count: usize,
1580    key_size: usize,
1581    value_size: usize,
1582    keys: Vec<u8>,
1583    values: Vec<u8>,
1584    prev: Option<Vec<u8>>,
1585    next: Vec<u8>,
1586    batch_opts: libbpf_sys::bpf_map_batch_opts,
1587    index: Option<usize>,
1588}
1589
1590impl<'map> BatchedMapIter<'map> {
1591    fn new(
1592        map_fd: BorrowedFd<'map>,
1593        count: u32,
1594        key_size: u32,
1595        value_size: u32,
1596        batch_opts: libbpf_sys::bpf_map_batch_opts,
1597        delete: bool,
1598    ) -> Self {
1599        Self {
1600            map_fd,
1601            delete,
1602            count: count as usize,
1603            key_size: key_size as usize,
1604            value_size: value_size as usize,
1605            keys: vec![0; (count * key_size) as usize],
1606            values: vec![0; (count * value_size) as usize],
1607            prev: None,
1608            next: vec![0; key_size as usize],
1609            batch_opts,
1610            index: None,
1611        }
1612    }
1613
1614    fn lookup_next_batch(&mut self) {
1615        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
1616        let mut count = self.count as u32;
1617
1618        let ret = unsafe {
1619            let lookup_fn = if self.delete {
1620                libbpf_sys::bpf_map_lookup_and_delete_batch
1621            } else {
1622                libbpf_sys::bpf_map_lookup_batch
1623            };
1624            lookup_fn(
1625                self.map_fd.as_raw_fd(),
1626                prev.cast(),
1627                self.next.as_mut_ptr().cast(),
1628                self.keys.as_mut_ptr().cast(),
1629                self.values.as_mut_ptr().cast(),
1630                &mut count,
1631                &self.batch_opts,
1632            )
1633        };
1634
1635        if let Err(e) = util::parse_ret(ret) {
1636            match e.kind() {
1637                // in this case we can trust the returned count value
1638                error::ErrorKind::NotFound => {}
1639                // retry with same input arguments
1640                error::ErrorKind::Interrupted => {
1641                    return self.lookup_next_batch();
1642                }
1643                _ => {
1644                    self.index = None;
1645                    return;
1646                }
1647            }
1648        }
1649
1650        self.prev = Some(self.next.clone());
1651        self.index = Some(0);
1652
1653        unsafe {
1654            self.keys.set_len(self.key_size * count as usize);
1655            self.values.set_len(self.value_size * count as usize);
1656        }
1657    }
1658}
1659
impl Iterator for BatchedMapIter<'_> {
    type Item = (Vec<u8>, Vec<u8>);

    fn next(&mut self) -> Option<Self::Item> {
        let load_next_batch = match self.index {
            Some(index) => {
                // Every element of the current batch has been yielded...
                let batch_finished = index * self.key_size >= self.keys.len();
                // ...and a batch shorter than requested means the map was
                // exhausted, so only fetch again after a full batch.
                let last_batch = self.keys.len() < self.key_size * self.count;
                batch_finished && !last_batch
            }
            // No batch fetched yet (or a previous fetch failed hard).
            None => true,
        };

        if load_next_batch {
            self.lookup_next_batch();
        }

        // `index` is `None` if the fetch failed; `nth` returns `None` once
        // the (possibly short) final batch is exhausted.
        let index = self.index?;
        let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
        let val = self
            .values
            .chunks_exact(self.value_size)
            .nth(index)?
            .to_vec();

        self.index = Some(index + 1);
        Some((key, val))
    }
}
1689
/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
/// provides the ability to retrieve the details of a certain map.
///
/// Instances are typically obtained via [`MapInfo::new`] from a map file
/// descriptor.
#[derive(Debug)]
pub struct MapInfo {
    /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
    pub info: bpf_map_info,
}
1697
impl MapInfo {
    /// Create a `MapInfo` object from a fd.
    ///
    /// Queries the kernel for the map's metadata via
    /// `bpf_obj_get_info_by_fd`.
    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
        let mut map_info = bpf_map_info::default();
        let mut size = mem::size_of_val(&map_info) as u32;
        // SAFETY: All pointers are derived from references and hence valid.
        let () = util::parse_ret(unsafe {
            bpf_obj_get_info_by_fd(
                fd.as_raw_fd(),
                &mut map_info as *mut bpf_map_info as *mut c_void,
                &mut size as *mut u32,
            )
        })?;
        Ok(Self { info: map_info })
    }

    /// Get the map type
    #[inline]
    pub fn map_type(&self) -> MapType {
        MapType::from(self.info.type_)
    }

    /// Get the name of this map.
    ///
    /// Returns error if the underlying data in the structure is not a valid
    /// utf-8 string.
    pub fn name<'a>(&self) -> Result<&'a str> {
        // NOTE(review): the returned lifetime `'a` is unconstrained by
        // `&self`, yet the referenced bytes live in `self.info.name`; a
        // caller can thus keep the `&str` alive past this `MapInfo`, which
        // looks unsound. Tying the result to `&self` would be a breaking
        // signature change — confirm and fix in a major release.
        // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 has the same size.
        let char_slice =
            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };

        util::c_char_slice_to_cstr(char_slice)
            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
            .to_str()
            .map_err(Error::with_invalid_data)
    }

    /// Get the map flags.
    #[inline]
    pub fn flags(&self) -> MapFlags {
        // Unknown bits reported by newer kernels are silently dropped.
        MapFlags::from_bits_truncate(self.info.map_flags as u64)
    }
}
1741
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Verify that every `MapType` variant survives a roundtrip through its
    /// numeric (`u32`) representation.
    #[test]
    fn map_type() {
        use MapType::*;

        let variants = [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ];

        for variant in variants {
            // Discriminants must match after converting to u32 and back.
            let roundtripped = MapType::from(variant as u32);
            assert_eq!(discriminant(&variant), discriminant(&roundtripped));
        }
    }
}