// libbpf-rs: map.rs

1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::io;
9use std::marker::PhantomData;
10use std::mem;
11use std::mem::transmute;
12use std::ops::Deref;
13use std::os::unix::ffi::OsStrExt;
14use std::os::unix::io::AsFd;
15use std::os::unix::io::AsRawFd;
16use std::os::unix::io::BorrowedFd;
17use std::os::unix::io::FromRawFd;
18use std::os::unix::io::OwnedFd;
19use std::os::unix::io::RawFd;
20use std::path::Path;
21use std::ptr;
22use std::ptr::NonNull;
23use std::slice;
24use std::slice::from_raw_parts;
25
26use bitflags::bitflags;
27use libbpf_sys::bpf_map_info;
28use libbpf_sys::bpf_obj_get_info_by_fd;
29
30use crate::error;
31use crate::util;
32use crate::util::parse_ret_i32;
33use crate::util::validate_bpf_ret;
34use crate::AsRawLibbpf;
35use crate::Error;
36use crate::ErrorExt as _;
37use crate::Link;
38use crate::Mut;
39use crate::Result;
40
/// An immutable parsed but not yet loaded BPF map.
///
/// This is [`OpenMapImpl`] with the default (shared-access) marker type.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable parsed but not yet loaded BPF map.
///
/// This is [`OpenMapImpl`] with the [`Mut`] marker type, granting access to
/// the mutating `set_*` style operations.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;
45
/// Represents a parsed but not yet loaded BPF map.
///
/// This object exposes operations that need to happen before the map is created.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Non-NULL pointer to the underlying `libbpf_sys::bpf_map`.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper's lifetime to the borrow it was created from. `T` is a
    // mutability marker (`()` for shared access, `Mut` for mutable access)
    // and carries no data; `repr(transparent)` relies on that.
    _phantom: PhantomData<&'obj T>,
}
58
59impl<'obj> OpenMap<'obj> {
60    /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`.
61    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
62        // SAFETY: We inferred the address from a reference, which is always
63        //         valid.
64        Self {
65            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
66            _phantom: PhantomData,
67        }
68    }
69
70    /// Retrieve the [`OpenMap`]'s name.
71    pub fn name(&self) -> &OsStr {
72        // SAFETY: We ensured `ptr` is valid during construction.
73        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
74        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
75        //          NULL. We know `ptr` is not NULL.
76        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
77        OsStr::from_bytes(name_c_str.to_bytes())
78    }
79
80    /// Retrieve type of the map.
81    pub fn map_type(&self) -> MapType {
82        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
83        MapType::from(ty)
84    }
85
86    fn initial_value_raw(&self) -> (*mut u8, usize) {
87        let mut size = 0u64;
88        let ptr = unsafe {
89            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
90        };
91        (ptr.cast(), size as _)
92    }
93
94    /// Retrieve the initial value of the map.
95    pub fn initial_value(&self) -> Option<&[u8]> {
96        let (ptr, size) = self.initial_value_raw();
97        if ptr.is_null() {
98            None
99        } else {
100            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
101            Some(data)
102        }
103    }
104
105    /// Retrieve the maximum number of entries of the map.
106    pub fn max_entries(&self) -> u32 {
107        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
108    }
109}
110
111impl<'obj> OpenMapMut<'obj> {
112    /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`.
113    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
114        Self {
115            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
116            _phantom: PhantomData,
117        }
118    }
119
120    /// Retrieve the initial value of the map.
121    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
122        let (ptr, size) = self.initial_value_raw();
123        if ptr.is_null() {
124            None
125        } else {
126            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
127            Some(data)
128        }
129    }
130
131    /// Bind map to a particular network device.
132    ///
133    /// Used for offloading maps to hardware.
134    pub fn set_map_ifindex(&mut self, idx: u32) {
135        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
136    }
137
138    /// Set the initial value of the map.
139    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
140        let ret = unsafe {
141            libbpf_sys::bpf_map__set_initial_value(
142                self.ptr.as_ptr(),
143                data.as_ptr() as *const c_void,
144                data.len() as libbpf_sys::size_t,
145            )
146        };
147
148        util::parse_ret(ret)
149    }
150
151    /// Set the type of the map.
152    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
153        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
154        util::parse_ret(ret)
155    }
156
157    /// Set the key size of the map in bytes.
158    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
159        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
160        util::parse_ret(ret)
161    }
162
163    /// Set the value size of the map in bytes.
164    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
165        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
166        util::parse_ret(ret)
167    }
168
169    /// Set the maximum number of entries this map can have.
170    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
171        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
172        util::parse_ret(ret)
173    }
174
175    /// Set flags on this map.
176    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
177        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
178        util::parse_ret(ret)
179    }
180
181    /// Set the NUMA node for this map.
182    ///
183    /// This can be used to ensure that the map is allocated on a particular
184    /// NUMA node, which can be useful for performance-critical applications.
185    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
186        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
187        util::parse_ret(ret)
188    }
189
190    /// Set the inner map FD.
191    ///
192    /// This is used for nested maps, where the value type of the outer map is a pointer to the
193    /// inner map.
194    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
195        let ret = unsafe {
196            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
197        };
198        util::parse_ret(ret)
199    }
200
201    /// Set the `map_extra` field for this map.
202    ///
203    /// Allows users to pass additional data to the
204    /// kernel when loading the map. The kernel will store this value in the
205    /// `bpf_map_info` struct associated with the map.
206    ///
207    /// This can be used to pass data to the kernel that is not otherwise
208    /// representable via the existing `bpf_map_def` fields.
209    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
210        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
211        util::parse_ret(ret)
212    }
213
214    /// Set whether or not libbpf should automatically create this map during load phase.
215    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
216        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
217        util::parse_ret(ret)
218    }
219
220    /// Set where the map should be pinned.
221    ///
222    /// Note this does not actually create the pin.
223    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
224        let path_c = util::path_to_cstring(path)?;
225        let path_ptr = path_c.as_ptr();
226
227        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
228        util::parse_ret(ret)
229    }
230
231    /// Reuse an fd for a BPF map
232    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
233        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
234        util::parse_ret(ret)
235    }
236
237    /// Reuse an already-pinned map for `self`.
238    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
239        let cstring = util::path_to_cstring(path)?;
240
241        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
242        if fd < 0 {
243            return Err(Error::from(io::Error::last_os_error()));
244        }
245
246        let fd = unsafe { OwnedFd::from_raw_fd(fd) };
247
248        let reuse_result = self.reuse_fd(fd.as_fd());
249
250        reuse_result
251    }
252}
253
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    /// Allow read-only [`OpenMap`] methods to be called on an
    /// [`OpenMapMut`].
    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory
        //         representation of both types is the same. The `T` marker is
        //         a zero-sized `PhantomData` and does not affect layout.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
263
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    ///
    /// The pointer is guaranteed non-NULL; it was checked at construction.
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
272
273pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
274    let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
275    let fd = util::parse_ret_i32(fd).ok();
276    fd
277}
278
279/// Return the size of one value including padding for interacting with per-cpu
280/// maps. The values are aligned to 8 bytes.
281fn percpu_aligned_value_size<M>(map: &M) -> usize
282where
283    M: MapCore + ?Sized,
284{
285    let val_size = map.value_size() as usize;
286    util::roundup(val_size, 8)
287}
288
289/// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
290fn percpu_buffer_size<M>(map: &M) -> Result<usize>
291where
292    M: MapCore + ?Sized,
293{
294    let aligned_val_size = percpu_aligned_value_size(map);
295    let ncpu = crate::num_possible_cpus()?;
296    Ok(ncpu * aligned_val_size)
297}
298
299/// Apply a key check and return a null pointer in case of dealing with queue/stack/bloom-filter
300/// map, before passing the key to the bpf functions that support the map of type
301/// queue/stack/bloom-filter.
302fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
303where
304    M: MapCore + ?Sized,
305{
306    // For all they keyless maps we null out the key per documentation of libbpf
307    if map.key_size() == 0 && map.map_type().is_keyless() {
308        return ptr::null();
309    }
310
311    key.as_ptr() as *const c_void
312}
313
/// Internal function to return a value from a map into a buffer of the given size.
///
/// Returns `Ok(None)` when the element is not present, and an error for any
/// other failure. `out_size` is the number of bytes the kernel is expected to
/// write into the output buffer (the full per-cpu buffer size for per-cpu
/// maps).
fn lookup_raw<M>(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>>
where
    M: MapCore + ?Sized,
{
    // Validate the key length up front; the kernel would read exactly
    // `key_size` bytes from the pointer we pass.
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    // Uninitialized capacity; the kernel fills it on success.
    let mut out: Vec<u8> = Vec::with_capacity(out_size);

    let ret = unsafe {
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            // NULL for keyless map types (queue/stack/bloom filter).
            map_key(map, key),
            out.as_mut_ptr() as *mut c_void,
            flags.bits(),
        )
    };

    if ret == 0 {
        // SAFETY: On success the kernel wrote `out_size` bytes into the
        //         buffer, which has at least that much capacity.
        unsafe {
            out.set_len(out_size);
        }
        Ok(Some(out))
    } else {
        let err = io::Error::last_os_error();
        if err.kind() == io::ErrorKind::NotFound {
            // ENOENT means "no such element", which is not an error here.
            Ok(None)
        } else {
            Err(Error::from(err))
        }
    }
}
352
353/// Internal function to update a map. This does not check the length of the
354/// supplied value.
355fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
356where
357    M: MapCore + ?Sized,
358{
359    if key.len() != map.key_size() as usize {
360        return Err(Error::with_invalid_data(format!(
361            "key_size {} != {}",
362            key.len(),
363            map.key_size()
364        )));
365    };
366
367    let ret = unsafe {
368        libbpf_sys::bpf_map_update_elem(
369            map.as_fd().as_raw_fd(),
370            map_key(map, key),
371            value.as_ptr() as *const c_void,
372            flags.bits(),
373        )
374    };
375
376    util::parse_ret(ret)
377}
378
379/// Internal function to batch lookup (and delete) elements from a map.
380fn lookup_batch_raw<M>(
381    map: &M,
382    count: u32,
383    elem_flags: MapFlags,
384    flags: MapFlags,
385    delete: bool,
386) -> BatchedMapIter<'_>
387where
388    M: MapCore + ?Sized,
389{
390    #[allow(clippy::needless_update)]
391    let opts = libbpf_sys::bpf_map_batch_opts {
392        sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
393        elem_flags: elem_flags.bits(),
394        flags: flags.bits(),
395        // bpf_map_batch_opts might have padding fields on some platform
396        ..Default::default()
397    };
398
399    // for maps of type BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, LRU_PERCPU_HASH}
400    // the key size must be at least 4 bytes
401    let key_size = if map.map_type().is_hash_map() {
402        map.key_size().max(4)
403    } else {
404        map.key_size()
405    };
406
407    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
408}
409
410/// Intneral function that returns an error for per-cpu and bloom filter maps.
411fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
412where
413    M: MapCore + ?Sized,
414{
415    if map.map_type().is_bloom_filter() {
416        return Err(Error::with_invalid_data(
417            "lookup_bloom_filter() must be used for bloom filter maps",
418        ));
419    }
420    if map.map_type().is_percpu() {
421        return Err(Error::with_invalid_data(format!(
422            "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
423            map.map_type(),
424        )));
425    }
426
427    Ok(())
428}
429
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    /// Sealing trait: only types in this crate may implement [`MapCore`],
    /// since `Sealed` cannot be named (let alone implemented) downstream.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
439
440/// A trait representing core functionality common to fully initialized maps.
441pub trait MapCore: Debug + AsFd + private::Sealed {
442    /// Retrieve the map's name.
443    fn name(&self) -> &OsStr;
444
445    /// Retrieve type of the map.
446    fn map_type(&self) -> MapType;
447
448    /// Retrieve the size of the map's keys.
449    fn key_size(&self) -> u32;
450
451    /// Retrieve the size of the map's values.
452    fn value_size(&self) -> u32;
453
454    /// Retrieve `max_entries` of the map.
455    fn max_entries(&self) -> u32;
456
457    /// Fetch extra map information
458    #[inline]
459    fn info(&self) -> Result<MapInfo> {
460        MapInfo::new(self.as_fd())
461    }
462
463    /// Returns an iterator over keys in this map
464    ///
465    /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
466    /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
467    /// iteration becomes unpredictable.
468    fn keys(&self) -> MapKeyIter<'_> {
469        MapKeyIter::new(self.as_fd(), self.key_size())
470    }
471
472    /// Returns map value as `Vec` of `u8`.
473    ///
474    /// `key` must have exactly [`Self::key_size()`] elements.
475    ///
476    /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
477    /// must be used.
478    /// If the map is of type `bloom_filter` the function [`Self::lookup_bloom_filter()`] must be
479    /// used
480    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
481        check_not_bloom_or_percpu(self)?;
482        let out_size = self.value_size() as usize;
483        lookup_raw(self, key, flags, out_size)
484    }
485
486    /// Returns many elements in batch mode from the map.
487    ///
488    /// `count` specifies the batch size.
489    fn lookup_batch(
490        &self,
491        count: u32,
492        elem_flags: MapFlags,
493        flags: MapFlags,
494    ) -> Result<BatchedMapIter<'_>> {
495        check_not_bloom_or_percpu(self)?;
496        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
497    }
498
499    /// Returns many elements in batch mode from the map.
500    ///
501    /// `count` specifies the batch size.
502    fn lookup_and_delete_batch(
503        &self,
504        count: u32,
505        elem_flags: MapFlags,
506        flags: MapFlags,
507    ) -> Result<BatchedMapIter<'_>> {
508        check_not_bloom_or_percpu(self)?;
509        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
510    }
511
512    /// Returns if the given value is likely present in `bloom_filter` as `bool`.
513    ///
514    /// `value` must have exactly [`Self::value_size()`] elements.
515    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
516        let ret = unsafe {
517            libbpf_sys::bpf_map_lookup_elem(
518                self.as_fd().as_raw_fd(),
519                ptr::null(),
520                value.to_vec().as_mut_ptr() as *mut c_void,
521            )
522        };
523
524        if ret == 0 {
525            Ok(true)
526        } else {
527            let err = io::Error::last_os_error();
528            if err.kind() == io::ErrorKind::NotFound {
529                Ok(false)
530            } else {
531                Err(Error::from(err))
532            }
533        }
534    }
535
536    /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per per-cpu maps.
537    ///
538    /// For normal maps, [`Self::lookup()`] must be used.
539    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
540        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
541            return Err(Error::with_invalid_data(format!(
542                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
543                self.map_type(),
544            )));
545        }
546
547        let val_size = self.value_size() as usize;
548        let aligned_val_size = percpu_aligned_value_size(self);
549        let out_size = percpu_buffer_size(self)?;
550
551        let raw_res = lookup_raw(self, key, flags, out_size)?;
552        if let Some(raw_vals) = raw_res {
553            let mut out = Vec::new();
554            for chunk in raw_vals.chunks_exact(aligned_val_size) {
555                out.push(chunk[..val_size].to_vec());
556            }
557            Ok(Some(out))
558        } else {
559            Ok(None)
560        }
561    }
562
563    /// Deletes an element from the map.
564    ///
565    /// `key` must have exactly [`Self::key_size()`] elements.
566    fn delete(&self, key: &[u8]) -> Result<()> {
567        if key.len() != self.key_size() as usize {
568            return Err(Error::with_invalid_data(format!(
569                "key_size {} != {}",
570                key.len(),
571                self.key_size()
572            )));
573        };
574
575        let ret = unsafe {
576            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
577        };
578        util::parse_ret(ret)
579    }
580
581    /// Deletes many elements in batch mode from the map.
582    ///
583    /// `keys` must have exactly `Self::key_size() * count` elements.
584    fn delete_batch(
585        &self,
586        keys: &[u8],
587        count: u32,
588        elem_flags: MapFlags,
589        flags: MapFlags,
590    ) -> Result<()> {
591        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
592            return Err(Error::with_invalid_data(format!(
593                "batch key_size {} != {} * {}",
594                keys.len(),
595                self.key_size(),
596                count
597            )));
598        };
599
600        #[allow(clippy::needless_update)]
601        let opts = libbpf_sys::bpf_map_batch_opts {
602            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
603            elem_flags: elem_flags.bits(),
604            flags: flags.bits(),
605            // bpf_map_batch_opts might have padding fields on some platform
606            ..Default::default()
607        };
608
609        let mut count = count;
610        let ret = unsafe {
611            libbpf_sys::bpf_map_delete_batch(
612                self.as_fd().as_raw_fd(),
613                keys.as_ptr() as *const c_void,
614                &mut count,
615                &opts as *const libbpf_sys::bpf_map_batch_opts,
616            )
617        };
618        util::parse_ret(ret)
619    }
620
621    /// Same as [`Self::lookup()`] except this also deletes the key from the map.
622    ///
623    /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
624    /// and [`MapType::Stack`].
625    ///
626    /// `key` must have exactly [`Self::key_size()`] elements.
627    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
628        if key.len() != self.key_size() as usize {
629            return Err(Error::with_invalid_data(format!(
630                "key_size {} != {}",
631                key.len(),
632                self.key_size()
633            )));
634        };
635
636        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
637
638        let ret = unsafe {
639            libbpf_sys::bpf_map_lookup_and_delete_elem(
640                self.as_fd().as_raw_fd(),
641                map_key(self, key),
642                out.as_mut_ptr() as *mut c_void,
643            )
644        };
645
646        if ret == 0 {
647            unsafe {
648                out.set_len(self.value_size() as usize);
649            }
650            Ok(Some(out))
651        } else {
652            let err = io::Error::last_os_error();
653            if err.kind() == io::ErrorKind::NotFound {
654                Ok(None)
655            } else {
656                Err(Error::from(err))
657            }
658        }
659    }
660
661    /// Update an element.
662    ///
663    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
664    /// [`Self::value_size()`] elements.
665    ///
666    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
667    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
668        if self.map_type().is_percpu() {
669            return Err(Error::with_invalid_data(format!(
670                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
671                self.map_type(),
672            )));
673        }
674
675        if value.len() != self.value_size() as usize {
676            return Err(Error::with_invalid_data(format!(
677                "value_size {} != {}",
678                value.len(),
679                self.value_size()
680            )));
681        };
682
683        update_raw(self, key, value, flags)
684    }
685
686    /// Updates many elements in batch mode in the map
687    ///
688    /// `keys` must have exactly `Self::key_size() * count` elements. `values` must have exactly
689    /// `Self::key_size() * count` elements.
690    fn update_batch(
691        &self,
692        keys: &[u8],
693        values: &[u8],
694        count: u32,
695        elem_flags: MapFlags,
696        flags: MapFlags,
697    ) -> Result<()> {
698        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
699            return Err(Error::with_invalid_data(format!(
700                "batch key_size {} != {} * {}",
701                keys.len(),
702                self.key_size(),
703                count
704            )));
705        };
706
707        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
708            return Err(Error::with_invalid_data(format!(
709                "batch value_size {} != {} * {}",
710                values.len(),
711                self.value_size(),
712                count
713            )));
714        }
715
716        #[allow(clippy::needless_update)]
717        let opts = libbpf_sys::bpf_map_batch_opts {
718            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
719            elem_flags: elem_flags.bits(),
720            flags: flags.bits(),
721            // bpf_map_batch_opts might have padding fields on some platform
722            ..Default::default()
723        };
724
725        let mut count = count;
726        let ret = unsafe {
727            libbpf_sys::bpf_map_update_batch(
728                self.as_fd().as_raw_fd(),
729                keys.as_ptr() as *const c_void,
730                values.as_ptr() as *const c_void,
731                &mut count,
732                &opts as *const libbpf_sys::bpf_map_batch_opts,
733            )
734        };
735
736        util::parse_ret(ret)
737    }
738
739    /// Update an element in an per-cpu map with one value per cpu.
740    ///
741    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one
742    /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
743    /// with exactly [`Self::value_size()`] elements each.
744    ///
745    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
746    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
747        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
748            return Err(Error::with_invalid_data(format!(
749                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
750                self.map_type(),
751            )));
752        }
753
754        if values.len() != crate::num_possible_cpus()? {
755            return Err(Error::with_invalid_data(format!(
756                "number of values {} != number of cpus {}",
757                values.len(),
758                crate::num_possible_cpus()?
759            )));
760        };
761
762        let val_size = self.value_size() as usize;
763        let aligned_val_size = percpu_aligned_value_size(self);
764        let buf_size = percpu_buffer_size(self)?;
765
766        let mut value_buf = vec![0; buf_size];
767
768        for (i, val) in values.iter().enumerate() {
769            if val.len() != val_size {
770                return Err(Error::with_invalid_data(format!(
771                    "value size for cpu {} is {} != {}",
772                    i,
773                    val.len(),
774                    val_size
775                )));
776            }
777
778            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
779                .copy_from_slice(val);
780        }
781
782        update_raw(self, key, &value_buf, flags)
783    }
784}
785
/// An immutable loaded BPF map.
///
/// This is [`MapImpl`] with the default (shared-access) marker type.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable loaded BPF map.
///
/// This is [`MapImpl`] with the [`Mut`] marker type, granting access to
/// mutating operations such as pinning and attaching.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;
790
791/// Represents a libbpf-created map.
792///
793/// Some methods require working with raw bytes. You may find libraries such as
794/// [`plain`](https://crates.io/crates/plain) helpful.
795#[derive(Debug)]
796pub struct MapImpl<'obj, T = ()> {
797    ptr: NonNull<libbpf_sys::bpf_map>,
798    _phantom: PhantomData<&'obj T>,
799}
800
801impl<'obj> Map<'obj> {
802    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
803    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
804        // SAFETY: We inferred the address from a reference, which is always
805        //         valid.
806        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
807        assert!(
808            map_fd(ptr).is_some(),
809            "provided BPF map does not have file descriptor"
810        );
811
812        Self {
813            ptr,
814            _phantom: PhantomData,
815        }
816    }
817
818    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
819    /// file descriptor.
820    ///
821    /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
822    /// will be the result.
823    ///
824    /// # Safety
825    ///
826    /// The pointer must point to a loaded map.
827    #[doc(hidden)]
828    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
829        Self {
830            ptr,
831            _phantom: PhantomData,
832        }
833    }
834
835    /// Returns whether map is pinned or not flag
836    pub fn is_pinned(&self) -> bool {
837        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
838    }
839
840    /// Returns the `pin_path` if the map is pinned, otherwise, `None`
841    /// is returned.
842    pub fn get_pin_path(&self) -> Option<&OsStr> {
843        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
844        if path_ptr.is_null() {
845            // means map is not pinned
846            return None;
847        }
848        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
849        Some(OsStr::from_bytes(path_c_str.to_bytes()))
850    }
851}
852
853impl<'obj> MapMut<'obj> {
854    /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
855    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
856        // SAFETY: We inferred the address from a reference, which is always
857        //         valid.
858        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
859        assert!(
860            map_fd(ptr).is_some(),
861            "provided BPF map does not have file descriptor"
862        );
863
864        Self {
865            ptr,
866            _phantom: PhantomData,
867        }
868    }
869
870    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
871    /// this map to bpffs.
872    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
873        let path_c = util::path_to_cstring(path)?;
874        let path_ptr = path_c.as_ptr();
875
876        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
877        util::parse_ret(ret)
878    }
879
880    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
881    /// this map from bpffs.
882    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
883        let path_c = util::path_to_cstring(path)?;
884        let path_ptr = path_c.as_ptr();
885        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
886        util::parse_ret(ret)
887    }
888
889    /// Attach a struct ops map
890    pub fn attach_struct_ops(&mut self) -> Result<Link> {
891        if self.map_type() != MapType::StructOps {
892            return Err(Error::with_invalid_data(format!(
893                "Invalid map type ({:?}) for attach_struct_ops()",
894                self.map_type(),
895            )));
896        }
897
898        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
899        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
900        // SAFETY: the pointer came from libbpf and has been checked for errors.
901        let link = unsafe { Link::new(ptr) };
902        Ok(link)
903    }
904}
905
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // Reinterpret a `&MapMut` as a `&Map` so all immutable map operations
        // are available on the mutable wrapper.
        // SAFETY: Sound provided `MapImpl` is `#[repr(transparent)]` over the
        //         map pointer and the mutability marker is zero-sized — the
        //         same layout scheme `OpenMapImpl` uses earlier in this file.
        //         NOTE(review): `MapImpl`'s definition is outside this chunk;
        //         confirm it carries `#[repr(transparent)]`.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
913
914impl<T> AsFd for MapImpl<'_, T> {
915    #[inline]
916    fn as_fd(&self) -> BorrowedFd<'_> {
917        // SANITY: Our map must always have a file descriptor associated with
918        //         it.
919        let fd = map_fd(self.ptr).unwrap();
920        // SAFETY: `fd` is guaranteed to be valid for the lifetime of
921        //         the created object.
922        let fd = unsafe { BorrowedFd::borrow_raw(fd) };
923        fd
924    }
925}
926
927impl<T> MapCore for MapImpl<'_, T>
928where
929    T: Debug,
930{
931    fn name(&self) -> &OsStr {
932        // SAFETY: We ensured `ptr` is valid during construction.
933        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
934        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
935        //          NULL. We know `ptr` is not NULL.
936        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
937        OsStr::from_bytes(name_c_str.to_bytes())
938    }
939
940    #[inline]
941    fn map_type(&self) -> MapType {
942        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
943        MapType::from(ty)
944    }
945
946    #[inline]
947    fn key_size(&self) -> u32 {
948        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
949    }
950
951    #[inline]
952    fn value_size(&self) -> u32 {
953        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
954    }
955
956    #[inline]
957    fn max_entries(&self) -> u32 {
958        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
959    }
960}
961
impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        // The pointer remains owned by the enclosing BPF object; callers must
        // not free it or keep it past the object's lifetime.
        self.ptr
    }
}
971
/// A handle to a map. Handles can be duplicated and dropped.
///
/// While possible to [created directly][MapHandle::create], in many cases it is
/// useful to create such a handle from an existing [`Map`]:
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapHandle;
/// # let get_map = || -> &Map { todo!() };
/// let map: &Map = get_map();
/// let map_handle = MapHandle::try_from(map).unwrap();
/// ```
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapHandle {
    // Owned duplicate of the map's file descriptor; closed on drop.
    fd: OwnedFd,
    // Map properties cached at handle-creation time (see `from_fd`), so
    // queries don't require further bpf(2) calls.
    name: OsString,
    ty: MapType,
    key_size: u32,
    value_size: u32,
    max_entries: u32,
}
995
impl MapHandle {
    /// Create a bpf map whose data is not managed by libbpf.
    pub fn create<T: AsRef<OsStr>>(
        map_type: MapType,
        name: Option<T>,
        key_size: u32,
        value_size: u32,
        max_entries: u32,
        opts: &libbpf_sys::bpf_map_create_opts,
    ) -> Result<Self> {
        let name = match name {
            Some(name) => name.as_ref().to_os_string(),
            // The old version kernel don't support specifying map name.
            None => OsString::new(),
        };
        // The name must be NUL-free to be representable as a C string.
        let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
            Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
        })?;
        // Pass NULL instead of an empty string; `name_c_str` outlives the
        // `bpf_map_create` call below, so the pointer stays valid.
        let name_c_ptr = if name.is_empty() {
            ptr::null()
        } else {
            name_c_str.as_bytes_with_nul().as_ptr()
        };

        // SAFETY: `name_c_ptr` is NULL or points into the live `name_c_str`;
        //         all other arguments are plain values or valid references.
        let fd = unsafe {
            libbpf_sys::bpf_map_create(
                map_type.into(),
                name_c_ptr.cast(),
                key_size,
                value_size,
                max_entries,
                opts,
            )
        };
        // A negative return is a -errno-style error; otherwise `fd` is a
        // valid descriptor.
        let () = util::parse_ret(fd)?;

        Ok(Self {
            // SAFETY: A file descriptor coming from the `bpf_map_create`
            //         function is always suitable for ownership and can be
            //         cleaned up with close.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
            name,
            ty: map_type,
            key_size,
            value_size,
            max_entries,
        })
    }

    /// Open a previously pinned map from its path.
    ///
    /// # Panics
    /// If the path contains null bytes.
    pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        // Monomorphization guard: keep the generic shim thin and do the real
        // work in a non-generic function.
        fn inner(path: &Path) -> Result<MapHandle> {
            let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
            let fd = parse_ret_i32(unsafe {
                // SAFETY
                // p is never null since we allocated ourselves.
                libbpf_sys::bpf_obj_get(p.as_ptr())
            })?;
            MapHandle::from_fd(unsafe {
                // SAFETY
                // A file descriptor coming from the bpf_obj_get function is always suitable for
                // ownership and can be cleaned up with close.
                OwnedFd::from_raw_fd(fd)
            })
        }

        inner(path.as_ref())
    }

    /// Open a loaded map from its map id.
    pub fn from_map_id(id: u32) -> Result<Self> {
        parse_ret_i32(unsafe {
            // SAFETY
            // This function is always safe to call.
            libbpf_sys::bpf_map_get_fd_by_id(id)
        })
        .map(|fd| unsafe {
            // SAFETY
            // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
            // for ownership and can be cleaned up with close.
            OwnedFd::from_raw_fd(fd)
        })
        .and_then(Self::from_fd)
    }

    // Build a handle from an owned descriptor by querying the kernel for the
    // map's properties and caching them.
    fn from_fd(fd: OwnedFd) -> Result<Self> {
        let info = MapInfo::new(fd.as_fd())?;
        Ok(Self {
            fd,
            name: info.name()?.into(),
            ty: info.map_type(),
            key_size: info.info.key_size,
            value_size: info.info.value_size,
            max_entries: info.info.max_entries,
        })
    }

    /// Freeze the map as read-only from user space.
    ///
    /// Entries from a frozen map can no longer be updated or deleted with the
    /// `bpf()` system call. This operation is not reversible, and the map remains
    /// immutable from user space until its destruction. However, read and write
    /// permissions for BPF programs to the map remain unchanged.
    pub fn freeze(&self) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };

        util::parse_ret(ret)
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        // `path_c` outlives the call, keeping `path_ptr` valid throughout.
        let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        // A pin is just a bpffs file; removing the file unpins the map.
        remove_file(path).context("failed to remove pin map")
    }
}
1124
impl MapCore for MapHandle {
    // All properties were captured when the handle was created, so these are
    // cheap field reads rather than bpf(2) queries. They may go stale if the
    // underlying map is resized/renamed out-of-band, but the kernel does not
    // permit that for these attributes once a map is created.
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1151
impl AsFd for MapHandle {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // Borrow the owned descriptor; it stays open for the handle's
        // lifetime, satisfying `BorrowedFd`'s validity requirement.
        self.fd.as_fd()
    }
}
1158
1159impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
1160where
1161    T: Debug,
1162{
1163    type Error = Error;
1164
1165    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
1166        Ok(Self {
1167            fd: other
1168                .as_fd()
1169                .try_clone_to_owned()
1170                .context("failed to duplicate map file descriptor")?,
1171            name: other.name().to_os_string(),
1172            ty: other.map_type(),
1173            key_size: other.key_size(),
1174            value_size: other.value_size(),
1175            max_entries: other.max_entries(),
1176        })
1177    }
1178}
1179
1180impl TryFrom<&MapHandle> for MapHandle {
1181    type Error = Error;
1182
1183    fn try_from(other: &MapHandle) -> Result<Self> {
1184        Ok(Self {
1185            fd: other
1186                .as_fd()
1187                .try_clone_to_owned()
1188                .context("failed to duplicate map file descriptor")?,
1189            name: other.name().to_os_string(),
1190            ty: other.map_type(),
1191            key_size: other.key_size(),
1192            value_size: other.value_size(),
1193            max_entries: other.max_entries(),
1194        })
1195    }
1196}
1197
bitflags! {
    /// Flags to configure [`Map`] operations.
    // The raw values come straight from the kernel UAPI constants, widened to
    // `u64` as expected by the bpf(2) lookup/update commands.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY      = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST    = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK     = libbpf_sys::BPF_F_LOCK as _;
    }
}
1212
/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
// If you add a new per-cpu map, also update `is_percpu`.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    /// An unspecified map type.
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    /// A general purpose Hash map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html) for more details.
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    /// An Array map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    /// A program array map which holds only the file descriptors to other eBPF programs. Used for
    /// tail-calls.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PROG_ARRAY/) for more details.
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    /// An array map which holds only the file descriptors to perf events.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERF_EVENT_ARRAY/) for more details.
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    /// A Hash map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    /// An Array map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    // NOTE(review): undocumented upstream; presumably stores kernel/user
    // stack traces (cf. `BPF_MAP_TYPE_STACK_TRACE`) — confirm before
    // documenting publicly.
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    // NOTE(review): undocumented upstream; name suggests an array of cgroup
    // references (cf. `BPF_MAP_TYPE_CGROUP_ARRAY`) — confirm before
    // documenting publicly.
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    /// A Hash map with least recently used (LRU) eviction policy.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#bpf-map-type-lru-hash-and-variants) for more details.
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    /// A Hash map with least recently used (LRU) eviction policy with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    /// A Longest Prefix Match (LPM) algorithm based map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_lpm_trie.html) for more details.
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    /// An array map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    /// An array map holds references to a socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    /// A map that redirects raw XDP frames to another CPU.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cpumap.html) for more details.
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    /// A map that redirects raw XDP frames to `AF_XDP` sockets (XSKs), a new type of address
    /// family in the kernel that allows redirection of frames from a driver to user space
    /// without having to traverse the full network stack.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_xskmap.html) for more details.
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    /// A Hash map that holds references to sockets via their socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    /// Deprecated. Use `CGrpStorage` instead.
    ///
    /// A Local storage for cgroups.
    /// Only available with `CONFIG_CGROUP_BPF` and to programs that attach to cgroups.
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgroup_storage.html) for more details.
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    /// A Local storage for cgroups. Only available with `CONFIG_CGROUPS`.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgrp_storage.html) for more details.
    /// See also [Difference between cgrp_storage and cgroup_storage](https://docs.kernel.org/bpf/map_cgrp_storage.html#difference-between-bpf-map-type-cgrp-storage-and-bpf-map-type-cgroup-storage)
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    /// A map that holds references to sockets with `SO_REUSEPORT` option set.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_REUSEPORT_SOCKARRAY/) for more details.
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    /// A per-CPU variant of [`BPF_MAP_TYPE_CGROUP_STORAGE`][`MapType::CgroupStorage`].
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) for more details.
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    /// A FIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    /// A LIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    /// A socket-local storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sk_storage.html) for more details.
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    /// A Hash map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    /// A specialized map that act as implementations of "struct ops" structures defined in the
    /// kernel.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_STRUCT_OPS/) for more details.
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    /// A ring buffer map to efficiently send large amount of data.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_RINGBUF/) for more details.
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    /// A storage map that holds data keyed on inodes.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_INODE_STORAGE/) for more details.
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    /// A storage map that holds data keyed on tasks.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_TASK_STORAGE/) for more details.
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    /// Bloom filters are a space-efficient probabilistic data structure used to quickly test
    /// whether an element exists in a set. In a bloom filter, false positives are possible
    /// whereas false negatives are not.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_bloom_filter.html) for more details.
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    // NOTE(review): undocumented upstream; name suggests a user-space
    // producible ring buffer (cf. `BPF_MAP_TYPE_USER_RINGBUF`) — confirm
    // before documenting publicly.
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// We choose to specify our own "unknown" type here b/c it's really up to the kernel
    /// to decide if it wants to reject the map. If it accepts it, it just means whoever
    /// using this library is a bit out of date.
    Unknown = u32::MAX,
}
1363
1364impl MapType {
1365    /// Returns if the map is of one of the per-cpu types.
1366    pub fn is_percpu(&self) -> bool {
1367        matches!(
1368            self,
1369            MapType::PercpuArray
1370                | MapType::PercpuHash
1371                | MapType::LruPercpuHash
1372                | MapType::PercpuCgroupStorage
1373        )
1374    }
1375
1376    /// Returns if the map is of one of the hashmap types.
1377    pub fn is_hash_map(&self) -> bool {
1378        matches!(
1379            self,
1380            MapType::Hash | MapType::PercpuHash | MapType::LruHash | MapType::LruPercpuHash
1381        )
1382    }
1383
1384    /// Returns if the map is keyless map type as per documentation of libbpf
1385    /// Keyless map types are: Queues, Stacks and Bloom Filters
1386    fn is_keyless(&self) -> bool {
1387        matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter)
1388    }
1389
1390    /// Returns if the map is of bloom filter type
1391    pub fn is_bloom_filter(&self) -> bool {
1392        MapType::BloomFilter.eq(self)
1393    }
1394
1395    /// Detects if host kernel supports this BPF map type.
1396    ///
1397    /// Make sure the process has required set of CAP_* permissions (or runs as
1398    /// root) when performing feature checking.
1399    pub fn is_supported(&self) -> Result<bool> {
1400        let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1401        match ret {
1402            0 => Ok(false),
1403            1 => Ok(true),
1404            _ => Err(Error::from_raw_os_error(-ret)),
1405        }
1406    }
1407}
1408
impl From<u32> for MapType {
    /// Convert a raw kernel map-type value into a `MapType`, mapping any
    /// unrecognized value to [`MapType::Unknown`].
    fn from(value: u32) -> Self {
        use MapType::*;

        // Guard patterns (`x if x == Variant as u32`) are used because the
        // variants' discriminants are non-literal constants from libbpf_sys
        // and thus cannot appear directly as match patterns.
        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            _ => Unknown,
        }
    }
}
1450
impl From<MapType> for u32 {
    fn from(value: MapType) -> Self {
        // `MapType` is `#[repr(u32)]`, so the cast yields the kernel's raw
        // map-type value.
        value as u32
    }
}
1456
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // Key returned by the previous iteration; `None` before the first call so
    // the kernel starts from the beginning.
    prev: Option<Vec<u8>>,
    // Scratch buffer (one key long) the kernel writes the next key into.
    next: Vec<u8>,
}
1464
impl<'map> MapKeyIter<'map> {
    /// Create an iterator over the keys of the map behind `map_fd`, where
    /// every key is `key_size` bytes long.
    fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
        Self {
            map_fd,
            prev: None,
            next: vec![0; key_size as usize],
        }
    }
}
1474
1475impl Iterator for MapKeyIter<'_> {
1476    type Item = Vec<u8>;
1477
1478    fn next(&mut self) -> Option<Self::Item> {
1479        let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);
1480
1481        let ret = unsafe {
1482            libbpf_sys::bpf_map_get_next_key(
1483                self.map_fd.as_raw_fd(),
1484                prev as _,
1485                self.next.as_mut_ptr() as _,
1486            )
1487        };
1488        if ret != 0 {
1489            None
1490        } else {
1491            self.prev = Some(self.next.clone());
1492            Some(self.next.clone())
1493        }
1494    }
1495}
1496
/// An iterator over batches of key value pairs of a BPF map.
#[derive(Debug)]
pub struct BatchedMapIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // Whether each batch is deleted from the map after being read.
    delete: bool,
    // Requested number of entries per batch.
    count: usize,
    // Size of a single key, in bytes.
    key_size: usize,
    // Size of a single value, in bytes.
    value_size: usize,
    // Flat buffers holding the keys/values of the currently loaded batch.
    keys: Vec<u8>,
    values: Vec<u8>,
    // Kernel batch cursor from the previous lookup; `None` before the first.
    prev: Option<Vec<u8>>,
    // Scratch buffer the kernel writes the next batch cursor into.
    next: Vec<u8>,
    // Options forwarded to the batch lookup syscall.
    batch_opts: libbpf_sys::bpf_map_batch_opts,
    // Position within the current batch; `None` means no batch is loaded yet
    // or a lookup failed.
    index: Option<usize>,
}
1512
1513impl<'map> BatchedMapIter<'map> {
1514    fn new(
1515        map_fd: BorrowedFd<'map>,
1516        count: u32,
1517        key_size: u32,
1518        value_size: u32,
1519        batch_opts: libbpf_sys::bpf_map_batch_opts,
1520        delete: bool,
1521    ) -> Self {
1522        Self {
1523            map_fd,
1524            delete,
1525            count: count as usize,
1526            key_size: key_size as usize,
1527            value_size: value_size as usize,
1528            keys: vec![0; (count * key_size) as usize],
1529            values: vec![0; (count * value_size) as usize],
1530            prev: None,
1531            next: vec![0; key_size as usize],
1532            batch_opts,
1533            index: None,
1534        }
1535    }
1536
1537    fn lookup_next_batch(&mut self) {
1538        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
1539        let mut count = self.count as u32;
1540
1541        let ret = unsafe {
1542            let lookup_fn = if self.delete {
1543                libbpf_sys::bpf_map_lookup_and_delete_batch
1544            } else {
1545                libbpf_sys::bpf_map_lookup_batch
1546            };
1547            lookup_fn(
1548                self.map_fd.as_raw_fd(),
1549                prev.cast(),
1550                self.next.as_mut_ptr().cast(),
1551                self.keys.as_mut_ptr().cast(),
1552                self.values.as_mut_ptr().cast(),
1553                &mut count,
1554                &self.batch_opts,
1555            )
1556        };
1557
1558        if let Err(e) = util::parse_ret(ret) {
1559            match e.kind() {
1560                // in this case we can trust the returned count value
1561                error::ErrorKind::NotFound => {}
1562                // retry with same input arguments
1563                error::ErrorKind::Interrupted => {
1564                    return self.lookup_next_batch();
1565                }
1566                _ => {
1567                    self.index = None;
1568                    return;
1569                }
1570            }
1571        }
1572
1573        self.prev = Some(self.next.clone());
1574        self.index = Some(0);
1575
1576        unsafe {
1577            self.keys.set_len(self.key_size * count as usize);
1578            self.values.set_len(self.value_size * count as usize);
1579        }
1580    }
1581}
1582
impl Iterator for BatchedMapIter<'_> {
    type Item = (Vec<u8>, Vec<u8>);

    fn next(&mut self) -> Option<Self::Item> {
        // Decide whether a new batch must be fetched from the kernel.
        let load_next_batch = match self.index {
            Some(index) => {
                // Every entry of the current batch has been handed out ...
                let batch_finished = index * self.key_size >= self.keys.len();
                // ... and the previous batch came back short, meaning the map
                // was exhausted (shorter-than-requested batch == last batch).
                let last_batch = self.keys.len() < self.key_size * self.count;
                batch_finished && !last_batch
            }
            // No batch has been loaded yet.
            None => true,
        };

        if load_next_batch {
            self.lookup_next_batch();
        }

        // `index` is `None` if loading the batch failed — stop iterating.
        let index = self.index?;
        let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
        let val = self
            .values
            .chunks_exact(self.value_size)
            .nth(index)?
            .to_vec();

        self.index = Some(index + 1);
        Some((key, val))
    }
}
1612
/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
/// provides the ability to retrieve the details of a certain map.
///
/// Instances are created from a map file descriptor via [`MapInfo::new`].
#[derive(Debug)]
pub struct MapInfo {
    /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
    pub info: bpf_map_info,
}
1620
1621impl MapInfo {
1622    /// Create a `MapInfo` object from a fd.
1623    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
1624        let mut map_info = bpf_map_info::default();
1625        let mut size = mem::size_of_val(&map_info) as u32;
1626        // SAFETY: All pointers are derived from references and hence valid.
1627        let () = util::parse_ret(unsafe {
1628            bpf_obj_get_info_by_fd(
1629                fd.as_raw_fd(),
1630                &mut map_info as *mut bpf_map_info as *mut c_void,
1631                &mut size as *mut u32,
1632            )
1633        })?;
1634        Ok(Self { info: map_info })
1635    }
1636
1637    /// Get the map type
1638    #[inline]
1639    pub fn map_type(&self) -> MapType {
1640        MapType::from(self.info.type_)
1641    }
1642
1643    /// Get the name of this map.
1644    ///
1645    /// Returns error if the underlying data in the structure is not a valid
1646    /// utf-8 string.
1647    pub fn name<'a>(&self) -> Result<&'a str> {
1648        // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 has the same size.
1649        let char_slice =
1650            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };
1651
1652        util::c_char_slice_to_cstr(char_slice)
1653            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
1654            .to_str()
1655            .map_err(Error::with_invalid_data)
1656    }
1657
1658    /// Get the map flags.
1659    #[inline]
1660    pub fn flags(&self) -> MapFlags {
1661        MapFlags::from_bits_truncate(self.info.map_flags as u64)
1662    }
1663}
1664
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Verify that every known `MapType` variant survives a
    /// `MapType -> u32 -> MapType` round trip with its discriminant intact.
    #[test]
    fn map_type() {
        use MapType::*;

        for t in [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ] {
            // check if discriminants match after a roundtrip conversion
            assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32)));
        }
    }
}