//! Wrappers for fully-initialized and not-yet-loaded BPF maps (`map.rs`).
1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::fs::File;
9use std::io;
10use std::io::BufRead as _;
11use std::io::BufReader;
12use std::marker::PhantomData;
13use std::mem;
14use std::mem::transmute;
15use std::ops::Deref;
16use std::os::unix::ffi::OsStrExt;
17use std::os::unix::io::AsFd;
18use std::os::unix::io::AsRawFd;
19use std::os::unix::io::BorrowedFd;
20use std::os::unix::io::FromRawFd;
21use std::os::unix::io::OwnedFd;
22use std::os::unix::io::RawFd;
23use std::path::Path;
24use std::ptr;
25use std::ptr::NonNull;
26use std::slice;
27use std::slice::from_raw_parts;
28
29use bitflags::bitflags;
30use libbpf_sys::bpf_map_info;
31use libbpf_sys::bpf_obj_get_info_by_fd;
32
33use crate::error;
34use crate::util;
35use crate::util::parse_ret_i32;
36use crate::util::validate_bpf_ret;
37use crate::AsRawLibbpf;
38use crate::Error;
39use crate::ErrorExt as _;
40use crate::Link;
41use crate::Mut;
42use crate::ProgramType;
43use crate::Result;
44
/// An immutable parsed but not yet loaded BPF map.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable parsed but not yet loaded BPF map.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;

/// Represents a parsed but not yet loaded BPF map.
///
/// This object exposes operations that need to happen before the map is created.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Non-null pointer to the underlying libbpf map object.
    // `repr(transparent)` guarantees this struct is layout-compatible with
    // the pointer alone, which the `Deref<Target = OpenMap>` impl for
    // `OpenMapMut` relies on for its transmute.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper to the `'obj` borrow of the owning BPF object and
    // carries the mutability marker `T` (`()` = shared, `Mut` = exclusive).
    _phantom: PhantomData<&'obj T>,
}
62
impl<'obj> OpenMap<'obj> {
    /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`.
    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        Self {
            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the [`OpenMap`]'s name.
    pub fn name(&self) -> &'obj OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        //          NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve type of the map.
    pub fn map_type(&self) -> MapType {
        // SAFETY: `ptr` is valid for the lifetime of `self`.
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    // Returns the raw pointer to the map's initial value area and its size in
    // bytes. The pointer is NULL for map types without an initial value
    // (i.e. non-datasec maps).
    fn initial_value_raw(&self) -> (*mut u8, usize) {
        let mut size = 0u64;
        // SAFETY: `ptr` is valid; `size` outlives the call and is written by
        //         libbpf through the out-pointer.
        let ptr = unsafe {
            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
        };
        (ptr.cast(), size as _)
    }

    /// Retrieve the initial value of the map.
    pub fn initial_value(&self) -> Option<&[u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: A non-NULL pointer from `bpf_map__initial_value` refers
            //         to `size` bytes owned by the map for `'obj`.
            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Retrieve the maximum number of entries of the map.
    pub fn max_entries(&self) -> u32 {
        // SAFETY: `ptr` is valid for the lifetime of `self`.
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }

    /// Return `true` if the map is set to be auto-created during load, `false` otherwise.
    pub fn autocreate(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self`.
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
119
120impl<'obj> OpenMapMut<'obj> {
121    /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`.
122    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
123        Self {
124            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
125            _phantom: PhantomData,
126        }
127    }
128
129    /// Retrieve the initial value of the map.
130    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
131        let (ptr, size) = self.initial_value_raw();
132        if ptr.is_null() {
133            None
134        } else {
135            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
136            Some(data)
137        }
138    }
139
140    /// Bind map to a particular network device.
141    ///
142    /// Used for offloading maps to hardware.
143    pub fn set_map_ifindex(&mut self, idx: u32) {
144        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
145    }
146
147    /// Set the initial value of the map.
148    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
149        let ret = unsafe {
150            libbpf_sys::bpf_map__set_initial_value(
151                self.ptr.as_ptr(),
152                data.as_ptr() as *const c_void,
153                data.len() as libbpf_sys::size_t,
154            )
155        };
156
157        util::parse_ret(ret)
158    }
159
160    /// Set the type of the map.
161    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
162        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
163        util::parse_ret(ret)
164    }
165
166    /// Set the key size of the map in bytes.
167    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
168        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
169        util::parse_ret(ret)
170    }
171
172    /// Set the value size of the map in bytes.
173    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
174        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
175        util::parse_ret(ret)
176    }
177
178    /// Set the maximum number of entries this map can have.
179    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
180        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
181        util::parse_ret(ret)
182    }
183
184    /// Set flags on this map.
185    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
186        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
187        util::parse_ret(ret)
188    }
189
190    /// Set the NUMA node for this map.
191    ///
192    /// This can be used to ensure that the map is allocated on a particular
193    /// NUMA node, which can be useful for performance-critical applications.
194    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
195        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
196        util::parse_ret(ret)
197    }
198
199    /// Set the inner map FD.
200    ///
201    /// This is used for nested maps, where the value type of the outer map is a pointer to the
202    /// inner map.
203    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
204        let ret = unsafe {
205            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
206        };
207        util::parse_ret(ret)
208    }
209
210    /// Set the `map_extra` field for this map.
211    ///
212    /// Allows users to pass additional data to the
213    /// kernel when loading the map. The kernel will store this value in the
214    /// `bpf_map_info` struct associated with the map.
215    ///
216    /// This can be used to pass data to the kernel that is not otherwise
217    /// representable via the existing `bpf_map_def` fields.
218    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
219        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
220        util::parse_ret(ret)
221    }
222
223    /// Set whether or not libbpf should automatically create this map during load phase.
224    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
225        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
226        util::parse_ret(ret)
227    }
228
229    /// Set where the map should be pinned.
230    ///
231    /// Note this does not actually create the pin.
232    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
233        let path_c = util::path_to_cstring(path)?;
234        let path_ptr = path_c.as_ptr();
235
236        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
237        util::parse_ret(ret)
238    }
239
240    /// Reuse an fd for a BPF map
241    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
242        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
243        util::parse_ret(ret)
244    }
245
246    /// Reuse an already-pinned map for `self`.
247    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
248        let cstring = util::path_to_cstring(path)?;
249
250        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
251        if fd < 0 {
252            return Err(Error::from(io::Error::last_os_error()));
253        }
254
255        let fd = unsafe { OwnedFd::from_raw_fd(fd) };
256
257        let reuse_result = self.reuse_fd(fd.as_fd());
258
259        reuse_result
260    }
261}
262
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory
        //         representation of both types is the same. Only the
        //         zero-sized `PhantomData` marker differs between the two
        //         instantiations, so reinterpreting the reference is sound.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
272
// Implemented for both the shared (`()`) and exclusive (`Mut`) variants.
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
281
282pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
283    let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
284    let fd = util::parse_ret_i32(fd).ok();
285    fd
286}
287
288/// Return the size of one value including padding for interacting with per-cpu
289/// maps. The values are aligned to 8 bytes.
290fn percpu_aligned_value_size<M>(map: &M) -> usize
291where
292    M: MapCore + ?Sized,
293{
294    let val_size = map.value_size() as usize;
295    util::roundup(val_size, 8)
296}
297
298/// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
299fn percpu_buffer_size<M>(map: &M) -> Result<usize>
300where
301    M: MapCore + ?Sized,
302{
303    let aligned_val_size = percpu_aligned_value_size(map);
304    let ncpu = crate::num_possible_cpus()?;
305    Ok(ncpu * aligned_val_size)
306}
307
308/// Apply a key check and return a null pointer in case of dealing with queue/stack/bloom-filter
309/// map, before passing the key to the bpf functions that support the map of type
310/// queue/stack/bloom-filter.
311fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
312where
313    M: MapCore + ?Sized,
314{
315    // For all they keyless maps we null out the key per documentation of libbpf
316    if map.key_size() == 0 && map.map_type().is_keyless() {
317        return ptr::null();
318    }
319
320    key.as_ptr() as *const c_void
321}
322
/// Internal function to perform a map lookup and write the value into raw pointer.
/// Returns `Ok(true)` if the key was found, `Ok(false)` if not found, or an error.
fn lookup_raw<M>(
    map: &M,
    key: &[u8],
    value: &mut [mem::MaybeUninit<u8>],
    flags: MapFlags,
) -> Result<bool>
where
    M: MapCore + ?Sized,
{
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    }

    // Make sure the internal users of this function pass the expected buffer size
    // (full per-cpu buffer for per-cpu maps, a single value otherwise).
    debug_assert_eq!(
        value.len(),
        if map.map_type().is_percpu() {
            percpu_buffer_size(map).unwrap()
        } else {
            map.value_size() as usize
        }
    );

    let ret = unsafe {
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            // TODO: Use `MaybeUninit::slice_as_mut_ptr` once stable.
            value.as_mut_ptr().cast(),
            flags.bits(),
        )
    };

    if ret == 0 {
        Ok(true)
    } else {
        // Read errno immediately after the failed call; a missing key
        // (ENOENT) is reported as `Ok(false)` rather than an error.
        let err = io::Error::last_os_error();
        if err.kind() == io::ErrorKind::NotFound {
            Ok(false)
        } else {
            Err(Error::from(err))
        }
    }
}
373
374/// Internal function to return a value from a map into a buffer of the given size.
375fn lookup_raw_vec<M>(
376    map: &M,
377    key: &[u8],
378    flags: MapFlags,
379    out_size: usize,
380) -> Result<Option<Vec<u8>>>
381where
382    M: MapCore + ?Sized,
383{
384    // Allocate without initializing (avoiding memset)
385    let mut out = Vec::with_capacity(out_size);
386
387    match lookup_raw(map, key, out.spare_capacity_mut(), flags)? {
388        true => {
389            // SAFETY: `lookup_raw` successfully filled the buffer
390            unsafe {
391                out.set_len(out_size);
392            }
393            Ok(Some(out))
394        }
395        false => Ok(None),
396    }
397}
398
399/// Internal function to update a map. This does not check the length of the
400/// supplied value.
401fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
402where
403    M: MapCore + ?Sized,
404{
405    if key.len() != map.key_size() as usize {
406        return Err(Error::with_invalid_data(format!(
407            "key_size {} != {}",
408            key.len(),
409            map.key_size()
410        )));
411    };
412
413    let ret = unsafe {
414        libbpf_sys::bpf_map_update_elem(
415            map.as_fd().as_raw_fd(),
416            map_key(map, key),
417            value.as_ptr() as *const c_void,
418            flags.bits(),
419        )
420    };
421
422    util::parse_ret(ret)
423}
424
/// Internal function to batch lookup (and delete) elements from a map.
fn lookup_batch_raw<M>(
    map: &M,
    count: u32,
    elem_flags: MapFlags,
    flags: MapFlags,
    delete: bool,
) -> BatchedMapIter<'_>
where
    M: MapCore + ?Sized,
{
    #[allow(clippy::needless_update)]
    let opts = libbpf_sys::bpf_map_batch_opts {
        sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
        elem_flags: elem_flags.bits(),
        flags: flags.bits(),
        // bpf_map_batch_opts might have padding fields on some platform
        ..Default::default()
    };

    // for maps of type BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, LRU_PERCPU_HASH}
    // the key size must be at least 4 bytes
    let key_size = if map.map_type().is_hash_map() {
        map.key_size().max(4)
    } else {
        map.key_size()
    };

    // The iterator drives the actual BPF_MAP_LOOKUP_BATCH /
    // BPF_MAP_LOOKUP_AND_DELETE_BATCH syscalls lazily.
    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
}
455
456/// Intneral function that returns an error for per-cpu and bloom filter maps.
457fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
458where
459    M: MapCore + ?Sized,
460{
461    if map.map_type().is_bloom_filter() {
462        return Err(Error::with_invalid_data(
463            "lookup_bloom_filter() must be used for bloom filter maps",
464        ));
465    }
466    if map.map_type().is_percpu() {
467        return Err(Error::with_invalid_data(format!(
468            "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
469            map.map_type(),
470        )));
471    }
472
473    Ok(())
474}
475
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    // Sealing trait: because downstream crates cannot name this trait, they
    // cannot implement `MapCore`, so adding provided methods to `MapCore`
    // is not a breaking change.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
485
/// A trait representing core functionality common to fully initialized maps.
pub trait MapCore: Debug + AsFd + private::Sealed {
    /// Retrieve the map's name.
    fn name(&self) -> &OsStr;

    /// Retrieve type of the map.
    fn map_type(&self) -> MapType;

    /// Retrieve the size of the map's keys.
    fn key_size(&self) -> u32;

    /// Retrieve the size of the map's values.
    fn value_size(&self) -> u32;

    /// Retrieve `max_entries` of the map.
    fn max_entries(&self) -> u32;

    /// Fetch extra map information
    #[inline]
    fn info(&self) -> Result<MapInfo> {
        MapInfo::new(self.as_fd())
    }

    /// Query map information from `/proc/self/fdinfo`.
    ///
    /// This provides information not available through [`MapInfo`],
    /// such as [`memlock`][MapFdInfo::memlock] (memory usage).
    #[inline]
    fn query_fdinfo(&self) -> Result<MapFdInfo> {
        MapFdInfo::from_fd(self.as_fd())
    }

    /// Returns an iterator over keys in this map
    ///
    /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
    /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
    /// iteration becomes unpredictable.
    fn keys(&self) -> MapKeyIter<'_> {
        MapKeyIter::new(self.as_fd(), self.key_size())
    }

    /// Returns map value as `Vec` of `u8`.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    ///
    /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
    /// must be used.
    /// If the map is of type `bloom_filter` the function [`Self::lookup_bloom_filter()`] must be
    /// used
    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
        check_not_bloom_or_percpu(self)?;
        let out_size = self.value_size() as usize;
        lookup_raw_vec(self, key, flags, out_size)
    }

    /// Looks up a map value into a pre-allocated buffer, avoiding allocation.
    ///
    /// This method provides a zero-allocation alternative to [`Self::lookup()`].
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    /// `value` must have exactly [`Self::value_size()`] elements.
    ///
    /// Returns `Ok(true)` if the key was found and the buffer was filled,
    /// `Ok(false)` if the key was not found, or an error.
    ///
    /// If the map is one of the per-cpu data structures, this function cannot be used.
    /// If the map is of type `bloom_filter`, this function cannot be used.
    fn lookup_into(&self, key: &[u8], value: &mut [u8], flags: MapFlags) -> Result<bool> {
        check_not_bloom_or_percpu(self)?;

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value buffer size {} != {}",
                value.len(),
                self.value_size()
            )));
        }

        // SAFETY: `u8` and `MaybeUninit<u8>` have the same in-memory representation.
        let value = unsafe {
            slice::from_raw_parts_mut::<mem::MaybeUninit<u8>>(
                value.as_mut_ptr().cast(),
                value.len(),
            )
        };
        lookup_raw(self, key, value, flags)
    }

    /// Returns many elements in batch mode from the map.
    ///
    /// `count` specifies the batch size.
    fn lookup_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
    }

    /// Returns many elements in batch mode from the map.
    ///
    /// Like [`Self::lookup_batch()`], but also deletes each returned element.
    ///
    /// `count` specifies the batch size.
    fn lookup_and_delete_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
    }

    /// Returns if the given value is likely present in `bloom_filter` as `bool`.
    ///
    /// `value` must have exactly [`Self::value_size()`] elements.
    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
        // NOTE(review): the `to_vec()` copy exists only to obtain a mutable
        // pointer; the kernel treats the "value" as the element to test for
        // bloom filter maps and the copied buffer is discarded afterwards.
        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_elem(
                self.as_fd().as_raw_fd(),
                ptr::null(),
                value.to_vec().as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            Ok(true)
        } else {
            // ENOENT means "definitely not present"; anything else is a
            // genuine failure.
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(false)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per-cpu maps.
    ///
    /// For normal maps, [`Self::lookup()`] must be used.
    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let out_size = percpu_buffer_size(self)?;

        let raw_res = lookup_raw_vec(self, key, flags, out_size)?;
        if let Some(raw_vals) = raw_res {
            // Split the flat per-cpu buffer into one value per CPU, dropping
            // the per-slot alignment padding.
            let mut out = Vec::new();
            for chunk in raw_vals.chunks_exact(aligned_val_size) {
                out.push(chunk[..val_size].to_vec());
            }
            Ok(Some(out))
        } else {
            Ok(None)
        }
    }

    /// Deletes an element from the map.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    fn delete(&self, key: &[u8]) -> Result<()> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let ret = unsafe {
            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
        };
        util::parse_ret(ret)
    }

    /// Deletes many elements in batch mode from the map.
    ///
    /// `keys` must have exactly `Self::key_size() * count` elements.
    fn delete_batch(
        &self,
        keys: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        // NOTE(review): a `count` of 0 makes these divisions panic with a
        // divide-by-zero; callers must pass a non-zero count. Consider
        // returning an error instead — confirm intended contract.
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            // bpf_map_batch_opts might have padding fields on some platform
            ..Default::default()
        };

        // The kernel updates `count` in place to the number of elements it
        // actually processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_delete_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };
        util::parse_ret(ret)
    }

    /// Same as [`Self::lookup()`] except this also deletes the key from the map.
    ///
    /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
    /// and [`MapType::Stack`].
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);

        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_and_delete_elem(
                self.as_fd().as_raw_fd(),
                map_key(self, key),
                out.as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            // SAFETY: on success the kernel wrote `value_size` bytes into the
            //         spare capacity of `out`.
            unsafe {
                out.set_len(self.value_size() as usize);
            }
            Ok(Some(out))
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Update an element.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
    /// [`Self::value_size()`] elements.
    ///
    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
        if self.map_type().is_percpu() {
            return Err(Error::with_invalid_data(format!(
                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value_size {} != {}",
                value.len(),
                self.value_size()
            )));
        };

        update_raw(self, key, value, flags)
    }

    /// Updates many elements in batch mode in the map
    ///
    /// `keys` must have exactly `Self::key_size() * count` elements. `values` must have exactly
    /// `Self::value_size() * count` elements.
    fn update_batch(
        &self,
        keys: &[u8],
        values: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        // NOTE(review): as in `delete_batch`, `count == 0` panics on the
        // division below — confirm intended contract.
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch value_size {} != {} * {}",
                values.len(),
                self.value_size(),
                count
            )));
        }

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            // bpf_map_batch_opts might have padding fields on some platform
            ..Default::default()
        };

        // The kernel updates `count` in place to the number of elements it
        // actually processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_update_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                values.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };

        util::parse_ret(ret)
    }

    /// Update an element in an per-cpu map with one value per cpu.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one
    /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
    /// with exactly [`Self::value_size()`] elements each.
    ///
    /// For non-per-cpu maps, [`Self::update()`] must be used.
    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if values.len() != crate::num_possible_cpus()? {
            return Err(Error::with_invalid_data(format!(
                "number of values {} != number of cpus {}",
                values.len(),
                crate::num_possible_cpus()?
            )));
        };

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let buf_size = percpu_buffer_size(self)?;

        // Flatten the per-cpu values into one buffer with 8-byte-aligned
        // slots, zero-filling the alignment padding.
        let mut value_buf = vec![0; buf_size];

        for (i, val) in values.iter().enumerate() {
            if val.len() != val_size {
                return Err(Error::with_invalid_data(format!(
                    "value size for cpu {} is {} != {}",
                    i,
                    val.len(),
                    val_size
                )));
            }

            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
                .copy_from_slice(val);
        }

        update_raw(self, key, &value_buf, flags)
    }
}
873
/// An immutable loaded BPF map.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable loaded BPF map.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;

/// Represents a libbpf-created map.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapImpl<'obj, T = ()> {
    // Non-null pointer to the underlying libbpf map object.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper to the `'obj` borrow of the owning BPF object and
    // carries the mutability marker `T` (`()` = shared, `Mut` = exclusive).
    _phantom: PhantomData<&'obj T>,
}
888
impl<'obj> Map<'obj> {
    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the provided map has no associated file descriptor (i.e. it
    /// was never loaded); use [`Map::from_map_without_fd`] for such maps.
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
    /// file descriptor.
    ///
    /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
    /// will be the result.
    ///
    /// # Safety
    ///
    /// The pointer must point to a loaded map.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Returns whether map is pinned or not flag
    pub fn is_pinned(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self`.
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// Returns the `pin_path` if the map is pinned, otherwise, `None`
    /// is returned.
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // means map is not pinned
            return None;
        }
        // SAFETY: a non-NULL return from `bpf_map__pin_path` is a valid,
        //         NUL-terminated string owned by the map.
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }

    /// Return `true` if the map was set to be auto-created during load, `false` otherwise.
    pub fn autocreate(&self) -> bool {
        // SAFETY: `ptr` is valid for the lifetime of `self`.
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
945
946impl<'obj> MapMut<'obj> {
947    /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
948    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
949        // SAFETY: We inferred the address from a reference, which is always
950        //         valid.
951        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
952        assert!(
953            map_fd(ptr).is_some(),
954            "provided BPF map does not have file descriptor"
955        );
956
957        Self {
958            ptr,
959            _phantom: PhantomData,
960        }
961    }
962
963    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
964    /// this map to bpffs.
965    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
966        let path_c = util::path_to_cstring(path)?;
967        let path_ptr = path_c.as_ptr();
968
969        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
970        util::parse_ret(ret)
971    }
972
973    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
974    /// this map from bpffs.
975    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
976        let path_c = util::path_to_cstring(path)?;
977        let path_ptr = path_c.as_ptr();
978        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
979        util::parse_ret(ret)
980    }
981
982    /// Attach a struct ops map
983    pub fn attach_struct_ops(&mut self) -> Result<Link> {
984        if self.map_type() != MapType::StructOps {
985            return Err(Error::with_invalid_data(format!(
986                "Invalid map type ({:?}) for attach_struct_ops()",
987                self.map_type(),
988            )));
989        }
990
991        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
992        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
993        // SAFETY: the pointer came from libbpf and has been checked for errors.
994        let link = unsafe { Link::new(ptr) };
995        Ok(link)
996    }
997}
998
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY(review): `Map` and `MapMut` are both `MapImpl` instantiations
        // that differ only in the zero-sized `PhantomData` marker, so the two
        // reference types share one layout and the transmute is sound —
        // confirm this invariant is upheld if `MapImpl`'s fields ever change.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
1006
1007impl<T> AsFd for MapImpl<'_, T> {
1008    #[inline]
1009    fn as_fd(&self) -> BorrowedFd<'_> {
1010        // SANITY: Our map must always have a file descriptor associated with
1011        //         it.
1012        let fd = map_fd(self.ptr).unwrap();
1013        // SAFETY: `fd` is guaranteed to be valid for the lifetime of
1014        //         the created object.
1015        let fd = unsafe { BorrowedFd::borrow_raw(fd) };
1016        fd
1017    }
1018}
1019
1020impl<T> MapCore for MapImpl<'_, T>
1021where
1022    T: Debug,
1023{
1024    fn name(&self) -> &OsStr {
1025        // SAFETY: We ensured `ptr` is valid during construction.
1026        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
1027        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
1028        //          NULL. We know `ptr` is not NULL.
1029        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
1030        OsStr::from_bytes(name_c_str.to_bytes())
1031    }
1032
1033    #[inline]
1034    fn map_type(&self) -> MapType {
1035        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
1036        MapType::from(ty)
1037    }
1038
1039    #[inline]
1040    fn key_size(&self) -> u32 {
1041        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
1042    }
1043
1044    #[inline]
1045    fn value_size(&self) -> u32 {
1046        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
1047    }
1048
1049    #[inline]
1050    fn max_entries(&self) -> u32 {
1051        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
1052    }
1053}
1054
impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    ///
    /// No ownership is transferred: the pointer stays managed by the
    /// enclosing BPF object.
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
1064
/// A handle to a map. Handles can be duplicated and dropped.
///
/// While possible to [create directly][MapHandle::create], in many cases it is
/// useful to create such a handle from an existing [`Map`]:
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapHandle;
/// # let get_map = || -> &Map { todo!() };
/// let map: &Map = get_map();
/// let map_handle = MapHandle::try_from(map).unwrap();
/// ```
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapHandle {
    /// Owned file descriptor referring to the map.
    fd: OwnedFd,
    /// Map name, cached at handle-creation time.
    name: OsString,
    /// Map type, cached at handle-creation time.
    ty: MapType,
    /// Key size in bytes, cached at handle-creation time.
    key_size: u32,
    /// Value size in bytes, cached at handle-creation time.
    value_size: u32,
    /// Maximum number of entries, cached at handle-creation time.
    max_entries: u32,
}
1088
impl MapHandle {
    /// Create a bpf map whose data is not managed by libbpf.
    ///
    /// The returned handle owns the file descriptor produced by
    /// `bpf_map_create` and caches the supplied metadata.
    pub fn create<T: AsRef<OsStr>>(
        map_type: MapType,
        name: Option<T>,
        key_size: u32,
        value_size: u32,
        max_entries: u32,
        opts: &libbpf_sys::bpf_map_create_opts,
    ) -> Result<Self> {
        let name = match name {
            Some(name) => name.as_ref().to_os_string(),
            // Old kernel versions don't support specifying a map name; an
            // empty name translates to a NULL pointer below.
            None => OsString::new(),
        };
        let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
            Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
        })?;
        // Pass NULL rather than an empty string when no name was given.
        let name_c_ptr = if name.is_empty() {
            ptr::null()
        } else {
            name_c_str.as_bytes_with_nul().as_ptr()
        };

        // SAFETY: `name_c_ptr` is either NULL or points into `name_c_str`,
        //         which stays alive across this call.
        let fd = unsafe {
            libbpf_sys::bpf_map_create(
                map_type.into(),
                name_c_ptr.cast(),
                key_size,
                value_size,
                max_entries,
                opts,
            )
        };
        // A negative return is an error; bail out before claiming ownership.
        let () = util::parse_ret(fd)?;

        Ok(Self {
            // SAFETY: A file descriptor coming from the `bpf_map_create`
            //         function is always suitable for ownership and can be
            //         cleaned up with close.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
            name,
            ty: map_type,
            key_size,
            value_size,
            max_entries,
        })
    }

    /// Open a previously pinned map from its path.
    ///
    /// # Panics
    /// If the path contains null bytes.
    pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        // Non-generic inner function keeps the monomorphized code small.
        fn inner(path: &Path) -> Result<MapHandle> {
            // NOTE(review): unlike `MapMut::pin`/`unpin`, which go through
            // `util::path_to_cstring` and return an error, an interior NUL
            // byte here panics — as documented above.
            let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
            let fd = parse_ret_i32(unsafe {
                // SAFETY
                // p is never null since we allocated ourselves.
                libbpf_sys::bpf_obj_get(p.as_ptr())
            })?;
            MapHandle::from_fd(unsafe {
                // SAFETY
                // A file descriptor coming from the bpf_obj_get function is always suitable for
                // ownership and can be cleaned up with close.
                OwnedFd::from_raw_fd(fd)
            })
        }

        inner(path.as_ref())
    }

    /// Open a loaded map from its map id.
    pub fn from_map_id(id: u32) -> Result<Self> {
        parse_ret_i32(unsafe {
            // SAFETY
            // This function is always safe to call.
            libbpf_sys::bpf_map_get_fd_by_id(id)
        })
        .map(|fd| unsafe {
            // SAFETY
            // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
            // for ownership and can be cleaned up with close.
            OwnedFd::from_raw_fd(fd)
        })
        .and_then(Self::from_fd)
    }

    // Build a handle around an owned descriptor, caching the map's metadata
    // as queried from the kernel via `MapInfo`.
    fn from_fd(fd: OwnedFd) -> Result<Self> {
        let info = MapInfo::new(fd.as_fd())?;
        Ok(Self {
            fd,
            name: info.name()?.into(),
            ty: info.map_type(),
            key_size: info.info.key_size,
            value_size: info.info.value_size,
            max_entries: info.info.max_entries,
        })
    }

    /// Freeze the map as read-only from user space.
    ///
    /// Entries from a frozen map can no longer be updated or deleted with the
    /// `bpf()` system call. This operation is not reversible, and the map remains
    /// immutable from user space until its destruction. However, read and write
    /// permissions for BPF programs to the map remain unchanged.
    pub fn freeze(&self) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };

        util::parse_ret(ret)
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    ///
    /// This only removes the pin file; the handle itself stays usable.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        remove_file(path).context("failed to remove pin map")
    }
}
1217
// All metadata below was cached when the handle was constructed; no syscalls
// are performed by these getters.
impl MapCore for MapHandle {
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1244
impl AsFd for MapHandle {
    /// Borrow the owned map file descriptor.
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}
1251
1252impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
1253where
1254    T: Debug,
1255{
1256    type Error = Error;
1257
1258    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
1259        Ok(Self {
1260            fd: other
1261                .as_fd()
1262                .try_clone_to_owned()
1263                .context("failed to duplicate map file descriptor")?,
1264            name: other.name().to_os_string(),
1265            ty: other.map_type(),
1266            key_size: other.key_size(),
1267            value_size: other.value_size(),
1268            max_entries: other.max_entries(),
1269        })
1270    }
1271}
1272
1273impl TryFrom<&Self> for MapHandle {
1274    type Error = Error;
1275
1276    fn try_from(other: &Self) -> Result<Self> {
1277        Ok(Self {
1278            fd: other
1279                .as_fd()
1280                .try_clone_to_owned()
1281                .context("failed to duplicate map file descriptor")?,
1282            name: other.name().to_os_string(),
1283            ty: other.map_type(),
1284            key_size: other.key_size(),
1285            value_size: other.value_size(),
1286            max_entries: other.max_entries(),
1287        })
1288    }
1289}
1290
bitflags! {
    /// Flags to configure [`Map`] operations.
    ///
    /// These mirror the kernel's `BPF_*` element-update flags re-exported by
    /// `libbpf_sys`.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY      = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST    = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK     = libbpf_sys::BPF_F_LOCK as _;
    }
}
1305
/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
// If you add a new per-cpu map, also update `is_percpu`.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    /// An unspecified map type.
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    /// A general purpose Hash map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html) for more details.
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    /// An Array map storage type.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    /// A program array map which holds only the file descriptors to other eBPF programs. Used for
    /// tail-calls.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PROG_ARRAY/) for more details.
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    /// An array map which holds only the file descriptors to perf events.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERF_EVENT_ARRAY/) for more details.
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    /// A Hash map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    /// An Array map with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_array.html) for more details.
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    /// Corresponds to `BPF_MAP_TYPE_STACK_TRACE`.
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    /// Corresponds to `BPF_MAP_TYPE_CGROUP_ARRAY`.
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    /// A Hash map with least recently used (LRU) eviction policy.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#bpf-map-type-lru-hash-and-variants) for more details.
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    /// A Hash map with least recently used (LRU) eviction policy with per CPU storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_hash.html#per-cpu-hashes) for more details.
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    /// A Longest Prefix Match (LPM) algorithm based map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_lpm_trie.html) for more details.
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    /// A map in map storage.
    /// One level of nesting is supported, where an outer map contains instances of a single type
    /// of inner map.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_of_maps.html) for more details.
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    /// An array map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    /// An array map holds references to a socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    /// A map that redirects raw XDP frames to another CPU.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cpumap.html) for more details.
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    /// A map that redirects raw XDP frames to `AF_XDP` sockets (XSKs), a new type of address
    /// family in the kernel that allows redirection of frames from a driver to user space
    /// without having to traverse the full network stack.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_xskmap.html) for more details.
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    /// A Hash map that holds references to sockets via their socket descriptor.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sockmap.html) for more details.
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    /// Deprecated. Use `CGrpStorage` instead.
    ///
    /// A Local storage for cgroups.
    /// Only available with `CONFIG_CGROUP_BPF` and to programs that attach to cgroups.
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgroup_storage.html) for more details.
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    /// A Local storage for cgroups. Only available with `CONFIG_CGROUPS`.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_cgrp_storage.html) for more details.
    /// See also [Difference between cgrp_storage and cgroup_storage](https://docs.kernel.org/bpf/map_cgrp_storage.html#difference-between-bpf-map-type-cgrp-storage-and-bpf-map-type-cgroup-storage)
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    /// A map that holds references to sockets with `SO_REUSEPORT` option set.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_REUSEPORT_SOCKARRAY/) for more details.
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    /// A per-CPU variant of [`BPF_MAP_TYPE_CGROUP_STORAGE`][`MapType::CgroupStorage`].
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) for more details.
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    /// A FIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    /// A LIFO storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_queue_stack.html) for more details.
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    /// A socket-local storage.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_sk_storage.html) for more details.
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    /// A Hash map that uses the key as the index to lookup a reference to a net device.
    /// Primarily used for XDP BPF Helper.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_devmap.html) for more details.
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    /// A specialized map that act as implementations of "struct ops" structures defined in the
    /// kernel.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_STRUCT_OPS/) for more details.
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    /// A ring buffer map to efficiently send large amount of data.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_RINGBUF/) for more details.
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    /// A storage map that holds data keyed on inodes.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_INODE_STORAGE/) for more details.
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    /// A storage map that holds data keyed on tasks.
    ///
    /// Refer [documentation](https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_TASK_STORAGE/) for more details.
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    /// Bloom filters are a space-efficient probabilistic data structure used to quickly test
    /// whether an element exists in a set. In a bloom filter, false positives are possible
    /// whereas false negatives are not.
    ///
    /// Refer the kernel [documentation](https://docs.kernel.org/bpf/map_bloom_filter.html) for more details.
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    /// Corresponds to `BPF_MAP_TYPE_USER_RINGBUF`.
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// We choose to specify our own "unknown" type here b/c it's really up to the kernel
    /// to decide if it wants to reject the map. If it accepts it, it just means whoever
    /// using this library is a bit out of date.
    Unknown = u32::MAX,
}
1456
1457impl MapType {
1458    /// Returns if the map is of one of the per-cpu types.
1459    pub fn is_percpu(&self) -> bool {
1460        matches!(
1461            self,
1462            Self::PercpuArray | Self::PercpuHash | Self::LruPercpuHash | Self::PercpuCgroupStorage
1463        )
1464    }
1465
1466    /// Returns if the map is of one of the hashmap types.
1467    pub fn is_hash_map(&self) -> bool {
1468        matches!(
1469            self,
1470            Self::Hash | Self::PercpuHash | Self::LruHash | Self::LruPercpuHash
1471        )
1472    }
1473
1474    /// Returns if the map is keyless map type as per documentation of libbpf
1475    /// Keyless map types are: Queues, Stacks and Bloom Filters
1476    fn is_keyless(&self) -> bool {
1477        matches!(self, Self::Queue | Self::Stack | Self::BloomFilter)
1478    }
1479
1480    /// Returns if the map is of bloom filter type
1481    pub fn is_bloom_filter(&self) -> bool {
1482        Self::BloomFilter.eq(self)
1483    }
1484
1485    /// Detects if host kernel supports this BPF map type.
1486    ///
1487    /// Make sure the process has required set of CAP_* permissions (or runs as
1488    /// root) when performing feature checking.
1489    pub fn is_supported(&self) -> Result<bool> {
1490        let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1491        match ret {
1492            0 => Ok(false),
1493            1 => Ok(true),
1494            _ => Err(Error::from_raw_os_error(-ret)),
1495        }
1496    }
1497}
1498
impl From<u32> for MapType {
    /// Convert a raw `enum bpf_map_type` value into a [`MapType`].
    ///
    /// Values this library does not know about map to [`MapType::Unknown`]
    /// instead of failing, so newer kernels don't break older consumers.
    fn from(value: u32) -> Self {
        use MapType::*;

        // The discriminants are `libbpf_sys` constants rather than literals
        // and thus cannot be used directly as match patterns; each arm
        // compares against the variant's value via a guard instead.
        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            _ => Unknown,
        }
    }
}
1540
impl From<MapType> for u32 {
    /// Convert a [`MapType`] into its raw `enum bpf_map_type` value.
    fn from(value: MapType) -> Self {
        value as Self
    }
}
1546
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    /// Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    /// The most recently yielded key; `None` before the first lookup.
    prev: Option<Vec<u8>>,
    /// Scratch buffer (of the map's key size) the next key is read into.
    next: Vec<u8>,
}
1554
1555impl<'map> MapKeyIter<'map> {
1556    fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
1557        Self {
1558            map_fd,
1559            prev: None,
1560            next: vec![0; key_size as usize],
1561        }
1562    }
1563}
1564
1565impl Iterator for MapKeyIter<'_> {
1566    type Item = Vec<u8>;
1567
1568    fn next(&mut self) -> Option<Self::Item> {
1569        let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);
1570
1571        let ret = unsafe {
1572            libbpf_sys::bpf_map_get_next_key(
1573                self.map_fd.as_raw_fd(),
1574                prev as _,
1575                self.next.as_mut_ptr() as _,
1576            )
1577        };
1578        if ret != 0 {
1579            None
1580        } else {
1581            self.prev = Some(self.next.clone());
1582            Some(self.next.clone())
1583        }
1584    }
1585}
1586
/// An iterator over batches of key value pairs of a BPF map.
#[derive(Debug)]
pub struct BatchedMapIter<'map> {
    /// Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    /// Whether to delete elements as they are read (lookup-and-delete mode).
    delete: bool,
    /// Requested number of elements per batch.
    count: usize,
    /// Size of a single key, in bytes.
    key_size: usize,
    /// Size of a single value, in bytes.
    value_size: usize,
    /// Buffer holding the keys of the current batch, `key_size` bytes each.
    keys: Vec<u8>,
    /// Buffer holding the values of the current batch, `value_size` bytes each.
    values: Vec<u8>,
    /// Kernel cookie identifying where the previous batch ended; `None`
    /// before the first lookup.
    prev: Option<Vec<u8>>,
    /// Buffer the kernel writes the next batch cookie into.
    next: Vec<u8>,
    /// Options forwarded to the batch lookup syscalls.
    batch_opts: libbpf_sys::bpf_map_batch_opts,
    /// Position within the current batch; `None` means no (more) data.
    index: Option<usize>,
}
1602
impl<'map> BatchedMapIter<'map> {
    /// Create a batched iterator reading up to `count` elements per syscall
    /// from the map behind `map_fd`, optionally deleting them as it goes.
    fn new(
        map_fd: BorrowedFd<'map>,
        count: u32,
        key_size: u32,
        value_size: u32,
        batch_opts: libbpf_sys::bpf_map_batch_opts,
        delete: bool,
    ) -> Self {
        Self {
            map_fd,
            delete,
            count: count as usize,
            key_size: key_size as usize,
            value_size: value_size as usize,
            // Buffers sized for a full batch; `lookup_next_batch` shrinks the
            // logical length to what the kernel actually returned.
            keys: vec![0; (count * key_size) as usize],
            values: vec![0; (count * value_size) as usize],
            prev: None,
            next: vec![0; key_size as usize],
            batch_opts,
            index: None,
        }
    }

    /// Fetch the next batch of key/value pairs into `self.keys`/`self.values`
    /// and reset `self.index`. On unrecoverable errors `self.index` is set to
    /// `None`, terminating iteration.
    fn lookup_next_batch(&mut self) {
        // NULL on the first call asks the kernel to start from the beginning.
        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
        // In/out argument: requested batch size in, returned element count out.
        let mut count = self.count as u32;

        let ret = unsafe {
            let lookup_fn = if self.delete {
                libbpf_sys::bpf_map_lookup_and_delete_batch
            } else {
                libbpf_sys::bpf_map_lookup_batch
            };
            lookup_fn(
                self.map_fd.as_raw_fd(),
                prev.cast(),
                self.next.as_mut_ptr().cast(),
                self.keys.as_mut_ptr().cast(),
                self.values.as_mut_ptr().cast(),
                &mut count,
                &self.batch_opts,
            )
        };

        if let Err(e) = util::parse_ret(ret) {
            match e.kind() {
                // in this case we can trust the returned count value
                error::ErrorKind::NotFound => {}
                // retry with same input arguments
                error::ErrorKind::Interrupted => {
                    return self.lookup_next_batch();
                }
                _ => {
                    self.index = None;
                    return;
                }
            }
        }

        self.prev = Some(self.next.clone());
        self.index = Some(0);

        // SAFETY(review): shrinks (never grows) the buffers to the element
        // count reported by the kernel; `count` is at most the requested
        // batch size the buffers were allocated for, so the new lengths stay
        // within the initialized allocations.
        unsafe {
            self.keys.set_len(self.key_size * count as usize);
            self.values.set_len(self.value_size * count as usize);
        }
    }
}
1672
impl Iterator for BatchedMapIter<'_> {
    type Item = (Vec<u8>, Vec<u8>);

    fn next(&mut self) -> Option<Self::Item> {
        // Decide whether the current batch is exhausted and another kernel
        // lookup is needed.
        let load_next_batch = match self.index {
            Some(index) => {
                // All elements of the buffered batch have been yielded.
                let batch_finished = index * self.key_size >= self.keys.len();
                // A short batch (fewer elements than requested) means the
                // map has been fully drained; don't ask the kernel again.
                let last_batch = self.keys.len() < self.key_size * self.count;
                batch_finished && !last_batch
            }
            // No batch fetched yet.
            None => true,
        };

        if load_next_batch {
            self.lookup_next_batch();
        }

        // `index` is `None` when the lookup failed terminally.
        let index = self.index?;
        let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
        let val = self
            .values
            .chunks_exact(self.value_size)
            .nth(index)?
            .to_vec();

        self.index = Some(index + 1);
        Some((key, val))
    }
}
1702
/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
/// provides the ability to retrieve the details of a certain map.
#[derive(Debug)]
pub struct MapInfo {
    /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
    pub info: bpf_map_info,
}
1710
1711impl MapInfo {
1712    /// Create a `MapInfo` object from a fd.
1713    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
1714        let mut map_info = bpf_map_info::default();
1715        let mut size = mem::size_of_val(&map_info) as u32;
1716        // SAFETY: All pointers are derived from references and hence valid.
1717        let () = util::parse_ret(unsafe {
1718            bpf_obj_get_info_by_fd(
1719                fd.as_raw_fd(),
1720                &mut map_info as *mut bpf_map_info as *mut c_void,
1721                &mut size as *mut u32,
1722            )
1723        })?;
1724        Ok(Self { info: map_info })
1725    }
1726
1727    /// Get the map type
1728    #[inline]
1729    pub fn map_type(&self) -> MapType {
1730        MapType::from(self.info.type_)
1731    }
1732
1733    /// Get the name of this map.
1734    ///
1735    /// Returns error if the underlying data in the structure is not a valid
1736    /// utf-8 string.
1737    pub fn name<'a>(&self) -> Result<&'a str> {
1738        // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 has the same size.
1739        let char_slice =
1740            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };
1741
1742        util::c_char_slice_to_cstr(char_slice)
1743            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
1744            .to_str()
1745            .map_err(Error::with_invalid_data)
1746    }
1747
1748    /// Get the map flags.
1749    #[inline]
1750    pub fn flags(&self) -> MapFlags {
1751        MapFlags::from_bits_truncate(self.info.map_flags as u64)
1752    }
1753}
1754
/// Information about a BPF map obtained from `/proc/self/fdinfo`.
///
/// This provides information not available through [`MapInfo`], such as
/// [`memlock`][MapFdInfo::memlock] (memory usage) and [`frozen`][MapFdInfo::frozen] status.
///
/// The fields correspond to those printed by
/// [`bpf_map_show_fdinfo`](https://github.com/torvalds/linux/blob/37a93dd5c49b/kernel/bpf/syscall.c#L1007)
/// in the kernel source. See also bpftool's
/// [`get_fdinfo`](https://github.com/torvalds/linux/blob/37a93dd5c49/tools/bpf/bpftool/common.c#L485)
/// for the matching userspace parsing logic.
///
/// Construct instances with [`MapFdInfo::from_fd`], which parses the
/// key/value lines of the fdinfo file.
#[derive(Debug, Clone)]
pub struct MapFdInfo {
    /// The map type.
    pub map_type: MapType,
    /// The size of the map's keys in bytes.
    pub key_size: u32,
    /// The size of the map's values in bytes.
    pub value_size: u32,
    /// The maximum number of entries in the map.
    pub max_entries: u32,
    // The following fields were added in later kernel versions and may not be
    // present in older kernels, hence they are `Option`s.
    /// The map flags.
    pub map_flags: Option<u32>,
    /// Extra map-specific data.
    pub map_extra: Option<u64>,
    /// The amount of memory locked by the map in bytes.
    pub memlock: Option<u64>,
    /// The map's ID.
    pub map_id: Option<u32>,
    /// Whether the map is frozen.
    pub frozen: Option<bool>,
    /// The type of the owner program (only for `prog_array` maps).
    pub owner_prog_type: Option<ProgramType>,
    /// Whether the owner program is JIT-compiled (only for `prog_array` maps).
    pub owner_jited: Option<bool>,
}
1792
1793impl MapFdInfo {
1794    /// Create a `MapFdInfo` by reading `/proc/self/fdinfo` for the given fd.
1795    pub fn from_fd(fd: BorrowedFd<'_>) -> Result<Self> {
1796        let path = format!("/proc/self/fdinfo/{}", fd.as_raw_fd());
1797        let file = File::open(&path).with_context(|| format!("failed to open `{path}`"))?;
1798        let reader = BufReader::new(file);
1799
1800        let parse = |key: &str, val: &str| -> Result<u32> {
1801            val.parse()
1802                .map_err(|e| Error::with_invalid_data(format!("`{key}`: {e}")))
1803        };
1804
1805        let mut map_type = None;
1806        let mut key_size = None;
1807        let mut value_size = None;
1808        let mut max_entries = None;
1809        let mut map_flags = None;
1810        let mut map_extra = None;
1811        let mut memlock = None;
1812        let mut map_id = None;
1813        let mut frozen = None;
1814        let mut owner_prog_type = None;
1815        let mut owner_jited = None;
1816
1817        for result in reader.lines() {
1818            let line = result?;
1819            let Some((key, value)) = line.split_once('\t') else {
1820                continue;
1821            };
1822            // Keys have a trailing colon, e.g. "map_type:"
1823            let key = key.trim_end_matches(':');
1824            let value = value.trim();
1825
1826            match key {
1827                "map_type" => map_type = Some(parse(key, value)?),
1828                "key_size" => key_size = Some(parse(key, value)?),
1829                "value_size" => value_size = Some(parse(key, value)?),
1830                "max_entries" => max_entries = Some(parse(key, value)?),
1831                "map_flags" => {
1832                    map_flags =
1833                        Some(parse_hex(value).with_context(|| format!("bad `{key}`"))? as u32)
1834                }
1835                "map_extra" => {
1836                    map_extra = Some(parse_hex(value).with_context(|| format!("bad `{key}`"))?)
1837                }
1838                "memlock" => memlock = Some(parse(key, value)? as u64),
1839                "map_id" => map_id = Some(parse(key, value)?),
1840                "frozen" => frozen = Some(parse(key, value)? != 0),
1841                "owner_prog_type" => owner_prog_type = Some(parse(key, value)?),
1842                "owner_jited" => owner_jited = Some(parse(key, value)? != 0),
1843                _ => {}
1844            }
1845        }
1846
1847        let missing = |f| Error::with_invalid_data(format!("missing `{f}` in fdinfo"));
1848
1849        Ok(Self {
1850            map_type: MapType::from(map_type.ok_or_else(|| missing("map_type"))?),
1851            key_size: key_size.ok_or_else(|| missing("key_size"))?,
1852            value_size: value_size.ok_or_else(|| missing("value_size"))?,
1853            max_entries: max_entries.ok_or_else(|| missing("max_entries"))?,
1854            map_flags,
1855            map_extra,
1856            memlock,
1857            map_id,
1858            frozen,
1859            owner_prog_type: owner_prog_type.map(ProgramType::from),
1860            owner_jited,
1861        })
1862    }
1863}
1864
1865/// Parse a value that may be in hex (0x...) or decimal format.
1866fn parse_hex(s: &str) -> Result<u64> {
1867    if let Some(hex) = s.strip_prefix("0x").or_else(|| s.strip_prefix("0X")) {
1868        u64::from_str_radix(hex, 16)
1869    } else {
1870        s.parse()
1871    }
1872    .map_err(Error::with_invalid_data)
1873}
1874
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Every `MapType` variant must survive a round trip through `u32`
    /// with its discriminant intact.
    #[test]
    fn map_type() {
        use MapType::*;

        let variants = [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ];

        for t in variants {
            // Check if discriminants match after a roundtrip conversion.
            let roundtripped = MapType::from(t as u32);
            assert_eq!(discriminant(&t), discriminant(&roundtripped));
        }
    }

    /// Plain decimal input, including the extremes of the `u64` range.
    #[test]
    fn parse_hex_decimal() {
        for (input, expected) in [
            ("0", 0),
            ("42", 42),
            ("18446744073709551615", u64::MAX),
        ] {
            assert_eq!(parse_hex(input).unwrap(), expected);
        }
    }

    /// Hex input with either casing of the `0x` prefix.
    #[test]
    fn parse_hex_hex_prefix() {
        for (input, expected) in [
            ("0x0", 0),
            ("0xff", 255),
            ("0X1A", 26),
            ("0xdeadbeef", 0xdeadbeef),
        ] {
            assert_eq!(parse_hex(input).unwrap(), expected);
        }
    }

    /// Malformed input must be rejected.
    #[test]
    fn parse_hex_invalid() {
        for input in ["", "xyz", "0xGG"] {
            assert!(parse_hex(input).is_err());
        }
    }
}