1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::fs::File;
9use std::io;
10use std::io::BufRead as _;
11use std::io::BufReader;
12use std::marker::PhantomData;
13use std::mem;
14use std::mem::transmute;
15use std::ops::Deref;
16use std::os::unix::ffi::OsStrExt;
17use std::os::unix::io::AsFd;
18use std::os::unix::io::AsRawFd;
19use std::os::unix::io::BorrowedFd;
20use std::os::unix::io::FromRawFd;
21use std::os::unix::io::OwnedFd;
22use std::os::unix::io::RawFd;
23use std::path::Path;
24use std::ptr;
25use std::ptr::NonNull;
26use std::slice;
27use std::slice::from_raw_parts;
28
29use bitflags::bitflags;
30use libbpf_sys::bpf_map_info;
31use libbpf_sys::bpf_obj_get_info_by_fd;
32
33use crate::error;
34use crate::util;
35use crate::util::parse_ret_i32;
36use crate::util::validate_bpf_ret;
37use crate::AsRawLibbpf;
38use crate::Error;
39use crate::ErrorExt as _;
40use crate::Link;
41use crate::Mut;
42use crate::ProgramType;
43use crate::Result;
44
/// An immutable handle to a map of a BPF object in the "open" state.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable handle to a map of a BPF object in the "open" state.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;
49
/// A thin wrapper around a `libbpf_sys::bpf_map` pointer.
///
/// The `T` marker distinguishes mutable (`Mut`) from shared access; it is
/// only carried in `PhantomData`. `#[repr(transparent)]` guarantees that all
/// instantiations share the layout of the pointer itself, which the
/// `Deref` implementation's reference transmute relies on.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Non-null pointer to the underlying libbpf map object.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper to the lifetime of the borrowed BPF object.
    _phantom: PhantomData<&'obj T>,
}
62
63impl<'obj> OpenMap<'obj> {
64 pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
66 Self {
69 ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
70 _phantom: PhantomData,
71 }
72 }
73
74 pub fn name(&self) -> &'obj OsStr {
76 let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
78 let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
81 OsStr::from_bytes(name_c_str.to_bytes())
82 }
83
84 pub fn map_type(&self) -> MapType {
86 let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
87 MapType::from(ty)
88 }
89
90 fn initial_value_raw(&self) -> (*mut u8, usize) {
91 let mut size = 0u64;
92 let ptr = unsafe {
93 libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
94 };
95 (ptr.cast(), size as _)
96 }
97
98 pub fn initial_value(&self) -> Option<&[u8]> {
100 let (ptr, size) = self.initial_value_raw();
101 if ptr.is_null() {
102 None
103 } else {
104 let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
105 Some(data)
106 }
107 }
108
109 pub fn max_entries(&self) -> u32 {
111 unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
112 }
113
114 pub fn autocreate(&self) -> bool {
116 unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
117 }
118}
119
120impl<'obj> OpenMapMut<'obj> {
121 pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
123 Self {
124 ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
125 _phantom: PhantomData,
126 }
127 }
128
129 pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
131 let (ptr, size) = self.initial_value_raw();
132 if ptr.is_null() {
133 None
134 } else {
135 let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
136 Some(data)
137 }
138 }
139
140 pub fn set_map_ifindex(&mut self, idx: u32) {
144 unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
145 }
146
147 pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
149 let ret = unsafe {
150 libbpf_sys::bpf_map__set_initial_value(
151 self.ptr.as_ptr(),
152 data.as_ptr() as *const c_void,
153 data.len() as libbpf_sys::size_t,
154 )
155 };
156
157 util::parse_ret(ret)
158 }
159
160 pub fn set_type(&mut self, ty: MapType) -> Result<()> {
162 let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
163 util::parse_ret(ret)
164 }
165
166 pub fn set_key_size(&mut self, size: u32) -> Result<()> {
168 let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
169 util::parse_ret(ret)
170 }
171
172 pub fn set_value_size(&mut self, size: u32) -> Result<()> {
174 let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
175 util::parse_ret(ret)
176 }
177
178 pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
180 let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
181 util::parse_ret(ret)
182 }
183
184 pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
186 let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
187 util::parse_ret(ret)
188 }
189
190 pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
195 let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
196 util::parse_ret(ret)
197 }
198
199 pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
204 let ret = unsafe {
205 libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
206 };
207 util::parse_ret(ret)
208 }
209
210 pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
219 let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
220 util::parse_ret(ret)
221 }
222
223 pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
225 let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
226 util::parse_ret(ret)
227 }
228
229 pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
233 let path_c = util::path_to_cstring(path)?;
234 let path_ptr = path_c.as_ptr();
235
236 let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
237 util::parse_ret(ret)
238 }
239
240 pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
242 let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
243 util::parse_ret(ret)
244 }
245
246 pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
248 let cstring = util::path_to_cstring(path)?;
249
250 let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
251 if fd < 0 {
252 return Err(Error::from(io::Error::last_os_error()));
253 }
254
255 let fd = unsafe { OwnedFd::from_raw_fd(fd) };
256
257 let reuse_result = self.reuse_fd(fd.as_fd());
258
259 reuse_result
260 }
261}
262
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `#[repr(transparent)]` and its `T`
        // parameter only appears inside `PhantomData`, so `OpenMapMut` and
        // `OpenMap` have identical layout; casting the reference is sound.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
272
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`] pointer.
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
281
282pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
283 let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
284 let fd = util::parse_ret_i32(fd).ok();
285 fd
286}
287
288fn percpu_aligned_value_size<M>(map: &M) -> usize
291where
292 M: MapCore + ?Sized,
293{
294 let val_size = map.value_size() as usize;
295 util::roundup(val_size, 8)
296}
297
298fn percpu_buffer_size<M>(map: &M) -> Result<usize>
300where
301 M: MapCore + ?Sized,
302{
303 let aligned_val_size = percpu_aligned_value_size(map);
304 let ncpu = crate::num_possible_cpus()?;
305 Ok(ncpu * aligned_val_size)
306}
307
308fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
312where
313 M: MapCore + ?Sized,
314{
315 if map.key_size() == 0 && map.map_type().is_keyless() {
317 return ptr::null();
318 }
319
320 key.as_ptr() as *const c_void
321}
322
323fn lookup_raw<M>(
326 map: &M,
327 key: &[u8],
328 value: &mut [mem::MaybeUninit<u8>],
329 flags: MapFlags,
330) -> Result<bool>
331where
332 M: MapCore + ?Sized,
333{
334 if key.len() != map.key_size() as usize {
335 return Err(Error::with_invalid_data(format!(
336 "key_size {} != {}",
337 key.len(),
338 map.key_size()
339 )));
340 }
341
342 debug_assert_eq!(
344 value.len(),
345 if map.map_type().is_percpu() {
346 percpu_buffer_size(map).unwrap()
347 } else {
348 map.value_size() as usize
349 }
350 );
351
352 let ret = unsafe {
353 libbpf_sys::bpf_map_lookup_elem_flags(
354 map.as_fd().as_raw_fd(),
355 map_key(map, key),
356 value.as_mut_ptr().cast(),
358 flags.bits(),
359 )
360 };
361
362 if ret == 0 {
363 Ok(true)
364 } else {
365 let err = io::Error::last_os_error();
366 if err.kind() == io::ErrorKind::NotFound {
367 Ok(false)
368 } else {
369 Err(Error::from(err))
370 }
371 }
372}
373
374fn lookup_raw_vec<M>(
376 map: &M,
377 key: &[u8],
378 flags: MapFlags,
379 out_size: usize,
380) -> Result<Option<Vec<u8>>>
381where
382 M: MapCore + ?Sized,
383{
384 let mut out = Vec::with_capacity(out_size);
386
387 match lookup_raw(map, key, out.spare_capacity_mut(), flags)? {
388 true => {
389 unsafe {
391 out.set_len(out_size);
392 }
393 Ok(Some(out))
394 }
395 false => Ok(None),
396 }
397}
398
399fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
402where
403 M: MapCore + ?Sized,
404{
405 if key.len() != map.key_size() as usize {
406 return Err(Error::with_invalid_data(format!(
407 "key_size {} != {}",
408 key.len(),
409 map.key_size()
410 )));
411 };
412
413 let ret = unsafe {
414 libbpf_sys::bpf_map_update_elem(
415 map.as_fd().as_raw_fd(),
416 map_key(map, key),
417 value.as_ptr() as *const c_void,
418 flags.bits(),
419 )
420 };
421
422 util::parse_ret(ret)
423}
424
425fn lookup_batch_raw<M>(
427 map: &M,
428 count: u32,
429 elem_flags: MapFlags,
430 flags: MapFlags,
431 delete: bool,
432) -> BatchedMapIter<'_>
433where
434 M: MapCore + ?Sized,
435{
436 #[allow(clippy::needless_update)]
437 let opts = libbpf_sys::bpf_map_batch_opts {
438 sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
439 elem_flags: elem_flags.bits(),
440 flags: flags.bits(),
441 ..Default::default()
443 };
444
445 let key_size = if map.map_type().is_hash_map() {
448 map.key_size().max(4)
449 } else {
450 map.key_size()
451 };
452
453 BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
454}
455
456fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
458where
459 M: MapCore + ?Sized,
460{
461 if map.map_type().is_bloom_filter() {
462 return Err(Error::with_invalid_data(
463 "lookup_bloom_filter() must be used for bloom filter maps",
464 ));
465 }
466 if map.map_type().is_percpu() {
467 return Err(Error::with_invalid_data(format!(
468 "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
469 map.map_type(),
470 )));
471 }
472
473 Ok(())
474}
475
// Sealing machinery: keeps `MapCore` implementable only inside this crate.
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    /// A trait that cannot be named (and hence not implemented) outside of
    /// this crate.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
485
486pub trait MapCore: Debug + AsFd + private::Sealed {
488 fn name(&self) -> &OsStr;
490
491 fn map_type(&self) -> MapType;
493
494 fn key_size(&self) -> u32;
496
497 fn value_size(&self) -> u32;
499
500 fn max_entries(&self) -> u32;
502
503 #[inline]
505 fn info(&self) -> Result<MapInfo> {
506 MapInfo::new(self.as_fd())
507 }
508
509 #[inline]
514 fn query_fdinfo(&self) -> Result<MapFdInfo> {
515 MapFdInfo::from_fd(self.as_fd())
516 }
517
518 fn keys(&self) -> MapKeyIter<'_> {
524 MapKeyIter::new(self.as_fd(), self.key_size())
525 }
526
527 fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
536 check_not_bloom_or_percpu(self)?;
537 let out_size = self.value_size() as usize;
538 lookup_raw_vec(self, key, flags, out_size)
539 }
540
541 fn lookup_into(&self, key: &[u8], value: &mut [u8], flags: MapFlags) -> Result<bool> {
554 check_not_bloom_or_percpu(self)?;
555
556 if value.len() != self.value_size() as usize {
557 return Err(Error::with_invalid_data(format!(
558 "value buffer size {} != {}",
559 value.len(),
560 self.value_size()
561 )));
562 }
563
564 let value = unsafe {
566 slice::from_raw_parts_mut::<mem::MaybeUninit<u8>>(
567 value.as_mut_ptr().cast(),
568 value.len(),
569 )
570 };
571 lookup_raw(self, key, value, flags)
572 }
573
574 fn lookup_batch(
578 &self,
579 count: u32,
580 elem_flags: MapFlags,
581 flags: MapFlags,
582 ) -> Result<BatchedMapIter<'_>> {
583 check_not_bloom_or_percpu(self)?;
584 Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
585 }
586
587 fn lookup_and_delete_batch(
591 &self,
592 count: u32,
593 elem_flags: MapFlags,
594 flags: MapFlags,
595 ) -> Result<BatchedMapIter<'_>> {
596 check_not_bloom_or_percpu(self)?;
597 Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
598 }
599
600 fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
604 let ret = unsafe {
605 libbpf_sys::bpf_map_lookup_elem(
606 self.as_fd().as_raw_fd(),
607 ptr::null(),
608 value.to_vec().as_mut_ptr() as *mut c_void,
609 )
610 };
611
612 if ret == 0 {
613 Ok(true)
614 } else {
615 let err = io::Error::last_os_error();
616 if err.kind() == io::ErrorKind::NotFound {
617 Ok(false)
618 } else {
619 Err(Error::from(err))
620 }
621 }
622 }
623
624 fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
628 if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
629 return Err(Error::with_invalid_data(format!(
630 "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
631 self.map_type(),
632 )));
633 }
634
635 let val_size = self.value_size() as usize;
636 let aligned_val_size = percpu_aligned_value_size(self);
637 let out_size = percpu_buffer_size(self)?;
638
639 let raw_res = lookup_raw_vec(self, key, flags, out_size)?;
640 if let Some(raw_vals) = raw_res {
641 let mut out = Vec::new();
642 for chunk in raw_vals.chunks_exact(aligned_val_size) {
643 out.push(chunk[..val_size].to_vec());
644 }
645 Ok(Some(out))
646 } else {
647 Ok(None)
648 }
649 }
650
651 fn delete(&self, key: &[u8]) -> Result<()> {
655 if key.len() != self.key_size() as usize {
656 return Err(Error::with_invalid_data(format!(
657 "key_size {} != {}",
658 key.len(),
659 self.key_size()
660 )));
661 };
662
663 let ret = unsafe {
664 libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
665 };
666 util::parse_ret(ret)
667 }
668
669 fn delete_batch(
673 &self,
674 keys: &[u8],
675 count: u32,
676 elem_flags: MapFlags,
677 flags: MapFlags,
678 ) -> Result<()> {
679 if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
680 return Err(Error::with_invalid_data(format!(
681 "batch key_size {} != {} * {}",
682 keys.len(),
683 self.key_size(),
684 count
685 )));
686 };
687
688 #[allow(clippy::needless_update)]
689 let opts = libbpf_sys::bpf_map_batch_opts {
690 sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
691 elem_flags: elem_flags.bits(),
692 flags: flags.bits(),
693 ..Default::default()
695 };
696
697 let mut count = count;
698 let ret = unsafe {
699 libbpf_sys::bpf_map_delete_batch(
700 self.as_fd().as_raw_fd(),
701 keys.as_ptr() as *const c_void,
702 &mut count,
703 &opts as *const libbpf_sys::bpf_map_batch_opts,
704 )
705 };
706 util::parse_ret(ret)
707 }
708
709 fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
716 if key.len() != self.key_size() as usize {
717 return Err(Error::with_invalid_data(format!(
718 "key_size {} != {}",
719 key.len(),
720 self.key_size()
721 )));
722 };
723
724 let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
725
726 let ret = unsafe {
727 libbpf_sys::bpf_map_lookup_and_delete_elem(
728 self.as_fd().as_raw_fd(),
729 map_key(self, key),
730 out.as_mut_ptr() as *mut c_void,
731 )
732 };
733
734 if ret == 0 {
735 unsafe {
736 out.set_len(self.value_size() as usize);
737 }
738 Ok(Some(out))
739 } else {
740 let err = io::Error::last_os_error();
741 if err.kind() == io::ErrorKind::NotFound {
742 Ok(None)
743 } else {
744 Err(Error::from(err))
745 }
746 }
747 }
748
749 fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
756 if self.map_type().is_percpu() {
757 return Err(Error::with_invalid_data(format!(
758 "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
759 self.map_type(),
760 )));
761 }
762
763 if value.len() != self.value_size() as usize {
764 return Err(Error::with_invalid_data(format!(
765 "value_size {} != {}",
766 value.len(),
767 self.value_size()
768 )));
769 };
770
771 update_raw(self, key, value, flags)
772 }
773
774 fn update_batch(
779 &self,
780 keys: &[u8],
781 values: &[u8],
782 count: u32,
783 elem_flags: MapFlags,
784 flags: MapFlags,
785 ) -> Result<()> {
786 if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
787 return Err(Error::with_invalid_data(format!(
788 "batch key_size {} != {} * {}",
789 keys.len(),
790 self.key_size(),
791 count
792 )));
793 };
794
795 if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
796 return Err(Error::with_invalid_data(format!(
797 "batch value_size {} != {} * {}",
798 values.len(),
799 self.value_size(),
800 count
801 )));
802 }
803
804 #[allow(clippy::needless_update)]
805 let opts = libbpf_sys::bpf_map_batch_opts {
806 sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
807 elem_flags: elem_flags.bits(),
808 flags: flags.bits(),
809 ..Default::default()
811 };
812
813 let mut count = count;
814 let ret = unsafe {
815 libbpf_sys::bpf_map_update_batch(
816 self.as_fd().as_raw_fd(),
817 keys.as_ptr() as *const c_void,
818 values.as_ptr() as *const c_void,
819 &mut count,
820 &opts as *const libbpf_sys::bpf_map_batch_opts,
821 )
822 };
823
824 util::parse_ret(ret)
825 }
826
827 fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
835 if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
836 return Err(Error::with_invalid_data(format!(
837 "update() must be used for maps that are not per-cpu (type of the map is {:?})",
838 self.map_type(),
839 )));
840 }
841
842 if values.len() != crate::num_possible_cpus()? {
843 return Err(Error::with_invalid_data(format!(
844 "number of values {} != number of cpus {}",
845 values.len(),
846 crate::num_possible_cpus()?
847 )));
848 };
849
850 let val_size = self.value_size() as usize;
851 let aligned_val_size = percpu_aligned_value_size(self);
852 let buf_size = percpu_buffer_size(self)?;
853
854 let mut value_buf = vec![0; buf_size];
855
856 for (i, val) in values.iter().enumerate() {
857 if val.len() != val_size {
858 return Err(Error::with_invalid_data(format!(
859 "value size for cpu {} is {} != {}",
860 i,
861 val.len(),
862 val_size
863 )));
864 }
865
866 value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
867 .copy_from_slice(val);
868 }
869
870 update_raw(self, key, &value_buf, flags)
871 }
872}
873
/// An immutable handle to a created map.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable handle to a created map.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;
878
879#[derive(Debug)]
884pub struct MapImpl<'obj, T = ()> {
885 ptr: NonNull<libbpf_sys::bpf_map>,
886 _phantom: PhantomData<&'obj T>,
887}
888
889impl<'obj> Map<'obj> {
890 pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
892 let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
895 assert!(
896 map_fd(ptr).is_some(),
897 "provided BPF map does not have file descriptor"
898 );
899
900 Self {
901 ptr,
902 _phantom: PhantomData,
903 }
904 }
905
906 #[doc(hidden)]
916 pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
917 Self {
918 ptr,
919 _phantom: PhantomData,
920 }
921 }
922
923 pub fn is_pinned(&self) -> bool {
925 unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
926 }
927
928 pub fn get_pin_path(&self) -> Option<&OsStr> {
931 let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
932 if path_ptr.is_null() {
933 return None;
935 }
936 let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
937 Some(OsStr::from_bytes(path_c_str.to_bytes()))
938 }
939
940 pub fn autocreate(&self) -> bool {
942 unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
943 }
944}
945
946impl<'obj> MapMut<'obj> {
947 pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
949 let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
952 assert!(
953 map_fd(ptr).is_some(),
954 "provided BPF map does not have file descriptor"
955 );
956
957 Self {
958 ptr,
959 _phantom: PhantomData,
960 }
961 }
962
963 pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
966 let path_c = util::path_to_cstring(path)?;
967 let path_ptr = path_c.as_ptr();
968
969 let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
970 util::parse_ret(ret)
971 }
972
973 pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
976 let path_c = util::path_to_cstring(path)?;
977 let path_ptr = path_c.as_ptr();
978 let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
979 util::parse_ret(ret)
980 }
981
982 pub fn attach_struct_ops(&mut self) -> Result<Link> {
984 if self.map_type() != MapType::StructOps {
985 return Err(Error::with_invalid_data(format!(
986 "Invalid map type ({:?}) for attach_struct_ops()",
987 self.map_type(),
988 )));
989 }
990
991 let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
992 let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
993 let link = unsafe { Link::new(ptr) };
995 Ok(link)
996 }
997}
998
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapMut` and `Map` are the same `MapImpl` type differing
        // only in the `PhantomData` marker, so they share layout in practice.
        // NOTE(review): unlike `OpenMapImpl`, `MapImpl` is not declared
        // `#[repr(transparent)]`; consider adding it to make this layout
        // guarantee explicit.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
1006
impl<T> AsFd for MapImpl<'_, T> {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // `Map::new`/`MapMut::new_mut` assert that a file descriptor exists,
        // so this `unwrap` cannot fail for instances constructed through
        // them; callers of `from_map_without_fd` must uphold the invariant
        // themselves.
        let fd = map_fd(self.ptr).unwrap();
        // SAFETY: `fd` belongs to the underlying libbpf map, which outlives
        // the borrow handed out here.
        let fd = unsafe { BorrowedFd::borrow_raw(fd) };
        fd
    }
}
1019
impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    /// Retrieve the map's name via libbpf.
    fn name(&self) -> &OsStr {
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }
}
1054
impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`] pointer.
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
1064
/// A handle to a map backed by an owned file descriptor, independent of any
/// BPF object's lifetime.
///
/// Map metadata is cached at construction time.
#[derive(Debug)]
pub struct MapHandle {
    // Owned descriptor referring to the map; closed on drop.
    fd: OwnedFd,
    // Cached map name.
    name: OsString,
    // Cached map type.
    ty: MapType,
    // Cached key size, in bytes.
    key_size: u32,
    // Cached value size, in bytes.
    value_size: u32,
    // Cached maximum number of entries.
    max_entries: u32,
}
1088
1089impl MapHandle {
1090 pub fn create<T: AsRef<OsStr>>(
1092 map_type: MapType,
1093 name: Option<T>,
1094 key_size: u32,
1095 value_size: u32,
1096 max_entries: u32,
1097 opts: &libbpf_sys::bpf_map_create_opts,
1098 ) -> Result<Self> {
1099 let name = match name {
1100 Some(name) => name.as_ref().to_os_string(),
1101 None => OsString::new(),
1103 };
1104 let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
1105 Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
1106 })?;
1107 let name_c_ptr = if name.is_empty() {
1108 ptr::null()
1109 } else {
1110 name_c_str.as_bytes_with_nul().as_ptr()
1111 };
1112
1113 let fd = unsafe {
1114 libbpf_sys::bpf_map_create(
1115 map_type.into(),
1116 name_c_ptr.cast(),
1117 key_size,
1118 value_size,
1119 max_entries,
1120 opts,
1121 )
1122 };
1123 let () = util::parse_ret(fd)?;
1124
1125 Ok(Self {
1126 fd: unsafe { OwnedFd::from_raw_fd(fd) },
1130 name,
1131 ty: map_type,
1132 key_size,
1133 value_size,
1134 max_entries,
1135 })
1136 }
1137
1138 pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
1143 fn inner(path: &Path) -> Result<MapHandle> {
1144 let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
1145 let fd = parse_ret_i32(unsafe {
1146 libbpf_sys::bpf_obj_get(p.as_ptr())
1149 })?;
1150 MapHandle::from_fd(unsafe {
1151 OwnedFd::from_raw_fd(fd)
1155 })
1156 }
1157
1158 inner(path.as_ref())
1159 }
1160
1161 pub fn from_map_id(id: u32) -> Result<Self> {
1163 parse_ret_i32(unsafe {
1164 libbpf_sys::bpf_map_get_fd_by_id(id)
1167 })
1168 .map(|fd| unsafe {
1169 OwnedFd::from_raw_fd(fd)
1173 })
1174 .and_then(Self::from_fd)
1175 }
1176
1177 fn from_fd(fd: OwnedFd) -> Result<Self> {
1178 let info = MapInfo::new(fd.as_fd())?;
1179 Ok(Self {
1180 fd,
1181 name: info.name()?.into(),
1182 ty: info.map_type(),
1183 key_size: info.info.key_size,
1184 value_size: info.info.value_size,
1185 max_entries: info.info.max_entries,
1186 })
1187 }
1188
1189 pub fn freeze(&self) -> Result<()> {
1196 let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };
1197
1198 util::parse_ret(ret)
1199 }
1200
1201 pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1204 let path_c = util::path_to_cstring(path)?;
1205 let path_ptr = path_c.as_ptr();
1206
1207 let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
1208 util::parse_ret(ret)
1209 }
1210
1211 pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1214 remove_file(path).context("failed to remove pin map")
1215 }
1216}
1217
// All metadata was cached at construction time, so these accessors never
// need to consult the kernel.
impl MapCore for MapHandle {
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1244
impl AsFd for MapHandle {
    /// Borrow the owned map file descriptor.
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}
1251
1252impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
1253where
1254 T: Debug,
1255{
1256 type Error = Error;
1257
1258 fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
1259 Ok(Self {
1260 fd: other
1261 .as_fd()
1262 .try_clone_to_owned()
1263 .context("failed to duplicate map file descriptor")?,
1264 name: other.name().to_os_string(),
1265 ty: other.map_type(),
1266 key_size: other.key_size(),
1267 value_size: other.value_size(),
1268 max_entries: other.max_entries(),
1269 })
1270 }
1271}
1272
1273impl TryFrom<&Self> for MapHandle {
1274 type Error = Error;
1275
1276 fn try_from(other: &Self) -> Result<Self> {
1277 Ok(Self {
1278 fd: other
1279 .as_fd()
1280 .try_clone_to_owned()
1281 .context("failed to duplicate map file descriptor")?,
1282 name: other.name().to_os_string(),
1283 ty: other.map_type(),
1284 key_size: other.key_size(),
1285 value_size: other.value_size(),
1286 max_entries: other.max_entries(),
1287 })
1288 }
1289}
1290
bitflags! {
    /// Flags accepted by map lookup/update operations.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK = libbpf_sys::BPF_F_LOCK as _;
    }
}
1305
/// Type of a map, corresponding to the kernel's `BPF_MAP_TYPE_*` constants.
///
/// Discriminants mirror the raw kernel values; unrecognized values map to
/// [`MapType::Unknown`] via the `From<u32>` conversion.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// Catch-all for map types this enum does not know about.
    Unknown = u32::MAX,
}
1456
1457impl MapType {
1458 pub fn is_percpu(&self) -> bool {
1460 matches!(
1461 self,
1462 Self::PercpuArray | Self::PercpuHash | Self::LruPercpuHash | Self::PercpuCgroupStorage
1463 )
1464 }
1465
1466 pub fn is_hash_map(&self) -> bool {
1468 matches!(
1469 self,
1470 Self::Hash | Self::PercpuHash | Self::LruHash | Self::LruPercpuHash
1471 )
1472 }
1473
1474 fn is_keyless(&self) -> bool {
1477 matches!(self, Self::Queue | Self::Stack | Self::BloomFilter)
1478 }
1479
1480 pub fn is_bloom_filter(&self) -> bool {
1482 Self::BloomFilter.eq(self)
1483 }
1484
1485 pub fn is_supported(&self) -> Result<bool> {
1490 let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1491 match ret {
1492 0 => Ok(false),
1493 1 => Ok(true),
1494 _ => Err(Error::from_raw_os_error(-ret)),
1495 }
1496 }
1497}
1498
1499impl From<u32> for MapType {
1500 fn from(value: u32) -> Self {
1501 use MapType::*;
1502
1503 match value {
1504 x if x == Unspec as u32 => Unspec,
1505 x if x == Hash as u32 => Hash,
1506 x if x == Array as u32 => Array,
1507 x if x == ProgArray as u32 => ProgArray,
1508 x if x == PerfEventArray as u32 => PerfEventArray,
1509 x if x == PercpuHash as u32 => PercpuHash,
1510 x if x == PercpuArray as u32 => PercpuArray,
1511 x if x == StackTrace as u32 => StackTrace,
1512 x if x == CgroupArray as u32 => CgroupArray,
1513 x if x == LruHash as u32 => LruHash,
1514 x if x == LruPercpuHash as u32 => LruPercpuHash,
1515 x if x == LpmTrie as u32 => LpmTrie,
1516 x if x == ArrayOfMaps as u32 => ArrayOfMaps,
1517 x if x == HashOfMaps as u32 => HashOfMaps,
1518 x if x == Devmap as u32 => Devmap,
1519 x if x == Sockmap as u32 => Sockmap,
1520 x if x == Cpumap as u32 => Cpumap,
1521 x if x == Xskmap as u32 => Xskmap,
1522 x if x == Sockhash as u32 => Sockhash,
1523 x if x == CgroupStorage as u32 => CgroupStorage,
1524 x if x == ReuseportSockarray as u32 => ReuseportSockarray,
1525 x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
1526 x if x == Queue as u32 => Queue,
1527 x if x == Stack as u32 => Stack,
1528 x if x == SkStorage as u32 => SkStorage,
1529 x if x == DevmapHash as u32 => DevmapHash,
1530 x if x == StructOps as u32 => StructOps,
1531 x if x == RingBuf as u32 => RingBuf,
1532 x if x == InodeStorage as u32 => InodeStorage,
1533 x if x == TaskStorage as u32 => TaskStorage,
1534 x if x == BloomFilter as u32 => BloomFilter,
1535 x if x == UserRingBuf as u32 => UserRingBuf,
1536 _ => Unknown,
1537 }
1538 }
1539}
1540
impl From<MapType> for u32 {
    /// Convert a [`MapType`] into its raw kernel value (the enum's
    /// discriminant).
    fn from(value: MapType) -> Self {
        value as Self
    }
}
1546
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // The previously yielded key; `None` before the first call.
    prev: Option<Vec<u8>>,
    // Scratch buffer the kernel writes the next key into.
    next: Vec<u8>,
}
1554
1555impl<'map> MapKeyIter<'map> {
1556 fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
1557 Self {
1558 map_fd,
1559 prev: None,
1560 next: vec![0; key_size as usize],
1561 }
1562 }
1563}
1564
1565impl Iterator for MapKeyIter<'_> {
1566 type Item = Vec<u8>;
1567
1568 fn next(&mut self) -> Option<Self::Item> {
1569 let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);
1570
1571 let ret = unsafe {
1572 libbpf_sys::bpf_map_get_next_key(
1573 self.map_fd.as_raw_fd(),
1574 prev as _,
1575 self.next.as_mut_ptr() as _,
1576 )
1577 };
1578 if ret != 0 {
1579 None
1580 } else {
1581 self.prev = Some(self.next.clone());
1582 Some(self.next.clone())
1583 }
1584 }
1585}
1586
/// An iterator yielding key/value pairs from a BPF map, fetched from the
/// kernel in batches via `bpf_map_lookup_batch` (or
/// `bpf_map_lookup_and_delete_batch`).
#[derive(Debug)]
pub struct BatchedMapIter<'map> {
    // File descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // When `true`, entries are deleted from the map as they are looked
    // up (`bpf_map_lookup_and_delete_batch`).
    delete: bool,
    // Maximum number of entries requested per batch.
    count: usize,
    // Size in bytes of a single key.
    key_size: usize,
    // Size in bytes of a single value.
    value_size: usize,
    // Flat buffer holding up to `count` keys of `key_size` bytes each.
    keys: Vec<u8>,
    // Flat buffer holding up to `count` values of `value_size` bytes each.
    values: Vec<u8>,
    // Position token of the previous batch, passed back to the kernel as
    // the starting point of the next lookup; `None` starts from the
    // beginning of the map.
    prev: Option<Vec<u8>>,
    // Buffer the kernel writes the next batch's position token into.
    next: Vec<u8>,
    // Options forwarded verbatim to the batch syscalls.
    batch_opts: libbpf_sys::bpf_map_batch_opts,
    // Index of the next entry to yield within the current batch; `None`
    // before the first lookup or after a hard lookup failure.
    index: Option<usize>,
}
1602
impl<'map> BatchedMapIter<'map> {
    /// Create a batched iterator over the map behind `map_fd`, fetching
    /// up to `count` entries of `key_size`/`value_size` bytes per
    /// kernel round trip. When `delete` is set, entries are removed from
    /// the map as they are read.
    fn new(
        map_fd: BorrowedFd<'map>,
        count: u32,
        key_size: u32,
        value_size: u32,
        batch_opts: libbpf_sys::bpf_map_batch_opts,
        delete: bool,
    ) -> Self {
        Self {
            map_fd,
            delete,
            count: count as usize,
            key_size: key_size as usize,
            value_size: value_size as usize,
            // Buffers sized for one full batch; their lengths are later
            // trimmed (via `set_len`) to what the kernel actually wrote.
            keys: vec![0; (count * key_size) as usize],
            values: vec![0; (count * value_size) as usize],
            prev: None,
            next: vec![0; key_size as usize],
            batch_opts,
            index: None,
        }
    }

    /// Fetch the next batch of entries from the kernel into
    /// `self.keys`/`self.values`, resetting `self.index` to the start of
    /// the batch (or to `None` on a hard failure).
    fn lookup_next_batch(&mut self) {
        // `None` means "start from the beginning of the map".
        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
        // In/out argument: requested batch size in, number of entries
        // actually returned out.
        let mut count = self.count as u32;

        let ret = unsafe {
            let lookup_fn = if self.delete {
                libbpf_sys::bpf_map_lookup_and_delete_batch
            } else {
                libbpf_sys::bpf_map_lookup_batch
            };
            lookup_fn(
                self.map_fd.as_raw_fd(),
                prev.cast(),
                self.next.as_mut_ptr().cast(),
                self.keys.as_mut_ptr().cast(),
                self.values.as_mut_ptr().cast(),
                &mut count,
                &self.batch_opts,
            )
        };

        if let Err(e) = util::parse_ret(ret) {
            match e.kind() {
                // End of the map reached; fall through so that any
                // entries returned in this final (possibly partial)
                // batch are still surfaced.
                error::ErrorKind::NotFound => {}
                // Syscall interrupted; retry transparently.
                error::ErrorKind::Interrupted => {
                    return self.lookup_next_batch();
                }
                _ => {
                    // Hard failure: terminate iteration.
                    self.index = None;
                    return;
                }
            }
        }

        // Resume the next lookup from the position token the kernel
        // just reported, and restart indexing at the batch's first entry.
        self.prev = Some(self.next.clone());
        self.index = Some(0);

        // SAFETY: the kernel updated `count` to the number of entries it
        // wrote into `keys`/`values`, so the new lengths cover only
        // initialized bytes and never exceed the buffers' original
        // allocations of `self.count` entries.
        unsafe {
            self.keys.set_len(self.key_size * count as usize);
            self.values.set_len(self.value_size * count as usize);
        }
    }
}
1672
1673impl Iterator for BatchedMapIter<'_> {
1674 type Item = (Vec<u8>, Vec<u8>);
1675
1676 fn next(&mut self) -> Option<Self::Item> {
1677 let load_next_batch = match self.index {
1678 Some(index) => {
1679 let batch_finished = index * self.key_size >= self.keys.len();
1680 let last_batch = self.keys.len() < self.key_size * self.count;
1681 batch_finished && !last_batch
1682 }
1683 None => true,
1684 };
1685
1686 if load_next_batch {
1687 self.lookup_next_batch();
1688 }
1689
1690 let index = self.index?;
1691 let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
1692 let val = self
1693 .values
1694 .chunks_exact(self.value_size)
1695 .nth(index)?
1696 .to_vec();
1697
1698 self.index = Some(index + 1);
1699 Some((key, val))
1700 }
1701}
1702
/// A wrapper around the kernel-provided [`bpf_map_info`] describing a
/// BPF map.
#[derive(Debug)]
pub struct MapInfo {
    /// The raw information as filled in by `bpf_obj_get_info_by_fd`.
    pub info: bpf_map_info,
}
1710
impl MapInfo {
    /// Create a `MapInfo` by querying the kernel about the map behind
    /// the given file descriptor.
    ///
    /// # Errors
    /// Fails if the underlying `bpf_obj_get_info_by_fd` call reports an
    /// error.
    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
        let mut map_info = bpf_map_info::default();
        let mut size = mem::size_of_val(&map_info) as u32;
        // SAFETY: `map_info` is a live, default-initialized struct and
        // `size` reports its exact byte size, so the kernel writes only
        // within its bounds.
        let () = util::parse_ret(unsafe {
            bpf_obj_get_info_by_fd(
                fd.as_raw_fd(),
                &mut map_info as *mut bpf_map_info as *mut c_void,
                &mut size as *mut u32,
            )
        })?;
        Ok(Self { info: map_info })
    }

    /// The type of the map, decoded from the raw `type_` field.
    #[inline]
    pub fn map_type(&self) -> MapType {
        MapType::from(self.info.type_)
    }

    /// The name of this map, decoded from the fixed-size,
    /// NUL-terminated `name` array of the kernel-provided info.
    ///
    /// # Errors
    /// Fails if the name buffer contains no NUL byte or is not valid
    /// UTF-8.
    //
    // NOTE(review): the lifetime `'a` is unconstrained — it is not tied
    // to `&self` even though the returned `&str` borrows from
    // `self.info.name`, so callers can make the reference outlive the
    // `MapInfo`. Changing the signature would break callers; confirm
    // whether this is intentional.
    pub fn name<'a>(&self) -> Result<&'a str> {
        // Reinterpret the `c_char` name buffer as a slice of the same
        // length for NUL-terminator scanning.
        let char_slice =
            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };

        util::c_char_slice_to_cstr(char_slice)
            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
            .to_str()
            .map_err(Error::with_invalid_data)
    }

    /// The flags the map was created with; bits not covered by
    /// [`MapFlags`] are silently dropped.
    #[inline]
    pub fn flags(&self) -> MapFlags {
        MapFlags::from_bits_truncate(self.info.map_flags as u64)
    }
}
1754
/// Map information parsed from `/proc/self/fdinfo/<fd>`.
///
/// Fields that may be absent from the fdinfo file are wrapped in
/// `Option`; the remaining fields are mandatory and parsing fails
/// without them.
#[derive(Debug, Clone)]
pub struct MapFdInfo {
    /// The type of the map.
    pub map_type: MapType,
    /// The size of a key, in bytes.
    pub key_size: u32,
    /// The size of a value, in bytes.
    pub value_size: u32,
    /// The maximum number of entries the map can hold.
    pub max_entries: u32,
    /// The map's flags (hexadecimal `map_flags` line), if reported.
    pub map_flags: Option<u32>,
    /// The map's extra settings (hexadecimal `map_extra` line), if
    /// reported.
    pub map_extra: Option<u64>,
    /// The `memlock` value from fdinfo, if reported.
    pub memlock: Option<u64>,
    /// The ID of the map, if reported.
    pub map_id: Option<u32>,
    /// Whether the map is frozen (`frozen` line, nonzero means `true`),
    /// if reported.
    pub frozen: Option<bool>,
    /// The program type owning the map, if reported.
    pub owner_prog_type: Option<ProgramType>,
    /// The `owner_jited` flag (nonzero means `true`), if reported.
    pub owner_jited: Option<bool>,
}
1792
1793impl MapFdInfo {
1794 pub fn from_fd(fd: BorrowedFd<'_>) -> Result<Self> {
1796 let path = format!("/proc/self/fdinfo/{}", fd.as_raw_fd());
1797 let file = File::open(&path).with_context(|| format!("failed to open `{path}`"))?;
1798 let reader = BufReader::new(file);
1799
1800 let parse = |key: &str, val: &str| -> Result<u32> {
1801 val.parse()
1802 .map_err(|e| Error::with_invalid_data(format!("`{key}`: {e}")))
1803 };
1804
1805 let mut map_type = None;
1806 let mut key_size = None;
1807 let mut value_size = None;
1808 let mut max_entries = None;
1809 let mut map_flags = None;
1810 let mut map_extra = None;
1811 let mut memlock = None;
1812 let mut map_id = None;
1813 let mut frozen = None;
1814 let mut owner_prog_type = None;
1815 let mut owner_jited = None;
1816
1817 for result in reader.lines() {
1818 let line = result?;
1819 let Some((key, value)) = line.split_once('\t') else {
1820 continue;
1821 };
1822 let key = key.trim_end_matches(':');
1824 let value = value.trim();
1825
1826 match key {
1827 "map_type" => map_type = Some(parse(key, value)?),
1828 "key_size" => key_size = Some(parse(key, value)?),
1829 "value_size" => value_size = Some(parse(key, value)?),
1830 "max_entries" => max_entries = Some(parse(key, value)?),
1831 "map_flags" => {
1832 map_flags =
1833 Some(parse_hex(value).with_context(|| format!("bad `{key}`"))? as u32)
1834 }
1835 "map_extra" => {
1836 map_extra = Some(parse_hex(value).with_context(|| format!("bad `{key}`"))?)
1837 }
1838 "memlock" => memlock = Some(parse(key, value)? as u64),
1839 "map_id" => map_id = Some(parse(key, value)?),
1840 "frozen" => frozen = Some(parse(key, value)? != 0),
1841 "owner_prog_type" => owner_prog_type = Some(parse(key, value)?),
1842 "owner_jited" => owner_jited = Some(parse(key, value)? != 0),
1843 _ => {}
1844 }
1845 }
1846
1847 let missing = |f| Error::with_invalid_data(format!("missing `{f}` in fdinfo"));
1848
1849 Ok(Self {
1850 map_type: MapType::from(map_type.ok_or_else(|| missing("map_type"))?),
1851 key_size: key_size.ok_or_else(|| missing("key_size"))?,
1852 value_size: value_size.ok_or_else(|| missing("value_size"))?,
1853 max_entries: max_entries.ok_or_else(|| missing("max_entries"))?,
1854 map_flags,
1855 map_extra,
1856 memlock,
1857 map_id,
1858 frozen,
1859 owner_prog_type: owner_prog_type.map(ProgramType::from),
1860 owner_jited,
1861 })
1862 }
1863}
1864
1865fn parse_hex(s: &str) -> Result<u64> {
1867 if let Some(hex) = s.strip_prefix("0x").or_else(|| s.strip_prefix("0X")) {
1868 u64::from_str_radix(hex, 16)
1869 } else {
1870 s.parse()
1871 }
1872 .map_err(Error::with_invalid_data)
1873}
1874
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Every known variant should survive a round trip through its
    /// `u32` representation.
    #[test]
    fn map_type() {
        use MapType::*;

        let variants = [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ];
        for t in variants {
            let expected = discriminant(&t);
            assert_eq!(expected, discriminant(&MapType::from(t as u32)));
        }
    }

    /// Unprefixed inputs are parsed as base 10.
    #[test]
    fn parse_hex_decimal() {
        let cases = [("0", 0), ("42", 42), ("18446744073709551615", u64::MAX)];
        for (input, expected) in cases {
            assert_eq!(parse_hex(input).unwrap(), expected);
        }
    }

    /// `0x`/`0X` prefixed inputs are parsed as base 16.
    #[test]
    fn parse_hex_hex_prefix() {
        let cases = [("0x0", 0), ("0xff", 255), ("0X1A", 26), ("0xdeadbeef", 0xdeadbeef)];
        for (input, expected) in cases {
            assert_eq!(parse_hex(input).unwrap(), expected);
        }
    }

    /// Malformed inputs must be rejected.
    #[test]
    fn parse_hex_invalid() {
        for input in ["", "xyz", "0xGG"] {
            assert!(parse_hex(input).is_err(), "`{input}` unexpectedly parsed");
        }
    }
}