1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::io;
9use std::marker::PhantomData;
10use std::mem;
11use std::mem::transmute;
12use std::ops::Deref;
13use std::os::unix::ffi::OsStrExt;
14use std::os::unix::io::AsFd;
15use std::os::unix::io::AsRawFd;
16use std::os::unix::io::BorrowedFd;
17use std::os::unix::io::FromRawFd;
18use std::os::unix::io::OwnedFd;
19use std::os::unix::io::RawFd;
20use std::path::Path;
21use std::ptr;
22use std::ptr::NonNull;
23use std::slice;
24use std::slice::from_raw_parts;
25
26use bitflags::bitflags;
27use libbpf_sys::bpf_map_info;
28use libbpf_sys::bpf_obj_get_info_by_fd;
29
30use crate::error;
31use crate::util;
32use crate::util::parse_ret_i32;
33use crate::util::validate_bpf_ret;
34use crate::AsRawLibbpf;
35use crate::Error;
36use crate::ErrorExt as _;
37use crate::Link;
38use crate::Mut;
39use crate::Result;
40
/// An immutable handle to a BPF map that has been opened (parsed) but not yet
/// loaded into the kernel.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable handle to a BPF map that has been opened (parsed) but not yet
/// loaded into the kernel.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;

/// A parsed but not yet loaded BPF map.
///
/// Generic over a marker type `T`: `()` for shared/immutable access and
/// [`Mut`] for exclusive/mutable access (see the aliases above).
#[derive(Debug)]
// `repr(transparent)` is required: `Deref for OpenMapMut` transmutes
// `&OpenMapMut` into `&OpenMap`, which is only sound if both have the exact
// layout of the single `NonNull` field.
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Pointer to the underlying libbpf map object; never NULL.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper's lifetime to the borrowed libbpf object and carries
    // the mutability marker without storing a `T`.
    _phantom: PhantomData<&'obj T>,
}
58
impl<'obj> OpenMap<'obj> {
    /// Create a new [`OpenMap`] from a reference to a `libbpf_sys::bpf_map`.
    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
        Self {
            // SAFETY: a valid reference is never NULL. The cast to `*mut` is
            //         fine because this immutable wrapper never writes
            //         through the pointer.
            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the map's name.
    pub fn name(&self) -> &OsStr {
        // SAFETY: `self.ptr` is valid for the lifetime of `self`.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` returns a valid, NUL-terminated string for
        //         a valid map object.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve the type of the map.
    pub fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    /// Retrieve the raw pointer to and byte size of the map's initial value
    /// data; the pointer is NULL when the map has no such data.
    fn initial_value_raw(&self) -> (*mut u8, usize) {
        let mut size = 0u64;
        let ptr = unsafe {
            // `bpf_map__initial_value` writes the data size through the
            // second argument and returns NULL when unavailable.
            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
        };
        (ptr.cast(), size as _)
    }

    /// Retrieve the initial value of the map, if any.
    pub fn initial_value(&self) -> Option<&[u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `ptr` is non-NULL and libbpf reported `size` readable
            //         bytes behind it; the data lives as long as the map.
            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Retrieve the maximum number of entries the map can hold.
    pub fn max_entries(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }
}
110
impl<'obj> OpenMapMut<'obj> {
    /// Create a new mutable [`OpenMapMut`] from a `libbpf_sys::bpf_map`.
    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
        Self {
            // SAFETY: a mutable reference is always non-NULL.
            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the map's initial value data as a mutable slice, if any.
    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: `ptr` is non-NULL with `size` valid bytes behind it,
            //         and `&mut self` guarantees exclusive access.
            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Bind the map to the network interface with the given index.
    pub fn set_map_ifindex(&mut self, idx: u32) {
        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
    }

    /// Set the initial value of the map. Only meaningful before load.
    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::bpf_map__set_initial_value(
                self.ptr.as_ptr(),
                data.as_ptr() as *const c_void,
                data.len() as libbpf_sys::size_t,
            )
        };

        util::parse_ret(ret)
    }

    /// Set the type of the map.
    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
        util::parse_ret(ret)
    }

    /// Set the key size of the map, in bytes.
    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the value size of the map, in bytes.
    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the maximum number of entries the map may hold.
    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
        util::parse_ret(ret)
    }

    /// Set creation flags (`BPF_F_*` bits) for the map.
    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
        util::parse_ret(ret)
    }

    /// Set the NUMA node on which the map should be allocated.
    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
        util::parse_ret(ret)
    }

    /// Set the inner-map file descriptor (for map-in-map map types).
    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
        };
        util::parse_ret(ret)
    }

    /// Set type-specific extra configuration bits for the map.
    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
        util::parse_ret(ret)
    }

    /// Control whether libbpf auto-creates this map on object load.
    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
        util::parse_ret(ret)
    }

    /// Set the path at which this map should be pinned (or reused) on load.
    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        // `path_c` outlives the FFI call below, keeping the pointer valid.
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Reuse an already-created map's file descriptor for this map.
    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
        util::parse_ret(ret)
    }

    /// Reuse a map pinned at the given BPF filesystem path for this map.
    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let cstring = util::path_to_cstring(path)?;

        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
        if fd < 0 {
            return Err(Error::from(io::Error::last_os_error()));
        }

        // SAFETY: `fd` was just returned by the kernel and is exclusively
        //         owned here; wrapping it ensures it is closed on all paths.
        let fd = unsafe { OwnedFd::from_raw_fd(fd) };

        // NOTE(review): `fd` is dropped (closed) right after this call, which
        // presumes `bpf_map__reuse_fd` duplicates the descriptor internally —
        // confirm against libbpf's documentation.
        let reuse_result = self.reuse_fd(fd.as_fd());

        reuse_result
    }
}
253
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and both types wrap
        //         the same single `NonNull` field, so `&OpenMapMut` and
        //         `&OpenMap` have identical layout; downgrading mutable to
        //         shared access is always sound.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
263
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`] object.
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
272
273pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
274 let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
275 let fd = util::parse_ret_i32(fd).ok();
276 fd
277}
278
279fn percpu_aligned_value_size<M>(map: &M) -> usize
282where
283 M: MapCore + ?Sized,
284{
285 let val_size = map.value_size() as usize;
286 util::roundup(val_size, 8)
287}
288
289fn percpu_buffer_size<M>(map: &M) -> Result<usize>
291where
292 M: MapCore + ?Sized,
293{
294 let aligned_val_size = percpu_aligned_value_size(map);
295 let ncpu = crate::num_possible_cpus()?;
296 Ok(ncpu * aligned_val_size)
297}
298
299fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
303where
304 M: MapCore + ?Sized,
305{
306 if map.key_size() == 0 && map.map_type().is_keyless() {
308 return ptr::null();
309 }
310
311 key.as_ptr() as *const c_void
312}
313
/// Look up `key` in `map`, returning `out_size` bytes of value data.
///
/// Returns `Ok(None)` when the key is not present, and an error for any
/// other syscall failure. `out_size` must be the number of bytes the kernel
/// will write for this map type (plain value size, or the per-CPU buffer
/// size for per-cpu maps).
fn lookup_raw<M>(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>>
where
    M: MapCore + ?Sized,
{
    // Reject wrongly-sized keys up front to avoid reading out of bounds.
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    // Uninitialized spare capacity; the kernel fills it on success.
    let mut out: Vec<u8> = Vec::with_capacity(out_size);

    let ret = unsafe {
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            out.as_mut_ptr() as *mut c_void,
            flags.bits(),
        )
    };

    if ret == 0 {
        unsafe {
            // SAFETY: on success the kernel wrote `out_size` bytes into the
            //         buffer, whose capacity is at least `out_size`.
            out.set_len(out_size);
        }
        Ok(Some(out))
    } else {
        let err = io::Error::last_os_error();
        // ENOENT means "key absent", which is not an error for lookups.
        if err.kind() == io::ErrorKind::NotFound {
            Ok(None)
        } else {
            Err(Error::from(err))
        }
    }
}
352
/// Write `value` under `key` in `map` honoring `flags`.
///
/// Validates the key length only; callers are responsible for passing a
/// correctly sized value (plain or per-CPU buffer, depending on map type).
fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
where
    M: MapCore + ?Sized,
{
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    let ret = unsafe {
        libbpf_sys::bpf_map_update_elem(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            value.as_ptr() as *const c_void,
            flags.bits(),
        )
    };

    util::parse_ret(ret)
}
378
/// Build a [`BatchedMapIter`] that reads (and optionally deletes) up to
/// `count` elements per batched syscall.
fn lookup_batch_raw<M>(
    map: &M,
    count: u32,
    elem_flags: MapFlags,
    flags: MapFlags,
    delete: bool,
) -> BatchedMapIter<'_>
where
    M: MapCore + ?Sized,
{
    #[allow(clippy::needless_update)]
    let opts = libbpf_sys::bpf_map_batch_opts {
        // libbpf uses `sz` to version-check the opts struct.
        sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
        elem_flags: elem_flags.bits(),
        flags: flags.bits(),
        ..Default::default()
    };

    // NOTE(review): hash maps are given a minimum key size of 4 bytes here —
    // presumably because the kernel's batch-lookup "next key" cookie needs at
    // least that much space; confirm against kernel batch-op requirements.
    let key_size = if map.map_type().is_hash_map() {
        map.key_size().max(4)
    } else {
        map.key_size()
    };

    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
}
409
410fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
412where
413 M: MapCore + ?Sized,
414{
415 if map.map_type().is_bloom_filter() {
416 return Err(Error::with_invalid_data(
417 "lookup_bloom_filter() must be used for bloom filter maps",
418 ));
419 }
420 if map.map_type().is_percpu() {
421 return Err(Error::with_invalid_data(format!(
422 "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
423 map.map_type(),
424 )));
425 }
426
427 Ok(())
428}
429
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    // Sealing trait: prevents downstream crates from implementing `MapCore`,
    // keeping the set of implementors limited to the two below.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
439
/// Core operations shared by all fully initialized (loaded) map types.
///
/// Sealed: only [`MapImpl`] and [`MapHandle`] implement this trait.
pub trait MapCore: Debug + AsFd + private::Sealed {
    /// Retrieve the map's name.
    fn name(&self) -> &OsStr;

    /// Retrieve the type of the map.
    fn map_type(&self) -> MapType;

    /// Retrieve the size of the map's keys, in bytes.
    fn key_size(&self) -> u32;

    /// Retrieve the size of the map's values, in bytes.
    fn value_size(&self) -> u32;

    /// Retrieve the maximum number of entries the map can hold.
    fn max_entries(&self) -> u32;

    /// Fetch extended information about the map from the kernel.
    #[inline]
    fn info(&self) -> Result<MapInfo> {
        MapInfo::new(self.as_fd())
    }

    /// Return an iterator over the keys currently in this map.
    fn keys(&self) -> MapKeyIter<'_> {
        MapKeyIter::new(self.as_fd(), self.key_size())
    }

    /// Look up a key's value.
    ///
    /// Returns `Ok(None)` if the key is absent. Must not be used on
    /// bloom-filter or per-cpu maps (see [`Self::lookup_bloom_filter`] and
    /// [`Self::lookup_percpu`]).
    ///
    /// # Errors
    /// Fails on wrong key size or any syscall error other than "not found".
    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
        check_not_bloom_or_percpu(self)?;
        let out_size = self.value_size() as usize;
        lookup_raw(self, key, flags, out_size)
    }

    /// Iterate over key/value pairs in batches of up to `count` elements.
    fn lookup_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
    }

    /// Like [`Self::lookup_batch`], but deletes each batch of elements
    /// from the map as it is read.
    fn lookup_and_delete_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
    }

    /// Test whether `value` may be present in a bloom-filter map.
    ///
    /// Returns `Ok(true)` when the filter reports a (possible) hit and
    /// `Ok(false)` on a definite miss.
    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
        let ret = unsafe {
            // Bloom filters take a NULL key; the value is the query input.
            // The temporary `to_vec()` provides the mutable pointer the FFI
            // signature requires and lives until the end of this statement.
            libbpf_sys::bpf_map_lookup_elem(
                self.as_fd().as_raw_fd(),
                ptr::null(),
                value.to_vec().as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            Ok(true)
        } else {
            let err = io::Error::last_os_error();
            // ENOENT signals "not in the filter" rather than a failure.
            if err.kind() == io::ErrorKind::NotFound {
                Ok(false)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Look up a key in a per-cpu map, returning one value per possible CPU.
    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
        // `Unknown` is tolerated because the true type cannot be determined.
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let out_size = percpu_buffer_size(self)?;

        let raw_res = lookup_raw(self, key, flags, out_size)?;
        if let Some(raw_vals) = raw_res {
            let mut out = Vec::new();
            // Split the flat kernel buffer into one (unpadded) value per CPU.
            for chunk in raw_vals.chunks_exact(aligned_val_size) {
                out.push(chunk[..val_size].to_vec());
            }
            Ok(Some(out))
        } else {
            Ok(None)
        }
    }

    /// Delete an element from the map.
    ///
    /// # Errors
    /// Fails on wrong key size or when the syscall fails (including when the
    /// key does not exist).
    fn delete(&self, key: &[u8]) -> Result<()> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let ret = unsafe {
            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
        };
        util::parse_ret(ret)
    }

    /// Delete `count` elements whose keys are concatenated in `keys`.
    ///
    /// `keys` must be exactly `count * key_size()` bytes long.
    fn delete_batch(
        &self,
        keys: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        // Both checks together enforce keys.len() == count * key_size().
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            ..Default::default()
        };

        // The kernel updates `count` with the number of elements processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_delete_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };
        util::parse_ret(ret)
    }

    /// Atomically look up an element and remove it from the map.
    ///
    /// Returns `Ok(None)` if the key is absent.
    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        // Uninitialized spare capacity, filled by the kernel on success.
        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);

        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_and_delete_elem(
                self.as_fd().as_raw_fd(),
                map_key(self, key),
                out.as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            unsafe {
                // SAFETY: on success the kernel wrote `value_size()` bytes,
                //         which is exactly the buffer's capacity.
                out.set_len(self.value_size() as usize);
            }
            Ok(Some(out))
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Insert or update an element. Must not be used on per-cpu maps
    /// (see [`Self::update_percpu`]).
    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
        if self.map_type().is_percpu() {
            return Err(Error::with_invalid_data(format!(
                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value_size {} != {}",
                value.len(),
                self.value_size()
            )));
        };

        update_raw(self, key, value, flags)
    }

    /// Insert or update `count` elements whose keys and values are
    /// concatenated in `keys` and `values` respectively.
    fn update_batch(
        &self,
        keys: &[u8],
        values: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch value_size {} != {} * {}",
                values.len(),
                self.value_size(),
                count
            )));
        }

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            ..Default::default()
        };

        // The kernel updates `count` with the number of elements processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_update_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                values.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };

        util::parse_ret(ret)
    }

    /// Update an element in a per-cpu map, one value per possible CPU.
    ///
    /// `values` must contain exactly `num_possible_cpus()` entries, each of
    /// `value_size()` bytes.
    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
        // `Unknown` is tolerated because the true type cannot be determined.
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if values.len() != crate::num_possible_cpus()? {
            return Err(Error::with_invalid_data(format!(
                "number of values {} != number of cpus {}",
                values.len(),
                crate::num_possible_cpus()?
            )));
        };

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let buf_size = percpu_buffer_size(self)?;

        // Flat, zero-padded buffer with one aligned slot per CPU.
        let mut value_buf = vec![0; buf_size];

        for (i, val) in values.iter().enumerate() {
            if val.len() != val_size {
                return Err(Error::with_invalid_data(format!(
                    "value size for cpu {} is {} != {}",
                    i,
                    val.len(),
                    val_size
                )));
            }

            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
                .copy_from_slice(val);
        }

        update_raw(self, key, &value_buf, flags)
    }
}
785
/// An immutable, loaded BPF map.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable, loaded BPF map.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;

/// A loaded BPF map, generic over the mutability marker `T` (`()` for shared
/// access, [`Mut`] for exclusive access).
///
/// NOTE(review): unlike `OpenMapImpl` this struct is not `repr(transparent)`,
/// yet `Deref for MapMut` transmutes between the two instantiations — with a
/// single non-zero-sized field the layouts match in practice, but an explicit
/// `repr(transparent)` would make that guarantee; confirm upstream intent.
#[derive(Debug)]
pub struct MapImpl<'obj, T = ()> {
    // Pointer to the underlying libbpf map object; never NULL.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Ties the wrapper's lifetime to the BPF object and carries `T`.
    _phantom: PhantomData<&'obj T>,
}
800
impl<'obj> Map<'obj> {
    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the map has no file descriptor, i.e., if it has not been
    /// loaded into the kernel.
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: a valid reference is never NULL; the immutable wrapper
        //         never writes through the pointer.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Create a [`Map`] from a raw map pointer, skipping the file descriptor
    /// check performed by [`Map::new`].
    ///
    /// NOTE(review): callers of `AsFd` on such a map will hit the `unwrap`
    /// in that impl if the map truly has no fd — hence hidden and `unsafe`.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Whether this map has been pinned to the BPF filesystem.
    pub fn is_pinned(&self) -> bool {
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// The path this map is pinned at, if any.
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // No pin path was set for this map.
            return None;
        }
        // SAFETY: a non-NULL pin path is a valid, NUL-terminated string.
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }
}
852
impl<'obj> MapMut<'obj> {
    /// Create a mutable [`MapMut`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the map has no file descriptor, i.e., if it has not been
    /// loaded into the kernel.
    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: a mutable reference is always non-NULL.
        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Pin this map to the BPF filesystem at `path`.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        // `path_c` outlives the FFI call, keeping the pointer valid.
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Remove this map's pin at `path` from the BPF filesystem.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();
        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Attach a struct_ops map to its kernel hook, returning the [`Link`]
    /// that keeps the attachment alive.
    ///
    /// # Errors
    /// Fails if the map is not of type [`MapType::StructOps`] or libbpf
    /// reports an attach failure.
    pub fn attach_struct_ops(&mut self) -> Result<Link> {
        if self.map_type() != MapType::StructOps {
            return Err(Error::with_invalid_data(format!(
                "Invalid map type ({:?}) for attach_struct_ops()",
                self.map_type(),
            )));
        }

        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
        // SAFETY: `validate_bpf_ret` established that `ptr` is a valid,
        //         owned link object.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
}
905
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapMut` and `Map` are the same `MapImpl` struct modulo the
        //         zero-sized marker in `PhantomData`, so the layouts match;
        //         downgrading mutable to shared access is always sound.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
913
impl<T> AsFd for MapImpl<'_, T> {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // The `unwrap` is justified by the constructors' assertion that the
        // map has a file descriptor. NOTE(review): `from_map_without_fd`
        // bypasses that check, in which case this would panic.
        let fd = map_fd(self.ptr).unwrap();
        // SAFETY: `fd` belongs to the underlying map object, which outlives
        //         the `'_` borrow of `self`.
        let fd = unsafe { BorrowedFd::borrow_raw(fd) };
        fd
    }
}
926
impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    fn name(&self) -> &OsStr {
        // SAFETY: `self.ptr` is valid for the lifetime of `self`.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` returns a valid, NUL-terminated string.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }
}
961
962impl AsRawLibbpf for Map<'_> {
963 type LibbpfType = libbpf_sys::bpf_map;
964
965 #[inline]
967 fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
968 self.ptr
969 }
970}
971
/// A handle to a BPF map backed by an owned file descriptor, independent of
/// any BPF object's lifetime.
///
/// The map attributes are cached at construction time (queried from the
/// kernel or copied from the source map) so accessors need no syscalls.
#[derive(Debug)]
pub struct MapHandle {
    // Owned descriptor; the map stays alive at least as long as this handle.
    fd: OwnedFd,
    // Cached map name.
    name: OsString,
    // Cached map type.
    ty: MapType,
    // Cached key size, in bytes.
    key_size: u32,
    // Cached value size, in bytes.
    value_size: u32,
    // Cached maximum number of entries.
    max_entries: u32,
}
995
996impl MapHandle {
997 pub fn create<T: AsRef<OsStr>>(
999 map_type: MapType,
1000 name: Option<T>,
1001 key_size: u32,
1002 value_size: u32,
1003 max_entries: u32,
1004 opts: &libbpf_sys::bpf_map_create_opts,
1005 ) -> Result<Self> {
1006 let name = match name {
1007 Some(name) => name.as_ref().to_os_string(),
1008 None => OsString::new(),
1010 };
1011 let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
1012 Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
1013 })?;
1014 let name_c_ptr = if name.is_empty() {
1015 ptr::null()
1016 } else {
1017 name_c_str.as_bytes_with_nul().as_ptr()
1018 };
1019
1020 let fd = unsafe {
1021 libbpf_sys::bpf_map_create(
1022 map_type.into(),
1023 name_c_ptr.cast(),
1024 key_size,
1025 value_size,
1026 max_entries,
1027 opts,
1028 )
1029 };
1030 let () = util::parse_ret(fd)?;
1031
1032 Ok(Self {
1033 fd: unsafe { OwnedFd::from_raw_fd(fd) },
1037 name,
1038 ty: map_type,
1039 key_size,
1040 value_size,
1041 max_entries,
1042 })
1043 }
1044
1045 pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
1050 fn inner(path: &Path) -> Result<MapHandle> {
1051 let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
1052 let fd = parse_ret_i32(unsafe {
1053 libbpf_sys::bpf_obj_get(p.as_ptr())
1056 })?;
1057 MapHandle::from_fd(unsafe {
1058 OwnedFd::from_raw_fd(fd)
1062 })
1063 }
1064
1065 inner(path.as_ref())
1066 }
1067
1068 pub fn from_map_id(id: u32) -> Result<Self> {
1070 parse_ret_i32(unsafe {
1071 libbpf_sys::bpf_map_get_fd_by_id(id)
1074 })
1075 .map(|fd| unsafe {
1076 OwnedFd::from_raw_fd(fd)
1080 })
1081 .and_then(Self::from_fd)
1082 }
1083
1084 fn from_fd(fd: OwnedFd) -> Result<Self> {
1085 let info = MapInfo::new(fd.as_fd())?;
1086 Ok(Self {
1087 fd,
1088 name: info.name()?.into(),
1089 ty: info.map_type(),
1090 key_size: info.info.key_size,
1091 value_size: info.info.value_size,
1092 max_entries: info.info.max_entries,
1093 })
1094 }
1095
1096 pub fn freeze(&self) -> Result<()> {
1103 let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };
1104
1105 util::parse_ret(ret)
1106 }
1107
1108 pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1111 let path_c = util::path_to_cstring(path)?;
1112 let path_ptr = path_c.as_ptr();
1113
1114 let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
1115 util::parse_ret(ret)
1116 }
1117
1118 pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1121 remove_file(path).context("failed to remove pin map")
1122 }
1123}
1124
// All attributes were cached at construction, so these accessors are pure
// field reads — no syscalls.
impl MapCore for MapHandle {
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1151
impl AsFd for MapHandle {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // Borrow the owned descriptor; cannot fail.
        self.fd.as_fd()
    }
}
1158
impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
where
    T: Debug,
{
    type Error = Error;

    /// Duplicate the map's file descriptor into an owned handle, caching the
    /// map's attributes. Fails only if the descriptor cannot be cloned.
    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
            max_entries: other.max_entries(),
        })
    }
}
1179
impl TryFrom<&MapHandle> for MapHandle {
    type Error = Error;

    /// Clone a handle by duplicating its file descriptor and copying the
    /// cached attributes. Fails only if the descriptor cannot be cloned.
    fn try_from(other: &MapHandle) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
            max_entries: other.max_entries(),
        })
    }
}
1197
bitflags! {
    /// Flags accepted by the map lookup/update operations, mirroring the
    /// kernel's `BPF_*` element flags.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// Create a new element or update an existing one.
        const ANY = libbpf_sys::BPF_ANY as _;
        /// Create a new element only if it did not exist.
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// Update an existing element only.
        const EXIST = libbpf_sys::BPF_EXIST as _;
        /// Operate on the element under its spin lock.
        const LOCK = libbpf_sys::BPF_F_LOCK as _;
    }
}
1212
/// The type of a BPF map, mirroring the kernel's `BPF_MAP_TYPE_*` constants
/// one-to-one; see the `bpf(2)` man page for each type's semantics.
///
/// `repr(u32)` keeps each discriminant equal to its kernel constant, which
/// the `From<u32>` conversion below relies on. `Unknown` is a local sentinel
/// (`u32::MAX`) for values this crate does not recognize.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// Sentinel for map type values not known to this crate.
    Unknown = u32::MAX,
}
1363
1364impl MapType {
1365 pub fn is_percpu(&self) -> bool {
1367 matches!(
1368 self,
1369 MapType::PercpuArray
1370 | MapType::PercpuHash
1371 | MapType::LruPercpuHash
1372 | MapType::PercpuCgroupStorage
1373 )
1374 }
1375
1376 pub fn is_hash_map(&self) -> bool {
1378 matches!(
1379 self,
1380 MapType::Hash | MapType::PercpuHash | MapType::LruHash | MapType::LruPercpuHash
1381 )
1382 }
1383
1384 fn is_keyless(&self) -> bool {
1387 matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter)
1388 }
1389
1390 pub fn is_bloom_filter(&self) -> bool {
1392 MapType::BloomFilter.eq(self)
1393 }
1394
1395 pub fn is_supported(&self) -> Result<bool> {
1400 let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1401 match ret {
1402 0 => Ok(false),
1403 1 => Ok(true),
1404 _ => Err(Error::from_raw_os_error(-ret)),
1405 }
1406 }
1407}
1408
impl From<u32> for MapType {
    /// Convert a raw kernel map type value into a [`MapType`], falling back
    /// to [`MapType::Unknown`] for unrecognized values.
    ///
    /// Guard patterns are used because the discriminants are `libbpf_sys`
    /// constants, compared via `Variant as u32` (guaranteed equal by the
    /// `repr(u32)` enum definition).
    fn from(value: u32) -> Self {
        use MapType::*;

        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            // NOTE(review): `CGrpStorage` is absent from this list and thus
            // maps to `Unknown` on round-trip — confirm whether intentional.
            _ => Unknown,
        }
    }
}
1450
impl From<MapType> for u32 {
    /// Convert a [`MapType`] back into its raw kernel value; exact because
    /// the enum is `repr(u32)` with kernel-constant discriminants.
    fn from(value: MapType) -> Self {
        value as u32
    }
}
1456
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // The last key returned; `None` before the first call, which makes the
    // kernel start iteration from the beginning.
    prev: Option<Vec<u8>>,
    // Scratch buffer the kernel writes the next key into.
    next: Vec<u8>,
}
1464
1465impl<'map> MapKeyIter<'map> {
1466 fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
1467 Self {
1468 map_fd,
1469 prev: None,
1470 next: vec![0; key_size as usize],
1471 }
1472 }
1473}
1474
impl Iterator for MapKeyIter<'_> {
    type Item = Vec<u8>;

    fn next(&mut self) -> Option<Self::Item> {
        // NULL on the first call asks the kernel for the first key.
        let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);

        let ret = unsafe {
            libbpf_sys::bpf_map_get_next_key(
                self.map_fd.as_raw_fd(),
                prev as _,
                self.next.as_mut_ptr() as _,
            )
        };
        if ret != 0 {
            // Non-zero covers both "no more keys" and genuine errors; the
            // iterator cannot distinguish them and simply ends.
            None
        } else {
            // Remember the key for the next `bpf_map_get_next_key` call and
            // hand an independent copy to the caller.
            self.prev = Some(self.next.clone());
            Some(self.next.clone())
        }
    }
}
1496
/// An iterator over batches of key/value pairs of a BPF map.
#[derive(Debug)]
pub struct BatchedMapIter<'map> {
    // Borrowed descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // Whether each fetched batch is also deleted from the map.
    delete: bool,
    // Maximum number of elements requested per batch syscall.
    count: usize,
    // Key stride in bytes (possibly padded, see `lookup_batch_raw`).
    key_size: usize,
    // Value size in bytes.
    value_size: usize,
    // Flat buffer of the current batch's keys; its length tracks the number
    // of elements the kernel actually returned.
    keys: Vec<u8>,
    // Flat buffer of the current batch's values, parallel to `keys`.
    values: Vec<u8>,
    // Iteration cookie from the previous batch; `None` before the first.
    prev: Option<Vec<u8>>,
    // Buffer the kernel writes the next iteration cookie into.
    next: Vec<u8>,
    // Options (element flags etc.) passed to every batch syscall.
    batch_opts: libbpf_sys::bpf_map_batch_opts,
    // Position within the current batch; `None` means "fetch a batch" or
    // "iteration failed/finished".
    index: Option<usize>,
}
1512
impl<'map> BatchedMapIter<'map> {
    /// Construct a batched iterator; buffers are sized for `count` elements
    /// of `key_size`/`value_size` bytes each.
    fn new(
        map_fd: BorrowedFd<'map>,
        count: u32,
        key_size: u32,
        value_size: u32,
        batch_opts: libbpf_sys::bpf_map_batch_opts,
        delete: bool,
    ) -> Self {
        Self {
            map_fd,
            delete,
            count: count as usize,
            key_size: key_size as usize,
            value_size: value_size as usize,
            keys: vec![0; (count * key_size) as usize],
            values: vec![0; (count * value_size) as usize],
            prev: None,
            next: vec![0; key_size as usize],
            batch_opts,
            delete and batch_opts recorded above.
            index: None,
        }
    }

    /// Fetch the next batch from the kernel into `keys`/`values`, updating
    /// `prev`/`next` cookies and resetting `index`. On a hard error, `index`
    /// is set to `None` to terminate iteration.
    fn lookup_next_batch(&mut self) {
        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
        let mut count = self.count as u32;

        let ret = unsafe {
            let lookup_fn = if self.delete {
                libbpf_sys::bpf_map_lookup_and_delete_batch
            } else {
                libbpf_sys::bpf_map_lookup_batch
            };
            lookup_fn(
                self.map_fd.as_raw_fd(),
                prev.cast(),
                self.next.as_mut_ptr().cast(),
                self.keys.as_mut_ptr().cast(),
                self.values.as_mut_ptr().cast(),
                &mut count,
                &self.batch_opts,
            )
        };

        if let Err(e) = util::parse_ret(ret) {
            match e.kind() {
                // ENOENT: the final (possibly partial) batch was returned;
                // fall through and surface whatever `count` elements we got.
                error::ErrorKind::NotFound => {}
                // EINTR: retry the whole batch.
                error::ErrorKind::Interrupted => {
                    return self.lookup_next_batch();
                }
                // Hard error: end iteration.
                _ => {
                    self.index = None;
                    return;
                }
            }
        }

        self.prev = Some(self.next.clone());
        self.index = Some(0);

        unsafe {
            // SAFETY: the kernel wrote `count` elements; both buffers were
            //         fully initialized at construction and their capacity
            //         is at least `self.count` elements, so shrinking or
            //         re-growing the length within that bound only exposes
            //         initialized bytes.
            self.keys.set_len(self.key_size * count as usize);
            self.values.set_len(self.value_size * count as usize);
        }
    }
}
1582
impl Iterator for BatchedMapIter<'_> {
    type Item = (Vec<u8>, Vec<u8>);

    fn next(&mut self) -> Option<Self::Item> {
        let load_next_batch = match self.index {
            Some(index) => {
                // All elements of the current batch have been yielded...
                let batch_finished = index * self.key_size >= self.keys.len();
                // ...and the kernel returned fewer than requested last time,
                // which means there is nothing further to fetch.
                let last_batch = self.keys.len() < self.key_size * self.count;
                batch_finished && !last_batch
            }
            None => true,
        };

        if load_next_batch {
            self.lookup_next_batch();
        }

        // `index` is `None` here when batch fetching failed — end iteration.
        let index = self.index?;
        let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
        let val = self
            .values
            .chunks_exact(self.value_size)
            .nth(index)?
            .to_vec();

        self.index = Some(index + 1);
        Some((key, val))
    }
}
1612
/// Kernel-reported information about a BPF map, as returned by
/// `bpf_obj_get_info_by_fd`.
#[derive(Debug)]
pub struct MapInfo {
    /// The raw `bpf_map_info` structure filled in by the kernel.
    pub info: bpf_map_info,
}
1620
1621impl MapInfo {
1622 pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
1624 let mut map_info = bpf_map_info::default();
1625 let mut size = mem::size_of_val(&map_info) as u32;
1626 let () = util::parse_ret(unsafe {
1628 bpf_obj_get_info_by_fd(
1629 fd.as_raw_fd(),
1630 &mut map_info as *mut bpf_map_info as *mut c_void,
1631 &mut size as *mut u32,
1632 )
1633 })?;
1634 Ok(Self { info: map_info })
1635 }
1636
1637 #[inline]
1639 pub fn map_type(&self) -> MapType {
1640 MapType::from(self.info.type_)
1641 }
1642
1643 pub fn name<'a>(&self) -> Result<&'a str> {
1648 let char_slice =
1650 unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };
1651
1652 util::c_char_slice_to_cstr(char_slice)
1653 .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
1654 .to_str()
1655 .map_err(Error::with_invalid_data)
1656 }
1657
1658 #[inline]
1660 pub fn flags(&self) -> MapFlags {
1661 MapFlags::from_bits_truncate(self.info.map_flags as u64)
1662 }
1663}
1664
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Check that the `u32 -> MapType` conversion round-trips every variant:
    /// `MapType::from(t as u32)` must yield the same variant `t`.
    /// (Comparison is by discriminant, so the data-free variants compare
    /// directly.)
    #[test]
    fn map_type() {
        use MapType::*;

        for t in [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ] {
            // NOTE(review): `CGrpStorage` is missing from this list, matching
            // its absence from the `From<u32>` conversion.
            assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32)));
        }
    }
}
1714}