1use core::ffi::c_void;
2use std::ffi::CStr;
3use std::ffi::CString;
4use std::ffi::OsStr;
5use std::ffi::OsString;
6use std::fmt::Debug;
7use std::fs::remove_file;
8use std::io;
9use std::marker::PhantomData;
10use std::mem;
11use std::mem::transmute;
12use std::ops::Deref;
13use std::os::unix::ffi::OsStrExt;
14use std::os::unix::io::AsFd;
15use std::os::unix::io::AsRawFd;
16use std::os::unix::io::BorrowedFd;
17use std::os::unix::io::FromRawFd;
18use std::os::unix::io::OwnedFd;
19use std::os::unix::io::RawFd;
20use std::path::Path;
21use std::ptr;
22use std::ptr::NonNull;
23use std::slice;
24use std::slice::from_raw_parts;
25
26use bitflags::bitflags;
27use libbpf_sys::bpf_map_info;
28use libbpf_sys::bpf_obj_get_info_by_fd;
29
30use crate::error;
31use crate::util;
32use crate::util::parse_ret_i32;
33use crate::util::validate_bpf_ret;
34use crate::AsRawLibbpf;
35use crate::Error;
36use crate::ErrorExt as _;
37use crate::Link;
38use crate::Mut;
39use crate::Result;
40
/// A shared (immutable) reference to a not-yet-loaded BPF map.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// An exclusive (mutable) reference to a not-yet-loaded BPF map.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;

/// Represents a parsed but not yet loaded BPF map.
///
/// `T` is a marker type: `()` for shared access, [`Mut`] for mutable access.
/// `repr(transparent)` guarantees that the layout is the same for every `T`,
/// which the `Deref` impl of `OpenMapMut` relies on when casting
/// `&OpenMapMut` to `&OpenMap`.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Non-null pointer into the enclosing libbpf object; validity for 'obj
    // is established at construction time.
    ptr: NonNull<libbpf_sys::bpf_map>,
    _phantom: PhantomData<&'obj T>,
}
58
impl<'obj> OpenMap<'obj> {
    /// Create a new [`OpenMap`] from a reference to an existing
    /// `libbpf_sys::bpf_map` owned by a libbpf object.
    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
        Self {
            // SAFETY: a reference is always non-null; the const-to-mut cast
            // is sound because the shared variant never mutates through it.
            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the map's name.
    pub fn name(&self) -> &'obj OsStr {
        // SAFETY: the map pointer is valid; libbpf returns a valid,
        // NUL-terminated string for the map name.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve the type of the map.
    pub fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    /// Raw accessor for the map's initial value region (pointer + length);
    /// the pointer is NULL when the map has no initial value.
    fn initial_value_raw(&self) -> (*mut u8, usize) {
        let mut size = 0u64;
        let ptr = unsafe {
            // `size` is written by libbpf; the double cast adapts u64 to the
            // FFI size parameter type.
            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
        };
        (ptr.cast(), size as _)
    }

    /// Retrieve the initial value of the map, if any.
    pub fn initial_value(&self) -> Option<&[u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: libbpf reported `size` bytes at `ptr`; the data lives
            // as long as the underlying map object.
            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Retrieve the maximum number of entries the map can hold.
    pub fn max_entries(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }

    /// Whether the map will be automatically created when the object loads.
    pub fn autocreate(&self) -> bool {
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
115
impl<'obj> OpenMapMut<'obj> {
    /// Create a new mutable [`OpenMapMut`] from a mutable reference to an
    /// existing `libbpf_sys::bpf_map`.
    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
        Self {
            // SAFETY: a reference is always non-null.
            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the initial value of the map as a mutable slice, if any.
    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            // SAFETY: libbpf reported `size` bytes at `ptr`; exclusive access
            // is guaranteed by `&mut self`.
            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Bind the map to the network interface with the given index.
    pub fn set_map_ifindex(&mut self, idx: u32) {
        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
    }

    /// Set the initial value of the map from `data`.
    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
        let ret = unsafe {
            // SAFETY: `data` is live for the duration of the call; libbpf
            // copies the bytes.
            libbpf_sys::bpf_map__set_initial_value(
                self.ptr.as_ptr(),
                data.as_ptr() as *const c_void,
                data.len() as libbpf_sys::size_t,
            )
        };

        util::parse_ret(ret)
    }

    /// Change the map's type before load.
    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
        util::parse_ret(ret)
    }

    /// Set the key size, in bytes.
    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the value size, in bytes.
    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the maximum number of entries.
    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
        util::parse_ret(ret)
    }

    /// Set map creation flags (raw `BPF_F_*` bits).
    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
        util::parse_ret(ret)
    }

    /// Set the NUMA node the map should be allocated on.
    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
        util::parse_ret(ret)
    }

    /// Set the inner map fd for map-in-map types (array/hash of maps).
    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
        };
        util::parse_ret(ret)
    }

    /// Set the map-type-specific `map_extra` field.
    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
        util::parse_ret(ret)
    }

    /// Control whether libbpf creates this map on object load.
    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
        util::parse_ret(ret)
    }

    /// Set the bpffs path the map should be pinned at on load.
    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Reuse an already-created map's file descriptor instead of creating a
    /// new map on load.
    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
        util::parse_ret(ret)
    }

    /// Open the map pinned at `path` and reuse its file descriptor.
    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let cstring = util::path_to_cstring(path)?;

        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
        if fd < 0 {
            return Err(Error::from(io::Error::last_os_error()));
        }

        // SAFETY: the kernel just handed us this fd; we own it. The OwnedFd
        // closes it on drop (libbpf dup()s the fd internally in reuse_fd).
        let fd = unsafe { OwnedFd::from_raw_fd(fd) };

        let reuse_result = self.reuse_fd(fd.as_fd());

        reuse_result
    }
}
258
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `#[repr(transparent)]` and the marker `T`
        // appears only inside `PhantomData`, so the mutable and shared
        // variants have identical layout; the reference cast is sound.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
268
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`] pointer.
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
277
278pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
279 let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
280 let fd = util::parse_ret_i32(fd).ok();
281 fd
282}
283
284fn percpu_aligned_value_size<M>(map: &M) -> usize
287where
288 M: MapCore + ?Sized,
289{
290 let val_size = map.value_size() as usize;
291 util::roundup(val_size, 8)
292}
293
294fn percpu_buffer_size<M>(map: &M) -> Result<usize>
296where
297 M: MapCore + ?Sized,
298{
299 let aligned_val_size = percpu_aligned_value_size(map);
300 let ncpu = crate::num_possible_cpus()?;
301 Ok(ncpu * aligned_val_size)
302}
303
304fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
308where
309 M: MapCore + ?Sized,
310{
311 if map.key_size() == 0 && map.map_type().is_keyless() {
313 return ptr::null();
314 }
315
316 key.as_ptr() as *const c_void
317}
318
/// Shared element-lookup implementation: copies the value for `key` into the
/// caller-provided (possibly uninitialized) buffer.
///
/// Returns `Ok(true)` when the element exists, `Ok(false)` on `ENOENT`, and
/// an error for any other failure or a wrongly sized key. The buffer length
/// is only checked via `debug_assert_eq!` — callers are expected to size it
/// to the map's value size (or the padded per-cpu buffer size).
fn lookup_raw<M>(
    map: &M,
    key: &[u8],
    value: &mut [mem::MaybeUninit<u8>],
    flags: MapFlags,
) -> Result<bool>
where
    M: MapCore + ?Sized,
{
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    }

    debug_assert_eq!(
        value.len(),
        if map.map_type().is_percpu() {
            percpu_buffer_size(map).unwrap()
        } else {
            map.value_size() as usize
        }
    );

    let ret = unsafe {
        // SAFETY: the fd, key pointer, and value buffer are all valid for
        // the duration of the call.
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            value.as_mut_ptr().cast(),
            flags.bits(),
        )
    };

    if ret == 0 {
        Ok(true)
    } else {
        // Distinguish "element absent" (ENOENT) from genuine failures.
        let err = io::Error::last_os_error();
        if err.kind() == io::ErrorKind::NotFound {
            Ok(false)
        } else {
            Err(Error::from(err))
        }
    }
}
369
/// Convenience wrapper around [`lookup_raw`] that allocates an `out_size`
/// byte vector for the result and returns it on a hit.
fn lookup_raw_vec<M>(
    map: &M,
    key: &[u8],
    flags: MapFlags,
    out_size: usize,
) -> Result<Option<Vec<u8>>>
where
    M: MapCore + ?Sized,
{
    let mut out = Vec::with_capacity(out_size);

    match lookup_raw(map, key, out.spare_capacity_mut(), flags)? {
        true => {
            // SAFETY: on success the kernel populated the value bytes in the
            // spare capacity; `out_size` matches the buffer we handed over.
            unsafe {
                out.set_len(out_size);
            }
            Ok(Some(out))
        }
        false => Ok(None),
    }
}
394
395fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
398where
399 M: MapCore + ?Sized,
400{
401 if key.len() != map.key_size() as usize {
402 return Err(Error::with_invalid_data(format!(
403 "key_size {} != {}",
404 key.len(),
405 map.key_size()
406 )));
407 };
408
409 let ret = unsafe {
410 libbpf_sys::bpf_map_update_elem(
411 map.as_fd().as_raw_fd(),
412 map_key(map, key),
413 value.as_ptr() as *const c_void,
414 flags.bits(),
415 )
416 };
417
418 util::parse_ret(ret)
419}
420
/// Build a [`BatchedMapIter`] for batched lookups, optionally deleting the
/// visited elements (`delete == true`).
fn lookup_batch_raw<M>(
    map: &M,
    count: u32,
    elem_flags: MapFlags,
    flags: MapFlags,
    delete: bool,
) -> BatchedMapIter<'_>
where
    M: MapCore + ?Sized,
{
    #[allow(clippy::needless_update)]
    let opts = libbpf_sys::bpf_map_batch_opts {
        sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
        elem_flags: elem_flags.bits(),
        flags: flags.bits(),
        ..Default::default()
    };

    // NOTE(review): hash-based maps appear to require at least 4-byte keys
    // for the batch interface, hence the clamp — confirm against the
    // kernel's BPF_MAP_LOOKUP_BATCH documentation.
    let key_size = if map.map_type().is_hash_map() {
        map.key_size().max(4)
    } else {
        map.key_size()
    };

    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
}
451
452fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
454where
455 M: MapCore + ?Sized,
456{
457 if map.map_type().is_bloom_filter() {
458 return Err(Error::with_invalid_data(
459 "lookup_bloom_filter() must be used for bloom filter maps",
460 ));
461 }
462 if map.map_type().is_percpu() {
463 return Err(Error::with_invalid_data(format!(
464 "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
465 map.map_type(),
466 )));
467 }
468
469 Ok(())
470}
471
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    /// Seal for [`MapCore`]: only types named here can implement the trait.
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
481
/// Core operations shared by all map variants (`Map`, `MapMut`,
/// `MapHandle`). Sealed: only this crate can implement it.
pub trait MapCore: Debug + AsFd + private::Sealed {
    /// Retrieve the map's name.
    fn name(&self) -> &OsStr;

    /// Retrieve the type of the map.
    fn map_type(&self) -> MapType;

    /// Retrieve the size of the map's keys, in bytes.
    fn key_size(&self) -> u32;

    /// Retrieve the size of the map's values, in bytes.
    fn value_size(&self) -> u32;

    /// Retrieve the maximum number of entries the map can hold.
    fn max_entries(&self) -> u32;

    /// Fetch extended information about the map from the kernel.
    #[inline]
    fn info(&self) -> Result<MapInfo> {
        MapInfo::new(self.as_fd())
    }

    /// Return an iterator over the keys currently present in the map.
    fn keys(&self) -> MapKeyIter<'_> {
        MapKeyIter::new(self.as_fd(), self.key_size())
    }

    /// Look up `key`, returning the value bytes if the element exists.
    ///
    /// Errors for bloom-filter and per-cpu maps (use `lookup_bloom_filter()`
    /// or `lookup_percpu()`) and for a wrongly sized key.
    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
        check_not_bloom_or_percpu(self)?;
        let out_size = self.value_size() as usize;
        lookup_raw_vec(self, key, flags, out_size)
    }

    /// Look up `key`, writing the value into the caller-provided buffer.
    /// Returns `Ok(true)` when the element existed, `Ok(false)` otherwise.
    fn lookup_into(&self, key: &[u8], value: &mut [u8], flags: MapFlags) -> Result<bool> {
        check_not_bloom_or_percpu(self)?;

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value buffer size {} != {}",
                value.len(),
                self.value_size()
            )));
        }

        // Reinterpret the initialized buffer as `MaybeUninit<u8>`, which is
        // what the raw helper expects; same layout, same length.
        let value = unsafe {
            slice::from_raw_parts_mut::<mem::MaybeUninit<u8>>(
                value.as_mut_ptr().cast(),
                value.len(),
            )
        };
        lookup_raw(self, key, value, flags)
    }

    /// Iterate over the map's elements in batches of up to `count`.
    fn lookup_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
    }

    /// Like [`MapCore::lookup_batch`], but deletes elements as they are read.
    fn lookup_and_delete_batch(
        &self,
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<BatchedMapIter<'_>> {
        check_not_bloom_or_percpu(self)?;
        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
    }

    /// Test membership of `value` in a bloom-filter map: `Ok(true)` if
    /// possibly present, `Ok(false)` on ENOENT.
    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
        let ret = unsafe {
            // The temporary `to_vec()` lives until the end of this statement,
            // so the pointer stays valid across the syscall; the copy exists
            // only because the FFI signature wants a mutable pointer, and the
            // buffer's contents are discarded afterwards.
            libbpf_sys::bpf_map_lookup_elem(
                self.as_fd().as_raw_fd(),
                ptr::null(),
                value.to_vec().as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            Ok(true)
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(false)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Look up `key` in a per-cpu map, returning one value per possible CPU.
    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
        // `Unknown` is allowed through so that maps of types this crate does
        // not recognize can still be queried.
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let out_size = percpu_buffer_size(self)?;

        let raw_res = lookup_raw_vec(self, key, flags, out_size)?;
        if let Some(raw_vals) = raw_res {
            let mut out = Vec::new();
            // Each CPU's value occupies an 8-byte-aligned slot; strip the
            // padding down to the declared value size.
            for chunk in raw_vals.chunks_exact(aligned_val_size) {
                out.push(chunk[..val_size].to_vec());
            }
            Ok(Some(out))
        } else {
            Ok(None)
        }
    }

    /// Delete the element stored under `key`.
    fn delete(&self, key: &[u8]) -> Result<()> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let ret = unsafe {
            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
        };
        util::parse_ret(ret)
    }

    /// Delete `count` elements whose keys are packed back-to-back in `keys`.
    ///
    /// NOTE(review): a `count` of 0 makes the validation below divide by
    /// zero and panic — confirm callers never pass 0.
    fn delete_batch(
        &self,
        keys: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            ..Default::default()
        };

        // The kernel updates `count` with the number of elements processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_delete_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };
        util::parse_ret(ret)
    }

    /// Atomically look up and delete the element under `key`, returning its
    /// value bytes if it existed.
    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);

        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_and_delete_elem(
                self.as_fd().as_raw_fd(),
                map_key(self, key),
                out.as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            // SAFETY: on success the kernel wrote the value into the spare
            // capacity we passed.
            unsafe {
                out.set_len(self.value_size() as usize);
            }
            Ok(Some(out))
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Store `value` under `key`. Errors for per-cpu maps (use
    /// `update_percpu()`) and for wrongly sized keys or values.
    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
        if self.map_type().is_percpu() {
            return Err(Error::with_invalid_data(format!(
                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value_size {} != {}",
                value.len(),
                self.value_size()
            )));
        };

        update_raw(self, key, value, flags)
    }

    /// Store `count` key/value pairs, packed back-to-back in `keys` and
    /// `values`, in one syscall.
    ///
    /// NOTE(review): as in `delete_batch`, `count == 0` panics on the
    /// division below — confirm callers never pass 0.
    fn update_batch(
        &self,
        keys: &[u8],
        values: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch value_size {} != {} * {}",
                values.len(),
                self.value_size(),
                count
            )));
        }

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            ..Default::default()
        };

        // The kernel updates `count` with the number of elements processed.
        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_update_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                values.as_ptr() as *const c_void,
                &mut count,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };

        util::parse_ret(ret)
    }

    /// Store one value per possible CPU under `key` in a per-cpu map;
    /// `values` must contain exactly one correctly sized entry per CPU.
    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if values.len() != crate::num_possible_cpus()? {
            return Err(Error::with_invalid_data(format!(
                "number of values {} != number of cpus {}",
                values.len(),
                crate::num_possible_cpus()?
            )));
        };

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let buf_size = percpu_buffer_size(self)?;

        // Pack each CPU's value into its 8-byte-aligned slot; the padding
        // bytes stay zeroed.
        let mut value_buf = vec![0; buf_size];

        for (i, val) in values.iter().enumerate() {
            if val.len() != val_size {
                return Err(Error::with_invalid_data(format!(
                    "value size for cpu {} is {} != {}",
                    i,
                    val.len(),
                    val_size
                )));
            }

            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
                .copy_from_slice(val);
        }

        update_raw(self, key, &value_buf, flags)
    }
}
860
861pub type Map<'obj> = MapImpl<'obj>;
863pub type MapMut<'obj> = MapImpl<'obj, Mut>;
865
866#[derive(Debug)]
871pub struct MapImpl<'obj, T = ()> {
872 ptr: NonNull<libbpf_sys::bpf_map>,
873 _phantom: PhantomData<&'obj T>,
874}
875
impl<'obj> Map<'obj> {
    /// Create a [`Map`] from an existing `libbpf_sys::bpf_map`.
    ///
    /// # Panics
    /// Panics if the map has not been created yet (no file descriptor).
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: a reference is always non-null.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Construct a [`Map`] without checking for a file descriptor.
    ///
    /// # Safety
    /// The caller must ensure `ptr` is valid for the chosen lifetime; fd-based
    /// operations will fail (or panic in `as_fd`) if no fd exists.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Whether the map is pinned to bpffs.
    pub fn is_pinned(&self) -> bool {
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// Retrieve the bpffs path this map is pinned at, if any.
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // No pin path is set.
            return None;
        }
        // SAFETY: a non-null pin path is a valid NUL-terminated string.
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }

    /// Whether libbpf was set to create this map on object load.
    pub fn autocreate(&self) -> bool {
        unsafe { libbpf_sys::bpf_map__autocreate(self.ptr.as_ptr()) }
    }
}
932
impl<'obj> MapMut<'obj> {
    /// Create a mutable [`MapMut`] from an existing `libbpf_sys::bpf_map`.
    ///
    /// # Panics
    /// Panics if the map has not been created yet (no file descriptor).
    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: a reference is always non-null.
        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Pin this map to bpffs at `path`.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Unpin this map from bpffs at `path`.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();
        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Attach a struct_ops map; errors for any other map type.
    pub fn attach_struct_ops(&mut self) -> Result<Link> {
        if self.map_type() != MapType::StructOps {
            return Err(Error::with_invalid_data(format!(
                "Invalid map type ({:?}) for attach_struct_ops()",
                self.map_type(),
            )));
        }

        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
        // SAFETY: the pointer was checked for validity above.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
}
985
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapMut` and `Map` are instantiations of `MapImpl` whose
        // marker `T` appears only inside `PhantomData`; the cast is sound
        // provided the struct's layout is independent of `T` — this should
        // be guaranteed via `#[repr(transparent)]` as on `OpenMapImpl`.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
993
994impl<T> AsFd for MapImpl<'_, T> {
995 #[inline]
996 fn as_fd(&self) -> BorrowedFd<'_> {
997 let fd = map_fd(self.ptr).unwrap();
1000 let fd = unsafe { BorrowedFd::borrow_raw(fd) };
1003 fd
1004 }
1005}
1006
impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    /// Retrieve the map's name via libbpf.
    fn name(&self) -> &OsStr {
        // SAFETY: the map pointer is valid; libbpf returns a valid,
        // NUL-terminated string for the map name.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__max_entries(self.ptr.as_ptr()) }
    }
}
1041
1042impl AsRawLibbpf for Map<'_> {
1043 type LibbpfType = libbpf_sys::bpf_map;
1044
1045 #[inline]
1047 fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
1048 self.ptr
1049 }
1050}
1051
/// A handle to a BPF map that owns its own file descriptor, independent of
/// any libbpf object. Metadata is cached at construction time.
#[derive(Debug)]
pub struct MapHandle {
    // Owned fd; closed on drop.
    fd: OwnedFd,
    // Cached map metadata, captured when the handle was created.
    name: OsString,
    ty: MapType,
    key_size: u32,
    value_size: u32,
    max_entries: u32,
}
1075
1076impl MapHandle {
1077 pub fn create<T: AsRef<OsStr>>(
1079 map_type: MapType,
1080 name: Option<T>,
1081 key_size: u32,
1082 value_size: u32,
1083 max_entries: u32,
1084 opts: &libbpf_sys::bpf_map_create_opts,
1085 ) -> Result<Self> {
1086 let name = match name {
1087 Some(name) => name.as_ref().to_os_string(),
1088 None => OsString::new(),
1090 };
1091 let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
1092 Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
1093 })?;
1094 let name_c_ptr = if name.is_empty() {
1095 ptr::null()
1096 } else {
1097 name_c_str.as_bytes_with_nul().as_ptr()
1098 };
1099
1100 let fd = unsafe {
1101 libbpf_sys::bpf_map_create(
1102 map_type.into(),
1103 name_c_ptr.cast(),
1104 key_size,
1105 value_size,
1106 max_entries,
1107 opts,
1108 )
1109 };
1110 let () = util::parse_ret(fd)?;
1111
1112 Ok(Self {
1113 fd: unsafe { OwnedFd::from_raw_fd(fd) },
1117 name,
1118 ty: map_type,
1119 key_size,
1120 value_size,
1121 max_entries,
1122 })
1123 }
1124
1125 pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
1130 fn inner(path: &Path) -> Result<MapHandle> {
1131 let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
1132 let fd = parse_ret_i32(unsafe {
1133 libbpf_sys::bpf_obj_get(p.as_ptr())
1136 })?;
1137 MapHandle::from_fd(unsafe {
1138 OwnedFd::from_raw_fd(fd)
1142 })
1143 }
1144
1145 inner(path.as_ref())
1146 }
1147
1148 pub fn from_map_id(id: u32) -> Result<Self> {
1150 parse_ret_i32(unsafe {
1151 libbpf_sys::bpf_map_get_fd_by_id(id)
1154 })
1155 .map(|fd| unsafe {
1156 OwnedFd::from_raw_fd(fd)
1160 })
1161 .and_then(Self::from_fd)
1162 }
1163
1164 fn from_fd(fd: OwnedFd) -> Result<Self> {
1165 let info = MapInfo::new(fd.as_fd())?;
1166 Ok(Self {
1167 fd,
1168 name: info.name()?.into(),
1169 ty: info.map_type(),
1170 key_size: info.info.key_size,
1171 value_size: info.info.value_size,
1172 max_entries: info.info.max_entries,
1173 })
1174 }
1175
1176 pub fn freeze(&self) -> Result<()> {
1183 let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };
1184
1185 util::parse_ret(ret)
1186 }
1187
1188 pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1191 let path_c = util::path_to_cstring(path)?;
1192 let path_ptr = path_c.as_ptr();
1193
1194 let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
1195 util::parse_ret(ret)
1196 }
1197
1198 pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1201 remove_file(path).context("failed to remove pin map")
1202 }
1203}
1204
/// All metadata accessors return the values cached at handle creation time.
impl MapCore for MapHandle {
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }

    #[inline]
    fn max_entries(&self) -> u32 {
        self.max_entries
    }
}
1231
impl AsFd for MapHandle {
    /// Borrow the handle's owned file descriptor.
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}
1238
/// Create a standalone [`MapHandle`] from a borrowed map by duplicating its
/// file descriptor and snapshotting its metadata.
impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
where
    T: Debug,
{
    type Error = Error;

    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
            max_entries: other.max_entries(),
        })
    }
}
1259
/// Duplicate a [`MapHandle`] by dup()ing its file descriptor; the metadata
/// is copied from the cached fields.
impl TryFrom<&Self> for MapHandle {
    type Error = Error;

    fn try_from(other: &Self) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
            max_entries: other.max_entries(),
        })
    }
}
1277
bitflags! {
    /// Flags accepted by map update/lookup operations, mirroring the
    /// kernel's `BPF_*` element flags.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// Wrapper for `BPF_ANY`.
        const ANY = libbpf_sys::BPF_ANY as _;
        /// Wrapper for `BPF_NOEXIST`.
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// Wrapper for `BPF_EXIST`.
        const EXIST = libbpf_sys::BPF_EXIST as _;
        /// Wrapper for `BPF_F_LOCK`.
        const LOCK = libbpf_sys::BPF_F_LOCK as _;
    }
}
1292
/// Type of a BPF map, mirroring the kernel's `bpf_map_type` values via the
/// `libbpf_sys` constants. The `u32` representation keeps discriminants
/// identical to the C values, so the `From<u32>`/`Into<u32>` conversions
/// below are lossless for known variants.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum MapType {
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    #[allow(missing_docs)]
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    #[allow(missing_docs)]
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    CGrpStorage = libbpf_sys::BPF_MAP_TYPE_CGRP_STORAGE,
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    #[allow(missing_docs)]
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// Catch-all for map types this crate does not (yet) know about; also
    /// the result of converting an unrecognized `u32`.
    Unknown = u32::MAX,
}
1443
1444impl MapType {
1445 pub fn is_percpu(&self) -> bool {
1447 matches!(
1448 self,
1449 Self::PercpuArray | Self::PercpuHash | Self::LruPercpuHash | Self::PercpuCgroupStorage
1450 )
1451 }
1452
1453 pub fn is_hash_map(&self) -> bool {
1455 matches!(
1456 self,
1457 Self::Hash | Self::PercpuHash | Self::LruHash | Self::LruPercpuHash
1458 )
1459 }
1460
1461 fn is_keyless(&self) -> bool {
1464 matches!(self, Self::Queue | Self::Stack | Self::BloomFilter)
1465 }
1466
1467 pub fn is_bloom_filter(&self) -> bool {
1469 Self::BloomFilter.eq(self)
1470 }
1471
1472 pub fn is_supported(&self) -> Result<bool> {
1477 let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1478 match ret {
1479 0 => Ok(false),
1480 1 => Ok(true),
1481 _ => Err(Error::from_raw_os_error(-ret)),
1482 }
1483 }
1484}
1485
impl From<u32> for MapType {
    /// Map a raw kernel map-type value to the matching variant; values not
    /// known to this crate collapse to [`MapType::Unknown`].
    fn from(value: u32) -> Self {
        use MapType::*;

        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            _ => Unknown,
        }
    }
}
1527
1528impl From<MapType> for u32 {
1529 fn from(value: MapType) -> Self {
1530 value as Self
1531 }
1532}
1533
/// An iterator over the keys of a BPF map, driven by repeated
/// `bpf_map_get_next_key` syscalls.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    map_fd: BorrowedFd<'map>,
    // Key returned by the previous iteration (None before the first call).
    prev: Option<Vec<u8>>,
    // Scratch buffer the kernel writes the next key into.
    next: Vec<u8>,
}
1541
1542impl<'map> MapKeyIter<'map> {
1543 fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
1544 Self {
1545 map_fd,
1546 prev: None,
1547 next: vec![0; key_size as usize],
1548 }
1549 }
1550}
1551
1552impl Iterator for MapKeyIter<'_> {
1553 type Item = Vec<u8>;
1554
1555 fn next(&mut self) -> Option<Self::Item> {
1556 let prev = self.prev.as_ref().map_or(ptr::null(), Vec::as_ptr);
1557
1558 let ret = unsafe {
1559 libbpf_sys::bpf_map_get_next_key(
1560 self.map_fd.as_raw_fd(),
1561 prev as _,
1562 self.next.as_mut_ptr() as _,
1563 )
1564 };
1565 if ret != 0 {
1566 None
1567 } else {
1568 self.prev = Some(self.next.clone());
1569 Some(self.next.clone())
1570 }
1571 }
1572}
1573
/// An iterator over batches of key/value pairs of a BPF map.
#[derive(Debug)]
pub struct BatchedMapIter<'map> {
    // Borrowed file descriptor of the map being iterated.
    map_fd: BorrowedFd<'map>,
    // When `true`, use lookup-and-delete semantics: entries are removed
    // from the map as they are fetched.
    delete: bool,
    // Maximum number of elements requested per batch.
    count: usize,
    // Size of a single key, in bytes.
    key_size: usize,
    // Size of a single value, in bytes.
    value_size: usize,
    // Flat buffer holding up to `count` keys of the current batch.
    keys: Vec<u8>,
    // Flat buffer holding up to `count` values of the current batch.
    values: Vec<u8>,
    // Batch token produced by the previous kernel call, if any; passed back
    // in to continue where the last batch left off.
    prev: Option<Vec<u8>>,
    // Buffer the kernel writes the next batch token into.
    next: Vec<u8>,
    // Options forwarded to the batch lookup syscall wrappers.
    batch_opts: libbpf_sys::bpf_map_batch_opts,
    // Index of the next element to yield within the current batch; `None`
    // when no (further) batch is available.
    index: Option<usize>,
}
1589
impl<'map> BatchedMapIter<'map> {
    /// Create a batched iterator over the map behind `map_fd`, fetching up
    /// to `count` elements of `key_size`/`value_size` bytes per kernel call.
    fn new(
        map_fd: BorrowedFd<'map>,
        count: u32,
        key_size: u32,
        value_size: u32,
        batch_opts: libbpf_sys::bpf_map_batch_opts,
        delete: bool,
    ) -> Self {
        Self {
            map_fd,
            delete,
            count: count as usize,
            key_size: key_size as usize,
            value_size: value_size as usize,
            // Buffers are fully zero-initialized up front so the later
            // `set_len` calls never expose uninitialized memory.
            keys: vec![0; (count * key_size) as usize],
            values: vec![0; (count * value_size) as usize],
            prev: None,
            next: vec![0; key_size as usize],
            batch_opts,
            index: None,
        }
    }

    /// Fetch the next batch of key/value pairs from the kernel into
    /// `self.keys`/`self.values`, updating `self.index` accordingly.
    fn lookup_next_batch(&mut self) {
        // NULL on the first call makes the kernel start from the beginning
        // of the map.
        let prev = self.prev.as_mut().map_or(ptr::null_mut(), Vec::as_mut_ptr);
        // In/out argument: requested batch size on input, number of
        // elements actually returned on output.
        let mut count = self.count as u32;

        let ret = unsafe {
            let lookup_fn = if self.delete {
                libbpf_sys::bpf_map_lookup_and_delete_batch
            } else {
                libbpf_sys::bpf_map_lookup_batch
            };
            lookup_fn(
                self.map_fd.as_raw_fd(),
                prev.cast(),
                self.next.as_mut_ptr().cast(),
                self.keys.as_mut_ptr().cast(),
                self.values.as_mut_ptr().cast(),
                &mut count,
                &self.batch_opts,
            )
        };

        if let Err(e) = util::parse_ret(ret) {
            match e.kind() {
                // "Not found" signals the final (possibly partial) batch;
                // the `count` elements reported are still valid.
                error::ErrorKind::NotFound => {}
                // The call was interrupted; simply retry.
                error::ErrorKind::Interrupted => {
                    return self.lookup_next_batch();
                }
                _ => {
                    // Any other error terminates iteration.
                    self.index = None;
                    return;
                }
            }
        }

        self.prev = Some(self.next.clone());
        self.index = Some(0);

        // SAFETY: both buffers were allocated (and zero-initialized) with
        // capacity for a full batch of `self.count` elements, and the
        // kernel reports `count <= self.count` elements written, so the new
        // lengths never exceed capacity or cover uninitialized bytes.
        unsafe {
            self.keys.set_len(self.key_size * count as usize);
            self.values.set_len(self.value_size * count as usize);
        }
    }
}
1659
1660impl Iterator for BatchedMapIter<'_> {
1661 type Item = (Vec<u8>, Vec<u8>);
1662
1663 fn next(&mut self) -> Option<Self::Item> {
1664 let load_next_batch = match self.index {
1665 Some(index) => {
1666 let batch_finished = index * self.key_size >= self.keys.len();
1667 let last_batch = self.keys.len() < self.key_size * self.count;
1668 batch_finished && !last_batch
1669 }
1670 None => true,
1671 };
1672
1673 if load_next_batch {
1674 self.lookup_next_batch();
1675 }
1676
1677 let index = self.index?;
1678 let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
1679 let val = self
1680 .values
1681 .chunks_exact(self.value_size)
1682 .nth(index)?
1683 .to_vec();
1684
1685 self.index = Some(index + 1);
1686 Some((key, val))
1687 }
1688}
1689
/// Information about a BPF map, as reported by the kernel.
#[derive(Debug)]
pub struct MapInfo {
    /// The raw `bpf_map_info` structure filled in by the kernel.
    pub info: bpf_map_info,
}
1697
impl MapInfo {
    /// Create a `MapInfo` by querying the kernel for information about the
    /// map behind the provided file descriptor.
    ///
    /// # Errors
    /// Fails when the `bpf_obj_get_info_by_fd` call reports an error, e.g.
    /// when `fd` does not refer to a BPF map.
    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
        let mut map_info = bpf_map_info::default();
        // In/out argument: the size of the buffer we provide; the kernel
        // may adjust it to the size it actually filled in.
        let mut size = mem::size_of_val(&map_info) as u32;
        let () = util::parse_ret(unsafe {
            bpf_obj_get_info_by_fd(
                fd.as_raw_fd(),
                &mut map_info as *mut bpf_map_info as *mut c_void,
                &mut size as *mut u32,
            )
        })?;
        Ok(Self { info: map_info })
    }

    /// The map's type, decoded from the raw `type_` field.
    #[inline]
    pub fn map_type(&self) -> MapType {
        MapType::from(self.info.type_)
    }

    /// The map's name, read from the kernel's fixed-size name buffer.
    ///
    /// # Errors
    /// Fails when the buffer contains no NUL byte or when the name is not
    /// valid UTF-8.
    // NOTE(review): the returned lifetime `'a` is not tied to `&self`, so a
    // caller can obtain a `&str` that outlives this `MapInfo` — confirm
    // whether this unconstrained lifetime is intentional.
    pub fn name<'a>(&self) -> Result<&'a str> {
        // Reinterpret the kernel-provided `c_char` array as a byte slice so
        // it can be scanned for the terminating NUL.
        let char_slice =
            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };

        util::c_char_slice_to_cstr(char_slice)
            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
            .to_str()
            .map_err(Error::with_invalid_data)
    }

    /// The flags the map was created with; unknown bits are dropped.
    #[inline]
    pub fn flags(&self) -> MapFlags {
        MapFlags::from_bits_truncate(self.info.map_flags as u64)
    }
}
1741
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Converting every `MapType` variant to its raw `u32` representation
    /// and back must yield the same variant.
    #[test]
    fn map_type() {
        use MapType::*;

        let variants = [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ];

        for variant in variants {
            let raw = variant as u32;
            let roundtripped = MapType::from(raw);
            assert_eq!(discriminant(&variant), discriminant(&roundtripped));
        }
    }
}