1use std::{
51 borrow::Borrow,
52 ffi::CString,
53 io,
54 marker::PhantomData,
55 ops::Deref,
56 os::fd::{AsFd, BorrowedFd, OwnedFd},
57 path::Path,
58 ptr,
59};
60
61use aya_obj::{EbpfSectionKind, InvalidTypeBinding, generated::bpf_map_type, parse_map_info};
62use thiserror::Error;
63
64use crate::{
65 PinningType, Pod,
66 pin::PinError,
67 sys::{
68 SyscallError, bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_fd_by_id,
69 bpf_map_get_next_key, bpf_map_update_elem_ptr, bpf_pin_object,
70 },
71 util::nr_cpus,
72};
73
74pub mod array;
75pub mod bloom_filter;
76pub mod hash_map;
77mod info;
78pub mod lpm_trie;
79pub mod of_maps;
80pub mod perf;
81pub mod queue;
82pub mod ring_buf;
83pub mod sk_storage;
84pub mod sock;
85pub mod stack;
86pub mod stack_trace;
87pub mod xdp;
88
89pub use array::{Array, PerCpuArray, ProgramArray};
90pub use bloom_filter::BloomFilter;
91pub use hash_map::{HashMap, PerCpuHashMap};
92pub use info::{MapInfo, MapType, loaded_maps};
93pub use lpm_trie::LpmTrie;
94pub use of_maps::{ArrayOfMaps, HashOfMaps};
95pub use perf::PerfEventArray;
96pub use queue::Queue;
97pub use ring_buf::RingBuf;
98pub use sk_storage::SkStorage;
99pub use sock::{SockHash, SockMap};
100pub use stack::Stack;
101pub use stack_trace::StackTraceMap;
102pub use xdp::{CpuMap, DevMap, DevMapHash, XskMap};
103
/// Marker trait for map wrappers that can be constructed from a [`MapData`].
///
/// Sealed: the actual constructor lives in [`sealed::FromMapData`], so this
/// trait cannot be implemented outside this crate.
pub trait FromMapData: Sized + sealed::FromMapData {}

// Blanket impl: anything with the sealed implementation gets the public marker.
impl<T: sealed::FromMapData> FromMapData for T {}

/// Marker trait for values usable as the inner map of a map-of-maps.
///
/// Sealed via [`sealed::InnerMap`], which exposes the map's file descriptor.
pub trait InnerMap: sealed::InnerMap {}

impl<T: sealed::InnerMap> InnerMap for T {}

/// Trait for maps that can be created standalone, without loading an eBPF
/// object file.
pub trait CreatableMap: sealed::CreatableMap {
    /// Creates a new map with the given capacity and BPF map flags.
    fn create(max_entries: u32, flags: u32) -> Result<Self, MapError> {
        // Delegate to the sealed implementation, which knows the concrete
        // key/value sizes and kernel map type.
        <Self as sealed::CreatableMap>::create(max_entries, flags)
    }
}

impl<T: sealed::CreatableMap> CreatableMap for T {}
131
// Private module holding the working methods of the public marker traits.
// Keeping these traits unnameable outside the crate seals the public traits:
// downstream crates can use them as bounds but cannot implement them.
mod sealed {
    use super::{MapData, MapError, MapFd};

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait FromMapData: Sized {
        // Builds the typed map wrapper from raw map data.
        fn from_map_data(map_data: MapData) -> Result<Self, MapError>;
    }

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait InnerMap {
        // Returns the file descriptor used when inserting into a map-of-maps.
        fn fd(&self) -> &MapFd;
    }

    #[expect(unnameable_types, reason = "intentionally unnameable sealed trait")]
    pub trait CreatableMap: Sized {
        // Creates the map in the kernel with the given capacity and flags.
        fn create(max_entries: u32, flags: u32) -> Result<Self, MapError>;
    }
}
152
/// Errors occurring while working with maps.
#[derive(Error, Debug)]
pub enum MapError {
    /// A map-of-maps was declared without an inner map definition.
    #[error(
        "map `{outer_name}` is a map-of-maps but has no inner map definition; \
        use #[btf_map] with a BTF-typed map-of-maps that includes an inner map type"
    )]
    MissingInnerMapDefinition {
        /// Name of the outer map-of-maps.
        outer_name: String,
    },

    /// The raw map type value does not correspond to a known `bpf_map_type`.
    #[error("invalid map type {map_type}")]
    InvalidMapType {
        /// The raw map type value.
        map_type: u32,
    },

    /// The map name contains an interior NUL byte.
    #[error("invalid map name `{name}`")]
    InvalidName {
        /// The offending name.
        name: String,
    },

    /// The `BPF_MAP_CREATE` syscall failed.
    #[error("failed to create map `{name}`")]
    CreateError {
        /// Name of the map that failed to be created.
        name: String,
        /// The underlying OS error.
        #[source]
        io_error: io::Error,
    },

    /// `size_of::<K>()` does not match the map's declared key size.
    #[error("invalid key size {size}, expected {expected}")]
    InvalidKeySize {
        size: usize,
        expected: usize,
    },

    /// `size_of::<V>()` does not match the map's declared value size.
    #[error("invalid value size {size}, expected {expected}")]
    InvalidValueSize {
        size: usize,
        expected: usize,
    },

    /// An index beyond `max_entries` was used.
    #[error("the index is {index} but `max_entries` is {max_entries}")]
    OutOfBounds {
        index: u32,
        max_entries: u32,
    },

    /// The requested key is not present in the map.
    #[error("key not found")]
    KeyNotFound,

    /// The requested element is not present in the map.
    #[error("element not found")]
    ElementNotFound,

    /// An operation required a loaded program, but none was loaded.
    #[error("the program is not loaded")]
    ProgramNotLoaded,

    /// A generic I/O error.
    #[error(transparent)]
    IoError(#[from] io::Error),

    /// A bpf syscall failed.
    #[error(transparent)]
    SyscallError(#[from] SyscallError),

    /// Pinning the map to the BPF filesystem failed.
    #[error("map `{name:?}` requested pinning. pinning failed")]
    PinError {
        /// Name of the map, if known.
        name: Option<String>,
        /// The underlying pinning error.
        #[source]
        error: PinError,
    },

    /// The running kernel does not support program ids.
    #[error("program ids are not supported by the current kernel")]
    ProgIdNotSupported,

    /// The map type is not supported by aya.
    #[error(
        "type of {name} ({map_type:?}) is unsupported; see `EbpfLoader::allow_unsupported_maps`"
    )]
    Unsupported {
        /// Name of the unsupported map.
        name: String,
        /// The unsupported kernel map type.
        map_type: bpf_map_type,
    },
}
262
263impl From<InvalidTypeBinding<u32>> for MapError {
264 fn from(e: InvalidTypeBinding<u32>) -> Self {
265 let InvalidTypeBinding { value } = e;
266 Self::InvalidMapType { map_type: value }
267 }
268}
269
/// A file descriptor of a BPF map.
#[derive(Debug)]
pub struct MapFd {
    // MockableFd lets unit tests substitute fake fd values for real ones.
    fd: crate::MockableFd,
}

impl MapFd {
    /// Wraps an already-owned descriptor without validation.
    const fn from_fd(fd: crate::MockableFd) -> Self {
        Self { fd }
    }

    /// Duplicates the underlying file descriptor.
    pub fn try_clone(&self) -> io::Result<Self> {
        let Self { fd } = self;
        let fd = fd.try_clone()?;
        Ok(Self { fd })
    }
}

impl AsFd for MapFd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        let Self { fd } = self;
        fd.as_fd()
    }
}
295
/// A generic handle to a BPF map.
///
/// Every variant wraps the same [`MapData`]; the variant records which kernel
/// map type the data has, and therefore which typed wrapper (e.g. `HashMap`,
/// `RingBuf`) it may be converted into via `TryFrom`.
#[derive(Debug)]
pub enum Map {
    /// An `Array` map.
    Array(MapData),
    /// An `ArrayOfMaps` map.
    ArrayOfMaps(MapData),
    /// A `BloomFilter` map.
    BloomFilter(MapData),
    /// A `CpuMap` map.
    CpuMap(MapData),
    /// A `DevMap` map.
    DevMap(MapData),
    /// A `DevMapHash` map.
    DevMapHash(MapData),
    /// A `HashMap` map.
    HashMap(MapData),
    /// A `HashOfMaps` map.
    HashOfMaps(MapData),
    /// An `LpmTrie` map.
    LpmTrie(MapData),
    /// A `HashMap` map that uses an LRU eviction policy.
    LruHashMap(MapData),
    /// A `PerCpuArray` map.
    PerCpuArray(MapData),
    /// A `PerCpuHashMap` map.
    PerCpuHashMap(MapData),
    /// A `PerCpuHashMap` map that uses an LRU eviction policy.
    PerCpuLruHashMap(MapData),
    /// A `PerfEventArray` map.
    PerfEventArray(MapData),
    /// A `ProgramArray` map.
    ProgramArray(MapData),
    /// A `Queue` map.
    Queue(MapData),
    /// A `RingBuf` map.
    RingBuf(MapData),
    /// A `SockHash` map.
    SockHash(MapData),
    /// A `SockMap` map.
    SockMap(MapData),
    /// An `SkStorage` map.
    SkStorage(MapData),
    /// A `Stack` map.
    Stack(MapData),
    /// A `StackTraceMap` map.
    StackTraceMap(MapData),
    /// A map of a type aya has no typed wrapper for.
    Unsupported(MapData),
    /// An `XskMap` map.
    XskMap(MapData),
}
348
349impl Map {
350 const fn map_type(&self) -> u32 {
352 match self {
353 Self::Array(map) => map.obj.map_type(),
354 Self::ArrayOfMaps(map) => map.obj.map_type(),
355 Self::BloomFilter(map) => map.obj.map_type(),
356 Self::CpuMap(map) => map.obj.map_type(),
357 Self::DevMap(map) => map.obj.map_type(),
358 Self::DevMapHash(map) => map.obj.map_type(),
359 Self::HashMap(map) => map.obj.map_type(),
360 Self::HashOfMaps(map) => map.obj.map_type(),
361 Self::LpmTrie(map) => map.obj.map_type(),
362 Self::LruHashMap(map) => map.obj.map_type(),
363 Self::PerCpuArray(map) => map.obj.map_type(),
364 Self::PerCpuHashMap(map) => map.obj.map_type(),
365 Self::PerCpuLruHashMap(map) => map.obj.map_type(),
366 Self::PerfEventArray(map) => map.obj.map_type(),
367 Self::ProgramArray(map) => map.obj.map_type(),
368 Self::Queue(map) => map.obj.map_type(),
369 Self::RingBuf(map) => map.obj.map_type(),
370 Self::SockHash(map) => map.obj.map_type(),
371 Self::SockMap(map) => map.obj.map_type(),
372 Self::SkStorage(map) => map.obj.map_type(),
373 Self::Stack(map) => map.obj.map_type(),
374 Self::StackTraceMap(map) => map.obj.map_type(),
375 Self::Unsupported(map) => map.obj.map_type(),
376 Self::XskMap(map) => map.obj.map_type(),
377 }
378 }
379
380 pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
385 match self {
386 Self::Array(map) => map.pin(path),
387 Self::ArrayOfMaps(map) => map.pin(path),
388 Self::BloomFilter(map) => map.pin(path),
389 Self::CpuMap(map) => map.pin(path),
390 Self::DevMap(map) => map.pin(path),
391 Self::DevMapHash(map) => map.pin(path),
392 Self::HashMap(map) => map.pin(path),
393 Self::HashOfMaps(map) => map.pin(path),
394 Self::LpmTrie(map) => map.pin(path),
395 Self::LruHashMap(map) => map.pin(path),
396 Self::PerCpuArray(map) => map.pin(path),
397 Self::PerCpuHashMap(map) => map.pin(path),
398 Self::PerCpuLruHashMap(map) => map.pin(path),
399 Self::PerfEventArray(map) => map.pin(path),
400 Self::ProgramArray(map) => map.pin(path),
401 Self::Queue(map) => map.pin(path),
402 Self::RingBuf(map) => map.pin(path),
403 Self::SockHash(map) => map.pin(path),
404 Self::SockMap(map) => map.pin(path),
405 Self::SkStorage(map) => map.pin(path),
406 Self::Stack(map) => map.pin(path),
407 Self::StackTraceMap(map) => map.pin(path),
408 Self::Unsupported(map) => map.pin(path),
409 Self::XskMap(map) => map.pin(path),
410 }
411 }
412
413 pub fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
424 let map_type = map_data.obj.map_type();
425 let map = match bpf_map_type::try_from(map_type)? {
426 bpf_map_type::BPF_MAP_TYPE_HASH => Self::HashMap(map_data),
427 bpf_map_type::BPF_MAP_TYPE_ARRAY => Self::Array(map_data),
428 bpf_map_type::BPF_MAP_TYPE_PROG_ARRAY => Self::ProgramArray(map_data),
429 bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY => Self::PerfEventArray(map_data),
430 bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH => Self::PerCpuHashMap(map_data),
431 bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY => Self::PerCpuArray(map_data),
432 bpf_map_type::BPF_MAP_TYPE_STACK_TRACE => Self::StackTraceMap(map_data),
433 bpf_map_type::BPF_MAP_TYPE_LRU_HASH => Self::LruHashMap(map_data),
434 bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH => Self::PerCpuLruHashMap(map_data),
435 bpf_map_type::BPF_MAP_TYPE_LPM_TRIE => Self::LpmTrie(map_data),
436 bpf_map_type::BPF_MAP_TYPE_DEVMAP => Self::DevMap(map_data),
437 bpf_map_type::BPF_MAP_TYPE_SOCKMAP => Self::SockMap(map_data),
438 bpf_map_type::BPF_MAP_TYPE_CPUMAP => Self::CpuMap(map_data),
439 bpf_map_type::BPF_MAP_TYPE_XSKMAP => Self::XskMap(map_data),
440 bpf_map_type::BPF_MAP_TYPE_SOCKHASH => Self::SockHash(map_data),
441 bpf_map_type::BPF_MAP_TYPE_QUEUE => Self::Queue(map_data),
442 bpf_map_type::BPF_MAP_TYPE_STACK => Self::Stack(map_data),
443 bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH => Self::DevMapHash(map_data),
444 bpf_map_type::BPF_MAP_TYPE_RINGBUF => Self::RingBuf(map_data),
445 bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER => Self::BloomFilter(map_data),
446 bpf_map_type::BPF_MAP_TYPE_CGROUP_ARRAY => Self::Unsupported(map_data),
447 bpf_map_type::BPF_MAP_TYPE_ARRAY_OF_MAPS => Self::ArrayOfMaps(map_data),
448 bpf_map_type::BPF_MAP_TYPE_HASH_OF_MAPS => Self::HashOfMaps(map_data),
449 bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED => Self::Unsupported(map_data),
450 bpf_map_type::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY => Self::Unsupported(map_data),
451 bpf_map_type::BPF_MAP_TYPE_SK_STORAGE => Self::SkStorage(map_data),
452 bpf_map_type::BPF_MAP_TYPE_STRUCT_OPS => Self::Unsupported(map_data),
453 bpf_map_type::BPF_MAP_TYPE_INODE_STORAGE => Self::Unsupported(map_data),
454 bpf_map_type::BPF_MAP_TYPE_TASK_STORAGE => Self::Unsupported(map_data),
455 bpf_map_type::BPF_MAP_TYPE_USER_RINGBUF => Self::Unsupported(map_data),
456 bpf_map_type::BPF_MAP_TYPE_CGRP_STORAGE => Self::Unsupported(map_data),
457 bpf_map_type::BPF_MAP_TYPE_ARENA => Self::Unsupported(map_data),
458 bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED => {
459 Self::Unsupported(map_data)
460 }
461 bpf_map_type::BPF_MAP_TYPE_UNSPEC => return Err(MapError::InvalidMapType { map_type }),
462 bpf_map_type::__MAX_BPF_MAP_TYPE => return Err(MapError::InvalidMapType { map_type }),
463 };
464 Ok(map)
465 }
466
467 pub const fn fd(&self) -> &MapFd {
469 match self {
470 Self::Array(map) => map.fd(),
471 Self::ArrayOfMaps(map) => map.fd(),
472 Self::BloomFilter(map) => map.fd(),
473 Self::CpuMap(map) => map.fd(),
474 Self::DevMap(map) => map.fd(),
475 Self::DevMapHash(map) => map.fd(),
476 Self::HashMap(map) => map.fd(),
477 Self::HashOfMaps(map) => map.fd(),
478 Self::LpmTrie(map) => map.fd(),
479 Self::LruHashMap(map) => map.fd(),
480 Self::PerCpuArray(map) => map.fd(),
481 Self::PerCpuHashMap(map) => map.fd(),
482 Self::PerCpuLruHashMap(map) => map.fd(),
483 Self::PerfEventArray(map) => map.fd(),
484 Self::ProgramArray(map) => map.fd(),
485 Self::Queue(map) => map.fd(),
486 Self::RingBuf(map) => map.fd(),
487 Self::SockHash(map) => map.fd(),
488 Self::SockMap(map) => map.fd(),
489 Self::SkStorage(map) => map.fd(),
490 Self::Stack(map) => map.fd(),
491 Self::StackTraceMap(map) => map.fd(),
492 Self::Unsupported(map) => map.fd(),
493 Self::XskMap(map) => map.fd(),
494 }
495 }
496}
497
// Generates an inherent `pin` method for map wrapper types.
//
// First arm: fans a parenthesized list of Pod type-parameter idents out over a
// brace-list of map types. Second arm: emits the impl for a single type; the
// wrapper is assumed to store its MapData in a field named `inner`.
macro_rules! impl_map_pin {
    ($ty_param:tt {
        $($ty:ident),+ $(,)?
    }) => {
        $(impl_map_pin!(<$ty_param> $ty);)+
    };
    (
        <($($ty_param:ident),*)>
        $ty:ident
    ) => {
        impl<T: Borrow<MapData>, $($ty_param: Pod),*> $ty<T, $($ty_param),*>
        {
            // Pins the map to a BPF filesystem path.
            pub fn pin<P: AsRef<Path>>(self, path: P) -> Result<(), PinError> {
                let data = self.inner.borrow();
                data.pin(path)
            }
        }

    };
}

// Map types with no Pod type parameters.
impl_map_pin!(() {
    ProgramArray,
    SockMap,
    StackTraceMap,
    CpuMap,
    DevMap,
    DevMapHash,
    XskMap,
});

// Map types with a single value type parameter.
impl_map_pin!((V) {
    Array,
    PerCpuArray,
    SockHash,
    BloomFilter,
    Queue,
    SkStorage,
    Stack,
});

// Map types with key and value type parameters.
impl_map_pin!((K, V) {
    HashMap,
    PerCpuHashMap,
    LpmTrie,
});
549
// Generates `TryFrom<Map>`, `TryFrom<&Map>` and `TryFrom<&mut Map>` impls for
// typed map wrappers.
//
// Arms, top to bottom:
// 1. fan-out: a parenthesized type-parameter list plus a brace-list of target
//    types, each optionally sourced `from` one or more `Map` variants;
// 2. default: a type with no explicit `from` converts from the same-named
//    `Map` variant;
// 3. expansion: emit the three reference flavors for one type;
// 4. `@impl`: the actual `TryFrom` impl for one (flavor, type) pair.
macro_rules! impl_try_from_map {
    ($ty_param:tt {
        $($(#[$meta:meta])* $ty:ident $(from $($variant:ident)|+)?),+ $(,)?
    }) => {
        $(impl_try_from_map!($(#[$meta])* <$ty_param> $ty $(from $($variant)|+)?);)+
    };
    ($(#[$meta:meta])* <$ty_param:tt> $ty:ident) => {
        impl_try_from_map!($(#[$meta])* <$ty_param> $ty from $ty);
    };
    (
        $(#[$meta:meta])* <($($ty_param:ident),*)> $ty:ident from $($variant:ident)|+
    ) => {
        impl_try_from_map!(@impl $(#[$meta])* <'a> ($($ty_param: Pod),*) $ty from $($variant)|+);
        impl_try_from_map!(@impl $(#[$meta])* <'a mut> ($($ty_param: Pod),*) $ty from $($variant)|+);
        impl_try_from_map!(@impl $(#[$meta])* <> ($($ty_param: Pod),*) $ty from $($variant)|+);
    };
    (@impl
        $(#[$meta:meta])*
        <$($l:lifetime $($m:ident)?)?>
        ($($ty_param:ident $(: $bound:path)?),*)
        $ty:ident from $($variant:ident)|+
    ) => {
        $(#[$meta])*
        impl<$($l,)? $($ty_param $(: $bound)?),*> TryFrom<$(&$l $($m)?)? Map>
            for $ty<$(&$l $($m)?)? MapData, $($ty_param),*>
        {
            type Error = MapError;

            fn try_from(map: $(&$l $($m)?)? Map) -> Result<Self, Self::Error> {
                match map {
                    $(Map::$variant(map_data) => Self::new(map_data),)+
                    map => Err(MapError::InvalidMapType {
                        map_type: map.map_type()
                    }),
                }
            }
        }
    };
}

// Wrappers with no Pod type parameters.
impl_try_from_map!(() {
    CpuMap,
    DevMap,
    DevMapHash,
    PerfEventArray,
    ProgramArray,
    RingBuf,
    SockMap,
    StackTraceMap,
    XskMap,
});

// Wrappers with a value type parameter.
impl_try_from_map!((V) {
    Array,
    BloomFilter,
    PerCpuArray,
    Queue,
    SockHash,
    SkStorage,
    Stack,
});

// Wrappers with key and value type parameters. HashMap-style wrappers also
// accept the LRU-flavored Map variants.
impl_try_from_map!((K, V) {
    HashMap from HashMap|LruHashMap,
    LpmTrie,
    PerCpuHashMap from PerCpuHashMap|PerCpuLruHashMap,
});
630
// Generates the TryFrom impls for map-of-maps wrappers, whose value type
// parameter is bounded by `InnerMap` instead of `Pod`.
macro_rules! impl_try_from_map_of_maps {
    ($ty:ident) => {
        impl_try_from_map_of_maps!($ty <>);
    };
    ($ty:ident <$($pre:ident : $pre_bound:path),*>) => {
        // Reuse impl_try_from_map's @impl arm for the three reference flavors.
        impl_try_from_map!(@impl <'a> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
        impl_try_from_map!(@impl <'a mut> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
        impl_try_from_map!(@impl <> ($($pre: $pre_bound,)* V: InnerMap) $ty from $ty);
    };
}

impl_try_from_map_of_maps!(ArrayOfMaps);
impl_try_from_map_of_maps!(HashOfMaps<K: Pod>);
647
// Generates the sealed `FromMapData` and `InnerMap` impls for typed wrappers
// over an owned `MapData`.
//
// Arms: fan-out over a brace-list; a `via $accessor` arm for wrappers that
// expose their MapData through a method; and a default arm for wrappers that
// store it in a field named `inner`.
macro_rules! impl_from_map_data {
    ($ty_param:tt { $($ty:ident),+ $(,)? }) => {
        $(impl_from_map_data!(<$ty_param> $ty);)+
    };
    (<($($ty_param:ident),*)> $ty:ident via $accessor:ident) => {
        impl<$($ty_param: Pod),*> sealed::FromMapData for $ty<MapData, $($ty_param),*> {
            fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
                Self::new(map_data)
            }
        }
        impl<$($ty_param: Pod),*> sealed::InnerMap for $ty<MapData, $($ty_param),*> {
            fn fd(&self) -> &MapFd {
                self.$accessor().fd()
            }
        }
    };
    (<($($ty_param:ident),*)> $ty:ident) => {
        impl<$($ty_param: Pod),*> sealed::FromMapData for $ty<MapData, $($ty_param),*> {
            fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
                Self::new(map_data)
            }
        }
        impl<$($ty_param: Pod),*> sealed::InnerMap for $ty<MapData, $($ty_param),*> {
            fn fd(&self) -> &MapFd {
                self.inner.fd()
            }
        }
    };
}

// Wrappers with no Pod type parameters, MapData stored in `inner`.
impl_from_map_data!(() {
    CpuMap, DevMap, DevMapHash,
    SockMap, StackTraceMap, XskMap,
});

// Wrappers that expose their MapData through a `map_data()` accessor.
impl_from_map_data!(<()> PerfEventArray via map_data);
impl_from_map_data!(<()> RingBuf via map_data);

// Wrappers with a value type parameter.
impl_from_map_data!((V) {
    Array, BloomFilter, PerCpuArray,
    Queue, SockHash, SkStorage, Stack,
});

// Wrappers with key and value type parameters.
impl_from_map_data!((K, V) {
    HashMap, LpmTrie, PerCpuHashMap,
});
701
// Raw MapData is trivially constructible from itself, so it can be used
// wherever a `FromMapData` bound is required.
impl sealed::FromMapData for MapData {
    fn from_map_data(map_data: MapData) -> Result<Self, MapError> {
        Ok(map_data)
    }
}

// MapData and MapFd can both act as inner maps of a map-of-maps.
impl sealed::InnerMap for MapData {
    fn fd(&self) -> &MapFd {
        self.fd()
    }
}

impl sealed::InnerMap for MapFd {
    fn fd(&self) -> &MapFd {
        self
    }
}
719
// Generates the sealed `CreatableMap` impl for a wrapper over owned MapData:
// builds a map object from fixed key/value sizes and a fixed kernel map type,
// then creates it in the kernel under the given placeholder name.
macro_rules! impl_creatable_map {
    ($ty:ident<MapData $(, $p:ident: Pod)*>, $map_type:expr, $key_size:expr, $value_size:expr, $name:expr) => {
        impl<$($p: Pod),*> sealed::CreatableMap for $ty<MapData, $($p),*> {
            fn create(max_entries: u32, flags: u32) -> Result<Self, MapError> {
                let obj = aya_obj::Map::new_from_params(
                    $map_type as u32, $key_size, $value_size, max_entries, flags,
                );
                Self::new(MapData::create(obj, $name, None)?)
            }
        }
    };
}

// Note: queue/stack/bloom-filter maps have a zero key size by definition.
impl_creatable_map!(Array<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_ARRAY, size_of::<u32>() as u32, size_of::<V>() as u32, "standalone_array");
impl_creatable_map!(PerCpuArray<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY, size_of::<u32>() as u32, size_of::<V>() as u32, "standalone_percpu_array");
impl_creatable_map!(BloomFilter<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER, 0, size_of::<V>() as u32, "standalone_bloom_filter");
impl_creatable_map!(Queue<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_QUEUE, 0, size_of::<V>() as u32, "standalone_queue");
impl_creatable_map!(Stack<MapData, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_STACK, 0, size_of::<V>() as u32, "standalone_stack");
impl_creatable_map!(HashMap<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_HASH, size_of::<K>() as u32, size_of::<V>() as u32, "standalone_hash");
impl_creatable_map!(PerCpuHashMap<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH, size_of::<K>() as u32, size_of::<V>() as u32, "standalone_percpu_hash");
impl_creatable_map!(LpmTrie<MapData, K: Pod, V: Pod>,
    bpf_map_type::BPF_MAP_TYPE_LPM_TRIE, size_of::<lpm_trie::Key<K>>() as u32, size_of::<V>() as u32, "standalone_lpm_trie");
749
750pub(crate) const fn check_bounds(map: &MapData, index: u32) -> Result<(), MapError> {
751 let max_entries = map.obj.max_entries();
752 if index >= max_entries {
753 Err(MapError::OutOfBounds { index, max_entries })
754 } else {
755 Ok(())
756 }
757}
758
759pub(crate) const fn check_kv_size<K, V>(map: &MapData) -> Result<(), MapError> {
760 let size = size_of::<K>();
761 let expected = map.obj.key_size() as usize;
762 if size != expected {
763 return Err(MapError::InvalidKeySize { size, expected });
764 }
765 let size = size_of::<V>();
766 let expected = map.obj.value_size() as usize;
767 if size != expected {
768 return Err(MapError::InvalidValueSize { size, expected });
769 }
770 Ok(())
771}
772
773pub(crate) const fn check_v_size<V>(map: &MapData) -> Result<(), MapError> {
774 let size = size_of::<V>();
775 let expected = map.obj.value_size() as usize;
776 if size != expected {
777 return Err(MapError::InvalidValueSize { size, expected });
778 }
779 Ok(())
780}
781
/// A loaded BPF map: the parsed map object plus its kernel file descriptor.
#[derive(Debug)]
pub struct MapData {
    // Parsed map definition (type, key/value sizes, max_entries, data, ...).
    obj: aya_obj::Map,
    // Kernel handle to the created/opened map.
    fd: MapFd,
}
790
impl MapData {
    /// Creates a new map in the kernel with the parameters of `obj`.
    pub fn create(
        obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
    ) -> Result<Self, MapError> {
        Self::create_with_inner_map_fd(obj, name, btf_fd, None)
    }

    /// Creates a new map, optionally supplying the fd of an inner map
    /// (required by the kernel when creating map-of-maps types).
    pub(crate) fn create_with_inner_map_fd(
        mut obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
        inner_map_fd: Option<BorrowedFd<'_>>,
    ) -> Result<Self, MapError> {
        // An interior NUL byte makes the name unrepresentable as a C string.
        let c_name = CString::new(name)
            .map_err(|std::ffi::NulError { .. }| MapError::InvalidName { name: name.into() })?;

        // Perf event arrays hold one ring per CPU: when max_entries is unset
        // or exceeds the possible-CPU count, clamp it to nr_cpus.
        if obj.map_type() == bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32 {
            let nr_cpus = nr_cpus().map_err(|(_, error)| MapError::IoError(error))? as u32;
            if obj.max_entries() == 0 || obj.max_entries() > nr_cpus {
                obj.set_max_entries(nr_cpus);
            }
        }

        let fd = bpf_create_map(&c_name, &obj, btf_fd, inner_map_fd).map_err(|io_error| {
            MapError::CreateError {
                name: name.into(),
                io_error,
            }
        })?;
        Ok(Self {
            obj,
            fd: MapFd::from_fd(fd),
        })
    }

    /// Opens the map pinned at `path`, or creates it (and pins it there) if no
    /// pinned object exists yet.
    pub(crate) fn create_pinned_by_name<P: AsRef<Path>>(
        path: P,
        obj: aya_obj::Map,
        name: &str,
        btf_fd: Option<BorrowedFd<'_>>,
        inner_map_obj: Option<aya_obj::Map>,
    ) -> Result<Self, MapError> {
        use std::os::unix::ffi::OsStrExt as _;

        let path = path.as_ref();
        let path_string = match CString::new(path.as_os_str().as_bytes()) {
            Ok(path) => path,
            Err(error) => {
                return Err(MapError::PinError {
                    name: Some(name.into()),
                    error: PinError::InvalidPinPath {
                        path: path.to_path_buf(),
                        error,
                    },
                });
            }
        };
        // Reuse an already-pinned map if one exists at this path.
        if let Ok(fd) = bpf_get_object(&path_string) {
            Ok(Self {
                obj,
                fd: MapFd::from_fd(fd),
            })
        } else {
            // For map-of-maps, the kernel needs a live inner map fd to derive
            // the inner definition; keep it alive until creation completes.
            let inner_map;
            let inner_map_fd = if let Some(inner) = inner_map_obj {
                inner_map = Self::create(inner, &format!("{name}.inner"), btf_fd)?;
                Some(inner_map.fd().as_fd())
            } else {
                None
            };
            let map = Self::create_with_inner_map_fd(obj, name, btf_fd, inner_map_fd)?;
            map.pin(path).map_err(|error| MapError::PinError {
                name: Some(name.into()),
                error,
            })?;
            Ok(map)
        }
    }

    /// Writes the map's initial data into the kernel and freezes read-only
    /// (`.rodata`) maps so userspace cannot modify them afterwards.
    pub(crate) fn finalize(&mut self) -> Result<(), MapError> {
        let Self { obj, fd } = self;
        if !obj.data().is_empty() {
            bpf_map_update_elem_ptr(fd.as_fd(), &0, obj.data_mut().as_mut_ptr(), 0)
                .map_err(|io_error| SyscallError {
                    call: "bpf_map_update_elem",
                    io_error,
                })
                .map_err(MapError::from)?;
        }
        if obj.section_kind() == EbpfSectionKind::Rodata {
            bpf_map_freeze(fd.as_fd())
                .map_err(|io_error| SyscallError {
                    call: "bpf_map_freeze",
                    io_error,
                })
                .map_err(MapError::from)?;
        }
        Ok(())
    }

    /// Loads a map pinned at `path` on a BPF filesystem.
    pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, MapError> {
        use std::os::unix::ffi::OsStrExt as _;

        let path = path.as_ref();
        let path_string =
            CString::new(path.as_os_str().as_bytes()).map_err(|error| MapError::PinError {
                name: None,
                error: PinError::InvalidPinPath {
                    path: path.into(),
                    error,
                },
            })?;

        let fd = bpf_get_object(&path_string).map_err(|io_error| SyscallError {
            call: "BPF_OBJ_GET",
            io_error,
        })?;

        Self::from_fd_inner(fd)
    }

    /// Loads a map by its kernel-assigned map id.
    pub fn from_id(id: u32) -> Result<Self, MapError> {
        let fd = bpf_map_get_fd_by_id(id)?;
        Self::from_fd_inner(fd)
    }

    // Reconstructs the map object from the kernel's map info for the fd.
    fn from_fd_inner(fd: crate::MockableFd) -> Result<Self, MapError> {
        let MapInfo(info) = MapInfo::new_from_fd(fd.as_fd())?;
        Ok(Self {
            obj: parse_map_info(info, PinningType::None),
            fd: MapFd::from_fd(fd),
        })
    }

    /// Loads a map from an already-owned map file descriptor.
    pub fn from_fd(fd: OwnedFd) -> Result<Self, MapError> {
        let fd = crate::MockableFd::from_fd(fd);
        Self::from_fd_inner(fd)
    }

    /// Pins the map to `path` on a BPF filesystem.
    pub fn pin<P: AsRef<Path>>(&self, path: P) -> Result<(), PinError> {
        use std::os::unix::ffi::OsStrExt as _;

        let Self { fd, obj: _ } = self;
        let path = path.as_ref();
        let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
            PinError::InvalidPinPath {
                path: path.to_path_buf(),
                error,
            }
        })?;
        bpf_pin_object(fd.as_fd(), &path_string).map_err(|io_error| SyscallError {
            call: "BPF_OBJ_PIN",
            io_error,
        })?;
        Ok(())
    }

    /// Returns the map's file descriptor.
    pub const fn fd(&self) -> &MapFd {
        let Self { obj: _, fd } = self;
        fd
    }

    // Returns the parsed map object.
    pub(crate) const fn obj(&self) -> &aya_obj::Map {
        let Self { obj, fd: _ } = self;
        obj
    }

    /// Queries the kernel for this map's current info.
    pub fn info(&self) -> Result<MapInfo, MapError> {
        MapInfo::new_from_fd(self.fd.as_fd())
    }
}
1011
/// Maps that support iteration over their (key, value) pairs.
pub trait IterableMap<K: Pod, V> {
    /// Returns the raw map data backing this map.
    fn map(&self) -> &MapData;

    /// Looks up the value stored under `key`.
    fn get(&self, key: &K) -> Result<V, MapError>;
}

/// Iterator over the keys of a map, driven by `BPF_MAP_GET_NEXT_KEY`.
pub struct MapKeys<'coll, K: Pod> {
    map: &'coll MapData,
    // Latched after a syscall failure so the iterator fuses.
    err: bool,
    // Cursor: the last key yielded; None before the first call.
    key: Option<K>,
}
1027
impl<'coll, K: Pod> MapKeys<'coll, K> {
    /// Creates a key iterator positioned before the map's first key.
    const fn new(map: &'coll MapData) -> Self {
        Self {
            map,
            err: false,
            key: None,
        }
    }
}
1037
1038impl<K: Pod> Iterator for MapKeys<'_, K> {
1039 type Item = Result<K, MapError>;
1040
1041 fn next(&mut self) -> Option<Result<K, MapError>> {
1042 if self.err {
1043 return None;
1044 }
1045
1046 let fd = self.map.fd().as_fd();
1047 let key = bpf_map_get_next_key(fd, self.key.as_ref()).map_err(|io_error| SyscallError {
1048 call: "bpf_map_get_next_key",
1049 io_error,
1050 });
1051 match key {
1052 Err(err) => {
1053 self.err = true;
1054 Some(Err(err.into()))
1055 }
1056 Ok(key) => {
1057 self.key = key;
1058 key.map(Ok)
1059 }
1060 }
1061 }
1062}
1063
/// Iterator over the (key, value) pairs of an [`IterableMap`].
pub struct MapIter<'coll, K: Pod, V, I: IterableMap<K, V>> {
    // Key cursor over the underlying map.
    keys: MapKeys<'coll, K>,
    map: &'coll I,
    // V appears only in the Item type, not in any field.
    _v: PhantomData<V>,
}

impl<'coll, K: Pod, V, I: IterableMap<K, V>> MapIter<'coll, K, V, I> {
    /// Creates an iterator starting at the map's first key.
    fn new(map: &'coll I) -> Self {
        Self {
            keys: MapKeys::new(map.map()),
            map,
            _v: PhantomData,
        }
    }
}
1080
1081impl<K: Pod, V, I: IterableMap<K, V>> Iterator for MapIter<'_, K, V, I> {
1082 type Item = Result<(K, V), MapError>;
1083
1084 fn next(&mut self) -> Option<Self::Item> {
1085 loop {
1086 match self.keys.next() {
1087 Some(Ok(key)) => match self.map.get(&key) {
1088 Ok(value) => return Some(Ok((key, value))),
1089 Err(MapError::KeyNotFound) => {}
1090 Err(e) => return Some(Err(e)),
1091 },
1092 Some(Err(e)) => return Some(Err(e)),
1093 None => return None,
1094 }
1095 }
1096 }
1097}
1098
/// Kernel-layout buffer for per-CPU map values: one slot per CPU, each padded
/// to a multiple of 8 bytes.
pub(crate) struct PerCpuKernelMem {
    bytes: Vec<u8>,
}

impl PerCpuKernelMem {
    /// Returns a mutable pointer to the buffer, for passing to bpf syscalls.
    pub(crate) const fn as_mut_ptr(&mut self) -> *mut u8 {
        self.bytes.as_mut_ptr()
    }
}

/// The values of a per-CPU map aggregated across all CPUs.
#[derive(Debug)]
pub struct PerCpuValues<T: Pod> {
    // One value per possible CPU, indexed by CPU id.
    values: Box<[T]>,
}
1137
1138impl<T: Pod> TryFrom<Vec<T>> for PerCpuValues<T> {
1139 type Error = io::Error;
1140
1141 fn try_from(values: Vec<T>) -> Result<Self, Self::Error> {
1142 let nr_cpus = nr_cpus().map_err(|(_, error)| error)?;
1143 if values.len() != nr_cpus {
1144 return Err(io::Error::new(
1145 io::ErrorKind::InvalidInput,
1146 format!("not enough values ({}), nr_cpus: {}", values.len(), nr_cpus),
1147 ));
1148 }
1149 Ok(Self {
1150 values: values.into_boxed_slice(),
1151 })
1152 }
1153}
1154
impl<T: Pod> PerCpuValues<T> {
    /// Allocates a zeroed kernel-layout buffer with one `T` slot per CPU.
    ///
    /// Each slot is padded to a multiple of 8 bytes, matching the layout the
    /// kernel uses for per-CPU value transfers (assumed from the matching
    /// stride in `from_kernel_mem`/`build_kernel_mem` — TODO confirm against
    /// kernel docs).
    pub(crate) fn alloc_kernel_mem() -> Result<PerCpuKernelMem, io::Error> {
        let value_size = size_of::<T>().next_multiple_of(8);
        let nr_cpus = nr_cpus().map_err(|(_, error)| error)?;
        Ok(PerCpuKernelMem {
            bytes: vec![0u8; nr_cpus * value_size],
        })
    }

    /// Decodes one `T` per 8-byte-padded slot out of `mem`.
    ///
    /// Caller requirement: every slot of `mem` must contain a valid bit
    /// pattern for `T` — presumably guaranteed for buffers filled by the
    /// kernel for a map whose value type is `T`.
    pub(crate) unsafe fn from_kernel_mem(mem: PerCpuKernelMem) -> Self {
        let stride = size_of::<T>().next_multiple_of(8);
        let mut values = Vec::new();
        let mut offset = 0;
        while offset < mem.bytes.len() {
            // read_unaligned: slots have 8-byte spacing, which may be less
            // strict than align_of::<T>().
            values.push(unsafe { ptr::read_unaligned(mem.bytes.as_ptr().add(offset).cast()) });
            offset += stride;
        }

        Self {
            values: values.into_boxed_slice(),
        }
    }

    /// Serializes `self` into a freshly allocated kernel-layout buffer.
    pub(crate) fn build_kernel_mem(&self) -> Result<PerCpuKernelMem, io::Error> {
        let mut mem = Self::alloc_kernel_mem()?;
        let mem_ptr = mem.as_mut_ptr();
        let value_size = size_of::<T>().next_multiple_of(8);
        for (i, value) in self.values.iter().enumerate() {
            // write_unaligned for the same alignment reason as in
            // from_kernel_mem; padding bytes stay zeroed from allocation.
            unsafe { ptr::write_unaligned(mem_ptr.byte_add(i * value_size).cast(), *value) }
        }

        Ok(mem)
    }
}
1189
1190impl<T: Pod> Deref for PerCpuValues<T> {
1191 type Target = Box<[T]>;
1192
1193 fn deref(&self) -> &Self::Target {
1194 &self.values
1195 }
1196}
1197
#[cfg(test)]
mod test_utils {
    use aya_obj::{
        EbpfSectionKind,
        generated::{bpf_cmd, bpf_map_type},
        maps::LegacyMap,
    };

    use crate::{
        bpf_map_def,
        maps::MapData,
        sys::{Syscall, override_syscall},
    };

    // Creates a MapData with the BPF_MAP_CREATE syscall mocked to return a
    // fake fd; any other syscall panics the test.
    pub(super) fn new_map(obj: aya_obj::Map) -> MapData {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            call => panic!("unexpected syscall {call:?}"),
        });
        MapData::create(obj, "foo", None).unwrap()
    }

    // Builds a legacy map object of the given type with key size of K,
    // a 4-byte value and 1024 entries.
    pub(super) fn new_obj_map<K>(map_type: bpf_map_type) -> aya_obj::Map {
        aya_obj::Map::Legacy(LegacyMap {
            def: bpf_map_def {
                map_type: map_type as u32,
                key_size: size_of::<K>() as u32,
                value_size: 4,
                max_entries: 1024,
                ..Default::default()
            },
            inner_def: None,
            section_index: 0,
            section_kind: EbpfSectionKind::Maps,
            data: Vec::new(),
            symbol_index: None,
        })
    }

    // Same as new_obj_map, but with a caller-chosen max_entries.
    pub(super) fn new_obj_map_with_max_entries<K>(
        map_type: bpf_map_type,
        max_entries: u32,
    ) -> aya_obj::Map {
        aya_obj::Map::Legacy(LegacyMap {
            def: bpf_map_def {
                map_type: map_type as u32,
                key_size: size_of::<K>() as u32,
                value_size: 4,
                max_entries,
                ..Default::default()
            },
            inner_def: None,
            section_index: 0,
            section_kind: EbpfSectionKind::Maps,
            data: Vec::new(),
            symbol_index: None,
        })
    }
}
1260
#[cfg(test)]
mod tests {
    use std::{ffi::c_char, os::fd::AsRawFd as _};

    use assert_matches::assert_matches;
    use aya_obj::generated::{bpf_cmd, bpf_map_info};
    use libc::EFAULT;

    use super::*;
    use crate::sys::{Syscall, override_syscall};

    fn new_obj_map() -> aya_obj::Map {
        test_utils::new_obj_map::<u32>(bpf_map_type::BPF_MAP_TYPE_HASH)
    }

    #[test]
    fn test_from_map_id() {
        // Mock BPF_MAP_GET_FD_BY_ID and the subsequent info query issued by
        // MapData::from_id; everything else fails with EFAULT.
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_FD_BY_ID,
                attr,
            } => {
                assert_eq!(
                    unsafe { attr.__bindgen_anon_6.__bindgen_anon_1.map_id },
                    1234
                );
                Ok(crate::MockableFd::mock_signed_fd().into())
            }
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                assert_eq!(
                    unsafe { attr.info.bpf_fd },
                    crate::MockableFd::mock_unsigned_fd(),
                );
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        assert_matches!(
            MapData::from_id(1234),
            Ok(MapData {
                obj: _,
                fd,
            }) => assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd())
        );
    }

    #[test]
    fn test_create() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        assert_matches!(
            MapData::create(new_obj_map(), "foo", None),
            Ok(MapData {
                obj: _,
                fd,
            }) => assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd())
        );
    }

    #[test]
    fn test_create_perf_event_array() {
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        let nr_cpus = nr_cpus().unwrap();

        // max_entries larger than the CPU count is clamped to nr_cpus.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                65535,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), nr_cpus as u32)
            }
        );

        // max_entries of 0 defaults to nr_cpus.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                0,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), nr_cpus as u32)
            }
        );

        // A small explicit max_entries is preserved as-is.
        assert_matches!(
            MapData::create(test_utils::new_obj_map_with_max_entries::<u32>(
                bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                1,
            ), "foo", None),
            Ok(MapData {
                obj,
                fd,
            }) => {
                assert_eq!(fd.as_fd().as_raw_fd(), crate::MockableFd::mock_signed_fd());
                assert_eq!(obj.max_entries(), 1)
            }
        );
    }

    #[test]
    fn test_name() {
        const TEST_NAME: &str = "foo";

        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_CREATE,
                ..
            } => Ok(crate::MockableFd::mock_signed_fd().into()),
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                assert_eq!(
                    unsafe { attr.info.info_len },
                    size_of::<bpf_map_info>() as u32
                );
                // Write TEST_NAME into the name field of the caller-supplied
                // bpf_map_info buffer, as the kernel would.
                unsafe {
                    let name_bytes = std::mem::transmute::<&[u8], &[c_char]>(TEST_NAME.as_bytes());
                    let map_info = attr.info.info as *mut bpf_map_info;
                    map_info.write({
                        let mut map_info = map_info.read();
                        map_info.name[..name_bytes.len()].copy_from_slice(name_bytes);
                        map_info
                    })
                }
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        let map_data = MapData::create(new_obj_map(), TEST_NAME, None).unwrap();
        assert_eq!(TEST_NAME, map_data.info().unwrap().name_as_str().unwrap());
    }

    #[test]
    fn test_loaded_maps() {
        // Simulate five loaded maps with ids 1..=5; fds are offset from the
        // ids by the mock fd base so both can be recovered in assertions.
        override_syscall(|call| match call {
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_NEXT_ID,
                attr,
            } => unsafe {
                let id = attr.__bindgen_anon_6.__bindgen_anon_1.start_id;
                if id < 5 {
                    attr.__bindgen_anon_6.next_id = id + 1;
                    Ok(0)
                } else {
                    Err((-1, io::Error::from_raw_os_error(libc::ENOENT)))
                }
            },
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_MAP_GET_FD_BY_ID,
                attr,
            } => Ok((unsafe { attr.__bindgen_anon_6.__bindgen_anon_1.map_id }
                + crate::MockableFd::mock_unsigned_fd())
            .into()),
            Syscall::Ebpf {
                cmd: bpf_cmd::BPF_OBJ_GET_INFO_BY_FD,
                attr,
            } => {
                unsafe {
                    let info = attr.info;
                    let map_info = info.info as *mut bpf_map_info;
                    map_info.write({
                        let mut map_info = map_info.read();
                        map_info.id = info.bpf_fd - crate::MockableFd::mock_unsigned_fd();
                        map_info.key_size = 32;
                        map_info.value_size = 64;
                        map_info.map_flags = 1234;
                        map_info.max_entries = 99;
                        map_info
                    });
                }
                Ok(0)
            }
            _ => Err((-1, io::Error::from_raw_os_error(EFAULT))),
        });

        assert_eq!(
            loaded_maps()
                .map(|map_info| {
                    let map_info = map_info.unwrap();
                    (
                        map_info.id(),
                        map_info.key_size(),
                        map_info.value_size(),
                        map_info.map_flags(),
                        map_info.max_entries(),
                        map_info.fd().unwrap().as_fd().as_raw_fd(),
                    )
                })
                .collect::<Vec<_>>(),
            (1..6)
                .map(|i: u8| (
                    i.into(),
                    32,
                    64,
                    1234,
                    99,
                    crate::MockableFd::mock_signed_fd() + i32::from(i)
                ))
                .collect::<Vec<_>>(),
        );
    }

    #[test]
    fn test_create_failed() {
        // All syscalls fail, so map creation must surface CreateError.
        override_syscall(|_| Err((-1, io::Error::from_raw_os_error(EFAULT))));

        assert_matches!(
            MapData::create(new_obj_map(), "foo", None),
            Err(MapError::CreateError { name, io_error }) => {
                assert_eq!(name, "foo");
                assert_eq!(io_error.raw_os_error(), Some(EFAULT));
            }
        );
    }
}