1pub(crate) mod ringbuf;
4use alloc::{collections::btree_map::BTreeMap, string::String, vec::Vec};
5use core::{
6 ffi::{c_char, c_int, c_void},
7 fmt::Write,
8};
9
10use consts::BPF_F_CURRENT_CPU;
11
12use crate::{
13 BpfError, BpfResult as Result, KernelAuxiliaryOps,
14 map::{BpfCallBackFn, UnifiedMap},
15};
16
17pub mod consts;
18
19pub type RawBPFHelperFn = fn(u64, u64, u64, u64, u64) -> u64;
21
/// Casts a helper function — optionally instantiated with generic arguments
/// (`helper_func!(foo::<F>)`) — into the uniform [`RawBPFHelperFn`] signature
/// stored in the helper table built by `init_helper_functions`.
///
/// SAFETY: the transmute erases all argument/return type checking; only
/// register functions whose real calling convention is compatible with how
/// the eBPF runtime invokes a `RawBPFHelperFn` (up to five `u64` arguments,
/// `u64` result).
macro_rules! helper_func {
    ($name:ident::<$($generic:ident),*>) => {
        unsafe {
            core::mem::transmute::<usize, RawBPFHelperFn>($name::<$($generic),*> as *const () as usize)
        }
    };
    ($name:ident) => {
        unsafe {
            core::mem::transmute::<usize, RawBPFHelperFn>($name as *const () as usize)
        }
    };
}
35
36use printf_compat::{format, output};
37
/// Minimal C-style `printf`: renders the NUL-terminated format string `str`
/// with the supplied varargs into `w` via the `printf_compat` crate.
///
/// # Safety
/// `str` must point to a valid NUL-terminated C string, and `args` must match
/// the conversions it contains (mismatches are undefined behavior, as in C).
///
/// NOTE(review): returns `bytes_written + 1` — presumably counting a
/// trailing NUL as C `printf` conventions sometimes do; confirm callers
/// (e.g. `trace_printf`) actually depend on this off-by-one.
pub unsafe extern "C" fn printf(w: &mut impl Write, str: *const c_char, args: ...) -> c_int {
    let bytes_written = unsafe { format(str as _, args, output::fmt_write(w)) };
    bytes_written + 1
}
46
/// Count the printf-style conversion specifiers in `format_str`.
///
/// A specifier is a `%` followed by any run of flag/width/precision/length
/// characters (`-+#0 .0123456789lhL*`) and then one of the conversion
/// characters `cdieEfFgGosuxXpn`. A literal `%%` consumes no argument and is
/// not counted; a `%` with no valid conversion character is ignored.
///
/// Used by `trace_printf` to decide how many of the raw varargs to read.
///
/// Fix over the previous version: the old code collected the whole string
/// into a `Vec<char>` and built an unused `String` (`_spec`) for every
/// specifier — both dead allocations in a no_std kernel path. This version
/// walks the `char` iterator directly with identical counting semantics.
fn extract_format_specifiers(format_str: &str) -> usize {
    // Flag/width/precision/length characters that may appear between `%`
    // and the conversion character (same set the old implementation used).
    const MODIFIERS: &str = "-+#0 .0123456789lhL*";
    // Conversion characters that terminate a specifier and consume one arg.
    const CONVERSIONS: &str = "cdieEfFgGosuxXpn";

    let mut fmt_arg_count = 0;
    let mut chars = format_str.chars().peekable();

    while let Some(c) = chars.next() {
        if c != '%' {
            continue;
        }
        // `%%` is an escaped percent sign: consume it, no argument.
        if chars.peek() == Some(&'%') {
            chars.next();
            continue;
        }
        // Skip any flags / width / precision / length modifiers.
        while chars.peek().is_some_and(|&m| MODIFIERS.contains(m)) {
            chars.next();
        }
        // A valid conversion character completes the specifier.
        if chars.peek().is_some_and(|&t| CONVERSIONS.contains(t)) {
            chars.next();
            fmt_arg_count += 1;
        }
        // Otherwise the candidate char stays in the iterator, matching the
        // old behavior of re-examining it on the next outer iteration.
    }

    fmt_arg_count
}
82
/// eBPF helper backing `bpf_trace_printk`-style formatted tracing.
///
/// `fmt_ptr`/`fmt_len` describe the format string in memory readable by the
/// kernel; up to three format arguments arrive in `arg3..arg5`. Output is
/// forwarded to the kernel trace sink through
/// [`KernelAuxiliaryOps::ebpf_write_str`].
///
/// Returns the value of `printf` (bytes written + 1) on success, or `-1` if
/// the format string requires more than three arguments.
///
/// NOTE(review): each used vararg is dereferenced as a `*const u64`, i.e.
/// treated as a *pointer to* the value rather than the value itself — this
/// matches whatever convention the caller/verifier sets up here; confirm
/// before changing.
pub fn trace_printf<F: KernelAuxiliaryOps>(
    fmt_ptr: u64,
    fmt_len: u64,
    arg3: u64,
    arg4: u64,
    arg5: u64,
) -> i64 {
    // Zero-sized adapter so `printf` can write through `core::fmt::Write`
    // into the kernel's ebpf string sink.
    struct FakeWriter<F: KernelAuxiliaryOps> {
        _phantom: core::marker::PhantomData<F>,
    }
    impl<F: KernelAuxiliaryOps> FakeWriter<F> {
        fn default() -> Self {
            FakeWriter {
                _phantom: core::marker::PhantomData,
            }
        }
    }
    impl<F: KernelAuxiliaryOps> Write for FakeWriter<F> {
        fn write_str(&mut self, s: &str) -> core::fmt::Result {
            // Any kernel-side failure is collapsed into `fmt::Error`.
            F::ebpf_write_str(s).map_err(|_| core::fmt::Error)?;
            Ok(())
        }
    }

    // SAFETY-relevant: trusts `fmt_ptr`/`fmt_len` to describe a readable
    // buffer; UTF-8 validity is not checked (`from_utf8_unchecked`).
    let fmt_str = unsafe {
        core::str::from_utf8_unchecked(core::slice::from_raw_parts(
            fmt_ptr as *const u8,
            fmt_len as usize,
        ))
    };
    let fmt_arg_count = extract_format_specifiers(fmt_str);

    // Only dereference as many varargs as the format string actually uses;
    // unused slots are replaced with 0 so stale pointers are never read.
    let (arg3, arg4, arg5) = match fmt_arg_count {
        0 => (0, 0, 0),
        1 => (unsafe { (arg3 as *const u64).read() }, 0, 0),
        2 => (
            unsafe { (arg3 as *const u64).read() },
            unsafe { (arg4 as *const u64).read() },
            0,
        ),
        3 => (
            unsafe { (arg3 as *const u64).read() },
            unsafe { (arg4 as *const u64).read() },
            unsafe { (arg5 as *const u64).read() },
        ),
        _ => {
            log::error!("trace_printf: too many arguments, only 3 are supported");
            return -1;
        }
    };

    let mut fmt = FakeWriter::<F>::default();
    // SAFETY: format string and argument count were validated above; the
    // variadic call matches what the specifier scan found.
    unsafe { printf(&mut fmt, fmt_ptr as _, arg3, arg4, arg5) as _ }
}
142
143pub fn raw_map_lookup_elem<F: KernelAuxiliaryOps>(
145 map: *mut c_void,
146 key: *const c_void,
147) -> *const c_void {
148 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
149 let meta = unified_map.map_meta();
150 let key_size = meta.key_size as usize;
151 let key = unsafe { core::slice::from_raw_parts(key as *const u8, key_size) };
152 let value = map_lookup_elem(unified_map, key)?;
153 Ok(value)
154 });
155 match res {
156 Ok(Some(value)) => value as _,
157 _ => core::ptr::null(),
158 }
159}
160
161pub fn map_lookup_elem(unified_map: &mut UnifiedMap, key: &[u8]) -> Result<Option<*const u8>> {
163 let map = unified_map.map_mut();
164 let value = map.lookup_elem(key);
165 match value {
166 Ok(Some(value)) => Ok(Some(value.as_ptr())),
167 _ => Ok(None),
168 }
169}
170
171pub fn raw_perf_event_output<F: KernelAuxiliaryOps>(
175 ctx: *mut c_void,
176 map: *mut c_void,
177 flags: u64,
178 data: *mut c_void,
179 size: u64,
180) -> i64 {
181 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
182 let data = unsafe { core::slice::from_raw_parts(data as *const u8, size as usize) };
183 perf_event_output::<F>(ctx, unified_map, flags, data)
184 });
185
186 match res {
187 Ok(_) => 0,
188 Err(e) => e as _,
189 }
190}
191
192pub fn perf_event_output<F: KernelAuxiliaryOps>(
194 ctx: *mut c_void,
195 unified_map: &mut UnifiedMap,
196 flags: u64,
197 data: &[u8],
198) -> Result<()> {
199 let index = flags as u32;
200 let flags = (flags >> 32) as u32;
201 let key = if index == BPF_F_CURRENT_CPU as u32 {
202 F::current_cpu_id()
203 } else {
204 index
205 };
206 let map = unified_map.map_mut();
207 let fd = map
208 .lookup_elem(&key.to_ne_bytes())?
209 .ok_or(BpfError::ENOENT)?;
210 let fd = u32::from_ne_bytes(fd.try_into().map_err(|_| BpfError::EINVAL)?);
211 F::perf_event_output(ctx, fd, flags, data)?;
212 Ok(())
213}
214
215fn raw_bpf_probe_read(dst: *mut c_void, size: u32, unsafe_ptr: *const c_void) -> i64 {
217 let (dst, src) = unsafe {
218 let dst = core::slice::from_raw_parts_mut(dst as *mut u8, size as usize);
219 let src = core::slice::from_raw_parts(unsafe_ptr as *const u8, size as usize);
220 (dst, src)
221 };
222 let res = bpf_probe_read(dst, src);
223 match res {
224 Ok(_) => 0,
225 Err(e) => e as _,
226 }
227}
228
229pub fn bpf_probe_read(dst: &mut [u8], src: &[u8]) -> Result<()> {
233 dst.copy_from_slice(src);
234 Ok(())
235}
236
237pub fn raw_map_update_elem<F: KernelAuxiliaryOps>(
241 map: *mut c_void,
242 key: *const c_void,
243 value: *const c_void,
244 flags: u64,
245) -> i64 {
246 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
247 let meta = unified_map.map_meta();
248 let key_size = meta.key_size as usize;
249 let value_size = meta.value_size as usize;
250 let key = unsafe { core::slice::from_raw_parts(key as *const u8, key_size) };
251 let value = unsafe { core::slice::from_raw_parts(value as *const u8, value_size) };
252 map_update_elem(unified_map, key, value, flags)
253 });
254 match res {
255 Ok(_) => 0,
256 Err(e) => e as _,
257 }
258}
259
260pub fn map_update_elem(
262 unified_map: &mut UnifiedMap,
263 key: &[u8],
264 value: &[u8],
265 flags: u64,
266) -> Result<()> {
267 let map = unified_map.map_mut();
268
269 map.update_elem(key, value, flags)
270}
271
272pub fn raw_map_delete_elem<F: KernelAuxiliaryOps>(map: *mut c_void, key: *const c_void) -> i64 {
276 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
277 let meta = unified_map.map_meta();
278 let key_size = meta.key_size as usize;
279 let key = unsafe { core::slice::from_raw_parts(key as *const u8, key_size) };
280 map_delete_elem(unified_map, key)
281 });
282 match res {
283 Ok(_) => 0,
284 Err(e) => e as _,
285 }
286}
287
288pub fn map_delete_elem(unified_map: &mut UnifiedMap, key: &[u8]) -> Result<()> {
290 let map = unified_map.map_mut();
291
292 map.delete_elem(key)
293}
294
295pub fn raw_map_for_each_elem<F: KernelAuxiliaryOps>(
312 map: *mut c_void,
313 cb: *const c_void,
314 ctx: *const c_void,
315 flags: u64,
316) -> i64 {
317 if cb.is_null() {
318 return BpfError::EINVAL as _;
319 }
320 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
321 let cb = unsafe { *(cb as *const BpfCallBackFn) };
322 map_for_each_elem(unified_map, cb, ctx as _, flags)
323 });
324 match res {
325 Ok(v) => v as i64,
326 Err(e) => e as _,
327 }
328}
329
330pub fn map_for_each_elem(
332 unified_map: &mut UnifiedMap,
333 cb: BpfCallBackFn,
334 ctx: *const u8,
335 flags: u64,
336) -> Result<u32> {
337 let map = unified_map.map_mut();
338
339 map.for_each_elem(cb, ctx, flags)
340}
341
342pub fn raw_map_lookup_percpu_elem<F: KernelAuxiliaryOps>(
346 map: *mut c_void,
347 key: *const c_void,
348 cpu: u32,
349) -> *const c_void {
350 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
351 let meta = unified_map.map_meta();
352 let key_size = meta.key_size as usize;
353 let key = unsafe { core::slice::from_raw_parts(key as *const u8, key_size) };
354 map_lookup_percpu_elem(unified_map, key, cpu)
355 });
356 match res {
357 Ok(Some(value)) => value as *const c_void,
358 _ => core::ptr::null_mut(),
359 }
360}
361
362pub fn map_lookup_percpu_elem(
364 unified_map: &mut UnifiedMap,
365 key: &[u8],
366 cpu: u32,
367) -> Result<Option<*const u8>> {
368 let map = unified_map.map_mut();
369 let value = map.lookup_percpu_elem(key, cpu);
370 match value {
371 Ok(Some(value)) => Ok(Some(value.as_ptr())),
372 _ => Ok(None),
373 }
374}
375pub fn raw_map_push_elem<F: KernelAuxiliaryOps>(
379 map: *mut c_void,
380 value: *const c_void,
381 flags: u64,
382) -> i64 {
383 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
384 let meta = unified_map.map_meta();
385 let value_size = meta.value_size as usize;
386 let value = unsafe { core::slice::from_raw_parts(value as *const u8, value_size) };
387 map_push_elem(unified_map, value, flags)
388 });
389 match res {
390 Ok(_) => 0,
391 Err(e) => e as _,
392 }
393}
394
395pub fn map_push_elem(unified_map: &mut UnifiedMap, value: &[u8], flags: u64) -> Result<()> {
397 let map = unified_map.map_mut();
398
399 map.push_elem(value, flags)
400}
401
402pub fn raw_map_pop_elem<F: KernelAuxiliaryOps>(map: *mut c_void, value: *mut c_void) -> i64 {
406 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
407 let meta = unified_map.map_meta();
408 let value_size = meta.value_size as usize;
409 let value = unsafe { core::slice::from_raw_parts_mut(value as *mut u8, value_size) };
410 map_pop_elem(unified_map, value)
411 });
412 match res {
413 Ok(_) => 0,
414 Err(e) => e as _,
415 }
416}
417
418pub fn map_pop_elem(unified_map: &mut UnifiedMap, value: &mut [u8]) -> Result<()> {
420 let map = unified_map.map_mut();
421
422 map.pop_elem(value)
423}
424
425pub fn raw_map_peek_elem<F: KernelAuxiliaryOps>(map: *mut c_void, value: *mut c_void) -> i64 {
429 let res = F::get_unified_map_from_ptr(map as *const u8, |unified_map| {
430 let meta = unified_map.map_meta();
431 let value_size = meta.value_size as usize;
432 let value = unsafe { core::slice::from_raw_parts_mut(value as *mut u8, value_size) };
433 map_peek_elem(unified_map, value)
434 });
435 match res {
436 Ok(_) => 0,
437 Err(e) => e as _,
438 }
439}
440
441pub fn map_peek_elem(unified_map: &mut UnifiedMap, value: &mut [u8]) -> Result<()> {
443 let map = unified_map.map_mut();
444
445 map.peek_elem(value)
446}
447
448pub fn bpf_ktime_get_ns<F: KernelAuxiliaryOps>() -> u64 {
450 F::ebpf_time_ns().unwrap_or_default()
451}
452
453fn raw_probe_read_user_str<F: KernelAuxiliaryOps>(
462 dst: *mut c_void,
463 size: u32,
464 unsafe_ptr: *const c_void,
465) -> i64 {
466 let dst = unsafe { core::slice::from_raw_parts_mut(dst as *mut u8, size as usize) };
467 let res = probe_read_user_str::<F>(dst, unsafe_ptr as *const u8);
468 match res {
469 Ok(len) => len as i64,
470 Err(e) => e as _,
471 }
472}
473
474pub fn probe_read_user_str<F: KernelAuxiliaryOps>(dst: &mut [u8], src: *const u8) -> Result<usize> {
476 if dst.is_empty() {
477 return Err(BpfError::EINVAL);
478 }
479 let str = F::string_from_user_cstr(src)?;
480 let len = str.len();
481 let copy_len = len.min(dst.len() - 1); dst[..copy_len].copy_from_slice(&str.as_bytes()[..copy_len]);
483 dst[copy_len] = 0; Ok(copy_len + 1) }
486
487pub fn init_helper_functions<F: KernelAuxiliaryOps>() -> BTreeMap<u32, RawBPFHelperFn> {
489 use consts::*;
490 let mut map = BTreeMap::new();
491
492 map.insert(
494 HELPER_MAP_LOOKUP_ELEM,
495 helper_func!(raw_map_lookup_elem::<F>),
496 );
497 map.insert(
498 HELPER_MAP_UPDATE_ELEM,
499 helper_func!(raw_map_update_elem::<F>),
500 );
501 map.insert(
502 HELPER_MAP_DELETE_ELEM,
503 helper_func!(raw_map_delete_elem::<F>),
504 );
505 map.insert(HELPER_KTIME_GET_NS, helper_func!(bpf_ktime_get_ns::<F>));
506 map.insert(
507 HELPER_MAP_FOR_EACH_ELEM,
508 helper_func!(raw_map_for_each_elem::<F>),
509 );
510 map.insert(
511 HELPER_MAP_LOOKUP_PERCPU_ELEM,
512 helper_func!(raw_map_lookup_percpu_elem::<F>),
513 );
514 map.insert(
518 HELPER_PERF_EVENT_OUTPUT,
519 helper_func!(raw_perf_event_output::<F>),
520 );
521 map.insert(HELPER_BPF_PROBE_READ, helper_func!(raw_bpf_probe_read));
523 map.insert(HELPER_TRACE_PRINTF, helper_func!(trace_printf::<F>));
525
526 map.insert(HELPER_MAP_PUSH_ELEM, helper_func!(raw_map_push_elem::<F>));
528 map.insert(HELPER_MAP_POP_ELEM, helper_func!(raw_map_pop_elem::<F>));
529 map.insert(HELPER_MAP_PEEK_ELEM, helper_func!(raw_map_peek_elem::<F>));
530
531 map.insert(
533 HELPER_PROBE_READ_USER_STR,
534 helper_func!(raw_probe_read_user_str::<F>),
535 );
536
537 use ringbuf::*;
538 map.insert(
540 HELPER_BPF_RINGBUF_OUTPUT,
541 helper_func!(raw_bpf_ringbuf_output::<F>),
542 );
543 map.insert(
544 HELPER_BPF_RINGBUF_RESERVE,
545 helper_func!(raw_bpf_ringbuf_reserve::<F>),
546 );
547 map.insert(
548 HELPER_BPF_RINGBUF_SUBMIT,
549 helper_func!(raw_bpf_ringbuf_submit::<F>),
550 );
551 map.insert(
552 HELPER_BPF_RINGBUF_DISCARD,
553 helper_func!(raw_bpf_ringbuf_discard::<F>),
554 );
555 map.insert(
556 HELPER_BPF_RINGBUF_QUERY,
557 helper_func!(raw_bpf_ringbuf_query::<F>),
558 );
559 map.insert(
560 HELPER_BPF_RINGBUF_RESERVE_DYNPTR,
561 helper_func!(raw_bpf_ringbuf_reserve_dynptr::<F>),
562 );
563 map.insert(
564 HELPER_BPF_RINGBUF_SUBMIT_DYNPTR,
565 helper_func!(raw_bpf_ringbuf_submit_dynptr::<F>),
566 );
567 map.insert(
568 HELPER_BPF_RINGBUF_DISCARD_DYNPTR,
569 helper_func!(raw_bpf_ringbuf_discard_dynptr::<F>),
570 );
571
572 map
573}