mod bindings {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]
    #![allow(dead_code)]
    #![allow(clippy::all)]

    #[cfg(docsrs)]
    include!("bindings.rs");
    #[cfg(not(docsrs))]
    include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}

use bindings::*;
use pnet::datalink::{interfaces, NetworkInterface};
use std::alloc::{alloc_zeroed, Layout};
use std::cell::RefCell;
use std::collections::hash_set::HashSet;
use std::convert::TryInto;
use std::ffi::{c_char, c_int, c_void, CStr, CString};
use std::ptr::copy;
use std::rc::Rc;
use std::thread;
use std::time::Duration;

use libc::strerror;

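/// Headroom (in bytes) kept at the front of each chunk so headers can be
/// prepended to a packet without moving its payload.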
const DEFAULT_HEADROOM: usize = 256;

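/// Process-wide UMEM pool; it is created by the first `Nic` and shared by
/// every subsequent one (tracked via `refcount`).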
static mut POOL: Option<Pool> = None;

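/// Bookkeeping for the UMEM area: which chunk addresses are free, plus the
/// fill/completion queue sizes used when replenishing descriptors.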
#[derive(Debug)]
struct BufferPool {
    chunk_size: usize,
    #[allow(dead_code)]
    chunk_count: usize,

    pool: HashSet<u64>,

    buffer: *mut c_void,
    fq_size: usize,
    cq_size: usize,
}

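/// Owner of the UMEM itself, its fill/completion rings, and the shared
/// `BufferPool`.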
#[derive(Debug)]
struct Pool {
    chunk_size: usize,

    umem: *mut xsk_umem,
    buffer_pool: Rc<RefCell<BufferPool>>,

    umem_fq: xsk_ring_prod,
    umem_cq: xsk_ring_cons,

    refcount: usize,
}

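/// An AF_XDP socket bound to one network interface, with its RX/TX rings and
/// the UMEM fill/completion rings it uses.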
#[derive(Debug)]
pub struct Nic {
    pub interface: NetworkInterface,

    xsk: *mut xsk_socket,

    rxq: xsk_ring_cons,
    txq: xsk_ring_prod,
    umem_fq: xsk_ring_prod,
    umem_cq: xsk_ring_cons,
}

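/// A packet backed by one UMEM chunk. `start`/`end` are offsets into `buffer`,
/// and the chunk is returned to the pool when the packet is dropped.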
#[derive(Debug)]
pub struct Packet {
    pub start: usize,
    pub end: usize,
    pub buffer_size: usize,
    pub buffer: *mut u8,
    private: *mut c_void,
    buffer_pool: Rc<RefCell<BufferPool>>,
}

struct ReservedResult {
    count: u32,
    idx: u32,
}

impl BufferPool {
    fn new(
        chunk_size: usize,
        chunk_count: usize,
        buffer: *mut c_void,
        fq_size: usize,
        cq_size: usize,
    ) -> Self {
        let pool = (0..chunk_count)
            .map(|i| (i * chunk_size).try_into().unwrap())
            .collect::<HashSet<u64>>();
        Self {
            chunk_size,
            chunk_count,
            pool,
            buffer,
            fq_size,
            cq_size,
        }
    }

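    /// Takes an arbitrary free chunk address out of the pool.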
    fn alloc_addr(&mut self) -> Result<u64, &'static str> {
        if let Some(addr) = self.pool.iter().next() {
            let addr = *addr;
            self.pool.remove(&addr);
            Ok(addr)
        } else {
            Err("Chunk Pool is empty")
        }
    }

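    /// Returns a chunk to the pool, aligning the address down to its chunk boundary first.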
    fn free_addr(&mut self, chunk_addr: u64) {
        let chunk_addr = chunk_addr - (chunk_addr % self.chunk_size as u64);

        #[cfg(debug_assertions)]
        if self.pool.contains(&chunk_addr) {
            eprintln!("Chunk Pool already contains chunk_addr: {}", chunk_addr);
        }

        self.pool.insert(chunk_addr);

        #[cfg(debug_assertions)]
        if self.pool.len() > self.chunk_count {
            eprintln!("Chunk Pool overflowed");
        }
    }

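    /// Reserves up to `len` fill-queue slots, backs each with a free chunk, and submits them.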
    fn reserve_fq(&mut self, fq: &mut xsk_ring_prod, len: usize) -> Result<usize, &'static str> {
        let mut cq_idx = 0;
        let reserved = unsafe { xsk_ring_prod__reserve(fq, len as u32, &mut cq_idx) };

        for i in 0..reserved {
            unsafe {
                *xsk_ring_prod__fill_addr(fq, cq_idx + i) = self.alloc_addr()?;
            }
        }

        unsafe {
            xsk_ring_prod__submit(fq, reserved);
        }

        Ok(reserved.try_into().unwrap())
    }

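    /// Reserves up to `len` TX descriptors, returning how many were granted and the start index.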
    fn reserve_txq(
        &mut self,
        txq: &mut xsk_ring_prod,
        len: usize,
    ) -> Result<ReservedResult, &'static str> {
        let mut idx = 0;
        let count = unsafe { xsk_ring_prod__reserve(txq, len as u32, &mut idx) };

        Ok(ReservedResult { count, idx })
    }

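    /// Peeks up to `len` completion-queue entries and releases whatever is ready.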
    fn release(&mut self, cq: &mut xsk_ring_cons, len: usize) -> Result<u32, String> {
        let mut cq_idx = 0;
        let count = unsafe { xsk_ring_cons__peek(cq, len as u32, &mut cq_idx) };
        if count > 0 {
            unsafe {
                xsk_ring_cons__release(cq, count);
            }
        }

        Ok(count)
    }

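    /// Drains up to `len` RX descriptors into `Packet`s, refills the fill queue,
    /// and kicks the kernel if the ring needs a wakeup.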
    fn recv(
        &mut self,
        chunk_pool_rc: &Rc<RefCell<Self>>,
        len: usize,
        _xsk: &*mut xsk_socket,
        rxq: &mut xsk_ring_cons,
        fq: &mut xsk_ring_prod,
    ) -> Vec<Packet> {
        let mut packets = Vec::<Packet>::with_capacity(len);

        let mut rx_idx = 0;
        let received = unsafe { xsk_ring_cons__peek(rxq, len as u32, &mut rx_idx) };

        if received == 0 {
            return packets;
        }

        for i in 0..received {
            let mut packet = Packet::new(chunk_pool_rc);
            let rx_desc =
                unsafe { xsk_ring_cons__rx_desc(&*rxq, rx_idx + i).as_ref().unwrap() };
            packet.end += rx_desc.len as usize;
            packet.buffer_size = self.chunk_size;
            packet.buffer = unsafe {
                xsk_umem__get_data(self.buffer, rx_desc.addr)
                    .cast::<u8>()
                    .sub(DEFAULT_HEADROOM)
            };
            packet.private = (rx_desc.addr - DEFAULT_HEADROOM as u64) as *mut c_void;

            packets.push(packet);
        }

        unsafe {
            xsk_ring_cons__release(rxq, received);
        }

        self.reserve_fq(fq, packets.len()).unwrap();

        unsafe {
            if xsk_ring_prod__needs_wakeup(&*fq) != 0 {
                libc::recvfrom(
                    xsk_socket__fd(*_xsk),
                    std::ptr::null_mut::<libc::c_void>(),
                    0 as libc::size_t,
                    libc::MSG_DONTWAIT,
                    std::ptr::null_mut::<libc::sockaddr>(),
                    std::ptr::null_mut::<u32>(),
                );
            }
        }

        packets
    }

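    /// Writes up to `packets.len()` TX descriptors, submits them, and kicks the
    /// kernel if the TX ring needs a wakeup. Returns the number of packets queued.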
    fn send(
        &mut self,
        packets: &mut [Packet],
        xsk: &*mut xsk_socket,
        tx: &mut xsk_ring_prod,
        cq: &mut xsk_ring_cons,
    ) -> usize {
        self.release(cq, self.cq_size).unwrap();
        let reserved = self.reserve_txq(tx, packets.len()).unwrap();

        for (i, pkt) in packets.iter().enumerate().take(reserved.count as usize) {
            let tx_desc = unsafe {
                xsk_ring_prod__tx_desc(tx, reserved.idx + i as u32)
                    .as_mut()
                    .unwrap()
            };
            tx_desc.addr = pkt.private as u64 + pkt.start as u64;
            tx_desc.len = (pkt.end - pkt.start) as u32;
        }

        unsafe {
            xsk_ring_prod__submit(&mut *tx, reserved.count);
            if xsk_ring_prod__needs_wakeup(tx) != 0 {
                libc::sendto(
                    xsk_socket__fd(*xsk),
                    std::ptr::null::<libc::c_void>(),
                    0 as libc::size_t,
                    libc::MSG_DONTWAIT,
                    std::ptr::null::<libc::sockaddr>(),
                    0 as libc::socklen_t,
                );
            }
        }

        reserved.count.try_into().unwrap()
    }
}

impl Pool {
    fn new() -> Result<Self, String> {
        let umem_ptr = alloc_zeroed_layout::<xsk_umem>()?;
        let fq_ptr = alloc_zeroed_layout::<xsk_ring_prod>()?;
        let cq_ptr = alloc_zeroed_layout::<xsk_ring_cons>()?;

        let umem = umem_ptr.cast::<xsk_umem>();
        let fq = unsafe { std::ptr::read(fq_ptr.cast::<xsk_ring_prod>()) };
        let cq = unsafe { std::ptr::read(cq_ptr.cast::<xsk_ring_cons>()) };

        let chunk_pool = BufferPool::new(0, 0, std::ptr::null_mut(), 0, 0);

        let chunk_size = 0;
        let refcount = 0;

        let obj = Self {
            chunk_size,
            umem,
            buffer_pool: Rc::new(RefCell::new(chunk_pool)),
            umem_fq: fq,
            umem_cq: cq,
            refcount,
        };

        Ok(obj)
    }

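    /// Creates the global `POOL` on first use; later calls are no-ops.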
    fn init(
        chunk_size: usize,
        chunk_count: usize,
        fq_size: usize,
        cq_size: usize,
    ) -> Result<(), String> {
        if unsafe { POOL.as_ref() }.is_none() {
            let mut pool_obj = Pool::new()?;
            pool_obj.re_init(chunk_size, chunk_count, fq_size, cq_size)?;
            unsafe { POOL = Some(pool_obj) };
        }

        Ok(())
    }

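    /// Maps anonymous memory for the UMEM, creates the UMEM via `xsk_umem__create`,
    /// and rebuilds the chunk `BufferPool` on top of it.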
    fn re_init(
        &mut self,
        chunk_size: usize,
        chunk_count: usize,
        fq_size: usize,
        cq_size: usize,
    ) -> Result<(), String> {
        let umem_buffer_size = chunk_size * chunk_count;
        let mmap_address = unsafe {
            libc::mmap(
                std::ptr::null_mut::<libc::c_void>(),
                umem_buffer_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
        };

        if mmap_address == libc::MAP_FAILED {
            return Err("Failed to allocate memory for UMEM.".to_string());
        }

        let umem_cfg = xsk_umem_config {
            fill_size: fq_size as u32,
            comp_size: cq_size as u32,
            frame_size: chunk_size as u32,
            frame_headroom: XSK_UMEM__DEFAULT_FRAME_HEADROOM,
            flags: XSK_UMEM__DEFAULT_FLAGS,
        };

        let ret = unsafe {
            xsk_umem__create(
                &mut self.umem,
                mmap_address,
                umem_buffer_size as u64,
                &mut self.umem_fq,
                &mut self.umem_cq,
                &umem_cfg,
            )
        };

        if ret != 0 {
            unsafe {
                libc::munmap(mmap_address, umem_buffer_size);
            }
            let msg = unsafe {
                CStr::from_ptr(strerror(-ret))
                    .to_string_lossy()
                    .into_owned()
            };

            return Err(format!("Failed to create UMEM: {}", msg));
        }

        let chunk_pool = BufferPool::new(chunk_size, chunk_count, mmap_address, fq_size, cq_size);
        let mut borrow_buffer_pool = self.buffer_pool.borrow_mut();
        *borrow_buffer_pool = chunk_pool;

        self.chunk_size = chunk_size;

        Ok(())
    }

    fn alloc_addr(&mut self) -> Result<u64, &'static str> {
        self.buffer_pool.borrow_mut().alloc_addr()
    }

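    /// Allocates a free chunk and wraps it in an empty `Packet`, or returns `None`
    /// if the pool is exhausted.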
    fn try_alloc_packet(&mut self) -> Option<Packet> {
        match self.alloc_addr() {
            Err(_) => None,
            Ok(idx) => {
                let mut packet: Packet = Packet::new(&self.buffer_pool);
                packet.buffer_size = self.chunk_size;
                packet.buffer =
                    unsafe { xsk_umem__get_data(self.buffer_pool.borrow().buffer, idx) as *mut u8 };
                packet.private = idx as *mut c_void;

                Some(packet)
            }
        }
    }
}

impl Nic {
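    /// Opens an AF_XDP socket on `if_name`, initializing the shared UMEM pool on first use.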
    pub fn new(
        if_name: &str,
        chunk_size: usize,
        chunk_count: usize,
        fq_size: usize,
        cq_size: usize,
        tx_size: usize,
        rx_size: usize,
    ) -> Result<Nic, String> {
        let interface = interfaces()
            .into_iter()
            .find(|elem| elem.name.as_str() == if_name)
            .ok_or(format!("Interface {} not found.", if_name))?;

        let xsk_ptr = alloc_zeroed_layout::<xsk_socket>()?;
        let rx_ptr = alloc_zeroed_layout::<xsk_ring_cons>()?;
        let tx_ptr = alloc_zeroed_layout::<xsk_ring_prod>()?;
        let fq_ptr = alloc_zeroed_layout::<xsk_ring_prod>()?;
        let cq_ptr = alloc_zeroed_layout::<xsk_ring_cons>()?;

        Pool::init(chunk_size, chunk_count, fq_size, cq_size).unwrap();

        let mut nic = unsafe {
            Nic {
                interface: interface.clone(),
                xsk: xsk_ptr.cast::<xsk_socket>(),
                rxq: std::ptr::read(rx_ptr.cast::<xsk_ring_cons>()),
                txq: std::ptr::read(tx_ptr.cast::<xsk_ring_prod>()),
                umem_fq: std::ptr::read(fq_ptr.cast::<xsk_ring_prod>()),
                umem_cq: std::ptr::read(cq_ptr.cast::<xsk_ring_cons>()),
            }
        };

        // `open` expects the RX ring size before the TX ring size.
        match Nic::open(
            &mut nic,
            chunk_size,
            chunk_count,
            fq_size,
            cq_size,
            rx_size,
            tx_size,
        ) {
            Ok(_) => {
                unsafe {
                    POOL.as_mut().unwrap().refcount += 1;
                };
                Ok(nic)
            }
            Err(e) => {
                eprintln!("Failed to open NIC: {}", e);
                Err(e)
            }
        }
    }

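    /// Creates the XSK socket in native (DRV) mode first and falls back to SKB
    /// mode if that fails, then pre-fills the fill queue.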
    fn open(
        &mut self,
        chunk_size: usize,
        chunk_count: usize,
        fq_size: usize,
        cq_size: usize,
        rx_ring_size: usize,
        tx_ring_size: usize,
    ) -> Result<(), String> {
        let mut xsk_cfg: xsk_socket_config = xsk_socket_config {
            rx_size: rx_ring_size.try_into().unwrap(),
            tx_size: tx_ring_size.try_into().unwrap(),
            __bindgen_anon_1: xsk_socket_config__bindgen_ty_1 { libxdp_flags: 0 },
            xdp_flags: XDP_FLAGS_DRV_MODE,
            bind_flags: XDP_USE_NEED_WAKEUP as u16,
        };
        let if_name = CString::new(self.interface.name.clone()).unwrap();
        let if_ptr = if_name.as_ptr() as *const c_char;

        let ret: c_int = unsafe {
            xsk_socket__create_shared(
                &mut self.xsk,
                if_ptr,
                0,
                POOL.as_mut().unwrap().umem,
                &mut self.rxq,
                &mut self.txq,
                &mut self.umem_fq,
                &mut self.umem_cq,
                &xsk_cfg,
            )
        };

        if ret != 0 {
            match unsafe { POOL.as_ref().unwrap().refcount } {
                0 => {
                    unsafe { xsk_umem__delete(POOL.as_mut().unwrap().umem) };
                    thread::sleep(Duration::from_millis(100));
                    unsafe {
                        POOL.as_mut()
                            .unwrap()
                            .re_init(chunk_size, chunk_count, fq_size, cq_size)?
                    };
                }
                refcount if refcount > 0 => {
                    thread::sleep(Duration::from_millis(100));
                }
                _ => {
                    return Err("Pool Fallback failed".to_string());
                }
            }

            xsk_cfg.xdp_flags = XDP_FLAGS_SKB_MODE;
            let ret: c_int = unsafe {
                xsk_socket__create_shared(
                    &mut self.xsk,
                    if_ptr,
                    0,
                    POOL.as_mut().unwrap().umem,
                    &mut self.rxq,
                    &mut self.txq,
                    &mut POOL.as_mut().unwrap().umem_fq,
                    &mut POOL.as_mut().unwrap().umem_cq,
                    &xsk_cfg,
                )
            };

            if ret != 0 {
                let msg = unsafe {
                    CStr::from_ptr(strerror(-ret))
                        .to_string_lossy()
                        .into_owned()
                };
                return Err(format!("xsk_socket__create failed: {}", msg));
            }
        }

        if unsafe { POOL.as_ref().unwrap().refcount == 0 } {
            self.umem_fq = unsafe { POOL.as_ref().unwrap().umem_fq };
            self.umem_cq = unsafe { POOL.as_ref().unwrap().umem_cq };

            let fq_ptr = alloc_zeroed_layout::<xsk_ring_prod>()?;
            let cq_ptr = alloc_zeroed_layout::<xsk_ring_cons>()?;
            unsafe {
                POOL.as_mut().unwrap().umem_fq = std::ptr::read(fq_ptr.cast::<xsk_ring_prod>());
                POOL.as_mut().unwrap().umem_cq = std::ptr::read(cq_ptr.cast::<xsk_ring_cons>());
            };
        }

        let fq_size = unsafe { POOL.as_mut().unwrap().buffer_pool.borrow().fq_size };
        unsafe {
            POOL.as_mut()
                .unwrap()
                .buffer_pool
                .borrow_mut()
                .reserve_fq(&mut self.umem_fq, fq_size)?
        };

        Ok(())
    }

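    /// Allocates an empty packet from the shared UMEM pool, if a chunk is available.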
    pub fn alloc_packet(&self) -> Option<Packet> {
        unsafe { POOL.as_mut().unwrap().try_alloc_packet() }
    }

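    /// Sends as many packets as the TX ring accepts and removes them from the
    /// vector; returns the number actually queued.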
    pub fn send(&mut self, packets: &mut Vec<Packet>) -> usize {
        let sent_count = unsafe {
            POOL.as_mut().unwrap().buffer_pool.borrow_mut().send(
                packets,
                &self.xsk,
                &mut self.txq,
                &mut self.umem_cq,
            )
        };
        packets.drain(0..sent_count);

        sent_count
    }

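    /// Receives up to `len` packets from the RX ring.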
    pub fn receive(&mut self, len: usize) -> Vec<Packet> {
        unsafe {
            POOL.as_mut().unwrap().buffer_pool.borrow_mut().recv(
                &POOL.as_mut().unwrap().buffer_pool,
                len,
                &self.xsk,
                &mut self.rxq,
                &mut self.umem_fq,
            )
        }
    }
}

impl Packet {
    fn new(chunk_pool: &Rc<RefCell<BufferPool>>) -> Packet {
        Packet {
            start: DEFAULT_HEADROOM,
            end: DEFAULT_HEADROOM,
            buffer_size: 0,
            buffer: std::ptr::null_mut(),
            private: std::ptr::null_mut(),
            buffer_pool: chunk_pool.clone(),
        }
    }

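    /// Overwrites the packet with `new_data`, starting at offset 0 of the underlying buffer.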
    pub fn replace_data(&mut self, new_data: &[u8]) -> Result<(), String> {
        if new_data.len() <= self.buffer_size {
            unsafe {
                copy(new_data.as_ptr(), self.buffer, new_data.len());
                self.start = 0;
                self.end = new_data.len();

                Ok(())
            }
        } else {
            Err(String::from("Data size exceeds the packet buffer size."))
        }
    }

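    /// Returns the packet payload (`start..end`) as a mutable byte slice.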
    pub fn get_buffer_mut(&mut self) -> &mut [u8] {
        unsafe {
            std::slice::from_raw_parts_mut(
                self.buffer.offset(self.start.try_into().unwrap()),
                self.end - self.start,
            )
        }
    }

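    /// Changes the payload length, moving the data to the front of the buffer
    /// if there is not enough room after `start`.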
    pub fn resize(&mut self, new_size: usize) -> Result<(), String> {
        if new_size > self.buffer_size {
            return Err(format!(
                "The requested size is too large. (Max = {})",
                self.buffer_size
            ));
        }

        let temp_end = self.end;

        if new_size > self.buffer_size - self.start {
            unsafe {
                copy(
                    self.buffer.offset(self.start.try_into().unwrap()),
                    self.buffer,
                    temp_end - self.start,
                );
            }
            self.start = 0;
            self.end = new_size;
            return Ok(());
        }

        self.end = self.start + new_size;
        Ok(())
    }

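    /// Prints the payload as a hex dump, grouped 8 bytes per block and 16 per line.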
    #[allow(dead_code)]
    pub fn dump(&self) {
        let chunk_address = self.private as u64;
        let buffer_address: *const u8 = self.buffer.cast_const();

        let length: usize = self.end - self.start;
        let mut count: usize = 0;

        unsafe {
            println!("---packet dump--- chunk addr: {}", chunk_address);

            while count < length {
                let read_offset: usize = count + self.start;
                let read_address: *const u8 = buffer_address.add(read_offset);
                print!("{:02X?} ", std::ptr::read(read_address));

                count += 1;
                if count < length && count % 8 == 0 {
                    print!(" ");
                    if count % 16 == 0 {
                        println!();
                    }
                }
            }
        }
        println!("\n-------\n");
    }
}

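/// Frees the UMEM when the global pool is torn down.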
impl Drop for Pool {
    fn drop(&mut self) {
        let ret: c_int = unsafe { xsk_umem__delete(self.umem) };
        if ret != 0 {
            eprintln!("failed to free umem");
        }
    }
}

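/// Closes the XSK socket and decrements the pool's reference count.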
impl Drop for Nic {
    fn drop(&mut self) {
        unsafe {
            xsk_socket__delete(self.xsk);
        }

        unsafe {
            POOL.as_mut().unwrap().refcount -= 1;
        };
    }
}

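/// Returns the packet's chunk to the buffer pool.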
impl Drop for Packet {
    fn drop(&mut self) {
        self.buffer_pool.borrow_mut().free_addr(self.private as u64);
    }
}

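/// Allocates zero-initialized memory sized and aligned for `T` and returns the raw pointer.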
fn alloc_zeroed_layout<T: 'static>() -> Result<*mut u8, String> {
    let layout = Layout::new::<T>();
    let ptr = unsafe { alloc_zeroed(layout) };
    if ptr.is_null() {
        Err("failed to allocate memory".to_string())
    } else {
        Ok(ptr)
    }
}