#![cfg(feature = "sys")]
use std::marker::PhantomData;
use std::ptr;
use std::slice;
use crate::error::Error;
use crate::ffi;
use crate::frame::Frame;
/// A borrowed view over a single memory-mapped netmap ring shared with the
/// kernel.
///
/// Holds a raw pointer into the mapped region; the `'a` lifetime together
/// with `PhantomData` ties the handle to the owner of that mapping so the
/// pointer cannot outlive it.
pub struct Ring<'a> {
    ring: *mut ffi::netmap_ring,
    // Position of this ring within the adapter's ring array.
    index: usize,
    // Makes `Ring<'a>` behave like an exclusive borrow of the ring memory.
    _marker: PhantomData<&'a mut ffi::netmap_ring>,
}
unsafe impl<'a> Send for Ring<'a> {}
pub struct TxRing<'a>(Ring<'a>);
pub struct RxRing<'a>(Ring<'a>);
impl<'a> Ring<'a> {
    /// Wraps a raw netmap ring pointer together with its ring index.
    ///
    /// The caller must supply a pointer that remains valid (and uniquely
    /// held) for the lifetime `'a`.
    pub(crate) fn new(ring: *mut ffi::netmap_ring, index: usize) -> Self {
        Self {
            ring,
            index,
            _marker: PhantomData,
        }
    }

    /// Index of this ring within the adapter, as supplied at construction.
    pub fn index(&self) -> usize {
        self.index
    }

    /// Total number of slots in the ring, read from the shared ring header.
    pub fn num_slots(&self) -> usize {
        unsafe { (*self.ring).num_slots as usize }
    }

    /// Synchronizes this ring with the kernel, dispatching to the TX or RX
    /// sync primitive based on the ring's `flags` field.
    pub fn sync(&self) {
        unsafe {
            // NOTE(review): direction is decided here by `flags & NR_TX`.
            // In stock netmap, `netmap_ring.flags` carries flags such as
            // NR_TIMESTAMP/NR_FORWARDED and the TX/RX direction comes from
            // which ring array the ring belongs to, not from a flag bit —
            // confirm that this crate's `ffi` bindings really encode the
            // direction in `flags` before relying on this dispatch.
            if (*self.ring).flags & ffi::NR_TX as u16 != 0 {
                ffi::nm_txsync(self.ring, 0);
            } else {
                ffi::nm_rxsync(self.ring, 0);
            }
        }
    }
}
impl<'a> TxRing<'a> {
    /// Wraps a raw netmap ring pointer as a transmit ring.
    pub(crate) fn new(ring: *mut ffi::netmap_ring, index: usize) -> Self {
        Self(Ring::new(ring, index))
    }

    /// Copies `buf` into the next free TX slot and advances `head`/`cur`.
    ///
    /// Returns `Error::PacketTooLarge` when `buf` exceeds the slot buffer
    /// size and `Error::InsufficientSpace` when the ring is full. The frame
    /// is handed to the kernel on the next sync.
    pub fn send(&mut self, buf: &[u8]) -> Result<(), Error> {
        if buf.len() > self.max_payload_size() {
            return Err(Error::PacketTooLarge(buf.len()));
        }
        unsafe {
            let ring = self.0.ring;
            let cur = (*ring).cur;
            // TX slots available to userspace run from cur up to (but not
            // including) tail; when cur has caught up with tail the ring is
            // full. The original wrote into the ring unconditionally,
            // clobbering kernel-owned slots.
            if cur == (*ring).tail {
                return Err(Error::InsufficientSpace);
            }
            let slot = (*ring).slot.add(cur as usize);
            ptr::copy_nonoverlapping(buf.as_ptr(), (*slot).buf as *mut u8, buf.len());
            (*slot).len = buf.len() as u16;
            // Advance with wrap-around: ring indices must stay inside
            // [0, num_slots). The original `cur + 1` could step past the end
            // of the ring.
            let next = if cur + 1 == (*ring).num_slots { 0 } else { cur + 1 };
            (*ring).head = next;
            (*ring).cur = next;
            Ok(())
        }
    }

    /// Maximum payload a single slot can hold (the netmap buffer size).
    pub fn max_payload_size(&self) -> usize {
        unsafe { (*self.0.ring).nr_buf_size as usize }
    }

    /// Reserves `count` contiguous TX slots starting at the current `head`.
    ///
    /// Fails with `Error::InsufficientSpace` when fewer than `count` slots
    /// are free. Reserved slots are filled via `BatchReservation::packet`
    /// and published to the kernel by `BatchReservation::commit`.
    pub fn reserve_batch(&mut self, count: usize) -> Result<BatchReservation<'a>, Error> {
        unsafe {
            let ring_ptr = self.0.ring;
            let head = (*ring_ptr).head;
            let tail = (*ring_ptr).tail;
            let num_slots = (*ring_ptr).num_slots;
            // Free TX slots are head..tail, modulo the ring size. The
            // original computed the *used* count as (head - tail) and
            // subtracted it from num_slots - 1, which reports a completely
            // full ring (head == tail) as almost entirely free.
            let available_slots = if tail >= head {
                (tail - head) as usize
            } else {
                (tail + num_slots - head) as usize
            };
            if available_slots < count {
                return Err(Error::InsufficientSpace);
            }
            Ok(BatchReservation {
                ring: ring_ptr,
                start: head,
                count,
                _marker: PhantomData,
            })
        }
    }
}
/// A claim on `count` contiguous TX slots beginning at ring index `start`.
///
/// Created by `TxRing::reserve_batch`; the reserved slots become visible to
/// the kernel only when `commit` is called.
pub struct BatchReservation<'a> {
    ring: *mut ffi::netmap_ring,
    // Ring index of the first reserved slot.
    start: u32,
    // Number of reserved slots.
    count: usize,
    // Keeps the ring mapping borrowed for `'a` while the reservation lives.
    _marker: PhantomData<&'a mut ffi::netmap_ring>,
}
impl<'a> BatchReservation<'a> {
pub fn packet(&mut self, index: usize, len: usize) -> Result<&mut [u8], Error> {
if index >= self.count {
return Err(Error::InvalidRingIndex(index));
}
unsafe {
let slot_idx = (self.start + index as u32) % (*self.ring).num_slots;
let slot = (*self.ring).slot.add(slot_idx as usize);
(*slot).len = len as u16;
Ok(slice::from_raw_parts_mut((*slot).buf as *mut u8, len))
}
}
pub fn commit(self) {
unsafe {
(*self.ring).head = self.start + self.count as u32;
(*self.ring).cur = (*self.ring).head;
}
}
}
impl<'a> RxRing<'a> {
    /// Wraps a raw netmap ring pointer as a receive ring.
    pub(crate) fn new(ring: *mut ffi::netmap_ring, index: usize) -> Self {
        Self(Ring::new(ring, index))
    }

    /// Pops the next received frame, or `None` when the ring is empty.
    ///
    /// Received slots occupy head..tail; `tail` is the first slot still
    /// owned by the kernel. The original read the slot at `tail` (one the
    /// kernel still owns — inconsistent with its own `head == tail` empty
    /// check) and then wrote `tail` back, corrupting the kernel's cursor.
    /// Only `head`/`cur` may be advanced from userspace.
    pub fn recv(&mut self) -> Option<Frame> {
        unsafe {
            let ring = self.0.ring;
            let head = (*ring).head;
            if head == (*ring).tail {
                return None;
            }
            // `head` is already a valid slot index in [0, num_slots).
            let slot = (*ring).slot.add(head as usize);
            let buf = slice::from_raw_parts((*slot).buf as *const u8, (*slot).len as usize);
            // NOTE(review): construct the Frame before releasing the slot —
            // presumably `Frame::new` copies the bytes; confirm, otherwise
            // the kernel may recycle the buffer under a borrowing Frame.
            let frame = Frame::new(buf);
            // Release the slot: advance head with wrap-around, keep cur in
            // sync, and never touch the kernel-owned tail.
            let next = if head + 1 == (*ring).num_slots { 0 } else { head + 1 };
            (*ring).head = next;
            (*ring).cur = next;
            Some(frame)
        }
    }

    /// Drains up to `batch.len()` received frames into `batch`, returning
    /// how many entries were written.
    pub fn recv_batch(&mut self, batch: &mut [Frame]) -> usize {
        unsafe {
            let ring = self.0.ring;
            let num_slots = (*ring).num_slots;
            let head = (*ring).head;
            let tail = (*ring).tail;
            // Received frames occupy head..tail (mod num_slots). The
            // original computed `head - tail`, which is both the wrong
            // direction and underflows (debug-mode panic) whenever
            // head < tail.
            let avail = if tail >= head {
                (tail - head) as usize
            } else {
                (tail + num_slots - head) as usize
            };
            let count = avail.min(batch.len());
            for i in 0..count {
                let slot_idx = head.wrapping_add(i as u32) % num_slots;
                let slot = (*ring).slot.add(slot_idx as usize);
                let buf = slice::from_raw_parts((*slot).buf as *const u8, (*slot).len as usize);
                batch[i] = Frame::new(buf);
            }
            // Release the consumed slots; never write the kernel-owned tail.
            let new_head = head.wrapping_add(count as u32) % num_slots;
            (*ring).head = new_head;
            (*ring).cur = new_head;
            count
        }
    }
}