/// Wraps a DPDK `rte_ip_frag_tbl` to reassemble fragmented IP packets, together with
/// the "death row" of mbufs awaiting bulk free.
///
/// NOTE(review): despite its name, `ipV4FragmentationTable` is passed to BOTH the IPv4
/// and IPv6 reassembly entry points in the `impl` below — it serves both protocols.
#[allow(missing_debug_implementations)]
pub struct IpPacketReassemblyTable
{
// Raw table pointer returned by `rte_ip_frag_table_create`; destroyed in `Drop`.
ipV4FragmentationTable: *mut rte_ip_frag_tbl,
// DPDK accumulates mbufs to be freed here; emptied via `rte_ip_frag_free_death_row`.
deathRow: rte_ip_frag_death_row,
// Passed as the prefetch factor to `rte_ip_frag_free_death_row`; presumably a
// prefetch look-ahead count — confirm against the DPDK documentation.
deathRowPreFetchFactor: u32,
}
impl Drop for IpPacketReassemblyTable
{
/// Destroys the DPDK fragmentation table, then frees any mbufs still queued on the
/// death row.
///
/// NOTE(review): the destroy call happens first; whether `rust_rte_ip_frag_table_destroy`
/// itself pushes still-keyed fragments onto the death row (making the subsequent free
/// necessary for them) depends on the shim's implementation — confirm there.
#[inline(always)]
fn drop(&mut self)
{
unsafe { rust_rte_ip_frag_table_destroy(self.ipV4FragmentationTable) };
self.freeAllPacketsOnDeathRow();
}
}
impl IpPacketReassemblyTable
{
	/// Creates a reassembly table sized for `numberOfBuckets * entriesPerBucket`
	/// simultaneously tracked fragments.
	///
	/// `maximumFlowTimeToLiveInMilliseconds` is converted into TSC cycles before being
	/// handed to DPDK. `numaSocketId` of `None` lets DPDK pick any socket.
	///
	/// Returns `None` if DPDK could not allocate the underlying table.
	///
	/// # Panics
	///
	/// Panics if `numberOfBuckets` is zero.
	#[inline(always)]
	pub fn create(numberOfBuckets: u16, entriesPerBucket: PowerOfTwoSixteenBit, maximumFlowTimeToLiveInMilliseconds: u64, numaSocketId: Option<NumaSocketId>) -> Option<Self>
	{
		assert!(numberOfBuckets != 0, "numberOfBuckets can not be zero");

		let numberOfBuckets = numberOfBuckets as u32;
		let entriesPerBucket = (entriesPerBucket as u16) as u32;

		// Can not overflow: u16::MAX (65,535) * maximum 16-bit power of two (65,536) still fits in u32.
		let maximumEntries = numberOfBuckets * entriesPerBucket;

		// Milliseconds to TSC cycles; cycles-per-millisecond is rounded UP so a low TSC
		// frequency can never round a non-zero time-to-live down to zero cycles.
		let maximumTimeToLiveInCyclesForEachFragmentedPacket = (unsafe { rte_get_tsc_hz() } + MS_PER_S - 1) / MS_PER_S * maximumFlowTimeToLiveInMilliseconds;

		const deathRowPreFetchFactor: u8 = 3;

		// (Was `numberOfBuckets as u32` — redundant, the shadowing binding above is already u32.)
		Self::new(numberOfBuckets, unsafe { PowerOfTwoThirtyTwoBit::from_u32_unchecked(entriesPerBucket) }, maximumEntries, maximumTimeToLiveInCyclesForEachFragmentedPacket, numaSocketId, deathRowPreFetchFactor)
	}

	/// Allocates the underlying DPDK table; returns `None` on allocation failure.
	///
	/// # Panics
	///
	/// Panics if `numberOfBuckets` or `maximumTimeToLiveInCyclesForEachFragmentedPacket`
	/// is zero, if `maximumEntries` exceeds the table capacity, or if
	/// `deathRowPreFetchFactor` exceeds the death row's fixed capacity.
	#[inline(always)]
	fn new(numberOfBuckets: u32, entriesPerBucket: PowerOfTwoThirtyTwoBit, maximumEntries: u32, maximumTimeToLiveInCyclesForEachFragmentedPacket: u64, numaSocketId: Option<NumaSocketId>, deathRowPreFetchFactor: u8) -> Option<Self>
	{
		assert!(numberOfBuckets != 0, "numberOfBuckets can not be zero");
		assert!(maximumTimeToLiveInCyclesForEachFragmentedPacket != 0, "maximumCycles can not be zero");

		let bucketEntriesAsU32 = entriesPerBucket.as_u32();
		assert!(maximumEntries <= (numberOfBuckets * bucketEntriesAsU32), "maximumEntries should be less than or equal to numberOfBuckets * entriesPerBucket");

		let deathRow = rte_ip_frag_death_row::default();
		// FIX: the format string has two `{}` placeholders but the original passed no
		// arguments, which fails to compile (`format_args!` positional-argument mismatch).
		assert!(deathRowPreFetchFactor as usize <= deathRow.row.len(), "The deathRowPreFetchFactor '{}' exceeds the maximum size of deathRow '{}'. Please see the IMPORTANT NOTICE above which explains why this language has been used", deathRowPreFetchFactor, deathRow.row.len());

		let ipV4FragmentationTable = unsafe { rte_ip_frag_table_create(numberOfBuckets, bucketEntriesAsU32, maximumEntries, maximumTimeToLiveInCyclesForEachFragmentedPacket, numaSocketId.as_c_int()) };
		if unlikely(ipV4FragmentationTable.is_null())
		{
			None
		}
		else
		{
			Some
			(
				Self
				{
					ipV4FragmentationTable,
					deathRow,
					deathRowPreFetchFactor: deathRowPreFetchFactor as u32,
				}
			)
		}
	}

	/// Attempts to reassemble an IPv4 fragment.
	///
	/// Returns the reassembled packet's mbuf once all fragments have arrived; DPDK returns
	/// null until then (and queues dropped/expired fragments on the internal death row —
	/// call `freeAllPacketsOnDeathRowIfFull` periodically).
	#[inline(always)]
	pub fn reassembleFragmentedIpV4Packet(&mut self, incomingFragmentedPacket: *mut rte_mbuf, fragmentArrivalTimeStampFromRdTsc: u64, pointerToIpV4HeaderInsideIncomingFragmentedPacket: *mut ipv4_hdr) -> *mut rte_mbuf
	{
		unsafe { rte_ipv4_frag_reassemble_packet(self.ipV4FragmentationTable, &mut self.deathRow, incomingFragmentedPacket, fragmentArrivalTimeStampFromRdTsc, pointerToIpV4HeaderInsideIncomingFragmentedPacket) }
	}

	/// Attempts to reassemble an IPv6 fragment.
	///
	/// Uses the same underlying table as the IPv4 path (the `ipV4FragmentationTable`
	/// field name notwithstanding); the DPDK table handles both protocols.
	#[inline(always)]
	pub fn reassembleFragmentedIpV6Packet(&mut self, incomingFragmentedPacket: *mut rte_mbuf, fragmentArrivalTimeStampFromRdTsc: u64, pointerToIpV6HeaderInsideIncomingFragmentedPacket: *mut ipv6_hdr, pointerToIpFragmentExtensionHeader: *mut ipv6_extension_fragment) -> *mut rte_mbuf
	{
		unsafe { rte_ipv6_frag_reassemble_packet(self.ipV4FragmentationTable, &mut self.deathRow, incomingFragmentedPacket, fragmentArrivalTimeStampFromRdTsc, pointerToIpV6HeaderInsideIncomingFragmentedPacket, pointerToIpFragmentExtensionHeader) }
	}

	/// Frees all mbufs on the death row, but only once it is completely full.
	#[inline(always)]
	pub fn freeAllPacketsOnDeathRowIfFull(&mut self)
	{
		if unlikely(self.isDeathRowFull())
		{
			self.freeAllPacketsOnDeathRow()
		}
	}

	/// `true` when the death row's count has reached its fixed-size capacity.
	#[inline(always)]
	fn isDeathRowFull(&self) -> bool
	{
		self.deathRow.cnt == self.deathRow.row.len() as u32
	}

	/// Hands the whole death row to DPDK for bulk freeing; resets its count.
	#[inline(always)]
	fn freeAllPacketsOnDeathRow(&mut self)
	{
		unsafe { rte_ip_frag_free_death_row(&mut self.deathRow, self.deathRowPreFetchFactor) };
	}
}