// vpp_plugin/vlib/node_generic.rs

//! Generic node implementations
//!
//! This module contains generic implementations of VPP nodes following set patterns that can be
//! reused across different plugins.
5
6use std::mem::MaybeUninit;
7
8use arrayvec::ArrayVec;
9
10use crate::{
11    vlib::{
12        self, BufferIndex, MainRef,
13        buffer::BufferRef,
14        node::{FRAME_SIZE, FrameRef, NextNodes, Node, NodeRuntimeRef},
15    },
16    vppinfra::{likely, unlikely},
17};
18
/// Destination for a buffer emitted by a generic node implementation
///
/// Either a concrete next node chosen by the implementation, or a request to continue along
/// the feature arc.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum FeatureNextNode<NextNode> {
    /// A specific next node defined by the generic node implementation
    DefinedNode(NextNode),
    /// The next feature in the feature arc
    NextFeature,
}
27
28impl<NextNode> From<NextNode> for FeatureNextNode<NextNode> {
29    fn from(value: NextNode) -> Self {
30        Self::DefinedNode(value)
31    }
32}
33
/// Trait for generic node implementations processing one buffer at a time in a feature arc
///
/// Implementors supply the per-buffer decision logic; the generic driver
/// [`generic_feature_node_x1`] handles fetching buffers from the frame and enqueueing them
/// to the chosen next nodes.
pub trait GenericFeatureNodeX1<N: Node> {
    /// Processing a buffer and determining the next node to send it to
    ///
    /// The buffer is passed by mutable reference, so the implementation may modify it in
    /// place before returning its destination.
    ///
    /// # Safety
    ///
    /// The safety preconditions vary depending on the specific implementation.
    unsafe fn map_buffer_to_next(
        &self,
        vm: &MainRef,
        node: &mut NodeRuntimeRef<N>,
        b0: &mut BufferRef<N::FeatureData>,
    ) -> FeatureNextNode<N::NextNodes>;
}
48
/// Generic implementation of a VPP node processing one buffer at a time in a feature arc
///
/// Resolves every buffer in `frame`, asks `generic_node_impl` for each buffer's destination
/// (a defined next node or the next feature in the arc), then enqueues all buffers to their
/// next nodes in a single batch. Returns the frame's vector count as the number processed.
///
/// # Safety
///
/// - The preconditions of the [`GenericFeatureNodeX1::map_buffer_to_next`] method must be upheld.
/// - Nodes with this node as a next node must send valid buffer indices in the Vector data.
/// - This mode must be invoked as part of a feature arc.
/// - All of the next nodes of this node must have a `Vector` type of `BufferIndex`, `Scalar` of
///   `()` and `Aux` of `()` (or their C equivalents).
#[inline(always)]
pub unsafe fn generic_feature_node_x1<GenericNode, N, FeatureData>(
    vm: &MainRef,
    node: &mut NodeRuntimeRef<N>,
    frame: &mut FrameRef<N>,
    generic_node_impl: GenericNode,
) -> u16
where
    N: Node<Vector = BufferIndex, Scalar = (), Aux = (), FeatureData = FeatureData>,
    GenericNode: GenericFeatureNodeX1<N>,
    FeatureData: Copy,
{
    // SAFETY: The safety requirements are documented in the function's safety comment.
    unsafe {
        // One next-node slot per possible frame entry; only the first `b.len()` slots are
        // ever written, and only those are read back below.
        let mut nexts: [MaybeUninit<u16>; FRAME_SIZE] = [MaybeUninit::uninit(); FRAME_SIZE];
        let mut b = ArrayVec::new();

        // Resolve the frame's buffer indices into buffer references; `from` retains the raw
        // indices for the batched enqueue at the end.
        let from = frame.get_buffers::<FRAME_SIZE>(vm, &mut b);

        for (i, b0) in b.iter_mut().enumerate() {
            let next = generic_node_impl.map_buffer_to_next(vm, node, b0);
            let next = match next {
                // Continue along the feature arc rather than to a fixed next node
                FeatureNextNode::NextFeature => b0.vnet_feature_next().0 as u16,
                FeatureNextNode::DefinedNode(next) => next.into_u16(),
            };
            // In-bounds: `i < b.len() <= FRAME_SIZE` by construction of `b`
            nexts.get_unchecked_mut(i).write(next);
        }

        // SAFETY: since every buffer yielded a next node and the number of elements of nexts is the
        // same as from, then every element is initialised. In addition, since we got all of the
        // buffer indices from `frame.get_buffers()` then they must all be valid. All the next nodes
        // expect to receive buffer indices and no other vector, aux or scalar data.
        let initialized_nexts = nexts.get_unchecked(..b.len()).assume_init_ref();
        vm.buffer_enqueue_to_next(node, from, initialized_nexts);

        frame.vector().len() as u16
    }
}
96
/// Trait for generic node implementations processing four buffers at a time in a feature arc
///
/// Extends [`GenericFeatureNodeX1`] with a quad-buffer fast path plus optional prefetch and
/// trace hooks; the inherited single-buffer method handles any remainder when the frame
/// length is not a multiple of four.
pub trait GenericFeatureNodeX4<N: Node>: GenericFeatureNodeX1<N> {
    /// Performing prefetching for a given group of four buffers
    ///
    /// The driver invokes this for the *next* group of four while the current group is being
    /// processed, so implementations should prefetch whatever data
    /// [`GenericFeatureNodeX4::map_buffer_to_next_x4`] will touch.
    fn prefetch_buffer_x4(
        &self,
        _vm: &MainRef,
        _node: &mut NodeRuntimeRef<N>,
        b: &mut [&mut BufferRef<N::FeatureData>; 4],
    ) {
        // By default we assume that at the very least the buffer headers will be read from, but
        // plugins should generally override this for their specifics
        b[0].prefetch_header_load();
        b[1].prefetch_header_load();
        b[2].prefetch_header_load();
        b[3].prefetch_header_load();
    }

    /// Process four buffers and determining the next nodes to send them to
    ///
    /// Returns one [`FeatureNextNode`] per input buffer, in the same order.
    ///
    /// # Safety
    ///
    /// The safety preconditions vary depending on the specific implementation.
    unsafe fn map_buffer_to_next_x4(
        &self,
        vm: &MainRef,
        node: &mut NodeRuntimeRef<N>,
        b: &mut [&mut BufferRef<N::FeatureData>; 4],
    ) -> [FeatureNextNode<N::NextNodes>; 4];

    /// Trace a buffer
    ///
    /// This is optional and can be empty if tracing is implemented in
    /// [`GenericFeatureNodeX4::map_buffer_to_next_x4`] and
    /// [`GenericFeatureNodeX1::map_buffer_to_next`] instead.
    ///
    /// # Safety
    ///
    /// The safety preconditions vary depending on the specific implementation.
    unsafe fn trace_buffer(
        &self,
        _vm: &MainRef,
        _node: &mut NodeRuntimeRef<N>,
        _b0: &mut BufferRef<N::FeatureData>,
    ) {
        // Default: no per-buffer tracing
    }
}
143
/// Generic implementation of a VPP node processing four buffers at a time in a feature arc
///
/// Processes the frame in strides of four buffers (prefetching the following stride each
/// iteration), falls back to the single-buffer path for the remainder, optionally traces
/// buffers when the node runtime has tracing enabled, then enqueues everything to the chosen
/// next nodes in one batch. Returns the frame's vector count as the number processed.
///
/// # Safety
///
/// - The preconditions of the [`GenericFeatureNodeX1::map_buffer_to_next`] &
///   [`GenericFeatureNodeX4::map_buffer_to_next_x4`] methods must be upheld.
/// - Nodes with this node as a next node must send valid buffer indices in the Vector data.
/// - This mode must be invoked as part of a feature arc.
/// - All of the next nodes of this node must have a `Vector` type of `BufferIndex`, `Scalar` of
///   `()` and `Aux` of `()` (or their C equivalents).
#[inline(always)]
pub unsafe fn generic_feature_node_x4<GenericNode, N, FeatureData>(
    vm: &MainRef,
    node: &mut NodeRuntimeRef<N>,
    frame: &mut FrameRef<N>,
    generic_node_impl: GenericNode,
) -> u16
where
    N: Node<Vector = BufferIndex, Scalar = (), Aux = (), FeatureData = FeatureData>,
    GenericNode: GenericFeatureNodeX4<N>,
    FeatureData: Copy,
{
    // SAFETY: The safety requirements are documented in the function's safety comment.
    unsafe {
        // One next-node slot per possible frame entry; only the first `b.len()` slots are
        // written, and only those are read back below.
        let mut nexts: [MaybeUninit<u16>; FRAME_SIZE] = [MaybeUninit::uninit(); FRAME_SIZE];
        let mut b = ArrayVec::new();

        // Resolve the frame's buffer indices into buffer references; `from` retains the raw
        // indices for the batched enqueue at the end.
        let from = frame.get_buffers::<FRAME_SIZE>(vm, &mut b);
        let len = b.len();

        // Main loop: four buffers per stride.
        for stride in 0..len / 4 {
            let i = stride * 4;

            // Prefetch the NEXT stride's buffers (i + 4 .. i + 8) while we work on this one,
            // but only when a full next stride exists.
            if i + 8 <= len {
                let stride_b = b.get_unchecked_mut(i + 4..i + 8);
                // Convert into array for type safety for trait method
                let stride_b = stride_b.as_mut_array::<4>().unwrap_unchecked();

                generic_node_impl.prefetch_buffer_x4(vm, node, stride_b);
            }

            let stride_b = b.get_unchecked_mut(i..i + 4);
            let stride_nexts = nexts.get_unchecked_mut(i..i + 4);
            // Convert into array for type safety for trait method
            let stride_b = stride_b.as_mut_array::<4>().unwrap_unchecked();
            let stride_nexts = stride_nexts.as_mut_array::<4>().unwrap_unchecked();

            // Optimise for common case where feature arc indices are the same for
            // all packets and for the case where the packet won't be dropped. This
            // allows for use of vectorised store of nexts[i..i + 3].
            if likely(
                stride_b[0].vnet_buffer().feature_arc_index()
                    == stride_b[1].vnet_buffer().feature_arc_index()
                    && stride_b[0].vnet_buffer().feature_arc_index()
                        == stride_b[2].vnet_buffer().feature_arc_index()
                    && stride_b[0].vnet_buffer().feature_arc_index()
                        == stride_b[3].vnet_buffer().feature_arc_index(),
            ) {
                // All four share an arc index, so resolve the feature-next once and fan out
                let feature_next = stride_b[0].vnet_feature_next().0 as u16;
                stride_nexts[0].write(feature_next);
                stride_nexts[1].write(feature_next);
                stride_nexts[2].write(feature_next);
                stride_nexts[3].write(feature_next);
            } else {
                // Mixed arc indices: resolve each buffer's feature-next individually
                stride_nexts[0].write(stride_b[0].vnet_feature_next().0 as u16);
                stride_nexts[1].write(stride_b[1].vnet_feature_next().0 as u16);
                stride_nexts[2].write(stride_b[2].vnet_feature_next().0 as u16);
                stride_nexts[3].write(stride_b[3].vnet_feature_next().0 as u16);
            };

            // The feature-next values above are speculative defaults; the implementation may
            // override any of them with a defined next node.
            let feat_nexts = generic_node_impl.map_buffer_to_next_x4(vm, node, stride_b);

            for (next_i, next) in feat_nexts.into_iter().enumerate() {
                match next {
                    FeatureNextNode::NextFeature => { /* already set */ }
                    FeatureNextNode::DefinedNode(next) => {
                        stride_nexts[next_i].write(next.into_u16());
                    }
                };
            }
        }

        // Remainder loop: handle the trailing 0..=3 buffers one at a time.
        for i in (len / 4) * 4..len {
            let b0 = b.get_unchecked_mut(i);

            // Optimise for the case where the packet won't be processed by the feature
            let mut next_val = b0.vnet_feature_next().0 as u16;

            let next = generic_node_impl.map_buffer_to_next(vm, node, b0);
            match next {
                FeatureNextNode::NextFeature => { /* already set */ }
                FeatureNextNode::DefinedNode(next) => next_val = next.into_u16(),
            };
            nexts.get_unchecked_mut(i).write(next_val);
        }

        // Although it might seem more natural to check the frame flags, existing practice amongst
        // C plugins is to check the node runtime flags instead, so that is what is followed here.
        if unlikely(node.flags().contains(vlib::node::NodeFlags::TRACE)) {
            for b0 in &mut b {
                if b0.flags().contains(vlib::BufferFlags::IS_TRACED) {
                    generic_node_impl.trace_buffer(vm, node, b0);
                }
            }
        }

        // SAFETY: since every buffer yielded a next node and the number of elements of nexts is the
        // same as from, then every element is initialised. In addition, since we got all of the
        // buffer indices from `frame.get_buffers()` then they must all be valid. All the next nodes
        // expect to receive buffer indices and no other vector, aux or scalar data.
        let initialized_nexts = nexts.get_unchecked(..b.len()).assume_init_ref();
        vm.buffer_enqueue_to_next(node, from, initialized_nexts);

        frame.vector().len() as u16
    }
}
259}