// vpp_plugin/vlib/node_generic.rs
//! Generic node implementations
//!
//! This module contains generic implementations of VPP nodes following set patterns that can be
//! reused across different plugins.

use core::slice;
use std::mem::MaybeUninit;

use arrayvec::ArrayVec;

use crate::{
    vlib::{
        self, BufferIndex, MainRef,
        buffer::BufferRef,
        node::{FRAME_SIZE, FrameRef, NextNodes, Node, NodeRuntimeRef},
    },
    vppinfra::{likely, unlikely},
};
19
/// Destination a generic node implementation routes a buffer to
///
/// Either a concrete next node chosen by the implementation, or a request to simply continue
/// along the feature arc.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum FeatureNextNode<NextNode> {
    /// A specific next node defined by the generic node implementation
    DefinedNode(NextNode),
    /// The next feature in the feature arc
    NextFeature,
}
28
29impl<NextNode> From<NextNode> for FeatureNextNode<NextNode> {
30    fn from(value: NextNode) -> Self {
31        Self::DefinedNode(value)
32    }
33}
34
/// Trait for generic node implementations processing one buffer at a time in a feature arc
pub trait GenericFeatureNodeX1<N: Node> {
    /// Processing a buffer and determining the next node to send it to
    ///
    /// The buffer is passed mutably, so implementations may modify it before returning either a
    /// specific next node ([`FeatureNextNode::DefinedNode`]) or a request to continue along the
    /// feature arc ([`FeatureNextNode::NextFeature`]).
    ///
    /// # Safety
    ///
    /// The safety preconditions vary depending on the specific implementation.
    unsafe fn map_buffer_to_next(
        &self,
        vm: &MainRef,
        node: &mut NodeRuntimeRef<N>,
        b0: &mut BufferRef<N::FeatureData>,
    ) -> FeatureNextNode<N::NextNodes>;
}
49
/// Generic implementation of a VPP node processing one buffer at a time in a feature arc
///
/// Maps every buffer in `frame` to a next node via `generic_node_impl`, enqueues the buffers to
/// those next nodes, and returns the number of vectors processed.
///
/// # Safety
///
/// - The preconditions of the [`GenericFeatureNodeX1::map_buffer_to_next`] method must be upheld.
/// - Nodes with this node as a next node must send valid buffer indices in the Vector data.
/// - This node must be invoked as part of a feature arc.
/// - All of the next nodes of this node must have a `Vector` type of `BufferIndex`, `Scalar` of
///   `()` and `Aux` of `()` (or their C equivalents).
#[inline(always)]
pub unsafe fn generic_feature_node_x1<GenericNode, N, FeatureData>(
    vm: &MainRef,
    node: &mut NodeRuntimeRef<N>,
    frame: &mut FrameRef<N>,
    generic_node_impl: GenericNode,
) -> u16
where
    N: Node<Vector = BufferIndex, Scalar = (), Aux = (), FeatureData = FeatureData>,
    GenericNode: GenericFeatureNodeX1<N>,
    FeatureData: Copy,
{
    // SAFETY: The safety requirements are documented in the function's safety comment.
    unsafe {
        // One next-node index per buffer; only the first `b.len()` entries are ever written,
        // and only that prefix is read when enqueueing below.
        let mut nexts: [MaybeUninit<u16>; FRAME_SIZE] = [MaybeUninit::uninit(); FRAME_SIZE];
        let mut b = ArrayVec::new();

        let from = frame.get_buffers::<FRAME_SIZE>(vm, &mut b);

        for (i, b0) in b.iter_mut().enumerate() {
            let next = generic_node_impl.map_buffer_to_next(vm, node, b0);
            let next = match next {
                // Implementation did not pick a node: continue along the feature arc
                FeatureNextNode::NextFeature => b0.vnet_feature_next().0 as u16,
                FeatureNextNode::DefinedNode(next) => next.into_u16(),
            };
            // `i < b.len() <= FRAME_SIZE` (b was filled by get_buffers::<FRAME_SIZE>), so the
            // unchecked index is in bounds
            nexts.get_unchecked_mut(i).write(next);
        }

        // SAFETY: since every buffer yielded a next node and the number of elements of nexts is the
        // same as from, then every element is initialised. In addition, since we got all of the
        // buffer indices from `frame.get_buffers()` then they must all be valid. All the next nodes
        // expect to receive buffer indices and no other vector, aux or scalar data.
        // (The transmute is sound: `MaybeUninit<u16>` has the same layout as `u16` and exactly the
        // first `b.len()` elements — the span of the slice — were written above.)
        vm.buffer_enqueue_to_next(
            node,
            from,
            std::mem::transmute::<&[MaybeUninit<u16>], &[u16]>(slice::from_raw_parts(
                nexts.as_ptr(),
                b.len(),
            )),
        );

        frame.vector().len() as u16
    }
}
103
104/// Trait for generic node implementations processing one buffer at a time in a feature arc
105pub trait GenericFeatureNodeX4<N: Node>: GenericFeatureNodeX1<N> {
106    /// Performing prefetching for a given buffer
107    fn prefetch_buffer_x4(
108        &self,
109        _vm: &MainRef,
110        _node: &mut NodeRuntimeRef<N>,
111        b: &mut [&mut BufferRef<N::FeatureData>; 4],
112    ) {
113        // By default we assume that at the very least the buffer headers will be read from, but
114        // plugins should generally override this for their specifics
115        b[0].prefetch_header_load();
116        b[1].prefetch_header_load();
117        b[2].prefetch_header_load();
118        b[3].prefetch_header_load();
119    }
120
121    /// Process four buffers and determining the next nodes to send them to
122    ///
123    /// # Safety
124    ///
125    /// The safety preconditions vary depending on the specific implementation.
126    unsafe fn map_buffer_to_next_x4(
127        &self,
128        vm: &MainRef,
129        node: &mut NodeRuntimeRef<N>,
130        b: &mut [&mut BufferRef<N::FeatureData>; 4],
131    ) -> [FeatureNextNode<N::NextNodes>; 4];
132
133    /// Trace a buffer
134    ///
135    /// This is optional and can be empty if tracing is implemented in
136    /// [`GenericFeatureNodeX4::map_buffer_to_next_x4`] and
137    /// [`GenericFeatureNodeX1::map_buffer_to_next`] instead.
138    ///
139    /// # Safety
140    ///
141    /// The safety preconditions vary depending on the specific implementation.
142    unsafe fn trace_buffer(
143        &self,
144        _vm: &MainRef,
145        _node: &mut NodeRuntimeRef<N>,
146        _b0: &mut BufferRef<N::FeatureData>,
147    ) {
148    }
149}
150
151/// Generic implementation of a VPP node processing four buffers at a time in a feature arc
152///
153/// # Safety
154///
155/// - The preconditions of the [`GenericFeatureNodeX1::map_buffer_to_next`] &
156///   [`GenericFeatureNodeX4::map_buffer_to_next_x4`] methods must be upheld.
157/// - Nodes with this node as a next node must send valid buffer indices in the Vector data.
158/// - This mode must be invoked as part of a feature arc.
159/// - All of the next nodes of this node must have a `Vector` type of `BufferIndex`, `Scalar` of
160///   `()` and `Aux` of `()` (or their C equivalents).
161#[inline(always)]
162pub unsafe fn generic_feature_node_x4<GenericNode, N, FeatureData>(
163    vm: &MainRef,
164    node: &mut NodeRuntimeRef<N>,
165    frame: &mut FrameRef<N>,
166    generic_node_impl: GenericNode,
167) -> u16
168where
169    N: Node<Vector = BufferIndex, Scalar = (), Aux = (), FeatureData = FeatureData>,
170    GenericNode: GenericFeatureNodeX4<N>,
171    FeatureData: Copy,
172{
173    // SAFETY: The safety requirements are documented in the function's safety comment.
174    unsafe {
175        let mut nexts: [MaybeUninit<u16>; FRAME_SIZE] = [MaybeUninit::uninit(); FRAME_SIZE];
176        let mut b = ArrayVec::new();
177
178        let from = frame.get_buffers::<FRAME_SIZE>(vm, &mut b);
179        let len = b.len();
180
181        for stride in 0..len / 4 {
182            let i = stride * 4;
183
184            if stride * 4 + 4 < len {
185                let stride_b = b.get_unchecked_mut(i + 4..i + 8);
186                // Convert into array for type safety for trait method
187                let stride_b: &mut [&mut BufferRef<_>; 4] = stride_b.try_into().unwrap_unchecked();
188
189                generic_node_impl.prefetch_buffer_x4(vm, node, stride_b);
190            }
191
192            let stride_b = b.get_unchecked_mut(i..i + 4);
193            // Convert into array for type safety for trait method
194            let stride_b: &mut [&mut BufferRef<_>; 4] = stride_b.try_into().unwrap_unchecked();
195
196            // Optimise for common case where feature arc indices are the same for
197            // all packets and for the case where the packet won't be dropped. This
198            // allows for use of vectorised store of nexts[i..i + 3].
199            if likely(
200                stride_b[0].vnet_buffer().feature_arc_index()
201                    == stride_b[1].vnet_buffer().feature_arc_index()
202                    && stride_b[0].vnet_buffer().feature_arc_index()
203                        == stride_b[2].vnet_buffer().feature_arc_index()
204                    && stride_b[0].vnet_buffer().feature_arc_index()
205                        == stride_b[3].vnet_buffer().feature_arc_index(),
206            ) {
207                let feature_next = stride_b[0].vnet_feature_next().0 as u16;
208                nexts.get_unchecked_mut(i).write(feature_next);
209                nexts.get_unchecked_mut(i + 1).write(feature_next);
210                nexts.get_unchecked_mut(i + 2).write(feature_next);
211                nexts.get_unchecked_mut(i + 3).write(feature_next);
212            } else {
213                nexts
214                    .get_unchecked_mut(i)
215                    .write(stride_b[0].vnet_feature_next().0 as u16);
216                nexts
217                    .get_unchecked_mut(i + 1)
218                    .write(stride_b[1].vnet_feature_next().0 as u16);
219                nexts
220                    .get_unchecked_mut(i + 2)
221                    .write(stride_b[2].vnet_feature_next().0 as u16);
222                nexts
223                    .get_unchecked_mut(i + 3)
224                    .write(stride_b[3].vnet_feature_next().0 as u16);
225            };
226
227            let feat_nexts = generic_node_impl.map_buffer_to_next_x4(vm, node, stride_b);
228
229            for (next_i, next) in feat_nexts.into_iter().enumerate() {
230                match next {
231                    FeatureNextNode::NextFeature => { /* already set */ }
232                    FeatureNextNode::DefinedNode(next) => {
233                        nexts.get_unchecked_mut(i + next_i).write(next.into_u16());
234                    }
235                };
236            }
237        }
238
239        for i in (len / 4) * 4..len {
240            let b0 = b.get_unchecked_mut(i);
241
242            // Optimise for the case where the packet won't be processed by the feature
243            let mut next_val = b0.vnet_feature_next().0 as u16;
244
245            let next = generic_node_impl.map_buffer_to_next(vm, node, b0);
246            match next {
247                FeatureNextNode::NextFeature => { /* already set */ }
248                FeatureNextNode::DefinedNode(next) => next_val = next.into_u16(),
249            };
250            nexts.get_unchecked_mut(i).write(next_val);
251        }
252
253        // Although it might seem more natural to check the frame flags, existing practice amongst
254        // C plugins is to check the node runtime flags instead, so that is what is followed here.
255        if unlikely(node.flags().contains(vlib::node::NodeFlags::TRACE)) {
256            for b0 in &mut b {
257                if b0.flags().contains(vlib::BufferFlags::IS_TRACED) {
258                    generic_node_impl.trace_buffer(vm, node, b0);
259                }
260            }
261        }
262
263        // SAFETY: since every buffer yielded a next node and the number of elements of nexts is the
264        // same as from, then every element is initialised. In addition, since we got all of the
265        // buffer indices from `frame.get_buffers()` then they must all be valid. All the next nodes
266        // expect to receive buffer indices and no other vector, aux or scalar data.
267        vm.buffer_enqueue_to_next(
268            node,
269            from,
270            std::mem::transmute::<&[MaybeUninit<u16>], &[u16]>(slice::from_raw_parts(
271                nexts.as_ptr(),
272                b.len(),
273            )),
274        );
275
276        frame.vector().len() as u16
277    }
278}