vpp_plugin/vlib/
buffer.rs

1//! vlib buffer abstraction
2//!
3//! This module contains abstractions around VPP's `vlib_buffer_t` structure.
4//! It provides safe access to buffer fields and operations.
5//! It also includes buffer allocation and deallocation functions.
6//! The goal is to provide a safe and ergonomic interface for working with VPP buffers.
7
8use std::{hint::assert_unchecked, mem::MaybeUninit};
9
10use arrayvec::ArrayVec;
11use bitflags::bitflags;
12
13use crate::{
14    bindings::{
15        vlib_add_trace, vlib_buffer_func_main, vlib_buffer_t, vlib_buffer_t__bindgen_ty_1,
16        vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1, CLIB_LOG2_CACHE_LINE_BYTES,
17        VLIB_BUFFER_EXT_HDR_VALID, VLIB_BUFFER_IS_TRACED, VLIB_BUFFER_MIN_CHAIN_SEG_SIZE,
18        VLIB_BUFFER_NEXT_PRESENT, VLIB_BUFFER_PRE_DATA_SIZE, VLIB_BUFFER_TOTAL_LENGTH_VALID,
19    },
20    vlib::{
21        node::{ErrorCounters, Node, NodeRuntimeRef, VectorBufferIndex},
22        MainRef,
23    },
24    vppinfra::likely,
25};
26
27#[cfg(feature = "experimental")]
28use crate::bindings::{vlib_helper_buffer_alloc, vlib_helper_buffer_free};
29#[cfg(feature = "experimental")]
30use std::fmt;
31
/// VPP buffer index
///
/// A zero-cost (`repr(transparent)`) wrapper around the `u32` index VPP uses to identify a
/// buffer.
#[repr(transparent)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BufferIndex(u32);

impl BufferIndex {
    /// Wrap a raw `u32` buffer index in a `BufferIndex`
    pub const fn new(buffer: u32) -> Self {
        BufferIndex(buffer)
    }
}
43
44impl From<u32> for BufferIndex {
45    fn from(value: u32) -> BufferIndex {
46        Self(value)
47    }
48}
49
50impl From<BufferIndex> for u32 {
51    fn from(value: BufferIndex) -> Self {
52        value.0
53    }
54}
55
impl VectorBufferIndex for BufferIndex {
    fn as_u32_slice(slice: &[Self]) -> &[u32] {
        // SAFETY: `BufferIndex` is a `repr(transparent)` wrapper around `u32`, so `[BufferIndex]`
        // and `[u32]` have identical memory layout (same element size and alignment) and the
        // transmuted slice keeps the same length.
        unsafe { std::mem::transmute::<&[BufferIndex], &[u32]>(slice) }
    }
}
63
bitflags! {
    /// vlib buffer flags
    ///
    /// Named bits mirror the `VLIB_BUFFER_*` flag constants from the C bindings.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BufferFlags: u32 {
        /// Trace this buffer
        const IS_TRACED = VLIB_BUFFER_IS_TRACED;
        /// This is one buffer in a chain of buffers
        const NEXT_PRESENT = VLIB_BUFFER_NEXT_PRESENT;
        /// Total length is valid
        const TOTAL_LENGTH_VALID = VLIB_BUFFER_TOTAL_LENGTH_VALID;
        /// Contains external buffer manager header
        const EXT_HDR_VALID = VLIB_BUFFER_EXT_HDR_VALID;

        // Flags can be extended by user/vnet; keeping all unnamed bits means unknown flag
        // values round-trip through `from_bits_retain` without being dropped.
        const _ = !0;
    }
}
82
/// Construct a user buffer flag
///
/// User flags are allocated from the top bit downwards: flag `n` occupies bit `32 - n`.
///
/// `n` must be in `1..=28` (i.e. less than 29 and greater than 0); any other value panics.
pub const fn vlib_buffer_flag_user(n: u32) -> u32 {
    assert!(n >= 1 && n <= 28);
    let shift = 32 - n;
    1u32 << shift
}
90
/// Reference to a VPP buffer
///
/// A `&mut BufferRef<FeatureData>` is equivalent to a `vlib_buffer_t *` in C (a `*mut
/// vlib_buffer_t` in Rust).
///
/// `FeatureData` is a compile-time marker only (held via `PhantomData`, never stored), so it
/// does not affect the layout — the type stays `repr(transparent)` over the opaque buffer.
#[repr(transparent)]
pub struct BufferRef<FeatureData>(foreign_types::Opaque, std::marker::PhantomData<FeatureData>);
97
impl<FeatureData> BufferRef<FeatureData> {
    /// Create a `&BufferRef` from a raw pointer
    ///
    /// # Safety
    ///
    /// - The pointer must be a valid and properly initialised `vlib_buffer_t`.
    /// - The pointer must stay valid and the contents must not be mutated for the duration of the
    ///   lifetime of the returned object.
    #[inline(always)]
    pub unsafe fn from_ptr<'a>(ptr: *mut vlib_buffer_t) -> &'a Self {
        &*(ptr as *mut _)
    }

    /// Create a `&mut BufferRef` from a raw pointer
    ///
    /// # Safety
    ///
    /// - The pointer must be a valid and properly initialised `vlib_buffer_t`.
    /// - The pointer must stay valid and the buffer must not be read or written through any
    ///   other pointer for the duration of the lifetime of the returned object (the returned
    ///   `&mut` must be the only live access path).
    #[inline(always)]
    pub unsafe fn from_ptr_mut<'a>(ptr: *mut vlib_buffer_t) -> &'a mut Self {
        &mut *(ptr as *mut _)
    }

    /// Returns the raw pointer to the underlying `vlib_buffer_t`
    pub fn as_ptr(&self) -> *mut vlib_buffer_t {
        self as *const _ as *mut _
    }

    // Shared view of the buffer fields beneath the alignment-forcing outer bindgen union.
    fn as_details(&self) -> &vlib_buffer_t__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { (*self.as_ptr()).__bindgen_anon_1.as_ref() }
    }

    // Mutable view of the buffer fields beneath the alignment-forcing outer bindgen union.
    fn as_details_mut(&mut self) -> &mut vlib_buffer_t__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { (*self.as_ptr()).__bindgen_anon_1.as_mut() }
    }

    // Shared view of the innermost bindgen struct holding current_data/current_length/flags/etc.
    pub(crate) fn as_metadata(&self) -> &vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { self.as_details().__bindgen_anon_1.__bindgen_anon_1.as_ref() }
    }

    // Mutable view of the innermost bindgen struct holding current_data/current_length/flags/etc.
    pub(crate) fn as_metadata_mut(
        &mut self,
    ) -> &mut vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe {
            self.as_details_mut()
                .__bindgen_anon_1
                .__bindgen_anon_1
                .as_mut()
        }
    }

    // Start of the buffer's data area; `current_data` is a signed offset relative to this.
    fn data(&self) -> *const u8 {
        self.as_details().data.as_ptr()
    }

    // Signed offset of the current data from the start of `data` (may be negative, into
    // pre-data).
    fn current_data_offset(&self) -> i16 {
        self.as_metadata().current_data
    }

    fn current_data_offset_mut(&mut self) -> &mut i16 {
        &mut self.as_metadata_mut().current_data
    }

    /// Current length
    ///
    /// Typically, this is the amount of packet data remaining from [`Self::current_ptr_mut`].
    pub fn current_length(&self) -> u16 {
        self.as_metadata().current_length
    }

    fn current_length_mut(&mut self) -> &mut u16 {
        &mut self.as_metadata_mut().current_length
    }

    /// Buffer flags
    ///
    /// Uses `from_bits_retain` so user/vnet-defined flag bits are preserved.
    pub fn flags(&self) -> BufferFlags {
        BufferFlags::from_bits_retain(self.as_metadata().flags)
    }

    /// Get a pointer to the current data
    ///
    /// This corresponds to the VPP C API `vlib_buffer_get_current`.
    ///
    /// # Usage guidance
    ///
    /// Note that the pointer returned may point to uninitialised data depending on the context.
    /// In addition, depending on the context, the remaining data may be less than the amount
    /// expected. Finally, even when the remaining data is sufficient and initialised, it may
    /// not have been validated, so care must be taken in determining whether or not lengths in
    /// the headers can be trusted.
    pub fn current_ptr_mut(&mut self) -> *mut u8 {
        let data = self.data().cast_mut();
        let current_data = self.current_data_offset();

        debug_assert!(current_data >= -(VLIB_BUFFER_PRE_DATA_SIZE as i16));

        // SAFETY: current_data is asserted to be valid and point into valid (but possibly
        // unintialised) data or pre_data.
        unsafe { data.offset(current_data as isize) }
    }

    // vlib_buffer_has_space
    /// Check if the buffer has space for `l` more bytes
    ///
    /// NOTE(review): a negative `l` is cast to a `u16` here and wraps to a value >= 0x8000,
    /// which makes the comparison almost always false — callers appear to be expected to pass
    /// a non-negative `l`; confirm against the C `vlib_buffer_has_space` semantics.
    pub fn has_space(&self, l: i16) -> bool {
        self.current_length() >= l as u16
    }

    // vlib_buffer_advance
    /// Advance the current data pointer by `l` bytes
    ///
    /// A negative `l` rewinds into the pre-data area, growing `current_length` accordingly.
    ///
    /// # Safety
    ///
    /// - If `l` is positive, the buffer must have at least `l` bytes of data remaining.
    /// - If `l` is negative, the current data offset must be at least `-l` bytes from the start of
    ///   the buffer's data area (including pre-data).
    pub unsafe fn advance(&mut self, l: i16) {
        debug_assert!(l < 0 || self.current_length() >= l as u16);
        debug_assert!(
            l >= 0 || self.current_data_offset() + VLIB_BUFFER_PRE_DATA_SIZE as i16 >= -l
        );

        *self.current_data_offset_mut() += l;
        if l >= 0 {
            *self.current_length_mut() -= l as u16;
        } else {
            *self.current_length_mut() += -l as u16;
        }

        // Chained buffers must keep a minimum segment size in each link.
        debug_assert!(
            !self.flags().contains(BufferFlags::NEXT_PRESENT)
                || self.current_length() >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE as u16
        );
    }

    /// Get a pointer to the end of the current data
    ///
    /// This corresponds to the VPP C function `vlib_buffer_get_tail`.
    pub fn tail_mut(&mut self) -> *mut u8 {
        let data = self.data().cast_mut();
        let current_data = self.current_data_offset();

        debug_assert!(current_data >= -(VLIB_BUFFER_PRE_DATA_SIZE as i16));

        // SAFETY: current_data and current_length are asserted to be valid and `current_data +
        // current_length` asserted to point to the end of valid (but possibly unintialised) data
        // or pre_data.
        unsafe {
            let ptr = data.offset(current_data as isize);
            ptr.add(self.current_length() as usize)
        }
    }

    /// Add trace data to this buffer
    ///
    /// Allocates `size_of::<N::TraceData>()` bytes of trace space via `vlib_add_trace` and
    /// returns it as uninitialised memory for the caller to fill in.
    pub fn add_trace<N: Node>(
        &mut self,
        vm: &MainRef,
        node: &NodeRuntimeRef<N>,
    ) -> &mut MaybeUninit<N::TraceData> {
        // SAFETY: pointers are valid and the uninitialised data that is returned cannot be read
        // by safe code
        unsafe {
            &mut *(vlib_add_trace(
                vm.as_ptr(),
                node.as_ptr(),
                self.as_ptr(),
                std::mem::size_of::<N::TraceData>() as u32,
            ) as *mut MaybeUninit<N::TraceData>)
        }
    }

    /// Set an error reason
    ///
    /// This is typically done before sending the packet to the `drop` node, which uses the
    /// value to display the reason in traces and to automatically increment the per-node,
    /// per-error counter for the error.
    pub fn set_error<N: Node>(&mut self, node: &NodeRuntimeRef<N>, error: N::Errors) {
        // SAFETY: vlib_node_runtime_t::errors is sized according to the number of error values
        // and it is a precondition of the Errors trait that the value return by into_u16() cannot
        // be greater than or equal to the declared number of error values.
        unsafe {
            let error_value = (*node.as_ptr()).errors.add(error.into_u16() as usize);
            self.as_metadata_mut().error = *error_value;
        }
    }
}
300
/// Owned buffer (with context)
///
/// The `&MainRef` context is necessary to be able to free the buffer on drop.
#[cfg(feature = "experimental")]
pub struct BufferWithContext<'a> {
    // Index of the owned buffer; released via `vlib_helper_buffer_free` in `Drop`.
    buffer: u32,
    // vlib main used to resolve the index and to free the buffer on drop.
    vm: &'a MainRef,
}
309
#[cfg(feature = "experimental")]
impl<'a> BufferWithContext<'a> {
    /// Creates a `BufferWithContext` directly from a buffer index and a main reference
    ///
    /// # Safety
    /// - The buffer index must be valid and the caller must have ownership of the buffer it
    ///   corresponds to.
    pub unsafe fn from_parts(buffer: u32, vm: &'a MainRef) -> Self {
        Self { buffer, vm }
    }

    /// Get a mutable reference to the buffer
    ///
    /// Resolves the single owned index through [`MainRef::get_buffers`] using a one-element
    /// `ArrayVec`.
    pub fn as_buffer_ref(&mut self) -> &mut BufferRef<()> {
        let from = &[self.buffer];
        let mut b: ArrayVec<_, 1> = ArrayVec::new();
        // SAFETY: capacity of b equals the length of from, `self.buffer` is a valid index and we
        // force FeatureData to `()` since it isn't known and the buffer cannot be part of a
        // feature arc. N == 1 is not a multiple of 8, so get_buffers takes its scalar fallback
        // path and never over-reads `from`.
        unsafe {
            self.vm.get_buffers(from, &mut b);
        }
        b.remove(0)
    }
}
334
#[cfg(feature = "experimental")]
impl Drop for BufferWithContext<'_> {
    // Returns the owned buffer to VPP when the handle goes out of scope.
    fn drop(&mut self) {
        // SAFETY: we have a reference to MainRef so the pointer must be valid, we pass in a
        // pointer to buffers consistent with the number of buffers passed in, and self.buffer
        // is a valid buffer index that we have ownership of.
        unsafe {
            vlib_helper_buffer_free(self.vm.as_ptr(), &mut self.buffer, 1);
        }
    }
}
346
/// Buffer allocation error
///
/// Returned by [`MainRef::alloc_buffer`] when VPP cannot supply a buffer.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg(feature = "experimental")]
pub struct BufferAllocError;
351
#[cfg(feature = "experimental")]
impl fmt::Display for BufferAllocError {
    // Human-readable message for the allocation failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("buffer allocation error")
    }
}
358
#[cfg(feature = "experimental")]
// Marker impl so BufferAllocError can be used as a `dyn Error`; all trait methods use defaults.
impl std::error::Error for BufferAllocError {}
361
/// u64 x 8
///
/// This type exists to strongly hint to the compiler that it should emit vector instructions.
///
/// In the future, the implementation might be changed to use standard library portable SIMD once
/// stabilised (https://github.com/rust-lang/rust/issues/86656), or use arch-specific intrinsics
/// (if evidenced by high-enough performance improvement).
#[allow(non_camel_case_types)]
pub(crate) struct u64x8([u64; 8]);

impl u64x8 {
    /// Construct a `u64x8` from an array of 8 `u64`s
    #[inline(always)]
    pub(crate) fn from_array(a: [u64; 8]) -> Self {
        Self(a)
    }

    /// Construct a `u64x8` by zero-extending 8 consecutive `u32`s read from `ptr`
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for reading 8 consecutive, properly aligned `u32` values.
    #[inline(always)]
    pub(crate) unsafe fn from_u32_ptr(ptr: *const u32) -> Self {
        let mut lanes = [0u64; 8];
        for (i, lane) in lanes.iter_mut().enumerate() {
            *lane = *ptr.add(i) as u64;
        }
        Self(lanes)
    }

    /// Shift each element to the left by a given constant value, assigning the result to `self`
    #[inline(always)]
    pub(crate) fn shift_elements_left<const OFFSET: u32>(&mut self) {
        self.0.iter_mut().for_each(|lane| *lane <<= OFFSET);
    }

    /// Add a given value to each element, returning a new `u64x8` with the result
    #[inline(always)]
    pub(crate) fn add_u64(&self, value: u64) -> Self {
        Self(self.0.map(|lane| lane + value))
    }

    /// Write the 8 lanes to 8 contiguous `u64`s starting at `ptr`
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for writing 8 consecutive, properly aligned `u64` values.
    #[inline(always)]
    pub(crate) unsafe fn store(&self, ptr: *mut u64) {
        for (i, lane) in self.0.iter().enumerate() {
            *ptr.add(i) = *lane;
        }
    }
}
430
/// Round a value up to the next multiple of the given power-of-two
///
/// Uses the classic add-then-mask trick, which is only correct when `pow2` really is a power
/// of two (checked in debug builds).
const fn next_multiple_of_pow2(val: usize, pow2: usize) -> usize {
    debug_assert!(pow2.is_power_of_two());
    let mask = pow2 - 1;
    (val + mask) & !mask
}
436
impl MainRef {
    /// Get pointers to buffers for the given buffer indices, writing them into the provided `to` arrayvec.
    ///
    /// This is similar to `vlib_get_buffers` in the C API.
    ///
    /// Note that although it would be more idiomatic to return an `ArrayVec` directly, this
    /// method takes a mutable reference to an `ArrayVec` to avoid an unnecessary copy when
    /// returning.
    ///
    /// # Safety
    ///
    /// - The caller must ensure that `to` has enough capacity to hold all the buffers
    ///   corresponding to the indices in `from_indices`.
    /// - Each index in `from_indices` must be valid and the caller must have ownership of the
    ///   buffer it corresponds to.
    /// - Each buffer's `feature_arc_index` and `current_config_index` must be consistent with
    ///   the `FeatureData` type. If they are not known (i.e. because the node isn't being
    ///   executed in a feature arc), `FeatureData` should be a zero-sized type such as `()`.
    /// - The capacity of `from_indices` must be a multiple of 8 (note though that the length is
    ///   allowed not to be). In other words, it must be valid to read multiples of 8 from the
    ///   underlying memory (possibly returning uninitialised or stale data) without faulting.
    #[inline(always)]
    pub unsafe fn get_buffers<'a, 'me, 'buf: 'me, FeatureData, const N: usize>(
        &'me self,
        from_indices: &'a [u32],
        to: &mut ArrayVec<&'buf mut BufferRef<FeatureData>, N>,
    ) {
        // Make the capacity precondition visible to the optimiser so later bounds logic folds.
        debug_assert!(from_indices.len() <= N);
        assert_unchecked(from_indices.len() <= N);

        // Debug-only sanity check: every index must translate to an offset inside buffer memory.
        #[cfg(debug_assertions)]
        for from_index in from_indices {
            let buffer_mem_size = (*(*self.as_ptr()).buffer_main).buffer_mem_size;
            debug_assert!(((*from_index << CLIB_LOG2_CACHE_LINE_BYTES) as u64) < buffer_mem_size);
        }

        let buffer_mem_start = (*(*self.as_ptr()).buffer_main).buffer_mem_start;

        // Check for the ArrayVec capacity being a multiple of 8 and if so the later
        // implementation can perform a write of 8 elements at a time without worrying about
        // writing beyond the end of the ArrayVec. If not, then fall back to a generic
        // implementation. This check will be evaluated at compile time and one implementation
        // or the other chosen.
        if !N.is_multiple_of(8) {
            // Scalar fallback: translate one index at a time (address = base + (index <<
            // log2(cache line size))) and push each buffer reference individually.
            let base = buffer_mem_start as *const i8;
            for from_index in from_indices.iter() {
                let ptr = base.add((*from_index << CLIB_LOG2_CACHE_LINE_BYTES) as usize)
                    as *mut vlib_buffer_t;
                to.push_unchecked(BufferRef::from_ptr_mut(ptr));
            }
            return;
        }

        // Round the work count up to a multiple of 8; the trailing over-read of `from_indices`
        // and over-write of `to` are permitted by the capacity preconditions above, and the
        // final length is fixed up by set_len at the end.
        let mut len = from_indices.len();
        len = next_multiple_of_pow2(len, 8);

        let mut from_index = from_indices.as_ptr();
        let mut to_ptr = to.as_mut_ptr();

        // 64 indices per iteration: widen to u64, shift by log2(cache line size), add the
        // buffer memory base, and store the resulting pointers directly into `to`'s storage.
        while len >= 64 {
            let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
            let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));
            let mut from_index_x8_3 = u64x8::from_u32_ptr(from_index.add(2 * 8));
            let mut from_index_x8_4 = u64x8::from_u32_ptr(from_index.add(3 * 8));
            let mut from_index_x8_5 = u64x8::from_u32_ptr(from_index.add(4 * 8));
            let mut from_index_x8_6 = u64x8::from_u32_ptr(from_index.add(5 * 8));
            let mut from_index_x8_7 = u64x8::from_u32_ptr(from_index.add(6 * 8));
            let mut from_index_x8_8 = u64x8::from_u32_ptr(from_index.add(7 * 8));

            from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_3.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_4.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_5.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_6.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_7.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_8.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

            let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
            let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);
            let buf_ptr_x8_3 = from_index_x8_3.add_u64(buffer_mem_start);
            let buf_ptr_x8_4 = from_index_x8_4.add_u64(buffer_mem_start);
            let buf_ptr_x8_5 = from_index_x8_5.add_u64(buffer_mem_start);
            let buf_ptr_x8_6 = from_index_x8_6.add_u64(buffer_mem_start);
            let buf_ptr_x8_7 = from_index_x8_7.add_u64(buffer_mem_start);
            let buf_ptr_x8_8 = from_index_x8_8.add_u64(buffer_mem_start);

            buf_ptr_x8_1.store(to_ptr as *mut u64);
            buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);
            buf_ptr_x8_3.store(to_ptr.add(2 * 8) as *mut u64);
            buf_ptr_x8_4.store(to_ptr.add(3 * 8) as *mut u64);
            buf_ptr_x8_5.store(to_ptr.add(4 * 8) as *mut u64);
            buf_ptr_x8_6.store(to_ptr.add(5 * 8) as *mut u64);
            buf_ptr_x8_7.store(to_ptr.add(6 * 8) as *mut u64);
            buf_ptr_x8_8.store(to_ptr.add(7 * 8) as *mut u64);

            to_ptr = to_ptr.add(64);
            from_index = from_index.add(64);
            len -= 64;
        }

        // Tail handling: at most one 32-wide step, one 16-wide step and one final 8-wide step
        // remain after the 64-wide loop.
        if likely(len >= 32) {
            let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
            let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));
            let mut from_index_x8_3 = u64x8::from_u32_ptr(from_index.add(2 * 8));
            let mut from_index_x8_4 = u64x8::from_u32_ptr(from_index.add(3 * 8));

            from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_3.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_4.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

            let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
            let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);
            let buf_ptr_x8_3 = from_index_x8_3.add_u64(buffer_mem_start);
            let buf_ptr_x8_4 = from_index_x8_4.add_u64(buffer_mem_start);

            buf_ptr_x8_1.store(to_ptr as *mut u64);
            buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);
            buf_ptr_x8_3.store(to_ptr.add(2 * 8) as *mut u64);
            buf_ptr_x8_4.store(to_ptr.add(3 * 8) as *mut u64);

            to_ptr = to_ptr.add(32);
            from_index = from_index.add(32);
            len -= 32;
        }

        if likely(len >= 16) {
            let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
            let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));

            from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

            let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
            let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);

            buf_ptr_x8_1.store(to_ptr as *mut u64);
            buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);

            to_ptr = to_ptr.add(16);
            from_index = from_index.add(16);
            len -= 16;
        }

        if likely(len > 0) {
            let mut from_index_x8 = u64x8::from_u32_ptr(from_index);
            from_index_x8.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
            let buf_ptr_x8 = from_index_x8.add_u64(buffer_mem_start);
            buf_ptr_x8.store(to_ptr as *mut u64);
        }

        // Only now expose the real number of valid entries (the rounded-up stores above may
        // have written garbage past this point, inside the ArrayVec's spare capacity).
        to.set_len(from_indices.len());
    }

    /// Enqueues a slice of buffer indices to a next node
    ///
    /// This corresponds to the VPP C function `vlib_buffer_enqueue_to_next`.
    ///
    /// # Safety
    ///
    /// - The length of the from and next slices must match.
    /// - The next node must have a `Vector` type of `u32` (or the C equivalent).
    /// - The next node must have a `Scalar` type of `()` (or the C equivalent).
    /// - The next node must have an `Aux` type of `()` (or the C equivalent).
    /// - `vlib_buffer_func_main` must have been filled in with valid function pointers (which
    ///   will be done by VPP at initialisation time).
    /// - The buffer state, such as `current_data` and `length` must be set according to the
    ///   preconditions of the next node.
    /// - Each entry in the `from` slice must be a valid index to a buffer.
    /// - Each entry in the `nexts` slice must be a valid next node index.
    #[inline(always)]
    pub unsafe fn buffer_enqueue_to_next<N: Node, V: VectorBufferIndex>(
        &self,
        node: &mut NodeRuntimeRef<N>,
        from: &[V],
        nexts: &[u16],
    ) {
        debug_assert_eq!(from.len(), nexts.len());
        // SAFETY: the caller asserts the function preconditions are true
        unsafe {
            (vlib_buffer_func_main
                .buffer_enqueue_to_next_fn
                .unwrap_unchecked())(
                self.as_ptr(),
                node.as_ptr(),
                VectorBufferIndex::as_u32_slice(from).as_ptr().cast_mut(),
                nexts.as_ptr() as *mut u16,
                from.len() as u64,
            )
        }
    }

    /// Allocate a single buffer
    ///
    /// This corresponds to the VPP C API of `vlib_alloc_buffers`.
    ///
    /// Returns [`BufferAllocError`] if VPP could not provide a buffer.
    #[cfg(feature = "experimental")]
    pub fn alloc_buffer(&self) -> Result<BufferWithContext<'_>, BufferAllocError> {
        // SAFETY: we have a reference to self so the pointer must also be valid, we pass in a
        // buffer pointer that is consistent with the number of buffers asked for, and on exit
        // of the function either the buffer value is filled in with a valid index we have
        // ownership of or not depending on the return value of the function.
        unsafe {
            let mut buffer = 0;
            let res = vlib_helper_buffer_alloc(self.as_ptr(), &mut buffer, 1);
            // The helper returns the number of buffers actually allocated.
            if res == 1 {
                Ok(BufferWithContext::from_parts(buffer, self))
            } else {
                Err(BufferAllocError)
            }
        }
    }
}
650
#[cfg(test)]
mod tests {
    use arrayvec::ArrayVec;

    use crate::{
        bindings::{vlib_buffer_main_t, vlib_buffer_t, vlib_main_t, CLIB_LOG2_CACHE_LINE_BYTES},
        vlib::{node::FRAME_SIZE, MainRef},
    };

    /// `get_buffers` must translate every index into the address of the matching buffer.
    ///
    /// 65 buffers are used so that both the 64-at-a-time main loop and the final <=8 remainder
    /// path are exercised in a single call.
    #[test]
    fn get_buffers() {
        let buffer = vlib_buffer_t::default();
        let buffers: [vlib_buffer_t; 65] = [buffer; 65];
        // Index capacity (72) is a multiple of 8, as required by get_buffers' safety contract
        // on `from_indices` (over-reads up to the rounded-up length must not fault).
        let buffer_indices: ArrayVec<u32, 72> = (0..65)
            .map(|n| {
                // Index n corresponds to an offset of n buffer-sizes in cache-line units.
                n * (std::mem::size_of::<vlib_buffer_t>() as u32 >> CLIB_LOG2_CACHE_LINE_BYTES)
            })
            .collect();
        let mut buffer_main = vlib_buffer_main_t {
            buffer_mem_start: std::ptr::addr_of!(buffers) as u64,
            buffer_mem_size: std::mem::size_of_val(&buffers) as u64,
            ..vlib_buffer_main_t::default()
        };
        let mut main = vlib_main_t {
            buffer_main: std::ptr::addr_of_mut!(buffer_main),
            ..vlib_main_t::default()
        };
        // SAFETY: pointers used by MainRef::get_buffers are initialised correctly and valid for
        // the duration of the call.
        unsafe {
            let mut to = ArrayVec::new();
            let main_ref = MainRef::from_ptr_mut(std::ptr::addr_of_mut!(main));
            main_ref.get_buffers::<(), FRAME_SIZE>(&buffer_indices, &mut to);
            let expected: Vec<&vlib_buffer_t> = buffers.iter().collect();
            assert_eq!(to.len(), expected.len());
            for (i, buf_ref) in to.iter().enumerate() {
                assert!(
                    buf_ref.as_ptr().cast_const() == std::ptr::addr_of!(buffers[i]),
                    "Buffer index {i} pointers don't match: {:p} expected {:p}",
                    buf_ref.as_ptr(),
                    std::ptr::addr_of!(buffers[i])
                );
            }
        }
    }
}