// vpp_plugin/vlib/buffer.rs
1//! vlib buffer abstraction
2//!
3//! This module contains abstractions around VPP's `vlib_buffer_t` structure.
4//! It provides safe access to buffer fields and operations.
5//! It also includes buffer allocation and deallocation functions.
6//! The goal is to provide a safe and ergonomic interface for working with VPP buffers.
7
8use std::{hint::assert_unchecked, mem::MaybeUninit};
9
10use arrayvec::ArrayVec;
11use bitflags::bitflags;
12
13use crate::{
14    bindings::{
15        CLIB_LOG2_CACHE_LINE_BYTES, VLIB_BUFFER_EXT_HDR_VALID, VLIB_BUFFER_IS_TRACED,
16        VLIB_BUFFER_MIN_CHAIN_SEG_SIZE, VLIB_BUFFER_NEXT_PRESENT, VLIB_BUFFER_PRE_DATA_SIZE,
17        VLIB_BUFFER_TOTAL_LENGTH_VALID, vlib_add_trace, vlib_buffer_func_main, vlib_buffer_t,
18        vlib_buffer_t__bindgen_ty_1, vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1,
19    },
20    vlib::{
21        self, MainRef,
22        node::{ErrorCounters, Node, NodeRuntimeRef, VectorBufferIndex},
23    },
24    vppinfra::{
25        cache::{prefetch_load, prefetch_store},
26        likely,
27    },
28};
29
30#[cfg(feature = "experimental")]
31use crate::bindings::{vlib_helper_buffer_alloc, vlib_helper_buffer_free};
32#[cfg(feature = "experimental")]
33use std::fmt;
34
/// VPP buffer index
///
/// A strongly-typed wrapper around the raw `u32` index VPP uses to identify a buffer.
#[repr(transparent)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BufferIndex(u32);

impl BufferIndex {
    /// Construct a new `BufferIndex` from a raw `u32` buffer index
    pub const fn new(buffer: u32) -> Self {
        BufferIndex(buffer)
    }
}

impl From<u32> for BufferIndex {
    fn from(value: u32) -> BufferIndex {
        BufferIndex::new(value)
    }
}

impl From<BufferIndex> for u32 {
    fn from(value: BufferIndex) -> Self {
        let BufferIndex(raw) = value;
        raw
    }
}
58
impl VectorBufferIndex for BufferIndex {
    // Reinterpret a slice of typed indices as the raw u32 slice expected by the C API, without
    // copying.
    fn as_u32_slice(slice: &[Self]) -> &[u32] {
        // SAFETY: BufferIndex is a repr(transparent) wrapper around u32 so the src and dst slice
        // types have the same memory layout
        unsafe { std::mem::transmute::<&[BufferIndex], &[u32]>(slice) }
    }
}
66
bitflags! {
    /// vlib buffer flags
    ///
    /// Mirrors the `VLIB_BUFFER_*` flag bits defined by the C bindings.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BufferFlags: u32 {
        /// Trace this buffer
        const IS_TRACED = VLIB_BUFFER_IS_TRACED;
        /// This is one buffer in a chain of buffers
        const NEXT_PRESENT = VLIB_BUFFER_NEXT_PRESENT;
        /// Total length is valid
        const TOTAL_LENGTH_VALID = VLIB_BUFFER_TOTAL_LENGTH_VALID;
        /// Contains external buffer manager header
        const EXT_HDR_VALID = VLIB_BUFFER_EXT_HDR_VALID;

        // Flags can be extended by user/vnet, so unknown bits must be preserved rather than
        // truncated.
        const _ = !0;
    }
}
85
/// Construct a user buffer flag
///
/// User flags occupy the high bits of the 32-bit flags word: flag `n` maps to bit `32 - n`, so
/// `vlib_buffer_flag_user(1)` is the most-significant bit. This mirrors the C macro
/// `VLIB_BUFFER_FLAG_USER(n)`.
///
/// `n` must be less than 29 and greater than 0.
///
/// # Panics
///
/// Panics (at compile time when evaluated in a const context) if `n` is not in the range
/// `1..29`.
pub const fn vlib_buffer_flag_user(n: u32) -> u32 {
    assert!(n < 29 && n > 0);
    1 << (32 - n)
}
93
/// Reference to a VPP buffer
///
/// A `&mut BufferRef<FeatureData>` is equivalent to a `vlib_buffer_t *` in C (a `*mut
/// vlib_buffer_t` in Rust).
///
/// The `FeatureData` type parameter is carried only in `PhantomData` (the type stores no data of
/// its own); it records the feature-arc data type associated with the buffer — see the safety
/// requirements of [`MainRef::get_buffers`].
#[repr(transparent)]
pub struct BufferRef<FeatureData>(foreign_types::Opaque, std::marker::PhantomData<FeatureData>);
100
impl<FeatureData> BufferRef<FeatureData> {
    /// Create a `&BufferRef` from a raw pointer
    ///
    /// # Safety
    ///
    /// - The pointer must be a valid and properly initialised `vlib_buffer_t`.
    /// - The pointer must stay valid and the contents must not be mutated for the duration of the
    ///   lifetime of the returned object.
    #[inline(always)]
    pub unsafe fn from_ptr<'a>(ptr: *mut vlib_buffer_t) -> &'a Self {
        // SAFETY: The safety requirements are documented in the function's safety comment.
        unsafe { &*(ptr as *mut _) }
    }

    /// Create a `&mut BufferRef` from a raw pointer
    ///
    /// # Safety
    ///
    /// - The pointer must be a valid and properly initialised `vlib_buffer_t`.
    /// - The pointer must stay valid and the contents must not be accessed through any other
    ///   pointer or reference for the duration of the lifetime of the returned object (the usual
    ///   `&mut` uniqueness requirement).
    #[inline(always)]
    pub unsafe fn from_ptr_mut<'a>(ptr: *mut vlib_buffer_t) -> &'a mut Self {
        // SAFETY: The safety requirements are documented in the function's safety comment.
        unsafe { &mut *(ptr as *mut _) }
    }

    /// Returns the raw pointer to the underlying `vlib_buffer_t`
    pub fn as_ptr(&self) -> *mut vlib_buffer_t {
        self as *const _ as *mut _
    }

    // Access the first anonymous-union arm of `vlib_buffer_t` (bindgen's view of the buffer
    // fields).
    fn as_details(&self) -> &vlib_buffer_t__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { (*self.as_ptr()).__bindgen_anon_1.as_ref() }
    }

    // Mutable variant of `as_details`.
    fn as_details_mut(&mut self) -> &mut vlib_buffer_t__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { (*self.as_ptr()).__bindgen_anon_1.as_mut() }
    }

    // Access the nested anonymous struct holding the per-buffer metadata fields
    // (current_data, current_length, flags, error, ...).
    pub(crate) fn as_metadata(&self) -> &vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe { self.as_details().__bindgen_anon_1.__bindgen_anon_1.as_ref() }
    }

    // Mutable variant of `as_metadata`.
    pub(crate) fn as_metadata_mut(
        &mut self,
    ) -> &mut vlib_buffer_t__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1 {
        // SAFETY: since the reference to self is valid, so must be the pointer and it's safe to
        // use the __bindgen_anon_1 union arm since the union is just present to force alignment.
        // Creation preconditions mean there are no aliased accesses to the buffer so it's fine
        // to take a reference
        unsafe {
            self.as_details_mut()
                .__bindgen_anon_1
                .__bindgen_anon_1
                .as_mut()
        }
    }

    // Pointer to the start of the buffer's data area (not the current read position — see
    // `current_ptr_mut` for that).
    fn data(&self) -> *const u8 {
        self.as_details().data.as_ptr()
    }

    // Signed offset of the current data from the start of the data area; negative values point
    // into the pre-data region.
    fn current_data_offset(&self) -> i16 {
        self.as_metadata().current_data
    }

    fn current_data_offset_mut(&mut self) -> &mut i16 {
        &mut self.as_metadata_mut().current_data
    }

    /// Current length
    ///
    /// Typically, this is the amount of packet data remaining from [`Self::current_ptr_mut`].
    pub fn current_length(&self) -> u16 {
        self.as_metadata().current_length
    }

    fn current_length_mut(&mut self) -> &mut u16 {
        &mut self.as_metadata_mut().current_length
    }

    /// Buffer flags
    pub fn flags(&self) -> BufferFlags {
        BufferFlags::from_bits_retain(self.as_metadata().flags)
    }

    /// Get a pointer to the current data
    ///
    /// This corresponds to the VPP C API `vlib_buffer_get_current`.
    ///
    /// # Usage guidance
    ///
    /// Note that the pointer returned may point to uninitialised data depending on the context.
    /// In addition, depending on the context, the amount of remaining data may be less than
    /// expected. Finally, if remaining data is sufficient and it's initialised it may not have
    /// been validated, so care must be taken in determining whether or not lengths in the
    /// headers can be trusted.
    pub fn current_ptr_mut(&mut self) -> *mut u8 {
        let data = self.data().cast_mut();
        let current_data = self.current_data_offset();

        debug_assert!(current_data >= -(VLIB_BUFFER_PRE_DATA_SIZE as i16));

        // SAFETY: current_data is asserted to be valid and point into valid (but possibly
        // uninitialised) data or pre_data.
        unsafe { data.offset(current_data as isize) }
    }

    /// Check if the buffer has space for `l` more bytes
    ///
    /// This corresponds to the VPP C API `vlib_buffer_has_space`.
    // NOTE(review): a negative `l` wraps to a large u16 here and so returns false, whereas the C
    // version compares with signed semantics — confirm callers never pass a negative value.
    pub fn has_space(&self, l: i16) -> bool {
        self.current_length() >= l as u16
    }

    /// Advance the current data pointer by `l` bytes
    ///
    /// This corresponds to the VPP C API `vlib_buffer_advance`.
    ///
    /// # Safety
    ///
    /// - If `l` is positive, the buffer must have at least `l` bytes of data remaining.
    /// - If `l` is negative, the current data offset must be at least `-l` bytes from the start of
    ///   the buffer's data area (including pre-data).
    pub unsafe fn advance(&mut self, l: i16) {
        debug_assert!(l < 0 || self.current_length() >= l as u16);
        debug_assert!(
            l >= 0 || self.current_data_offset() + VLIB_BUFFER_PRE_DATA_SIZE as i16 >= -l
        );

        // Moving forward consumes data; moving backwards (negative l) exposes more.
        *self.current_data_offset_mut() += l;
        if l >= 0 {
            *self.current_length_mut() -= l as u16;
        } else {
            *self.current_length_mut() += -l as u16;
        }

        // Chained buffers must keep a minimum segment size in each non-final segment.
        debug_assert!(
            !self.flags().contains(BufferFlags::NEXT_PRESENT)
                || self.current_length() >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE as u16
        );
    }

    /// Get a pointer to the end of the current data
    ///
    /// This corresponds to the VPP C function `vlib_buffer_get_tail`.
    pub fn tail_mut(&mut self) -> *mut u8 {
        let data = self.data().cast_mut();
        let current_data = self.current_data_offset();

        debug_assert!(current_data >= -(VLIB_BUFFER_PRE_DATA_SIZE as i16));

        // SAFETY: current_data and current_length are asserted to be valid and `current_data +
        // current_length` asserted to point to the end of valid (but possibly uninitialised) data
        // or pre_data.
        unsafe {
            let ptr = data.offset(current_data as isize);
            ptr.add(self.current_length() as usize)
        }
    }

    /// Add trace data to this buffer
    ///
    /// Returns uninitialised storage for the node's trace record; the caller is expected to
    /// fill it in.
    pub fn add_trace<N: Node>(
        &mut self,
        vm: &MainRef,
        node: &NodeRuntimeRef<N>,
    ) -> &mut MaybeUninit<N::TraceData> {
        // SAFETY: pointers are valid and the uninitialised data that is returned cannot be read
        // by safe code
        unsafe {
            &mut *(vlib_add_trace(
                vm.as_ptr(),
                node.as_ptr(),
                self.as_ptr(),
                std::mem::size_of::<N::TraceData>() as u32,
            ) as *mut MaybeUninit<N::TraceData>)
        }
    }

    /// Set an error reason
    ///
    /// This is typically done before sending the packet to the `drop` node, where it uses the
    /// value to display the reason in traces and automatically increment the per-node, per-error
    /// counter for the error.
    pub fn set_error<N: Node>(&mut self, node: &NodeRuntimeRef<N>, error: N::Errors) {
        // SAFETY: vlib_node_runtime_t::errors is sized according to the number of error values
        // and it is a precondition of the Errors trait that the value return by into_u16() cannot
        // be greater than or equal to the declared number of error values.
        unsafe {
            let error_value = (*node.as_ptr()).errors.add(error.into_u16() as usize);
            self.as_metadata_mut().error = *error_value;
        }
    }

    /// Get the total length of the buffer chain not including the first buffer
    ///
    /// Only meaningful when the `TOTAL_LENGTH_VALID` flag is set (checked in debug builds).
    #[inline(always)]
    pub fn total_length_not_including_first_buffer(&self) -> u32 {
        debug_assert!(self.flags().contains(BufferFlags::TOTAL_LENGTH_VALID));
        self.as_details().total_length_not_including_first_buffer
    }

    /// Get the total length of the buffer chain from the current offset
    ///
    /// Note that this doesn't take into account any bytes that have been [`Self::advance()`]d
    /// over.
    #[inline(always)]
    pub fn length_in_chain(&self, vm: &vlib::MainRef) -> u64 {
        let len = self.current_length();

        // Fast path: single (unchained) buffer.
        if likely(!self.flags().contains(BufferFlags::NEXT_PRESENT)) {
            return len as u64;
        }

        // Chained, but the cached total is valid: no need to walk the chain.
        if likely(self.flags().contains(BufferFlags::TOTAL_LENGTH_VALID)) {
            return len as u64 + self.total_length_not_including_first_buffer() as u64;
        }

        // SAFETY: The buffer pointer is valid and the function is called in a valid context.
        unsafe {
            crate::bindings::vlib_buffer_length_in_chain_slow_path(vm.as_ptr(), self.as_ptr())
        }
    }

    /// Hint to the CPU to prefetch the buffer header for read access.
    ///
    /// This is a performance hint that attempts to bring the buffer header into the CPU cache
    /// prior to reading fields from it. It does not affect program semantics and may be a no-op
    /// on some platforms. Use this when you will shortly read header fields and want to reduce
    /// cache miss latency.
    pub fn prefetch_header_load(&self) {
        prefetch_load(self.as_ptr());
    }

    /// Hint to the CPU to prefetch the buffer header for write access.
    ///
    /// Similar to `prefetch_header_load` but indicates imminent writes to the header. This is a
    /// performance optimization only and does not change observable behaviour other than timing.
    pub fn prefetch_header_store(&self) {
        prefetch_store(self.as_ptr());
    }

    /// Hint to the CPU to prefetch the buffer data area for read access.
    ///
    /// This brings the buffer's data into cache in preparation for reading the packet payload.
    /// It is a non-semantic performance hint and may be ignored on some architectures. Use this
    /// when you will shortly read packet data and want to reduce cache miss latency.
    pub fn prefetch_data_load(&self) {
        // Prefetches from the start of the data area, not the current data offset.
        prefetch_load(&self.as_details().data);
    }

    /// Hint to the CPU to prefetch the buffer data area for write access.
    ///
    /// Similar to `prefetch_data_load` but indicates the caller will write into the data area.
    /// This is a cache-warming hint to reduce latency on subsequent stores.
    pub fn prefetch_data_store(&self) {
        prefetch_store(&self.as_details().data);
    }
}
371
/// Owned buffer (with context)
///
/// The `&MainRef` context is necessary to be able to free the buffer on drop.
#[cfg(feature = "experimental")]
pub struct BufferWithContext<'a> {
    // Raw VPP buffer index that this handle owns; freed in Drop.
    buffer: u32,
    // Main context used to resolve the index and free the buffer.
    vm: &'a MainRef,
}
380
#[cfg(feature = "experimental")]
impl<'a> BufferWithContext<'a> {
    /// Creates a `BufferWithContext` directly from a buffer index and a main reference
    ///
    /// # Safety
    /// - The buffer index must be valid and the caller must have ownership of the buffer it
    ///   corresponds to.
    pub unsafe fn from_parts(buffer: u32, vm: &'a MainRef) -> Self {
        Self { buffer, vm }
    }

    /// Get a mutable reference to the buffer
    pub fn as_buffer_ref(&mut self) -> &mut BufferRef<()> {
        let from = &[self.buffer];
        let mut b: ArrayVec<_, 1> = ArrayVec::new();
        // SAFETY: capacity of b equals the length of from, `self.buffer` is a valid index and we
        // force FeatureData to `()` since it isn't known and the buffer cannot be part of a
        // feature arc.
        // NOTE(review): N == 1 selects the scalar fallback path inside `get_buffers`, so no
        // 8-wide reads of `from` occur despite the stricter multiple-of-8 wording in its safety
        // docs — confirm that precondition is meant to apply only to the SIMD path.
        unsafe {
            self.vm.get_buffers(from, &mut b);
        }
        b.remove(0)
    }
}
405
#[cfg(feature = "experimental")]
impl Drop for BufferWithContext<'_> {
    // Return the owned buffer to VPP when the handle goes out of scope.
    fn drop(&mut self) {
        // SAFETY: we have a reference to MainRef so the pointer must be valid, we pass in a
        // pointer to buffers consistent with the number of buffers passed in, and self.buffer
        // is a valid buffer index that we have ownership of.
        unsafe {
            vlib_helper_buffer_free(self.vm.as_ptr(), &mut self.buffer, 1);
        }
    }
}
417
/// Buffer allocation error
///
/// Returned by [`MainRef::alloc_buffer`] when VPP cannot provide a buffer.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg(feature = "experimental")]
pub struct BufferAllocError;

#[cfg(feature = "experimental")]
impl fmt::Display for BufferAllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("buffer allocation error")
    }
}

#[cfg(feature = "experimental")]
impl std::error::Error for BufferAllocError {}
432
/// u64 x 8
///
/// This type exists to strongly hint to the compiler that it should emit vector instructions.
///
/// In the future, the implementation might be changed to use standard library portable SIMD once
/// stabilised (https://github.com/rust-lang/rust/issues/86656), or use arch-specific intrinsics
/// (if evidenced by high-enough performance improvement).
#[allow(non_camel_case_types)]
pub(crate) struct u64x8([u64; 8]);

impl u64x8 {
    /// Build a `u64x8` directly from an array of 8 lanes
    #[inline(always)]
    pub(crate) fn from_array(a: [u64; 8]) -> Self {
        Self(a)
    }

    /// Build a `u64x8` by zero-extending 8 consecutive `u32`s read from `ptr`
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for reading 8 consecutive `u32` values.
    #[inline(always)]
    pub(crate) unsafe fn from_u32_ptr(ptr: *const u32) -> Self {
        // SAFETY: the caller guarantees `ptr` is readable for 8 u32s; each lane is widened to
        // u64.
        unsafe { Self(std::array::from_fn(|lane| *ptr.add(lane) as u64)) }
    }

    /// Shift each lane left by the constant `OFFSET`, assigning the result to `self`
    #[inline(always)]
    pub(crate) fn shift_elements_left<const OFFSET: u32>(&mut self) {
        for lane in self.0.iter_mut() {
            *lane <<= OFFSET;
        }
    }

    /// Return a new `u64x8` with `value` added to every lane
    #[inline(always)]
    pub(crate) fn add_u64(&self, value: u64) -> Self {
        Self(self.0.map(|lane| lane + value))
    }

    /// Write the 8 lanes contiguously starting at `ptr`
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for writing 8 consecutive `u64` values.
    #[inline(always)]
    pub(crate) unsafe fn store(&self, ptr: *mut u64) {
        // SAFETY: the caller guarantees `ptr` is writable for 8 u64s.
        unsafe {
            for (lane, value) in self.0.iter().enumerate() {
                *ptr.add(lane) = *value;
            }
        }
    }
}
507
/// Round `val` up to the next multiple of `pow2`, which must be a power of two
const fn next_multiple_of_pow2(val: usize, pow2: usize) -> usize {
    debug_assert!(pow2.is_power_of_two());
    // Add the mask then clear the low bits: classic power-of-two round-up.
    let mask = pow2 - 1;
    (val + mask) & !mask
}
513
impl MainRef {
    /// Get pointers to buffers for the given buffer indices, writing them into the provided `to` arrayvec.
    ///
    /// This is similar to `vlib_get_buffers` in the C API.
    ///
    /// Note that although it would be more idiomatic to return an `ArrayVec` directly, this
    /// method takes a mutable reference to an `ArrayVec` to avoid an unnecessary copy when
    /// returning.
    ///
    /// # Safety
    ///
    /// - The caller must ensure that `to` has enough capacity to hold all the buffers
    ///   corresponding to the indices in `from_indices`.
    /// - Each index in `from_indices` must be valid and the caller must have ownership of the
    ///   buffer it corresponds to.
    /// - Each buffer's `feature_arc_index` and `current_config_index` must be consistent with
    ///   the `FeatureData` type. If they are not known (i.e. because the node isn't being
    ///   executed in a feature arc), FeatureData should be a zero-sized type such as `()`.
    /// - The capacity of `from_indices` must be a multiple of 8 (note though that the length is
    ///   allowed not to be). In other words, it must be valid to read multiples of 8 from the
    ///   underlying memory (possibly returning uninitialised or stale data) without faulting.
    #[inline(always)]
    pub unsafe fn get_buffers<'a, 'me, 'buf: 'me, FeatureData, const N: usize>(
        &'me self,
        from_indices: &'a [u32],
        to: &mut ArrayVec<&'buf mut BufferRef<FeatureData>, N>,
    ) {
        // SAFETY: The safety requirements are documented in the function's safety comment.
        unsafe {
            debug_assert!(from_indices.len() <= N);
            assert_unchecked(from_indices.len() <= N);

            // Debug-only sanity check: every index, converted to a byte offset, must fall
            // inside buffer memory.
            #[cfg(debug_assertions)]
            for from_index in from_indices {
                let buffer_mem_size = (*(*self.as_ptr()).buffer_main).buffer_mem_size;
                debug_assert!(
                    ((*from_index << CLIB_LOG2_CACHE_LINE_BYTES) as u64) < buffer_mem_size
                );
            }

            let buffer_mem_start = (*(*self.as_ptr()).buffer_main).buffer_mem_start;

            // Check for the ArrayVec capacity being a multiple of 8 and if so the later
            // implementation can perform a write of 8 elements at a time without worrying about
            // writing beyond the end of the ArrayVec. If not, then fall back to a generic
            // implementation. This check will be evaluated at compile time and one implementation
            // or the other chosen.
            if !N.is_multiple_of(8) {
                // Scalar fallback: one index -> pointer translation at a time, reading and
                // writing only `from_indices.len()` elements.
                let base = buffer_mem_start as *const i8;
                for from_index in from_indices.iter() {
                    let ptr = base.add((*from_index << CLIB_LOG2_CACHE_LINE_BYTES) as usize)
                        as *mut vlib_buffer_t;
                    to.push_unchecked(BufferRef::from_ptr_mut(ptr));
                }
                return;
            }

            // Round the work up to a multiple of 8: every group below reads and writes exactly
            // 8 elements. The over-read/over-write is covered by the safety preconditions
            // (capacity of `from_indices` and N are multiples of 8); the final set_len()
            // truncates `to` back to the real length.
            let mut len = from_indices.len();
            len = next_multiple_of_pow2(len, 8);

            let mut from_index = from_indices.as_ptr();
            let mut to_ptr = to.as_mut_ptr();

            // Main loop: 64 indices at a time. Each group of 8 is widened to u64, shifted from
            // cache-line units to a byte offset, and offset by the buffer memory base address.
            while len >= 64 {
                let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
                let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));
                let mut from_index_x8_3 = u64x8::from_u32_ptr(from_index.add(2 * 8));
                let mut from_index_x8_4 = u64x8::from_u32_ptr(from_index.add(3 * 8));
                let mut from_index_x8_5 = u64x8::from_u32_ptr(from_index.add(4 * 8));
                let mut from_index_x8_6 = u64x8::from_u32_ptr(from_index.add(5 * 8));
                let mut from_index_x8_7 = u64x8::from_u32_ptr(from_index.add(6 * 8));
                let mut from_index_x8_8 = u64x8::from_u32_ptr(from_index.add(7 * 8));

                from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_3.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_4.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_5.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_6.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_7.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_8.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

                let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
                let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);
                let buf_ptr_x8_3 = from_index_x8_3.add_u64(buffer_mem_start);
                let buf_ptr_x8_4 = from_index_x8_4.add_u64(buffer_mem_start);
                let buf_ptr_x8_5 = from_index_x8_5.add_u64(buffer_mem_start);
                let buf_ptr_x8_6 = from_index_x8_6.add_u64(buffer_mem_start);
                let buf_ptr_x8_7 = from_index_x8_7.add_u64(buffer_mem_start);
                let buf_ptr_x8_8 = from_index_x8_8.add_u64(buffer_mem_start);

                buf_ptr_x8_1.store(to_ptr as *mut u64);
                buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);
                buf_ptr_x8_3.store(to_ptr.add(2 * 8) as *mut u64);
                buf_ptr_x8_4.store(to_ptr.add(3 * 8) as *mut u64);
                buf_ptr_x8_5.store(to_ptr.add(4 * 8) as *mut u64);
                buf_ptr_x8_6.store(to_ptr.add(5 * 8) as *mut u64);
                buf_ptr_x8_7.store(to_ptr.add(6 * 8) as *mut u64);
                buf_ptr_x8_8.store(to_ptr.add(7 * 8) as *mut u64);

                to_ptr = to_ptr.add(64);
                from_index = from_index.add(64);
                len -= 64;
            }

            // Tail handling: after each step `len` remains a multiple of 8, so after the 32- and
            // 16-element steps at most one group of 8 remains for the final block.
            if likely(len >= 32) {
                let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
                let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));
                let mut from_index_x8_3 = u64x8::from_u32_ptr(from_index.add(2 * 8));
                let mut from_index_x8_4 = u64x8::from_u32_ptr(from_index.add(3 * 8));

                from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_3.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_4.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

                let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
                let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);
                let buf_ptr_x8_3 = from_index_x8_3.add_u64(buffer_mem_start);
                let buf_ptr_x8_4 = from_index_x8_4.add_u64(buffer_mem_start);

                buf_ptr_x8_1.store(to_ptr as *mut u64);
                buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);
                buf_ptr_x8_3.store(to_ptr.add(2 * 8) as *mut u64);
                buf_ptr_x8_4.store(to_ptr.add(3 * 8) as *mut u64);

                to_ptr = to_ptr.add(32);
                from_index = from_index.add(32);
                len -= 32;
            }

            if likely(len >= 16) {
                let mut from_index_x8_1 = u64x8::from_u32_ptr(from_index);
                let mut from_index_x8_2 = u64x8::from_u32_ptr(from_index.add(8));

                from_index_x8_1.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                from_index_x8_2.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();

                let buf_ptr_x8_1 = from_index_x8_1.add_u64(buffer_mem_start);
                let buf_ptr_x8_2 = from_index_x8_2.add_u64(buffer_mem_start);

                buf_ptr_x8_1.store(to_ptr as *mut u64);
                buf_ptr_x8_2.store(to_ptr.add(8) as *mut u64);

                to_ptr = to_ptr.add(16);
                from_index = from_index.add(16);
                len -= 16;
            }

            if likely(len > 0) {
                // Exactly one group of 8 left at this point (len is a multiple of 8 below 16).
                let mut from_index_x8 = u64x8::from_u32_ptr(from_index);
                from_index_x8.shift_elements_left::<CLIB_LOG2_CACHE_LINE_BYTES>();
                let buf_ptr_x8 = from_index_x8.add_u64(buffer_mem_start);
                buf_ptr_x8.store(to_ptr as *mut u64);
            }

            // Truncate back to the caller's real length: the rounded-up groups may have written
            // up to 7 extra (garbage) slots beyond it.
            to.set_len(from_indices.len());
        }
    }

    /// Enqueues a slice of buffer indices to a next node
    ///
    /// This corresponds to the VPP C function `vlib_buffer_enqueue_to_next`.
    ///
    /// # Safety
    ///
    /// - The length of the from and next slices must match.
    /// - The next node must have a `Vector` type of `u32` (or the C equivalent).
    /// - The next node must have a `Scalar` type of `()` (or the C equivalent).
    /// - The next node must have an `Aux` type of `()` (or the C equivalent).
    /// - `vlib_buffer_func_main` must have been filled in with valid function pointers (which
    ///   will be done by VPP at initialisation time).
    /// - The buffer state, such as `current_data` and `length` must be set according to the
    ///   preconditions of the next node.
    /// - Each entry in the `from` slice must be a valid index to a buffer.
    /// - Each entry in the `nexts` slice must be a valid next node index.
    #[inline(always)]
    pub unsafe fn buffer_enqueue_to_next<N: Node, V: VectorBufferIndex>(
        &self,
        node: &mut NodeRuntimeRef<N>,
        from: &[V],
        nexts: &[u16],
    ) {
        debug_assert_eq!(from.len(), nexts.len());
        // SAFETY: the caller asserts the function preconditions are true
        unsafe {
            (vlib_buffer_func_main
                .buffer_enqueue_to_next_fn
                .unwrap_unchecked())(
                self.as_ptr(),
                node.as_ptr(),
                VectorBufferIndex::as_u32_slice(from).as_ptr().cast_mut(),
                nexts.as_ptr() as *mut u16,
                from.len() as u64,
            )
        }
    }

    /// Allocate a single buffer
    ///
    /// This corresponds to the VPP C API of `vlib_alloc_buffers`.
    #[cfg(feature = "experimental")]
    pub fn alloc_buffer(&self) -> Result<BufferWithContext<'_>, BufferAllocError> {
        // SAFETY: we have a reference to self so the pointer must also be valid, we pass in a
        // buffer pointer that is consistent with the number of buffers asked for, and on exit
        // of the function either the buffer value is filled in with a valid index we have
        // ownership of or not depending on the return value of the function.
        unsafe {
            let mut buffer = 0;
            // The helper returns the number of buffers actually allocated; anything other
            // than the 1 requested is treated as failure.
            let res = vlib_helper_buffer_alloc(self.as_ptr(), &mut buffer, 1);
            if res == 1 {
                Ok(BufferWithContext::from_parts(buffer, self))
            } else {
                Err(BufferAllocError)
            }
        }
    }
}
732
#[cfg(test)]
mod tests {
    use arrayvec::ArrayVec;

    use crate::{
        bindings::{CLIB_LOG2_CACHE_LINE_BYTES, vlib_buffer_main_t, vlib_buffer_t, vlib_main_t},
        vlib::{MainRef, node::FRAME_SIZE},
    };

    #[test]
    fn get_buffers() {
        // 119 buffers: a non-multiple-of-8 count that rounds up to 120 = 64 + 32 + 16 + 8, so
        // the main 64-wide loop and every tail branch of `MainRef::get_buffers` are exercised.
        let buffer = vlib_buffer_t::default();
        let buffers = [buffer; 119];
        // Buffer indices are expressed in cache-line units relative to `buffer_mem_start`.
        let buffer_indices: ArrayVec<u32, 128> = (0..buffers.len() as u32)
            .map(|n| {
                n * (std::mem::size_of::<vlib_buffer_t>() as u32 >> CLIB_LOG2_CACHE_LINE_BYTES)
            })
            .collect();
        // Fake buffer memory backed by the local `buffers` array.
        let mut buffer_main = vlib_buffer_main_t {
            buffer_mem_start: std::ptr::addr_of!(buffers) as u64,
            buffer_mem_size: std::mem::size_of_val(&buffers) as u64,
            ..vlib_buffer_main_t::default()
        };
        let mut main = vlib_main_t {
            buffer_main: std::ptr::addr_of_mut!(buffer_main),
            ..vlib_main_t::default()
        };
        // SAFETY: pointers used by MainRef::get_buffers are initialised correctly and valid for
        // the duration of the call.
        unsafe {
            let mut to = ArrayVec::new();
            let main_ref = MainRef::from_ptr_mut(std::ptr::addr_of_mut!(main));
            main_ref.get_buffers::<(), FRAME_SIZE>(&buffer_indices, &mut to);
            // Every returned reference must point at the corresponding element of `buffers`.
            let expected: Vec<&vlib_buffer_t> = buffers.iter().collect();
            assert_eq!(to.len(), expected.len());
            for (i, buf_ref) in to.iter().enumerate() {
                assert!(
                    buf_ref.as_ptr().cast_const() == std::ptr::addr_of!(buffers[i]),
                    "Buffer index {i} pointers don't match: {:p} expected {:p}",
                    buf_ref.as_ptr(),
                    std::ptr::addr_of!(buffers[i])
                );
            }
        }
    }
}