Skip to main content

dpdk_sys/
stubs.rs

1//! Stub implementations of DPDK functions and types
2//!
3//! These stubs allow the crate to compile and run without DPDK installed.
4//! They provide the same API surface but don't perform actual packet I/O.
5
6use libc::{c_char, c_int, c_uint, c_void};
7use std::ptr;
8
9// ============================================================================
10// Constants
11// ============================================================================
12
// Build-time limits (stub values; real DPDK sizes these from its build config).
pub const RTE_MAX_ETHPORTS: usize = 32;
pub const RTE_MAX_LCORE: usize = 128;

// Ethernet basics: MAC address length and common EtherType codes
// (values in host byte order; the wire carries EtherType big-endian).
pub const RTE_ETHER_ADDR_LEN: usize = 6;
pub const RTE_ETHER_TYPE_IPV4: u16 = 0x0800;
pub const RTE_ETHER_TYPE_IPV6: u16 = 0x86DD;
pub const RTE_ETHER_TYPE_ARP: u16 = 0x0806;
pub const RTE_ETHER_TYPE_VLAN: u16 = 0x8100;

// Default mbuf buffer size: 2048 bytes of data room plus the headroom
// reserved at the front of every buffer for prepending headers.
pub const RTE_MBUF_DEFAULT_BUF_SIZE: u16 = 2048 + 128; // RTE_PKTMBUF_HEADROOM
pub const RTE_PKTMBUF_HEADROOM: u16 = 128;

// Per-port TX offload capability bits (advertised in rte_eth_dev_info,
// requested via rte_eth_conf.txmode.offloads).
pub const RTE_ETH_TX_OFFLOAD_VLAN_INSERT: u64 = 0x00000001;
pub const RTE_ETH_TX_OFFLOAD_IPV4_CKSUM: u64 = 0x00000002;
pub const RTE_ETH_TX_OFFLOAD_UDP_CKSUM: u64 = 0x00000004;
pub const RTE_ETH_TX_OFFLOAD_TCP_CKSUM: u64 = 0x00000008;

// Per-port RX offload capability bits (rte_eth_conf.rxmode.offloads).
pub const RTE_ETH_RX_OFFLOAD_VLAN_STRIP: u64 = 0x00000001;
pub const RTE_ETH_RX_OFFLOAD_IPV4_CKSUM: u64 = 0x00000002;
pub const RTE_ETH_RX_OFFLOAD_UDP_CKSUM: u64 = 0x00000004;
pub const RTE_ETH_RX_OFFLOAD_TCP_CKSUM: u64 = 0x00000008;

// Mbuf TX offload flags (set by application in ol_flags, consumed by NIC)
pub const RTE_MBUF_F_TX_IPV4: u64 = 1 << 55;
pub const RTE_MBUF_F_TX_IP_CKSUM: u64 = 1 << 54;
// UDP occupies both bits of the 2-bit L4 checksum field (bits 52-53),
// matching DPDK's encoding where UDP = 0b11 << 52.
pub const RTE_MBUF_F_TX_UDP_CKSUM: u64 = 3 << 52;

// Mbuf TX VLAN offload flag: tells the NIC to insert a VLAN tag from mbuf.vlan_tci
pub const RTE_MBUF_F_TX_VLAN: u64 = 1 << 57;

// Mbuf RX VLAN offload flags: NIC saw a VLAN tag / has stripped it into mbuf.vlan_tci
pub const RTE_MBUF_F_RX_VLAN: u64 = 1 << 0;
pub const RTE_MBUF_F_RX_VLAN_STRIPPED: u64 = 1 << 6;

// Mbuf RX IP-checksum status (set by NIC, consumed by application).
// The two mask bits encode four states: unknown (00), bad, good, none.
pub const RTE_MBUF_F_RX_IP_CKSUM_MASK: u64 = (1 << 4) | (1 << 7);
pub const RTE_MBUF_F_RX_IP_CKSUM_GOOD: u64 = 1 << 7;
pub const RTE_MBUF_F_RX_IP_CKSUM_BAD: u64 = 1 << 4;
pub const RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN: u64 = 0;

// Mbuf RX L4-checksum status, same four-state encoding as the IP variant.
pub const RTE_MBUF_F_RX_L4_CKSUM_MASK: u64 = (1 << 3) | (1 << 8);
pub const RTE_MBUF_F_RX_L4_CKSUM_GOOD: u64 = 1 << 8;
pub const RTE_MBUF_F_RX_L4_CKSUM_BAD: u64 = 1 << 3;
pub const RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN: u64 = 0;

// Error codes: base of the DPDK-private errno range.
pub const RTE_ERRNO_BASE: c_int = 1000;

// NUMA socket constants: "any socket" wildcard for allocations.
pub const SOCKET_ID_ANY: c_int = -1;
62
63// ============================================================================
64// Core Types
65// ============================================================================
66
/// Ethernet address (MAC address)
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_ether_addr {
    pub addr_bytes: [u8; RTE_ETHER_ADDR_LEN],
}

/// Ethernet header.
///
/// `#[repr(C, packed)]` matches the 14-byte on-wire layout. Multi-byte
/// fields (here `ether_type`) are big-endian on the wire; the stub stores
/// plain integers and leaves byte-order conversion to the caller (see
/// `rte_cpu_to_be_16` / `rte_be_to_cpu_16`).
///
/// NOTE: taking a reference to a field of a packed struct is undefined
/// behavior in Rust — read/write fields by value only.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_ether_hdr {
    pub dst_addr: rte_ether_addr,
    pub src_addr: rte_ether_addr,
    pub ether_type: u16,
}

/// IPv4 header (fixed 20-byte form; options beyond IHL=5 are not modeled).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_ipv4_hdr {
    pub version_ihl: u8,      // version (high nibble) + header length in 32-bit words
    pub type_of_service: u8,
    pub total_length: u16,    // big-endian on the wire
    pub packet_id: u16,
    pub fragment_offset: u16, // flags (3 bits) + fragment offset
    pub time_to_live: u8,
    pub next_proto_id: u8,    // e.g. 6 = TCP, 17 = UDP
    pub hdr_checksum: u16,    // zeroed by the caller before checksum computation (DPDK convention)
    pub src_addr: u32,
    pub dst_addr: u32,
}

/// UDP header (8 bytes).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_udp_hdr {
    pub src_port: u16,
    pub dst_port: u16,
    pub dgram_len: u16,   // length of UDP header + payload
    pub dgram_cksum: u16,
}

/// TCP header (20 bytes without options).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_tcp_hdr {
    pub src_port: u16,
    pub dst_port: u16,
    pub sent_seq: u32,
    pub recv_ack: u32,
    pub data_off: u8,  // data offset in 32-bit words (high nibble)
    pub tcp_flags: u8,
    pub rx_win: u16,
    pub cksum: u16,
    pub tcp_urp: u16,  // urgent pointer
}
123
124// ============================================================================
125// Memory Buffer (mbuf) Types
126// ============================================================================
127
/// Memory buffer for packet data.
///
/// Simplified stand-in for DPDK's `struct rte_mbuf`: the field set covers
/// what this crate touches, but offsets are NOT guaranteed to match the
/// real DPDK layout — do not pass this struct across FFI to a real DPDK
/// library.
#[repr(C)]
#[derive(Debug)]
pub struct rte_mbuf {
    /// Virtual address of the data buffer (owned separately from the mbuf).
    pub buf_addr: *mut c_void,
    /// IO/physical address of the buffer (never set by the stubs).
    pub buf_iova: u64,

    // First cache line
    /// Offset of the start of packet data within the buffer (headroom).
    pub data_off: u16,
    /// Reference count (the stub allocator always uses 1).
    pub refcnt: u16,
    /// Number of segments in the chain, this mbuf included.
    pub nb_segs: u16,
    /// Input port the packet arrived on.
    pub port: u16,
    /// Offload flags (RTE_MBUF_F_RX_* / RTE_MBUF_F_TX_* bits).
    pub ol_flags: u64,

    // Packet metadata
    pub packet_type: u32,
    /// Total data length across all segments of the chain.
    pub pkt_len: u32,
    /// Data length of this segment only.
    pub data_len: u16,
    /// VLAN tag control information (meaningful with the VLAN ol_flags).
    pub vlan_tci: u16,

    // Hash
    pub hash: rte_mbuf_hash,

    pub vlan_tci_outer: u16,
    /// Total size of the attached data buffer.
    pub buf_len: u16,

    /// Pool this mbuf was allocated from (unused by the stub allocator).
    pub pool: *mut rte_mempool,

    // Second cache line
    /// Next segment of a multi-segment packet, or null.
    pub next: *mut rte_mbuf,

    // Tx offload
    /// Packed L2/L3/L4 header lengths consumed by TX offloads.
    pub tx_offload: u64,

    pub priv_size: u16,
    pub timesync: u16,
    pub seqn: u32,

    pub dynfield1: [u64; 2],
}
168
169impl Default for rte_mbuf {
170    fn default() -> Self {
171        Self {
172            buf_addr: ptr::null_mut(),
173            buf_iova: 0,
174            data_off: RTE_PKTMBUF_HEADROOM,
175            refcnt: 1,
176            nb_segs: 1,
177            port: 0,
178            ol_flags: 0,
179            packet_type: 0,
180            pkt_len: 0,
181            data_len: 0,
182            vlan_tci: 0,
183            hash: rte_mbuf_hash::default(),
184            vlan_tci_outer: 0,
185            buf_len: 0,
186            pool: ptr::null_mut(),
187            next: ptr::null_mut(),
188            tx_offload: 0,
189            priv_size: 0,
190            timesync: 0,
191            seqn: 0,
192            dynfield1: [0; 2],
193        }
194    }
195}
196
/// Hash union for rte_mbuf.
///
/// Real DPDK declares this as a union (rss / fdir / sched / usr); the stub
/// models only the RSS hash word.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct rte_mbuf_hash {
    pub rss: u32,
}
203
204// ============================================================================
205// Memory Pool Types
206// ============================================================================
207
/// Memory pool (simplified stub version for testing).
///
/// The real `rte_mempool` is a large ring-backed structure; this stub keeps
/// just enough bookkeeping for the query stubs (`rte_mempool_avail_count`
/// and friends) to answer plausibly.
#[repr(C)]
#[derive(Debug, Default)]
pub struct rte_mempool {
    /// Pool name (fixed-size C-style buffer; never filled in by the stubs)
    pub name: [c_char; 32],
    /// Total size of the mempool
    pub size: c_uint,
    /// Number of elements populated
    pub populated_size: c_uint,
    /// Element size
    pub elt_size: c_uint,
    /// Flags
    pub flags: c_uint,
}

/// Memory pool cache (opaque — only ever referred to by pointer).
#[repr(C)]
pub struct rte_mempool_cache {
    // Zero-sized private field keeps the type opaque and non-constructible
    // outside this module.
    _private: [u8; 0],
}
229
230// ============================================================================
231// Ethernet Device Types
232// ============================================================================
233
/// Ethernet device info, filled in by `rte_eth_dev_info_get`.
///
/// Field set mirrors DPDK's `struct rte_eth_dev_info`; in the stubs only
/// `max_rx_queues`, `max_tx_queues` and the two offload-capability words
/// are actually populated — everything else stays at its default.
#[repr(C)]
#[derive(Debug)]
pub struct rte_eth_dev_info {
    /// Backing device handle (always null in the stubs).
    pub device: *mut c_void,
    pub driver_name: *const c_char,
    pub if_index: c_uint,
    pub min_mtu: u16,
    pub max_mtu: u16,
    pub dev_flags: *const u32,
    pub min_rx_bufsize: u32,
    pub max_rx_pktlen: u32,
    pub max_lro_pkt_size: u32,
    pub max_rx_queues: u16,
    pub max_tx_queues: u16,
    pub max_mac_addrs: u32,
    pub max_vfs: u16,
    pub max_vmdq_pools: u16,
    /// RTE_ETH_RX_OFFLOAD_* capability bits.
    pub rx_offload_capa: u64,
    /// RTE_ETH_TX_OFFLOAD_* capability bits.
    pub tx_offload_capa: u64,
    pub rx_queue_offload_capa: u64,
    pub tx_queue_offload_capa: u64,
    pub reta_size: u16,
    pub hash_key_size: u8,
    pub flow_type_rss_offloads: u64,
    pub default_rxconf: rte_eth_rxconf,
    pub default_txconf: rte_eth_txconf,
    pub vmdq_queue_base: u16,
    pub vmdq_queue_num: u16,
    pub vmdq_pool_base: u16,
    pub rx_desc_lim: rte_eth_desc_lim,
    pub tx_desc_lim: rte_eth_desc_lim,
    pub speed_capa: u32,
    pub nb_rx_queues: u16,
    pub nb_tx_queues: u16,
    pub dev_capa: u64,
}
271
272impl Default for rte_eth_dev_info {
273    fn default() -> Self {
274        Self {
275            device: ptr::null_mut(),
276            driver_name: ptr::null(),
277            if_index: 0,
278            min_mtu: 0,
279            max_mtu: 0,
280            dev_flags: ptr::null(),
281            min_rx_bufsize: 0,
282            max_rx_pktlen: 0,
283            max_lro_pkt_size: 0,
284            max_rx_queues: 0,
285            max_tx_queues: 0,
286            max_mac_addrs: 0,
287            max_vfs: 0,
288            max_vmdq_pools: 0,
289            rx_offload_capa: 0,
290            tx_offload_capa: 0,
291            rx_queue_offload_capa: 0,
292            tx_queue_offload_capa: 0,
293            reta_size: 0,
294            hash_key_size: 0,
295            flow_type_rss_offloads: 0,
296            default_rxconf: rte_eth_rxconf::default(),
297            default_txconf: rte_eth_txconf::default(),
298            vmdq_queue_base: 0,
299            vmdq_queue_num: 0,
300            vmdq_pool_base: 0,
301            rx_desc_lim: rte_eth_desc_lim::default(),
302            tx_desc_lim: rte_eth_desc_lim::default(),
303            speed_capa: 0,
304            nb_rx_queues: 0,
305            nb_tx_queues: 0,
306            dev_capa: 0,
307        }
308    }
309}
310
/// Ethernet device configuration, passed to `rte_eth_dev_configure`.
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct rte_eth_conf {
    /// Bitmap of allowed link speeds (0 = autonegotiate).
    pub link_speeds: u32,
    pub rxmode: rte_eth_rxmode,
    pub txmode: rte_eth_txmode,
    /// Loopback operation mode (0 = disabled).
    pub lpbk_mode: u32,
    pub rx_adv_conf: rte_eth_rx_adv_conf,
    pub tx_adv_conf: rte_eth_tx_adv_conf,
    pub dcb_capability_en: u32,
    pub intr_conf: rte_eth_intr_conf,
}

/// RX-side port configuration.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct rte_eth_rxmode {
    /// Multi-queue mode (e.g. RSS); 0 = none.
    pub mq_mode: u32,
    pub mtu: u32,
    pub max_lro_pkt_size: u32,
    /// Requested per-port RX offloads (RTE_ETH_RX_OFFLOAD_* bits).
    pub offloads: u64,
    // Reserved ABI padding, kept to mirror the real struct.
    pub reserved_64s: [u64; 2],
    pub reserved_ptrs: [*mut c_void; 2],
}

// Manual impl: Default cannot be derived because raw pointers do not
// implement Default.
impl Default for rte_eth_rxmode {
    fn default() -> Self {
        Self {
            mq_mode: 0,
            mtu: 0,
            max_lro_pkt_size: 0,
            offloads: 0,
            reserved_64s: [0; 2],
            reserved_ptrs: [ptr::null_mut(); 2],
        }
    }
}

/// TX-side port configuration.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct rte_eth_txmode {
    pub mq_mode: u32,
    /// Requested per-port TX offloads (RTE_ETH_TX_OFFLOAD_* bits).
    pub offloads: u64,
    pub pvid: u16,
    // Reserved ABI padding.
    pub reserved_64s: [u64; 2],
    pub reserved_ptrs: [*mut c_void; 2],
}

// Manual impl for the same raw-pointer reason as rte_eth_rxmode.
impl Default for rte_eth_txmode {
    fn default() -> Self {
        Self {
            mq_mode: 0,
            offloads: 0,
            pvid: 0,
            reserved_64s: [0; 2],
            reserved_ptrs: [ptr::null_mut(); 2],
        }
    }
}
370
/// Per-queue RX configuration (passed to `rte_eth_rx_queue_setup`).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct rte_eth_rxconf {
    pub rx_thresh: rte_eth_thresh,
    /// Free-descriptor threshold before the driver replenishes the ring.
    pub rx_free_thresh: u16,
    pub rx_drop_en: u8,
    /// Non-zero: queue is not started by rte_eth_dev_start.
    pub rx_deferred_start: u8,
    pub rx_nseg: u16,
    pub share_group: u16,
    pub share_qid: u16,
    /// Per-queue RX offloads (RTE_ETH_RX_OFFLOAD_* bits).
    pub offloads: u64,
    pub rx_seg: *mut c_void,
    // Reserved ABI padding.
    pub reserved_64s: [u64; 2],
    pub reserved_ptrs: [*mut c_void; 2],
}

// Manual impl: Default cannot be derived because of the raw-pointer fields.
impl Default for rte_eth_rxconf {
    fn default() -> Self {
        Self {
            rx_thresh: rte_eth_thresh::default(),
            rx_free_thresh: 0,
            rx_drop_en: 0,
            rx_deferred_start: 0,
            rx_nseg: 0,
            share_group: 0,
            share_qid: 0,
            offloads: 0,
            rx_seg: ptr::null_mut(),
            reserved_64s: [0; 2],
            reserved_ptrs: [ptr::null_mut(); 2],
        }
    }
}

/// Per-queue TX configuration (passed to `rte_eth_tx_queue_setup`).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct rte_eth_txconf {
    pub tx_thresh: rte_eth_thresh,
    pub tx_rs_thresh: u16,
    pub tx_free_thresh: u16,
    /// Non-zero: queue is not started by rte_eth_dev_start.
    pub tx_deferred_start: u8,
    /// Per-queue TX offloads (RTE_ETH_TX_OFFLOAD_* bits).
    pub offloads: u64,
    // Reserved ABI padding.
    pub reserved_64s: [u64; 2],
    pub reserved_ptrs: [*mut c_void; 2],
}

// Manual impl for the same raw-pointer reason as rte_eth_rxconf.
impl Default for rte_eth_txconf {
    fn default() -> Self {
        Self {
            tx_thresh: rte_eth_thresh::default(),
            tx_rs_thresh: 0,
            tx_free_thresh: 0,
            tx_deferred_start: 0,
            offloads: 0,
            reserved_64s: [0; 2],
            reserved_ptrs: [ptr::null_mut(); 2],
        }
    }
}
430
/// Ring threshold registers (prefetch / host / write-back), as in DPDK.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct rte_eth_thresh {
    pub pthresh: u8,
    pub hthresh: u8,
    pub wthresh: u8,
}

/// Descriptor-count limits a device advertises for its queues.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct rte_eth_desc_lim {
    pub nb_max: u16,
    pub nb_min: u16,
    pub nb_align: u16,
    pub nb_seg_max: u16,
    pub nb_mtu_seg_max: u16,
}

/// Advanced RX configuration — only the RSS part is modeled here.
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct rte_eth_rx_adv_conf {
    pub rss_conf: rte_eth_rss_conf,
}

/// Advanced TX configuration — empty placeholder in the stubs.
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct rte_eth_tx_adv_conf {
    // Placeholder field mirrors the real type's presence inside
    // rte_eth_conf without modeling its contents.
    _placeholder: u8,
}

/// RSS (receive-side scaling) hash configuration.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct rte_eth_rss_conf {
    /// Optional hash key (null = driver default key).
    pub rss_key: *mut u8,
    pub rss_key_len: u8,
    /// Bitmask of flow types to hash on.
    pub rss_hf: u64,
}

// Manual impl: Default cannot be derived because of the raw-pointer field.
impl Default for rte_eth_rss_conf {
    fn default() -> Self {
        Self {
            rss_key: ptr::null_mut(),
            rss_key_len: 0,
            rss_hf: 0,
        }
    }
}

/// Interrupt configuration flags (link-state change, RX queue, removal).
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct rte_eth_intr_conf {
    pub lsc: u16,
    pub rxq: u16,
    pub rmv: u16,
}

/// Link status
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct rte_eth_link {
    /// Link speed in Mbps (e.g. 10000 = 10 Gbps, as reported by the stub).
    pub link_speed: u32,
    /// 1 = full duplex.
    pub link_duplex: u16,
    /// 1 = autonegotiation enabled.
    pub link_autoneg: u16,
    /// 1 = link up.
    pub link_status: u16,
}
496
// Accessor methods matching the bindgen-generated bitfield accessors
// so that `link.link_duplex()` works identically for stubs and real DPDK.
// (Real DPDK packs these fields as bitfields, so bindgen emits getter
// methods; the stub uses plain fields and simply forwards to them.)
impl rte_eth_link {
    pub fn link_duplex(&self) -> u16 {
        self.link_duplex
    }
    pub fn link_autoneg(&self) -> u16 {
        self.link_autoneg
    }
    pub fn link_status(&self) -> u16 {
        self.link_status
    }
}
510
/// Ethernet statistics, as returned by `rte_eth_stats_get`.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct rte_eth_stats {
    /// Total received packets.
    pub ipackets: u64,
    /// Total transmitted packets.
    pub opackets: u64,
    /// Total received bytes.
    pub ibytes: u64,
    /// Total transmitted bytes.
    pub obytes: u64,
    /// RX packets dropped (e.g. no descriptors available).
    pub imissed: u64,
    /// Erroneous received packets.
    pub ierrors: u64,
    /// Failed transmissions.
    pub oerrors: u64,
    /// RX mbuf allocation failures.
    pub rx_nombuf: u64,
    // Per-queue counters, fixed at 16 queues in this stub.
    pub q_ipackets: [u64; 16],
    pub q_opackets: [u64; 16],
    pub q_ibytes: [u64; 16],
    pub q_obytes: [u64; 16],
    pub q_errors: [u64; 16],
}

// ============================================================================
// Ring Buffer Types
// ============================================================================

/// Ring buffer (opaque — declared only so pointers to it typecheck; the
/// stubs never create one).
#[repr(C)]
pub struct rte_ring {
    _private: [u8; 0],
}
539
540// ============================================================================
541// Stub Function Implementations
542// ============================================================================
543
544// EAL Functions
545
/// EAL lifecycle state tracking — detects use-after-cleanup bugs in stubs.
/// Real DPDK segfaults when you call rte_pktmbuf_pool_create after rte_eal_cleanup
/// because rte_config->mem_config is NULL. This state lets stubs catch the same bug.
///
/// Three states:
///   0 = never initialized (permissive — allows mempool creation for backward compat)
///   1 = initialized (rte_eal_init called)
///  -1 = cleaned up (rte_eal_cleanup called after init — mempool creation denied)
use std::sync::atomic::{AtomicI32, Ordering};
static STUB_EAL_STATE: AtomicI32 = AtomicI32::new(0);

/// Returns true if EAL is currently initialized (init called, cleanup not yet called).
/// Exposed for tests to verify lifecycle behavior.
pub fn stub_eal_is_initialized() -> bool {
    STUB_EAL_STATE.load(Ordering::SeqCst) == 1
}

/// Returns true if EAL was cleaned up after being initialized.
/// This is the state that causes segfaults with real DPDK.
pub fn stub_eal_is_cleaned_up() -> bool {
    STUB_EAL_STATE.load(Ordering::SeqCst) == -1
}

/// Reset EAL state to "never initialized" (0). For use in test teardown
/// so serial tests don't leak state to the next test.
/// Also clears the stub rte_errno so a failure recorded by one test is
/// not observed by the next one.
pub fn stub_eal_reset() {
    STUB_EAL_STATE.store(0, Ordering::SeqCst);
    STUB_RTE_ERRNO.store(0, Ordering::SeqCst);
}
575
/// Stub `rte_eal_init`: records the "initialized" state and reports success
/// without parsing any EAL arguments.
#[no_mangle]
pub extern "C" fn rte_eal_init(_argc: c_int, _argv: *mut *mut c_char) -> c_int {
    STUB_EAL_STATE.store(1, Ordering::SeqCst);
    0 // Success
}

/// Stub `rte_eal_cleanup`: records the "cleaned up" state that makes
/// subsequent mempool creation fail (see rte_pktmbuf_pool_create).
#[no_mangle]
pub extern "C" fn rte_eal_cleanup() -> c_int {
    STUB_EAL_STATE.store(-1, Ordering::SeqCst);
    0
}

/// Stub: the calling lcore is always lcore 0.
#[no_mangle]
pub extern "C" fn rte_lcore_id() -> c_uint {
    0
}

/// Stub: a single lcore is available.
#[no_mangle]
pub extern "C" fn rte_lcore_count() -> c_uint {
    1
}

/// Stub: the main lcore is lcore 0.
#[no_mangle]
pub extern "C" fn rte_get_main_lcore() -> c_uint {
    0
}

/// Stub: everything lives on NUMA socket 0.
#[no_mangle]
pub extern "C" fn rte_socket_id() -> c_int {
    0
}
607
// Memory Pool Functions

/// Stub mempool tracking for testing.
/// We use thread_local to track allocated mempools and mbufs.
use std::cell::RefCell;

// NOTE(review): these counters are never updated or read anywhere in this
// file, and they are private, so nothing outside the module can use them
// either — they look like leftover scaffolding and are candidates for
// removal.
thread_local! {
    static STUB_MEMPOOL_COUNTER: RefCell<u32> = const { RefCell::new(0) };
    static STUB_MBUF_COUNTER: RefCell<u32> = const { RefCell::new(0) };
}
618
619#[no_mangle]
620pub extern "C" fn rte_pktmbuf_pool_create(
621    _name: *const c_char,
622    n: c_uint,
623    _cache_size: c_uint,
624    _priv_size: u16,
625    _data_room_size: u16,
626    _socket_id: c_int,
627) -> *mut rte_mempool {
628    // Guard: real DPDK segfaults if EAL was cleaned up (rte_config->mem_config is NULL).
629    // Return NULL to surface the same class of bug without crashing.
630    // Only block when EAL was explicitly cleaned up (-1), not when never initialized (0),
631    // so existing tests that create mempools without calling rte_eal_init() still work.
632    if stub_eal_is_cleaned_up() {
633        // Set rte_errno to ENODEV so callers get a meaningful error
634        STUB_RTE_ERRNO.store(19, Ordering::SeqCst); // ENODEV
635        return ptr::null_mut();
636    }
637
638    // Create a stub mempool for testing
639    let mempool = Box::new(rte_mempool {
640        size: n,
641        populated_size: n,
642        ..Default::default()
643    });
644    Box::into_raw(mempool)
645}
646
647#[no_mangle]
648pub extern "C" fn rte_mempool_free(mp: *mut rte_mempool) {
649    if !mp.is_null() {
650        unsafe {
651            let _ = Box::from_raw(mp);
652        }
653    }
654}
655
656#[no_mangle]
657pub extern "C" fn rte_pktmbuf_alloc(_mp: *mut rte_mempool) -> *mut rte_mbuf {
658    // Allocate a stub mbuf with a real buffer
659    let buf_size = 2048usize;
660    let buf: Vec<u8> = vec![0u8; buf_size];
661    let buf_ptr = Box::into_raw(buf.into_boxed_slice()) as *mut c_void;
662
663    let mbuf = Box::new(rte_mbuf {
664        buf_addr: buf_ptr,
665        buf_len: buf_size as u16,
666        data_off: RTE_PKTMBUF_HEADROOM,
667        data_len: 0,
668        pkt_len: 0,
669        ..Default::default()
670    });
671    Box::into_raw(mbuf)
672}
673
674#[no_mangle]
675pub extern "C" fn rte_pktmbuf_alloc_bulk(
676    _mp: *mut rte_mempool,
677    mbufs: *mut *mut rte_mbuf,
678    count: c_uint,
679) -> c_int {
680    if mbufs.is_null() {
681        return -1;
682    }
683
684    for i in 0..count as usize {
685        let mbuf = rte_pktmbuf_alloc(_mp);
686        if mbuf.is_null() {
687            // Free previously allocated mbufs
688            for j in 0..i {
689                unsafe {
690                    rte_pktmbuf_free(*mbufs.add(j));
691                }
692            }
693            return -1;
694        }
695        unsafe {
696            *mbufs.add(i) = mbuf;
697        }
698    }
699    0
700}
701
/// Stub for `rte_pktmbuf_free`: frees an mbuf created by
/// `rte_pktmbuf_alloc` along with its attached data buffer.
/// Passing NULL is a safe no-op, matching DPDK semantics.
///
/// NOTE(review): unlike real DPDK this ignores `refcnt` and frees
/// immediately, and it does not follow segment chains via `next`.
#[no_mangle]
pub extern "C" fn rte_pktmbuf_free(m: *mut rte_mbuf) {
    if !m.is_null() {
        unsafe {
            // SAFETY: mbufs handed out by this stub come from Box::into_raw,
            // so reconstructing the Box reclaims exactly that allocation.
            let mbuf = Box::from_raw(m);
            // Free the buffer if it exists
            // We allocated it with Box::into_raw(vec.into_boxed_slice())
            // so we need to reconstruct the boxed slice to free it
            // SAFETY: (buf_addr, buf_len) describe exactly the boxed slice
            // created in rte_pktmbuf_alloc; rebuilding it with the same
            // length is required for the allocator to free it correctly.
            if !mbuf.buf_addr.is_null() && mbuf.buf_len > 0 {
                let slice = std::slice::from_raw_parts_mut(
                    mbuf.buf_addr as *mut u8,
                    mbuf.buf_len as usize,
                );
                let _ = Box::from_raw(slice as *mut [u8]);
            }
        }
    }
}
720
/// Stub for `rte_pktmbuf_clone`.
///
/// NOTE(review): always returns NULL, i.e. cloning always "fails". Real
/// DPDK returns an indirect mbuf sharing the original's data; callers that
/// depend on a successful clone will not work against the stubs.
#[no_mangle]
pub extern "C" fn rte_pktmbuf_clone(
    _md: *mut rte_mbuf,
    _mp: *mut rte_mempool,
) -> *mut rte_mbuf {
    ptr::null_mut()
}
728
/// Stub: report the pool's populated element count as "available".
/// (The stub allocator never actually consumes pool elements.)
#[no_mangle]
pub extern "C" fn rte_mempool_avail_count(mp: *mut rte_mempool) -> c_uint {
    if mp.is_null() {
        return 0;
    }
    // SAFETY: non-null pool pointers originate from rte_pktmbuf_pool_create.
    unsafe { (*mp).populated_size }
}

#[no_mangle]
pub extern "C" fn rte_mempool_in_use_count(_mp: *mut rte_mempool) -> c_uint {
    // Stub: always return 0 (nothing in use)
    0
}

#[no_mangle]
pub extern "C" fn rte_mempool_full(mp: *mut rte_mempool) -> c_int {
    // Stub: return 1 (true) for any non-null pool; NULL reads as not-full.
    if mp.is_null() {
        return 0;
    }
    1
}

#[no_mangle]
pub extern "C" fn rte_mempool_empty(mp: *mut rte_mempool) -> c_int {
    // Stub: return 0 (false) - pool is not empty; NULL reads as empty.
    if mp.is_null() {
        return 1;
    }
    0
}
760
// Ethernet Device Functions

/// Stub: exactly one ethernet device is "available".
#[no_mangle]
pub extern "C" fn rte_eth_dev_count_avail() -> u16 {
    1 // Pretend we have one device for testing
}

/// Stub: device configuration always succeeds; the config is not recorded.
#[no_mangle]
pub extern "C" fn rte_eth_dev_configure(
    _port_id: u16,
    _nb_rx_queue: u16,
    _nb_tx_queue: u16,
    _eth_conf: *const rte_eth_conf,
) -> c_int {
    0
}

/// Stub: RX queue setup always succeeds; no queue state is kept.
#[no_mangle]
pub extern "C" fn rte_eth_rx_queue_setup(
    _port_id: u16,
    _rx_queue_id: u16,
    _nb_rx_desc: u16,
    _socket_id: c_uint,
    _rx_conf: *const rte_eth_rxconf,
    _mb_pool: *mut rte_mempool,
) -> c_int {
    0
}

/// Stub: TX queue setup always succeeds; no queue state is kept.
#[no_mangle]
pub extern "C" fn rte_eth_tx_queue_setup(
    _port_id: u16,
    _tx_queue_id: u16,
    _nb_tx_desc: u16,
    _socket_id: c_uint,
    _tx_conf: *const rte_eth_txconf,
) -> c_int {
    0
}

/// Stub: starting a port always succeeds.
#[no_mangle]
pub extern "C" fn rte_eth_dev_start(_port_id: u16) -> c_int {
    0
}

/// Stub: stopping a port always succeeds.
#[no_mangle]
pub extern "C" fn rte_eth_dev_stop(_port_id: u16) -> c_int {
    0
}

/// Stub: closing a port always succeeds.
#[no_mangle]
pub extern "C" fn rte_eth_dev_close(_port_id: u16) -> c_int {
    0
}

/// Stub: enabling promiscuous mode always succeeds (state not tracked).
#[no_mangle]
pub extern "C" fn rte_eth_promiscuous_enable(_port_id: u16) -> c_int {
    0
}

/// Stub: disabling promiscuous mode always succeeds (state not tracked).
#[no_mangle]
pub extern "C" fn rte_eth_promiscuous_disable(_port_id: u16) -> c_int {
    0
}

/// Stub: promiscuous mode always reads back as enabled, regardless of any
/// prior enable/disable calls (no state is tracked).
#[no_mangle]
pub extern "C" fn rte_eth_promiscuous_get(_port_id: u16) -> c_int {
    1 // Promiscuous is enabled by default in stubs
}

/// Stub: enabling all-multicast always succeeds (state not tracked).
#[no_mangle]
pub extern "C" fn rte_eth_allmulticast_enable(_port_id: u16) -> c_int {
    0
}

/// Stub: disabling all-multicast always succeeds (state not tracked).
#[no_mangle]
pub extern "C" fn rte_eth_allmulticast_disable(_port_id: u16) -> c_int {
    0
}

/// Stub: all-multicast always reads back as disabled.
#[no_mangle]
pub extern "C" fn rte_eth_allmulticast_get(_port_id: u16) -> c_int {
    0 // All-multicast disabled by default
}

/// Stub: setting the multicast address list succeeds; the list is not kept.
#[no_mangle]
pub extern "C" fn rte_eth_dev_set_mc_addr_list(
    _port_id: u16,
    _mc_addr_set: *mut rte_ether_addr,
    _nb_mc_addr: u32,
) -> c_int {
    0 // Success
}
853
854#[no_mangle]
855pub extern "C" fn rte_eth_dev_info_get(
856    _port_id: u16,
857    dev_info: *mut rte_eth_dev_info,
858) -> c_int {
859    if !dev_info.is_null() {
860        unsafe {
861            (*dev_info).max_rx_queues = 16;
862            (*dev_info).max_tx_queues = 16;
863            // Report VLAN offload capabilities so the HW VLAN code path is
864            // exercised in tests (alongside existing checksum offload support).
865            (*dev_info).rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP
866                | RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
867                | RTE_ETH_RX_OFFLOAD_UDP_CKSUM
868                | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
869            (*dev_info).tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT
870                | RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
871                | RTE_ETH_TX_OFFLOAD_UDP_CKSUM
872                | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
873        }
874    }
875    0
876}
877
/// Stub: the device always lives on NUMA node 0.
#[no_mangle]
pub extern "C" fn rte_eth_dev_socket_id(_port_id: u16) -> c_int {
    0 // NUMA node 0
}
882
883#[no_mangle]
884pub extern "C" fn rte_eth_link_get(_port_id: u16, link: *mut rte_eth_link) -> c_int {
885    if !link.is_null() {
886        unsafe {
887            (*link).link_speed = 10000; // 10 Gbps
888            (*link).link_duplex = 1;
889            (*link).link_status = 1; // Link up
890        }
891    }
892    0
893}
894
/// Stub: the no-wait variant behaves identically to `rte_eth_link_get`.
#[no_mangle]
pub extern "C" fn rte_eth_link_get_nowait(_port_id: u16, link: *mut rte_eth_link) -> c_int {
    rte_eth_link_get(_port_id, link)
}

/// Stub: succeeds without writing any statistics — the caller's struct is
/// left untouched, so callers should zero-initialize it.
#[no_mangle]
pub extern "C" fn rte_eth_stats_get(_port_id: u16, _stats: *mut rte_eth_stats) -> c_int {
    0
}

/// Stub: statistics reset always succeeds (there is nothing to reset).
#[no_mangle]
pub extern "C" fn rte_eth_stats_reset(_port_id: u16) -> c_int {
    0
}

/// Stub: reports a fixed locally administered MAC (02:00:00:00:00:01).
#[no_mangle]
pub extern "C" fn rte_eth_macaddr_get(_port_id: u16, mac_addr: *mut rte_ether_addr) -> c_int {
    if !mac_addr.is_null() {
        // SAFETY: pointer checked non-null; the caller owns the struct.
        unsafe {
            (*mac_addr).addr_bytes = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01];
        }
    }
    0
}
919
// Packet RX/TX Functions

/// Stub RX burst: never produces packets; the output array is untouched.
#[no_mangle]
pub extern "C" fn rte_eth_rx_burst(
    _port_id: u16,
    _queue_id: u16,
    _rx_pkts: *mut *mut rte_mbuf,
    _nb_pkts: u16,
) -> u16 {
    0 // No packets received (stub)
}

/// Stub TX burst: accepts nothing.
/// NOTE(review): returning 0 means "0 of nb_pkts queued"; DPDK callers
/// typically retry or free unsent mbufs, so a loop that waits for a
/// non-zero return will spin forever against this stub.
#[no_mangle]
pub extern "C" fn rte_eth_tx_burst(
    _port_id: u16,
    _queue_id: u16,
    _tx_pkts: *mut *mut rte_mbuf,
    _nb_pkts: u16,
) -> u16 {
    0 // No packets sent (stub)
}
940
// Error handling

/// Stub rte_errno — set by stub functions that need to report errors.
/// (Real DPDK exposes this as a per-lcore variable rather than a function.)
static STUB_RTE_ERRNO: AtomicI32 = AtomicI32::new(0);

/// Return the last error code recorded by a stub function
/// (e.g. ENODEV from rte_pktmbuf_pool_create after EAL cleanup).
#[no_mangle]
pub extern "C" fn rte_errno() -> c_int {
    STUB_RTE_ERRNO.load(Ordering::SeqCst)
}

/// Stub error-string lookup.
/// NOTE(review): ignores `_errnum` and always returns the same static
/// message — fine for logging, useless for diagnosing a specific errno.
#[no_mangle]
pub extern "C" fn rte_strerror(_errnum: c_int) -> *const c_char {
    b"No error (stub)\0".as_ptr() as *const c_char
}
955
// Utility functions: byte-order helpers mirroring DPDK's conversion
// macros — a no-op on big-endian hosts, a byte swap on little-endian ones.

/// Convert a 16-bit value from CPU byte order to big-endian (network) order.
#[no_mangle]
pub extern "C" fn rte_cpu_to_be_16(x: u16) -> u16 {
    u16::to_be(x)
}

/// Convert a 32-bit value from CPU byte order to big-endian (network) order.
#[no_mangle]
pub extern "C" fn rte_cpu_to_be_32(x: u32) -> u32 {
    u32::to_be(x)
}

/// Convert a 16-bit big-endian (network) value to CPU byte order.
#[no_mangle]
pub extern "C" fn rte_be_to_cpu_16(x: u16) -> u16 {
    u16::from_be(x)
}

/// Convert a 32-bit big-endian (network) value to CPU byte order.
#[no_mangle]
pub extern "C" fn rte_be_to_cpu_32(x: u32) -> u32 {
    u32::from_be(x)
}
976
977// Checksum functions
978#[no_mangle]
979pub extern "C" fn rte_ipv4_cksum(_ipv4_hdr: *const rte_ipv4_hdr) -> u16 {
980    0 // Stub - would compute checksum
981}
982
/// Stub UDP/TCP checksum over IPv4.
/// NOTE(review): always returns 0 — nothing is computed, so packets built
/// through this path carry a zero L4 checksum. (For UDP, 0 on the wire
/// means "no checksum present", so receivers may still accept those.)
#[no_mangle]
pub extern "C" fn rte_ipv4_udptcp_cksum(
    _ipv4_hdr: *const rte_ipv4_hdr,
    _l4_hdr: *const c_void,
) -> u16 {
    0 // Stub - would compute checksum
}
990
// Mbuf offload field access — same public API as shim.rs so callers are
// identical regardless of stub vs real DPDK.

/// Write the packed L2/L3/L4 `tx_offload` word of an mbuf.
///
/// # Safety
/// `m` must be a valid, properly aligned, non-null pointer to a live
/// `rte_mbuf` that is not being accessed concurrently.
#[inline]
pub unsafe fn mbuf_set_tx_offload(m: *mut rte_mbuf, val: u64) {
    (*m).tx_offload = val;
}

/// Read the packed `tx_offload` word of an mbuf.
///
/// # Safety
/// `m` must be a valid, properly aligned, non-null pointer to a live
/// `rte_mbuf`.
#[inline]
pub unsafe fn mbuf_get_tx_offload(m: *const rte_mbuf) -> u64 {
    (*m).tx_offload
}
1003
#[cfg(test)]
mod tests {
    use super::*;

    /// rte_eal_init always reports success in the stubs.
    #[test]
    fn test_eal_init() {
        let result = rte_eal_init(0, ptr::null_mut());
        assert_eq!(result, 0);
    }

    /// The stubs pretend exactly one ethernet device exists.
    #[test]
    fn test_eth_dev_count() {
        let count = rte_eth_dev_count_avail();
        assert_eq!(count, 1);
    }

    /// The calling lcore is always reported as lcore 0.
    #[test]
    fn test_lcore_id() {
        let id = rte_lcore_id();
        assert_eq!(id, 0);
    }
}