// esp_emac/dma/descriptor/mod.rs
// SPDX-License-Identifier: GPL-2.0-or-later OR Apache-2.0
// Copyright (c) Viacheslav Bocharov <v@baodeep.com> and JetHome (r)

//! TX and RX DMA descriptor structures.
//!
//! The crate runs the **enhanced 8-word descriptor layout** (32 bytes
//! per descriptor) selected by `DMABUSMODE.ATDS = 1`. Words 4-7 carry
//! the extended status / timestamp fields; the CPU never reads them
//! today, but they exist in memory so the DMA engine doesn't stomp
//! adjacent descriptors when chained at a 32-byte stride.
//!
//! | Word | TX (TDES)              | RX (RDES)              |
//! |------|------------------------|------------------------|
//! | 0    | Status / control       | Status                 |
//! | 1    | Buffer 1 size + flags  | Buffer 1 size + flags  |
//! | 2    | Buffer 1 address       | Buffer 1 address       |
//! | 3    | Next-descriptor addr   | Next-descriptor addr   |
//! | 4    | Reserved / extended    | Extended status        |
//! | 5    | Reserved               | Reserved               |
//! | 6    | Timestamp low          | Timestamp low          |
//! | 7    | Timestamp high         | Timestamp high         |
//!
//! The OWN bit (bit 31 of word 0) governs ownership: when set the DMA
//! engine owns the descriptor; when clear the CPU may access it.
//!
//! The legacy 4-word/16-byte layout (`ATDS = 0`) isn't supported by
//! this crate — the enhanced layout matches what `ph-esp32-mac` /
//! ESP-IDF use and is required for the timestamp / IPv4 checksum
//! offload features even if the crate doesn't currently surface them.
31pub mod bits;
32
33use bits::{rdes0, rdes1, tdes0, tdes1};
34
35// =============================================================================
36// VolatileCell
37// =============================================================================
38
/// Volatile cell wrapper for DMA descriptor fields.
///
/// Forces every access through `core::ptr::{read,write}_volatile`, so the
/// compiler can neither cache nor reorder reads/writes of memory that the
/// DMA engine mutates behind its back.
#[repr(transparent)]
pub struct VolatileCell<T: Copy> {
    inner: core::cell::UnsafeCell<T>,
}

// SAFETY: DMA descriptors are accessed from ISR context and main context.
// Volatile access + OWN-bit protocol ensures correctness.
unsafe impl<T: Copy> Sync for VolatileCell<T> {}

impl<T: Copy> VolatileCell<T> {
    /// Construct a cell holding `value`.
    #[inline(always)]
    pub const fn new(value: T) -> Self {
        Self { inner: core::cell::UnsafeCell::new(value) }
    }

    /// Volatile load of the current value.
    #[inline(always)]
    pub fn get(&self) -> T {
        // SAFETY: `inner` always points at a live, initialized `T`.
        unsafe { core::ptr::read_volatile(self.inner.get()) }
    }

    /// Volatile store of `value`.
    #[inline(always)]
    pub fn set(&self, value: T) {
        // SAFETY: `inner` always points at a live, initialized `T`.
        unsafe { core::ptr::write_volatile(self.inner.get(), value) }
    }

    /// Non-atomic read-modify-write: load the value, apply `f`, store the result.
    #[inline(always)]
    pub fn update<F: FnOnce(T) -> T>(&self, f: F) {
        self.set(f(self.get()));
    }
}

impl<T: Copy + Default> Default for VolatileCell<T> {
    /// A cell initialized with `T::default()`.
    fn default() -> Self {
        VolatileCell::new(T::default())
    }
}

// =============================================================================
// TX Descriptor
// =============================================================================

/// TX DMA descriptor — enhanced 8-word layout (32 bytes).
///
/// The ESP32 GMAC requires the enhanced descriptor format when
/// `DMABUSMODE.ATDS = 1` (which is what the IDF / ph-esp32-mac driver
/// runs with). Reserved fields below are written by the DMA but unused
/// by the CPU; they exist purely so the descriptor stride is 32 bytes
/// and the DMA does not stomp adjacent descriptors when chained.
///
/// `repr(C)` pins the field order to the hardware word order
/// (TDES0..TDES7) so the DMA engine sees each word at its expected
/// offset; `align(4)` matches the 32-bit word granularity.
#[repr(C, align(4))]
pub struct TxDescriptor {
    /// TDES0: Status and control bits (OWN, first/last segment, etc.).
    tdes0: VolatileCell<u32>,
    /// TDES1: Buffer 1 size and control flags.
    tdes1: VolatileCell<u32>,
    /// TDES2: Buffer 1 address.
    buffer_addr: VolatileCell<u32>,
    /// TDES3: Next descriptor address (chained mode).
    next_desc_addr: VolatileCell<u32>,
    /// TDES4: Reserved (extended status on ESP32-P4 / ATDS-enabled devices).
    _reserved4: VolatileCell<u32>,
    /// TDES5: Reserved.
    _reserved5: VolatileCell<u32>,
    /// TDES6: Timestamp low (when timestamping is enabled).
    _ts_low: VolatileCell<u32>,
    /// TDES7: Timestamp high (when timestamping is enabled).
    _ts_high: VolatileCell<u32>,
}

123#[allow(dead_code)]
124impl TxDescriptor {
125    /// Descriptor size in bytes (enhanced 8-word layout).
126    pub const SIZE: usize = 32;
127
128    /// Create a new zeroed TX descriptor.
129    #[must_use]
130    pub const fn new() -> Self {
131        Self {
132            tdes0: VolatileCell::new(0),
133            tdes1: VolatileCell::new(0),
134            buffer_addr: VolatileCell::new(0),
135            next_desc_addr: VolatileCell::new(0),
136            _reserved4: VolatileCell::new(0),
137            _reserved5: VolatileCell::new(0),
138            _ts_low: VolatileCell::new(0),
139            _ts_high: VolatileCell::new(0),
140        }
141    }
142
143    /// Initialize descriptor for chained mode.
144    ///
145    /// Sets the buffer pointer, next-descriptor pointer, and the
146    /// `SECOND_ADDR_CHAINED` flag. The descriptor is left CPU-owned.
147    pub fn setup_chained(&self, buffer: *const u8, next_desc: *const TxDescriptor) {
148        self.buffer_addr.set(buffer as u32);
149        self.next_desc_addr.set(next_desc as u32);
150        self.tdes0.set(tdes0::SECOND_ADDR_CHAINED);
151        self.tdes1.set(0);
152    }
153
154    /// Check if DMA owns this descriptor.
155    #[inline(always)]
156    #[must_use]
157    pub fn is_owned(&self) -> bool {
158        (self.tdes0.get() & tdes0::OWN) != 0
159    }
160
161    /// Give ownership to DMA.
162    #[inline(always)]
163    pub fn set_owned(&self) {
164        self.tdes0.update(|v| v | tdes0::OWN);
165    }
166
167    /// Take ownership from DMA.
168    #[inline(always)]
169    pub fn clear_owned(&self) {
170        self.tdes0.update(|v| v & !tdes0::OWN);
171    }
172
173    /// Prepare descriptor for transmission with segment flags.
174    ///
175    /// Sets the buffer length and first/last segment flags.
176    /// Does **not** set the OWN bit — call [`set_owned`](Self::set_owned)
177    /// afterwards to submit to DMA.
178    ///
179    /// CIC (Checksum Insertion Control) is always set to 0b11 (bits 23:22),
180    /// which instructs the MAC to insert the IPv4 header checksum and the
181    /// TCP/UDP/ICMP payload checksum including the pseudo-header. For
182    /// non-IPv4 frames the MAC ignores the CIC field, so setting it
183    /// unconditionally is safe.
184    pub fn prepare(&self, len: usize, first: bool, last: bool) {
185        // CIC = 0b11: full TCP/UDP/ICMP + IPv4-header checksum insertion.
186        let mut flags = tdes0::SECOND_ADDR_CHAINED | (0b11u32 << tdes0::CHECKSUM_INSERT_SHIFT);
187
188        if first {
189            flags |= tdes0::FIRST_SEGMENT;
190        }
191        if last {
192            flags |= tdes0::LAST_SEGMENT | tdes0::INTERRUPT_ON_COMPLETE;
193        }
194
195        self.tdes1.set((len as u32) & tdes1::BUFFER1_SIZE_MASK);
196        self.tdes0.set(flags);
197    }
198
199    /// Prepare and submit to DMA in one operation.
200    pub fn prepare_and_submit(&self, len: usize, first: bool, last: bool) {
201        self.prepare(len, first, last);
202        self.set_owned();
203    }
204
205    /// Check if transmission had errors (error summary bit).
206    #[inline(always)]
207    #[must_use]
208    pub fn has_error(&self) -> bool {
209        (self.tdes0.get() & tdes0::ERR_SUMMARY) != 0
210    }
211
212    /// Get all error flags from TDES0.
213    #[inline(always)]
214    #[must_use]
215    pub fn error_flags(&self) -> u32 {
216        self.tdes0.get() & tdes0::ALL_ERRORS
217    }
218
219    /// Get buffer address (TDES2).
220    #[inline(always)]
221    #[must_use]
222    pub fn buffer_addr(&self) -> u32 {
223        self.buffer_addr.get()
224    }
225
226    /// Get next descriptor address (TDES3, chained mode).
227    #[inline(always)]
228    #[must_use]
229    pub fn next_desc_addr(&self) -> u32 {
230        self.next_desc_addr.get()
231    }
232
233    /// Reset descriptor to initial state, preserving the chain pointer.
234    pub fn reset(&self) {
235        let next = self.next_desc_addr.get();
236        self.tdes0.set(tdes0::SECOND_ADDR_CHAINED);
237        self.tdes1.set(0);
238        self.next_desc_addr.set(next);
239    }
240
241    /// Raw TDES0 value (for debugging / tests).
242    #[inline(always)]
243    #[must_use]
244    pub fn raw_tdes0(&self) -> u32 {
245        self.tdes0.get()
246    }
247
248    /// Raw TDES1 value (for debugging / tests).
249    #[inline(always)]
250    #[must_use]
251    pub fn raw_tdes1(&self) -> u32 {
252        self.tdes1.get()
253    }
254}
255
impl Default for TxDescriptor {
    /// Equivalent to [`TxDescriptor::new`]: an all-zero, CPU-owned descriptor.
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: TxDescriptor uses volatile cells for all DMA-accessed fields.
// NOTE(review): `VolatileCell` is itself `Sync` and auto-`Send`, so these
// explicit impls look redundant — confirm before removing.
unsafe impl Sync for TxDescriptor {}
// SAFETY: TxDescriptor can be sent between threads.
unsafe impl Send for TxDescriptor {}

// =============================================================================
// RX Descriptor
// =============================================================================

/// RX DMA descriptor — enhanced 8-word layout (32 bytes).
///
/// See [`TxDescriptor`] for why we run the enhanced layout.
/// `repr(C)` pins the field order to the hardware word order
/// (RDES0..RDES7); `align(4)` matches the 32-bit word granularity.
#[repr(C, align(4))]
pub struct RxDescriptor {
    /// RDES0: Status bits (OWN, first/last, frame length, errors).
    rdes0: VolatileCell<u32>,
    /// RDES1: Buffer 1 size and control flags.
    rdes1: VolatileCell<u32>,
    /// RDES2: Buffer 1 address.
    buffer_addr: VolatileCell<u32>,
    /// RDES3: Next descriptor address (chained mode).
    next_desc_addr: VolatileCell<u32>,
    /// RDES4: Extended status (when enabled).
    _ext_status: VolatileCell<u32>,
    /// RDES5: Reserved.
    _reserved5: VolatileCell<u32>,
    /// RDES6: Timestamp low (when timestamping is enabled).
    _ts_low: VolatileCell<u32>,
    /// RDES7: Timestamp high (when timestamping is enabled).
    _ts_high: VolatileCell<u32>,
}

294#[allow(dead_code)]
295impl RxDescriptor {
296    /// Descriptor size in bytes (enhanced 8-word layout).
297    pub const SIZE: usize = 32;
298
299    /// Create a new zeroed RX descriptor. Call [`setup_chained`](Self::setup_chained) before use.
300    #[must_use]
301    pub const fn new() -> Self {
302        Self {
303            rdes0: VolatileCell::new(0),
304            rdes1: VolatileCell::new(0),
305            buffer_addr: VolatileCell::new(0),
306            next_desc_addr: VolatileCell::new(0),
307            _ext_status: VolatileCell::new(0),
308            _reserved5: VolatileCell::new(0),
309            _ts_low: VolatileCell::new(0),
310            _ts_high: VolatileCell::new(0),
311        }
312    }
313
314    /// Configure descriptor in chained mode and give to DMA.
315    ///
316    /// Sets the buffer pointer, buffer size, next-descriptor pointer,
317    /// the `SECOND_ADDR_CHAINED` flag, and the OWN bit.
318    pub fn setup_chained(
319        &self,
320        buffer: *mut u8,
321        buffer_size: usize,
322        next_desc: *const RxDescriptor,
323    ) {
324        self.buffer_addr.set(buffer as u32);
325        self.next_desc_addr.set(next_desc as u32);
326        self.rdes1
327            .set(rdes1::SECOND_ADDR_CHAINED | ((buffer_size as u32) & rdes1::BUFFER1_SIZE_MASK));
328        // Give ownership to DMA.
329        self.rdes0.set(rdes0::OWN);
330    }
331
332    /// Check if DMA owns this descriptor.
333    #[inline(always)]
334    #[must_use]
335    pub fn is_owned(&self) -> bool {
336        (self.rdes0.get() & rdes0::OWN) != 0
337    }
338
339    /// Give ownership to DMA.
340    #[inline(always)]
341    pub fn set_owned(&self) {
342        self.rdes0.set(rdes0::OWN);
343    }
344
345    /// Take ownership from DMA.
346    #[inline(always)]
347    pub fn clear_owned(&self) {
348        self.rdes0.update(|v| v & !rdes0::OWN);
349    }
350
351    /// First descriptor of a frame.
352    #[inline(always)]
353    #[must_use]
354    pub fn is_first(&self) -> bool {
355        (self.rdes0.get() & rdes0::FIRST_DESC) != 0
356    }
357
358    /// Last descriptor of a frame.
359    #[inline(always)]
360    #[must_use]
361    pub fn is_last(&self) -> bool {
362        (self.rdes0.get() & rdes0::LAST_DESC) != 0
363    }
364
365    /// Complete frame in a single descriptor (both first and last).
366    #[inline(always)]
367    #[must_use]
368    pub fn is_complete_frame(&self) -> bool {
369        let status = self.rdes0.get();
370        (status & (rdes0::FIRST_DESC | rdes0::LAST_DESC)) == (rdes0::FIRST_DESC | rdes0::LAST_DESC)
371    }
372
373    /// Check if the error summary bit is set.
374    #[inline(always)]
375    #[must_use]
376    pub fn has_error(&self) -> bool {
377        (self.rdes0.get() & rdes0::ERR_SUMMARY) != 0
378    }
379
380    /// Raw error flags from RDES0.
381    #[inline(always)]
382    #[must_use]
383    pub fn error_flags(&self) -> u32 {
384        self.rdes0.get() & rdes0::ALL_ERRORS
385    }
386
387    /// Frame length including CRC (valid on last descriptor).
388    #[inline(always)]
389    #[must_use]
390    pub fn frame_length(&self) -> usize {
391        ((self.rdes0.get() & rdes0::FRAME_LEN_MASK) >> rdes0::FRAME_LEN_SHIFT) as usize
392    }
393
394    /// Frame length excluding the 4-byte CRC.
395    #[inline(always)]
396    #[must_use]
397    pub fn payload_length(&self) -> usize {
398        self.frame_length().saturating_sub(4)
399    }
400
401    /// Buffer address (RDES2).
402    #[inline(always)]
403    #[must_use]
404    pub fn buffer_addr(&self) -> u32 {
405        self.buffer_addr.get()
406    }
407
408    /// Next descriptor address (RDES3, chained mode).
409    #[inline(always)]
410    #[must_use]
411    pub fn next_desc_addr(&self) -> u32 {
412        self.next_desc_addr.get()
413    }
414
415    /// Configured buffer size from RDES1.
416    #[inline(always)]
417    #[must_use]
418    pub fn buffer_size(&self) -> usize {
419        (self.rdes1.get() & rdes1::BUFFER1_SIZE_MASK) as usize
420    }
421
422    /// Clear status and return the descriptor to DMA for reuse.
423    pub fn recycle(&self) {
424        self.rdes0.set(rdes0::OWN);
425    }
426
427    /// Raw RDES0 value (for debugging / tests).
428    #[inline(always)]
429    #[must_use]
430    pub fn raw_rdes0(&self) -> u32 {
431        self.rdes0.get()
432    }
433
434    /// Raw RDES1 value (for debugging / tests).
435    #[inline(always)]
436    #[must_use]
437    pub fn raw_rdes1(&self) -> u32 {
438        self.rdes1.get()
439    }
440
441    /// Set raw RDES0 value (test only — simulates DMA hardware writes).
442    #[cfg(test)]
443    pub fn set_raw_rdes0(&self, val: u32) {
444        self.rdes0.set(val);
445    }
446}
447
impl Default for RxDescriptor {
    /// Equivalent to [`RxDescriptor::new`]: an all-zero, CPU-owned descriptor.
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: RxDescriptor uses volatile cells for all DMA-accessed fields.
// NOTE(review): `VolatileCell` is itself `Sync` and auto-`Send`, so these
// explicit impls look redundant — confirm before removing.
unsafe impl Sync for RxDescriptor {}
// SAFETY: RxDescriptor can be sent between threads.
unsafe impl Send for RxDescriptor {}

// =============================================================================
// Tests
// =============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    // =========================================================================
    // VolatileCell Tests
    // =========================================================================

    #[test]
    fn volatile_cell_new() {
        let cell = VolatileCell::new(42u32);
        assert_eq!(cell.get(), 42);
    }

    #[test]
    fn volatile_cell_get_set() {
        let cell = VolatileCell::new(0u32);
        assert_eq!(cell.get(), 0);
        cell.set(0xDEAD_BEEF);
        assert_eq!(cell.get(), 0xDEAD_BEEF);
    }

    #[test]
    fn volatile_cell_update() {
        let cell = VolatileCell::new(0x0000_00FFu32);
        cell.update(|v| v | 0xFF00_0000);
        assert_eq!(cell.get(), 0xFF00_00FF);
    }

    #[test]
    fn volatile_cell_default() {
        let cell = VolatileCell::<u32>::default();
        assert_eq!(cell.get(), 0);
    }

    // =========================================================================
    // TX Descriptor Layout Tests
    // =========================================================================

    #[test]
    fn tx_descriptor_size() {
        // 8 words × 4 bytes — the DMA chains descriptors at this stride.
        assert_eq!(core::mem::size_of::<TxDescriptor>(), 32);
        assert_eq!(TxDescriptor::SIZE, core::mem::size_of::<TxDescriptor>());
    }

    #[test]
    fn tx_descriptor_alignment() {
        assert_eq!(core::mem::align_of::<TxDescriptor>(), 4);
    }

    // =========================================================================
    // TX Descriptor Ownership Tests
    // =========================================================================

    #[test]
    fn tx_descriptor_new_not_owned() {
        let desc = TxDescriptor::new();
        assert!(!desc.is_owned());
    }

    #[test]
    fn tx_descriptor_is_owned() {
        let desc = TxDescriptor::new();
        desc.set_owned();
        assert!(desc.is_owned());
        desc.clear_owned();
        assert!(!desc.is_owned());
    }

    #[test]
    fn tdes0_own_bit() {
        // OWN bit must be bit 31.
        let desc = TxDescriptor::new();
        desc.set_owned();
        assert_eq!(desc.raw_tdes0() & tdes0::OWN, tdes0::OWN);
        assert_eq!(tdes0::OWN, 1 << 31);
    }

    // =========================================================================
    // TX Descriptor Setup / Prepare Tests
    // =========================================================================

    #[test]
    fn tx_descriptor_setup_chained() {
        let desc = TxDescriptor::new();
        let buf = [0u8; 64];
        let next = TxDescriptor::new();

        desc.setup_chained(buf.as_ptr(), &next as *const TxDescriptor);

        assert_eq!(desc.buffer_addr(), buf.as_ptr() as u32);
        assert_eq!(desc.next_desc_addr(), &next as *const TxDescriptor as u32);
        assert!(desc.raw_tdes0() & tdes0::SECOND_ADDR_CHAINED != 0);
        assert!(!desc.is_owned());
    }

    #[test]
    fn tx_descriptor_prepare_single_frame() {
        let desc = TxDescriptor::new();
        desc.prepare(1500, true, true);

        let raw0 = desc.raw_tdes0();
        assert!(raw0 & tdes0::FIRST_SEGMENT != 0);
        assert!(raw0 & tdes0::LAST_SEGMENT != 0);
        assert!(raw0 & tdes0::INTERRUPT_ON_COMPLETE != 0);
        assert!(raw0 & tdes0::OWN == 0, "prepare must not set OWN");

        let len = desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK;
        assert_eq!(len, 1500);
    }

    #[test]
    fn tdes0_first_last_bits() {
        let desc = TxDescriptor::new();

        // First segment only.
        desc.prepare(100, true, false);
        let raw = desc.raw_tdes0();
        assert!(raw & tdes0::FIRST_SEGMENT != 0);
        assert!(raw & tdes0::LAST_SEGMENT == 0);

        // Last segment only.
        desc.prepare(100, false, true);
        let raw = desc.raw_tdes0();
        assert!(raw & tdes0::FIRST_SEGMENT == 0);
        assert!(raw & tdes0::LAST_SEGMENT != 0);
    }

    #[test]
    fn tx_descriptor_prepare_sets_cic_full_offload() {
        // CIC = 0b11 in bits 23:22 means the MAC inserts IPv4 + TCP/UDP/ICMP
        // checksums with pseudo-header. Verify prepare() always sets it.
        let desc = TxDescriptor::new();
        desc.prepare(64, true, true);
        let raw = desc.raw_tdes0();
        let cic = (raw >> tdes0::CHECKSUM_INSERT_SHIFT) & 0x3;
        assert_eq!(cic, 0b11, "CIC must be 0b11 for full HW checksum offload");
    }

    #[test]
    fn tx_descriptor_prepare_and_submit() {
        let desc = TxDescriptor::new();
        desc.prepare_and_submit(256, true, true);
        assert!(desc.is_owned());
        assert_eq!(desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK, 256);
    }

    #[test]
    fn tx_descriptor_no_errors_initially() {
        let desc = TxDescriptor::new();
        assert!(!desc.has_error());
        assert_eq!(desc.error_flags(), 0);
    }

    #[test]
    fn tx_descriptor_error_detection() {
        // Write the status word directly to simulate a DMA error report.
        let desc = TxDescriptor::new();
        desc.tdes0.set(tdes0::ERR_SUMMARY | tdes0::UNDERFLOW_ERR);
        assert!(desc.has_error());
        assert!(desc.error_flags() & tdes0::UNDERFLOW_ERR != 0);
    }

    #[test]
    fn tx_descriptor_reset_preserves_chain() {
        let desc = TxDescriptor::new();
        let next_addr = 0x1234_5678u32;
        desc.next_desc_addr.set(next_addr);
        desc.prepare_and_submit(1000, true, true);

        desc.reset();

        assert!(!desc.is_owned());
        assert_eq!(desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK, 0);
        assert_eq!(desc.next_desc_addr(), next_addr);
        assert!(desc.raw_tdes0() & tdes0::SECOND_ADDR_CHAINED != 0);
    }

    // =========================================================================
    // RX Descriptor Layout Tests
    // =========================================================================

    #[test]
    fn rx_descriptor_size() {
        assert_eq!(core::mem::size_of::<RxDescriptor>(), 32);
        assert_eq!(RxDescriptor::SIZE, core::mem::size_of::<RxDescriptor>());
    }

    #[test]
    fn rx_descriptor_alignment() {
        assert_eq!(core::mem::align_of::<RxDescriptor>(), 4);
    }

    // =========================================================================
    // RX Descriptor Ownership Tests
    // =========================================================================

    #[test]
    fn rx_descriptor_new_not_owned() {
        let desc = RxDescriptor::new();
        assert!(!desc.is_owned());
    }

    #[test]
    fn rdes0_own_bit() {
        let desc = RxDescriptor::new();
        desc.set_owned();
        assert_eq!(desc.raw_rdes0() & rdes0::OWN, rdes0::OWN);
        assert_eq!(rdes0::OWN, 1 << 31);
    }

    // =========================================================================
    // RX Descriptor Setup / Chained Tests
    // =========================================================================

    #[test]
    fn rx_descriptor_setup_chained() {
        let desc = RxDescriptor::new();
        let mut buf = [0u8; 1600];
        let next = RxDescriptor::new();

        desc.setup_chained(buf.as_mut_ptr(), 1600, &next as *const RxDescriptor);

        assert_eq!(desc.buffer_addr(), buf.as_ptr() as u32);
        assert_eq!(desc.next_desc_addr(), &next as *const RxDescriptor as u32);
        assert_eq!(desc.buffer_size(), 1600);
        assert!(desc.is_owned(), "setup_chained gives to DMA");
        assert!(desc.raw_rdes1() & rdes1::SECOND_ADDR_CHAINED != 0);
    }

    // =========================================================================
    // RX Descriptor Status Tests
    // =========================================================================

    #[test]
    fn rx_descriptor_first_last_flags() {
        let desc = RxDescriptor::new();
        assert!(!desc.is_first());
        assert!(!desc.is_last());

        desc.rdes0.set(rdes0::FIRST_DESC | rdes0::LAST_DESC);
        assert!(desc.is_first());
        assert!(desc.is_last());
        assert!(desc.is_complete_frame());
    }

    #[test]
    fn rx_descriptor_payload_length() {
        let desc = RxDescriptor::new();

        // Frame length 1504 (including CRC), payload = 1500.
        desc.rdes0.set(1504 << rdes0::FRAME_LEN_SHIFT);
        assert_eq!(desc.frame_length(), 1504);
        assert_eq!(desc.payload_length(), 1500);
    }

    #[test]
    fn rx_descriptor_payload_length_short_frame() {
        let desc = RxDescriptor::new();
        // Frame shorter than CRC — saturating_sub prevents underflow.
        desc.rdes0.set(2 << rdes0::FRAME_LEN_SHIFT);
        assert_eq!(desc.payload_length(), 0);
    }

    #[test]
    fn rx_descriptor_error_detection() {
        let desc = RxDescriptor::new();
        assert!(!desc.has_error());

        desc.rdes0
            .set(rdes0::ERR_SUMMARY | rdes0::CRC_ERR | rdes0::OVERFLOW_ERR);
        assert!(desc.has_error());
        assert!(desc.error_flags() & rdes0::CRC_ERR != 0);
        assert!(desc.error_flags() & rdes0::OVERFLOW_ERR != 0);
    }

    // =========================================================================
    // RX Descriptor Recycle Test
    // =========================================================================

    #[test]
    fn rx_descriptor_recycle() {
        // Simulate a received frame, then hand the descriptor back to DMA.
        let desc = RxDescriptor::new();
        desc.rdes1.set(1600);
        desc.rdes0
            .set(rdes0::FIRST_DESC | rdes0::LAST_DESC | (100 << rdes0::FRAME_LEN_SHIFT));

        desc.recycle();

        assert!(desc.is_owned());
        // Buffer size in RDES1 is preserved.
        assert_eq!(desc.buffer_size(), 1600);
    }
}