// File: esp_emac/dma/engine.rs
1// SPDX-License-Identifier: GPL-2.0-or-later OR Apache-2.0
2// Copyright (c) Viacheslav Bocharov <v@baodeep.com> and JetHome (r)
3
4//! DMA engine managing TX/RX descriptor rings and frame I/O.
5//!
6//! The engine owns the descriptor rings and data buffers, providing
7//! a high-level interface for frame transmission and reception without
8//! any register access. Register programming is handled by the caller.
9
10use crate::dma::descriptor::{RxDescriptor, TxDescriptor};
11use crate::dma::ring::DescriptorRing;
12use crate::error::EmacError;
13
/// DMA engine with statically allocated buffers.
///
/// Owns the RX/TX descriptor rings plus their data buffers and exposes a
/// frame-level transmit/receive API. It performs no register access itself;
/// the caller programs the base addresses returned by [`DmaEngine::init`]
/// into the hardware's DMA registers.
///
/// # Const generics
/// - `RX`: number of RX descriptors/buffers
/// - `TX`: number of TX descriptors/buffers
/// - `BUF`: buffer size per descriptor (bytes)
pub struct DmaEngine<const RX: usize, const TX: usize, const BUF: usize> {
    /// RX descriptor ring.
    rx_ring: DescriptorRing<RxDescriptor, RX>,
    /// TX descriptor ring.
    tx_ring: DescriptorRing<TxDescriptor, TX>,
    /// RX data buffers: one `BUF`-byte buffer per RX descriptor.
    rx_buffers: [[u8; BUF]; RX],
    /// TX data buffers: one `BUF`-byte buffer per TX descriptor.
    tx_buffers: [[u8; BUF]; TX],
    /// Whether `init` has run (descriptor chains programmed).
    initialized: bool,
}
32
33impl<const RX: usize, const TX: usize, const BUF: usize> DmaEngine<RX, TX, BUF> {
34    /// Create a new DMA engine (all zeroed, not yet initialized).
35    #[must_use]
36    pub const fn new() -> Self {
37        Self {
38            rx_ring: DescriptorRing::new([const { RxDescriptor::new() }; RX]),
39            tx_ring: DescriptorRing::new([const { TxDescriptor::new() }; TX]),
40            rx_buffers: [[0u8; BUF]; RX],
41            tx_buffers: [[0u8; BUF]; TX],
42            initialized: false,
43        }
44    }
45
46    /// Initialize descriptor chains.
47    ///
48    /// Sets up chained descriptors: each points to its buffer and the next
49    /// descriptor. The last descriptor chains back to the first (circular).
50    ///
51    /// Returns `(rx_base_addr, tx_base_addr)` for programming DMA registers.
52    pub fn init(&mut self) -> (u32, u32) {
53        // Set up RX descriptors: each points to its buffer and next descriptor.
54        for i in 0..RX {
55            let next_idx = (i + 1) % RX;
56            let buffer_ptr = self.rx_buffers[i].as_mut_ptr();
57            let next_desc = self.rx_ring.get(next_idx) as *const RxDescriptor;
58            self.rx_ring
59                .get(i)
60                .setup_chained(buffer_ptr, BUF, next_desc);
61        }
62
63        // Set up TX descriptors: each points to its buffer and next descriptor.
64        for i in 0..TX {
65            let next_idx = (i + 1) % TX;
66            let buffer_ptr = self.tx_buffers[i].as_ptr();
67            let next_desc = self.tx_ring.get(next_idx) as *const TxDescriptor;
68            self.tx_ring.get(i).setup_chained(buffer_ptr, next_desc);
69        }
70
71        self.rx_ring.reset();
72        self.tx_ring.reset();
73        self.initialized = true;
74
75        let rx_base = self.rx_ring.base_addr() as u32;
76        let tx_base = self.tx_ring.base_addr() as u32;
77        (rx_base, tx_base)
78    }
79
80    /// Check if initialized.
81    #[inline(always)]
82    #[must_use]
83    pub fn is_initialized(&self) -> bool {
84        self.initialized
85    }
86
87    /// Calculate total static memory usage in bytes.
88    #[must_use]
89    pub const fn memory_usage() -> usize {
90        let rx_desc = RX * RxDescriptor::SIZE;
91        let tx_desc = TX * TxDescriptor::SIZE;
92        let rx_buf = RX * BUF;
93        let tx_buf = TX * BUF;
94        rx_desc + tx_desc + rx_buf + tx_buf
95    }
96
97    // ── Transmission ──────────────────────────────────────
98
99    /// Check if there's room to transmit a frame of given length.
100    #[must_use]
101    pub fn can_transmit(&self, len: usize) -> bool {
102        if len == 0 || len > BUF * TX {
103            return false;
104        }
105        let needed = len.div_ceil(BUF);
106        self.tx_available() >= needed
107    }
108
109    /// Count available (CPU-owned) TX descriptors starting from current.
110    #[must_use]
111    pub fn tx_available(&self) -> usize {
112        let mut count = 0;
113        for i in 0..TX {
114            let idx = (self.tx_ring.current_index() + i) % TX;
115            if !self.tx_ring.get(idx).is_owned() {
116                count += 1;
117            } else {
118                break;
119            }
120        }
121        count
122    }
123
124    /// Submit a frame for transmission. Returns number of bytes sent.
125    ///
126    /// For frames larger than `BUF`, uses multiple descriptors (scatter-gather).
127    /// Descriptors are given to DMA in reverse order to prevent a race where
128    /// the DMA starts processing before all descriptors are ready.
129    pub fn transmit(&mut self, data: &[u8]) -> Result<usize, EmacError> {
130        if data.is_empty() {
131            return Err(EmacError::InvalidLength);
132        }
133
134        if data.len() > BUF * TX {
135            return Err(EmacError::FrameTooLarge);
136        }
137
138        let desc_count = data.len().div_ceil(BUF);
139        if self.tx_available() < desc_count {
140            return Err(EmacError::NoDescriptorsAvailable);
141        }
142
143        let current = self.tx_ring.current_index();
144        let mut remaining = data.len();
145        let mut offset = 0usize;
146
147        // Prepare each descriptor with its data chunk.
148        for i in 0..desc_count {
149            let idx = (current + i) % TX;
150            let desc = self.tx_ring.get(idx);
151
152            if desc.is_owned() {
153                return Err(EmacError::DescriptorBusy);
154            }
155
156            let chunk_size = core::cmp::min(remaining, BUF);
157            self.tx_buffers[idx][..chunk_size].copy_from_slice(&data[offset..offset + chunk_size]);
158            desc.prepare(chunk_size, i == 0, i == desc_count - 1);
159
160            remaining -= chunk_size;
161            offset += chunk_size;
162        }
163
164        // Give to DMA in reverse order (prevents race condition).
165        for i in (0..desc_count).rev() {
166            let idx = (current + i) % TX;
167            self.tx_ring.get(idx).set_owned();
168        }
169
170        self.tx_ring.advance_by(desc_count);
171        Ok(data.len())
172    }
173
174    /// Reclaim completed TX descriptors (return from DMA to CPU ownership).
175    ///
176    /// Returns the number of descriptors reclaimed.
177    pub fn tx_reclaim(&mut self) -> usize {
178        let mut reclaimed = 0;
179        for i in 0..TX {
180            let idx = (self.tx_ring.current_index() + i) % TX;
181            let desc = self.tx_ring.get(idx);
182            if !desc.is_owned() {
183                reclaimed += 1;
184            }
185        }
186        reclaimed
187    }
188
189    // ── Reception ─────────────────────────────────────────
190
191    /// Check if there is something for `receive` to do on the current
192    /// descriptor. Returns `true` when:
193    ///
194    /// - the current descriptor is CPU-owned and carries an error flag
195    ///   (CRC, overflow, etc.) — `receive` will recycle it as part of
196    ///   its error path, so the driver still needs to be woken; or
197    /// - a complete (possibly multi-descriptor) frame is available, as
198    ///   determined by `peek_frame_length`.
199    ///
200    /// Two earlier implementations both had bugs:
201    /// 1. `!current.is_owned() && current.is_last()` missed multi-
202    ///    descriptor frames whose head wasn't itself `LAST`.
203    /// 2. `peek_frame_length().is_some()` fixed (1) but missed error
204    ///    frames (peek declines to report a length for them), so the
205    ///    embassy-net driver never called `receive` to flush a
206    ///    CRC-failed frame — RX would wedge as the ring filled with
207    ///    untouched error descriptors.
208    #[must_use]
209    pub fn rx_available(&self) -> bool {
210        let desc = self.rx_ring.current();
211        if desc.is_owned() {
212            return false;
213        }
214        // Errored descriptor still needs draining via `receive` so the
215        // chain doesn't fill up with bad frames; peek would say None.
216        if desc.has_error() {
217            return true;
218        }
219        self.peek_frame_length().is_some()
220    }
221
222    /// Receive a frame into the provided buffer.
223    ///
224    /// Returns the number of bytes received, or `None` if no frame is ready.
225    /// For single-descriptor frames, copies payload from the RX buffer.
226    /// For multi-descriptor frames, copies from each descriptor's buffer.
227    pub fn receive(&mut self, buffer: &mut [u8]) -> Result<Option<usize>, EmacError> {
228        let first_desc = self.rx_ring.current();
229
230        // Not owned by CPU — no frame ready.
231        if first_desc.is_owned() {
232            return Ok(None);
233        }
234
235        // Single-descriptor frame (common case).
236        if first_desc.is_first() && first_desc.is_last() {
237            if first_desc.has_error() {
238                first_desc.recycle();
239                self.rx_ring.advance();
240                return Err(EmacError::FrameError);
241            }
242
243            let frame_len = first_desc.payload_length();
244            if buffer.len() < frame_len {
245                first_desc.recycle();
246                self.rx_ring.advance();
247                return Err(EmacError::BufferTooSmall);
248            }
249
250            let idx = self.rx_ring.current_index();
251            buffer[..frame_len].copy_from_slice(&self.rx_buffers[idx][..frame_len]);
252            first_desc.recycle();
253            self.rx_ring.advance();
254            return Ok(Some(frame_len));
255        }
256
257        // Multi-descriptor frame: must start with first segment.
258        if !first_desc.is_first() {
259            self.flush_rx_frame();
260            return Ok(None);
261        }
262
263        if first_desc.has_error() {
264            self.flush_rx_frame();
265            return Err(EmacError::FrameError);
266        }
267
268        // Walk descriptors to find total frame length.
269        let mut frame_len = 0usize;
270        let mut desc_count = 0usize;
271        let current = self.rx_ring.current_index();
272
273        for i in 0..RX {
274            let idx = (current + i) % RX;
275            let desc = self.rx_ring.get(idx);
276
277            if desc.is_owned() {
278                // Frame not complete yet.
279                return Ok(None);
280            }
281
282            desc_count += 1;
283
284            if desc.is_last() {
285                frame_len = desc.payload_length();
286                break;
287            }
288        }
289
290        if frame_len == 0 {
291            return Ok(None);
292        }
293
294        if buffer.len() < frame_len {
295            self.flush_rx_frame();
296            return Err(EmacError::BufferTooSmall);
297        }
298
299        // Copy data from all descriptors.
300        let mut copied = 0usize;
301        let last_desc_i = desc_count - 1;
302
303        for i in 0..desc_count {
304            let idx = (current + i) % RX;
305            let copy_len = if i == last_desc_i {
306                frame_len - copied
307            } else {
308                BUF
309            };
310            let copy_len = core::cmp::min(copy_len, frame_len - copied);
311
312            if copy_len > 0 {
313                buffer[copied..copied + copy_len]
314                    .copy_from_slice(&self.rx_buffers[idx][..copy_len]);
315                copied += copy_len;
316            }
317            self.rx_ring.get(idx).recycle();
318        }
319
320        self.rx_ring.advance_by(desc_count);
321        Ok(Some(frame_len))
322    }
323
324    /// Get the length of the next available frame without consuming it.
325    #[must_use]
326    pub fn peek_frame_length(&self) -> Option<usize> {
327        let desc = self.rx_ring.current();
328
329        if desc.is_owned() {
330            return None;
331        }
332
333        if desc.has_error() {
334            return None;
335        }
336
337        // Complete single-descriptor frame.
338        if desc.is_first() && desc.is_last() {
339            return Some(desc.payload_length());
340        }
341
342        // Multi-descriptor: walk to find the last descriptor.
343        if desc.is_first() {
344            for i in 1..RX {
345                let idx = (self.rx_ring.current_index() + i) % RX;
346                let d = self.rx_ring.get(idx);
347
348                if d.is_owned() {
349                    return None;
350                }
351
352                if d.is_last() {
353                    return Some(d.payload_length());
354                }
355            }
356        }
357
358        None
359    }
360
361    /// Count free RX descriptors (owned by DMA, ready to receive).
362    #[must_use]
363    pub fn rx_free_count(&self) -> usize {
364        let mut count = 0;
365        for i in 0..RX {
366            if self.rx_ring.get(i).is_owned() {
367                count += 1;
368            }
369        }
370        count
371    }
372
373    /// Reset all descriptors to initial state.
374    ///
375    /// Re-initializes the chains and returns base addresses.
376    pub fn reset(&mut self) -> (u32, u32) {
377        self.init()
378    }
379
380    /// Discard the current RX frame (for errors or incomplete frames).
381    fn flush_rx_frame(&mut self) {
382        loop {
383            let desc = self.rx_ring.current();
384
385            if desc.is_owned() {
386                break;
387            }
388
389            let is_last = desc.is_last();
390            desc.recycle();
391            self.rx_ring.advance();
392
393            if is_last {
394                break;
395            }
396        }
397    }
398
399    /// RX ring base address (for debugging).
400    #[must_use]
401    pub fn rx_ring_base(&self) -> u32 {
402        self.rx_ring.base_addr() as u32
403    }
404
405    /// TX ring base address (for debugging).
406    #[must_use]
407    pub fn tx_ring_base(&self) -> u32 {
408        self.tx_ring.base_addr() as u32
409    }
410
411    /// Current RX ring index.
412    #[must_use]
413    pub fn rx_current_index(&self) -> usize {
414        self.rx_ring.current_index()
415    }
416
417    /// Current TX ring index.
418    #[must_use]
419    pub fn tx_current_index(&self) -> usize {
420        self.tx_ring.current_index()
421    }
422}
423
impl<const RX: usize, const TX: usize, const BUF: usize> Default for DmaEngine<RX, TX, BUF> {
    /// Equivalent to [`DmaEngine::new`]: zeroed and not yet initialized.
    fn default() -> Self {
        Self::new()
    }
}
429
// SAFETY: DmaEngine can be shared between threads when properly synchronized.
// NOTE(review): descriptors are mutated through `&self` (interior mutability)
// and concurrently by the DMA hardware; this impl is only sound if every
// caller externally serializes all access to the engine — confirm the
// driver layer actually guarantees that.
unsafe impl<const RX: usize, const TX: usize, const BUF: usize> Sync for DmaEngine<RX, TX, BUF> {}
432
// SAFETY: DmaEngine can be sent between threads.
// NOTE(review): after `init`, descriptors hold raw pointers into the engine's
// own buffers; moving the engine (which sending may imply) invalidates those
// pointers while DMA is active — confirm the engine lives in a fixed location
// (e.g. a `static`) before the hardware is started.
unsafe impl<const RX: usize, const TX: usize, const BUF: usize> Send for DmaEngine<RX, TX, BUF> {}
435
436// =============================================================================
437// Tests
438// =============================================================================
439
#[cfg(test)]
mod tests {
    // Host-side unit tests: no hardware is involved. "DMA completion" is
    // simulated by writing raw RDES0 values directly into descriptors.
    use super::*;
    use crate::dma::descriptor::bits::rdes0;

    // ── Initialization ────────────────────────────────────

    #[test]
    fn new_not_initialized() {
        let engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        assert!(!engine.is_initialized());
    }

    #[test]
    fn init_sets_initialized() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(engine.is_initialized());
    }

    #[test]
    fn init_returns_base_addresses() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (rx_base, tx_base) = engine.init();

        // Both addresses must be non-zero.
        assert_ne!(rx_base, 0);
        assert_ne!(tx_base, 0);

        // RX and TX rings must have different addresses.
        assert_ne!(rx_base, tx_base);
    }

    #[test]
    fn init_chains_rx_descriptors() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (rx_base, _) = engine.init();

        // After init, all RX descriptors are owned by DMA (setup_chained sets OWN).
        for i in 0..4 {
            let desc = engine.rx_ring.get(i);
            assert!(desc.is_owned(), "RX desc {} should be DMA-owned", i);
            assert_ne!(desc.buffer_addr(), 0, "RX desc {} buffer must be set", i);
            assert_ne!(desc.next_desc_addr(), 0, "RX desc {} chain must be set", i);
        }

        // Last descriptor chains back to first (circular).
        assert_eq!(engine.rx_ring.get(3).next_desc_addr(), rx_base);
    }

    #[test]
    fn init_chains_tx_descriptors() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (_, tx_base) = engine.init();

        // After init, all TX descriptors are CPU-owned (setup_chained does not set OWN).
        for i in 0..4 {
            let desc = engine.tx_ring.get(i);
            assert!(!desc.is_owned(), "TX desc {} should be CPU-owned", i);
            assert_ne!(desc.buffer_addr(), 0, "TX desc {} buffer must be set", i);
            assert_ne!(desc.next_desc_addr(), 0, "TX desc {} chain must be set", i);
        }

        // Last descriptor chains back to first (circular).
        assert_eq!(engine.tx_ring.get(3).next_desc_addr(), tx_base);
    }

    // ── Memory usage ──────────────────────────────────────

    #[test]
    fn memory_usage_calculation() {
        // 4 * 32 (rx desc) + 4 * 32 (tx desc) + 4 * 256 (rx buf) + 4 * 256 (tx buf)
        // = 128 + 128 + 1024 + 1024 = 2304
        let usage = DmaEngine::<4, 4, 256>::memory_usage();
        assert_eq!(usage, 2304);
    }

    #[test]
    fn memory_usage_scales() {
        let small = DmaEngine::<2, 2, 512>::memory_usage();
        let large = DmaEngine::<10, 10, 1600>::memory_usage();
        assert!(large > small);
    }

    // ── TX available / can_transmit ───────────────────────

    #[test]
    fn tx_available_after_init() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert_eq!(engine.tx_available(), 4);
    }

    #[test]
    fn can_transmit_empty() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(!engine.can_transmit(0));
    }

    #[test]
    fn can_transmit_single_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(engine.can_transmit(100));
        assert!(engine.can_transmit(256));
    }

    #[test]
    fn can_transmit_too_large() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        // 4 * 256 = 1024, anything above should fail.
        assert!(!engine.can_transmit(1025));
    }

    #[test]
    fn can_transmit_multi_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        // 512 bytes needs 2 descriptors of 256 each.
        assert!(engine.can_transmit(512));
        // 1024 bytes needs all 4 descriptors.
        assert!(engine.can_transmit(1024));
    }

    // ── Transmit ──────────────────────────────────────────

    #[test]
    fn transmit_single_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xABu8; 100];
        let result = engine.transmit(&data);
        assert_eq!(result, Ok(100));
    }

    #[test]
    fn transmit_sets_own_bit() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xABu8; 100];
        let _ = engine.transmit(&data);

        // After transmit, descriptor 0 should be DMA-owned.
        assert!(engine.tx_ring.get(0).is_owned());
        // TX ring should have advanced to index 1.
        assert_eq!(engine.tx_current_index(), 1);
    }

    #[test]
    fn transmit_copies_data_to_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xCA; 64];
        let _ = engine.transmit(&data);

        // Verify the data was copied to the TX buffer.
        assert_eq!(&engine.tx_buffers[0][..64], &data[..]);
    }

    #[test]
    fn transmit_scatter_gather() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // 400 bytes needs 2 descriptors (256 + 144).
        let data = [0xBBu8; 400];
        let result = engine.transmit(&data);
        assert_eq!(result, Ok(400));

        // Both descriptors should be DMA-owned.
        assert!(engine.tx_ring.get(0).is_owned());
        assert!(engine.tx_ring.get(1).is_owned());

        // Ring should have advanced by 2.
        assert_eq!(engine.tx_current_index(), 2);
    }

    #[test]
    fn transmit_when_full() {
        let mut engine: DmaEngine<2, 2, 256> = DmaEngine::new();
        let _ = engine.init();

        // Fill both TX slots.
        let data = [0xAAu8; 100];
        assert!(engine.transmit(&data).is_ok());
        assert!(engine.transmit(&data).is_ok());

        // Third transmit should fail.
        let result = engine.transmit(&data);
        assert_eq!(result, Err(EmacError::NoDescriptorsAvailable));
    }

    #[test]
    fn transmit_empty_data_returns_error() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let result = engine.transmit(&[]);
        assert_eq!(result, Err(EmacError::InvalidLength));
    }

    #[test]
    fn transmit_too_large_returns_error() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0u8; 2048]; // 4 * 256 = 1024, so 2048 is too large.
        let result = engine.transmit(&data);
        assert_eq!(result, Err(EmacError::FrameTooLarge));
    }

    // ── TX reclaim ────────────────────────────────────────

    #[test]
    fn tx_reclaim_returns_completed() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // Transmit a frame (gives descriptor 0 to DMA).
        let _ = engine.transmit(&[0xAA; 100]);
        assert!(engine.tx_ring.get(0).is_owned());

        // Simulate DMA completion by clearing OWN bit.
        engine.tx_ring.get(0).clear_owned();

        let reclaimed = engine.tx_reclaim();
        // All 4 descriptors are CPU-owned now (desc 0 cleared + 1,2,3 never submitted).
        // This pins tx_reclaim's contract: it counts ALL CPU-owned descriptors.
        assert_eq!(reclaimed, 4);
    }

    // ── RX available ──────────────────────────────────────

    #[test]
    fn rx_available_no_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // After init, all RX descriptors are DMA-owned.
        assert!(!engine.rx_available());
    }

    #[test]
    fn receive_no_frame_returns_none() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let mut buf = [0u8; 256];
        let result = engine.receive(&mut buf);
        assert_eq!(result, Ok(None));
    }

    // ── Simulated RX ──────────────────────────────────────

    /// Helper: simulate a received single-descriptor frame.
    ///
    /// Writes data into the RX buffer at `desc_index`, then sets the
    /// descriptor's RDES0 to indicate a complete frame (first+last, frame
    /// length, CPU-owned).
    fn simulate_rx_frame(engine: &mut DmaEngine<4, 4, 256>, desc_index: usize, data: &[u8]) {
        // Copy payload into the RX buffer.
        engine.rx_buffers[desc_index][..data.len()].copy_from_slice(data);

        // Frame length in RDES0 includes the 4-byte CRC.
        let frame_len_with_crc = (data.len() + 4) as u32;
        let rdes0_val =
            rdes0::FIRST_DESC | rdes0::LAST_DESC | (frame_len_with_crc << rdes0::FRAME_LEN_SHIFT);
        // OWN bit is NOT set — CPU owns it (simulates DMA completion).
        engine.rx_ring.get(desc_index).set_raw_rdes0(rdes0_val);
    }

    #[test]
    fn receive_simulated_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // Simulate receiving a 64-byte frame at descriptor 0.
        let payload = [0xDE; 64];
        simulate_rx_frame(&mut engine, 0, &payload);

        let mut buf = [0u8; 256];
        let result = engine.receive(&mut buf);
        assert_eq!(result, Ok(Some(64)));
        assert_eq!(&buf[..64], &payload[..]);
    }

    #[test]
    fn receive_advances_ring() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xAA; 32]);
        assert_eq!(engine.rx_current_index(), 0);

        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);
        assert_eq!(engine.rx_current_index(), 1);
    }

    #[test]
    fn receive_recycles_descriptor() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xAA; 32]);
        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);

        // After receive, the descriptor should be recycled (DMA-owned again).
        assert!(engine.rx_ring.get(0).is_owned());
    }

    #[test]
    fn peek_frame_length_returns_size() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xBB; 100]);

        let len = engine.peek_frame_length();
        assert_eq!(len, Some(100));

        // peek should NOT consume the frame.
        assert!(engine.rx_available());
    }

    #[test]
    fn peek_frame_length_none_when_empty() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        assert_eq!(engine.peek_frame_length(), None);
    }

    /// Simulate a two-descriptor RX frame: `desc[start]` carries
    /// `FIRST` + payload[..mid], `desc[start+1]` carries `LAST` +
    /// payload[mid..] with the total frame length in its RDES0.
    fn simulate_rx_frame_two_descriptors(
        engine: &mut DmaEngine<4, 4, 256>,
        start: usize,
        chunks: (&[u8], &[u8]),
    ) {
        let (a, b) = chunks;
        engine.rx_buffers[start][..a.len()].copy_from_slice(a);
        engine.rx_buffers[start + 1][..b.len()].copy_from_slice(b);

        // First descriptor: FIRST, no LAST, no length encoded
        // (length lives in the LAST descriptor on this Synopsys core).
        engine.rx_ring.get(start).set_raw_rdes0(rdes0::FIRST_DESC);

        // Last descriptor: LAST, no FIRST, total frame length (incl. CRC).
        let frame_len_with_crc = (a.len() + b.len() + 4) as u32;
        let rdes0_val = rdes0::LAST_DESC | (frame_len_with_crc << rdes0::FRAME_LEN_SHIFT);
        engine.rx_ring.get(start + 1).set_raw_rdes0(rdes0_val);
    }

    #[test]
    fn rx_available_true_for_errored_frame() {
        // Regression: switching `rx_available` to `peek_frame_length`
        // accidentally hid CPU-owned-but-errored descriptors. The
        // embassy driver gates on `rx_available`, so an errored frame
        // would never make it to `receive` and the descriptor would
        // sit unrecycled until something else drained the chain.
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // Mark desc[0] as CPU-owned with the ERR_SUMMARY bit set,
        // FIRST + LAST so it could be a complete (but broken) frame.
        let rdes0_val = rdes0::FIRST_DESC | rdes0::LAST_DESC | rdes0::ERR_SUMMARY | rdes0::CRC_ERR;
        engine.rx_ring.get(0).set_raw_rdes0(rdes0_val);

        // peek_frame_length still returns None for errored descriptors
        // — that's intentional, callers shouldn't get a length back.
        assert_eq!(engine.peek_frame_length(), None);
        // …but rx_available must say "yes, drain me" so the driver
        // wakes up and lets `receive` recycle the descriptor.
        assert!(
            engine.rx_available(),
            "rx_available must signal errored frames so receive can flush them"
        );
    }

    #[test]
    fn rx_available_true_for_multi_descriptor_frame() {
        // Regression: previous `rx_available` returned `!current.is_owned() &&
        // current.is_last()`, which silently said "no frame" for any RX
        // chain where the current descriptor was the FIRST (not LAST)
        // chunk. Now it must agree with `peek_frame_length`.
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let chunk_a = [0xAA; 200];
        let chunk_b = [0xBB; 100];
        simulate_rx_frame_two_descriptors(&mut engine, 0, (&chunk_a, &chunk_b));

        // `current` points at desc[0] which is FIRST but NOT LAST. The
        // pre-fix implementation returned `false` here.
        assert!(
            engine.rx_available(),
            "rx_available must report a multi-descriptor frame as ready"
        );
        assert_eq!(engine.peek_frame_length(), Some(300));
    }

    #[test]
    fn rx_free_count_after_init() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // After init, all RX descriptors are DMA-owned (free for receiving).
        assert_eq!(engine.rx_free_count(), 4);
    }

    #[test]
    fn rx_free_count_after_receive() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xCC; 48]);
        // One descriptor is now CPU-owned (received but not yet processed).
        assert_eq!(engine.rx_free_count(), 3);

        // Process it.
        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);

        // After receive, it's recycled back to DMA.
        assert_eq!(engine.rx_free_count(), 4);
    }

    #[test]
    fn receive_buffer_too_small() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xDD; 200]);

        let mut buf = [0u8; 32]; // Too small for 200 bytes.
        let result = engine.receive(&mut buf);
        assert_eq!(result, Err(EmacError::BufferTooSmall));

        // Descriptor should still be recycled after error.
        assert!(engine.rx_ring.get(0).is_owned());
    }

    // ── Reset ─────────────────────────────────────────────

    #[test]
    fn reset_reinitializes() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // Transmit something to change state.
        let _ = engine.transmit(&[0xAA; 100]);
        assert_eq!(engine.tx_current_index(), 1);

        // Reset.
        let (rx_base, tx_base) = engine.reset();
        assert_ne!(rx_base, 0);
        assert_ne!(tx_base, 0);

        // Ring indices should be back to 0.
        assert_eq!(engine.rx_current_index(), 0);
        assert_eq!(engine.tx_current_index(), 0);

        // All TX descriptors should be CPU-owned.
        assert_eq!(engine.tx_available(), 4);

        // All RX descriptors should be DMA-owned.
        assert_eq!(engine.rx_free_count(), 4);
    }

    // ── Default trait ─────────────────────────────────────

    #[test]
    fn default_trait() {
        let d1: DmaEngine<4, 4, 256> = DmaEngine::new();
        let d2: DmaEngine<4, 4, 256> = DmaEngine::default();
        assert_eq!(d1.is_initialized(), d2.is_initialized());
    }
}