1use crate::dma::descriptor::{RxDescriptor, TxDescriptor};
11use crate::dma::ring::DescriptorRing;
12use crate::error::EmacError;
13
/// Software-side DMA engine owning the RX/TX descriptor rings and the
/// frame buffers they point into.
///
/// `RX` and `TX` are the descriptor counts of the receive and transmit
/// rings; `BUF` is the size in bytes of each per-descriptor buffer. All
/// three are const generics so the whole engine has a fixed size and can
/// live in static memory (no heap).
pub struct DmaEngine<const RX: usize, const TX: usize, const BUF: usize> {
    // Receive descriptor ring; `init` chains it and hands every descriptor
    // to the DMA (OWN bit set).
    rx_ring: DescriptorRing<RxDescriptor, RX>,
    // Transmit descriptor ring; descriptors stay CPU-owned until `transmit`
    // releases them to the DMA.
    tx_ring: DescriptorRing<TxDescriptor, TX>,
    // One fixed-size buffer per RX descriptor; `init` points each RX
    // descriptor at its buffer (presumably filled by the DMA hardware —
    // this module only reads them in `receive`).
    rx_buffers: [[u8; BUF]; RX],
    // One fixed-size buffer per TX descriptor; `transmit` copies outgoing
    // frame chunks into these.
    tx_buffers: [[u8; BUF]; TX],
    // Set by `init`/`reset`; exposed via `is_initialized`.
    initialized: bool,
}
32
impl<const RX: usize, const TX: usize, const BUF: usize> DmaEngine<RX, TX, BUF> {
    /// Creates a new, un-initialized engine: zeroed buffers and
    /// default-constructed descriptors, with no chaining done yet.
    ///
    /// `const` so an engine can be placed in a `static`. Call
    /// [`Self::init`] before use.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            rx_ring: DescriptorRing::new([const { RxDescriptor::new() }; RX]),
            tx_ring: DescriptorRing::new([const { TxDescriptor::new() }; TX]),
            rx_buffers: [[0u8; BUF]; RX],
            tx_buffers: [[0u8; BUF]; TX],
            initialized: false,
        }
    }

    /// Sets up both descriptor rings in chained mode and marks the engine
    /// initialized.
    ///
    /// Every descriptor is pointed at its backing buffer and at the next
    /// descriptor, with the last wrapping back to the first. Returns
    /// `(rx_base, tx_base)` — the descriptor-list base addresses the caller
    /// is expected to program into the MAC's DMA registers.
    ///
    /// NOTE(review): the base addresses are truncated with `as u32`, which
    /// assumes descriptors live in a 32-bit address space — confirm for the
    /// target platform (host-side tests on 64-bit will silently truncate).
    pub fn init(&mut self) -> (u32, u32) {
        // Chain RX descriptors: buffer pointer + next-descriptor pointer,
        // wrapping at the ring boundary. `setup_chained` for RX also takes
        // the buffer length.
        for i in 0..RX {
            let next_idx = (i + 1) % RX;
            let buffer_ptr = self.rx_buffers[i].as_mut_ptr();
            let next_desc = self.rx_ring.get(next_idx) as *const RxDescriptor;
            self.rx_ring
                .get(i)
                .setup_chained(buffer_ptr, BUF, next_desc);
        }

        // Chain TX descriptors the same way; no length here — it is set per
        // frame in `transmit` via `prepare`.
        for i in 0..TX {
            let next_idx = (i + 1) % TX;
            let buffer_ptr = self.tx_buffers[i].as_ptr();
            let next_desc = self.tx_ring.get(next_idx) as *const TxDescriptor;
            self.tx_ring.get(i).setup_chained(buffer_ptr, next_desc);
        }

        // Rewind both ring cursors to index 0.
        self.rx_ring.reset();
        self.tx_ring.reset();
        self.initialized = true;

        let rx_base = self.rx_ring.base_addr() as u32;
        let tx_base = self.tx_ring.base_addr() as u32;
        (rx_base, tx_base)
    }

    /// Whether `init` has completed since construction.
    #[inline(always)]
    #[must_use]
    pub fn is_initialized(&self) -> bool {
        self.initialized
    }

    /// Total static memory consumed by descriptors and buffers, in bytes.
    #[must_use]
    pub const fn memory_usage() -> usize {
        let rx_desc = RX * RxDescriptor::SIZE;
        let tx_desc = TX * TxDescriptor::SIZE;
        let rx_buf = RX * BUF;
        let tx_buf = TX * BUF;
        rx_desc + tx_desc + rx_buf + tx_buf
    }

    /// Whether a frame of `len` bytes could be queued right now.
    ///
    /// Rejects empty frames and frames larger than the whole TX ring's
    /// buffer capacity, then checks that enough contiguous free
    /// descriptors exist for the required scatter-gather split.
    #[must_use]
    pub fn can_transmit(&self, len: usize) -> bool {
        if len == 0 || len > BUF * TX {
            return false;
        }
        let needed = len.div_ceil(BUF);
        self.tx_available() >= needed
    }

    /// Number of consecutive CPU-owned (free) TX descriptors starting at
    /// the ring's current index; stops at the first DMA-owned descriptor.
    #[must_use]
    pub fn tx_available(&self) -> usize {
        let mut count = 0;
        for i in 0..TX {
            let idx = (self.tx_ring.current_index() + i) % TX;
            if !self.tx_ring.get(idx).is_owned() {
                count += 1;
            } else {
                break;
            }
        }
        count
    }

    /// Queues `data` for transmission, splitting it across as many
    /// descriptors/buffers as needed (scatter-gather).
    ///
    /// Returns the number of bytes queued (always `data.len()` on success).
    ///
    /// # Errors
    /// - [`EmacError::InvalidLength`] for an empty slice.
    /// - [`EmacError::FrameTooLarge`] if `data` exceeds `BUF * TX` bytes.
    /// - [`EmacError::NoDescriptorsAvailable`] if too few free descriptors.
    /// - [`EmacError::DescriptorBusy`] if a descriptor is found DMA-owned
    ///   while filling (defensive re-check of the availability test above).
    pub fn transmit(&mut self, data: &[u8]) -> Result<usize, EmacError> {
        if data.is_empty() {
            return Err(EmacError::InvalidLength);
        }

        if data.len() > BUF * TX {
            return Err(EmacError::FrameTooLarge);
        }

        let desc_count = data.len().div_ceil(BUF);
        if self.tx_available() < desc_count {
            return Err(EmacError::NoDescriptorsAvailable);
        }

        let current = self.tx_ring.current_index();
        let mut remaining = data.len();
        let mut offset = 0usize;

        // First pass: copy each chunk into its buffer and set first/last
        // segment flags via `prepare`, but do NOT hand ownership to the
        // DMA yet.
        for i in 0..desc_count {
            let idx = (current + i) % TX;
            let desc = self.tx_ring.get(idx);

            if desc.is_owned() {
                return Err(EmacError::DescriptorBusy);
            }

            let chunk_size = core::cmp::min(remaining, BUF);
            self.tx_buffers[idx][..chunk_size].copy_from_slice(&data[offset..offset + chunk_size]);
            desc.prepare(chunk_size, i == 0, i == desc_count - 1);

            remaining -= chunk_size;
            offset += chunk_size;
        }

        // Second pass: set OWN bits in reverse order so the first
        // descriptor of the frame is released last — presumably so the DMA
        // never observes a partially prepared frame (confirm against the
        // MAC's descriptor-fetch behavior).
        for i in (0..desc_count).rev() {
            let idx = (current + i) % TX;
            self.tx_ring.get(idx).set_owned();
        }

        self.tx_ring.advance_by(desc_count);
        Ok(data.len())
    }

    /// Counts TX descriptors that are currently CPU-owned.
    ///
    /// NOTE(review): this counts every non-DMA-owned descriptor in the
    /// ring, including ones that were never queued, so the value means
    /// "free descriptors" rather than "frames completed since last call"
    /// (the unit test `tx_reclaim_returns_completed` pins this behavior:
    /// it expects 4 after a single completed frame on a 4-slot ring).
    pub fn tx_reclaim(&mut self) -> usize {
        let mut reclaimed = 0;
        for i in 0..TX {
            let idx = (self.tx_ring.current_index() + i) % TX;
            let desc = self.tx_ring.get(idx);
            if !desc.is_owned() {
                reclaimed += 1;
            }
        }
        reclaimed
    }

    /// Whether `receive` would make progress: a complete frame is ready,
    /// or an errored frame is pending (which `receive` will flush and
    /// report as [`EmacError::FrameError`]).
    #[must_use]
    pub fn rx_available(&self) -> bool {
        let desc = self.rx_ring.current();
        if desc.is_owned() {
            return false;
        }
        // Errored frames still count as "available" so the caller invokes
        // `receive`, which flushes them from the ring.
        if desc.has_error() {
            return true;
        }
        self.peek_frame_length().is_some()
    }

    /// Copies the next complete received frame into `buffer`.
    ///
    /// Returns `Ok(None)` when no complete frame is ready (nothing is
    /// consumed), or `Ok(Some(len))` with the payload length on success.
    ///
    /// # Errors
    /// - [`EmacError::FrameError`] for a hardware-flagged frame; the frame
    ///   is flushed from the ring.
    /// - [`EmacError::BufferTooSmall`] if `buffer` cannot hold the frame;
    ///   the frame is also dropped so it does not block the ring.
    pub fn receive(&mut self, buffer: &mut [u8]) -> Result<Option<usize>, EmacError> {
        let first_desc = self.rx_ring.current();

        // Still owned by the DMA: nothing received yet.
        if first_desc.is_owned() {
            return Ok(None);
        }

        // Fast path: the whole frame fits in a single descriptor.
        if first_desc.is_first() && first_desc.is_last() {
            if first_desc.has_error() {
                first_desc.recycle();
                self.rx_ring.advance();
                return Err(EmacError::FrameError);
            }

            let frame_len = first_desc.payload_length();
            if buffer.len() < frame_len {
                first_desc.recycle();
                self.rx_ring.advance();
                return Err(EmacError::BufferTooSmall);
            }

            let idx = self.rx_ring.current_index();
            buffer[..frame_len].copy_from_slice(&self.rx_buffers[idx][..frame_len]);
            first_desc.recycle();
            self.rx_ring.advance();
            return Ok(Some(frame_len));
        }

        // Out of sync: current descriptor is a continuation segment with no
        // first-segment marker — discard descriptors up to the frame
        // boundary and report nothing available.
        if !first_desc.is_first() {
            self.flush_rx_frame();
            return Ok(None);
        }

        if first_desc.has_error() {
            self.flush_rx_frame();
            return Err(EmacError::FrameError);
        }

        // Multi-descriptor frame: walk forward to the last-segment
        // descriptor, which carries the total frame length.
        let mut frame_len = 0usize;
        let mut desc_count = 0usize;
        let current = self.rx_ring.current_index();

        for i in 0..RX {
            let idx = (current + i) % RX;
            let desc = self.rx_ring.get(idx);

            // A DMA-owned descriptor mid-frame means the frame is still
            // arriving; consume nothing.
            if desc.is_owned() {
                return Ok(None);
            }

            desc_count += 1;

            if desc.is_last() {
                frame_len = desc.payload_length();
                break;
            }
        }

        // No last-segment marker found anywhere in the ring: treat as
        // incomplete. NOTE(review): if the whole ring is CPU-owned with no
        // LAST bit (corrupted state), this returns Ok(None) forever without
        // flushing — verify whether that state is reachable on hardware.
        if frame_len == 0 {
            return Ok(None);
        }

        if buffer.len() < frame_len {
            self.flush_rx_frame();
            return Err(EmacError::BufferTooSmall);
        }

        // Gather pass: full buffers for every segment but the last, the
        // remainder from the last; recycle each descriptor back to the DMA
        // as soon as it has been copied out.
        let mut copied = 0usize;
        let last_desc_i = desc_count - 1;

        for i in 0..desc_count {
            let idx = (current + i) % RX;
            let copy_len = if i == last_desc_i {
                frame_len - copied
            } else {
                BUF
            };
            // Clamp defensively so a bogus hardware length cannot overrun.
            let copy_len = core::cmp::min(copy_len, frame_len - copied);

            if copy_len > 0 {
                buffer[copied..copied + copy_len]
                    .copy_from_slice(&self.rx_buffers[idx][..copy_len]);
                copied += copy_len;
            }
            self.rx_ring.get(idx).recycle();
        }

        self.rx_ring.advance_by(desc_count);
        Ok(Some(frame_len))
    }

    /// Payload length of the next complete, error-free frame without
    /// consuming it, or `None` if no such frame is ready.
    #[must_use]
    pub fn peek_frame_length(&self) -> Option<usize> {
        let desc = self.rx_ring.current();

        if desc.is_owned() {
            return None;
        }

        // Errored frames are not peekable; `rx_available` still reports
        // them so `receive` can flush.
        if desc.has_error() {
            return None;
        }

        // Single-descriptor frame: the length lives right here.
        if desc.is_first() && desc.is_last() {
            return Some(desc.payload_length());
        }

        // Multi-descriptor frame: scan forward for the last-segment
        // descriptor, which carries the total frame length.
        if desc.is_first() {
            for i in 1..RX {
                let idx = (self.rx_ring.current_index() + i) % RX;
                let d = self.rx_ring.get(idx);

                if d.is_owned() {
                    return None;
                }

                if d.is_last() {
                    return Some(d.payload_length());
                }
            }
        }

        None
    }

    /// Number of RX descriptors currently DMA-owned, i.e. free to receive
    /// into.
    #[must_use]
    pub fn rx_free_count(&self) -> usize {
        let mut count = 0;
        for i in 0..RX {
            if self.rx_ring.get(i).is_owned() {
                count += 1;
            }
        }
        count
    }

    /// Re-runs [`Self::init`], rebuilding both rings from scratch and
    /// returning the fresh base addresses. Any in-flight frames are
    /// discarded.
    pub fn reset(&mut self) -> (u32, u32) {
        self.init()
    }

    /// Drops the in-progress RX frame: recycles descriptors back to the
    /// DMA and advances the cursor until the last-segment descriptor is
    /// consumed or a DMA-owned descriptor is reached.
    fn flush_rx_frame(&mut self) {
        loop {
            let desc = self.rx_ring.current();

            if desc.is_owned() {
                break;
            }

            let is_last = desc.is_last();
            desc.recycle();
            self.rx_ring.advance();

            if is_last {
                break;
            }
        }
    }

    /// RX descriptor ring base address (for DMA register programming).
    #[must_use]
    pub fn rx_ring_base(&self) -> u32 {
        self.rx_ring.base_addr() as u32
    }

    /// TX descriptor ring base address (for DMA register programming).
    #[must_use]
    pub fn tx_ring_base(&self) -> u32 {
        self.tx_ring.base_addr() as u32
    }

    /// Current RX ring cursor index (mostly for diagnostics/tests).
    #[must_use]
    pub fn rx_current_index(&self) -> usize {
        self.rx_ring.current_index()
    }

    /// Current TX ring cursor index (mostly for diagnostics/tests).
    #[must_use]
    pub fn tx_current_index(&self) -> usize {
        self.tx_ring.current_index()
    }
}
423
424impl<const RX: usize, const TX: usize, const BUF: usize> Default for DmaEngine<RX, TX, BUF> {
425 fn default() -> Self {
426 Self::new()
427 }
428}
429
// SAFETY: NOTE(review) — `DmaEngine` holds only inline arrays and the two
// descriptor rings, with no interior-mutability types visible in this file.
// However, after `init` the descriptors are shared with the DMA hardware;
// `Sync` is only sound if `DescriptorRing`'s accessors use volatile/atomic
// access for the OWN bit — confirm in `crate::dma::ring` before sharing an
// engine across threads/interrupt contexts.
unsafe impl<const RX: usize, const TX: usize, const BUF: usize> Sync for DmaEngine<RX, TX, BUF> {}

// SAFETY: NOTE(review) — `init` stores absolute addresses of `self`'s
// buffers and descriptors into the descriptor chain, so moving an engine
// (which `Send` permits) after `init` leaves those pointers dangling.
// Verify that callers either pin the engine in a `static` or re-run `init`
// after any move before the DMA is started.
unsafe impl<const RX: usize, const TX: usize, const BUF: usize> Send for DmaEngine<RX, TX, BUF> {}
435
#[cfg(test)]
mod tests {
    use super::*;
    use crate::dma::descriptor::bits::rdes0;

    // ---- construction & initialization ----

    #[test]
    fn new_not_initialized() {
        let engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        assert!(!engine.is_initialized());
    }

    #[test]
    fn init_sets_initialized() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(engine.is_initialized());
    }

    #[test]
    fn init_returns_base_addresses() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (rx_base, tx_base) = engine.init();

        // Both rings must have a real address, and distinct ones.
        assert_ne!(rx_base, 0);
        assert_ne!(tx_base, 0);

        assert_ne!(rx_base, tx_base);
    }

    #[test]
    fn init_chains_rx_descriptors() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (rx_base, _) = engine.init();

        // Every RX descriptor is handed to the DMA with buffer and chain
        // pointers populated.
        for i in 0..4 {
            let desc = engine.rx_ring.get(i);
            assert!(desc.is_owned(), "RX desc {} should be DMA-owned", i);
            assert_ne!(desc.buffer_addr(), 0, "RX desc {} buffer must be set", i);
            assert_ne!(desc.next_desc_addr(), 0, "RX desc {} chain must be set", i);
        }

        // Last descriptor wraps back to the ring base.
        assert_eq!(engine.rx_ring.get(3).next_desc_addr(), rx_base);
    }

    #[test]
    fn init_chains_tx_descriptors() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let (_, tx_base) = engine.init();

        // TX descriptors start CPU-owned (nothing queued) but fully chained.
        for i in 0..4 {
            let desc = engine.tx_ring.get(i);
            assert!(!desc.is_owned(), "TX desc {} should be CPU-owned", i);
            assert_ne!(desc.buffer_addr(), 0, "TX desc {} buffer must be set", i);
            assert_ne!(desc.next_desc_addr(), 0, "TX desc {} chain must be set", i);
        }

        assert_eq!(engine.tx_ring.get(3).next_desc_addr(), tx_base);
    }

    // ---- memory accounting ----

    #[test]
    fn memory_usage_calculation() {
        // 4 RX + 4 TX descriptors plus 8 × 256-byte buffers = 2304 bytes.
        let usage = DmaEngine::<4, 4, 256>::memory_usage();
        assert_eq!(usage, 2304);
    }

    #[test]
    fn memory_usage_scales() {
        let small = DmaEngine::<2, 2, 512>::memory_usage();
        let large = DmaEngine::<10, 10, 1600>::memory_usage();
        assert!(large > small);
    }

    // ---- transmit path ----

    #[test]
    fn tx_available_after_init() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert_eq!(engine.tx_available(), 4);
    }

    #[test]
    fn can_transmit_empty() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(!engine.can_transmit(0));
    }

    #[test]
    fn can_transmit_single_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(engine.can_transmit(100));
        assert!(engine.can_transmit(256));
    }

    #[test]
    fn can_transmit_too_large() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        // 4 × 256 = 1024 bytes is the hard capacity limit.
        assert!(!engine.can_transmit(1025));
    }

    #[test]
    fn can_transmit_multi_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();
        assert!(engine.can_transmit(512));
        assert!(engine.can_transmit(1024));
    }

    #[test]
    fn transmit_single_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xABu8; 100];
        let result = engine.transmit(&data);
        assert_eq!(result, Ok(100));
    }

    #[test]
    fn transmit_sets_own_bit() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xABu8; 100];
        let _ = engine.transmit(&data);

        // Descriptor handed to the DMA, cursor advanced past it.
        assert!(engine.tx_ring.get(0).is_owned());
        assert_eq!(engine.tx_current_index(), 1);
    }

    #[test]
    fn transmit_copies_data_to_buffer() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xCA; 64];
        let _ = engine.transmit(&data);

        assert_eq!(&engine.tx_buffers[0][..64], &data[..]);
    }

    #[test]
    fn transmit_scatter_gather() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // 400 bytes needs two 256-byte descriptors.
        let data = [0xBBu8; 400];
        let result = engine.transmit(&data);
        assert_eq!(result, Ok(400));

        assert!(engine.tx_ring.get(0).is_owned());
        assert!(engine.tx_ring.get(1).is_owned());

        assert_eq!(engine.tx_current_index(), 2);
    }

    #[test]
    fn transmit_when_full() {
        let mut engine: DmaEngine<2, 2, 256> = DmaEngine::new();
        let _ = engine.init();

        let data = [0xAAu8; 100];
        assert!(engine.transmit(&data).is_ok());
        assert!(engine.transmit(&data).is_ok());

        // Both descriptors are now DMA-owned; a third frame must fail.
        let result = engine.transmit(&data);
        assert_eq!(result, Err(EmacError::NoDescriptorsAvailable));
    }

    #[test]
    fn transmit_empty_data_returns_error() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let result = engine.transmit(&[]);
        assert_eq!(result, Err(EmacError::InvalidLength));
    }

    #[test]
    fn transmit_too_large_returns_error() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // 2048 > the 1024-byte total ring capacity.
        let data = [0u8; 2048];
        let result = engine.transmit(&data);
        assert_eq!(result, Err(EmacError::FrameTooLarge));
    }

    #[test]
    fn tx_reclaim_returns_completed() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let _ = engine.transmit(&[0xAA; 100]);
        assert!(engine.tx_ring.get(0).is_owned());

        // Simulate the hardware completing the frame.
        engine.tx_ring.get(0).clear_owned();

        // All four descriptors are CPU-owned again (tx_reclaim counts free
        // descriptors, not just the one that completed).
        let reclaimed = engine.tx_reclaim();
        assert_eq!(reclaimed, 4);
    }

    // ---- receive path ----

    #[test]
    fn rx_available_no_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        assert!(!engine.rx_available());
    }

    #[test]
    fn receive_no_frame_returns_none() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let mut buf = [0u8; 256];
        let result = engine.receive(&mut buf);
        assert_eq!(result, Ok(None));
    }

    /// Pretends the hardware delivered `data` as one complete frame into
    /// descriptor `desc_index`: copies the payload into the RX buffer and
    /// writes an RDES0 with FIRST+LAST set and the frame length (payload +
    /// 4-byte CRC) in the length field. The raw write leaves the OWN bit
    /// clear, i.e. the descriptor becomes CPU-owned.
    fn simulate_rx_frame(engine: &mut DmaEngine<4, 4, 256>, desc_index: usize, data: &[u8]) {
        engine.rx_buffers[desc_index][..data.len()].copy_from_slice(data);

        // Hardware-reported length includes the 4-byte FCS/CRC.
        let frame_len_with_crc = (data.len() + 4) as u32;
        let rdes0_val =
            rdes0::FIRST_DESC | rdes0::LAST_DESC | (frame_len_with_crc << rdes0::FRAME_LEN_SHIFT);
        engine.rx_ring.get(desc_index).set_raw_rdes0(rdes0_val);
    }

    #[test]
    fn receive_simulated_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let payload = [0xDE; 64];
        simulate_rx_frame(&mut engine, 0, &payload);

        let mut buf = [0u8; 256];
        let result = engine.receive(&mut buf);
        assert_eq!(result, Ok(Some(64)));
        assert_eq!(&buf[..64], &payload[..]);
    }

    #[test]
    fn receive_advances_ring() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xAA; 32]);
        assert_eq!(engine.rx_current_index(), 0);

        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);
        assert_eq!(engine.rx_current_index(), 1);
    }

    #[test]
    fn receive_recycles_descriptor() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xAA; 32]);
        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);

        // Descriptor returned to the DMA for the next frame.
        assert!(engine.rx_ring.get(0).is_owned());
    }

    #[test]
    fn peek_frame_length_returns_size() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xBB; 100]);

        let len = engine.peek_frame_length();
        assert_eq!(len, Some(100));

        // Peeking must not consume the frame.
        assert!(engine.rx_available());
    }

    #[test]
    fn peek_frame_length_none_when_empty() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        assert_eq!(engine.peek_frame_length(), None);
    }

    /// Pretends the hardware delivered one frame split across two
    /// descriptors starting at `start`: the first carries only FIRST_DESC,
    /// the second carries LAST_DESC plus the total length (payload +
    /// 4-byte CRC).
    fn simulate_rx_frame_two_descriptors(
        engine: &mut DmaEngine<4, 4, 256>,
        start: usize,
        chunks: (&[u8], &[u8]),
    ) {
        let (a, b) = chunks;
        engine.rx_buffers[start][..a.len()].copy_from_slice(a);
        engine.rx_buffers[start + 1][..b.len()].copy_from_slice(b);

        engine.rx_ring.get(start).set_raw_rdes0(rdes0::FIRST_DESC);

        let frame_len_with_crc = (a.len() + b.len() + 4) as u32;
        let rdes0_val = rdes0::LAST_DESC | (frame_len_with_crc << rdes0::FRAME_LEN_SHIFT);
        engine.rx_ring.get(start + 1).set_raw_rdes0(rdes0_val);
    }

    #[test]
    fn rx_available_true_for_errored_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // A complete frame flagged with error-summary + CRC error.
        let rdes0_val = rdes0::FIRST_DESC | rdes0::LAST_DESC | rdes0::ERR_SUMMARY | rdes0::CRC_ERR;
        engine.rx_ring.get(0).set_raw_rdes0(rdes0_val);

        // Peek refuses errored frames, but availability must still be
        // reported so the caller invokes `receive`, which flushes them.
        assert_eq!(engine.peek_frame_length(), None);
        assert!(
            engine.rx_available(),
            "rx_available must signal errored frames so receive can flush them"
        );
    }

    #[test]
    fn rx_available_true_for_multi_descriptor_frame() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let chunk_a = [0xAA; 200];
        let chunk_b = [0xBB; 100];
        simulate_rx_frame_two_descriptors(&mut engine, 0, (&chunk_a, &chunk_b));

        assert!(
            engine.rx_available(),
            "rx_available must report a multi-descriptor frame as ready"
        );
        assert_eq!(engine.peek_frame_length(), Some(300));
    }

    #[test]
    fn rx_free_count_after_init() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        assert_eq!(engine.rx_free_count(), 4);
    }

    #[test]
    fn rx_free_count_after_receive() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        // One descriptor becomes CPU-owned (pending frame) …
        simulate_rx_frame(&mut engine, 0, &[0xCC; 48]);
        assert_eq!(engine.rx_free_count(), 3);

        // … and is recycled back to the DMA after receive.
        let mut buf = [0u8; 256];
        let _ = engine.receive(&mut buf);

        assert_eq!(engine.rx_free_count(), 4);
    }

    #[test]
    fn receive_buffer_too_small() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        simulate_rx_frame(&mut engine, 0, &[0xDD; 200]);

        let mut buf = [0u8; 32];
        let result = engine.receive(&mut buf);
        assert_eq!(result, Err(EmacError::BufferTooSmall));

        // The oversized frame is dropped, not left blocking the ring.
        assert!(engine.rx_ring.get(0).is_owned());
    }

    // ---- reset & misc ----

    #[test]
    fn reset_reinitializes() {
        let mut engine: DmaEngine<4, 4, 256> = DmaEngine::new();
        let _ = engine.init();

        let _ = engine.transmit(&[0xAA; 100]);
        assert_eq!(engine.tx_current_index(), 1);

        let (rx_base, tx_base) = engine.reset();
        assert_ne!(rx_base, 0);
        assert_ne!(tx_base, 0);

        // Both rings rewound and fully available again.
        assert_eq!(engine.rx_current_index(), 0);
        assert_eq!(engine.tx_current_index(), 0);

        assert_eq!(engine.tx_available(), 4);

        assert_eq!(engine.rx_free_count(), 4);
    }

    #[test]
    fn default_trait() {
        let d1: DmaEngine<4, 4, 256> = DmaEngine::new();
        let d2: DmaEngine<4, 4, 256> = DmaEngine::default();
        assert_eq!(d1.is_initialized(), d2.is_initialized());
    }
}