1pub mod bits;
32
33use bits::{rdes0, rdes1, tdes0, tdes1};
34
35#[repr(transparent)]
/// A memory cell whose reads and writes always compile to volatile
/// accesses — used for DMA descriptor words the hardware reads/writes
/// behind the compiler's back.
pub struct VolatileCell<T: Copy> {
    // UnsafeCell grants interior mutability; all access goes through
    // read_volatile/write_volatile in the impl below.
    value: core::cell::UnsafeCell<T>,
}
47
48unsafe impl<T: Copy> Sync for VolatileCell<T> {}
51
52impl<T: Copy> VolatileCell<T> {
53 #[inline(always)]
55 pub const fn new(value: T) -> Self {
56 Self {
57 value: core::cell::UnsafeCell::new(value),
58 }
59 }
60
61 #[inline(always)]
63 pub fn get(&self) -> T {
64 unsafe { core::ptr::read_volatile(self.value.get()) }
66 }
67
68 #[inline(always)]
70 pub fn set(&self, value: T) {
71 unsafe { core::ptr::write_volatile(self.value.get(), value) }
73 }
74
75 #[inline(always)]
77 pub fn update<F>(&self, f: F)
78 where
79 F: FnOnce(T) -> T,
80 {
81 let old = self.get();
82 self.set(f(old));
83 }
84}
85
86impl<T: Copy + Default> Default for VolatileCell<T> {
87 fn default() -> Self {
88 Self::new(T::default())
89 }
90}
91
/// Transmit DMA descriptor: 8 consecutive 32-bit words, word-aligned,
/// laid out exactly as the DMA engine expects (`repr(C)`).
#[repr(C, align(4))]
pub struct TxDescriptor {
    // TDES0: status/control word (OWN, segment flags, CIC, error bits).
    tdes0: VolatileCell<u32>,
    // TDES1: buffer sizes; buffer-1 size lives in the masked low bits.
    tdes1: VolatileCell<u32>,
    // TDES2: address of the payload buffer (pointer cast to u32).
    buffer_addr: VolatileCell<u32>,
    // TDES3: address of the next descriptor (chained mode).
    next_desc_addr: VolatileCell<u32>,
    // TDES4/TDES5: unused by this driver.
    _reserved4: VolatileCell<u32>,
    _reserved5: VolatileCell<u32>,
    // TDES6/TDES7: transmit timestamp words (per field names) — unused here.
    _ts_low: VolatileCell<u32>,
    _ts_high: VolatileCell<u32>,
}
122
123#[allow(dead_code)]
124impl TxDescriptor {
125 pub const SIZE: usize = 32;
127
128 #[must_use]
130 pub const fn new() -> Self {
131 Self {
132 tdes0: VolatileCell::new(0),
133 tdes1: VolatileCell::new(0),
134 buffer_addr: VolatileCell::new(0),
135 next_desc_addr: VolatileCell::new(0),
136 _reserved4: VolatileCell::new(0),
137 _reserved5: VolatileCell::new(0),
138 _ts_low: VolatileCell::new(0),
139 _ts_high: VolatileCell::new(0),
140 }
141 }
142
143 pub fn setup_chained(&self, buffer: *const u8, next_desc: *const TxDescriptor) {
148 self.buffer_addr.set(buffer as u32);
149 self.next_desc_addr.set(next_desc as u32);
150 self.tdes0.set(tdes0::SECOND_ADDR_CHAINED);
151 self.tdes1.set(0);
152 }
153
154 #[inline(always)]
156 #[must_use]
157 pub fn is_owned(&self) -> bool {
158 (self.tdes0.get() & tdes0::OWN) != 0
159 }
160
161 #[inline(always)]
163 pub fn set_owned(&self) {
164 self.tdes0.update(|v| v | tdes0::OWN);
165 }
166
167 #[inline(always)]
169 pub fn clear_owned(&self) {
170 self.tdes0.update(|v| v & !tdes0::OWN);
171 }
172
173 pub fn prepare(&self, len: usize, first: bool, last: bool) {
185 let mut flags = tdes0::SECOND_ADDR_CHAINED | (0b11u32 << tdes0::CHECKSUM_INSERT_SHIFT);
187
188 if first {
189 flags |= tdes0::FIRST_SEGMENT;
190 }
191 if last {
192 flags |= tdes0::LAST_SEGMENT | tdes0::INTERRUPT_ON_COMPLETE;
193 }
194
195 self.tdes1.set((len as u32) & tdes1::BUFFER1_SIZE_MASK);
196 self.tdes0.set(flags);
197 }
198
199 pub fn prepare_and_submit(&self, len: usize, first: bool, last: bool) {
201 self.prepare(len, first, last);
202 self.set_owned();
203 }
204
205 #[inline(always)]
207 #[must_use]
208 pub fn has_error(&self) -> bool {
209 (self.tdes0.get() & tdes0::ERR_SUMMARY) != 0
210 }
211
212 #[inline(always)]
214 #[must_use]
215 pub fn error_flags(&self) -> u32 {
216 self.tdes0.get() & tdes0::ALL_ERRORS
217 }
218
219 #[inline(always)]
221 #[must_use]
222 pub fn buffer_addr(&self) -> u32 {
223 self.buffer_addr.get()
224 }
225
226 #[inline(always)]
228 #[must_use]
229 pub fn next_desc_addr(&self) -> u32 {
230 self.next_desc_addr.get()
231 }
232
233 pub fn reset(&self) {
235 let next = self.next_desc_addr.get();
236 self.tdes0.set(tdes0::SECOND_ADDR_CHAINED);
237 self.tdes1.set(0);
238 self.next_desc_addr.set(next);
239 }
240
241 #[inline(always)]
243 #[must_use]
244 pub fn raw_tdes0(&self) -> u32 {
245 self.tdes0.get()
246 }
247
248 #[inline(always)]
250 #[must_use]
251 pub fn raw_tdes1(&self) -> u32 {
252 self.tdes1.get()
253 }
254}
255
256impl Default for TxDescriptor {
257 fn default() -> Self {
258 Self::new()
259 }
260}
261
// SAFETY: every field is a VolatileCell, so all access is volatile, and
// software/DMA ownership is coordinated through the OWN bit. Sync/Send let
// descriptor rings live in statics and be handed between contexts.
// NOTE(review): confirm no `&mut` aliasing of a descriptor occurs once the
// DMA engine has been started.
unsafe impl Sync for TxDescriptor {}
unsafe impl Send for TxDescriptor {}
266
/// Receive DMA descriptor: 8 consecutive 32-bit words, word-aligned,
/// laid out exactly as the DMA engine expects (`repr(C)`).
#[repr(C, align(4))]
pub struct RxDescriptor {
    // RDES0: status word written by the DMA (OWN, first/last, length, errors).
    rdes0: VolatileCell<u32>,
    // RDES1: control word — chained-mode flag plus buffer-1 size.
    rdes1: VolatileCell<u32>,
    // RDES2: address of the receive buffer (pointer cast to u32).
    buffer_addr: VolatileCell<u32>,
    // RDES3: address of the next descriptor (chained mode).
    next_desc_addr: VolatileCell<u32>,
    // RDES4: extended status (per field name) — unused by this driver.
    _ext_status: VolatileCell<u32>,
    // RDES5: reserved.
    _reserved5: VolatileCell<u32>,
    // RDES6/RDES7: receive timestamp words (per field names) — unused here.
    _ts_low: VolatileCell<u32>,
    _ts_high: VolatileCell<u32>,
}
293
294#[allow(dead_code)]
295impl RxDescriptor {
296 pub const SIZE: usize = 32;
298
299 #[must_use]
301 pub const fn new() -> Self {
302 Self {
303 rdes0: VolatileCell::new(0),
304 rdes1: VolatileCell::new(0),
305 buffer_addr: VolatileCell::new(0),
306 next_desc_addr: VolatileCell::new(0),
307 _ext_status: VolatileCell::new(0),
308 _reserved5: VolatileCell::new(0),
309 _ts_low: VolatileCell::new(0),
310 _ts_high: VolatileCell::new(0),
311 }
312 }
313
314 pub fn setup_chained(
319 &self,
320 buffer: *mut u8,
321 buffer_size: usize,
322 next_desc: *const RxDescriptor,
323 ) {
324 self.buffer_addr.set(buffer as u32);
325 self.next_desc_addr.set(next_desc as u32);
326 self.rdes1
327 .set(rdes1::SECOND_ADDR_CHAINED | ((buffer_size as u32) & rdes1::BUFFER1_SIZE_MASK));
328 self.rdes0.set(rdes0::OWN);
330 }
331
332 #[inline(always)]
334 #[must_use]
335 pub fn is_owned(&self) -> bool {
336 (self.rdes0.get() & rdes0::OWN) != 0
337 }
338
339 #[inline(always)]
341 pub fn set_owned(&self) {
342 self.rdes0.set(rdes0::OWN);
343 }
344
345 #[inline(always)]
347 pub fn clear_owned(&self) {
348 self.rdes0.update(|v| v & !rdes0::OWN);
349 }
350
351 #[inline(always)]
353 #[must_use]
354 pub fn is_first(&self) -> bool {
355 (self.rdes0.get() & rdes0::FIRST_DESC) != 0
356 }
357
358 #[inline(always)]
360 #[must_use]
361 pub fn is_last(&self) -> bool {
362 (self.rdes0.get() & rdes0::LAST_DESC) != 0
363 }
364
365 #[inline(always)]
367 #[must_use]
368 pub fn is_complete_frame(&self) -> bool {
369 let status = self.rdes0.get();
370 (status & (rdes0::FIRST_DESC | rdes0::LAST_DESC)) == (rdes0::FIRST_DESC | rdes0::LAST_DESC)
371 }
372
373 #[inline(always)]
375 #[must_use]
376 pub fn has_error(&self) -> bool {
377 (self.rdes0.get() & rdes0::ERR_SUMMARY) != 0
378 }
379
380 #[inline(always)]
382 #[must_use]
383 pub fn error_flags(&self) -> u32 {
384 self.rdes0.get() & rdes0::ALL_ERRORS
385 }
386
387 #[inline(always)]
389 #[must_use]
390 pub fn frame_length(&self) -> usize {
391 ((self.rdes0.get() & rdes0::FRAME_LEN_MASK) >> rdes0::FRAME_LEN_SHIFT) as usize
392 }
393
394 #[inline(always)]
396 #[must_use]
397 pub fn payload_length(&self) -> usize {
398 self.frame_length().saturating_sub(4)
399 }
400
401 #[inline(always)]
403 #[must_use]
404 pub fn buffer_addr(&self) -> u32 {
405 self.buffer_addr.get()
406 }
407
408 #[inline(always)]
410 #[must_use]
411 pub fn next_desc_addr(&self) -> u32 {
412 self.next_desc_addr.get()
413 }
414
415 #[inline(always)]
417 #[must_use]
418 pub fn buffer_size(&self) -> usize {
419 (self.rdes1.get() & rdes1::BUFFER1_SIZE_MASK) as usize
420 }
421
422 pub fn recycle(&self) {
424 self.rdes0.set(rdes0::OWN);
425 }
426
427 #[inline(always)]
429 #[must_use]
430 pub fn raw_rdes0(&self) -> u32 {
431 self.rdes0.get()
432 }
433
434 #[inline(always)]
436 #[must_use]
437 pub fn raw_rdes1(&self) -> u32 {
438 self.rdes1.get()
439 }
440
441 #[cfg(test)]
443 pub fn set_raw_rdes0(&self, val: u32) {
444 self.rdes0.set(val);
445 }
446}
447
448impl Default for RxDescriptor {
449 fn default() -> Self {
450 Self::new()
451 }
452}
453
// SAFETY: every field is a VolatileCell, so all access is volatile, and
// software/DMA ownership is coordinated through the OWN bit. Sync/Send let
// descriptor rings live in statics and be handed between contexts.
// NOTE(review): confirm no `&mut` aliasing of a descriptor occurs once the
// DMA engine has been started.
unsafe impl Sync for RxDescriptor {}
unsafe impl Send for RxDescriptor {}
458
#[cfg(test)]
mod tests {
    use super::*;

    // ----------------------- VolatileCell -----------------------

    #[test]
    fn volatile_cell_new() {
        let cell = VolatileCell::new(42u32);
        assert_eq!(cell.get(), 42);
    }

    #[test]
    fn volatile_cell_get_set() {
        let cell = VolatileCell::new(0u32);
        assert_eq!(cell.get(), 0);
        cell.set(0xDEAD_BEEF);
        assert_eq!(cell.get(), 0xDEAD_BEEF);
    }

    #[test]
    fn volatile_cell_update() {
        // update = read, apply closure, write back.
        let cell = VolatileCell::new(0x0000_00FFu32);
        cell.update(|v| v | 0xFF00_0000);
        assert_eq!(cell.get(), 0xFF00_00FF);
    }

    #[test]
    fn volatile_cell_default() {
        let cell = VolatileCell::<u32>::default();
        assert_eq!(cell.get(), 0);
    }

    // ------------------- TxDescriptor: layout -------------------

    #[test]
    fn tx_descriptor_size() {
        // The DMA requires exactly 8 words (32 bytes) per descriptor.
        assert_eq!(core::mem::size_of::<TxDescriptor>(), 32);
        assert_eq!(TxDescriptor::SIZE, core::mem::size_of::<TxDescriptor>());
    }

    #[test]
    fn tx_descriptor_alignment() {
        assert_eq!(core::mem::align_of::<TxDescriptor>(), 4);
    }

    // ----------------- TxDescriptor: ownership ------------------

    #[test]
    fn tx_descriptor_new_not_owned() {
        let desc = TxDescriptor::new();
        assert!(!desc.is_owned());
    }

    #[test]
    fn tx_descriptor_is_owned() {
        let desc = TxDescriptor::new();
        desc.set_owned();
        assert!(desc.is_owned());
        desc.clear_owned();
        assert!(!desc.is_owned());
    }

    #[test]
    fn tdes0_own_bit() {
        let desc = TxDescriptor::new();
        desc.set_owned();
        assert_eq!(desc.raw_tdes0() & tdes0::OWN, tdes0::OWN);
        // OWN must be the top bit of TDES0.
        assert_eq!(tdes0::OWN, 1 << 31);
    }

    // ------------- TxDescriptor: setup and prepare --------------

    #[test]
    fn tx_descriptor_setup_chained() {
        let desc = TxDescriptor::new();
        let buf = [0u8; 64];
        let next = TxDescriptor::new();

        desc.setup_chained(buf.as_ptr(), &next as *const TxDescriptor);

        assert_eq!(desc.buffer_addr(), buf.as_ptr() as u32);
        assert_eq!(desc.next_desc_addr(), &next as *const TxDescriptor as u32);
        assert!(desc.raw_tdes0() & tdes0::SECOND_ADDR_CHAINED != 0);
        // setup_chained must NOT hand the descriptor to the DMA.
        assert!(!desc.is_owned());
    }

    #[test]
    fn tx_descriptor_prepare_single_frame() {
        let desc = TxDescriptor::new();
        desc.prepare(1500, true, true);

        let raw0 = desc.raw_tdes0();
        assert!(raw0 & tdes0::FIRST_SEGMENT != 0);
        assert!(raw0 & tdes0::LAST_SEGMENT != 0);
        assert!(raw0 & tdes0::INTERRUPT_ON_COMPLETE != 0);
        assert!(raw0 & tdes0::OWN == 0, "prepare must not set OWN");

        let len = desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK;
        assert_eq!(len, 1500);
    }

    #[test]
    fn tdes0_first_last_bits() {
        let desc = TxDescriptor::new();

        // First-only segment of a multi-descriptor frame.
        desc.prepare(100, true, false);
        let raw = desc.raw_tdes0();
        assert!(raw & tdes0::FIRST_SEGMENT != 0);
        assert!(raw & tdes0::LAST_SEGMENT == 0);

        // Last-only segment.
        desc.prepare(100, false, true);
        let raw = desc.raw_tdes0();
        assert!(raw & tdes0::FIRST_SEGMENT == 0);
        assert!(raw & tdes0::LAST_SEGMENT != 0);
    }

    #[test]
    fn tx_descriptor_prepare_sets_cic_full_offload() {
        let desc = TxDescriptor::new();
        desc.prepare(64, true, true);
        let raw = desc.raw_tdes0();
        let cic = (raw >> tdes0::CHECKSUM_INSERT_SHIFT) & 0x3;
        assert_eq!(cic, 0b11, "CIC must be 0b11 for full HW checksum offload");
    }

    #[test]
    fn tx_descriptor_prepare_and_submit() {
        let desc = TxDescriptor::new();
        desc.prepare_and_submit(256, true, true);
        assert!(desc.is_owned());
        assert_eq!(desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK, 256);
    }

    // -------------- TxDescriptor: errors and reset --------------

    #[test]
    fn tx_descriptor_no_errors_initially() {
        let desc = TxDescriptor::new();
        assert!(!desc.has_error());
        assert_eq!(desc.error_flags(), 0);
    }

    #[test]
    fn tx_descriptor_error_detection() {
        let desc = TxDescriptor::new();
        desc.tdes0.set(tdes0::ERR_SUMMARY | tdes0::UNDERFLOW_ERR);
        assert!(desc.has_error());
        assert!(desc.error_flags() & tdes0::UNDERFLOW_ERR != 0);
    }

    #[test]
    fn tx_descriptor_reset_preserves_chain() {
        let desc = TxDescriptor::new();
        let next_addr = 0x1234_5678u32;
        desc.next_desc_addr.set(next_addr);
        desc.prepare_and_submit(1000, true, true);

        desc.reset();

        // reset clears status/length but must keep the ring linkage.
        assert!(!desc.is_owned());
        assert_eq!(desc.raw_tdes1() & tdes1::BUFFER1_SIZE_MASK, 0);
        assert_eq!(desc.next_desc_addr(), next_addr);
        assert!(desc.raw_tdes0() & tdes0::SECOND_ADDR_CHAINED != 0);
    }

    // ------------------- RxDescriptor: layout -------------------

    #[test]
    fn rx_descriptor_size() {
        assert_eq!(core::mem::size_of::<RxDescriptor>(), 32);
        assert_eq!(RxDescriptor::SIZE, core::mem::size_of::<RxDescriptor>());
    }

    #[test]
    fn rx_descriptor_alignment() {
        assert_eq!(core::mem::align_of::<RxDescriptor>(), 4);
    }

    // ----------------- RxDescriptor: ownership ------------------

    #[test]
    fn rx_descriptor_new_not_owned() {
        let desc = RxDescriptor::new();
        assert!(!desc.is_owned());
    }

    #[test]
    fn rdes0_own_bit() {
        let desc = RxDescriptor::new();
        desc.set_owned();
        assert_eq!(desc.raw_rdes0() & rdes0::OWN, rdes0::OWN);
        // OWN must be the top bit of RDES0.
        assert_eq!(rdes0::OWN, 1 << 31);
    }

    // -------------------- RxDescriptor: setup -------------------

    #[test]
    fn rx_descriptor_setup_chained() {
        let desc = RxDescriptor::new();
        let mut buf = [0u8; 1600];
        let next = RxDescriptor::new();

        desc.setup_chained(buf.as_mut_ptr(), 1600, &next as *const RxDescriptor);

        assert_eq!(desc.buffer_addr(), buf.as_ptr() as u32);
        assert_eq!(desc.next_desc_addr(), &next as *const RxDescriptor as u32);
        assert_eq!(desc.buffer_size(), 1600);
        assert!(desc.is_owned(), "setup_chained gives to DMA");
        assert!(desc.raw_rdes1() & rdes1::SECOND_ADDR_CHAINED != 0);
    }

    // ----------------- RxDescriptor: frame status ---------------

    #[test]
    fn rx_descriptor_first_last_flags() {
        let desc = RxDescriptor::new();
        assert!(!desc.is_first());
        assert!(!desc.is_last());

        desc.rdes0.set(rdes0::FIRST_DESC | rdes0::LAST_DESC);
        assert!(desc.is_first());
        assert!(desc.is_last());
        assert!(desc.is_complete_frame());
    }

    #[test]
    fn rx_descriptor_payload_length() {
        let desc = RxDescriptor::new();

        // frame_length includes the 4-byte CRC; payload_length strips it.
        desc.rdes0.set(1504 << rdes0::FRAME_LEN_SHIFT);
        assert_eq!(desc.frame_length(), 1504);
        assert_eq!(desc.payload_length(), 1500);
    }

    #[test]
    fn rx_descriptor_payload_length_short_frame() {
        // Runt frame shorter than the CRC — must saturate to 0, not wrap.
        let desc = RxDescriptor::new();
        desc.rdes0.set(2 << rdes0::FRAME_LEN_SHIFT);
        assert_eq!(desc.payload_length(), 0);
    }

    #[test]
    fn rx_descriptor_error_detection() {
        let desc = RxDescriptor::new();
        assert!(!desc.has_error());

        desc.rdes0
            .set(rdes0::ERR_SUMMARY | rdes0::CRC_ERR | rdes0::OVERFLOW_ERR);
        assert!(desc.has_error());
        assert!(desc.error_flags() & rdes0::CRC_ERR != 0);
        assert!(desc.error_flags() & rdes0::OVERFLOW_ERR != 0);
    }

    // ------------------- RxDescriptor: recycle ------------------

    #[test]
    fn rx_descriptor_recycle() {
        let desc = RxDescriptor::new();
        desc.rdes1.set(1600);
        desc.rdes0
            .set(rdes0::FIRST_DESC | rdes0::LAST_DESC | (100 << rdes0::FRAME_LEN_SHIFT));

        desc.recycle();

        // Recycle hands the buffer back to the DMA without re-programming
        // RDES1, so the configured buffer size must survive.
        assert!(desc.is_owned());
        assert_eq!(desc.buffer_size(), 1600);
    }
}