stm32h7_ethernet/
ethernet.rs

//! Ethernet PHY layer for the STM32H7
//!
//! As well as this implementation, another notable implementation can
//! be found as part of the [quartiq/stabilizer] project. The two
//! implementations were developed independently, but both in the same
//! year (2019) and they have many similarities.
//!
//! In particular, reference @cjbe's [notes] on ordering accesses to
//! the DMA descriptors.
//!
//! > The CPU is allowed to access normal memory writes out-of-order. Here
//! > the write to the OWN flag in the DMA descriptor (normal memory) was
//! > placed after the DMA tail pointer advance (in device memory, so not
//! > reorderable). This meant the ethernet DMA engine stalled as it saw a
//! > descriptor it did not own, and only restarted and sent the packet when
//! > the next packet was released.
//! >
//! > This fix will work as long as the CPU data cache is disabled. If we
//! > want to enable the cache, the simplest method would be to mark SRAM3
//! > as uncacheable via the MPU.
//!
//! [quartiq/stabilizer]: https://github.com/quartiq/stabilizer
//! [notes]: https://github.com/quartiq/stabilizer/commit/ab1735950b2108eaa8d51eb63efadcd2e25c35c4

25use smoltcp::{
26    self,
27    phy::{self, DeviceCapabilities},
28    time::Instant,
29    wire::EthernetAddress,
30};
31use stm32h7xx_hal::stm32;
32
33use crate::ETH_PHY_ADDR;
34use crate::{StationManagement, PHY};
35
// Buffer sizing: 6 (DMAC) + 6 (SMAC) + 4 (802.1Q tag) + 2 (ethernet
// type II) + 1500 (IP MTU) + 4 (CRC) + 2 (padding)
const ETH_BUF_SIZE: usize = 1536;
// Number of transmit descriptors in the ring
const ETH_NUM_TD: usize = 4;
// Number of receive descriptors in the ring
const ETH_NUM_RD: usize = 4;
41
#[allow(dead_code)]
mod cr_consts {
    //! Values for the CR (clock range) field of the MAC MII address
    //! register, selecting the MDC clock divider appropriate for a
    //! given HCLK frequency. Listed in ascending HCLK order.

    /// For HCLK 20-35 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_16: u8 = 2;
    /// For HCLK 35-60 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_26: u8 = 3;
    /// For HCLK 60-100 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_42: u8 = 0;
    /// For HCLK 100-150 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_62: u8 = 1;
    /// For HCLK 150-250 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_102: u8 = 4;
    /// For HCLK 250-300 MHz
    pub const ETH_MACMIIAR_CR_HCLK_DIV_124: u8 = 5;
}
use self::cr_consts::*;

// Clock range selection for the MAC MII address register.
// This design runs eth_hclk (the AHB clock) at 200 MHz, which falls
// in the 150-250 MHz bracket.
const CLOCK_RANGE: u8 = ETH_MACMIIAR_CR_HCLK_DIV_102;
62
/// Transmit and Receive Descriptor fields
#[allow(dead_code)]
mod emac_consts {
    // Bits shared between TDES3 and RDES3
    /// OWN: descriptor is owned by the DMA engine
    pub const EMAC_DES3_OWN: u32 = 0x8000_0000;
    /// CTXT: context descriptor
    pub const EMAC_DES3_CTXT: u32 = 0x4000_0000;
    /// FD: first descriptor of a packet
    pub const EMAC_DES3_FD: u32 = 0x2000_0000;
    /// LD: last descriptor of a packet
    pub const EMAC_DES3_LD: u32 = 0x1000_0000;
    /// ES: error summary
    pub const EMAC_DES3_ES: u32 = 0x0000_8000;

    // Transmit descriptor fields
    /// IOC: interrupt on completion
    pub const EMAC_TDES2_IOC: u32 = 0x8000_0000;
    /// B1L: buffer 1 length mask
    pub const EMAC_TDES2_B1L: u32 = 0x0000_3FFF;

    // Receive descriptor fields
    /// IOC: interrupt on completion
    pub const EMAC_RDES3_IOC: u32 = 0x4000_0000;
    /// BUF1V: buffer 1 address is valid
    pub const EMAC_RDES3_BUF1V: u32 = 0x0100_0000;
    /// PL: packet length mask
    pub const EMAC_RDES3_PL: u32 = 0x0000_7FFF;

    /// BUF1AP: buffer 1 address pointer (occupies the whole word)
    pub const EMAC_DES0_BUF1AP: u32 = 0xFFFF_FFFF;
}
use self::emac_consts::*;
79
/// Transmit Descriptor representation
///
/// * tdes0: transmit buffer address
/// * tdes1: (unused by this driver; always written as 0)
/// * tdes2: buffer lengths
/// * tdes3: control and payload/frame length
///
/// Note that Copy and Clone are derived to support initialising an
/// array of TDes, but you may not move a TDes after its address has
/// been given to the ETH_DMA engine.
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct TDes {
    tdes0: u32,
    tdes1: u32,
    tdes2: u32,
    tdes3: u32,
}

impl TDes {
    /// Reset all four descriptor words to zero.
    ///
    /// Clearing tdes3 also clears the OWN bit, so after this call the
    /// descriptor is owned by the CPU. No buffer address is stored
    /// here; that happens later, in `TDesRing::release`.
    pub fn init(&mut self) {
        self.tdes0 = 0;
        self.tdes1 = 0;
        self.tdes2 = 0;
        self.tdes3 = 0; // Owned by us
    }

    /// Return true if this TDes is not currently owned by the DMA
    pub fn available(&self) -> bool {
        self.tdes3 & EMAC_DES3_OWN == 0
    }
}
113
114/// Store a ring of TDes and associated buffers
115#[repr(C, packed)]
116struct TDesRing {
117    td: [TDes; ETH_NUM_TD],
118    tbuf: [[u32; ETH_BUF_SIZE / 4]; ETH_NUM_TD],
119    tdidx: usize,
120}
121
122impl TDesRing {
123    const fn new() -> Self {
124        Self {
125            td: [TDes {
126                tdes0: 0,
127                tdes1: 0,
128                tdes2: 0,
129                tdes3: 0,
130            }; ETH_NUM_TD],
131            tbuf: [[0; ETH_BUF_SIZE / 4]; ETH_NUM_TD],
132            tdidx: 0,
133        }
134    }
135
136    /// Initialise this TDesRing. Assume TDesRing is corrupt
137    ///
138    /// The current memory address of the buffers inside this TDesRing
139    /// will be stored in the descriptors, so ensure the TDesRing is
140    /// not moved after initialisation.
141    pub fn init(&mut self) {
142        for td in self.td.iter_mut() {
143            td.init();
144        }
145        self.tdidx = 0;
146
147        cortex_m::interrupt::free(|_cs| unsafe {
148            let dma = &*stm32::ETHERNET_DMA::ptr();
149
150            dma.dmactx_dlar
151                .write(|w| w.bits(&self.td as *const _ as u32));
152
153            dma.dmactx_rlr
154                .write(|w| w.tdrl().bits(self.td.len() as u16 - 1));
155
156            dma.dmactx_dtpr
157                .write(|w| w.bits(&self.td[0] as *const _ as u32));
158        });
159    }
160
161    /// Return true if a TDes is available for use
162    pub fn available(&self) -> bool {
163        self.td[self.tdidx].available()
164    }
165
166    /// Release the next TDes to the DMA engine for transmission
167    pub fn release(&mut self) {
168        let x = self.tdidx;
169        assert!(self.td[x].tdes3 & EMAC_DES3_OWN == 0); // Owned by us
170
171        // unsafe: tbuf is actually aligned, but with repr(packed) the
172        // compiler cannot infer this
173        let address = unsafe { self.tbuf[x].as_ptr() as u32 };
174
175        // Read format
176        self.td[x].tdes0 = address; // Buffer 1
177        self.td[x].tdes1 = 0; // Not used
178        assert!(self.td[x].tdes2 & !EMAC_TDES2_B1L == 0); // Not used
179        assert!(self.td[x].tdes2 & EMAC_TDES2_B1L > 0); // Length must be valid
180        self.td[x].tdes3 = 0;
181        self.td[x].tdes3 |= EMAC_DES3_FD; // FD: Contains first buffer of packet
182        self.td[x].tdes3 |= EMAC_DES3_LD; // LD: Contains last buffer of packet
183        self.td[x].tdes3 |= EMAC_DES3_OWN; // Give the DMA engine ownership
184
185        // Move the tail pointer (TPR) to the next descriptor
186        let x = (x + 1) % ETH_NUM_TD;
187        cortex_m::interrupt::free(|_cs| unsafe {
188            let dma = &*stm32::ETHERNET_DMA::ptr();
189
190            // Ensure changes to the descriptor are committed before
191            // DMA engine sees tail pointer store
192            cortex_m::asm::dsb();
193
194            dma.dmactx_dtpr
195                .write(|w| w.bits(&(self.td[x]) as *const _ as u32));
196        });
197
198        self.tdidx = x;
199    }
200
201    /// Access the buffer pointed to by the next TDes
202    pub unsafe fn buf_as_slice_mut(&mut self, length: usize) -> &mut [u8] {
203        let x = self.tdidx;
204
205        // Set address in descriptor
206        self.td[x].tdes0 = self.tbuf[x].as_ptr() as u32; // Buffer 1
207
208        // Set length in descriptor
209        let len = core::cmp::min(length, ETH_BUF_SIZE);
210        self.td[x].tdes2 = (length as u32) & EMAC_TDES2_B1L;
211
212        // Return buffer slice
213        let addr = self.tbuf[x].as_ptr() as *mut _;
214        core::slice::from_raw_parts_mut(addr, len)
215    }
216}
217
/// Receive Descriptor representation
///
/// * rdes0: receive buffer address
/// * rdes1: (unused by this driver; always written as 0)
/// * rdes2: (unused by this driver; always written as 0)
/// * rdes3: OWN and Status
///
/// Note that Copy and Clone are derived to support initialising an
/// array of RDes, but you may not move a RDes after its address has
/// been given to the ETH_DMA engine.
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct RDes {
    rdes0: u32,
    rdes1: u32,
    rdes2: u32,
    rdes3: u32,
}

impl RDes {
    /// Reset all four descriptor words to zero.
    ///
    /// Clearing rdes3 also clears the OWN bit, so after this call the
    /// descriptor is owned by the CPU, not the DMA engine.
    pub fn init(&mut self) {
        self.rdes0 = 0;
        self.rdes1 = 0;
        self.rdes2 = 0;
        self.rdes3 = 0; // Owned by us
    }

    /// Return true if this RDes is acceptable to us
    pub fn valid(&self) -> bool {
        // Write-back descriptor is valid if:
        //
        // Contains first buffer of packet AND contains last buf of
        // packet AND no errors AND not a context descriptor
        self.rdes3
            & (EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_ES | EMAC_DES3_CTXT)
            == (EMAC_DES3_FD | EMAC_DES3_LD)
    }

    /// Return true if this RDes is not currently owned by the DMA
    pub fn available(&self) -> bool {
        self.rdes3 & EMAC_DES3_OWN == 0 // Owned by us
    }
}
262
/// Store a ring of RDes and associated buffers
///
/// The DMA engine walks `rd` as a circular list of ETH_NUM_RD
/// descriptors; each descriptor is pointed at the matching `rbuf`
/// entry when it is released back to the DMA engine.
#[repr(C, packed)]
struct RDesRing {
    rd: [RDes; ETH_NUM_RD],
    rbuf: [[u32; ETH_BUF_SIZE / 4]; ETH_NUM_RD],
    rdidx: usize, // index of the descriptor currently owned by the CPU
}

impl RDesRing {
    /// Create a zeroed ring. `const` so an RDesRing can live in a
    /// `static`.
    const fn new() -> Self {
        Self {
            rd: [RDes {
                rdes0: 0,
                rdes1: 0,
                rdes2: 0,
                rdes3: 0,
            }; ETH_NUM_RD],
            rbuf: [[0; ETH_BUF_SIZE / 4]; ETH_NUM_RD],
            rdidx: 0,
        }
    }

    /// Initialise this RDesRing. Assume RDesRing is corrupt
    ///
    /// The current memory address of the buffers inside this RDesRing
    /// will be stored in the descriptors, so ensure the RDesRing is
    /// not moved after initialisation.
    pub fn init(&mut self) {
        for rd in self.rd.iter_mut() {
            rd.init();
        }
        self.rdidx = 0;

        // Initialise pointers in the DMA engine
        cortex_m::interrupt::free(|_cs| unsafe {
            let dma = &*stm32::ETHERNET_DMA::ptr();

            // Descriptor list base address
            dma.dmacrx_dlar
                .write(|w| w.bits(&self.rd as *const _ as u32));

            // Ring length, expressed as (number of descriptors - 1)
            dma.dmacrx_rlr
                .write(|w| w.rdrl().bits(self.rd.len() as u16 - 1));
        });

        // Release descriptors to the DMA engine
        // (all descriptors are CPU-owned after rd.init() above, so
        // this hands the whole ring over for reception)
        while self.available() {
            self.release()
        }
    }

    /// Return true if a RDes is available for use
    pub fn available(&self) -> bool {
        self.rd[self.rdidx].available()
    }

    /// Return true if current RDes is valid
    pub fn valid(&self) -> bool {
        self.rd[self.rdidx].valid()
    }

    /// Release the next RDes to the DMA engine
    pub fn release(&mut self) {
        let x = self.rdidx;
        assert!(self.rd[x].rdes3 & EMAC_DES3_OWN == 0); // Owned by us

        // unsafe: rbuf is actually aligned, but with repr(packed) the
        // compiler cannot infer this
        let address = unsafe { self.rbuf[x].as_ptr() as u32 };

        // Read format
        self.rd[x].rdes0 = address; // Buffer 1
        self.rd[x].rdes1 = 0; // Reserved
        self.rd[x].rdes2 = 0; // Marked as invalid
        self.rd[x].rdes3 = 0;
        self.rd[x].rdes3 |= EMAC_DES3_OWN; // Give the DMA engine ownership
        self.rd[x].rdes3 |= EMAC_RDES3_BUF1V; // BUF1V: 1st buffer address is valid
        self.rd[x].rdes3 |= EMAC_RDES3_IOC; // IOC: Interrupt on complete

        // Move the tail pointer (TPR) to this descriptor
        cortex_m::interrupt::free(|_cs| unsafe {
            let dma = &*stm32::ETHERNET_DMA::ptr();

            // Ensure changes to the descriptor are committed before
            // DMA engine sees tail pointer store
            cortex_m::asm::dsb();

            dma.dmacrx_dtpr
                .write(|w| w.bits(&(self.rd[x]) as *const _ as u32));
        });

        // Update active descriptor
        self.rdidx = (x + 1) % ETH_NUM_RD;
    }

    /// Access the buffer pointed to by the next RDes
    ///
    /// # Safety
    ///
    /// Ensure that release() is called between subsequent calls to this
    /// function.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn buf_as_slice_mut(&self) -> &mut [u8] {
        let x = self.rdidx;

        // Write-back format
        let addr = self.rbuf[x].as_ptr() as *mut u8;
        // PL field: number of bytes written into this buffer
        let len = (self.rd[x].rdes3 & EMAC_RDES3_PL) as usize;

        // Defensive clamp: never return a slice longer than the buffer
        let len = core::cmp::min(len, ETH_BUF_SIZE);
        core::slice::from_raw_parts_mut(addr, len)
    }
}
375
/// Combined transmit and receive descriptor rings.
///
/// `new()` is `const` so a DesRing can be placed in a `static`. The
/// descriptor rings store their own buffer addresses when initialised
/// (see `ethernet_init`), so a DesRing must not be moved afterwards.
pub struct DesRing {
    tx: TDesRing,
    rx: RDesRing,
}
impl DesRing {
    /// Create a zeroed DesRing (const, suitable for statics).
    pub const fn new() -> DesRing {
        DesRing {
            tx: TDesRing::new(),
            rx: RDesRing::new(),
        }
    }
}
388
///
/// Ethernet DMA
///
/// Owns the ETHERNET_DMA peripheral together with a borrow of the
/// descriptor rings; the lifetime `'a` ties this driver to the
/// DesRing it was initialised with.
pub struct EthernetDMA<'a> {
    ring: &'a mut DesRing,
    eth_dma: stm32::ETHERNET_DMA,
}
396
///
/// Ethernet MAC
///
/// Owns the ETHERNET_MAC peripheral; used for station management
/// (SMI / MDIO) accesses to the PHY.
pub struct EthernetMAC {
    eth_mac: stm32::ETHERNET_MAC,
}
403
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals.
///
/// Brings up the PHY.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
pub unsafe fn ethernet_init(
    eth_mac: stm32::ETHERNET_MAC,
    eth_mtl: stm32::ETHERNET_MTL,
    eth_dma: stm32::ETHERNET_DMA,
    ring: &mut DesRing,
    mac_addr: EthernetAddress,
) -> (EthernetDMA, EthernetMAC) {
    // RCC
    {
        let rcc = &*stm32::RCC::ptr();
        let syscfg = &*stm32::SYSCFG::ptr();

        // SYSCFG must be clocked before the PHY interface can be
        // selected in PMCR below
        rcc.apb4enr.modify(|_, w| w.syscfgen().set_bit());
        // Enable the MAC, MAC-TX and MAC-RX kernel clocks
        rcc.ahb1enr.modify(|_, w| {
            w.eth1macen()
                .set_bit()
                .eth1txen()
                .set_bit()
                .eth1rxen()
                .set_bit()
        });
        syscfg.pmcr.modify(|_, w| w.epis().bits(0b100)); // RMII
    }

    // reset ETH_MAC - write 1 then 0
    //rcc.ahb1rstr.modify(|_, w| w.eth1macrst().set_bit());
    //rcc.ahb1rstr.modify(|_, w| w.eth1macrst().clear_bit());

    cortex_m::interrupt::free(|_cs| {
        // reset ETH_DMA - write 1 and wait for 0
        eth_dma.dmamr.modify(|_, w| w.swr().set_bit());
        while eth_dma.dmamr.read().swr().bit_is_set() {}

        // 1us tick counter: (AHB clock in MHz) - 1, for a
        // 200 MHz eth_hclk
        eth_mac
            .mac1ustcr
            .modify(|_, w| w.tic_1us_cntr().bits(200 - 1));

        // Configuration Register
        eth_mac.maccr.modify(|_, w| {
            w.arpen()
                .clear_bit()
                .ipc()
                .set_bit()
                .ipg()
                .bits(0b000) // 96 bit
                .ecrsfd()
                .clear_bit()
                .dcrs()
                .clear_bit()
                .bl()
                .bits(0b00) // 19
                .prelen()
                .bits(0b00) // 7
                // CRC stripping for Type frames
                .cst()
                .set_bit()
                // Fast Ethernet speed
                .fes()
                .set_bit()
                // Duplex mode
                .dm()
                .set_bit()
                // Automatic pad/CRC stripping
                .acs()
                .set_bit()
                // Retry disable in half-duplex mode
                .dr()
                .set_bit()
        });
        // Extended configuration: all extensions disabled
        eth_mac.macecr.modify(|_, w| {
            w.eipgen()
                .clear_bit()
                .usp()
                .clear_bit()
                .spen()
                .clear_bit()
                .dcrcc()
                .clear_bit()
        });
        // Set the MAC address (first four octets, little-endian, in
        // the low register; remaining two in the high register)
        eth_mac.maca0lr.write(|w| {
            w.addrlo().bits(
                u32::from(mac_addr.0[0])
                    | (u32::from(mac_addr.0[1]) << 8)
                    | (u32::from(mac_addr.0[2]) << 16)
                    | (u32::from(mac_addr.0[3]) << 24),
            )
        });
        eth_mac.maca0hr.write(
            |w| {
                w.addrhi().bits(
                    u16::from(mac_addr.0[4]) | (u16::from(mac_addr.0[5]) << 8),
                )
            }, //.sa().clear_bit()
               //.mbc().bits(0b000000)
        );
        // frame filter register
        eth_mac.macpfr.modify(|_, w| {
            w.dntu()
                .clear_bit()
                .ipfe()
                .clear_bit()
                .vtfe()
                .clear_bit()
                .hpf()
                .clear_bit()
                .saf()
                .clear_bit()
                .saif()
                .clear_bit()
                .pcf()
                .bits(0b00)
                .dbf()
                .clear_bit()
                .pm()
                .clear_bit()
                .daif()
                .clear_bit()
                .hmc()
                .clear_bit()
                .huc()
                .clear_bit()
                // Receive All
                .ra()
                .set_bit()
                // Promiscuous mode
                .pr()
                .clear_bit()
        });
        // Watchdog disabled
        eth_mac.macwtr.write(|w| w.pwe().clear_bit());
        // Flow Control Register
        eth_mac.macqtx_fcr.modify(|_, w| {
            // Pause time
            w.pt().bits(0x100)
        });
        eth_mac.macrx_fcr.modify(|_, w| w);
        // MTL receive queue operating mode
        eth_mtl.mtlrx_qomr.modify(|_, w| {
            w
                // Receive store and forward
                .rsf()
                .set_bit()
                // Dropping of TCP/IP checksum error frames disable
                .dis_tcp_ef()
                .clear_bit()
                // Forward error frames
                .fep()
                .clear_bit()
                // Forward undersized good packets
                .fup()
                .clear_bit()
        });
        // MTL transmit queue operating mode
        eth_mtl.mtltx_qomr.modify(|_, w| {
            w
                // Transmit store and forward
                .tsf()
                .set_bit()
        });

        // operation mode register
        eth_dma.dmamr.modify(|_, w| {
            w.intm()
                .bits(0b00)
                // Rx Tx priority ratio 1:1
                .pr()
                .bits(0b000)
                .txpr()
                .clear_bit()
                .da()
                .clear_bit()
        });
        // bus mode register
        eth_dma.dmasbmr.modify(|_, w| {
            // Address-aligned beats
            w.aal()
                .set_bit()
                // Fixed burst
                .fb()
                .set_bit()
        });
        // Channel control: no extra descriptor skip, PBL not x8,
        // maximum segment size 536
        eth_dma
            .dmaccr
            .modify(|_, w| w.dsl().bits(0).pblx8().clear_bit().mss().bits(536));
        eth_dma.dmactx_cr.modify(|_, w| {
            w
                // Tx DMA PBL
                .txpbl()
                .bits(32)
                .tse()
                .clear_bit()
                // Operate on second frame
                .osf()
                .clear_bit()
        });

        eth_dma.dmacrx_cr.modify(|_, w| {
            w
                // receive buffer size
                .rbsz()
                .bits(ETH_BUF_SIZE as u16)
                // Rx DMA PBL
                .rxpbl()
                .bits(32)
                // Disable flushing of received frames
                .rpf()
                .clear_bit()
        });

        // Initialise DMA descriptors
        ring.tx.init();
        ring.rx.init();

        // Ensure the DMA descriptors are committed
        cortex_m::asm::dsb();

        // Manage MAC transmission and reception
        eth_mac.maccr.modify(|_, w| {
            w.re()
                .bit(true) // Receiver Enable
                .te()
                .bit(true) // Transmitter Enable
        });
        eth_mtl.mtltx_qomr.modify(|_, w| w.ftq().set_bit());

        // Manage DMA transmission and reception
        eth_dma.dmactx_cr.modify(|_, w| w.st().set_bit());
        eth_dma.dmacrx_cr.modify(|_, w| w.sr().set_bit());

        // NOTE(review): DMACSR is a status register; setting TPS/RPS
        // here presumably acknowledges (write-1-to-clear) any stale
        // process-stopped flags — confirm against the reference manual
        eth_dma
            .dmacsr
            .modify(|_, w| w.tps().set_bit().rps().set_bit());
    });

    // MAC layer
    let mut mac = EthernetMAC { eth_mac };
    mac.phy_reset();
    mac.phy_init(); // Init PHY

    let dma = EthernetDMA { ring, eth_dma };

    (dma, mac)
}
659
///
/// PHY Operations
///
/// Station management (SMI / MDIO) register access for the PHY at
/// `ETH_PHY_ADDR`.
impl StationManagement for EthernetMAC {
    /// Read a register over SMI.
    ///
    /// Busy-waits until any in-flight MDIO transaction completes,
    /// starts a read of `reg`, busy-waits for completion, then
    /// returns the 16-bit result from the data register.
    fn smi_read(&mut self, reg: u8) -> u16 {
        // Wait for any previous MDIO operation to finish
        while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
        self.eth_mac.macmdioar.modify(|_, w| unsafe {
            w.pa()
                .bits(ETH_PHY_ADDR)
                .rda()
                .bits(reg)
                .goc()
                .bits(0b11) // read
                .cr()
                .bits(CLOCK_RANGE)
                .mb()
                .set_bit() // MB: start the transaction
        });
        // Wait for the read to complete
        while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
        self.eth_mac.macmdiodr.read().md().bits()
    }

    /// Write a register over SMI.
    ///
    /// Busy-waits until any in-flight MDIO transaction completes,
    /// loads `val` into the data register, then starts a write to
    /// `reg` and busy-waits until it finishes.
    fn smi_write(&mut self, reg: u8, val: u16) {
        // Wait for any previous MDIO operation to finish
        while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
        // The data register must be loaded before the write starts
        self.eth_mac
            .macmdiodr
            .write(|w| unsafe { w.md().bits(val) });
        self.eth_mac.macmdioar.modify(|_, w| unsafe {
            w.pa()
                .bits(ETH_PHY_ADDR)
                .rda()
                .bits(reg)
                .goc()
                .bits(0b01) // write
                .cr()
                .bits(CLOCK_RANGE)
                .mb()
                .set_bit() // MB: start the transaction
        });
        // Wait for the write to complete
        while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
    }
}
704
/// Define TxToken type and implement consume method
pub struct TxToken<'a>(&'a mut TDesRing);

impl<'a> phy::TxToken for TxToken<'a> {
    /// Let smoltcp write a frame of `len` bytes directly into the
    /// next transmit buffer, then release that descriptor to the DMA
    /// engine for transmission.
    fn consume<R, F>(
        self,
        _timestamp: Instant,
        len: usize,
        f: F,
    ) -> smoltcp::Result<R>
    where
        F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
    {
        assert!(len <= ETH_BUF_SIZE);

        // unsafe: the slice does not outlive this call, and the
        // descriptor is released exactly once, below
        let result = f(unsafe { self.0.buf_as_slice_mut(len) });
        // NOTE(review): the descriptor is released even when `f`
        // returns Err, so a partially-written frame would still be
        // queued for transmission — confirm this is intended
        self.0.release();
        result
    }
}
725
/// Define RxToken type and implement consume method
pub struct RxToken<'a>(&'a mut RDesRing);

impl<'a> phy::RxToken for RxToken<'a> {
    /// Let smoltcp process the received frame in place, then release
    /// the descriptor (and hence its buffer) back to the DMA engine.
    fn consume<R, F>(self, _timestamp: Instant, f: F) -> smoltcp::Result<R>
    where
        F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
    {
        // unsafe: release() is called exactly once, below, before
        // another token for this descriptor can be handed out
        let result = f(unsafe { self.0.buf_as_slice_mut() });
        self.0.release();
        result
    }
}
739
/// Implement the smoltcp Device interface
impl<'a> phy::Device<'a> for EthernetDMA<'_> {
    type RxToken = RxToken<'a>;
    type TxToken = TxToken<'a>;

    /// Describe this device's limits to smoltcp.
    fn capabilities(&self) -> DeviceCapabilities {
        let mut caps = DeviceCapabilities::default();
        // ethernet frame type II (6 smac, 6 dmac, 2 ethertype),
        // sans CRC (4), 1500 IP MTU
        caps.max_transmission_unit = 1514;
        caps.max_burst_size = Some(core::cmp::min(ETH_NUM_TD, ETH_NUM_RD));
        caps
    }

    /// Hand out a receive/transmit token pair, if possible.
    ///
    /// smoltcp requires a TxToken alongside every RxToken (so it can
    /// reply immediately), so a frame can only be received while a
    /// transmit descriptor is also free.
    fn receive(&mut self) -> Option<(RxToken, TxToken)> {
        // Skip all queued packets with errors.
        while self.ring.rx.available() && !self.ring.rx.valid() {
            warn!("Releasing invalid descriptor!");
            self.ring.rx.release()
        }

        if self.ring.rx.available() && self.ring.tx.available() {
            Some((RxToken(&mut self.ring.rx), TxToken(&mut self.ring.tx)))
        } else {
            None
        }
    }

    /// Hand out a transmit token if a transmit descriptor is free.
    fn transmit(&mut self) -> Option<TxToken> {
        if self.ring.tx.available() {
            Some(TxToken(&mut self.ring.tx))
        } else {
            None
        }
    }
}
776
impl EthernetDMA<'_> {
    /// Return the number of packets dropped since this method was
    /// last called
    ///
    /// Reads the missed-frame counter (MFC field of DMACMFCR).
    /// NOTE(review): "since last called" relies on the hardware
    /// counter being cleared on read — confirm against the reference
    /// manual.
    pub fn number_packets_dropped(&self) -> u32 {
        self.eth_dma.dmacmfcr.read().mfc().bits() as u32
    }
}
784
/// Acknowledge the ethernet DMA interrupt.
///
/// Intended to be called from the ETH interrupt service routine.
/// Writes 1s to the NIS (normal interrupt summary), RI (receive) and
/// TI (transmit) bits of DMACSR — presumably write-1-to-clear status
/// bits; confirm against the reference manual.
///
/// # Safety
///
/// Accesses the ETHERNET_DMA peripheral through its raw pointer,
/// regardless of who currently owns it.
pub unsafe fn interrupt_handler() {
    let eth_dma = &*stm32::ETHERNET_DMA::ptr();
    eth_dma
        .dmacsr
        .write(|w| w.nis().set_bit().ri().set_bit().ti().set_bit());
}
791
/// Enable ethernet DMA interrupts at the peripheral: normal interrupt
/// summary (NIE), receive (RIE) and transmit (TIE) in DMACIER.
///
/// # Safety
///
/// Accesses the ETHERNET_DMA peripheral through its raw pointer,
/// regardless of who currently owns it.
pub unsafe fn enable_interrupt() {
    let eth_dma = &*stm32::ETHERNET_DMA::ptr();
    eth_dma
        .dmacier
        .modify(|_, w| w.nie().set_bit().rie().set_bit().tie().set_bit());
}