zynq7000_hal/eth/smoltcp.rs

//! smoltcp driver for the Zynq 7000 ethernet peripheral.
use arbitrary_int::u14;

pub use crate::eth::{EthernetId, InterruptResult};
use crate::{
    cache::{CACHE_LINE_SIZE, clean_and_invalidate_data_cache_range, invalidate_data_cache_range},
    eth::{rx_descr, tx_descr},
};

/// This interrupt handler should be called when a Gigabit Ethernet interrupt occurs.
pub fn on_interrupt(eth_id: EthernetId) -> InterruptResult {
    super::on_interrupt(eth_id, false, false)
}

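/// Receive token for a single received frame.
///
/// Holds a mutable reference to the RX descriptor list, the index of the descriptor slot the
/// frame was received into, the backing buffer and the received frame size.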
pub struct SmoltcpRxToken<'a> {
    pub(crate) descr_list: &'a mut super::rx_descr::DescriptorListWrapper<'static>,
    pub(crate) slot_index: usize,
    pub(crate) rx_buf: &'a mut super::AlignedBuffer,
    pub(crate) rx_size: usize,
}

impl embassy_net_driver::RxToken for SmoltcpRxToken<'_> {
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        self.consume(f)
    }
}

impl smoltcp::phy::RxToken for SmoltcpRxToken<'_> {
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&[u8]) -> R,
    {
        // Convert the mutable slice to an immutable slice for the closure.
        self.consume(|buf| f(&buf[..]))
    }
}

impl SmoltcpRxToken<'_> {
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        // The DMA writes the received frame directly into DDR. The L1 and L2 cache lines
        // covering the reception buffer need to be invalidated to avoid fetching stale data
        // from the cache instead of the DDR.
        let clean_invalidate_len = (self.rx_size + CACHE_LINE_SIZE - 1) & !(CACHE_LINE_SIZE - 1);
        invalidate_data_cache_range(self.rx_buf.0.as_ptr() as u32, clean_invalidate_len)
            .expect("RX buffer or buffer size not aligned to cache line size");
        log::debug!("eth rx {} bytes", self.rx_size);
        log::trace!("rx data: {:x?}", &self.rx_buf.0[0..self.rx_size]);
        let result = f(&mut self.rx_buf.0[0..self.rx_size]);
        self.descr_list.clear_slot(self.slot_index);
        // This clean should not be necessary, but without it ICMP replies were duplicated once
        // the descriptor ring wrapped. The working theory is that the embassy reception function
        // leaves data in the cache which still needs to be cleaned back to DDR.
        clean_and_invalidate_data_cache_range(self.rx_buf.0.as_ptr() as u32, clean_invalidate_len)
            .expect("RX buffer or buffer size not aligned to cache line size");
        result
    }
}

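/// Transmit token for queueing a single frame.
///
/// Holds the ethernet instance ID, a mutable reference to the TX descriptor list and the
/// TX buffers backing it.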
pub struct SmoltcpTxToken<'a> {
    pub(crate) eth_id: super::EthernetId,
    pub(crate) descr_list: &'a mut super::tx_descr::DescriptorListWrapper<'static>,
    pub(crate) tx_bufs: &'a mut [super::AlignedBuffer],
}

impl smoltcp::phy::TxToken for SmoltcpTxToken<'_> {
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        self.consume(len, f)
    }
}

impl embassy_net_driver::TxToken for SmoltcpTxToken<'_> {
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        self.consume(len, f)
    }
}

impl SmoltcpTxToken<'_> {
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        assert!(len <= super::MTU, "packet length exceeds MTU");
        // The transmit call already verified that the descriptor queue is not full.
        let tx_idx = self.descr_list.current_tx_idx();
        let buffer = self.tx_bufs.get_mut(tx_idx).unwrap();
        let addr = buffer.0.as_ptr() as u32;
        let result = f(&mut buffer.0[0..len]);
        let clean_invalidate_len = (len + CACHE_LINE_SIZE - 1) & !(CACHE_LINE_SIZE - 1);
        // The DMA reads the frame directly from DDR, so everything that might still be in the
        // L1 or L2 cache needs to be flushed to the DDR.
        clean_and_invalidate_data_cache_range(buffer.0.as_ptr() as u32, clean_invalidate_len)
            .expect("TX buffer or buffer size not aligned to cache line size");
        log::debug!("eth tx {len} bytes");
        log::trace!("tx data: {:x?}", &buffer.0[0..len]);
        self.descr_list
            .prepare_transfer_unchecked(Some(addr), u14::new(len as u16), true, false);
        let mut regs = unsafe { self.eth_id.steal_regs() };
        regs.modify_net_ctrl(|mut val| {
            val.set_start_tx(true);
            val
        });
        result
    }
}

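/// smoltcp device implementation wrapping the common driver state.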
pub struct Driver(CommonSmoltcpDriver);

impl smoltcp::phy::Device for Driver {
    type RxToken<'a>
        = SmoltcpRxToken<'a>
    where
        Self: 'a;

    type TxToken<'a>
        = SmoltcpTxToken<'a>
    where
        Self: 'a;

    fn receive(
        &mut self,
        _timestamp: smoltcp::time::Instant,
    ) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
        self.0.receive()
    }

    fn transmit(&mut self, _timestamp: smoltcp::time::Instant) -> Option<Self::TxToken<'_>> {
        self.0.transmit()
    }

    fn capabilities(&self) -> smoltcp::phy::DeviceCapabilities {
        let mut capabilities = smoltcp::phy::DeviceCapabilities::default();
        capabilities.medium = smoltcp::phy::Medium::Ethernet;
        capabilities.max_transmission_unit = super::MTU;
        capabilities.max_burst_size = Some(self.0.burst_size);
        capabilities.checksum.ipv4 = smoltcp::phy::Checksum::Both;
        capabilities.checksum.udp = smoltcp::phy::Checksum::Both;
        capabilities.checksum.tcp = smoltcp::phy::Checksum::Both;
        capabilities
    }
}

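/// Bundles the RX and TX descriptor lists together with their backing frame buffers.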
pub struct DescriptorsAndBuffers {
    pub rx_descr: super::rx_descr::DescriptorListWrapper<'static>,
    pub rx_bufs: &'static mut [super::AlignedBuffer],
    pub tx_descr: super::tx_descr::DescriptorListWrapper<'static>,
    pub tx_bufs: &'static mut [super::AlignedBuffer],
}

impl DescriptorsAndBuffers {
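    /// Creates the structure from descriptor lists and their backing buffers.
    ///
    /// Returns [None] if the number of RX or TX descriptors does not match the number of
    /// corresponding buffers.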
    pub fn new(
        rx_descr: super::rx_descr::DescriptorListWrapper<'static>,
        rx_bufs: &'static mut [super::AlignedBuffer],
        tx_descr: super::tx_descr::DescriptorListWrapper<'static>,
        tx_bufs: &'static mut [super::AlignedBuffer],
    ) -> Option<Self> {
        if rx_descr.len() != rx_bufs.len() || tx_descr.len() != tx_bufs.len() {
            return None;
        }
        Some(Self {
            rx_descr,
            rx_bufs,
            tx_descr,
            tx_bufs,
        })
    }

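    /// Length of the TX descriptor list, which determines the maximum TX burst size.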
    #[inline]
    pub fn tx_burst_len(&self) -> usize {
        self.tx_descr.len()
    }
}

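/// Driver state shared by the smoltcp and embassy-net front ends: the ethernet instance ID,
/// its MAC address, the descriptor lists with their buffers and the reported burst size.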
pub(crate) struct CommonSmoltcpDriver {
    pub eth_id: super::EthernetId,
    pub mac_addr: [u8; 6],
    pub bufs: DescriptorsAndBuffers,
    pub burst_size: usize,
}

impl CommonSmoltcpDriver {
    pub fn new(
        eth_id: super::EthernetId,
        mac_addr: [u8; 6],
        bufs: DescriptorsAndBuffers,
        burst_size: usize,
    ) -> Self {
        Self {
            burst_size,
            eth_id,
            mac_addr,
            bufs,
        }
    }

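    /// Walks the TX descriptor list and releases all slots whose transfers have completed,
    /// logging any unexpected TX or checksum generation errors.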
    pub fn handle_completed_tx_transfers(&mut self) {
        loop {
            let result = self.bufs.tx_descr.check_and_handle_completed_transfer();
            match result {
                tx_descr::BusyHandlingResult::TxError(tx_error) => {
                    // TODO: An ideal error reporting system would send a message to the user,
                    // but that requires introducing an abstraction for it. For now, unexpected
                    // checksum errors and all other errors are logged.
                    if let Some(cksum_error) = tx_error.checksum_generation {
                        // These can occur for ICMP packets.
                        if cksum_error
                            != tx_descr::TransmitChecksumGenerationStatus::PacketNotTcpUdp
                            && cksum_error
                                != tx_descr::TransmitChecksumGenerationStatus::NotVlanOrSnapOrIp
                        {
                            log::warn!("TX checksum generation error: {tx_error:?}");
                        }
                    } else {
                        log::warn!("TX error: {tx_error:?}");
                    }
                }
                tx_descr::BusyHandlingResult::SlotHandled(_) => (),
                tx_descr::BusyHandlingResult::Complete => break,
            }
        }
    }

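    /// Checks for a received frame and, if one is available, returns an RX token for it
    /// together with a TX token so the caller can answer immediately.
    ///
    /// Completed TX transfers are handled first. If the TX descriptor queue is full or no
    /// complete single-slot frame was received, [None] is returned.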
    pub fn receive(&mut self) -> Option<(SmoltcpRxToken<'_>, SmoltcpTxToken<'_>)> {
        self.handle_completed_tx_transfers();
        if self.bufs.tx_descr.full() {
            // TODO: When introducing an abstraction for notifying the user, send a message
            // for this case.
            log::warn!("TX descriptor queue is full");
            return None;
        }
        match self.bufs.rx_descr.scan_and_handle_next_received_frame(true) {
            // Nothing to do.
            rx_descr::FrameScanResult::NoFrames => None,
            rx_descr::FrameScanResult::SingleFrame {
                index,
                size,
                status,
            } => {
                log::trace!(
                    "eth rx frame, fcs status {:?}, cksum status {:?}",
                    status.fcs_status(),
                    status.type_id_match_info_or_chksum_status()
                );

                let rx_buf = self.bufs.rx_bufs.get_mut(index).unwrap();
                Some((
                    SmoltcpRxToken {
                        descr_list: &mut self.bufs.rx_descr,
                        slot_index: index,
                        rx_buf,
                        rx_size: size,
                    },
                    SmoltcpTxToken {
                        eth_id: self.eth_id,
                        descr_list: &mut self.bufs.tx_descr,
                        tx_bufs: self.bufs.tx_bufs,
                    },
                ))
            }
            rx_descr::FrameScanResult::MultiSlotFrame {
                first_slot_index: _,
                last_slot_index: _,
            } => {
                // Multi-slot frames can not be handled; this should never happen.
                None
            }
            rx_descr::FrameScanResult::BrokenFragments {
                first_slot_index: _,
                last_slot_index: _,
            } => None,
        }
    }

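    /// Returns a TX token if a TX descriptor slot is available.
    ///
    /// Completed TX transfers are handled first. If the TX descriptor queue is still full,
    /// [None] is returned.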
    pub fn transmit(&mut self) -> Option<SmoltcpTxToken<'_>> {
        // Handle any completed TX transfers first.
        self.handle_completed_tx_transfers();
        if self.bufs.tx_descr.full() {
            // TODO: When introducing an abstraction for notifying the user, send a message
            // for this case.
            log::warn!("TX descriptor queue is full");
            return None;
        }
        Some(SmoltcpTxToken {
            eth_id: self.eth_id,
            descr_list: &mut self.bufs.tx_descr,
            tx_bufs: self.bufs.tx_bufs,
        })
    }
}