//! Intel e1000-family Ethernet driver (`eth_intel/e1000/mod.rs`).
//!
//! Supports Intel (vendor 0x8086) devices 0x100e and 0x100f with a single
//! TX/RX queue pair over MMIO registers and DMA descriptor rings.
extern crate alloc;

use alloc::{boxed::Box, vec, vec::Vec};
use core::mem::size_of;

use dma_api::{DArray, DeviceDma, DmaDirection, DmaOp};
use mmio_api::{Mmio, MmioAddr, MmioOp};
use rdif_eth::{
    BuffConfig, Event, IRxQueue, ITxQueue, Interface, NetError, RequestId, RxRequest, RxResponse,
    TxRequest,
};

use crate::err::{Error, Result};

mod descriptor;
mod registers;

use descriptor::{RxDesc, TxDesc};
use registers::*;
20
/// Number of descriptors in each TX/RX ring.
const QUEUE_SIZE: usize = 256;
/// This driver exposes a single queue pair; both queues report this id.
const QUEUE_ID0: usize = 0;
/// Per-slot buffer capacity in bytes (matches the 2048-byte RCTL buffer
/// mode programmed in `create_rx_queue`).
const MAX_PACKET: usize = 2048;
24
/// Driver state for an Intel e1000 NIC mapped over MMIO.
pub struct E1000 {
    // Typed accessor over the device's memory-mapped register BAR.
    regs: Regs,
    // Held only to keep the MMIO mapping alive for the driver's lifetime.
    _mmio: Mmio,
    // DMA allocator used for descriptor rings and TX bounce buffers.
    dma: DeviceDma,
    // MAC address read from the device during `new`.
    mac: [u8; 6],
    // Mirrors whether interrupts were enabled via `enable_irq`.
    irq_enabled: bool,
    // Guards against creating more than one TX queue.
    tx_created: bool,
    // Guards against creating more than one RX queue.
    rx_created: bool,
}
34
35impl E1000 {
36    pub fn check_vid_did(vid: u16, did: u16) -> bool {
37        vid == 0x8086 && [0x100e, 0x100f].contains(&did)
38    }
39
40    pub fn new(
41        bar_addr: impl Into<MmioAddr>,
42        bar_size: usize,
43        dma_mask: u64,
44        dma_op: &'static dyn DmaOp,
45        mmio_op: &'static dyn MmioOp,
46    ) -> Result<Self> {
47        mmio_api::init(mmio_op);
48        let mmio = mmio_api::ioremap(bar_addr.into(), bar_size)?;
49        let regs = Regs::new(mmio.as_nonnull_ptr());
50        let dma = DeviceDma::new(dma_mask, dma_op);
51
52        regs.reset();
53        regs.disable_all_irq();
54
55        // CTRL.SLU: set link up in software for basic bring-up.
56        regs.write(CTRL, regs.read(CTRL) | (1 << 6));
57
58        let mac = regs.mac_addr();
59
60        Ok(Self {
61            regs,
62            _mmio: mmio,
63            dma,
64            mac,
65            irq_enabled: false,
66            tx_created: false,
67            rx_created: false,
68        })
69    }
70}
71
impl rdif_eth::DriverGeneric for E1000 {
    /// Stable driver name used for identification by the framework.
    fn name(&self) -> &str {
        "eth-intel-e1000"
    }
}
77
78impl Interface for E1000 {
79    fn mac_address(&self) -> [u8; 6] {
80        self.mac
81    }
82
83    fn create_tx_queue(&mut self) -> Option<Box<dyn ITxQueue>> {
84        if self.tx_created {
85            return None;
86        }
87
88        let desc = self
89            .dma
90            .array_zero_with_align::<TxDesc>(QUEUE_SIZE, 16, DmaDirection::Bidirectional)
91            .ok()?;
92
93        let tx_buf = self
94            .dma
95            .array_zero_with_align::<[u8; MAX_PACKET]>(QUEUE_SIZE, 16, DmaDirection::Bidirectional)
96            .ok()?;
97
98        let desc_base = desc.dma_addr().as_u64();
99        let _buf_base = tx_buf.dma_addr().as_u64();
100
101        self.regs.write(TDBAL, desc_base as u32);
102        self.regs.write(TDBAH, (desc_base >> 32) as u32);
103        self.regs
104            .write(TDLEN, (QUEUE_SIZE * size_of::<TxDesc>()) as u32);
105        self.regs.write(TDH, 0);
106        self.regs.write(TDT, 0);
107
108        // TCTL.EN + TCTL.PSP + CT + COLD, typical minimal values.
109        self.regs
110            .write(TCTL, (1 << 1) | (1 << 3) | (0x10 << 4) | (0x40 << 12));
111        self.regs.write(TIPG, 10 | (8 << 10) | (6 << 20));
112
113        let queue = E1000TxQueue {
114            regs: self.regs,
115            desc,
116            tx_buf,
117            reqs: vec![None; QUEUE_SIZE],
118            next_req: 1,
119            next_submit: 0,
120        };
121
122        self.tx_created = true;
123        Some(Box::new(queue))
124    }
125
126    fn create_rx_queue(&mut self) -> Option<Box<dyn IRxQueue>> {
127        if self.rx_created {
128            return None;
129        }
130
131        let desc = self
132            .dma
133            .array_zero_with_align::<RxDesc>(QUEUE_SIZE, 16, DmaDirection::Bidirectional)
134            .ok()?;
135
136        let desc_base = desc.dma_addr().as_u64();
137
138        self.regs.write(RDBAL, desc_base as u32);
139        self.regs.write(RDBAH, (desc_base >> 32) as u32);
140        self.regs
141            .write(RDLEN, (QUEUE_SIZE * size_of::<RxDesc>()) as u32);
142        self.regs.write(RDH, 0);
143        self.regs.write(RDT, 0);
144
145        // RCTL.EN + BAM + SECRC (2048-byte buffer mode).
146        self.regs.write(RCTL, (1 << 1) | (1 << 15) | (1 << 26));
147
148        let queue = E1000RxQueue {
149            regs: self.regs,
150            desc,
151            reqs: vec![None; QUEUE_SIZE],
152            next_req: 1,
153            next_submit: 0,
154        };
155
156        self.rx_created = true;
157        Some(Box::new(queue))
158    }
159
160    fn enable_irq(&mut self) {
161        self.regs.enable_default_irq();
162        self.irq_enabled = true;
163    }
164
165    fn disable_irq(&mut self) {
166        self.regs.disable_all_irq();
167        self.irq_enabled = false;
168    }
169
170    fn is_irq_enabled(&self) -> bool {
171        self.irq_enabled
172    }
173
174    fn handle_irq(&mut self) -> Event {
175        let mut ev = Event::none();
176        let icr = self.regs.read(ICR);
177
178        if icr & (1 << 0) != 0 {
179            ev.tx_queue.insert(QUEUE_ID0);
180        }
181        if icr & (1 << 7) != 0 {
182            ev.rx_queue.insert(QUEUE_ID0);
183        }
184
185        ev
186    }
187}
188
/// Single TX ring: hardware descriptors plus per-slot bounce buffers.
struct E1000TxQueue {
    // Register accessor shared with the owning `E1000`.
    regs: Regs,
    // TX descriptor ring (QUEUE_SIZE entries).
    desc: DArray<TxDesc>,
    // One MAX_PACKET bounce buffer per ring slot; frames are copied in
    // at submit time.
    tx_buf: DArray<[u8; MAX_PACKET]>,
    // Outstanding request id per slot; `None` once completed or unused.
    reqs: Vec<Option<RequestId>>,
    // Monotonic counter used to mint the next RequestId (starts at 1).
    next_req: usize,
    // Ring index of the next slot software will fill.
    next_submit: usize,
}
197
198impl E1000TxQueue {
199    fn desc_dma_addr(&self, idx: usize) -> u64 {
200        self.tx_buf.dma_addr().as_u64() + (idx * MAX_PACKET) as u64
201    }
202}
203
204impl ITxQueue for E1000TxQueue {
205    fn id(&self) -> usize {
206        QUEUE_ID0
207    }
208
209    fn mtu(&self) -> usize {
210        1500
211    }
212
213    fn buff_config(&self) -> BuffConfig {
214        BuffConfig {
215            dma_mask: u64::MAX,
216            align: 16,
217            size: MAX_PACKET,
218        }
219    }
220
221    fn submit_request(
222        &mut self,
223        request: TxRequest<'_>,
224    ) -> core::result::Result<RequestId, NetError> {
225        if request.data.len() > MAX_PACKET {
226            return Err(NetError::Other(Box::new(Error::InvalidArgument(
227                "tx packet too large",
228            ))));
229        }
230
231        let idx = self.next_submit;
232        let next = (idx + 1) % QUEUE_SIZE;
233        let hw_head = self.regs.read(TDH) as usize;
234
235        if next == hw_head {
236            return Err(NetError::Retry);
237        }
238
239        let mut slot = self
240            .tx_buf
241            .read(idx)
242            .ok_or_else(|| NetError::Other(Box::new(Error::Other("invalid tx slot"))))?;
243        slot[..request.data.len()].copy_from_slice(request.data);
244        self.tx_buf.set(idx, slot);
245
246        let desc = TxDesc::new(self.desc_dma_addr(idx), request.data.len() as u16);
247        self.desc.set(idx, desc);
248
249        let req_id = RequestId::new(self.next_req);
250        self.next_req += 1;
251        self.reqs[idx] = Some(req_id);
252
253        self.next_submit = next;
254        self.regs.write(TDT, next as u32);
255
256        Ok(req_id)
257    }
258
259    fn poll_request(&mut self, request: RequestId) -> core::result::Result<(), NetError> {
260        for idx in 0..QUEUE_SIZE {
261            if self.reqs[idx] != Some(request) {
262                continue;
263            }
264
265            let desc = self
266                .desc
267                .read(idx)
268                .ok_or_else(|| NetError::Other(Box::new(Error::Other("invalid tx desc"))))?;
269            if desc.is_done() {
270                self.reqs[idx] = None;
271                return Ok(());
272            }
273
274            return Err(NetError::Retry);
275        }
276
277        Err(NetError::Other(Box::new(Error::Other("request not found"))))
278    }
279}
280
/// Single RX ring. Unlike TX, receive buffers are caller-owned: each
/// descriptor points at the DMA address supplied in the `RxRequest`.
struct E1000RxQueue {
    // Register accessor shared with the owning `E1000`.
    regs: Regs,
    // RX descriptor ring (QUEUE_SIZE entries).
    desc: DArray<RxDesc>,
    // Outstanding request id per slot; `None` once completed or unused.
    reqs: Vec<Option<RequestId>>,
    // Monotonic counter used to mint the next RequestId (starts at 1).
    next_req: usize,
    // Ring index of the next slot software will fill.
    next_submit: usize,
}
288
289impl IRxQueue for E1000RxQueue {
290    fn id(&self) -> usize {
291        QUEUE_ID0
292    }
293
294    fn mtu(&self) -> usize {
295        1500
296    }
297
298    fn buff_config(&self) -> BuffConfig {
299        BuffConfig {
300            dma_mask: u64::MAX,
301            align: 16,
302            size: MAX_PACKET,
303        }
304    }
305
306    fn submit_request(&mut self, request: RxRequest) -> core::result::Result<RequestId, NetError> {
307        let idx = self.next_submit;
308        let next = (idx + 1) % QUEUE_SIZE;
309        let hw_head = self.regs.read(RDH) as usize;
310
311        if next == hw_head {
312            return Err(NetError::Retry);
313        }
314
315        let desc = RxDesc::new(request.buffer.bus);
316        self.desc.set(idx, desc);
317
318        let req_id = RequestId::new(self.next_req);
319        self.next_req += 1;
320        self.reqs[idx] = Some(req_id);
321
322        self.next_submit = next;
323        self.regs.write(RDT, next as u32);
324
325        Ok(req_id)
326    }
327
328    fn poll_request(&mut self, request: RequestId) -> core::result::Result<RxResponse, NetError> {
329        for idx in 0..QUEUE_SIZE {
330            if self.reqs[idx] != Some(request) {
331                continue;
332            }
333
334            let desc = self
335                .desc
336                .read(idx)
337                .ok_or_else(|| NetError::Other(Box::new(Error::Other("invalid rx desc"))))?;
338            if desc.is_done() {
339                self.reqs[idx] = None;
340                return Ok(RxResponse {
341                    len: desc.length as usize,
342                });
343            }
344
345            return Err(NetError::Retry);
346        }
347
348        Err(NetError::Other(Box::new(Error::Other("request not found"))))
349    }
350}