1extern crate alloc;
2
3use alloc::boxed::Box;
4use core::mem::size_of;
5
6use dma_api::{DArray, DeviceDma, DmaDirection, DmaOp};
7use mmio_api::{Mmio, MmioAddr, MmioOp};
8use rdif_eth::{Event, IRxQueue, ITxQueue, Interface, NetError, QueueConfig};
9
10use crate::err::{Error, Result};
11
12mod descriptor;
13mod registers;
14
15use descriptor::{RxDesc, TxDesc};
16use registers::*;
17
/// Number of descriptors in each of the TX and RX rings.
const QUEUE_SIZE: usize = 256;
/// Only one queue pair is supported; both TX and RX report this id.
const QUEUE_ID0: usize = 0;
/// Maximum packet/buffer size handed to the hardware, in bytes.
// NOTE(review): presumably matches the hardware receive-buffer size
// selected by the RCTL programming in create_rx_queue — confirm.
const MAX_PACKET: usize = 2048;
21
/// Driver state for an Intel e1000-family NIC (device ids 0x100e/0x100f,
/// per `check_vid_did`).
pub struct E1000 {
    /// Typed accessor for the memory-mapped device registers.
    regs: Regs,
    /// Held only to keep the BAR mapping alive for the driver's lifetime.
    _mmio: Mmio,
    /// DMA allocator used for the descriptor rings.
    dma: DeviceDma,
    /// MAC address read from the device during `new`.
    mac: [u8; 6],
    /// Software mirror of whether interrupts were last enabled
    /// (not read back from hardware).
    irq_enabled: bool,
    /// Set once a TX queue has been handed out; prevents a second ring.
    tx_created: bool,
    /// Set once an RX queue has been handed out; prevents a second ring.
    rx_created: bool,
}
31
32impl E1000 {
33 pub fn check_vid_did(vid: u16, did: u16) -> bool {
34 vid == 0x8086 && [0x100e, 0x100f].contains(&did)
35 }
36
37 pub fn new(
38 bar_addr: impl Into<MmioAddr>,
39 bar_size: usize,
40 dma_mask: u64,
41 dma_op: &'static dyn DmaOp,
42 mmio_op: &'static dyn MmioOp,
43 ) -> Result<Self> {
44 mmio_api::init(mmio_op);
45 let mmio = mmio_api::ioremap(bar_addr.into(), bar_size)?;
46 let regs = Regs::new(mmio.as_nonnull_ptr());
47 let dma = DeviceDma::new(dma_mask, dma_op);
48
49 regs.reset();
50 regs.disable_all_irq();
51
52 regs.write(CTRL, regs.read(CTRL) | (1 << 6));
54
55 let mac = regs.mac_addr();
56
57 Ok(Self {
58 regs,
59 _mmio: mmio,
60 dma,
61 mac,
62 irq_enabled: false,
63 tx_created: false,
64 rx_created: false,
65 })
66 }
67}
68
69impl rdif_eth::DriverGeneric for E1000 {
70 fn name(&self) -> &str {
71 "eth-intel-e1000"
72 }
73}
74
impl Interface for E1000 {
    /// MAC address captured from the device in `new`.
    fn mac_address(&self) -> [u8; 6] {
        self.mac
    }

    /// Allocates the TX descriptor ring, programs it into the NIC and
    /// enables the transmitter. Only one TX queue is supported: returns
    /// `None` on a second call or if the DMA allocation fails.
    fn create_tx_queue(&mut self) -> Option<Box<dyn ITxQueue>> {
        if self.tx_created {
            return None;
        }

        // Zeroed descriptor ring; 16-byte alignment for the ring base.
        let desc = self
            .dma
            .array_zero_with_align::<TxDesc>(QUEUE_SIZE, 16, DmaDirection::Bidirectional)
            .ok()?;

        let desc_base = desc.dma_addr().as_u64();

        // Program ring base (low/high halves), ring length in bytes, and
        // reset head/tail so the ring starts out empty.
        self.regs.write(TDBAL, desc_base as u32);
        self.regs.write(TDBAH, (desc_base >> 32) as u32);
        self.regs
            .write(TDLEN, (QUEUE_SIZE * size_of::<TxDesc>()) as u32);
        self.regs.write(TDH, 0);
        self.regs.write(TDT, 0);

        // TCTL: bit 1 transmit enable, bit 3 pad short packets, collision
        // threshold 0x10, collision distance 0x40 — NOTE(review): bit
        // meanings taken from the 8254x datasheet; confirm in registers.rs.
        self.regs
            .write(TCTL, (1 << 1) | (1 << 3) | (0x10 << 4) | (0x40 << 12));
        // Inter-packet gap timings (three packed fields).
        self.regs.write(TIPG, 10 | (8 << 10) | (6 << 20));

        // The queue receives its own copy of the register accessor and the
        // DMA mask so it can operate without borrowing `self`.
        let queue = E1000TxQueue {
            regs: self.regs,
            desc,
            dma_mask: self.dma.dma_mask(),
            bus_addrs: [None; QUEUE_SIZE],
            next_submit: 0,
            next_reclaim: 0,
        };

        self.tx_created = true;
        Some(Box::new(queue))
    }

    /// Allocates the RX descriptor ring, programs it into the NIC and
    /// enables the receiver. Only one RX queue is supported: returns
    /// `None` on a second call or if the DMA allocation fails.
    fn create_rx_queue(&mut self) -> Option<Box<dyn IRxQueue>> {
        if self.rx_created {
            return None;
        }

        // Zeroed descriptor ring; 16-byte alignment for the ring base.
        let desc = self
            .dma
            .array_zero_with_align::<RxDesc>(QUEUE_SIZE, 16, DmaDirection::Bidirectional)
            .ok()?;

        let desc_base = desc.dma_addr().as_u64();

        // Program ring base (low/high halves), ring length in bytes, and
        // reset head/tail. RDT stays 0 until buffers arrive via submit().
        self.regs.write(RDBAL, desc_base as u32);
        self.regs.write(RDBAH, (desc_base >> 32) as u32);
        self.regs
            .write(RDLEN, (QUEUE_SIZE * size_of::<RxDesc>()) as u32);
        self.regs.write(RDH, 0);
        self.regs.write(RDT, 0);

        // RCTL: bit 1 receiver enable, bit 15 accept broadcast, bit 26
        // strip ethernet CRC — NOTE(review): bit meanings taken from the
        // 8254x datasheet; confirm in registers.rs.
        self.regs.write(RCTL, (1 << 1) | (1 << 15) | (1 << 26));

        // The queue receives its own copy of the register accessor and the
        // DMA mask so it can operate without borrowing `self`.
        let queue = E1000RxQueue {
            regs: self.regs,
            desc,
            dma_mask: self.dma.dma_mask(),
            bus_addrs: [None; QUEUE_SIZE],
            next_submit: 0,
            next_reclaim: 0,
        };

        self.rx_created = true;
        Some(Box::new(queue))
    }

    /// Unmasks the driver's default interrupt causes.
    fn enable_irq(&mut self) {
        self.regs.enable_default_irq();
        self.irq_enabled = true;
    }

    /// Masks all interrupt causes.
    fn disable_irq(&mut self) {
        self.regs.disable_all_irq();
        self.irq_enabled = false;
    }

    /// Software-tracked interrupt state (not read back from hardware).
    fn is_irq_enabled(&self) -> bool {
        self.irq_enabled
    }

    /// Reads the interrupt cause register and maps it to framework
    /// events: bit 0 → TX queue 0 work, bit 7 → RX queue 0 work.
    /// NOTE(review): on e1000 hardware reading ICR also acknowledges the
    /// pending causes — confirm against registers.rs.
    fn handle_irq(&mut self) -> Event {
        let mut ev = Event::none();
        let icr = self.regs.read(ICR);

        if icr & (1 << 0) != 0 {
            ev.tx_queue.insert(QUEUE_ID0);
        }
        if icr & (1 << 7) != 0 {
            ev.rx_queue.insert(QUEUE_ID0);
        }

        ev
    }
}
180
/// Software state for the single hardware TX ring.
struct E1000TxQueue {
    /// Copy of the register accessor (used for TDH/TDT).
    regs: Regs,
    /// DMA-visible array of transmit descriptors.
    desc: DArray<TxDesc>,
    /// DMA address mask advertised to callers via `config`.
    dma_mask: u64,
    /// Bus address of the buffer currently owned by each slot, if any.
    bus_addrs: [Option<u64>; QUEUE_SIZE],
    /// Index of the next descriptor to fill in `submit`.
    next_submit: usize,
    /// Index of the next descriptor to check in `reclaim`.
    next_reclaim: usize,
}
189
190impl ITxQueue for E1000TxQueue {
191 fn id(&self) -> usize {
192 QUEUE_ID0
193 }
194
195 fn config(&self) -> QueueConfig {
196 QueueConfig {
197 dma_mask: self.dma_mask,
198 align: 16,
199 buf_size: MAX_PACKET,
200 ring_size: QUEUE_SIZE,
201 }
202 }
203
204 fn submit(&mut self, bus_addr: u64, len: usize) -> core::result::Result<(), NetError> {
205 if len > MAX_PACKET {
206 return Err(NetError::Other(Box::new(Error::InvalidArgument(
207 "tx packet too large",
208 ))));
209 }
210
211 let idx = self.next_submit;
212 let next = (idx + 1) % QUEUE_SIZE;
213 let hw_head = self.regs.read(TDH) as usize;
214
215 if next == hw_head {
216 return Err(NetError::Retry);
217 }
218
219 self.desc.set(idx, TxDesc::new(bus_addr, len as u16));
220 self.bus_addrs[idx] = Some(bus_addr);
221 self.next_submit = next;
222 self.regs.write(TDT, next as u32);
223
224 Ok(())
225 }
226
227 fn reclaim(&mut self) -> Option<u64> {
228 let idx = self.next_reclaim;
229 let desc = self.desc.read(idx)?;
230 if !desc.is_done() {
231 return None;
232 }
233
234 self.next_reclaim = (idx + 1) % QUEUE_SIZE;
235 self.bus_addrs[idx].take()
236 }
237}
238
/// Software state for the single hardware RX ring.
struct E1000RxQueue {
    /// Copy of the register accessor (used for RDH/RDT).
    regs: Regs,
    /// DMA-visible array of receive descriptors.
    desc: DArray<RxDesc>,
    /// DMA address mask advertised to callers via `config`.
    dma_mask: u64,
    /// Bus address of the buffer currently owned by each slot, if any.
    bus_addrs: [Option<u64>; QUEUE_SIZE],
    /// Index of the next descriptor to fill in `submit`.
    next_submit: usize,
    /// Index of the next descriptor to check in `reclaim`.
    next_reclaim: usize,
}
247
248impl IRxQueue for E1000RxQueue {
249 fn id(&self) -> usize {
250 QUEUE_ID0
251 }
252
253 fn config(&self) -> QueueConfig {
254 QueueConfig {
255 dma_mask: self.dma_mask,
256 align: 16,
257 buf_size: MAX_PACKET,
258 ring_size: QUEUE_SIZE,
259 }
260 }
261
262 fn submit(&mut self, bus_addr: u64, len: usize) -> core::result::Result<(), NetError> {
263 if len > MAX_PACKET {
264 return Err(NetError::Other(Box::new(Error::InvalidArgument(
265 "rx buffer too large",
266 ))));
267 }
268
269 let idx = self.next_submit;
270 let next = (idx + 1) % QUEUE_SIZE;
271 let hw_head = self.regs.read(RDH) as usize;
272
273 if next == hw_head {
274 return Err(NetError::Retry);
275 }
276
277 self.desc.set(idx, RxDesc::new(bus_addr));
278 self.bus_addrs[idx] = Some(bus_addr);
279 self.next_submit = next;
280 self.regs.write(RDT, next as u32);
281
282 Ok(())
283 }
284
285 fn reclaim(&mut self) -> Option<(u64, usize)> {
286 let idx = self.next_reclaim;
287 let desc = self.desc.read(idx)?;
288 if !desc.is_done() {
289 return None;
290 }
291
292 self.next_reclaim = (idx + 1) % QUEUE_SIZE;
293 self.bus_addrs[idx]
294 .take()
295 .map(|bus_addr| (bus_addr, desc.length as usize))
296 }
297}