// e1000_driver/e1000/e1000.rs
use super::e1000_const::*;
use super::super::Ext;
use super::super::Volatile;
use alloc::vec::Vec;
use core::{cmp::min, mem::size_of, slice::from_raw_parts_mut};
use crate::utils::*;

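// Each ring holds 256 descriptors; each packet buffer is 2 KiB, comfortably
// larger than a maximum-size 1518-byte Ethernet frame.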
const TX_RING_SIZE: usize = 256;
const RX_RING_SIZE: usize = 256;
const MBUF_SIZE: usize = 2048;

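/// Services the driver needs from the host kernel: `dma_alloc_coherent`
/// returns a (virtual address, DMA/bus address) pair for `pages` pages of
/// physically contiguous memory suitable for device DMA, and
/// `dma_free_coherent` releases it.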
pub trait KernelFunc {
    const PAGE_SIZE: usize = 4096;

    fn dma_alloc_coherent(&mut self, pages: usize) -> (usize, usize);

    fn dma_free_coherent(&mut self, vaddr: usize, pages: usize);
}

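/// Driver instance: the MMIO register window plus the TX/RX descriptor rings
/// (with their DMA addresses) and the virtual addresses of the packet buffers.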
pub struct E1000Device<'a, K: KernelFunc> {
    regs: &'static mut [Volatile<u32>],
    rx_ring_dma: usize,
    tx_ring_dma: usize,
    rx_ring: &'a mut [RxDesc],
    tx_ring: &'a mut [TxDesc],
    rx_mbufs: Vec<usize>,
    tx_mbufs: Vec<usize>,
    mbuf_size: usize,
    kfn: K,
}

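// Legacy transmit descriptor layout; see the Intel 8254x software
// developer's manual, section 3.3.3.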
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct TxDesc {
    addr: u64,
    length: u16,
    cso: u8,
    cmd: u8,
    status: u8,
    css: u8,
    special: u16,
}

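// Legacy receive descriptor layout; see section 3.2.3 of the same manual.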
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct RxDesc {
    addr: u64,
    length: u16,
    csum: u16,
    status: u8,
    errors: u8,
    special: u16,
}

impl<'a, K: KernelFunc> E1000Device<'a, K> {
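    /// Allocates the DMA-coherent descriptor rings and packet buffers, maps
    /// the register window, and runs the hardware init sequence.
    /// `mapped_regs` is the virtual address of the device's MMIO BAR.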
    pub fn new(mut kfn: K, mapped_regs: usize) -> Result<Self, i32> {
        info!("New E1000 device @ {:#x}", mapped_regs);
        // Round the ring sizes up to whole pages for the DMA allocator.
        let alloc_tx_ring_pages =
            ((TX_RING_SIZE * size_of::<TxDesc>()) + (K::PAGE_SIZE - 1)) / K::PAGE_SIZE;
        let alloc_rx_ring_pages =
            ((RX_RING_SIZE * size_of::<RxDesc>()) + (K::PAGE_SIZE - 1)) / K::PAGE_SIZE;
        let (tx_ring_vaddr, tx_ring_dma) = kfn.dma_alloc_coherent(alloc_tx_ring_pages);
        let (rx_ring_vaddr, rx_ring_dma) = kfn.dma_alloc_coherent(alloc_rx_ring_pages);

        let tx_ring = unsafe { from_raw_parts_mut(tx_ring_vaddr as *mut TxDesc, TX_RING_SIZE) };
        let rx_ring = unsafe { from_raw_parts_mut(rx_ring_vaddr as *mut RxDesc, RX_RING_SIZE) };

        tx_ring.fill(TxDesc {
            addr: 0,
            length: 0,
            cso: 0,
            cmd: 0,
            status: 0,
            css: 0,
            special: 0,
        });
        rx_ring.fill(RxDesc {
            addr: 0,
            length: 0,
            csum: 0,
            status: 0,
            errors: 0,
            special: 0,
        });

        let mut tx_mbufs = Vec::with_capacity(tx_ring.len());
        let mut rx_mbufs = Vec::with_capacity(rx_ring.len());

        let alloc_tx_buffer_pages =
            ((TX_RING_SIZE * MBUF_SIZE) + (K::PAGE_SIZE - 1)) / K::PAGE_SIZE;
        let (mut tx_mbufs_vaddr, mut tx_mbufs_dma) = kfn.dma_alloc_coherent(alloc_tx_buffer_pages);
        if tx_mbufs_vaddr == 0 {
            panic!("e1000, alloc dma tx buffer failed");
        }

        // Mark every TX slot "descriptor done" so the first transmit finds it free.
        for i in 0..TX_RING_SIZE {
            tx_ring[i].status = E1000_TXD_STAT_DD as u8;
            tx_ring[i].addr = tx_mbufs_dma as u64;
            tx_mbufs.push(tx_mbufs_vaddr);
            tx_mbufs_dma += MBUF_SIZE;
            tx_mbufs_vaddr += MBUF_SIZE;
        }

        let alloc_rx_buffer_pages =
            ((RX_RING_SIZE * MBUF_SIZE) + (K::PAGE_SIZE - 1)) / K::PAGE_SIZE;
        let (mut rx_mbufs_vaddr, mut rx_mbufs_dma) = kfn.dma_alloc_coherent(alloc_rx_buffer_pages);
        if rx_mbufs_vaddr == 0 {
            panic!("e1000, alloc dma rx buffer failed");
        }

        for i in 0..RX_RING_SIZE {
            rx_ring[i].addr = rx_mbufs_dma as u64;
            rx_mbufs.push(rx_mbufs_vaddr);
            rx_mbufs_dma += MBUF_SIZE;
            rx_mbufs_vaddr += MBUF_SIZE;
        }

        // The device exposes a 128 KiB register window; view it as u32 slots.
        let len = 0x1FFFF / size_of::<u32>();
        let regs = unsafe { from_raw_parts_mut(mapped_regs as *mut Volatile<u32>, len) };

        let mut e1000dev = E1000Device {
            regs,
            rx_ring_dma,
            tx_ring_dma,
            rx_ring,
            tx_ring,
            rx_mbufs,
            tx_mbufs,
            mbuf_size: MBUF_SIZE,
            kfn,
        };
        e1000dev.e1000_init();

        Ok(e1000dev)
    }

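    /// Brings the NIC up: reset, program the TX/RX descriptor rings, set the
    /// receive address, then enable the transmitter, the receiver, and the
    /// receive-timer interrupt.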
    pub fn e1000_init(&mut self) {
        let stat = self.regs[E1000_STAT].read();
        let ctl = self.regs[E1000_CTL].read();
        info!("e1000 CTL: {:#x}, Status: {:#x}", ctl, stat);

        // Mask interrupts, reset the device, then mask again, since the
        // reset restores the registers to their defaults.
        self.regs[E1000_IMS].write(0);
        self.regs[E1000_CTL].write(ctl | E1000_CTL_RST);
        self.regs[E1000_IMS].write(0);
        fence_w();

        // The ring's byte length must be a multiple of 128 (hardware requirement).
        if (self.tx_ring.len() * size_of::<TxDesc>()) % 128 != 0 {
            error!("e1000, size of tx_ring is invalid");
        }
        self.regs[E1000_TDBAL].write(self.tx_ring_dma as u32);
        self.regs[E1000_TDLEN].write((self.tx_ring.len() * size_of::<TxDesc>()) as u32);
        self.regs[E1000_TDT].write(0);
        self.regs[E1000_TDH].write(0);

        if (self.rx_ring.len() * size_of::<RxDesc>()) % 128 != 0 {
            error!("e1000, size of rx_ring is invalid");
        }
        self.regs[E1000_RDBAL].write(self.rx_ring_dma as u32);
        self.regs[E1000_RDH].write(0);
        // Keep the tail one slot behind the head so the ring never appears full.
        self.regs[E1000_RDT].write((RX_RING_SIZE - 1) as u32);
        self.regs[E1000_RDLEN].write((self.rx_ring.len() * size_of::<RxDesc>()) as u32);

        // Receive address 0: MAC 52:54:00:12:34:56 (QEMU's default), with the
        // Address Valid bit (bit 31) set in the high word.
        self.regs[E1000_RA].write(0x12005452);
        self.regs[E1000_RA + 1].write(0x5634 | (1 << 31));
        // Clear the 4096-bit multicast table array.
        for i in 0..(4096 / 32) {
            self.regs[E1000_MTA + i].write(0);
        }

        // Transmitter: enable, pad short packets, standard collision
        // threshold and distance.
        self.regs[E1000_TCTL].write(
            E1000_TCTL_EN
                | E1000_TCTL_PSP
                | (0x10 << E1000_TCTL_CT_SHIFT)
                | (0x40 << E1000_TCTL_COLD_SHIFT),
        );
        // Inter-packet gap timing: IPGT = 10, IPGR1 = 8, IPGR2 = 6.
        self.regs[E1000_TIPG].write(10 | (8 << 10) | (6 << 20));
        // Receiver: enable, accept broadcast, 2048-byte buffers, strip the CRC.
        self.regs[E1000_RCTL].write(
            E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | E1000_RCTL_SECRC,
        );

        // No interrupt throttling; enable only the receive-timer interrupt,
        // then clear anything already pending.
        self.regs[E1000_TIDV].write(0);
        self.regs[E1000_TADV].write(0);
        self.regs[E1000_RDTR].write(0);
        self.regs[E1000_RADV].write(0);
        self.regs[E1000_IMS].write(1 << 7); // RXT0
        self.regs[E1000_ICR].read();
        self.e1000_write_flush();
        info!("e1000_init has been completed");
    }

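    /// Places one packet in the TX slot at the current tail and hands it to
    /// the hardware. Returns the number of bytes queued, or -1 if the slot
    /// has not been released by the NIC yet.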
    pub fn e1000_transmit(&mut self, packet: &[u8]) -> i32 {
        let tindex = self.regs[E1000_TDT].read() as usize;
        info!("Read E1000_TDT = {:#x}", tindex);
        // DD is set once the hardware has written back this descriptor.
        if (self.tx_ring[tindex].status & E1000_TXD_STAT_DD as u8) == 0 {
            error!("E1000 has not finished the previous transmission in this slot");
            return -1;
        }

        let mut length = packet.len();
        if length > self.mbuf_size {
            error!("The packet to be sent is TOO LARGE ({} bytes); truncating", length);
            length = min(length, self.mbuf_size);
        }

        let mbuf = unsafe { from_raw_parts_mut(self.tx_mbufs[tindex] as *mut u8, length) };
        mbuf.copy_from_slice(&packet[..length]);

        info!(">>>>>>>>> TX PKT {}", length);
        info!("\n\r");
        // RS requests a status write-back; EOP marks the end of the packet.
        self.tx_ring[tindex].length = length as u16;
        self.tx_ring[tindex].status = 0;
        self.tx_ring[tindex].cmd = (E1000_TXD_CMD_RS | E1000_TXD_CMD_EOP) as u8;

        // Advancing the tail hands the descriptor to the hardware.
        self.regs[E1000_TDT].write(((tindex + 1) % TX_RING_SIZE) as u32);

        self.e1000_write_flush();
        fence_w();

        length as i32
    }

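    /// Collects every frame the hardware has completed since the last call,
    /// recycling each descriptor as it goes. Returns None if nothing arrived.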
    pub fn e1000_recv(&mut self) -> Option<Vec<Vec<u8>>> {
        let mut recv_packets = Vec::new();
        // The descriptor just past the tail is the first the hardware may
        // have completed.
        let mut rindex = (self.regs[E1000_RDT].read() as usize + 1) % RX_RING_SIZE;
        while (self.rx_ring[rindex].status & E1000_RXD_STAT_DD as u8) != 0 {
            info!("Read E1000_RDT + 1 = {:#x}", rindex);
            let len = self.rx_ring[rindex].length as usize;
            let mbuf = unsafe { from_raw_parts_mut(self.rx_mbufs[rindex] as *mut u8, len) };
            info!("RX PKT {} <<<<<<<<<", len);
            recv_packets.push(mbuf.to_vec());

            net_rx(mbuf);

            // Scrub the start of the buffer before recycling it.
            fence();
            mbuf[..min(64, len)].fill(0);

            // Return the descriptor to the hardware by moving the tail past it.
            self.rx_ring[rindex].status = 0;
            self.regs[E1000_RDT].write(rindex as u32);

            self.e1000_write_flush();
            fence_w();

            rindex = (rindex + 1) % RX_RING_SIZE;
        }
        info!("e1000_recv\n\r");

        if !recv_packets.is_empty() {
            Some(recv_packets)
        } else {
            None
        }
    }

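    /// Masks all interrupt causes; IMC is the "clear" counterpart of IMS.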
    pub fn e1000_irq_disable(&mut self) {
        self.regs[E1000_IMC].write(!0);
        self.e1000_write_flush();
    }

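    /// Unmasks the interrupt causes in IMS_ENABLE_MASK.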
    pub fn e1000_irq_enable(&mut self) {
        self.regs[E1000_IMS].write(IMS_ENABLE_MASK);
        self.e1000_write_flush();
    }

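    /// Reads STATUS to force posted MMIO writes out to the device.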
    pub fn e1000_write_flush(&mut self) {
        self.regs[E1000_STAT].read();
    }

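    /// Fires a link-status-change interrupt in software via ICS.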
    pub fn e1000_cause_lsc_int(&mut self) {
        self.regs[E1000_ICS].write(E1000_ICR_LSC);
    }

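    /// Returns the pending interrupt causes; reading ICR also acknowledges
    /// (clears) them.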
    pub fn e1000_intr(&mut self) -> u32 {
        self.regs[E1000_ICR].read()
    }
}

/// Hook for handing a received frame to the network stack; a no-op stub here.
pub fn net_rx(_packet: &mut [u8]) {}