1use crate::descriptor::{AdvancedRxDescriptor, AdvancedTxDescriptor, RX_STATUS_DD, RX_STATUS_EOP};
61use crate::interrupts::Interrupts;
62use crate::memory::{alloc_pkt, Dma, MemPool, Packet, PACKET_HEADROOM};
63use crate::NicDevice;
64use crate::{constants::*, hal::IxgbeHal};
65use crate::{IxgbeError, IxgbeResult};
66use alloc::boxed::Box;
67use alloc::sync::Arc;
68use alloc::{collections::VecDeque, vec::Vec};
69use core::marker::PhantomData;
70use core::ptr::NonNull;
71use core::time::Duration;
72use core::{mem, ptr};
73use smoltcp::wire::{EthernetFrame, PrettyPrinter};
74
/// Driver name reported through `NicDevice::get_driver_name`.
const DRIVER_NAME: &str = "ixgbe";

/// Hardware limit on the number of RX/TX queues for this controller.
const MAX_QUEUES: u16 = 64;

/// Bytes per packet-buffer entry in a mempool.
/// NOTE(review): not referenced in this chunk — presumably used by the mempool code; verify.
const PKT_BUF_ENTRY_SIZE: usize = 2048;
/// Minimum number of entries a mempool must provide.
/// NOTE(review): not referenced in this chunk — presumably enforced at pool creation; verify.
const MIN_MEMPOOL_SIZE: usize = 4096;

/// Number of TX descriptors reclaimed per step in `recycle_tx_buffers`.
const TX_CLEAN_BATCH: usize = 32;
85
/// Advances `index` by one slot in a ring of `ring_size` entries, wrapping
/// back to 0 at the end.
///
/// `ring_size` must be a power of two — the wrap is done with a bit mask,
/// not a modulo (queue-start code validates this invariant).
fn wrap_ring(index: usize, ring_size: usize) -> usize {
    let next = index + 1;
    next & (ring_size - 1)
}
89
/// Driver state for one Intel 82599 (ixgbe) NIC.
///
/// `QS` is the compile-time number of descriptors per queue; it must be a
/// power of two (checked when the queues are started).
pub struct IxgbeDevice<H: IxgbeHal, const QS: usize> {
    // Base virtual address of the memory-mapped register region (BAR).
    addr: *mut u8,
    // Length in bytes of the mapped region; register accesses are bounds-checked against it.
    len: usize,
    num_rx_queues: u16,
    num_tx_queues: u16,
    rx_queues: Vec<IxgbeRxQueue>,
    tx_queues: Vec<IxgbeTxQueue>,
    // Interrupt configuration; populated when the `irq` feature is enabled.
    interrupts: Interrupts,
    // Ties the device to its HAL implementation without storing an instance.
    _marker: PhantomData<H>,
}
101
/// Per-queue receive state: the descriptor ring plus bookkeeping for the
/// buffers currently handed to the NIC.
struct IxgbeRxQueue {
    // One pointer per slot into the DMA-allocated descriptor ring.
    descriptors: Box<[NonNull<AdvancedRxDescriptor>]>,
    // Ring size; must be a power of two for `wrap_ring`.
    num_descriptors: usize,
    // Pool the queue's packet buffers are allocated from.
    pool: Arc<MemPool>,
    // Pool entry backing each descriptor slot, indexed by ring position.
    bufs_in_use: Vec<usize>,
    // Next descriptor index the driver will inspect for a received packet.
    rx_index: usize,
}
109
110impl IxgbeRxQueue {
111 fn can_recv(&self) -> bool {
112 let rx_index = self.rx_index;
113
114 let desc = unsafe { self.descriptors[rx_index].as_ref() };
115 let status = desc.get_ext_status() as u8;
116 status & RX_STATUS_DD != 0
117 }
118}
119
/// Per-queue transmit state: the descriptor ring plus bookkeeping for
/// in-flight packet buffers.
struct IxgbeTxQueue {
    // One pointer per slot into the DMA-allocated descriptor ring.
    descriptors: Box<[NonNull<AdvancedTxDescriptor>]>,
    // Ring size; must be a power of two for `wrap_ring`.
    num_descriptors: usize,
    // Pool that sent buffers are returned to; set lazily on the first `send`.
    pool: Option<Arc<MemPool>>,
    // Pool entries of packets handed to the NIC, oldest first.
    bufs_in_use: VecDeque<usize>,
    // First descriptor index not yet reclaimed by `recycle_tx_buffers`.
    clean_index: usize,
    // Next descriptor index `send` will fill.
    tx_index: usize,
}
128
129impl IxgbeTxQueue {
130 fn can_send(&self) -> bool {
131 let next_tx_index = wrap_ring(self.tx_index, self.num_descriptors);
132 next_tx_index != self.clean_index
133 }
134}
135
/// An owned network packet, backed by one entry of a [`MemPool`].
pub struct IxgbeNetBuf {
    // Underlying packet buffer; returned to its pool on drop.
    packet: Packet,
}
143
144impl IxgbeNetBuf {
145 pub fn alloc(pool: &Arc<MemPool>, size: usize) -> IxgbeResult<Self> {
156 if let Some(pkt) = alloc_pkt(pool, size) {
157 Ok(Self { packet: pkt })
158 } else {
159 Err(IxgbeError::NoMemory)
160 }
161 }
162
163 pub fn packet(&self) -> &[u8] {
165 self.packet.as_bytes()
166 }
167
168 pub fn packet_mut(&mut self) -> &mut [u8] {
170 self.packet.as_mut_bytes()
171 }
172
173 pub fn packet_len(&self) -> usize {
175 self.packet.len
176 }
177
178 pub fn pool_entry(&self) -> usize {
180 self.packet.pool_entry
181 }
182
183 pub fn construct(pool_entry: usize, pool: &Arc<MemPool>, len: usize) -> IxgbeResult<Self> {
199 let pkt = unsafe {
200 Packet::new(
201 pool.get_virt_addr(pool_entry).add(PACKET_HEADROOM),
202 pool.get_phys_addr(pool_entry) + PACKET_HEADROOM,
203 len,
204 Arc::clone(pool),
205 pool_entry,
206 )
207 };
208 Ok(Self { packet: pkt })
209 }
210}
211
212impl<H: IxgbeHal, const QS: usize> NicDevice<H> for IxgbeDevice<H, QS> {
213 fn get_driver_name(&self) -> &str {
214 DRIVER_NAME
215 }
216
217 fn get_link_speed(&self) -> u16 {
219 let speed = self.get_reg32(IXGBE_LINKS);
220 if (speed & IXGBE_LINKS_UP) == 0 {
221 return 0;
222 }
223 match speed & IXGBE_LINKS_SPEED_82599 {
224 IXGBE_LINKS_SPEED_100_82599 => 100,
225 IXGBE_LINKS_SPEED_1G_82599 => 1000,
226 IXGBE_LINKS_SPEED_10G_82599 => 10000,
227 _ => 0,
228 }
229 }
230
231 fn get_mac_addr(&self) -> [u8; 6] {
233 let low = self.get_reg32(IXGBE_RAL(0));
234 let high = self.get_reg32(IXGBE_RAH(0));
235
236 [
237 (low & 0xff) as u8,
238 (low >> 8 & 0xff) as u8,
239 (low >> 16 & 0xff) as u8,
240 (low >> 24) as u8,
241 (high & 0xff) as u8,
242 (high >> 8 & 0xff) as u8,
243 ]
244 }
245
246 fn reset_stats(&mut self) {
248 self.get_reg32(IXGBE_GPRC);
249 self.get_reg32(IXGBE_GPTC);
250 self.get_reg32(IXGBE_GORCL);
251 self.get_reg32(IXGBE_GORCH);
252 self.get_reg32(IXGBE_GOTCL);
253 self.get_reg32(IXGBE_GOTCH);
254 }
255
256 fn recycle_tx_buffers(&mut self, queue_id: u16) -> IxgbeResult {
257 let queue = self
258 .tx_queues
259 .get_mut(queue_id as usize)
260 .ok_or(IxgbeError::InvalidQueue)?;
261
262 let mut clean_index = queue.clean_index;
263 let cur_index = queue.tx_index;
264
265 loop {
266 let mut cleanable = cur_index as i32 - clean_index as i32;
267
268 if cleanable < 0 {
269 cleanable += queue.num_descriptors as i32;
270 }
271
272 if cleanable < TX_CLEAN_BATCH as i32 {
273 break;
274 }
275
276 let mut cleanup_to = clean_index + TX_CLEAN_BATCH - 1;
277
278 if cleanup_to >= queue.num_descriptors {
279 cleanup_to -= queue.num_descriptors;
280 }
281
282 let status = unsafe {
283 let descs = queue.descriptors[cleanup_to].as_mut();
284 descs.paylen_popts_cc_idx_sta.read()
285 };
286
287 if (status & IXGBE_ADVTXD_STAT_DD) != 0 {
288 if let Some(ref pool) = queue.pool {
289 if TX_CLEAN_BATCH >= queue.bufs_in_use.len() {
290 pool.free_stack
291 .borrow_mut()
292 .extend(queue.bufs_in_use.drain(..))
293 } else {
294 pool.free_stack
295 .borrow_mut()
296 .extend(queue.bufs_in_use.drain(..TX_CLEAN_BATCH))
297 }
298 }
299
300 clean_index = wrap_ring(cleanup_to, queue.num_descriptors);
301 } else {
302 break;
303 }
304 }
305
306 queue.clean_index = clean_index;
307
308 Ok(())
309 }
310
311 fn receive_packets<F>(
312 &mut self,
313 queue_id: u16,
314 packet_nums: usize,
315 mut f: F,
316 ) -> IxgbeResult<usize>
317 where
318 F: FnMut(IxgbeNetBuf),
319 {
320 let mut recv_nums = 0;
321 let queue = self
322 .rx_queues
323 .get_mut(queue_id as usize)
324 .ok_or(IxgbeError::InvalidQueue)?;
325
326 if !queue.can_recv() {
328 return Err(IxgbeError::NotReady);
329 }
330
331 let mut rx_index = queue.rx_index;
332 let mut last_rx_index = queue.rx_index;
333
334 for _ in 0..packet_nums {
335 let desc = unsafe { queue.descriptors[rx_index].as_mut() };
336 let status = desc.get_ext_status() as u8;
337
338 if (status & RX_STATUS_DD) == 0 {
339 break;
340 }
341
342 if (status & RX_STATUS_EOP) == 0 {
343 panic!("Increase buffer size or decrease MTU")
344 }
345
346 let pool = &queue.pool;
347
348 if let Some(buf) = pool.alloc_buf() {
349 let idx = mem::replace(&mut queue.bufs_in_use[rx_index], buf);
350
351 let packet = unsafe {
352 Packet::new(
353 pool.get_virt_addr(idx),
354 pool.get_phys_addr(idx),
355 desc.length() as usize,
356 pool.clone(),
357 idx,
358 )
359 };
360 #[cfg(target_arch = "x86_64")]
362 packet.prefrtch(crate::memory::Prefetch::Time0);
363
364 let rx_buf = IxgbeNetBuf { packet };
365
366 f(rx_buf);
369 recv_nums += 1;
370
371 desc.set_packet_address(pool.get_phys_addr(queue.bufs_in_use[rx_index]) as u64);
372 desc.reset_status();
373
374 last_rx_index = rx_index;
375 rx_index = wrap_ring(rx_index, queue.num_descriptors);
376 } else {
377 error!("Ixgbe alloc buffer failed: No Memory!");
378 break;
379 }
380 }
381
382 if rx_index != last_rx_index {
383 self.set_reg32(IXGBE_RDT(u32::from(queue_id)), last_rx_index as u32);
384 self.rx_queues[queue_id as usize].rx_index = rx_index;
385 }
386
387 Ok(recv_nums)
388 }
389
390 fn send(&mut self, queue_id: u16, tx_buf: IxgbeNetBuf) -> IxgbeResult {
393 let queue = self
394 .tx_queues
395 .get_mut(queue_id as usize)
396 .ok_or(IxgbeError::InvalidQueue)?;
397
398 if !queue.can_send() {
399 warn!("Queue {queue_id} is full");
400 return Err(IxgbeError::QueueFull);
401 }
402
403 let cur_index = queue.tx_index;
404
405 let packet = tx_buf.packet;
406
407 trace!(
408 "[ixgbe-driver] SEND PACKET: {}",
409 PrettyPrinter::<EthernetFrame<&[u8]>>::new("", &packet.as_bytes())
410 );
411
412 if queue.pool.is_some() {
413 if !Arc::ptr_eq(queue.pool.as_ref().unwrap(), &packet.pool) {
414 queue.pool = Some(packet.pool.clone());
415 }
416 } else {
417 queue.pool = Some(packet.pool.clone());
418 }
419
420 assert!(
421 Arc::ptr_eq(queue.pool.as_ref().unwrap(), &packet.pool),
422 "Distince memory pools for a single tx queue are not supported yet."
423 );
424
425 queue.tx_index = wrap_ring(queue.tx_index, queue.num_descriptors);
426
427 trace!(
428 "TX phys_addr: {:#x}, virt_addr: {:#x}",
429 packet.get_phys_addr() as u64,
430 packet.get_virt_addr() as u64
431 );
432
433 let desc = unsafe { queue.descriptors[cur_index].as_mut() };
435 desc.send(packet.get_phys_addr() as u64, packet.len() as u16);
436
437 trace!(
438 "packet phys addr: {:#x}, len: {}",
439 packet.get_phys_addr(),
440 packet.len()
441 );
442
443 queue.bufs_in_use.push_back(packet.pool_entry);
444 mem::forget(packet);
445
446 self.set_reg32(
447 IXGBE_TDT(u32::from(queue_id)),
448 self.tx_queues[queue_id as usize].tx_index as u32,
449 );
450
451 debug!("[Ixgbe::send] SEND PACKET COMPLETE");
452 Ok(())
453 }
454
455 fn can_receive(&self, queue_id: u16) -> IxgbeResult<bool> {
457 let queue = self
458 .rx_queues
459 .get(queue_id as usize)
460 .ok_or(IxgbeError::InvalidQueue)?;
461 Ok(queue.can_recv())
462 }
463
464 fn can_send(&self, queue_id: u16) -> IxgbeResult<bool> {
466 let queue = self
467 .tx_queues
468 .get(queue_id as usize)
469 .ok_or(IxgbeError::InvalidQueue)?;
470 Ok(queue.can_send())
471 }
472}
473
474impl<H: IxgbeHal, const QS: usize> IxgbeDevice<H, QS> {
475 pub fn init(
506 base: usize,
507 len: usize,
508 num_rx_queues: u16,
509 num_tx_queues: u16,
510 pool: &Arc<MemPool>,
511 ) -> IxgbeResult<Self> {
512 info!(
513 "Initializing ixgbe device@base: {base:#x}, len: {len:#x}, num_rx_queues: {num_rx_queues}, num_tx_queues: {num_tx_queues}"
514 );
515 let rx_queues = Vec::with_capacity(num_rx_queues as usize);
517 let tx_queues = Vec::with_capacity(num_tx_queues as usize);
518
519 #[cfg(feature = "irq")]
520 let mut interrupts = Interrupts::default();
521 #[cfg(feature = "irq")]
522 {
523 interrupts.interrupts_enabled = true;
524 interrupts.itr_rate = 0x028;
525 }
526 #[cfg(not(feature = "irq"))]
527 let interrupts = Interrupts::default();
528
529 let mut dev = IxgbeDevice {
530 addr: base as *mut u8,
531 len,
532 num_rx_queues,
533 num_tx_queues,
534 rx_queues,
535 tx_queues,
536 interrupts,
537 _marker: PhantomData,
538 };
539
540 #[cfg(feature = "irq")]
541 {
542 for queue_id in 0..num_rx_queues {
543 dev.enable_msix_interrupt(queue_id);
544 }
545 }
546
547 dev.reset_and_init(pool)?;
548 Ok(dev)
549 }
550
551 pub fn num_rx_queues(&self) -> u16 {
553 self.num_rx_queues
554 }
555
556 pub fn num_tx_queues(&self) -> u16 {
558 self.num_tx_queues
559 }
560
561 #[cfg(feature = "irq")]
562 #[cfg(feature = "irq")]
571 pub fn enable_msi_interrupt(&self, queue_id: u16) {
572 self.set_ivar(0, queue_id, 0);
575
576 self.set_reg32(IXGBE_EIAC, 0x0000_0000);
583
584 self.set_reg32(IXGBE_EITR(u32::from(queue_id)), self.interrupts.itr_rate);
589
590 self.clear_interrupts();
592
593 let mut mask: u32 = self.get_reg32(IXGBE_EIMS);
595 mask |= 1 << queue_id;
596 self.set_reg32(IXGBE_EIMS, mask);
597 debug!("Using MSI interrupts");
598 }
599
600 #[cfg(feature = "irq")]
601 #[cfg(feature = "irq")]
610 pub fn enable_msix_interrupt(&self, queue_id: u16) {
611 let mut gpie: u32 = self.get_reg32(IXGBE_GPIE);
614 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_EIAME;
615 self.set_reg32(IXGBE_GPIE, gpie);
616
617 self.set_ivar(0, queue_id, u32::from(queue_id));
619
620 self.set_reg32(IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
628
629 self.set_reg32(IXGBE_EITR(u32::from(queue_id)), self.interrupts.itr_rate);
650
651 let mut mask: u32 = self.get_reg32(IXGBE_EIMS);
653 mask |= 1 << queue_id;
654 self.set_reg32(IXGBE_EIMS, mask);
655 debug!("Using MSIX interrupts");
656 }
657}
658
659impl<H: IxgbeHal, const QS: usize> IxgbeDevice<H, QS> {
661 fn reset_and_init(&mut self, pool: &Arc<MemPool>) -> IxgbeResult {
663 info!("resetting device ixgbe device");
664 self.disable_interrupts();
666
667 self.set_reg32(IXGBE_CTRL, IXGBE_CTRL_RST_MASK);
669 self.wait_clear_reg32(IXGBE_CTRL, IXGBE_CTRL_RST_MASK);
670 let _ = H::wait_until(Duration::from_millis(1000));
672
673 self.disable_interrupts();
675
676 let mac = self.get_mac_addr();
677 info!(
678 "mac address: {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
679 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]
680 );
681
682 self.wait_set_reg32(IXGBE_EEC, IXGBE_EEC_ARD);
684
685 self.wait_set_reg32(IXGBE_RDRXCTL, IXGBE_RDRXCTL_DMAIDONE);
687
688 self.init_link();
693
694 self.reset_stats();
697
698 self.init_rx(pool)?;
700
701 self.init_tx()?;
703
704 for i in 0..self.num_rx_queues {
705 self.start_rx_queue(i)?;
706 }
707
708 for i in 0..self.num_tx_queues {
709 self.start_tx_queue(i)?;
710 }
711
712 self.set_promisc(true);
714
715 self.wait_for_link();
717
718 info!("Success to initialize and reset Intel 10G NIC regs.");
719
720 Ok(())
721 }
722
723 #[allow(clippy::needless_range_loop)]
726 fn init_rx(&mut self, pool: &Arc<MemPool>) -> IxgbeResult {
727 self.clear_flags32(IXGBE_RXCTRL, IXGBE_RXCTRL_RXEN);
729
730 self.set_reg32(IXGBE_RXPBSIZE(0), IXGBE_RXPBSIZE_128KB);
732 for i in 1..8 {
733 self.set_reg32(IXGBE_RXPBSIZE(i), 0);
734 }
735
736 self.set_flags32(IXGBE_HLREG0, IXGBE_HLREG0_RXCRCSTRP);
738 self.set_flags32(IXGBE_RDRXCTL, IXGBE_RDRXCTL_CRCSTRIP);
739
740 self.set_flags32(IXGBE_FCTRL, IXGBE_FCTRL_BAM);
742
743 for i in 0..self.num_rx_queues {
745 info!("initializing rx queue {i}");
746 self.set_reg32(
748 IXGBE_SRRCTL(u32::from(i)),
749 (self.get_reg32(IXGBE_SRRCTL(u32::from(i))) & !IXGBE_SRRCTL_DESCTYPE_MASK)
750 | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF,
751 );
752 self.set_flags32(IXGBE_SRRCTL(u32::from(i)), IXGBE_SRRCTL_DROP_EN);
754
755 assert_eq!(mem::size_of::<AdvancedTxDescriptor>(), 16);
756 let ring_size_bytes = QS * mem::size_of::<AdvancedRxDescriptor>();
758 let dma: Dma<AdvancedRxDescriptor, H> = Dma::allocate(ring_size_bytes, true)?;
759
760 let mut descriptors: [NonNull<AdvancedRxDescriptor>; QS] = [NonNull::dangling(); QS];
762
763 unsafe {
764 for desc_id in 0..QS {
765 descriptors[desc_id] = NonNull::new(dma.virt.add(desc_id)).unwrap();
766 descriptors[desc_id].as_mut().init();
767 }
768 }
769
770 self.set_reg32(
771 IXGBE_RDBAL(u32::from(i)),
772 (dma.phys as u64 & 0xffff_ffff) as u32,
773 );
774 self.set_reg32(IXGBE_RDBAH(u32::from(i)), (dma.phys as u64 >> 32) as u32);
775 self.set_reg32(IXGBE_RDLEN(u32::from(i)), ring_size_bytes as u32);
776
777 info!("rx ring {} phys addr: {:#x}", i, dma.phys);
778 info!("rx ring {} virt addr: {:p}", i, dma.virt);
779
780 self.set_reg32(IXGBE_RDH(u32::from(i)), 0);
782 self.set_reg32(IXGBE_RDT(u32::from(i)), 0);
783
784 let rx_queue = IxgbeRxQueue {
785 descriptors: Box::new(descriptors),
786 pool: Arc::clone(pool),
787 num_descriptors: QS,
788 rx_index: 0,
789 bufs_in_use: Vec::with_capacity(QS),
790 };
791
792 self.rx_queues.push(rx_queue);
793 }
794
795 self.set_flags32(IXGBE_CTRL_EXT, IXGBE_CTRL_EXT_NS_DIS);
797
798 for i in 0..self.num_rx_queues {
800 self.clear_flags32(IXGBE_DCA_RXCTRL(u32::from(i)), 1 << 12);
801 }
802
803 self.set_flags32(IXGBE_RXCTRL, IXGBE_RXCTRL_RXEN);
805
806 Ok(())
807 }
808
809 #[allow(clippy::needless_range_loop)]
812 fn init_tx(&mut self) -> IxgbeResult {
813 self.set_flags32(IXGBE_HLREG0, IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
815
816 self.set_reg32(IXGBE_TXPBSIZE(0), IXGBE_TXPBSIZE_40KB);
818 for i in 1..8 {
819 self.set_reg32(IXGBE_TXPBSIZE(i), 0xff);
820 }
821
822 self.set_reg32(IXGBE_DTXMXSZRQ, 0xffff);
824 self.clear_flags32(IXGBE_RTTDCS, IXGBE_RTTDCS_ARBDIS);
825
826 for i in 0..self.num_tx_queues {
828 info!("initializing tx queue {i}");
829 assert_eq!(mem::size_of::<AdvancedTxDescriptor>(), 16);
831 let ring_size_bytes = QS * mem::size_of::<AdvancedTxDescriptor>();
832
833 let dma: Dma<AdvancedTxDescriptor, H> = Dma::allocate(ring_size_bytes, true)?;
834
835 let mut descriptors: [NonNull<AdvancedTxDescriptor>; QS] = [NonNull::dangling(); QS];
836
837 unsafe {
838 for desc_id in 0..QS {
839 descriptors[desc_id] = NonNull::new(dma.virt.add(desc_id)).unwrap();
840 descriptors[desc_id].as_mut().init();
841 }
842 }
843
844 self.set_reg32(
845 IXGBE_TDBAL(u32::from(i)),
846 (dma.phys as u64 & 0xffff_ffff) as u32,
847 );
848 self.set_reg32(IXGBE_TDBAH(u32::from(i)), (dma.phys as u64 >> 32) as u32);
849 self.set_reg32(IXGBE_TDLEN(u32::from(i)), ring_size_bytes as u32);
850
851 trace!("tx ring {} phys addr: {:#x}", i, dma.phys);
852 trace!("tx ring {} virt addr: {:p}", i, dma.virt);
853
854 let mut txdctl = self.get_reg32(IXGBE_TXDCTL(u32::from(i)));
858 txdctl &= !(0x7F | (0x7F << 8) | (0x7F << 16));
861 txdctl |= 36 | (8 << 8) | (4 << 16);
862
863 self.set_reg32(IXGBE_TXDCTL(u32::from(i)), txdctl);
864
865 let tx_queue = IxgbeTxQueue {
866 descriptors: Box::new(descriptors),
867 bufs_in_use: VecDeque::with_capacity(QS),
868 pool: None,
869 num_descriptors: QS,
870 clean_index: 0,
871 tx_index: 0,
872 };
873
874 self.tx_queues.push(tx_queue);
875 }
876
877 self.set_reg32(IXGBE_DMATXCTL, IXGBE_DMATXCTL_TE);
879
880 Ok(())
881 }
882
883 fn start_rx_queue(&mut self, queue_id: u16) -> IxgbeResult {
885 debug!("starting rx queue {queue_id}");
886
887 let queue = &mut self.rx_queues[queue_id as usize];
888
889 if queue.num_descriptors & (queue.num_descriptors - 1) != 0 {
890 return Err(IxgbeError::QueueNotAligned);
892 }
893
894 for i in 0..queue.num_descriptors {
895 let pool = &queue.pool;
896
897 let id = match pool.alloc_buf() {
898 Some(x) => x,
899 None => return Err(IxgbeError::NoMemory),
900 };
901
902 unsafe {
903 let desc = queue.descriptors[i].as_mut();
904 desc.set_packet_address(pool.get_phys_addr(id) as u64);
905 desc.reset_status();
906 }
907
908 queue.bufs_in_use.push(id);
910 }
911
912 let queue = &self.rx_queues[queue_id as usize];
913
914 self.set_flags32(IXGBE_RXDCTL(u32::from(queue_id)), IXGBE_RXDCTL_ENABLE);
916 self.wait_set_reg32(IXGBE_RXDCTL(u32::from(queue_id)), IXGBE_RXDCTL_ENABLE);
917
918 self.set_reg32(IXGBE_RDH(u32::from(queue_id)), 0);
920
921 self.set_reg32(
923 IXGBE_RDT(u32::from(queue_id)),
924 (queue.num_descriptors - 1) as u32,
925 );
926
927 Ok(())
928 }
929
930 fn start_tx_queue(&mut self, queue_id: u16) -> IxgbeResult {
932 debug!("starting tx queue {queue_id}");
933
934 let queue = &mut self.tx_queues[queue_id as usize];
935
936 if queue.num_descriptors & (queue.num_descriptors - 1) != 0 {
937 return Err(IxgbeError::QueueNotAligned);
938 }
939
940 self.set_reg32(IXGBE_TDH(u32::from(queue_id)), 0);
942 self.set_reg32(IXGBE_TDT(u32::from(queue_id)), 0);
943
944 self.set_flags32(IXGBE_TXDCTL(u32::from(queue_id)), IXGBE_TXDCTL_ENABLE);
946 self.wait_set_reg32(IXGBE_TXDCTL(u32::from(queue_id)), IXGBE_TXDCTL_ENABLE);
947
948 Ok(())
949 }
950
951 fn init_link(&self) {
954 self.set_reg32(
956 IXGBE_AUTOC,
957 (self.get_reg32(IXGBE_AUTOC) & !IXGBE_AUTOC_LMS_MASK) | IXGBE_AUTOC_LMS_10G_SERIAL,
958 );
959 self.set_reg32(
960 IXGBE_AUTOC,
961 (self.get_reg32(IXGBE_AUTOC) & !IXGBE_AUTOC_10G_PMA_PMD_MASK) | IXGBE_AUTOC_10G_XAUI,
962 );
963 self.set_flags32(IXGBE_AUTOC, IXGBE_AUTOC_AN_RESTART);
965 }
967
968 fn disable_interrupts(&self) {
970 self.set_reg32(IXGBE_EIMS, 0x0000_0000);
972 self.clear_interrupts();
973 }
974
975 fn disable_interrupt(&self, queue_id: u16) {
977 let mut mask: u32 = self.get_reg32(IXGBE_EIMS);
979 mask &= !(1 << queue_id);
980 self.set_reg32(IXGBE_EIMS, mask);
981 self.clear_interrupt(queue_id);
982 debug!("Using polling");
983 }
984
985 fn clear_interrupt(&self, queue_id: u16) {
987 self.set_reg32(IXGBE_EIMC, 1 << queue_id);
989 self.get_reg32(IXGBE_EICR);
990 }
991
992 fn clear_interrupts(&self) {
994 self.set_reg32(IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
996 self.get_reg32(IXGBE_EICR);
997 }
998
999 fn wait_for_link(&self) {
1001 #[cfg(target_arch = "x86_64")]
1002 {
1003 info!("waiting for link");
1004 let _ = H::wait_until(Duration::from_secs(10));
1005 let mut speed = self.get_link_speed();
1006 while speed == 0 {
1007 let _ = H::wait_until(Duration::from_millis(100));
1008 speed = self.get_link_speed();
1009 }
1010 info!("link speed is {} Mbit/s", self.get_link_speed());
1011 }
1012 }
1013
1014 fn set_promisc(&self, enabled: bool) {
1016 if enabled {
1017 info!("enabling promisc mode");
1018 self.set_flags32(IXGBE_FCTRL, IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1019 } else {
1020 info!("disabling promisc mode");
1021 self.clear_flags32(IXGBE_FCTRL, IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1022 }
1023 }
1024
1025 fn get_reg32(&self, reg: u32) -> u32 {
1031 assert!(reg as usize <= self.len - 4, "memory access out of bounds");
1032
1033 unsafe { ptr::read_volatile((self.addr as usize + reg as usize) as *mut u32) }
1034 }
1035
1036 fn set_reg32(&self, reg: u32, value: u32) {
1042 assert!(reg as usize <= self.len - 4, "memory access out of bounds");
1043
1044 unsafe {
1045 ptr::write_volatile((self.addr as usize + reg as usize) as *mut u32, value);
1046 }
1047 }
1048
1049 fn set_flags32(&self, reg: u32, flags: u32) {
1051 self.set_reg32(reg, self.get_reg32(reg) | flags);
1052 }
1053
1054 fn clear_flags32(&self, reg: u32, flags: u32) {
1056 self.set_reg32(reg, self.get_reg32(reg) & !flags);
1057 }
1058
1059 fn wait_clear_reg32(&self, reg: u32, value: u32) {
1061 loop {
1062 let current = self.get_reg32(reg);
1063 if (current & value) == 0 {
1064 break;
1065 }
1066 let _ = H::wait_until(Duration::from_millis(100));
1069 }
1070 }
1071
1072 fn wait_set_reg32(&self, reg: u32, value: u32) {
1074 loop {
1075 let current = self.get_reg32(reg);
1076 if (current & value) == value {
1077 break;
1078 }
1079 let _ = H::wait_until(Duration::from_millis(100));
1080 }
1081 }
1082
1083 fn set_ivar(&self, direction: u32, queue: u16, mut msix_vector: u32) {
1086 let mut ivar: u32;
1087 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
1089 let index = 16 * (u32::from(queue) & 1) + 8 * direction;
1090 ivar = self.get_reg32(IXGBE_IVAR(u32::from(queue) >> 1));
1091 ivar &= !(0xFF << index);
1092 ivar |= msix_vector << index;
1093 self.set_reg32(IXGBE_IVAR(u32::from(queue) >> 1), ivar);
1094 }
1095}
1096
// SAFETY(review): these impls are required because `IxgbeDevice` holds a raw
// MMIO pointer (`addr: *mut u8`), which is neither Send nor Sync by default.
// Register accesses go through volatile reads/writes; soundness of sharing
// across threads additionally relies on callers synchronizing mutable access
// to the queues — confirm before relying on concurrent use.
unsafe impl<H: IxgbeHal, const QS: usize> Sync for IxgbeDevice<H, QS> {}
unsafe impl<H: IxgbeHal, const QS: usize> Send for IxgbeDevice<H, QS> {}