//! eth_igb/ring/tx.rs — advanced transmit (TX) descriptor ring for the igb driver.

1use core::ops::{Deref, DerefMut};
2
3use alloc::sync::Arc;
4use log::trace;
5
6use crate::descriptor::{TxAdvDescCmd, TxAdvDescType};
7
8use super::*;
/// Transmit-ring state: the shared descriptor-ring plumbing plus the
/// driver-side cursor for reclaiming completed descriptors.
struct RingInner {
    // Generic descriptor ring: descriptor memory, per-slot metadata and
    // MMIO register access (exposed via Deref/DerefMut below).
    base: Ring<AdvTxDesc>,
    // Index of the oldest in-flight descriptor not yet reclaimed by
    // `next_finished`; chases the hardware head pointer (TDH).
    finished: usize,
}
13
14impl Deref for RingInner {
15    type Target = super::Ring<AdvTxDesc>;
16
17    fn deref(&self) -> &Self::Target {
18        &self.base
19    }
20}
21
/// Mutable counterpart of the `Deref` impl: forwards `&mut RingInner`
/// to the underlying descriptor ring.
impl DerefMut for RingInner {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
27
impl RingInner {
    /// Wraps a freshly allocated descriptor ring; the reclaim cursor
    /// (`finished`) starts at slot 0 since nothing is in flight yet.
    fn new(base: Ring<AdvTxDesc>) -> Self {
        Self { base, finished: 0 }
    }

    /// Brings up this TX queue following the controller's documented
    /// initialization sequence (numbered steps below), then waits for the
    /// hardware to acknowledge the queue enable.
    ///
    /// # Errors
    /// Returns an error if TXDCTL.ENABLE is not observed set within the
    /// poll budget (1000 polls spaced 1 ms apart).
    pub fn init(&mut self) -> Result<(), DError> {
        debug!("init tx");
        // Step 1: Allocate a region of memory for the transmit descriptor list
        // (Already done in Ring::new())
        let bus_addr = self.base.bus_addr();

        // Step 2: Program the descriptor base address with the address of the region
        // (64-bit bus address split across the low/high register pair).
        self.reg_write(TDBAL, (bus_addr & 0xFFFFFFFF) as u32);
        self.reg_write(TDBAH, (bus_addr >> 32) as u32);

        // Step 3: Set the length register to the size of the descriptor ring
        let size_bytes = self.base.size_bytes();
        self.reg_write(TDLEN, size_bytes as u32);

        // Step 4: Program the TXDCTL register with the desired TX descriptor write back policy
        // Suggested values: WTHRESH = 1, all other fields 0
        self.reg_write(TXDCTL, TXDCTL::WTHRESH.val(1).value);

        // Reset head and tail so hardware and driver agree on an empty ring.
        self.reg_write(TDH, 0);
        self.reg_write(TDT, 0);

        // Step 5: If needed, set the TDWBAL/TWDBAH to enable head write back
        // (Not implemented in this basic version)

        // Step 6: Enable the queue using TXDCTL.ENABLE (queue zero is enabled by default).
        // WTHRESH is re-written here so the enable write does not clear it.
        self.reg_write(
            TXDCTL,
            (TXDCTL::WTHRESH.val(1) + TXDCTL::ENABLE::Enabled).value,
        );

        // Step 7: Poll the TXDCTL register until the ENABLE bit is set
        wait_for(
            || self.reg_read(TXDCTL) & TXDCTL::ENABLE::Enabled.value > 0,
            Duration::from_millis(1),
            Some(1000),
        )?;

        // Note: The tail register of the queue (TDT[n]) should not be bumped until the queue is enabled
        // Step 8: Enable transmit path by setting TCTL.EN should be done only after all other settings are done
        // This is handled by the MAC layer through mac.enable_tx()
        debug!("TX ring initialized successfully");
        Ok(())
    }

    /// Reads the current head pointer (TDH): the next descriptor the
    /// hardware will process.
    pub fn get_tx_head(&self) -> u32 {
        self.reg_read(TDH)
    }

    /// Reads the current tail pointer (TDT): the next descriptor slot the
    /// driver will fill.
    pub fn get_tx_tail(&self) -> u32 {
        self.reg_read(TDT)
    }

    /// Queues a single packet for transmission.
    ///
    /// # Errors
    /// * `DError::InvalidParameter` — buffer is larger than `PACKET_SIZE`.
    /// * `DError::NoMemory` — the descriptor ring is full.
    pub fn send_packet(&mut self, request: Request) -> Result<(), DError> {
        if request.buff.len() > PACKET_SIZE as usize {
            return Err(DError::InvalidParameter);
        }
        trace!("send {}", request.buff.len());
        // NOTE(review): the buffer is confirmed before the ring-full check
        // below; presumably `confirm_write_all` is an idempotent cache/DMA
        // flush, so doing it for a rejected packet is harmless — confirm.
        request.buff.confirm_write_all();
        let tail = self.get_tx_tail() as usize;
        let next_tail = (tail + 1) % self.count();
        let head = self.get_tx_head() as usize;

        // Free-space check: one slot is always left unused so that
        // head == tail means "empty" and next_tail == head means "full".
        if next_tail == head {
            return Err(DError::NoMemory); // ring is full
        }

        // Build the descriptor for one complete packet: EOP (end of packet),
        // RS (report status, so completion is written back), IFCS (hardware
        // inserts the frame checksum) and DEXT (advanced descriptor format).
        let desc = AdvTxDesc::new(
            request.bus_addr(),
            request.buff.len(),
            TxAdvDescType::Data,
            &[
                TxAdvDescCmd::EOP,
                TxAdvDescCmd::RS,
                TxAdvDescCmd::IFCS,
                TxAdvDescCmd::DEXT,
            ],
        );

        self.descriptors.set(tail, desc);
        // Keep the request (and its buffer) alive until reclaimed by
        // `next_finished`.
        self.meta_ls[tail].request = Some(request);

        // Memory barrier: the descriptor write must be globally visible
        // before the tail bump makes it eligible for device DMA.
        mb();

        // Advance the tail pointer to hand the descriptor to hardware.
        self.reg_write(TDT, next_tail as u32);

        Ok(())
    }

    /// Reclaims the next completed transmit request, if any.
    ///
    /// Descriptors in `[finished, TDH)` have been consumed by hardware;
    /// the descriptor done bit is checked as an extra guard before the
    /// slot is released back to the caller.
    fn next_finished(&mut self) -> Option<Request> {
        let head = self.get_tx_head() as usize;
        if self.finished == head {
            return None; // no newly completed descriptors
        }
        let index = self.finished;

        trace!("next_finished index: {index}");

        // Double-check the descriptor's write-back status.
        // SAFETY: NOTE(review) — `index` stays in `[0, count())`; presumably
        // the raw access is unsafe because `descriptors[index]` reads a
        // DMA-shared union field (`write`) — confirm against Ring's docs.
        unsafe {
            let desc = &self.descriptors[index];
            if !desc.write.is_done() {
                trace!("TxRing: next_finished descriptor not done at index: {index}");
                return None; // not completed yet; retry on a later call
            }
        }
        let request = self.meta_ls[index]
            .request
            .take()
            .expect("Request should be set");

        self.finished = (self.finished + 1) % self.count();
        Some(request)
    }
}
154
155pub struct TxRing(Arc<UnsafeCell<RingInner>>);
156
157unsafe impl Send for TxRing {}
158
159impl TxRing {
160    #[allow(clippy::arc_with_non_send_sync)]
161    pub(crate) fn new(idx: usize, mmio_base: NonNull<u8>, size: usize) -> Result<Self, DError> {
162        let mut ring_inner = RingInner::new(Ring::new(idx, mmio_base, size, PACKET_SIZE as usize)?);
163
164        ring_inner.init()?;
165        let ring = Arc::new(UnsafeCell::new(ring_inner));
166        Ok(Self(ring))
167    }
168
169    fn this(&self) -> &RingInner {
170        unsafe { &*self.0.get() }
171    }
172
173    fn this_mut(&mut self) -> &mut RingInner {
174        unsafe { &mut *self.0.get() }
175    }
176
177    pub fn send(&mut self, request: Request) -> Result<(), DError> {
178        self.this_mut().send_packet(request)
179    }
180
181    pub fn request_max_count(&self) -> usize {
182        self.this().count() - 1
183    }
184
185    pub fn is_queue_full(&self) -> bool {
186        let head = self.this().get_tx_head() as usize;
187        let tail = self.this().get_tx_tail() as usize;
188        (tail + 1) % self.this().count() == head
189    }
190
191    pub fn next_finished(&mut self) -> Option<Request> {
192        self.this_mut().next_finished()
193    }
194}