1use core::ops::{Deref, DerefMut};
2
3use super::*;
4use crate::{
5 DError,
6 descriptor::{AdvRxDesc, AdvRxDescRead},
7};
8use alloc::sync::Arc;
9use log::{error, trace};
10
/// Per-queue receive-ring state: owns the descriptor ring and drives the
/// queue's RX registers. Register helpers (`reg_read`/`reg_write`) and
/// descriptor storage are reached through `Deref` to `Ring<AdvRxDesc>`.
struct RingInner {
    base: Ring<AdvRxDesc>,
}
14
15impl RingInner {
16 fn new(ring: Ring<AdvRxDesc>) -> Result<Self, DError> {
17 Ok(Self { base: ring })
18 }
19
20 fn init(&mut self) -> Result<(), DError> {
21 let bus_addr = self.bus_addr();
22 let size_bytes = self.size_bytes();
23
24 self.reg_write(RDBAL, (bus_addr & 0xFFFFFFFF) as u32);
26 self.reg_write(RDBAH, (bus_addr >> 32) as u32);
27
28 self.reg_write(RDLEN, size_bytes as u32);
30
31 let pkt_size_kb = self.pkt_size / 1024;
32
33 self.reg_write(
35 SRRCTL,
36 (SRRCTL::DESCTYPE::AdvancedOneBuffer + SRRCTL::BSIZEPACKET.val(pkt_size_kb as _)).value,
37 );
38
39 self.reg_write(RDH, 0);
44 self.reg_write(RDT, 0);
45
46 self.enable_queue();
50
51 wait_for(
55 || self.reg_read(RXDCTL) & RXDCTL::ENABLE::Enabled.value > 0,
56 Duration::from_millis(1),
57 Some(1000),
58 )?;
59
60 Ok(())
67 }
68
69 pub fn enable_queue(&mut self) {
70 self.reg_write(
72 RXDCTL,
73 (RXDCTL::PTHRESH.val(8)
74 + RXDCTL::HTHRESH.val(8)
75 + RXDCTL::WTHRESH.val(1)
76 + RXDCTL::ENABLE::Enabled)
77 .value,
78 );
79 }
80
81 pub fn disable_queue(&mut self) {
82 self.reg_write(
84 RXDCTL,
85 (RXDCTL::PTHRESH.val(8)
86 + RXDCTL::HTHRESH.val(8)
87 + RXDCTL::WTHRESH.val(1)
88 + RXDCTL::ENABLE::Disabled)
89 .value,
90 );
91 }
92
93 pub fn get_head(&self) -> u32 {
108 self.reg_read(RDH)
109 }
110
111 pub fn get_tail(&self) -> u32 {
113 self.reg_read(RDT)
114 }
115
116 pub fn update_tail(&mut self, mut tail: usize) {
118 if tail == self.descriptors.len() {
119 tail = 0;
120 }
121 self.reg_write(RDT, tail as u32);
122 }
123}
// Expose the base ring's fields and helpers (descriptors, reg_read/reg_write,
// pkt_size, …) directly on RingInner.
impl Deref for RingInner {
    type Target = super::Ring<AdvRxDesc>;

    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
131
// Mutable counterpart of the Deref impl above the base ring.
impl DerefMut for RingInner {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
137
/// Handle to one receive queue. The inner `UnsafeCell` allows `RxPacket`
/// (which holds `&mut RxRing`) and the ring itself to share the state;
/// see the `Send` impl below for the aliasing argument.
pub struct RxRing(Arc<UnsafeCell<RingInner>>);
139
// SAFETY: NOTE(review): `RxRing` appears to be the only handle to the
// `UnsafeCell<RingInner>` (the Arc created in `RxRing::new` is never cloned
// in this file), so moving it to another thread cannot by itself create
// aliased access — confirm no other clone of the Arc escapes elsewhere.
unsafe impl Send for RxRing {}
141
142impl RxRing {
143 #[allow(clippy::arc_with_non_send_sync)]
144 pub(crate) fn new(idx: usize, mmio_base: NonNull<u8>, size: usize) -> Result<Self, DError> {
145 let base = Ring::new(
146 idx,
147 mmio_base,
148 size,
149 PACKET_SIZE as usize,
150 )?;
151 let mut ring_inner = RingInner::new(base)?;
152 ring_inner.init()?;
153 let ring = Arc::new(UnsafeCell::new(ring_inner));
154 Ok(Self(ring))
155 }
156
157 fn this(&self) -> &RingInner {
158 unsafe { &*self.0.get() }
159 }
160 fn this_mut(&mut self) -> &mut RingInner {
161 unsafe { &mut *self.0.get() }
162 }
163
164 pub fn packet_size(&self) -> usize {
165 self.this().pkt_size
166 }
167
168 pub fn next_pkt(&mut self) -> Option<RxPacket<'_>> {
169 let index = self.next_index();
170 let head = self.this().get_head() as usize;
171 if head == index {
172 return None; }
174 let len;
175 unsafe {
176 let desc = &self.this().descriptors[index];
177 if !desc.write.is_done() {
179 trace!("RxRing: next_pkt descriptor not done at index: {index}");
180 return None; }
182 len = desc.write.packet_length() as usize;
183 }
184
185 trace!("RxRing: next_pkt index: {index}");
186 let request = self.this_mut().meta_ls[index]
187 .request
188 .take()
189 .expect("Request should be set");
190
191 Some(RxPacket {
192 ring: self,
193 request,
194 len,
195 })
196 }
197
198 pub fn submit(&mut self, request: Request) -> Result<(), DError> {
199 let index = self.this_mut().get_tail() as usize;
200 let ring = self.this_mut();
201 if index + 1 == ring.get_head() as usize {
202 error!("RxRing: submit no available buffer at index: {index}");
203 return Err(DError::NoMemory); }
205
206 let desc = AdvRxDesc {
208 read: AdvRxDescRead::new(request.bus_addr(), 0, false),
209 };
210 ring.descriptors.set(index, desc);
211 ring.meta_ls[index].request = Some(request);
212
213 ring.update_tail(index + 1);
215
216 Ok(())
217 }
218
219 fn next_index(&self) -> usize {
220 let ring = self.this();
221 (ring.get_tail() as usize + 1) % ring.count()
222 }
223
224 pub fn request_max_count(&self) -> usize {
225 self.this().count() - 1
226 }
227}
228
impl Drop for RxRing {
    fn drop(&mut self) {
        // Stop the hardware queue before the ring memory is released so the
        // NIC cannot keep DMA-ing into freed buffers.
        self.this_mut().disable_queue();
    }
}
235
/// A received packet handed out by `RxRing::next_pkt`; derefs to the valid
/// packet bytes.
pub struct RxPacket<'a> {
    // The DMA buffer holding the packet data.
    pub request: Request,
    // Mutable borrow of the ring so the buffer can be re-submitted.
    ring: &'a mut RxRing,
    // Number of valid bytes in `request`, taken from the descriptor.
    len: usize,
}
241
242impl<'a> RxPacket<'a> {
243 pub fn re_submit(self) -> Result<(), DError> {
244 self.ring.submit(self.request)
245 }
246}
247
248impl Deref for RxPacket<'_> {
249 type Target = [u8];
250
251 fn deref(&self) -> &Self::Target {
252 &self.request.deref()[..self.len]
253 }
254}