use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};

use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use raw_slice::RawBufSlice;

use crate::{FIFO_DEPTH, Tx};

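// The number of waker slots is selected at compile time through the `*-waker(s)`
// crate features. Exactly one of them must be enabled.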
#[cfg(feature = "1-waker")]
pub const NUM_WAKERS: usize = 1;
#[cfg(feature = "2-wakers")]
pub const NUM_WAKERS: usize = 2;
#[cfg(feature = "4-wakers")]
pub const NUM_WAKERS: usize = 4;
#[cfg(feature = "8-wakers")]
pub const NUM_WAKERS: usize = 8;
#[cfg(feature = "16-wakers")]
pub const NUM_WAKERS: usize = 16;
#[cfg(feature = "32-wakers")]
pub const NUM_WAKERS: usize = 32;
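// One waker, one transfer context and one completion flag per slot.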
static UART_TX_WAKERS: [AtomicWaker; NUM_WAKERS] = [const { AtomicWaker::new() }; NUM_WAKERS];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; NUM_WAKERS] =
    [const { Mutex::new(RefCell::new(TxContext::new())) }; NUM_WAKERS];
static TX_DONE: [AtomicBool; NUM_WAKERS] = [const { AtomicBool::new(false) }; NUM_WAKERS];

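/// Error returned when a waker slot index is not smaller than [`NUM_WAKERS`].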
#[derive(Debug, thiserror::Error)]
#[error("invalid waker slot index: {0}")]
pub struct InvalidWakerIndex(pub usize);

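/// Interrupt handler for asynchronous TX transfers.
///
/// Call this function from the UART interrupt service routine, passing the
/// waker slot index that the corresponding [`TxAsync`] instance was created
/// with. It refills the TX FIFO from the active buffer and wakes the pending
/// [`TxFuture`] once the full buffer has been sent out.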
pub fn on_interrupt_tx(uartlite_tx: &mut Tx, waker_slot: usize) {
    if waker_slot >= NUM_WAKERS {
        return;
    }
    let status = uartlite_tx.regs.read_stat_reg();
    if !status.intr_enabled() {
        // No asynchronous TX transfer is active.
        return;
    }
    let mut context = critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
        *context_ref.borrow()
    });
    if context.slice.is_null() {
        return;
    }
    // The null check above guarantees that the slice length is available.
    let slice_len = context.slice.len().unwrap();
    if (context.progress >= slice_len && status.tx_fifo_empty()) || slice_len == 0 {
        // Transfer complete: write back the context, set the completion flag
        // and wake the future.
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
            *context_ref.borrow_mut() = context;
        });
        TX_DONE[waker_slot].store(true, core::sync::atomic::Ordering::Relaxed);
        UART_TX_WAKERS[waker_slot].wake();
        return;
    }
    let slice = unsafe { context.slice.get() }.expect("slice is invalid");
    // Refill the TX FIFO until it is full or the buffer is exhausted.
    while context.progress < slice_len {
        if uartlite_tx.regs.read_stat_reg().tx_fifo_full() {
            break;
        }
        uartlite_tx.write_fifo_unchecked(slice[context.progress]);
        context.progress += 1;
    }
    critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
        *context_ref.borrow_mut() = context;
    });
}

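/// Transfer bookkeeping shared between [`TxFuture`] and [`on_interrupt_tx`].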
#[derive(Debug, Copy, Clone)]
pub struct TxContext {
    progress: usize,
    slice: RawBufSlice,
}

#[allow(clippy::new_without_default)]
impl TxContext {
    pub const fn new() -> Self {
        Self {
            progress: 0,
            slice: RawBufSlice::new_nulled(),
        }
    }
}

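/// Future returned by [`TxAsync::write`]. Resolves to the number of bytes
/// written once the transfer has completed.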
pub struct TxFuture<'tx> {
    waker_idx: usize,
    tx: &'tx mut TxAsync,
}

impl<'tx> TxFuture<'tx> {
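    /// Creates a TX future and starts the transfer by pre-filling the FIFO.
    ///
    /// # Safety
    ///
    /// A raw pointer to `data` is stored in a static transfer context. The
    /// caller must ensure that `data` outlives the returned future and is
    /// neither mutated nor dropped while the transfer is in progress.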
    pub unsafe fn new(
        tx: &'tx mut TxAsync,
        waker_idx: usize,
        data: &[u8],
    ) -> Result<TxFuture<'tx>, InvalidWakerIndex> {
        if waker_idx >= NUM_WAKERS {
            return Err(InvalidWakerIndex(waker_idx));
        }
        TX_DONE[waker_idx].store(false, core::sync::atomic::Ordering::Relaxed);
        tx.tx.reset_fifo();

        // Pre-fill the FIFO; the interrupt handler takes over from here.
        let init_fill_count = core::cmp::min(data.len(), FIFO_DEPTH);
        for data in data.iter().take(init_fill_count) {
            tx.tx.write_fifo_unchecked(*data);
        }
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[waker_idx].borrow(cs);
            let mut context = context_ref.borrow_mut();
            unsafe {
                context.slice.set(data);
            }
            context.progress = init_fill_count;
        });
        Ok(Self { waker_idx, tx })
    }
}

impl Future for TxFuture<'_> {
    type Output = usize;

    fn poll(
        self: core::pin::Pin<&mut Self>,
        cx: &mut core::task::Context<'_>,
    ) -> core::task::Poll<Self::Output> {
        // Register the waker before checking the completion flag to avoid
        // missing a wake-up from the interrupt handler.
        UART_TX_WAKERS[self.waker_idx].register(cx.waker());
        if TX_DONE[self.waker_idx].swap(false, core::sync::atomic::Ordering::Relaxed) {
            let progress = critical_section::with(|cs| {
                let mut ctx = TX_CONTEXTS[self.waker_idx].borrow(cs).borrow_mut();
                ctx.slice.set_null();
                ctx.progress
            });
            return core::task::Poll::Ready(progress);
        }
        core::task::Poll::Pending
    }
}

impl Drop for TxFuture<'_> {
    fn drop(&mut self) {
        // If the transfer has not completed yet, cancel it by clearing the
        // transfer context and resetting the FIFO.
        if !TX_DONE[self.waker_idx].load(core::sync::atomic::Ordering::Relaxed) {
            critical_section::with(|cs| {
                let context_ref = TX_CONTEXTS[self.waker_idx].borrow(cs);
                let mut context_mut = context_ref.borrow_mut();
                context_mut.slice.set_null();
                context_mut.progress = 0;
                self.tx.tx.reset_fifo();
            });
        }
    }
}

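/// Asynchronous TX driver which operates on one of the [`NUM_WAKERS`] waker
/// slots.
///
/// [`on_interrupt_tx`] must be called from the UART interrupt handler with the
/// same waker slot index that is passed to [`TxAsync::new`]. A minimal usage
/// sketch, assuming that a [`Tx`] driver was constructed beforehand and that
/// the interrupt service routine has its own way of obtaining a [`Tx`] handle:
///
/// ```ignore
/// // Slot 0 is an arbitrary choice; each concurrently used TxAsync instance
/// // needs its own slot below NUM_WAKERS.
/// let mut tx_async = TxAsync::new(tx, 0).unwrap();
/// // Inside the UART ISR: on_interrupt_tx(&mut isr_tx_handle, 0);
/// let written = tx_async.write(b"hello world").await;
/// ```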
pub struct TxAsync {
    pub(crate) tx: Tx,
    waker_idx: usize,
}

impl TxAsync {
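    /// Creates a new asynchronous TX driver for the given waker slot.
    ///
    /// Returns [`InvalidWakerIndex`] if `waker_idx` is not smaller than
    /// [`NUM_WAKERS`].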
    pub fn new(tx: Tx, waker_idx: usize) -> Result<Self, InvalidWakerIndex> {
        if waker_idx >= NUM_WAKERS {
            return Err(InvalidWakerIndex(waker_idx));
        }
        Ok(Self { tx, waker_idx })
    }

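    /// Writes the buffer asynchronously and returns the number of bytes
    /// written once the transfer has completed.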
    pub async fn write(&mut self, buf: &[u8]) -> usize {
        if buf.is_empty() {
            return 0;
        }
        let waker_idx = self.waker_idx;
        // The waker index was validated at construction, so this cannot fail.
        let fut = unsafe { TxFuture::new(self, waker_idx, buf).unwrap() };
        fut.await
    }

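    /// Releases the inner [`Tx`] driver.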
    pub fn release(self) -> Tx {
        self.tx
    }
}

impl embedded_io::ErrorType for TxAsync {
    type Error = Infallible;
}

impl embedded_io_async::Write for TxAsync {
    async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
        Ok(self.write(buf).await)
    }

    async fn flush(&mut self) -> Result<(), Self::Error> {
        // write() only completes once the full buffer was written and the
        // FIFO has drained, so there is nothing left to flush.
        Ok(())
    }
}