zynq7000_hal/uart/tx_async.rs
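
//! Asynchronous, interrupt-driven TX support for the Zynq-7000 PS UARTs.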

use core::{cell::RefCell, convert::Infallible, future::Future, sync::atomic::AtomicBool};

use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use raw_slice::RawBufSlice;

use crate::uart::{FIFO_DEPTH, Tx, UartId};

#[derive(Debug)]
pub enum TransferType {
    Read,
    Write,
    Transfer,
    TransferInPlace,
}
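
// The Zynq-7000 PS provides two UART peripherals, so all driver state shared with the
// interrupt handler lives in fixed arrays of length 2, indexed by the UART ID.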
static UART_TX_WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; 2] =
    [const { Mutex::new(RefCell::new(TxContext::new())) }; 2];
static TX_DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];
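
/// Interrupt handler for asynchronous UART TX operation.
///
/// This is assumed to be called from the user-provided interrupt service routine for the
/// given UART. It refills the TX FIFO from the active transfer context and, once the full
/// buffer has been sent, disables the TX interrupts and wakes the pending [TxFuture].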
pub fn on_interrupt_tx(peripheral: UartId) {
    let mut tx_with_irq = unsafe { Tx::steal(peripheral) };
    let idx = peripheral as usize;
    let imr = tx_with_irq.regs().read_imr();
    // Nothing to do if none of the TX interrupt sources are enabled.
    if !imr.tx_over() && !imr.tx_near_full() && !imr.tx_full() && !imr.tx_empty() {
        return;
    }

    let isr = tx_with_irq.regs().read_isr();
    let unexpected_overrun = isr.tx_over();
    let mut context = critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[idx].borrow(cs);
        *context_ref.borrow()
    });
    // No active transfer.
    if context.slice.is_null() {
        return;
    }
    let slice_len = context.slice.len().unwrap();
    context.tx_overrun = unexpected_overrun;
    // Transfer complete: write back the context, mark the transfer as done and wake the
    // pending future.
    if (context.progress >= slice_len && isr.tx_empty()) || slice_len == 0 {
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[idx].borrow(cs);
            *context_ref.borrow_mut() = context;
        });
        TX_DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
        tx_with_irq.disable_interrupts();
        tx_with_irq.clear_interrupts();
        UART_TX_WAKERS[idx].wake();
        return;
    }
    let slice = unsafe { context.slice.get() }.expect("slice is invalid");
    // Refill the TX FIFO until it is full or the buffer is exhausted.
    while context.progress < slice_len {
        if tx_with_irq.regs().read_sr().tx_full() {
            break;
        }
        tx_with_irq.write_fifo_unchecked(slice[context.progress]);
        context.progress += 1;
    }

    critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[idx].borrow(cs);
        *context_ref.borrow_mut() = context;
    });
    tx_with_irq.clear_interrupts();
}
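
/// Bookkeeping for an active TX transfer, shared between [TxFuture] and the interrupt
/// handler through a critical-section [Mutex].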
#[derive(Debug, Copy, Clone)]
pub struct TxContext {
    progress: usize,
    tx_overrun: bool,
    slice: RawBufSlice,
}

#[allow(clippy::new_without_default)]
impl TxContext {
    pub const fn new() -> Self {
        Self {
            progress: 0,
            tx_overrun: false,
            slice: RawBufSlice::new_nulled(),
        }
    }
}
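
/// Future returned by the asynchronous TX operations. It resolves to the number of bytes
/// written once the interrupt handler reports the transfer as complete.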
pub struct TxFuture {
    id: UartId,
}

impl TxFuture {
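    /// Starts a new TX transfer by pre-filling the hardware FIFO and enabling the TX
    /// interrupts; the remaining bytes are written by [on_interrupt_tx].
    ///
    /// # Safety
    ///
    /// `data` is captured as a raw slice for the interrupt handler, so the buffer must
    /// remain valid and unmodified until the returned future has completed.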
    pub unsafe fn new(tx_with_irq: &mut Tx, data: &[u8]) -> Self {
        let idx = tx_with_irq.uart_idx() as usize;
        TX_DONE[idx].store(false, core::sync::atomic::Ordering::Relaxed);
        tx_with_irq.disable_interrupts();
        tx_with_irq.disable();

        // Pre-fill the hardware FIFO with the first chunk of the buffer; the interrupt
        // handler takes care of the rest.
        let init_fill_count = core::cmp::min(data.len(), FIFO_DEPTH);
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[idx].borrow(cs);
            let mut context = context_ref.borrow_mut();
            unsafe {
                context.slice.set(data);
            }
            context.progress = init_fill_count;
        });
        tx_with_irq.enable(true);
        for data in data.iter().take(init_fill_count) {
            tx_with_irq.write_fifo_unchecked(*data);
        }
        tx_with_irq.enable_interrupts();

        Self {
            id: tx_with_irq.uart_idx(),
        }
    }
}

impl Future for TxFuture {
    type Output = usize;

    fn poll(
        self: core::pin::Pin<&mut Self>,
        cx: &mut core::task::Context<'_>,
    ) -> core::task::Poll<Self::Output> {
        // Register the waker before checking the completion flag to avoid missing a
        // wakeup from the interrupt handler.
        UART_TX_WAKERS[self.id as usize].register(cx.waker());
        if TX_DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
            let progress = critical_section::with(|cs| {
                let mut ctx = TX_CONTEXTS[self.id as usize].borrow(cs).borrow_mut();
                ctx.slice.set_null();
                ctx.progress
            });
            return core::task::Poll::Ready(progress);
        }
        core::task::Poll::Pending
    }
}
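
// Dropping the future disables the TX interrupts again so the interrupt handler no
// longer operates on the associated transfer context.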
impl Drop for TxFuture {
    fn drop(&mut self) {
        let mut tx = unsafe { Tx::steal(self.id) };
        tx.disable_interrupts();
    }
}
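
/// Asynchronous TX driver which wraps a [Tx] and implements [embedded_io_async::Write].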
pub struct TxAsync {
    tx: Tx,
}

impl TxAsync {
    pub fn new(tx: Tx) -> Self {
        Self { tx }
    }
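
    /// Writes the given buffer asynchronously and returns the number of bytes written.
    ///
    /// A minimal usage sketch, assuming an async executor and an already configured
    /// [Tx] instance named `tx`:
    ///
    /// ```ignore
    /// let mut tx_async = TxAsync::new(tx);
    /// let written = tx_async.write(b"hello\n").await;
    /// ```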
    pub async fn write(&mut self, buf: &[u8]) -> usize {
        if buf.is_empty() {
            return 0;
        }
        let fut = unsafe { TxFuture::new(&mut self.tx, buf) };
        fut.await
    }

    pub fn release(self) -> Tx {
        self.tx
    }
}
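
// Writing itself cannot fail, so [Infallible] is used as the embedded-io error type.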
impl embedded_io::ErrorType for TxAsync {
    type Error = Infallible;
}

impl embedded_io_async::Write for TxAsync {
    async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
        Ok(self.write(buf).await)
    }

    async fn flush(&mut self) -> Result<(), Self::Error> {
        // The write future only completes once the TX FIFO has drained, so there is
        // nothing left to flush here.
        Ok(())
    }
}