vorago_shared_hal/uart/tx_asynch.rs

//! # Async UART transmission functionality.
//!
//! This module provides the [TxAsync] struct which implements the [embedded_io_async::Write] trait.
//! This trait allows for asynchronous sending of data streams. Please note that this module does
//! not specify/declare the interrupt handlers which must be provided for async support to work.
//! However, it provides the [on_interrupt_tx] interrupt handler.
//!
//! This handler should be called in ALL user interrupt handlers which handle UART TX interrupts
//! for a given UART bank.
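//!
//! ## Example
//!
//! A minimal usage sketch, assuming the required TX interrupt handler has already been set up as
//! described in the [on_interrupt_tx] documentation. How the [Tx] driver is obtained is not shown
//! here and depends on the rest of the HAL.
//!
//! ```ignore
//! use embedded_io_async::Write;
//!
//! // `tx` is a fully configured Tx driver obtained from the HAL.
//! async fn send_greeting(tx: Tx) -> Result<(), TxOverrunError> {
//!     let mut tx = TxAsync::new(tx);
//!     // The buffer lives across the await point, which satisfies the lifetime
//!     // requirement of the underlying TX future.
//!     tx.write(b"hello\r\n").await?;
//!     Ok(())
//! }
//! ```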
use core::{cell::RefCell, future::Future};

use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use embedded_io_async::Write;
use portable_atomic::AtomicBool;
use raw_slice::RawBufSlice;

use super::*;

static UART_TX_WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; 2] =
    [const { Mutex::new(RefCell::new(TxContext::new())) }; 2];
// Completion flag. Kept outside of the context structure as an atomic to avoid
// a critical section.
static TX_DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];

/// This is a generic interrupt handler for asynchronous UART TX operations on a given
/// UART bank.
///
/// The user has to call this once in the interrupt handler responsible for the TX interrupts on
/// the given UART bank.
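///
/// # Example
///
/// A minimal sketch of such a handler. The `#[interrupt]` macro, the `UART0_TX` interrupt name
/// and the `Bank::Uart0` variant are assumptions and depend on the concrete chip and PAC; adapt
/// them to your target.
///
/// ```ignore
/// #[interrupt]
/// fn UART0_TX() {
///     // Forward the TX interrupt of the first UART bank to the generic handler.
///     on_interrupt_tx(Bank::Uart0);
/// }
/// ```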
pub fn on_interrupt_tx(bank: Bank) {
    let mut uart = unsafe { bank.steal_regs() };
    let idx = bank as usize;
    let irq_enabled = uart.read_irq_enabled();
    // IRQ is not related to TX.
    if !irq_enabled.tx() && !irq_enabled.tx_empty() {
        return;
    }

    let tx_status = uart.read_tx_status();
    let unexpected_overrun = tx_status.wr_lost();
    let mut context = critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[idx].borrow(cs);
        *context_ref.borrow()
    });
    context.tx_overrun = unexpected_overrun;
    // Safety: We documented that the user-provided slice must outlive the future, so we convert
    // the raw pointer back to the slice here.
    let slice = unsafe { context.slice.get().unwrap() };
    if context.progress >= slice.len() && !tx_status.tx_busy() {
        uart.modify_irq_enabled(|mut value| {
            value.set_tx(false);
            value.set_tx_empty(false);
            value.set_tx_status(false);
            value
        });
        uart.modify_enable(|mut value| {
            value.set_tx(false);
            value
        });
        // Write back updated context structure.
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[idx].borrow(cs);
            *context_ref.borrow_mut() = context;
        });
        // Transfer is done.
        TX_DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
        UART_TX_WAKERS[idx].wake();
        return;
    }
    while context.progress < slice.len() {
        if !uart.read_tx_status().ready() {
            break;
        }
        // Safety: The TX structure is owned by the future, which does not write into the data
        // register, so we can assume we are the only one writing to the data register.
        uart.write_data(Data::new_with_raw_value(slice[context.progress] as u32));
        context.progress += 1;
    }

    // Write back updated context structure.
    critical_section::with(|cs| {
        let context_ref = TX_CONTEXTS[idx].borrow(cs);
        *context_ref.borrow_mut() = context;
    });
}

#[derive(Debug, Copy, Clone)]
pub struct TxContext {
    progress: usize,
    tx_overrun: bool,
    slice: RawBufSlice,
}

#[allow(clippy::new_without_default)]
impl TxContext {
    pub const fn new() -> Self {
        Self {
            progress: 0,
            tx_overrun: false,
            slice: RawBufSlice::new_nulled(),
        }
    }
}

pub struct TxFuture {
    id: Bank,
}

impl TxFuture {
    /// # Safety
    ///
    /// This function stores the raw pointer of the passed data slice. The user MUST ensure
    /// that the slice outlives the returned future.
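    ///
    /// # Example
    ///
    /// A sketch of correct usage: the buffer lives in the same scope as the future and is
    /// awaited to completion before the buffer is dropped. How `tx` is obtained is not shown
    /// here.
    ///
    /// ```ignore
    /// let buf = [0xAA_u8; 32];
    /// // Safety: `buf` outlives the future, which is awaited to completion below.
    /// let fut = unsafe { TxFuture::new(&mut tx, &buf) };
    /// // `sent` is the number of bytes that were transmitted.
    /// let sent = fut.await?;
    /// ```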
    pub unsafe fn new(tx: &mut Tx, data: &[u8]) -> Self {
        TX_DONE[tx.id as usize].store(false, core::sync::atomic::Ordering::Relaxed);
        tx.disable_interrupts();
        tx.disable();
        tx.clear_fifo();

        let init_fill_count = core::cmp::min(data.len(), 16);
        // We fill the FIFO.
        for data in data.iter().take(init_fill_count) {
            tx.regs.write_data(Data::new_with_raw_value(*data as u32));
        }
        critical_section::with(|cs| {
            let context_ref = TX_CONTEXTS[tx.id as usize].borrow(cs);
            let mut context = context_ref.borrow_mut();
            unsafe { context.slice.set(data) };
            context.progress = init_fill_count;

            // Enable the interrupts and the transmitter inside the same critical section.
            // Enabling them separately can lead to weird glitches.
            tx.enable_interrupts(
                #[cfg(feature = "vor4x")]
                true,
            );
            tx.enable();
        });
        Self { id: tx.id }
    }
}

impl Future for TxFuture {
    type Output = Result<usize, TxOverrunError>;

    fn poll(
        self: core::pin::Pin<&mut Self>,
        cx: &mut core::task::Context<'_>,
    ) -> core::task::Poll<Self::Output> {
        UART_TX_WAKERS[self.id as usize].register(cx.waker());
        if TX_DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
            let progress = critical_section::with(|cs| {
                TX_CONTEXTS[self.id as usize].borrow(cs).borrow().progress
            });
            return core::task::Poll::Ready(Ok(progress));
        }
        core::task::Poll::Pending
    }
}

impl Drop for TxFuture {
    fn drop(&mut self) {
        let mut reg_block = unsafe { self.id.steal_regs() };

        disable_tx_interrupts(&mut reg_block);
        disable_tx(&mut reg_block);
    }
}

pub struct TxAsync(Tx);

impl TxAsync {
    pub fn new(tx: Tx) -> Self {
        Self(tx)
    }

    pub fn release(self) -> Tx {
        self.0
    }
}

#[derive(Debug, thiserror::Error)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("TX overrun error")]
pub struct TxOverrunError;

impl embedded_io_async::Error for TxOverrunError {
    fn kind(&self) -> embedded_io_async::ErrorKind {
        embedded_io_async::ErrorKind::Other
    }
}

impl embedded_io::ErrorType for TxAsync {
    type Error = TxOverrunError;
}

impl Write for TxAsync {
    /// Write a buffer asynchronously.
    ///
    /// This implementation is not side-effect free, and a started future might have already
    /// written part of the passed buffer.
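    ///
    /// A usage sketch; callers that must send the complete buffer can also rely on the
    /// [write_all](embedded_io_async::Write::write_all) default method of the trait.
    ///
    /// ```ignore
    /// let mut uart = TxAsync::new(tx);
    /// // On success, returns how many bytes of the buffer were handed to the hardware.
    /// let written = uart.write(b"hello world\r\n").await?;
    /// ```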
    async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
        let fut = unsafe { TxFuture::new(&mut self.0, buf) };
        fut.await
    }
}