//! embassy_stm32/usart/ringbuffered.rs
1use core::future::poll_fn;
2use core::mem;
3use core::sync::atomic::{compiler_fence, Ordering};
4use core::task::Poll;
5
6use embassy_embedded_hal::SetConfig;
7use embedded_io_async::ReadReady;
8use futures_util::future::{select, Either};
9
10use super::{rdr, reconfigure, set_baudrate, sr, Config, ConfigError, Error, Info, State, UartRx};
11use crate::dma::ReadableRingBuffer;
12use crate::gpio::{AnyPin, SealedPin as _};
13use crate::mode::Async;
14use crate::time::Hertz;
15use crate::usart::Regs;
16use crate::Peri;
17
18/// Rx-only Ring-buffered UART Driver
19///
20/// Created with [UartRx::into_ring_buffered]
21///
22/// ### Notes on 'waiting for bytes'
23///
24/// The `read(buf)` (but not `read()`) and `read_exact(buf)` functions
25/// may need to wait for bytes to arrive, if the ring buffer does not
26/// contain enough bytes to fill the buffer passed by the caller of
27/// the function, or is empty.
28///
29/// Waiting for bytes operates in one of two modes, depending on
30/// the behavior of the sender and the size of the buffer passed
31/// to the function:
32///
33/// - If the sender sends intermittently, the 'idle line'
34/// condition will be detected when the sender stops, and any
35/// bytes in the ring buffer will be returned. If there are no
36/// bytes in the buffer, the check will be repeated each time the
37/// 'idle line' condition is detected, so if the sender sends just
38/// a single byte, it will be returned once the 'idle line'
39/// condition is detected.
40///
41/// - If the sender sends continuously, the call will wait until
42/// the DMA controller indicates that it has written to either the
43/// middle byte or last byte of the ring buffer ('half transfer'
44/// or 'transfer complete', respectively). This does not indicate
45/// the buffer is half-full or full, though, because the DMA
46/// controller does not detect those conditions; it sends an
47/// interrupt when those specific buffer addresses have been
48/// written.
49///
50/// In both cases this will result in variable latency due to the
51/// buffering effect. For example, if the baudrate is 2400 bps, and
52/// the configuration is 8 data bits, no parity bit, and one stop bit,
53/// then a byte will be received every ~4.16ms. If the ring buffer is
54/// 32 bytes, then a 'wait for bytes' delay may have to wait for 16
55/// bytes in the worst case, resulting in a delay (latency) of
56/// ~62.46ms for the first byte in the ring buffer. If the sender
57/// sends only 6 bytes and then stops, but the buffer was empty when
58/// the read function was called, then those bytes may not be returned
59/// until ~24.96ms after the first byte was received (time for 5
60/// additional bytes plus the 'idle frame' which triggers the 'idle
61/// line' condition).
62///
63/// Applications subject to this latency must be careful if they
64/// also apply timeouts during reception, as it may appear (to
65/// them) that the sender has stopped sending when it did not. In
66/// the example above, a 50ms timeout (12 bytes at 2400bps) might
67/// seem to be reasonable to detect that the sender has stopped
68/// sending, but would be falsely triggered in the worst-case
69/// buffer delay scenario.
70///
71/// Note: This latency is caused by the limited capabilities of the
72/// STM32 DMA controller; since it cannot generate an interrupt when
73/// it stores a byte into an empty ring buffer, or in any other
74/// configurable conditions, it is not possible to take notice of the
75/// contents of the ring buffer more quickly without introducing
76/// polling. As a result the latency can be reduced by calling the
77/// read functions repeatedly with smaller buffers to receive the
78/// available bytes, as each call to a read function will explicitly
79/// check the ring buffer for available bytes.
pub struct RingBufferedUartRx<'d> {
    // Static register/interrupt metadata for this USART instance.
    info: &'static Info,
    // Shared driver state (wakers, refcounts) for this instance.
    state: &'static State,
    // Clock feeding the USART, used for baudrate calculations.
    kernel_clock: Hertz,
    // RX pin, kept so it can be disconnected on drop (None if not configured).
    rx: Option<Peri<'d, AnyPin>>,
    // RTS flow-control pin, likewise disconnected on drop (None if not configured).
    rts: Option<Peri<'d, AnyPin>>,
    // DMA-backed ring buffer continuously filled from the RDR register.
    ring_buf: ReadableRingBuffer<'d, u8>,
}
88
89impl<'d> SetConfig for RingBufferedUartRx<'d> {
90 type Config = Config;
91 type ConfigError = ConfigError;
92
93 fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
94 self.set_config(config)
95 }
96}
97
impl<'d> UartRx<'d, Async> {
    /// Turn the `UartRx` into a buffered uart which can continuously receive in the background
    /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
    /// DMA controller, and must be large enough to prevent overflows.
    pub fn into_ring_buffered(mut self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d> {
        // DMA transfer counts are 16-bit on STM32, so the buffer must fit in u16.
        assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF);

        let opts = Default::default();

        // Safety: we forget the struct before this function returns.
        let rx_dma = self.rx_dma.as_mut().unwrap();
        let request = rx_dma.request;
        let rx_dma = unsafe { rx_dma.channel.clone_unchecked() };

        let info = self.info;
        let state = self.state;
        let kernel_clock = self.kernel_clock;
        // SAFETY: the cloned DMA channel and pins are sole owners after the
        // `mem::forget(self)` below, so no double-use can occur.
        let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts) };
        let rx = unsafe { self.rx.as_ref().map(|x| x.clone_unchecked()) };
        let rts = unsafe { self.rts.as_ref().map(|x| x.clone_unchecked()) };

        // Don't disable the clock: skipping UartRx's Drop keeps the peripheral
        // alive; the new RingBufferedUartRx takes over the cleanup duties.
        mem::forget(self);

        RingBufferedUartRx {
            info,
            state,
            kernel_clock,
            rx,
            rts,
            ring_buf,
        }
    }
}
132
impl<'d> RingBufferedUartRx<'d> {
    /// Reconfigure the driver
    pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
        reconfigure(self.info, self.kernel_clock, config)
    }

    /// Configure and start the DMA backed UART receiver
    ///
    /// Note: This is also done automatically by the read functions if
    /// required.
    pub fn start_uart(&mut self) {
        // Clear the buffer so that it is ready to receive data
        compiler_fence(Ordering::SeqCst);
        self.ring_buf.start();

        let r = self.info.regs;
        // clear all interrupts and DMA Rx Request
        r.cr1().modify(|w| {
            // disable RXNE interrupt (the DMA drains RDR, not the CPU)
            w.set_rxneie(false);
            // enable parity interrupt if not ParityNone
            w.set_peie(w.pce());
            // enable idle line interrupt
            w.set_idleie(true);
        });
        r.cr3().modify(|w| {
            // enable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(true);
            // enable DMA Rx Request
            w.set_dmar(true);
        });
    }

    /// Stop DMA backed UART receiver
    fn stop_uart(&mut self) {
        // Ask the DMA channel to pause; the UART-side requests are then
        // disabled below so no further transfers are triggered.
        self.ring_buf.request_pause();

        let r = self.info.regs;
        // clear all interrupts and DMA Rx Request
        r.cr1().modify(|w| {
            // disable RXNE interrupt
            w.set_rxneie(false);
            // disable parity interrupt
            w.set_peie(false);
            // disable idle line interrupt
            w.set_idleie(false);
        });
        r.cr3().modify(|w| {
            // disable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(false);
            // disable DMA Rx Request
            w.set_dmar(false);
        });

        compiler_fence(Ordering::SeqCst);
    }

    /// (Re-)start DMA and Uart if it is not running (has not been started yet or has failed), and
    /// check for errors in status register. Error flags are checked/cleared first.
    fn start_dma_or_check_errors(&mut self) -> Result<(), Error> {
        let r = self.info.regs;

        check_idle_and_errors(r)?;
        // DMAR cleared means reception was never started or was stopped by a
        // previous error; (re-)arm the receiver in that case.
        if !r.cr3().read().dmar() {
            self.start_uart();
        }
        Ok(())
    }

    /// Read bytes that are available in the ring buffer, or wait for
    /// bytes to become available and return them.
    ///
    /// Background reception is started if necessary (if `start_uart()` had
    /// not previously been called, or if an error was detected which
    /// caused background reception to be stopped).
    ///
    /// Background reception is terminated when an error is returned.
    /// It must be started again by calling `start_uart()` or by
    /// calling a read function again.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.start_dma_or_check_errors()?;

        // In half-duplex mode, we need to disable the Transmitter and enable the Receiver
        // since they can't operate simultaneously on the shared line
        let r = self.info.regs;
        if r.cr3().read().hdsel() && r.cr1().read().te() {
            r.cr1().modify(|reg| {
                reg.set_re(true);
                reg.set_te(false);
            });
        }

        loop {
            match self.ring_buf.read(buf) {
                // Nothing buffered yet: fall through and wait below.
                Ok((0, _)) => {}
                Ok((len, _)) => {
                    return Ok(len);
                }
                Err(_) => {
                    // Ring buffer lost sync with the DMA write pointer;
                    // reception is stopped and reported as an overrun.
                    self.stop_uart();
                    return Err(Error::Overrun);
                }
            }

            match self.wait_for_data_or_idle().await {
                Ok(_) => {}
                Err(err) => {
                    self.stop_uart();
                    return Err(err);
                }
            }
        }
    }

    /// Wait for uart idle or dma half-full or full
    async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
        compiler_fence(Ordering::SeqCst);

        // Future which completes when idle line is detected
        let s = self.state;
        let uart = poll_fn(|cx| {
            // Register before checking flags so a wake between the check and
            // Pending cannot be lost.
            s.rx_waker.register(cx.waker());

            compiler_fence(Ordering::SeqCst);

            if check_idle_and_errors(self.info.regs)? {
                // Idle line is detected
                Poll::Ready(Ok(()))
            } else {
                Poll::Pending
            }
        });

        let mut dma_init = false;
        // Future which completes when the DMA controller indicates it
        // has written to the ring buffer's middle byte, or last byte
        let dma = poll_fn(|cx| {
            self.ring_buf.set_waker(cx.waker());

            // First poll only registers the waker; any subsequent poll means
            // the DMA interrupt fired (half-transfer or transfer-complete).
            let status = match dma_init {
                false => Poll::Pending,
                true => Poll::Ready(()),
            };

            dma_init = true;
            status
        });

        // Race the two futures: whichever event happens first lets the
        // caller re-check the ring buffer for available bytes.
        match select(uart, dma).await {
            Either::Left((result, _)) => result,
            Either::Right(((), _)) => Ok(()),
        }
    }

    /// Set baudrate
    pub fn set_baudrate(&self, baudrate: u32) -> Result<(), ConfigError> {
        set_baudrate(self.info, self.kernel_clock, baudrate)
    }
}
292
293impl Drop for RingBufferedUartRx<'_> {
294 fn drop(&mut self) {
295 self.stop_uart();
296 self.rx.as_ref().map(|x| x.set_as_disconnected());
297 self.rts.as_ref().map(|x| x.set_as_disconnected());
298 super::drop_tx_rx(self.info, self.state);
299 }
300}
301
302/// Check and clear idle and error interrupts, return true if idle, Err(e) on error
303///
304/// All flags are read and cleared in a single step, respectively. When more than one flag is set
305/// at the same time, all flags will be cleared but only one flag will be reported. So the other
306/// flag(s) will gone missing unnoticed. The error flags are checked first, the idle flag last.
307///
308/// For usart_v1 and usart_v2, all status flags must be handled together anyway because all flags
309/// are cleared by a single read to the RDR register.
/// Check and clear idle and error interrupts, return true if idle, Err(e) on error
///
/// All flags are read and cleared in a single step, respectively. When more than one flag is set
/// at the same time, all flags will be cleared but only one flag will be reported. So the other
/// flag(s) will go missing unnoticed. The error flags are checked first, the idle flag last.
///
/// For usart_v1 and usart_v2, all status flags must be handled together anyway because all flags
/// are cleared by a single read to the RDR register.
fn check_idle_and_errors(r: Regs) -> Result<bool, Error> {
    // Critical section is required so that the flags aren't set after read and before clear
    let sr = critical_section::with(|_| {
        // SAFETY: read only and we only use Rx related flags
        let sr = sr(r).read();

        // v3/v4 have a dedicated interrupt-clear register (ICR).
        #[cfg(any(usart_v3, usart_v4))]
        r.icr().write(|w| {
            w.set_idle(true);
            w.set_pe(true);
            w.set_fe(true);
            w.set_ne(true);
            w.set_ore(true);
        });
        #[cfg(not(any(usart_v3, usart_v4)))]
        unsafe {
            // This read also clears the error and idle interrupt flags on v1 (TODO and v2?)
            rdr(r).read_volatile()
        };
        sr
    });
    // Report errors in priority order; at most one error surfaces per call
    // even if several flags were set (see doc comment above).
    if sr.pe() {
        Err(Error::Parity)
    } else if sr.fe() {
        Err(Error::Framing)
    } else if sr.ne() {
        Err(Error::Noise)
    } else if sr.ore() {
        Err(Error::Overrun)
    } else {
        // Re-enable the idle-line interrupt so the rx_waker fires again on
        // the next idle frame.
        r.cr1().modify(|w| w.set_idleie(true));
        Ok(sr.idle())
    }
}
344
// Use the driver's own `Error` as the embedded-io-async error type.
impl embedded_io_async::ErrorType for RingBufferedUartRx<'_> {
    type Error = Error;
}
348
349impl embedded_io_async::Read for RingBufferedUartRx<'_> {
350 async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
351 self.read(buf).await
352 }
353}
354
355impl embedded_hal_nb::serial::Read for RingBufferedUartRx<'_> {
356 fn read(&mut self) -> nb::Result<u8, Self::Error> {
357 self.start_dma_or_check_errors()?;
358
359 let mut buf = [0u8; 1];
360 match self.ring_buf.read(&mut buf) {
361 Ok((0, _)) => Err(nb::Error::WouldBlock),
362 Ok((len, _)) => {
363 assert!(len == 1);
364 Ok(buf[0])
365 }
366 Err(_) => {
367 self.stop_uart();
368 Err(nb::Error::Other(Error::Overrun))
369 }
370 }
371 }
372}
373
// Use the driver's own `Error` as the embedded-hal-nb serial error type.
impl embedded_hal_nb::serial::ErrorType for RingBufferedUartRx<'_> {
    type Error = Error;
}
377
378impl ReadReady for RingBufferedUartRx<'_> {
379 fn read_ready(&mut self) -> Result<bool, Self::Error> {
380 let len = self.ring_buf.len().map_err(|e| match e {
381 crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun,
382 crate::dma::ringbuffer::Error::DmaUnsynced => {
383 error!(
384 "Ringbuffer error: DmaUNsynced, driver implementation is
385 probably bugged please open an issue"
386 );
387 // we report this as overrun since its recoverable in the same way
388 Self::Error::Overrun
389 }
390 })?;
391 Ok(len > 0)
392 }
393}