embassy_nrf/uarte.rs

//! Universal Asynchronous Receiver Transmitter (UART) driver.
//!
//! The UART driver is provided in two flavors: this one and [crate::buffered_uarte::BufferedUarte].
//! The [Uarte] here is useful for use cases where reading the UARTE peripheral is
//! exclusively awaited on. If the [Uarte] must be awaited on together with some other future,
//! for example when using `futures_util::future::select`, then you should consider
//! [crate::buffered_uarte::BufferedUarte] so that reads may continue while processing these
//! other futures. If you do not, you may lose data between reads.
//!
//! An advantage the [Uarte] has over [crate::buffered_uarte::BufferedUarte] is that less
//! memory may be used, given that buffers are passed in directly to its read and write
//! methods.
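//!
//! The UARTE peripheral uses EasyDMA, which can only access data RAM (see
//! [Error::BufferNotInRAM]). The plain `write` methods copy the data into a RAM buffer
//! if needed, while the `write_from_ram` variants return an error instead of copying.
//!
//! A minimal usage sketch is shown below. Peripheral, pin and interrupt names are
//! illustrative and chip-specific; adjust them for your target and board.
//!
//! ```ignore
//! use embassy_nrf::uarte::{self, Uarte};
//! use embassy_nrf::{bind_interrupts, peripherals};
//!
//! bind_interrupts!(struct Irqs {
//!     UARTE0_UART0 => uarte::InterruptHandler<peripherals::UARTE0>;
//! });
//!
//! #[embassy_executor::main]
//! async fn main(_spawner: embassy_executor::Spawner) {
//!     let p = embassy_nrf::init(Default::default());
//!     let mut uart = Uarte::new(p.UARTE0, p.P0_08, p.P0_06, Irqs, uarte::Config::default());
//!
//!     uart.write(b"hello").await.unwrap();
//!     let mut buf = [0u8; 16];
//!     uart.read(&mut buf).await.unwrap();
//! }
//! ```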

#![macro_use]

use core::future::poll_fn;
use core::marker::PhantomData;
use core::sync::atomic::{compiler_fence, AtomicU8, Ordering};
use core::task::Poll;

use embassy_hal_internal::drop::OnDrop;
use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
// Re-export SVD variants to allow user to directly set values.
pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};

use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
use crate::gpio::{self, AnyPin, Pin as GpioPin, PselBits, SealedPin as _, DISCONNECTED};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::uarte::vals;
use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
use crate::timer::{Frequency, Instance as TimerInstance, Timer};
use crate::util::slice_in_ram_or;
use crate::{interrupt, pac};

/// UARTE config.
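///
/// `Config` is `#[non_exhaustive]`, so build it from [`Default`] and override the fields
/// you need, for example (the baud rate and parity variants shown are illustrative; see
/// the re-exported [Baudrate] and [Parity] enums for the available values):
///
/// ```ignore
/// let mut config = embassy_nrf::uarte::Config::default();
/// config.baudrate = embassy_nrf::uarte::Baudrate::BAUD9600;
/// config.parity = embassy_nrf::uarte::Parity::INCLUDED;
/// ```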
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity bit.
    pub parity: Parity,
    /// Baud rate.
    pub baudrate: Baudrate,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            parity: Parity::EXCLUDED,
            baudrate: Baudrate::BAUD115200,
        }
    }
}

bitflags::bitflags! {
    /// Error source flags
    pub(crate) struct ErrorSource: u32 {
        /// Buffer overrun
        const OVERRUN = 0x01;
        /// Parity error
        const PARITY = 0x02;
        /// Framing error
        const FRAMING = 0x04;
        /// Break condition
        const BREAK = 0x08;
    }
}

impl ErrorSource {
    #[inline]
    fn check(self) -> Result<(), Error> {
        if self.contains(ErrorSource::OVERRUN) {
            Err(Error::Overrun)
        } else if self.contains(ErrorSource::PARITY) {
            Err(Error::Parity)
        } else if self.contains(ErrorSource::FRAMING) {
            Err(Error::Framing)
        } else if self.contains(ErrorSource::BREAK) {
            Err(Error::Break)
        } else {
            Ok(())
        }
    }
}

/// UART error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was too long.
    BufferTooLong,
    /// The buffer is not in data RAM. It's most likely in flash, and nRF's DMA cannot access flash.
    BufferNotInRAM,
    /// Framing Error
    Framing,
    /// Parity Error
    Parity,
    /// Buffer Overrun
    Overrun,
    /// Break condition
    Break,
}

/// Interrupt handler.
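///
/// The driver constructors require this handler to be bound to the UARTE interrupt,
/// typically via `bind_interrupts!`. A sketch (the interrupt and peripheral names are
/// chip-specific):
///
/// ```ignore
/// use embassy_nrf::{bind_interrupts, peripherals, uarte};
///
/// bind_interrupts!(struct Irqs {
///     UARTE0_UART0 => uarte::InterruptHandler<peripherals::UARTE0>;
/// });
/// ```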
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        let endrx = r.events_endrx().read();
        let error = r.events_error().read();
        if endrx != 0 || error != 0 {
            s.rx_waker.wake();
            if endrx != 0 {
                r.intenclr().write(|w| w.set_endrx(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
        }
        if r.events_endtx().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_endtx(true));
        }
    }
}

/// UARTE driver.
pub struct Uarte<'d, T: Instance> {
    tx: UarteTx<'d, T>,
    rx: UarteRx<'d, T>,
}

/// Transmitter part of the UARTE driver.
///
/// This can be obtained via [`Uarte::split`], or created directly.
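///
/// A sketch of direct, tx-only construction (peripheral, pin and `Irqs` names are
/// illustrative; see the module-level example):
///
/// ```ignore
/// let mut tx = UarteTx::new(p.UARTE0, Irqs, p.P0_06, uarte::Config::default());
/// tx.write(b"ping").await.unwrap();
/// ```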
pub struct UarteTx<'d, T: Instance> {
    _p: Peri<'d, T>,
}

/// Receiver part of the UARTE driver.
///
/// This can be obtained via [`Uarte::split`], or created directly.
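///
/// A sketch of direct, rx-only construction (peripheral, pin and `Irqs` names are
/// illustrative; see the module-level example):
///
/// ```ignore
/// let mut rx = UarteRx::new(p.UARTE0, Irqs, p.P0_08, uarte::Config::default());
/// let mut buf = [0u8; 4];
/// rx.read(&mut buf).await.unwrap();
/// ```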
pub struct UarteRx<'d, T: Instance> {
    _p: Peri<'d, T>,
}

impl<'d, T: Instance> Uarte<'d, T> {
    /// Create a new UARTE without hardware flow control
    pub fn new(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    fn new_inner(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or both unset."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                _p: unsafe { uarte.clone_unchecked() },
            },
            rx: UarteRx { _p: uarte },
        }
    }

    /// Split the Uarte into the transmitter and receiver parts.
    ///
    /// This is useful to concurrently transmit and receive from independent tasks.
    pub fn split(self) -> (UarteTx<'d, T>, UarteRx<'d, T>) {
        (self.tx, self.rx)
    }

    /// Split the UART into reader and writer parts, by reference.
    ///
    /// The returned halves borrow from `self`, so you can drop them and go back to using
    /// the "un-split" `self`. This allows temporarily splitting the UART.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d, T>, &mut UarteRx<'d, T>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split the Uarte into the transmitter and a receiver with idle-line detection support.
    ///
    /// This is useful to concurrently transmit and receive from independent tasks.
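    ///
    /// A sketch of typical use (the timer and PPI channel names are illustrative):
    ///
    /// ```ignore
    /// let (mut tx, mut rx) = uart.split_with_idle(p.TIMER0, p.PPI_CH0, p.PPI_CH1);
    /// let mut buf = [0u8; 64];
    /// let n = rx.read_until_idle(&mut buf).await.unwrap();
    /// tx.write(&buf[..n]).await.unwrap();
    /// ```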
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d, T>, UarteRxWithIdle<'d, T, U>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// Return the endtx event for use with PPI
    pub fn event_endtx(&self) -> Event<'_> {
        let r = T::regs();
        Event::from_reg(r.events_endtx())
    }

    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Write all bytes in the buffer.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Same as [`write`](Uarte::write) but will fail instead of copying data into RAM. Consult the module level documentation to learn more.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Write all bytes in the buffer.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Same as [`blocking_write`](Uarte::blocking_write) but will fail instead of copying data into RAM. Consult the module level documentation to learn more.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}

pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    r.psel().cts().write_value(cts.psel_bits());
}

pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    r.psel().rts().write_value(rts.psel_bits());
}

pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Disable all interrupts
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Reset rxstarted, txstarted. These are used by drop to know whether a transfer was
    // stopped midway or not.
    r.events_rxstarted().write_value(0);
    r.events_txstarted().write_value(0);

    // reset all pins
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}

impl<'d, T: Instance> UarteTx<'d, T> {
    /// Create a new tx-only UARTE without hardware flow control
    pub fn new(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config)
    }

    /// Create a new tx-only UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
    }

    fn new_inner(uarte: Peri<'d, T>, txd: Peri<'d, AnyPin>, cts: Option<Peri<'d, AnyPin>>, config: Config) -> Self {
        let r = T::regs();

        configure(r, config, cts.is_some());
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self { _p: uarte }
    }

    /// Write all bytes in the buffer.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.write_from_ram(buffer).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.write_from_ram(ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Same as [`write`](Self::write) but will fail instead of copying data into RAM. Consult the module level documentation to learn more.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        let drop = OnDrop::new(move || {
            trace!("write drop: stopping");

            r.intenclr().write(|w| w.set_endtx(true));
            r.events_txstopped().write_value(0);
            r.tasks_stoptx().write_value(1);

            // TX is stopped almost instantly, spinning is fine.
            while r.events_endtx().read() == 0 {}
            trace!("write drop: stopped");
        });

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        r.intenset().write(|w| w.set_endtx(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        poll_fn(|cx| {
            s.tx_waker.register(cx.waker());
            if r.events_endtx().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);
        drop.defuse();

        Ok(())
    }

    /// Write all bytes in the buffer.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.blocking_write_from_ram(buffer) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.blocking_write_from_ram(ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    /// Same as [`blocking_write`](Self::blocking_write) but will fail instead of copying data into RAM. Consult the module level documentation to learn more.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        r.intenclr().write(|w| w.set_endtx(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        while r.events_endtx().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);

        Ok(())
    }
}

impl<'a, T: Instance> Drop for UarteTx<'a, T> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = T::regs();

        let did_stoptx = r.events_txstarted().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        // Wait for txstopped, if needed.
        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = T::state();

        drop_tx_rx(r, s);
    }
}

impl<'d, T: Instance> UarteRx<'d, T> {
    /// Create a new rx-only UARTE without hardware flow control
    pub fn new(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config)
    }

    /// Create a new rx-only UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
    }

    /// Check for errors and clear the error register if an error occurred.
    fn check_and_clear_errors(&mut self) -> Result<(), Error> {
        let r = T::regs();
        let err_bits = r.errorsrc().read();
        r.errorsrc().write_value(err_bits);
        ErrorSource::from_bits_truncate(err_bits.0).check()
    }

    fn new_inner(uarte: Peri<'d, T>, rxd: Peri<'d, AnyPin>, rts: Option<Peri<'d, AnyPin>>, config: Config) -> Self {
        let r = T::regs();

        configure(r, config, rts.is_some());
        configure_rx_pins(r, rxd, rts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self { _p: uarte }
    }

    /// Upgrade to an instance that supports idle line detection.
    pub fn with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> UarteRxWithIdle<'d, T, U> {
        let timer = Timer::new(timer);

        let r = T::regs();

        // BAUDRATE register values are `baudrate * 2^32 / 16000000`
        // source: https://devzone.nordicsemi.com/f/nordic-q-a/391/uart-baudrate-register-values
        //
        // We want to stop RX if the line is idle for 2 bytes' worth of time,
        // that is 20 bits (each byte is 1 start bit + 8 data bits + 1 stop bit).
        // This gives us the number of 16 MHz ticks for 20 bits.
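        //
        // As a sanity check on the arithmetic (not from the datasheet): with
        // baud ~ BAUDRATE_reg * 16 MHz / 2^32, the 16 MHz tick count for 20 bit times is
        // 20 * 16 MHz / baud = 20 * 2^32 / BAUDRATE_reg = 0x8000_0000 / (BAUDRATE_reg / 40),
        // which is the expression used below.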
        let baudrate = r.baudrate().read().baudrate();
        let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);

        timer.set_frequency(Frequency::F16MHz);
        timer.cc(0).write(timeout);
        timer.cc(0).short_compare_clear();
        timer.cc(0).short_compare_stop();

        let mut ppi_ch1 = Ppi::new_one_to_two(
            ppi_ch1.into(),
            Event::from_reg(r.events_rxdrdy()),
            timer.task_clear(),
            timer.task_start(),
        );
        ppi_ch1.enable();

        let mut ppi_ch2 = Ppi::new_one_to_one(
            ppi_ch2.into(),
            timer.cc(0).event_compare(),
            Task::from_reg(r.tasks_stoprx()),
        );
        ppi_ch2.enable();

        UarteRxWithIdle {
            rx: self,
            timer,
            ppi_ch1,
            _ppi_ch2: ppi_ch2,
        }
    }

    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        let drop = OnDrop::new(move || {
            trace!("read drop: stopping");

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}

            trace!("read drop: stopped");
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);
        drop.defuse();

        result
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);

        self.check_and_clear_errors()
    }
}

impl<'a, T: Instance> Drop for UarteRx<'a, T> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = T::regs();

        let did_stoprx = r.events_rxstarted().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        // Wait for rxto, if needed.
        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = T::state();

        drop_tx_rx(r, s);
    }
}

/// Receiver part of the UARTE driver, with `read_until_idle` support.
///
/// This can be obtained via [`Uarte::split_with_idle`].
pub struct UarteRxWithIdle<'d, T: Instance, U: TimerInstance> {
    rx: UarteRx<'d, T>,
    timer: Timer<'d, U>,
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
}

impl<'d, T: Instance, U: TimerInstance> UarteRxWithIdle<'d, T, U> {
    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Read bytes until the buffer is filled, or the line becomes idle.
    ///
    /// Returns the number of bytes read.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        self.ppi_ch1.enable();

        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        drop.defuse();

        result.map(|_| n)
    }

    /// Read bytes until the buffer is filled, or the line becomes idle.
    ///
    /// Returns the number of bytes read.
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        self.ppi_ch1.enable();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}

#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {
    // Do nothing
}

#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // Apply workaround for anomalies:
    // - nRF9160 - anomaly 23
    // - nRF5340 - anomaly 44
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    // NB Safety: This is taken from Nordic's driver -
    // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_stoptx().write_value(1);
    }

    // NB Safety: This is taken from Nordic's driver -
    // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_stoprx().write_value(1);

        let mut workaround_succeeded = false;
        // The UARTE is able to receive up to four bytes after the STOPRX task has been triggered.
        // On lowest supported baud rate (1200 baud), with parity bit and two stop bits configured
        // (resulting in 12 bits per data byte sent), this may take up to 40 ms.
        for _ in 0..40000 {
            // NB Safety: This is taken from Nordic's driver -
            // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeeded = true;
                break;
            } else {
                // Need to sleep for 1us here
            }
        }

        if !workaround_succeeded {
            panic!("Failed to apply workaround for UART");
        }

        // write back the bits we just read to clear them
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}

pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
    if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
        // Finally we can disable, and we do so for the peripheral
        // i.e. not just rx concerns.
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().rxd().read());
        gpio::deconfigure_pin(r.psel().txd().read());
        gpio::deconfigure_pin(r.psel().rts().read());
        gpio::deconfigure_pin(r.psel().cts().read());

        trace!("uarte tx and rx drop: done");
    }
}

pub(crate) struct State {
    pub(crate) rx_waker: AtomicWaker,
    pub(crate) tx_waker: AtomicWaker,
    pub(crate) tx_rx_refcount: AtomicU8,
}
impl State {
    pub(crate) const fn new() -> Self {
        Self {
            rx_waker: AtomicWaker::new(),
            tx_waker: AtomicWaker::new(),
            tx_rx_refcount: AtomicU8::new(0),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::uarte::Uarte;
    fn state() -> &'static State;
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}

/// UARTE peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            fn state() -> &'static crate::uarte::State {
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}

// ====================

mod eh02 {
    use super::*;

    impl<'d, T: Instance> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d, T> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d, T: Instance> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d, T> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}

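// The impls below expose the driver through the `embedded-io-async` traits so it can be
// driven by generic code. A sketch of such generic code (the helper is illustrative, not
// part of this crate):
//
//     async fn send_line<W: embedded_io_async::Write>(w: &mut W, line: &[u8]) -> Result<(), W::Error> {
//         w.write_all(line).await?;
//         w.write_all(b"\r\n").await
//     }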
mod _embedded_io {
    use super::*;

    impl embedded_io_async::Error for Error {
        fn kind(&self) -> embedded_io_async::ErrorKind {
            match *self {
                Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
                Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
                Error::Framing => embedded_io_async::ErrorKind::InvalidData,
                Error::Parity => embedded_io_async::ErrorKind::InvalidData,
                Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
                Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
            }
        }
    }

    impl<'d, U: Instance> embedded_io_async::ErrorType for Uarte<'d, U> {
        type Error = Error;
    }

    impl<'d, U: Instance> embedded_io_async::ErrorType for UarteTx<'d, U> {
        type Error = Error;
    }

    impl<'d, U: Instance> embedded_io_async::Write for Uarte<'d, U> {
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }

    impl<'d, U: Instance> embedded_io_async::Write for UarteTx<'d, U> {
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }
}