embassy_nrf/uarte.rs

//! Universal Asynchronous Receiver Transmitter (UART) driver.
//!
//! The UART driver is provided in two flavors: this one and [crate::buffered_uarte::BufferedUarte].
//! The [Uarte] here is useful for use cases where reading the UARTE peripheral is
//! exclusively awaited on. If the [Uarte] must be awaited on together with some other future,
//! for example when using `futures_util::future::select`, then you should consider
//! [crate::buffered_uarte::BufferedUarte] so that reads may continue while those other
//! futures are being processed. If you do not, you may lose data between reads.
//!
//! An advantage the [Uarte] has over [crate::buffered_uarte::BufferedUarte] is that it may use
//! less memory, given that buffers are passed directly to its read and write methods.
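//!
//! # Example
//!
//! A minimal usage sketch. The peripheral, interrupt and pin names (`UARTE0`, `UARTE0_UART0`,
//! `P0_08`, `P0_06`) are illustrative and depend on the chip and board in use.
//!
//! ```ignore
//! use embassy_nrf::{bind_interrupts, peripherals, uarte};
//!
//! bind_interrupts!(struct Irqs {
//!     UARTE0_UART0 => uarte::InterruptHandler<peripherals::UARTE0>;
//! });
//!
//! let p = embassy_nrf::init(Default::default());
//! let config = uarte::Config::default();
//! let mut uart = uarte::Uarte::new(p.UARTE0, p.P0_08, p.P0_06, Irqs, config);
//!
//! uart.write(b"hello").await.unwrap();
//! let mut buf = [0u8; 8];
//! uart.read(&mut buf).await.unwrap();
//! ```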

#![macro_use]

use core::future::poll_fn;
use core::marker::PhantomData;
use core::sync::atomic::{compiler_fence, AtomicU8, Ordering};
use core::task::Poll;

use embassy_hal_internal::drop::OnDrop;
use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
// Re-export SVD variants to allow user to directly set values.
pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};

use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
use crate::gpio::{self, AnyPin, Pin as GpioPin, PselBits, SealedPin as _, DISCONNECTED};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::uarte::vals;
use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
use crate::timer::{Frequency, Instance as TimerInstance, Timer};
use crate::util::slice_in_ram_or;
use crate::{interrupt, pac};

/// UARTE config.
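///
/// A sketch of customizing the defaults before constructing a driver; the chosen
/// variants (`BAUD9600`, `INCLUDED`) are illustrative picks from the re-exported PAC enums.
///
/// ```ignore
/// let mut config = Config::default();
/// config.baudrate = Baudrate::BAUD9600;
/// config.parity = Parity::INCLUDED;
/// ```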
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity bit.
    pub parity: Parity,
    /// Baud rate.
    pub baudrate: Baudrate,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            parity: Parity::EXCLUDED,
            baudrate: Baudrate::BAUD115200,
        }
    }
}

bitflags::bitflags! {
    /// Error source flags
    pub(crate) struct ErrorSource: u32 {
        /// Buffer overrun
        const OVERRUN = 0x01;
        /// Parity error
        const PARITY = 0x02;
        /// Framing error
        const FRAMING = 0x04;
        /// Break condition
        const BREAK = 0x08;
    }
}

impl ErrorSource {
    #[inline]
    fn check(self) -> Result<(), Error> {
        if self.contains(ErrorSource::OVERRUN) {
            Err(Error::Overrun)
        } else if self.contains(ErrorSource::PARITY) {
            Err(Error::Parity)
        } else if self.contains(ErrorSource::FRAMING) {
            Err(Error::Framing)
        } else if self.contains(ErrorSource::BREAK) {
            Err(Error::Break)
        } else {
            Ok(())
        }
    }
}

/// UART error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was too long.
    BufferTooLong,
    /// The buffer is not in data RAM. It's most likely in flash, and nRF's DMA cannot access flash.
    BufferNotInRAM,
    /// Framing Error
    Framing,
    /// Parity Error
    Parity,
    /// Buffer Overrun
    Overrun,
    /// Break condition
    Break,
}

/// Interrupt handler.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        let endrx = r.events_endrx().read();
        let error = r.events_error().read();
        if endrx != 0 || error != 0 {
            s.rx_waker.wake();
            if endrx != 0 {
                r.intenclr().write(|w| w.set_endrx(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
        }
        if r.events_endtx().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_endtx(true));
        }
    }
}

/// UARTE driver.
pub struct Uarte<'d> {
    tx: UarteTx<'d>,
    rx: UarteRx<'d>,
}

/// Transmitter part of the UARTE driver.
///
/// This can be obtained via [`Uarte::split`], or created directly.
pub struct UarteTx<'d> {
    r: pac::uarte::Uarte,
    state: &'static State,
    _p: PhantomData<&'d ()>,
}

/// Receiver part of the UARTE driver.
///
/// This can be obtained via [`Uarte::split`], or created directly.
pub struct UarteRx<'d> {
    r: pac::uarte::Uarte,
    state: &'static State,
    _p: PhantomData<&'d ()>,
}

impl<'d> Uarte<'d> {
    /// Create a new UARTE without hardware flow control
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or none set."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
            rx: UarteRx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
        }
    }

    /// Split the Uarte into the transmitter and receiver parts.
    ///
    /// This is useful to concurrently transmit and receive from independent tasks.
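    ///
    /// A sketch of driving both halves concurrently from one task; `embassy_futures`
    /// and the names used here (`uart`, buffer sizes) are illustrative, not required by this driver.
    ///
    /// ```ignore
    /// let (mut tx, mut rx) = uart.split();
    /// let mut buf = [0u8; 16];
    /// // Run a write and a read at the same time without losing data between reads.
    /// let (tx_res, rx_res) = embassy_futures::join::join(tx.write(b"ping"), rx.read(&mut buf)).await;
    /// ```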
    pub fn split(self) -> (UarteTx<'d>, UarteRx<'d>) {
        (self.tx, self.rx)
    }

    /// Split the UART into reader and writer parts, by reference.
    ///
    /// The returned halves borrow from `self`, so you can drop them and go back to using
    /// the "un-split" `self`. This allows temporarily splitting the UART.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d>, &mut UarteRx<'d>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split the Uarte into the transmitter part and a receiver part with idle-line detection support.
    ///
    /// This is useful to concurrently transmit and receive from independent tasks.
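    ///
    /// A sketch of reading variable-length frames with idle-line detection; the timer and PPI
    /// channel names (`TIMER0`, `PPI_CH0`, `PPI_CH1`) are illustrative and chip-dependent.
    ///
    /// ```ignore
    /// let (mut tx, mut rx) = uart.split_with_idle(p.TIMER0, p.PPI_CH0, p.PPI_CH1);
    /// let mut buf = [0u8; 64];
    /// // Completes when the buffer is full or the line has been idle for ~2 byte times.
    /// let n = rx.read_until_idle(&mut buf).await.unwrap();
    /// ```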
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d>, UarteRxWithIdle<'d>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// Return the endtx event for use with PPI
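    ///
    /// A sketch of wiring the ENDTX event to some other task over (D)PPI; `ppi_ch` and
    /// `other_task` are illustrative placeholders.
    ///
    /// ```ignore
    /// let mut ppi = Ppi::new_one_to_one(ppi_ch, uart.event_endtx(), other_task);
    /// ppi.enable();
    /// ```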
    pub fn event_endtx(&self) -> Event<'_> {
        let r = self.tx.r;
        Event::from_reg(r.events_endtx())
    }

    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Write all bytes in the buffer.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Same as [`write`](Uarte::write), but fails if the buffer is not in RAM instead of copying it there first. Consult the module-level documentation to learn more.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Write all bytes in the buffer.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Same as [`blocking_write`](Uarte::blocking_write), but fails if the buffer is not in RAM instead of copying it there first. Consult the module-level documentation to learn more.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}

pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    r.psel().cts().write_value(cts.psel_bits());
}

pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    r.psel().rts().write_value(rts.psel_bits());
}

pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Disable all interrupts
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Reset rxstarted, txstarted. These are used by drop to know whether a transfer was
    // stopped midway or not.
    r.events_rxstarted().write_value(0);
    r.events_txstarted().write_value(0);

    // reset all pins
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}

impl<'d> UarteTx<'d> {
    /// Create a new tx-only UARTE without hardware flow control
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config)
    }

    /// Create a new tx-only UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
    }

    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, cts.is_some());
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
        }
    }

    /// Write all bytes in the buffer.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.write_from_ram(buffer).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.write_from_ram(ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Same as [`write`](Self::write), but fails if the buffer is not in RAM instead of copying it there first. Consult the module-level documentation to learn more.
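    ///
    /// A sketch of the difference; on typical nRF targets a byte-string literal lives in
    /// flash, which EasyDMA cannot read, while a stack buffer lives in RAM.
    ///
    /// ```ignore
    /// assert_eq!(tx.write_from_ram(b"hi").await, Err(Error::BufferNotInRAM));
    /// let buf = [0x68u8, 0x69];
    /// tx.write_from_ram(&buf).await.unwrap();
    /// ```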
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        let drop = OnDrop::new(move || {
            trace!("write drop: stopping");

            r.intenclr().write(|w| w.set_endtx(true));
            r.events_txstopped().write_value(0);
            r.tasks_stoptx().write_value(1);

            // TX is stopped almost instantly, spinning is fine.
            while r.events_endtx().read() == 0 {}
            trace!("write drop: stopped");
        });

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        r.intenset().write(|w| w.set_endtx(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        poll_fn(|cx| {
            s.tx_waker.register(cx.waker());
            if r.events_endtx().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);
        drop.defuse();

        Ok(())
    }

    /// Write all bytes in the buffer.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.blocking_write_from_ram(buffer) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.blocking_write_from_ram(ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    /// Same as [`blocking_write`](Self::blocking_write), but fails if the buffer is not in RAM instead of copying it there first. Consult the module-level documentation to learn more.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        r.intenclr().write(|w| w.set_endtx(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        while r.events_endtx().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);

        Ok(())
    }
}

impl<'a> Drop for UarteTx<'a> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = self.r;

        let did_stoptx = r.events_txstarted().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        // Wait for txstopped, if needed.
        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = self.state;

        drop_tx_rx(r, s);
    }
}

impl<'d> UarteRx<'d> {
    /// Create a new rx-only UARTE without hardware flow control
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config)
    }

    /// Create a new rx-only UARTE with hardware flow control (RTS/CTS)
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
    }

    /// Check for errors and clear the error register if an error occurred.
    fn check_and_clear_errors(&mut self) -> Result<(), Error> {
        let r = self.r;
        let err_bits = r.errorsrc().read();
        r.errorsrc().write_value(err_bits);
        ErrorSource::from_bits_truncate(err_bits.0).check()
    }

    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, rts.is_some());
        configure_rx_pins(r, rxd, rts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
        }
    }

    /// Upgrade to an instance that supports idle line detection.
    pub fn with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> UarteRxWithIdle<'d> {
        let timer = Timer::new(timer);

        let r = self.r;

        // BAUDRATE register values are `baudrate * 2^32 / 16000000`
        // source: https://devzone.nordicsemi.com/f/nordic-q-a/391/uart-baudrate-register-values
        //
        // We want to stop RX if the line is idle for 2 bytes' worth of time.
        // That is 20 bits (each byte is 1 start bit + 8 data bits + 1 stop bit).
        // This gives us the number of 16 MHz timer ticks for 20 bits.
        let baudrate = r.baudrate().read().baudrate();
        let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);
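        // Worked example (approximate): at 115200 baud the BAUDRATE register reads about
        // 0x01D7_E000 (~30_924_800), so timeout ≈ 0x8000_0000 / (30_924_800 / 40) ≈ 2777
        // ticks of the 16 MHz timer, i.e. ~174 µs, which is 20 bit times at 115200 baud.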

        timer.set_frequency(Frequency::F16MHz);
        timer.cc(0).write(timeout);
        timer.cc(0).short_compare_clear();
        timer.cc(0).short_compare_stop();

        let mut ppi_ch1 = Ppi::new_one_to_two(
            ppi_ch1.into(),
            Event::from_reg(r.events_rxdrdy()),
            timer.task_clear(),
            timer.task_start(),
        );
        ppi_ch1.enable();

        let mut ppi_ch2 = Ppi::new_one_to_one(
            ppi_ch2.into(),
            timer.cc(0).event_compare(),
            Task::from_reg(r.tasks_stoprx()),
        );
        ppi_ch2.enable();

        let state = self.state;

        UarteRxWithIdle {
            rx: self,
            timer,
            ppi_ch1,
            _ppi_ch2: ppi_ch2,
            r,
            state,
        }
    }

    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        let drop = OnDrop::new(move || {
            trace!("read drop: stopping");

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}

            trace!("read drop: stopped");
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);
        drop.defuse();

        result
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);

        self.check_and_clear_errors()
    }
}

impl<'a> Drop for UarteRx<'a> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = self.r;

        let did_stoprx = r.events_rxstarted().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        // Wait for rxto, if needed.
        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = self.state;

        drop_tx_rx(r, s);
    }
}

/// Receiver part of the UARTE driver, with `read_until_idle` support.
///
/// This can be obtained via [`Uarte::split_with_idle`].
pub struct UarteRxWithIdle<'d> {
    rx: UarteRx<'d>,
    timer: Timer<'d>,
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    r: pac::uarte::Uarte,
    state: &'static State,
}

impl<'d> UarteRxWithIdle<'d> {
    /// Read bytes until the buffer is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Read bytes until the buffer is filled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Read bytes until the buffer is filled, or the line becomes idle.
    ///
    /// Returns the number of bytes read.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        self.ppi_ch1.enable();

        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        drop.defuse();

        result.map(|_| n)
    }

    /// Read bytes until the buffer is filled, or the line becomes idle.
    ///
    /// Returns the number of bytes read.
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        self.ppi_ch1.enable();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}

#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {
    // Do nothing
}

#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // Apply workaround for anomalies:
    // - nRF9160 - anomaly 23
    // - nRF5340 - anomaly 44
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    // NB Safety: This is taken from Nordic's driver -
    // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_stoptx().write_value(1);
    }

    // NB Safety: This is taken from Nordic's driver -
    // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_stoprx().write_value(1);

        let mut workaround_succeeded = false;
        // The UARTE is able to receive up to four bytes after the STOPRX task has been triggered.
        // On lowest supported baud rate (1200 baud), with parity bit and two stop bits configured
        // (resulting in 12 bits per data byte sent), this may take up to 40 ms.
        for _ in 0..40000 {
            // NB Safety: This is taken from Nordic's driver -
            // https://github.com/NordicSemiconductor/nrfx/blob/master/drivers/src/nrfx_uarte.c#L197
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeeded = true;
                break;
            } else {
                // Need to sleep for 1us here
            }
        }

        if !workaround_succeeded {
            panic!("Failed to apply workaround for UART");
        }

        // write back the bits we just read to clear them
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}

pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
    if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
        // Finally we can disable, and we do so for the peripheral
        // i.e. not just rx concerns.
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().rxd().read());
        gpio::deconfigure_pin(r.psel().txd().read());
        gpio::deconfigure_pin(r.psel().rts().read());
        gpio::deconfigure_pin(r.psel().cts().read());

        trace!("uarte tx and rx drop: done");
    }
}

pub(crate) struct State {
    pub(crate) rx_waker: AtomicWaker,
    pub(crate) tx_waker: AtomicWaker,
    pub(crate) tx_rx_refcount: AtomicU8,
}
impl State {
    pub(crate) const fn new() -> Self {
        Self {
            rx_waker: AtomicWaker::new(),
            tx_waker: AtomicWaker::new(),
            tx_rx_refcount: AtomicU8::new(0),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::uarte::Uarte;
    fn state() -> &'static State;
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}

/// UARTE peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            fn state() -> &'static crate::uarte::State {
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}

// ====================

mod eh02 {
    use super::*;

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}

mod _embedded_io {
    use super::*;

    impl embedded_io_async::Error for Error {
        fn kind(&self) -> embedded_io_async::ErrorKind {
            match *self {
                Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
                Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
                Error::Framing => embedded_io_async::ErrorKind::InvalidData,
                Error::Parity => embedded_io_async::ErrorKind::InvalidData,
                Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
                Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
            }
        }
    }

    impl<'d> embedded_io_async::ErrorType for Uarte<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::ErrorType for UarteTx<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::Write for Uarte<'d> {
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }

    impl<'d> embedded_io_async::Write for UarteTx<'d> {
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }
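
    // A sketch of driving the UARTE through the generic trait; `send` is an illustrative
    // helper, not part of this crate, and `W` can be `Uarte` or `UarteTx`:
    //
    //     async fn send<W: embedded_io_async::Write>(w: &mut W, msg: &[u8]) -> Result<usize, W::Error> {
    //         w.write(msg).await
    //     }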
}