1#![macro_use]
15
16use core::future::poll_fn;
17use core::marker::PhantomData;
18use core::sync::atomic::{AtomicU8, Ordering, compiler_fence};
19use core::task::Poll;
20
21use embassy_hal_internal::drop::OnDrop;
22use embassy_hal_internal::{Peri, PeripheralType};
23use embassy_sync::waitqueue::AtomicWaker;
24pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
26
27use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
28use crate::gpio::{self, AnyPin, DISCONNECTED, Pin as GpioPin, PselBits, SealedPin as _};
29use crate::interrupt::typelevel::Interrupt;
30use crate::pac::gpio::vals as gpiovals;
31use crate::pac::uarte::vals;
32use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
33use crate::timer::{Frequency, Instance as TimerInstance, Timer};
34use crate::util::slice_in_ram_or;
35use crate::{interrupt, pac};
36
/// UARTE configuration.
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity bit configuration.
    pub parity: Parity,
    /// Baud rate.
    pub baudrate: Baudrate,
}
46
47impl Default for Config {
48 fn default() -> Self {
49 Self {
50 parity: Parity::EXCLUDED,
51 baudrate: Baudrate::BAUD115200,
52 }
53 }
54}
55
bitflags::bitflags! {
    /// Error flags, mirroring the layout of the UARTE `ERRORSRC` register.
    pub(crate) struct ErrorSource: u32 {
        /// Overrun error.
        const OVERRUN = 0x01;
        /// Parity error.
        const PARITY = 0x02;
        /// Framing error.
        const FRAMING = 0x04;
        /// Break condition.
        const BREAK = 0x08;
    }
}
69
70impl ErrorSource {
71 #[inline]
72 fn check(self) -> Result<(), Error> {
73 if self.contains(ErrorSource::OVERRUN) {
74 Err(Error::Overrun)
75 } else if self.contains(ErrorSource::PARITY) {
76 Err(Error::Parity)
77 } else if self.contains(ErrorSource::FRAMING) {
78 Err(Error::Framing)
79 } else if self.contains(ErrorSource::BREAK) {
80 Err(Error::Break)
81 } else {
82 Ok(())
83 }
84 }
85}
86
/// UARTE error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was too long for a single EasyDMA transfer (> `EASY_DMA_SIZE`).
    BufferTooLong,
    /// The buffer is not in data RAM, so EasyDMA cannot read from it.
    BufferNotInRAM,
    /// Framing error.
    Framing,
    /// Parity error.
    Parity,
    /// Overrun error.
    Overrun,
    /// Break condition.
    Break,
}
105
/// Interrupt handler. Bind this to the UARTE instance's interrupt with
/// the `bind_interrupts!` mechanism so the driver's futures get woken.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
110
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // Snapshot the RX-related events first, then wake the RX task if any fired.
        // The event itself is left set; the woken task clears it. Masking the
        // corresponding interrupt here prevents an IRQ storm until re-armed.
        let endrx = r.events_dma().rx().end().read();
        let error = r.events_error().read();
        let rxto = r.events_rxto().read();
        if endrx != 0 || error != 0 || rxto != 0 {
            s.rx_waker.wake();
            if endrx != 0 {
                r.intenclr().write(|w| w.set_dmarxend(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
            if rxto != 0 {
                r.intenclr().write(|w| w.set_rxto(true));
            }
        }
        // TX side: wake the writer when the DMA TX transfer has ended.
        if r.events_dma().tx().end().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_dmatxend(true));
        }
    }
}
137
/// UARTE driver owning both the TX and RX halves.
pub struct Uarte<'d> {
    tx: UarteTx<'d>,
    rx: UarteRx<'d>,
}
143
/// Transmit-only half of the UARTE driver.
pub struct UarteTx<'d> {
    // Register block of the underlying UARTE instance.
    r: pac::uarte::Uarte,
    // Shared driver state (wakers + TX/RX refcount).
    state: &'static State,
    _p: PhantomData<&'d ()>,
}
152
/// Receive-only half of the UARTE driver.
pub struct UarteRx<'d> {
    // Register block of the underlying UARTE instance.
    r: pac::uarte::Uarte,
    // Shared driver state (wakers + TX/RX refcount).
    state: &'static State,
    _p: PhantomData<&'d ()>,
    // True while an RX transfer has been started; used by `stop_rx` to decide
    // whether waiting for the RXTO event makes sense.
    rx_on: bool,
}
162
impl<'d> Uarte<'d> {
    /// Create a new UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    /// Shared constructor: configures registers and pins, enables the
    /// interrupt and the peripheral, and initializes the shared refcount.
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Flow control needs both pins; reject a half-configured pair early.
        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or none set."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        // Both halves are alive: the peripheral is only disabled once the
        // refcount hits zero in `drop_tx_rx`.
        let s = T::state();
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
            rx: UarteRx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
                rx_on: false,
            },
        }
    }

    /// Split into owned TX and RX halves, usable from separate tasks.
    pub fn split(self) -> (UarteTx<'d>, UarteRx<'d>) {
        (self.tx, self.rx)
    }

    /// Borrow the TX and RX halves without consuming `self`.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d>, &mut UarteRx<'d>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split into a TX half and an RX half that can detect an idle line,
    /// using a timer plus two PPI channels to stop RX DMA after silence.
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d>, UarteRxWithIdle<'d>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// The DMA TX "end" event, e.g. for wiring up via PPI.
    pub fn event_endtx(&self) -> Event<'_> {
        let r = self.tx.r;
        Event::from_reg(r.events_dma().tx().end())
    }

    /// Read bytes until `buffer` is completely filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Flush already-received bytes into `buffer`; returns the byte count.
    pub async fn flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        self.rx.flush_rx(buffer).await
    }

    /// Write all bytes, copying to a stack buffer if `buffer` is not in RAM.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Write all bytes; fails with `BufferNotInRAM` instead of copying.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Blocking variant of [`Self::read`].
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Blocking variant of [`Self::flush_rx`].
    pub fn blocking_flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        self.rx.blocking_flush_rx(buffer)
    }

    /// Blocking variant of [`Self::write`].
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Blocking variant of [`Self::write_from_ram`].
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}
309
/// Configure the TXD (and optional CTS) GPIO pins and select them in the
/// peripheral's PSEL registers.
pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    // Drive TXD high (UART idle level) before turning it into an output,
    // so the line never glitches low.
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        // High drive strength; the nrf54l register layout splits the drive
        // setting into two per-level fields.
        #[cfg(not(feature = "_nrf54l"))]
        w.set_drive(gpiovals::Drive::H0H1);
        #[cfg(feature = "_nrf54l")]
        {
            w.set_drive0(gpiovals::Drive::H);
            w.set_drive1(gpiovals::Drive::H);
        }
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            #[cfg(not(feature = "_nrf54l"))]
            w.set_drive(gpiovals::Drive::H0H1);
            #[cfg(feature = "_nrf54l")]
            {
                w.set_drive0(gpiovals::Drive::H);
                w.set_drive1(gpiovals::Drive::H);
            }
        });
    }
    // Written unconditionally: `PselBits` on `None` presumably yields the
    // disconnected pattern — NOTE(review): confirm against the trait impl.
    r.psel().cts().write_value(cts.psel_bits());
}
340
/// Configure the RXD (and optional RTS) GPIO pins and select them in the
/// peripheral's PSEL registers.
pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        // High drive strength; the nrf54l register layout splits the drive
        // setting into two per-level fields.
        #[cfg(not(feature = "_nrf54l"))]
        w.set_drive(gpiovals::Drive::H0H1);
        #[cfg(feature = "_nrf54l")]
        {
            w.set_drive0(gpiovals::Drive::H);
            w.set_drive1(gpiovals::Drive::H);
        }
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        // Idle-high before switching to output, same as TXD above.
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            #[cfg(not(feature = "_nrf54l"))]
            w.set_drive(gpiovals::Drive::H0H1);
            #[cfg(feature = "_nrf54l")]
            {
                w.set_drive0(gpiovals::Drive::H);
                w.set_drive1(gpiovals::Drive::H);
            }
        });
    }
    // Written unconditionally: `PselBits` on `None` presumably yields the
    // disconnected pattern — NOTE(review): confirm against the trait impl.
    r.psel().rts().write_value(rts.psel_bits());
}
371
/// Apply baud rate, parity and flow-control settings, reset interrupt/event
/// state, disconnect all pins (constructors re-select the ones they use),
/// and apply the enable-anomaly workaround where applicable.
pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
        // nrf54l additionally exposes frame size / frame timeout settings.
        #[cfg(feature = "_nrf54l")]
        w.set_framesize(vals::Framesize::_8BIT);
        #[cfg(feature = "_nrf54l")]
        w.set_frametimeout(true);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Mask every interrupt source; transfers re-enable what they need.
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Clear stale DMA "ready" events from a previous use of the peripheral.
    r.events_dma().rx().ready().write_value(0);
    r.events_dma().tx().ready().write_value(0);

    // Disconnect all pins; the caller selects the ones actually in use.
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}
399
impl<'d> UarteTx<'d> {
    /// Create a new TX-only UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config)
    }

    /// Create a new TX-only UARTE with hardware flow control (CTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
    }

    /// Shared constructor: configure, enable interrupt + peripheral, and set
    /// the refcount to 1 (only the TX half exists).
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, cts.is_some());
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
        }
    }

    /// Write all bytes. If the buffer is not in RAM (e.g. flash), it is
    /// copied into a stack buffer first so EasyDMA can read it.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.write_from_ram(buffer).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                // NOTE: panics if `buffer.len() > FORCE_COPY_BUFFER_SIZE`.
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.write_from_ram(ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Write all bytes; errors with `BufferNotInRAM` instead of copying.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // If the future is dropped mid-transfer, stop the DMA and wait for it
        // to actually finish, so the buffer is no longer referenced by hardware.
        let drop = OnDrop::new(move || {
            trace!("write drop: stopping");

            r.intenclr().write(|w| w.set_dmatxend(true));
            r.events_txstopped().write_value(0);
            r.tasks_dma().tx().stop().write_value(1);

            while r.events_dma().tx().end().read() == 0 {}
            trace!("write drop: stopped");
        });

        r.dma().tx().ptr().write_value(ptr as u32);
        r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));

        // Clear the end event and arm its interrupt before starting.
        r.events_dma().tx().end().write_value(0);
        r.intenset().write(|w| w.set_dmatxend(true));

        // Order the buffer writes before the DMA start (and the end-event
        // read after), since the hardware accesses memory outside Rust's view.
        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_dma().tx().start().write_value(1);

        poll_fn(|cx| {
            s.tx_waker.register(cx.waker());
            if r.events_dma().tx().end().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_dma().tx().ready().write_value(0);
        drop.defuse();

        Ok(())
    }

    /// Blocking variant of [`Self::write`].
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.blocking_write_from_ram(buffer) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                // NOTE: panics if `buffer.len() > FORCE_COPY_BUFFER_SIZE`.
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.blocking_write_from_ram(ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    /// Blocking variant of [`Self::write_from_ram`]; busy-waits on the DMA
    /// end event with interrupts masked.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.dma().tx().ptr().write_value(ptr as u32);
        r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));

        // Clear the end event; mask its interrupt since we poll instead.
        r.events_dma().tx().end().write_value(0);
        r.intenclr().write(|w| w.set_dmatxend(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_dma().tx().start().write_value(1);

        while r.events_dma().tx().end().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_dma().tx().ready().write_value(0);

        Ok(())
    }
}
566
impl<'a> Drop for UarteTx<'a> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = self.r;

        // If a TX transfer was in flight (ready event set), wait for the
        // hardware to report TXSTOPPED before releasing the peripheral.
        let did_stoptx = r.events_dma().tx().ready().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = self.state;

        // Decrement the shared refcount; the last half disables the peripheral.
        drop_tx_rx(r, s);
    }
}
584
impl<'d> UarteRx<'d> {
    /// Create a new RX-only UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config)
    }

    /// Create a new RX-only UARTE with hardware flow control (RTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
    }

    /// Read ERRORSRC, clear it (write-back clears the set bits), and convert
    /// any flag into the corresponding `Error`.
    fn check_and_clear_errors(&mut self) -> Result<(), Error> {
        let r = self.r;
        let err_bits = r.errorsrc().read();
        r.errorsrc().write_value(err_bits);
        ErrorSource::from_bits_truncate(err_bits.0).check()
    }

    /// Shared constructor: configure, enable interrupt + peripheral, and set
    /// the refcount to 1 (only the RX half exists).
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, rts.is_some());
        configure_rx_pins(r, rxd, rts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
            rx_on: false,
        }
    }

    /// Upgrade to an idle-line-detecting receiver.
    ///
    /// PPI channel 1 restarts `timer` on every received byte (RXDRDY); PPI
    /// channel 2 stops the RX DMA when the timer expires, i.e. after the line
    /// has been idle for roughly 40 bit times.
    pub fn with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> UarteRxWithIdle<'d> {
        let timer = Timer::new(timer);

        let r = self.r;

        // Timeout in 16 MHz timer ticks, scaled from the configured baud rate.
        let baudrate = r.baudrate().read().baudrate();
        let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);

        timer.set_frequency(Frequency::F16MHz);
        timer.cc(0).write(timeout);
        timer.cc(0).short_compare_clear();
        timer.cc(0).short_compare_stop();

        // Byte received -> clear + (re)start the idle timer.
        let mut ppi_ch1 = Ppi::new_one_to_two(
            ppi_ch1.into(),
            Event::from_reg(r.events_rxdrdy()),
            timer.task_clear(),
            timer.task_start(),
        );
        ppi_ch1.enable();

        // Idle timer fired -> stop the RX DMA transfer.
        let mut ppi_ch2 = Ppi::new_one_to_one(
            ppi_ch2.into(),
            timer.cc(0).event_compare(),
            Task::from_reg(r.tasks_dma().rx().stop()),
        );
        ppi_ch2.enable();

        let state = self.state;

        UarteRxWithIdle {
            rx: self,
            timer,
            ppi_ch1: ppi_ch1,
            _ppi_ch2: ppi_ch2,
            r: r,
            state: state,
        }
    }

    /// Read bytes until `buffer` is completely filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // If the future is dropped mid-transfer, stop the DMA and wait until
        // the hardware no longer references the buffer.
        let drop = OnDrop::new(move || {
            trace!("read drop: stopping");

            r.intenclr().write(|w| {
                w.set_dmarxend(true);
                w.set_error(true);
            });
            r.events_error().write_value(0);
            r.tasks_dma().rx().stop().write_value(1);

            while r.events_dma().rx().end().read() == 0 {}

            trace!("read drop: stopped");
        });

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        // Clear stale events and arm end/error interrupts before starting.
        self.rx_on = true;
        r.events_rxto().write_value(0);
        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        // Fence: DMA writes into `buffer` happen outside Rust's view.
        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_dma().rx().start().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            // On hardware error, stop the transfer and surface the error.
            if let Err(e) = self.check_and_clear_errors() {
                r.tasks_dma().rx().stop().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_dma().rx().end().read() != 0 {
                return Poll::Ready(Ok(()));
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_dma().rx().ready().write_value(0);
        drop.defuse();

        result
    }

    /// Blocking variant of [`Self::read`]; busy-waits with interrupts masked.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_dma().rx().start().write_value(1);

        // Spin until the transfer ends or a hardware error is flagged.
        while r.events_dma().rx().end().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_dma().rx().ready().write_value(0);

        self.check_and_clear_errors()
    }

    /// Stop an ongoing RX transfer and wait for the RXTO event.
    pub async fn stop_rx(&mut self) {
        let r = self.r;
        let s = self.state;

        // Ensure the RXTO interrupt is re-masked even if this future is dropped.
        let drop = OnDrop::new(move || {
            r.intenclr().write(|w| {
                w.set_rxto(true);
            });
        });

        r.intenset().write(|w| {
            w.set_rxto(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("stop_rx");
        r.tasks_dma().rx().stop().write_value(1);

        poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            // If no RX was running, RXTO will never fire; complete immediately.
            if !self.rx_on || r.events_rxto().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);

        self.rx_on = false;
        r.intenclr().write(|w| {
            w.set_rxto(true);
        });

        drop.defuse();
    }

    /// Blocking variant of [`Self::stop_rx`].
    pub fn blocking_stop_rx(&mut self) {
        let r = self.r;

        compiler_fence(Ordering::SeqCst);

        trace!("stop_rx");
        r.tasks_dma().rx().stop().write_value(1);

        // If no RX was running, RXTO will never fire; skip the wait.
        while self.rx_on && r.events_rxto().read() == 0 {}
        self.rx_on = false;

        compiler_fence(Ordering::SeqCst);

        // NOTE(review): redundant — `rx_on` was already cleared above.
        self.rx_on = false;
    }

    /// Flush bytes already received into the FIFO into `buffer` via DMA.
    /// Returns the number of bytes flushed.
    pub async fn flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // On drop, stop the DMA and wait so the buffer is safe to reuse.
        let drop = OnDrop::new(move || {
            trace!("flush_rx drop: stopping");

            r.intenclr().write(|w| {
                w.set_dmarxend(true);
            });
            r.tasks_dma().rx().stop().write_value(1);

            while r.events_dma().rx().end().read() == 0 {}

            trace!("flush_rx drop: stopped");
        });

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().rx().end().write_value(0);
        r.intenset().write(|w| {
            w.set_dmarxend(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("flush_rx");
        r.tasks_flushrx().write_value(1);

        poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if r.events_dma().rx().end().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_dma().rx().ready().write_value(0);
        // AMOUNT holds how many bytes the DMA actually transferred.
        let amount = r.dma().rx().amount().read().amount();
        drop.defuse();

        Ok(amount as usize)
    }

    /// Blocking variant of [`Self::flush_rx`].
    pub fn blocking_flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().rx().end().write_value(0);
        r.intenclr().write(|w| {
            w.set_dmarxend(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("flush_rx");
        r.tasks_flushrx().write_value(1);

        while r.events_dma().rx().end().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_dma().rx().ready().write_value(0);

        // AMOUNT holds how many bytes the DMA actually transferred.
        let amount = r.dma().rx().amount().read().amount();
        Ok(amount as usize)
    }
}
973
impl<'a> Drop for UarteRx<'a> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = self.r;

        // If an RX transfer was in flight (ready event set), wait for the
        // hardware to report RXTO before releasing the peripheral.
        let did_stoprx = r.events_dma().rx().ready().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = self.state;

        // Decrement the shared refcount; the last half disables the peripheral.
        drop_tx_rx(r, s);
    }
}
991
/// Receive half with idle-line detection, built by [`UarteRx::with_idle`].
///
/// Uses a timer restarted on every received byte (via `ppi_ch1`) and a second
/// PPI channel that stops the RX DMA when that timer expires.
pub struct UarteRxWithIdle<'d> {
    rx: UarteRx<'d>,
    timer: Timer<'d>,
    // Disabled during plain `read`, re-enabled for `read_until_idle`.
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    r: pac::uarte::Uarte,
    state: &'static State,
}
1003
impl<'d> UarteRxWithIdle<'d> {
    /// Read until `buffer` is full, with idle detection disabled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        // Disable the byte->timer PPI link so idle timeouts can't stop this read.
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Blocking variant of [`Self::read`], idle detection disabled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Stop an ongoing RX transfer; see [`UarteRx::stop_rx`].
    pub async fn stop_rx(&mut self) {
        self.rx.stop_rx().await
    }

    /// Blocking variant of [`Self::stop_rx`].
    pub fn blocking_stop_rx(&mut self) {
        self.rx.blocking_stop_rx()
    }

    /// Flush already-received bytes; see [`UarteRx::flush_rx`].
    pub async fn flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        self.rx.flush_rx(buffer).await
    }

    /// Blocking variant of [`Self::flush_rx`].
    pub fn blocking_flush_rx(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        self.rx.blocking_flush_rx(buffer)
    }

    /// Read until either `buffer` is full or the line goes idle.
    /// Returns the number of bytes received.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // Re-enable the byte->timer link so the idle timeout can fire.
        self.ppi_ch1.enable();

        // On drop, stop the idle timer and the DMA, then wait for the
        // transfer to actually end before the buffer goes out of scope.
        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_dmarxend(true);
                w.set_error(true);
            });
            r.events_error().write_value(0);
            r.tasks_dma().rx().stop().write_value(1);

            while r.events_dma().rx().end().read() == 0 {}
        });

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        // Clear stale events and arm end/error interrupts before starting.
        self.rx.rx_on = true;
        r.events_rxto().write_value(0);
        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_dma().rx().start().write_value(1);

        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_dma().rx().stop().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_dma().rx().end().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds how many bytes the DMA actually transferred.
        let n = r.dma().rx().amount().read().0 as usize;

        self.timer.stop();
        r.events_dma().rx().ready().write_value(0);

        drop.defuse();

        result.map(|_| n)
    }

    /// Blocking variant of [`Self::read_until_idle`].
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        // Re-enable the byte->timer link so the idle timeout can fire.
        self.ppi_ch1.enable();

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        self.rx.rx_on = true;
        r.events_rxto().write_value(0);
        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        r.intenclr().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_dma().rx().start().write_value(1);

        // Spin until the transfer ends (full buffer or idle-stop) or errors.
        while r.events_dma().rx().end().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        let n = r.dma().rx().amount().read().0 as usize;

        self.timer.stop();
        r.events_dma().rx().ready().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}
1168
/// Enable-anomaly workaround is only needed on nRF9160/nRF5340; no-op elsewhere.
#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {
}
1173
/// Workaround for a UARTE enable anomaly on nRF9160/nRF5340: if the RX/TX
/// enable state was left set (e.g. after a reset mid-transfer), stop the
/// transfers and wait for the RX path to actually disable before use.
#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // These internal RX/TX-enable registers are not in the public register
    // map, so they are accessed via raw offsets from the peripheral base.
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_dma().tx().stop().write_value(1);
    }

    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        // The peripheral must be enabled for the STOP task to take effect.
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_dma().rx().stop().write_value(1);

        // Poll for up to ~40000 µs (1 µs delay per iteration) for RX to disable.
        let mut workaround_succeded = false;
        for _ in 0..40000 {
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeded = true;
                break;
            } else {
                // Delay one microsecond worth of core cycles.
                #[cfg(feature = "_nrf9160")]
                const CLOCK_SPEED: u32 = 64_000_000;
                #[cfg(feature = "_nrf5340")]
                const CLOCK_SPEED: u32 = 128_000_000;

                cortex_m::asm::delay(CLOCK_SPEED / 1_000_000);
            }
        }

        if !workaround_succeded {
            panic!("Failed to apply workaround for UART");
        }

        // Clear any error flags accumulated while stuck, then disable again.
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}
1228
/// Decrement the shared TX/RX refcount. When the last half is dropped,
/// disable the peripheral and return all four pins to their reset state.
pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
    // fetch_sub returns the previous value; 1 means we were the last user.
    if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().rxd().read());
        gpio::deconfigure_pin(r.psel().txd().read());
        gpio::deconfigure_pin(r.psel().rts().read());
        gpio::deconfigure_pin(r.psel().cts().read());

        trace!("uarte tx and rx drop: done");
    }
}
1243
/// Per-instance driver state shared between the ISR and the TX/RX halves.
pub(crate) struct State {
    // Woken by the ISR on ENDRX / ERROR / RXTO.
    pub(crate) rx_waker: AtomicWaker,
    // Woken by the ISR on ENDTX.
    pub(crate) tx_waker: AtomicWaker,
    // Number of live halves (0..=2); the last drop disables the peripheral.
    pub(crate) tx_rx_refcount: AtomicU8,
}
impl State {
    /// Const-initializer so the state can live in a `static`.
    pub(crate) const fn new() -> Self {
        Self {
            rx_waker: AtomicWaker::new(),
            tx_waker: AtomicWaker::new(),
            tx_rx_refcount: AtomicU8::new(0),
        }
    }
}
1258
/// Crate-private half of the instance trait (sealed-trait pattern): gives the
/// driver access to the register block and the shared state singletons.
pub(crate) trait SealedInstance {
    fn regs() -> pac::uarte::Uarte;
    fn state() -> &'static State;
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}
1264
/// UARTE peripheral instance. Sealed: only implementable inside this crate.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
1271
// Implements `SealedInstance`/`Instance` for one UARTE peripheral, creating
// its per-instance `State` singletons. Invoked from the chip-specific module.
macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            fn state() -> &'static crate::uarte::State {
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
1292
// embedded-hal 0.2 blocking serial trait implementations.
mod eh02 {
    use super::*;

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Writes complete synchronously, so there is nothing to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Writes complete synchronously, so there is nothing to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}
1322
1323impl core::fmt::Display for Error {
1324 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1325 match *self {
1326 Self::BufferTooLong => f.write_str("BufferTooLong"),
1327 Self::BufferNotInRAM => f.write_str("BufferNotInRAM"),
1328 Self::Framing => f.write_str("Framing"),
1329 Self::Parity => f.write_str("Parity"),
1330 Self::Overrun => f.write_str("Overrun"),
1331 Self::Break => f.write_str("Break"),
1332 }
1333 }
1334}
1335impl core::error::Error for Error {}
1336
// embedded-io-async trait implementations.
mod _embedded_io {
    use super::*;

    impl embedded_io_async::Error for Error {
        /// Map driver errors onto the closest generic embedded-io error kinds.
        fn kind(&self) -> embedded_io_async::ErrorKind {
            match *self {
                Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
                Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
                Error::Framing => embedded_io_async::ErrorKind::InvalidData,
                Error::Parity => embedded_io_async::ErrorKind::InvalidData,
                Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
                Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
            }
        }
    }

    impl<'d> embedded_io_async::ErrorType for Uarte<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::ErrorType for UarteTx<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::Write for Uarte<'d> {
        // `Uarte::write` transmits the whole buffer, so the full length is
        // always reported as written.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
        // Writes complete before returning; nothing buffered to flush.
        async fn flush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d> embedded_io_async::Write for UarteTx<'d> {
        // `UarteTx::write` transmits the whole buffer, so the full length is
        // always reported as written.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
        // Writes complete before returning; nothing buffered to flush.
        async fn flush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}