1#![macro_use]
15
16use core::future::poll_fn;
17use core::marker::PhantomData;
18use core::sync::atomic::{AtomicU8, Ordering, compiler_fence};
19use core::task::Poll;
20
21use embassy_hal_internal::drop::OnDrop;
22use embassy_hal_internal::{Peri, PeripheralType};
23use embassy_sync::waitqueue::AtomicWaker;
24pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
26
27use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
28use crate::gpio::{self, AnyPin, DISCONNECTED, Pin as GpioPin, PselBits, SealedPin as _};
29use crate::interrupt::typelevel::Interrupt;
30use crate::pac::gpio::vals as gpiovals;
31use crate::pac::uarte::vals;
32use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
33use crate::timer::{Frequency, Instance as TimerInstance, Timer};
34use crate::util::slice_in_ram_or;
35use crate::{interrupt, pac};
36
/// UARTE configuration.
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity mode.
    pub parity: Parity,
    /// Baud rate.
    pub baudrate: Baudrate,
}
46
47impl Default for Config {
48 fn default() -> Self {
49 Self {
50 parity: Parity::EXCLUDED,
51 baudrate: Baudrate::BAUD115200,
52 }
53 }
54}
55
bitflags::bitflags! {
    /// Error source flags, mirroring the UARTE ERRORSRC register bits.
    pub(crate) struct ErrorSource: u32 {
        /// RX buffer overrun.
        const OVERRUN = 0x01;
        /// Parity error.
        const PARITY = 0x02;
        /// Framing error.
        const FRAMING = 0x04;
        /// Break condition.
        const BREAK = 0x08;
    }
}
69
70impl ErrorSource {
71 #[inline]
72 fn check(self) -> Result<(), Error> {
73 if self.contains(ErrorSource::OVERRUN) {
74 Err(Error::Overrun)
75 } else if self.contains(ErrorSource::PARITY) {
76 Err(Error::Parity)
77 } else if self.contains(ErrorSource::FRAMING) {
78 Err(Error::Framing)
79 } else if self.contains(ErrorSource::BREAK) {
80 Err(Error::Break)
81 } else {
82 Ok(())
83 }
84 }
85}
86
/// UARTE error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was too long (exceeds the EasyDMA transfer limit).
    BufferTooLong,
    /// The buffer is not in data RAM, so EasyDMA cannot access it.
    BufferNotInRAM,
    /// Framing error.
    Framing,
    /// Parity error.
    Parity,
    /// Buffer overrun.
    Overrun,
    /// Break condition.
    Break,
}
105
/// Interrupt handler for the UARTE peripheral; bind it to `T::Interrupt`.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
110
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // RX: wake the reader on "RX DMA end" or on an error. The interrupt
        // source that fired is masked here (intenclr) so it can't retrigger;
        // the event itself is cleared by the read path, not the ISR.
        let endrx = r.events_dma().rx().end().read();
        let error = r.events_error().read();
        if endrx != 0 || error != 0 {
            s.rx_waker.wake();
            if endrx != 0 {
                r.intenclr().write(|w| w.set_dmarxend(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
        }
        // TX: same scheme for "TX DMA end".
        if r.events_dma().tx().end().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_dmatxend(true));
        }
    }
}
133
/// Interface to the UARTE peripheral, using EasyDMA for transmit and receive.
pub struct Uarte<'d> {
    tx: UarteTx<'d>,
    rx: UarteRx<'d>,
}
139
/// Transmitter half of the UARTE peripheral.
///
/// Obtained via [`Uarte::split`] or constructed standalone with [`UarteTx::new`].
pub struct UarteTx<'d> {
    r: pac::uarte::Uarte,
    state: &'static State,
    // Ties the driver to the borrow of the peripheral and pins.
    _p: PhantomData<&'d ()>,
}
148
/// Receiver half of the UARTE peripheral.
///
/// Obtained via [`Uarte::split`] or constructed standalone with [`UarteRx::new`].
pub struct UarteRx<'d> {
    r: pac::uarte::Uarte,
    state: &'static State,
    // Ties the driver to the borrow of the peripheral and pins.
    _p: PhantomData<&'d ()>,
}
157
impl<'d> Uarte<'d> {
    /// Create a new UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    /// Shared constructor: configure the peripheral and pins, enable the
    /// interrupt, then enable the peripheral.
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Flow control needs both directions; reject a half-configured pair
        // early rather than silently running with one line missing.
        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or none set."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        // Two live halves: the peripheral is only disabled once both the TX
        // and the RX half have been dropped (see `drop_tx_rx`).
        let s = T::state();
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
            rx: UarteRx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
        }
    }

    /// Split the UARTE into its transmitter and receiver halves.
    pub fn split(self) -> (UarteTx<'d>, UarteRx<'d>) {
        (self.tx, self.rx)
    }

    /// Borrow the transmitter and receiver halves without consuming `self`.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d>, &mut UarteRx<'d>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split into TX plus an RX variant with idle-line detection, wired up
    /// through a timer and two PPI channels (see `UarteRx::with_idle`).
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d>, UarteRxWithIdle<'d>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// Return the "TX DMA end" event, e.g. for use with PPI.
    pub fn event_endtx(&self) -> Event<'_> {
        let r = self.tx.r;
        Event::from_reg(r.events_dma().tx().end())
    }

    /// Read bytes until `buffer` is full.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Write all bytes, bouncing through a RAM copy if `buffer` is not
    /// DMA-accessible.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Write all bytes; fails with `BufferNotInRAM` instead of copying.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Blocking read of bytes until `buffer` is full.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Blocking write, bouncing through a RAM copy if needed.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Blocking write; fails with `BufferNotInRAM` instead of copying.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}
293
/// Route TXD (and optionally CTS) to the peripheral.
///
/// TXD is driven high before being handed over so the line sits at the UART
/// idle level rather than glitching low.
pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        // High drive strength; nRF54L splits the drive setting per level.
        #[cfg(not(feature = "_nrf54l"))]
        w.set_drive(gpiovals::Drive::H0H1);
        #[cfg(feature = "_nrf54l")]
        {
            w.set_drive0(gpiovals::Drive::H);
            w.set_drive1(gpiovals::Drive::H);
        }
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            #[cfg(not(feature = "_nrf54l"))]
            w.set_drive(gpiovals::Drive::H0H1);
            #[cfg(feature = "_nrf54l")]
            {
                w.set_drive0(gpiovals::Drive::H);
                w.set_drive1(gpiovals::Drive::H);
            }
        });
    }
    // NOTE(review): `psel_bits()` on `None` presumably yields the
    // disconnected PSEL value — confirm against the `PselBits` impl.
    r.psel().cts().write_value(cts.psel_bits());
}
324
/// Route RXD (and optionally RTS) to the peripheral.
///
/// RTS is driven high (not-ready) before the peripheral takes it over.
pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        // High drive strength; nRF54L splits the drive setting per level.
        #[cfg(not(feature = "_nrf54l"))]
        w.set_drive(gpiovals::Drive::H0H1);
        #[cfg(feature = "_nrf54l")]
        {
            w.set_drive0(gpiovals::Drive::H);
            w.set_drive1(gpiovals::Drive::H);
        }
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            #[cfg(not(feature = "_nrf54l"))]
            w.set_drive(gpiovals::Drive::H0H1);
            #[cfg(feature = "_nrf54l")]
            {
                w.set_drive0(gpiovals::Drive::H);
                w.set_drive1(gpiovals::Drive::H);
            }
        });
    }
    // NOTE(review): `psel_bits()` on `None` presumably yields the
    // disconnected PSEL value — confirm against the `PselBits` impl.
    r.psel().rts().write_value(rts.psel_bits());
}
355
/// Apply `config` to the peripheral: baudrate, parity and flow control,
/// mask all interrupts, clear stale events, disconnect all PSEL routings,
/// and run the enable-anomaly workaround.
///
/// Pin routing is done afterwards by the respective constructors.
pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
        // nRF54L additionally requires an explicit frame size and timeout.
        #[cfg(feature = "_nrf54l")]
        w.set_framesize(vals::Framesize::_8BIT);
        #[cfg(feature = "_nrf54l")]
        w.set_frametimeout(true);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Mask every interrupt; transfers enable exactly the ones they need.
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Clear "ready" events left over from a previous use of the peripheral.
    r.events_dma().rx().ready().write_value(0);
    r.events_dma().tx().ready().write_value(0);

    // Disconnect everything; each constructor connects only the pins it owns.
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}
383
impl<'d> UarteTx<'d> {
    /// Create a new UARTE transmitter without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config)
    }

    /// Create a new UARTE transmitter with hardware flow control (CTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
    }

    /// Shared constructor: configure, route pins, enable interrupt and
    /// peripheral.
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, cts.is_some());
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        // TX-only half: refcount 1, peripheral disabled when this is dropped.
        let s = T::state();
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
        }
    }

    /// Write all bytes in `buffer`.
    ///
    /// If the buffer is not in data RAM (e.g. flash-resident), it is copied
    /// through a stack bounce buffer, since EasyDMA can only read from RAM.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.write_from_ram(buffer).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.write_from_ram(ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Write all bytes in `buffer`, which must reside in data RAM.
    ///
    /// Errors with `BufferNotInRAM` otherwise, and `BufferTooLong` if the
    /// buffer exceeds the EasyDMA transfer limit.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // Cancel safety: if the future is dropped mid-transfer, stop the DMA
        // and spin until it ends so hardware no longer references `buffer`.
        let drop = OnDrop::new(move || {
            trace!("write drop: stopping");

            r.intenclr().write(|w| w.set_dmatxend(true));
            r.events_txstopped().write_value(0);
            r.tasks_dma().tx().stop().write_value(1);

            // TX stops almost instantly; spinning here is fine.
            while r.events_dma().tx().end().read() == 0 {}
            trace!("write drop: stopped");
        });

        r.dma().tx().ptr().write_value(ptr as u32);
        r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().tx().end().write_value(0);
        r.intenset().write(|w| w.set_dmatxend(true));

        // Order the buffer/DMA register writes before starting the transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_dma().tx().start().write_value(1);

        // Wait for "TX DMA end"; the interrupt handler wakes `tx_waker`.
        poll_fn(|cx| {
            s.tx_waker.register(cx.waker());
            if r.events_dma().tx().end().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        // Order DMA completion before the buffer is handed back to the caller.
        compiler_fence(Ordering::SeqCst);
        r.events_dma().tx().ready().write_value(0);
        drop.defuse();

        Ok(())
    }

    /// Blocking variant of [`write`](Self::write).
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.blocking_write_from_ram(buffer) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.blocking_write_from_ram(ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    /// Blocking variant of [`write_from_ram`](Self::write_from_ram).
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        r.dma().tx().ptr().write_value(ptr as u32);
        r.dma().tx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().tx().end().write_value(0);
        // Polling, not interrupt-driven: keep the TX-end interrupt masked.
        r.intenclr().write(|w| w.set_dmatxend(true));

        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_dma().tx().start().write_value(1);

        // Spin until the DMA transfer completes.
        while r.events_dma().tx().end().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_dma().tx().ready().write_value(0);

        Ok(())
    }
}
550
impl<'a> Drop for UarteTx<'a> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = self.r;

        // If the TX "ready" event is latched, a transfer was started; wait
        // for TXSTOPPED so the DMA is quiescent before releasing resources.
        // NOTE(review): assumes a stop was issued by a cancel path — mirrors
        // the RX drop; confirm against the cancel/drop flow.
        let did_stoptx = r.events_dma().tx().ready().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = self.state;

        drop_tx_rx(r, s);
    }
}
568
569impl<'d> UarteRx<'d> {
570 pub fn new<T: Instance>(
572 uarte: Peri<'d, T>,
573 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
574 rxd: Peri<'d, impl GpioPin>,
575 config: Config,
576 ) -> Self {
577 Self::new_inner(uarte, rxd.into(), None, config)
578 }
579
580 pub fn new_with_rtscts<T: Instance>(
582 uarte: Peri<'d, T>,
583 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
584 rxd: Peri<'d, impl GpioPin>,
585 rts: Peri<'d, impl GpioPin>,
586 config: Config,
587 ) -> Self {
588 Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
589 }
590
591 fn check_and_clear_errors(&mut self) -> Result<(), Error> {
593 let r = self.r;
594 let err_bits = r.errorsrc().read();
595 r.errorsrc().write_value(err_bits);
596 ErrorSource::from_bits_truncate(err_bits.0).check()
597 }
598
599 fn new_inner<T: Instance>(
600 _uarte: Peri<'d, T>,
601 rxd: Peri<'d, AnyPin>,
602 rts: Option<Peri<'d, AnyPin>>,
603 config: Config,
604 ) -> Self {
605 let r = T::regs();
606
607 configure(r, config, rts.is_some());
608 configure_rx_pins(r, rxd, rts);
609
610 T::Interrupt::unpend();
611 unsafe { T::Interrupt::enable() };
612 r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
613
614 let s = T::state();
615 s.tx_rx_refcount.store(1, Ordering::Relaxed);
616
617 Self {
618 r: T::regs(),
619 state: T::state(),
620 _p: PhantomData {},
621 }
622 }
623
624 pub fn with_idle<U: TimerInstance>(
626 self,
627 timer: Peri<'d, U>,
628 ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
629 ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
630 ) -> UarteRxWithIdle<'d> {
631 let timer = Timer::new(timer);
632
633 let r = self.r;
634
635 let baudrate = r.baudrate().read().baudrate();
642 let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);
643
644 timer.set_frequency(Frequency::F16MHz);
645 timer.cc(0).write(timeout);
646 timer.cc(0).short_compare_clear();
647 timer.cc(0).short_compare_stop();
648
649 let mut ppi_ch1 = Ppi::new_one_to_two(
650 ppi_ch1.into(),
651 Event::from_reg(r.events_rxdrdy()),
652 timer.task_clear(),
653 timer.task_start(),
654 );
655 ppi_ch1.enable();
656
657 let mut ppi_ch2 = Ppi::new_one_to_one(
658 ppi_ch2.into(),
659 timer.cc(0).event_compare(),
660 Task::from_reg(r.tasks_dma().rx().stop()),
661 );
662 ppi_ch2.enable();
663
664 let state = self.state;
665
666 UarteRxWithIdle {
667 rx: self,
668 timer,
669 ppi_ch1: ppi_ch1,
670 _ppi_ch2: ppi_ch2,
671 r: r,
672 state: state,
673 }
674 }
675
676 pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
678 if buffer.is_empty() {
679 return Ok(());
680 }
681 if buffer.len() > EASY_DMA_SIZE {
682 return Err(Error::BufferTooLong);
683 }
684
685 let ptr = buffer.as_ptr();
686 let len = buffer.len();
687
688 let r = self.r;
689 let s = self.state;
690
691 let drop = OnDrop::new(move || {
692 trace!("read drop: stopping");
693
694 r.intenclr().write(|w| {
695 w.set_dmarxend(true);
696 w.set_error(true);
697 });
698 r.events_rxto().write_value(0);
699 r.events_error().write_value(0);
700 r.tasks_dma().rx().stop().write_value(1);
701
702 while r.events_dma().rx().end().read() == 0 {}
703
704 trace!("read drop: stopped");
705 });
706
707 r.dma().rx().ptr().write_value(ptr as u32);
708 r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));
709
710 r.events_dma().rx().end().write_value(0);
711 r.events_error().write_value(0);
712 r.intenset().write(|w| {
713 w.set_dmarxend(true);
714 w.set_error(true);
715 });
716
717 compiler_fence(Ordering::SeqCst);
718
719 trace!("startrx");
720 r.tasks_dma().rx().start().write_value(1);
721
722 let result = poll_fn(|cx| {
723 s.rx_waker.register(cx.waker());
724
725 if let Err(e) = self.check_and_clear_errors() {
726 r.tasks_dma().rx().stop().write_value(1);
727 return Poll::Ready(Err(e));
728 }
729 if r.events_dma().rx().end().read() != 0 {
730 return Poll::Ready(Ok(()));
731 }
732 Poll::Pending
733 })
734 .await;
735
736 compiler_fence(Ordering::SeqCst);
737 r.events_dma().rx().ready().write_value(0);
738 drop.defuse();
739
740 result
741 }
742
743 pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
745 if buffer.is_empty() {
746 return Ok(());
747 }
748 if buffer.len() > EASY_DMA_SIZE {
749 return Err(Error::BufferTooLong);
750 }
751
752 let ptr = buffer.as_ptr();
753 let len = buffer.len();
754
755 let r = self.r;
756
757 r.dma().rx().ptr().write_value(ptr as u32);
758 r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));
759
760 r.events_dma().rx().end().write_value(0);
761 r.events_error().write_value(0);
762 r.intenclr().write(|w| {
763 w.set_dmarxend(true);
764 w.set_error(true);
765 });
766
767 compiler_fence(Ordering::SeqCst);
768
769 trace!("startrx");
770 r.tasks_dma().rx().start().write_value(1);
771
772 while r.events_dma().rx().end().read() == 0 && r.events_error().read() == 0 {}
773
774 compiler_fence(Ordering::SeqCst);
775 r.events_dma().rx().ready().write_value(0);
776
777 self.check_and_clear_errors()
778 }
779}
780
impl<'a> Drop for UarteRx<'a> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = self.r;

        // If the RX "ready" event is latched, a transfer was started; wait
        // for RXTO so the DMA is quiescent before releasing resources.
        // NOTE(review): assumes a stop was issued by a cancel path — mirrors
        // the TX drop; confirm against the cancel/drop flow.
        let did_stoprx = r.events_dma().rx().ready().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = self.state;

        drop_tx_rx(r, s);
    }
}
798
/// Receiver half of the UARTE peripheral with idle-line detection, created
/// by [`Uarte::split_with_idle`].
pub struct UarteRxWithIdle<'d> {
    rx: UarteRx<'d>,
    timer: Timer<'d>,
    // Restarts `timer` on every received byte (RXDRDY event).
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    // Stops the RX DMA transfer when the idle timeout fires.
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    r: pac::uarte::Uarte,
    state: &'static State,
}
810
impl<'d> UarteRxWithIdle<'d> {
    /// Read bytes until `buffer` is full, with idle detection disabled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        // Disarm the timeout chain so the timer can't stop the DMA early.
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Blocking read until `buffer` is full, with idle detection disabled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Read bytes until `buffer` is full or the line goes idle, returning
    /// the number of bytes actually received.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // Arm the idle-timeout chain (RXDRDY restarts the timer).
        self.ppi_ch1.enable();

        // Cancel safety: stop timer and DMA, then wait for the DMA to end so
        // hardware no longer references `buffer`.
        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_dmarxend(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_dma().rx().stop().write_value(1);

            while r.events_dma().rx().end().read() == 0 {}
        });

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        // Order the DMA register writes before starting the transfer.
        compiler_fence(Ordering::SeqCst);

        r.tasks_dma().rx().start().write_value(1);

        // Wait for "RX DMA end" (buffer full or idle-stopped) or an error.
        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_dma().rx().stop().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_dma().rx().end().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes the DMA actually transferred.
        let n = r.dma().rx().amount().read().0 as usize;

        self.timer.stop();
        r.events_dma().rx().ready().write_value(0);

        drop.defuse();

        result.map(|_| n)
    }

    /// Blocking variant of [`read_until_idle`](Self::read_until_idle).
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        self.ppi_ch1.enable();

        r.dma().rx().ptr().write_value(ptr as u32);
        r.dma().rx().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_dma().rx().end().write_value(0);
        r.events_error().write_value(0);
        // Polling, not interrupt-driven: keep the RX interrupts masked.
        r.intenclr().write(|w| {
            w.set_dmarxend(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_dma().rx().start().write_value(1);

        // Spin until the transfer ends or an error is flagged.
        while r.events_dma().rx().end().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes the DMA actually transferred.
        let n = r.dma().rx().amount().read().0 as usize;

        self.timer.stop();
        r.events_dma().rx().ready().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}
940
#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
/// No-op on chips that do not need the UARTE enable-anomaly workaround.
pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {}
945
#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
/// Workaround for an nRF9160/nRF5340 anomaly where internal RX/TX enable
/// state can survive a disable and wedge the peripheral.
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // Undocumented registers exposing the internal RX/TX enable state
    // (offsets 0x564 / 0x568 from the peripheral base).
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    // TX stuck enabled: issuing a TX STOP is sufficient.
    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_dma().tx().stop().write_value(1);
    }

    // RX stuck enabled: re-enable the peripheral, stop RX, then poll until
    // the internal state clears.
    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_dma().rx().stop().write_value(1);

        let mut workaround_succeded = false;
        // ~1 µs delay per iteration, so give up after roughly 40 ms rather
        // than hang forever.
        for _ in 0..40000 {
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeded = true;
                break;
            } else {
                // Delay ~1 µs at the respective chip's core clock.
                #[cfg(feature = "_nrf9160")]
                const CLOCK_SPEED: u32 = 64_000_000;
                #[cfg(feature = "_nrf5340")]
                const CLOCK_SPEED: u32 = 128_000_000;

                cortex_m::asm::delay(CLOCK_SPEED / 1_000_000);
            }
        }

        if !workaround_succeded {
            panic!("Failed to apply workaround for UART");
        }

        // Clear any errors latched while the peripheral was wedged, then
        // leave it disabled (callers re-enable it during setup).
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}
1000
1001pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
1002 if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
1003 r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
1006
1007 gpio::deconfigure_pin(r.psel().rxd().read());
1008 gpio::deconfigure_pin(r.psel().txd().read());
1009 gpio::deconfigure_pin(r.psel().rts().read());
1010 gpio::deconfigure_pin(r.psel().cts().read());
1011
1012 trace!("uarte tx and rx drop: done");
1013 }
1014}
1015
/// Per-peripheral driver state shared between the interrupt handler and
/// the driver futures.
pub(crate) struct State {
    pub(crate) rx_waker: AtomicWaker,
    pub(crate) tx_waker: AtomicWaker,
    // Number of live driver halves (TX/RX). The peripheral is disabled
    // when this drops to zero (see `drop_tx_rx`).
    pub(crate) tx_rx_refcount: AtomicU8,
}
1021impl State {
1022 pub(crate) const fn new() -> Self {
1023 Self {
1024 rx_waker: AtomicWaker::new(),
1025 tx_waker: AtomicWaker::new(),
1026 tx_rx_refcount: AtomicU8::new(0),
1027 }
1028 }
1029}
1030
/// Crate-internal half of the `Instance` trait (sealed-trait pattern).
pub(crate) trait SealedInstance {
    /// Register block for this peripheral instance.
    fn regs() -> pac::uarte::Uarte;
    /// Shared driver state for this instance.
    fn state() -> &'static State;
    /// State used by the buffered UARTE driver for this instance.
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}
1036
#[allow(private_bounds)]
/// UARTE peripheral instance.
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
1043
/// Implement `SealedInstance`/`Instance` for a concrete UARTE peripheral,
/// giving it its own `static` driver state.
macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            fn state() -> &'static crate::uarte::State {
                // One static per peripheral, created by the macro expansion.
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
1064
mod eh02 {
    //! embedded-hal 0.2 blocking serial trait implementations.

    use super::*;

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Blocking writes complete before returning; nothing to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Blocking writes complete before returning; nothing to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}
1094
mod _embedded_io {
    //! embedded-io-async trait implementations.

    use super::*;

    impl embedded_io_async::Error for Error {
        /// Map driver errors onto the generic embedded-io error kinds.
        fn kind(&self) -> embedded_io_async::ErrorKind {
            match *self {
                Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
                Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
                Error::Framing => embedded_io_async::ErrorKind::InvalidData,
                Error::Parity => embedded_io_async::ErrorKind::InvalidData,
                Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
                Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
            }
        }
    }

    impl<'d> embedded_io_async::ErrorType for Uarte<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::ErrorType for UarteTx<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::Write for Uarte<'d> {
        // All-or-nothing: the driver writes the full buffer, so report its
        // whole length on success.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }

    impl<'d> embedded_io_async::Write for UarteTx<'d> {
        // All-or-nothing: the driver writes the full buffer, so report its
        // whole length on success.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }
}