1#![macro_use]
15
16use core::future::poll_fn;
17use core::marker::PhantomData;
18use core::sync::atomic::{compiler_fence, AtomicU8, Ordering};
19use core::task::Poll;
20
21use embassy_hal_internal::drop::OnDrop;
22use embassy_hal_internal::{Peri, PeripheralType};
23use embassy_sync::waitqueue::AtomicWaker;
24pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
26
27use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
28use crate::gpio::{self, AnyPin, Pin as GpioPin, PselBits, SealedPin as _, DISCONNECTED};
29use crate::interrupt::typelevel::Interrupt;
30use crate::pac::gpio::vals as gpiovals;
31use crate::pac::uarte::vals;
32use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
33use crate::timer::{Frequency, Instance as TimerInstance, Timer};
34use crate::util::slice_in_ram_or;
35use crate::{interrupt, pac};
36
/// UARTE configuration.
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity mode. Defaults to `EXCLUDED` (no parity bit).
    pub parity: Parity,
    /// Baud rate. Defaults to 115200.
    pub baudrate: Baudrate,
}
46
47impl Default for Config {
48 fn default() -> Self {
49 Self {
50 parity: Parity::EXCLUDED,
51 baudrate: Baudrate::BAUD115200,
52 }
53 }
54}
55
bitflags::bitflags! {
    /// Raw error flags, mirroring the bits read from the UARTE ERRORSRC register.
    pub(crate) struct ErrorSource: u32 {
        /// Receiver overrun.
        const OVERRUN = 0x01;
        /// Parity error on a received frame.
        const PARITY = 0x02;
        /// Framing error on a received frame.
        const FRAMING = 0x04;
        /// Break condition on the RX line.
        const BREAK = 0x08;
    }
}
69
70impl ErrorSource {
71 #[inline]
72 fn check(self) -> Result<(), Error> {
73 if self.contains(ErrorSource::OVERRUN) {
74 Err(Error::Overrun)
75 } else if self.contains(ErrorSource::PARITY) {
76 Err(Error::Parity)
77 } else if self.contains(ErrorSource::FRAMING) {
78 Err(Error::Framing)
79 } else if self.contains(ErrorSource::BREAK) {
80 Err(Error::Break)
81 } else {
82 Ok(())
83 }
84 }
85}
86
/// UARTE error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was longer than `EASY_DMA_SIZE`, the maximum a single EasyDMA transfer can handle.
    BufferTooLong,
    /// The buffer is not in data RAM; EasyDMA can only access RAM.
    BufferNotInRAM,
    /// Framing error.
    Framing,
    /// Parity error.
    Parity,
    /// Receiver overrun.
    Overrun,
    /// Break condition on the RX line.
    Break,
}
105
/// Interrupt handler for UARTE instance `T`; bind it with the `interrupt`
/// binding passed to the constructors.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
110
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    /// Wakes the rx/tx futures when their events fire, masking each fired
    /// interrupt so it does not retrigger before the woken future handles it.
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // RX side: ENDRX (transfer complete) or ERROR wakes the reader.
        let endrx = r.events_endrx().read();
        let error = r.events_error().read();
        if endrx != 0 || error != 0 {
            s.rx_waker.wake();
            // Mask only the interrupt(s) that actually fired; the events
            // themselves are cleared by the rx code.
            if endrx != 0 {
                r.intenclr().write(|w| w.set_endrx(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
        }
        // TX side: ENDTX (transfer complete) wakes the writer.
        if r.events_endtx().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_endtx(true));
        }
    }
}
133
/// UARTE driver owning both the transmit and receive halves.
pub struct Uarte<'d> {
    tx: UarteTx<'d>,
    rx: UarteRx<'d>,
}
139
/// Transmit half of the UARTE driver.
pub struct UarteTx<'d> {
    // Register block of the owning UARTE instance.
    r: pac::uarte::Uarte,
    // Shared wakers + tx/rx refcount for this instance.
    state: &'static State,
    _p: PhantomData<&'d ()>,
}
148
/// Receive half of the UARTE driver.
pub struct UarteRx<'d> {
    // Register block of the owning UARTE instance.
    r: pac::uarte::Uarte,
    // Shared wakers + tx/rx refcount for this instance.
    state: &'static State,
    _p: PhantomData<&'d ()>,
}
157
impl<'d> Uarte<'d> {
    /// Create a new UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    /// Shared constructor: configures the peripheral and pins, enables the
    /// interrupt, then enables the UARTE.
    ///
    /// # Panics
    /// Panics if only one of RTS/CTS is provided.
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Flow control is all-or-nothing: both pins or neither.
        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or none set."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        // Two users (tx half + rx half); the peripheral is disabled only
        // when the last half is dropped (see `drop_tx_rx`).
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
            rx: UarteRx {
                r: T::regs(),
                state: T::state(),
                _p: PhantomData {},
            },
        }
    }

    /// Split into owned tx/rx halves, usable from independent tasks.
    pub fn split(self) -> (UarteTx<'d>, UarteRx<'d>) {
        (self.tx, self.rx)
    }

    /// Borrow the tx/rx halves without consuming the driver.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d>, &mut UarteRx<'d>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split, upgrading the rx half with line-idle detection built from a
    /// timer and two PPI channels (see `UarteRx::with_idle`).
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d>, UarteRxWithIdle<'d>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// The ENDTX event, e.g. for wiring into a PPI channel.
    pub fn event_endtx(&self) -> Event<'_> {
        let r = self.tx.r;
        Event::from_reg(r.events_endtx())
    }

    /// Read into `buffer` until it is full. See `UarteRx::read`.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Write all of `buffer`, copying it to RAM if needed. See `UarteTx::write`.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Write all of `buffer`, which must already be in RAM.
    /// See `UarteTx::write_from_ram`.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Blocking read until `buffer` is full. See `UarteRx::blocking_read`.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Blocking write. See `UarteTx::blocking_write`.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Blocking write from a RAM buffer. See `UarteTx::blocking_write_from_ram`.
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}
293
/// Configure the TXD pin (and optional CTS pin) and connect them to the
/// peripheral via PSEL.
pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    // Drive TXD high (UART idle level) before handing it to the peripheral.
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    // NOTE(review): written unconditionally — assumes `psel_bits()` on `None`
    // yields a disconnected PSEL value (see the `PselBits` impl).
    r.psel().cts().write_value(cts.psel_bits());
}
312
/// Configure the RXD pin (and optional RTS pin) and connect them to the
/// peripheral via PSEL.
pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        // RTS idles high (deasserted) before the peripheral takes over.
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    // NOTE(review): written unconditionally — assumes `psel_bits()` on `None`
    // yields a disconnected PSEL value (see the `PselBits` impl).
    r.psel().rts().write_value(rts.psel_bits());
}
331
/// Program parity, flow control and baud rate, and put the peripheral into a
/// known-clean state (no interrupts enabled, events reset, pins disconnected).
pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Disable all interrupts; transfers enable only the ones they need.
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Reset RXSTARTED/TXSTARTED: the Drop impls read these to tell whether a
    // transfer was in flight.
    r.events_rxstarted().write_value(0);
    r.events_txstarted().write_value(0);

    // Disconnect all pins; callers connect just the ones they use.
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}
355
356impl<'d> UarteTx<'d> {
357 pub fn new<T: Instance>(
359 uarte: Peri<'d, T>,
360 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
361 txd: Peri<'d, impl GpioPin>,
362 config: Config,
363 ) -> Self {
364 Self::new_inner(uarte, txd.into(), None, config)
365 }
366
367 pub fn new_with_rtscts<T: Instance>(
369 uarte: Peri<'d, T>,
370 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
371 txd: Peri<'d, impl GpioPin>,
372 cts: Peri<'d, impl GpioPin>,
373 config: Config,
374 ) -> Self {
375 Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
376 }
377
378 fn new_inner<T: Instance>(
379 _uarte: Peri<'d, T>,
380 txd: Peri<'d, AnyPin>,
381 cts: Option<Peri<'d, AnyPin>>,
382 config: Config,
383 ) -> Self {
384 let r = T::regs();
385
386 configure(r, config, cts.is_some());
387 configure_tx_pins(r, txd, cts);
388
389 T::Interrupt::unpend();
390 unsafe { T::Interrupt::enable() };
391 r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
392
393 let s = T::state();
394 s.tx_rx_refcount.store(1, Ordering::Relaxed);
395
396 Self {
397 r: T::regs(),
398 state: T::state(),
399 _p: PhantomData {},
400 }
401 }
402
403 pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
405 match self.write_from_ram(buffer).await {
406 Ok(_) => Ok(()),
407 Err(Error::BufferNotInRAM) => {
408 trace!("Copying UARTE tx buffer into RAM for DMA");
409 let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
410 ram_buf.copy_from_slice(buffer);
411 self.write_from_ram(ram_buf).await
412 }
413 Err(error) => Err(error),
414 }
415 }
416
417 pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
419 if buffer.is_empty() {
420 return Ok(());
421 }
422
423 slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
424 if buffer.len() > EASY_DMA_SIZE {
425 return Err(Error::BufferTooLong);
426 }
427
428 let ptr = buffer.as_ptr();
429 let len = buffer.len();
430
431 let r = self.r;
432 let s = self.state;
433
434 let drop = OnDrop::new(move || {
435 trace!("write drop: stopping");
436
437 r.intenclr().write(|w| w.set_endtx(true));
438 r.events_txstopped().write_value(0);
439 r.tasks_stoptx().write_value(1);
440
441 while r.events_endtx().read() == 0 {}
443 trace!("write drop: stopped");
444 });
445
446 r.txd().ptr().write_value(ptr as u32);
447 r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));
448
449 r.events_endtx().write_value(0);
450 r.intenset().write(|w| w.set_endtx(true));
451
452 compiler_fence(Ordering::SeqCst);
453
454 trace!("starttx");
455 r.tasks_starttx().write_value(1);
456
457 poll_fn(|cx| {
458 s.tx_waker.register(cx.waker());
459 if r.events_endtx().read() != 0 {
460 return Poll::Ready(());
461 }
462 Poll::Pending
463 })
464 .await;
465
466 compiler_fence(Ordering::SeqCst);
467 r.events_txstarted().write_value(0);
468 drop.defuse();
469
470 Ok(())
471 }
472
473 pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
475 match self.blocking_write_from_ram(buffer) {
476 Ok(_) => Ok(()),
477 Err(Error::BufferNotInRAM) => {
478 trace!("Copying UARTE tx buffer into RAM for DMA");
479 let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
480 ram_buf.copy_from_slice(buffer);
481 self.blocking_write_from_ram(ram_buf)
482 }
483 Err(error) => Err(error),
484 }
485 }
486
487 pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
489 if buffer.is_empty() {
490 return Ok(());
491 }
492
493 slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
494 if buffer.len() > EASY_DMA_SIZE {
495 return Err(Error::BufferTooLong);
496 }
497
498 let ptr = buffer.as_ptr();
499 let len = buffer.len();
500
501 let r = self.r;
502
503 r.txd().ptr().write_value(ptr as u32);
504 r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));
505
506 r.events_endtx().write_value(0);
507 r.intenclr().write(|w| w.set_endtx(true));
508
509 compiler_fence(Ordering::SeqCst);
510
511 trace!("starttx");
512 r.tasks_starttx().write_value(1);
513
514 while r.events_endtx().read() == 0 {}
515
516 compiler_fence(Ordering::SeqCst);
517 r.events_txstarted().write_value(0);
518
519 Ok(())
520 }
521}
522
impl<'a> Drop for UarteTx<'a> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = self.r;

        // TXSTARTED still set means a transfer was started and then
        // cancelled (successful writes clear it); the cancellation cleanup
        // triggered STOPTX, so wait for the resulting TXSTOPPED before
        // releasing the peripheral.
        let did_stoptx = r.events_txstarted().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = self.state;

        // Decrement the refcount; disables the peripheral if we are last.
        drop_tx_rx(r, s);
    }
}
540
impl<'d> UarteRx<'d> {
    /// Create a receive-only UARTE without hardware flow control.
    pub fn new<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config)
    }

    /// Create a receive-only UARTE with hardware flow control (RTS).
    pub fn new_with_rtscts<T: Instance>(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
    }

    /// Read ERRORSRC, write the value back to clear the latched flags, and
    /// translate them into a `Result` (see `ErrorSource::check`).
    fn check_and_clear_errors(&mut self) -> Result<(), Error> {
        let r = self.r;
        let err_bits = r.errorsrc().read();
        r.errorsrc().write_value(err_bits);
        ErrorSource::from_bits_truncate(err_bits.0).check()
    }

    /// Shared constructor: configure the peripheral and RXD/RTS pins, enable
    /// the interrupt, then enable the UARTE. Flow control is on iff `rts` is
    /// provided.
    fn new_inner<T: Instance>(
        _uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        configure(r, config, rts.is_some());
        configure_rx_pins(r, rxd, rts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        // Only the rx half exists; see `drop_tx_rx`.
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData {},
        }
    }

    /// Upgrade to `UarteRxWithIdle`, wiring a timer and two PPI channels so
    /// reception stops automatically when the RX line goes idle.
    pub fn with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> UarteRxWithIdle<'d> {
        let timer = Timer::new(timer);

        let r = self.r;

        // Idle timeout in 16 MHz timer ticks. NOTE(review): this assumes the
        // BAUDRATE register encoding is approximately `baud * 2^32 / 16e6`,
        // which would make the quotient roughly 20 bit periods (~2 byte
        // times) — confirm against the nRF product specification.
        let baudrate = r.baudrate().read().baudrate();
        let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);

        timer.set_frequency(Frequency::F16MHz);
        timer.cc(0).write(timeout);
        timer.cc(0).short_compare_clear();
        timer.cc(0).short_compare_stop();

        // PPI: every received byte (RXDRDY) clears and restarts the idle timer.
        let mut ppi_ch1 = Ppi::new_one_to_two(
            ppi_ch1.into(),
            Event::from_reg(r.events_rxdrdy()),
            timer.task_clear(),
            timer.task_start(),
        );
        ppi_ch1.enable();

        // PPI: when the idle timer expires, stop reception (STOPRX).
        let mut ppi_ch2 = Ppi::new_one_to_one(
            ppi_ch2.into(),
            timer.cc(0).event_compare(),
            Task::from_reg(r.tasks_stoprx()),
        );
        ppi_ch2.enable();

        let state = self.state;

        UarteRxWithIdle {
            rx: self,
            timer,
            ppi_ch1: ppi_ch1,
            _ppi_ch2: ppi_ch2,
            r: r,
            state: state,
        }
    }

    /// Read until `buffer` is full.
    ///
    /// Errors with `BufferTooLong` if `buffer.len() > EASY_DMA_SIZE`.
    /// Cancel-safe: dropping the future stops reception before the borrow
    /// of `buffer` ends.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // Cancellation cleanup: stop the receiver and wait for ENDRX so the
        // DMA is done with `buffer` before the borrow ends.
        let drop = OnDrop::new(move || {
            trace!("read drop: stopping");

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}

            trace!("read drop: stopped");
        });

        // Point EasyDMA at the caller's buffer.
        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the register writes above before starting the transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        // Wait for ENDRX or an error, woken by the interrupt handler. On
        // error, stop reception before reporting it.
        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);
        drop.defuse();

        result
    }

    /// Blocking version of `read`: busy-waits on ENDRX/ERROR with the
    /// interrupts masked.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        // Point EasyDMA at the caller's buffer.
        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        // No waker: mask the interrupts and poll the events instead.
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);

        self.check_and_clear_errors()
    }
}
752
impl<'a> Drop for UarteRx<'a> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = self.r;

        // RXSTARTED still set means a receive was started and then
        // cancelled (completed reads clear it); the cancellation cleanup
        // triggered STOPRX, so wait for the resulting RXTO before releasing
        // the peripheral.
        let did_stoprx = r.events_rxstarted().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = self.state;

        // Decrement the refcount; disables the peripheral if we are last.
        drop_tx_rx(r, s);
    }
}
770
/// Receive half of the UARTE driver with RX-line idle detection, built from
/// a hardware timer and two PPI channels (see `UarteRx::with_idle`).
pub struct UarteRxWithIdle<'d> {
    rx: UarteRx<'d>,
    // Timer measuring idle time since the last received byte.
    timer: Timer<'d>,
    // RXDRDY -> timer clear + start (restarts the idle countdown per byte).
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    // Timer compare -> STOPRX (stops reception once the line is idle).
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    r: pac::uarte::Uarte,
    state: &'static State,
}
782
impl<'d> UarteRxWithIdle<'d> {
    /// Read until `buffer` is full, with idle detection disabled so the read
    /// is not cut short by the idle timeout.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Blocking version of `read`, idle detection disabled.
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Read until `buffer` is full or the RX line goes idle; returns the
    /// number of bytes received.
    ///
    /// Errors with `BufferTooLong` if `buffer.len() > EASY_DMA_SIZE`.
    /// Cancel-safe: dropping the future stops the timer and the receiver.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;
        let s = self.state;

        // Arm idle detection: each received byte restarts the timer.
        self.ppi_ch1.enable();

        // Cancellation cleanup: stop the timer and the receiver, and wait
        // for ENDRX so DMA is done with `buffer` before the borrow ends.
        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}
        });

        // Point EasyDMA at the caller's buffer.
        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the register writes above before starting the transfer.
        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        // Wait for ENDRX (buffer full, or STOPRX fired by the idle timer via
        // PPI) or an error, woken by the interrupt handler.
        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes actually transferred by DMA.
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        drop.defuse();

        result.map(|_| n)
    }

    /// Blocking version of `read_until_idle`: busy-waits on ENDRX/ERROR with
    /// the interrupts masked; returns the number of bytes received.
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = self.r;

        // Arm idle detection: each received byte restarts the timer.
        self.ppi_ch1.enable();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        // No waker: mask the interrupts and poll the events instead.
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes actually transferred by DMA.
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}
912
913#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
914pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {
915 }
917
/// Work around a UARTE enable anomaly present on nRF9160/nRF5340: if the
/// receiver or transmitter was left enabled (e.g. by a previous boot stage),
/// force it to a stopped, disabled state before reconfiguring.
#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // NOTE(review): 0x564/0x568 look like undocumented RXENABLE/TXENABLE
    // registers, presumably taken from Nordic's own driver workaround —
    // confirm against the nrfx sources.
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    // Transmitter stuck enabled: request a stop.
    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_stoptx().write_value(1);
    }

    // Receiver stuck enabled: enable the peripheral, request an RX stop, and
    // spin (bounded) until the receiver reports disabled.
    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_stoprx().write_value(1);

        let mut workaround_succeded = false;
        // Bounded spin so a wedged peripheral cannot hang us forever.
        for _ in 0..40000 {
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeded = true;
                break;
            } else {
                // Busy-poll; ideally a short (~1us) delay would go here.
            }
        }

        if !workaround_succeded {
            panic!("Failed to apply workaround for UART");
        }

        // Clear any error flags latched while stopping, then disable again.
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}
964
/// Decrement the shared tx/rx refcount; when the last half is dropped,
/// disable the peripheral and deconfigure all of its pins.
pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
    // `fetch_sub` returns the previous value: 1 means we were the last user.
    if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().rxd().read());
        gpio::deconfigure_pin(r.psel().txd().read());
        gpio::deconfigure_pin(r.psel().rts().read());
        gpio::deconfigure_pin(r.psel().cts().read());

        trace!("uarte tx and rx drop: done");
    }
}
979
/// Per-instance shared state: wakers used by the interrupt handler and a
/// refcount tracking how many of the tx/rx halves are still alive.
pub(crate) struct State {
    pub(crate) rx_waker: AtomicWaker,
    pub(crate) tx_waker: AtomicWaker,
    pub(crate) tx_rx_refcount: AtomicU8,
}
985impl State {
986 pub(crate) const fn new() -> Self {
987 Self {
988 rx_waker: AtomicWaker::new(),
989 tx_waker: AtomicWaker::new(),
990 tx_rx_refcount: AtomicU8::new(0),
991 }
992 }
993}
994
/// Sealed half of `Instance`: ties a peripheral singleton to its register
/// block and its static driver state.
pub(crate) trait SealedInstance {
    fn regs() -> pac::uarte::Uarte;
    fn state() -> &'static State;
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}
1000
/// UARTE peripheral instance (sealed; implemented via `impl_uarte!`).
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
1007
/// Implements `SealedInstance`/`Instance` for a UARTE peripheral singleton,
/// wiring it to its PAC register block and typelevel interrupt, and giving
/// it its own static `State` storage.
macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            // One static State per peripheral instance.
            fn state() -> &'static crate::uarte::State {
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            // Separate static state for the buffered-uarte driver.
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
1028
/// embedded-hal 0.2 blocking serial trait implementations.
mod eh02 {
    use super::*;

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // `blocking_write` only returns after the transfer completed, so
        // there is nothing left to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // `blocking_write` only returns after the transfer completed, so
        // there is nothing left to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}
1058
/// embedded-io-async trait implementations.
mod _embedded_io {
    use super::*;

    impl embedded_io_async::Error for Error {
        // Map each driver error onto the closest embedded-io error kind.
        fn kind(&self) -> embedded_io_async::ErrorKind {
            match *self {
                Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
                Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
                Error::Framing => embedded_io_async::ErrorKind::InvalidData,
                Error::Parity => embedded_io_async::ErrorKind::InvalidData,
                Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
                Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
            }
        }
    }

    impl<'d> embedded_io_async::ErrorType for Uarte<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::ErrorType for UarteTx<'d> {
        type Error = Error;
    }

    impl<'d> embedded_io_async::Write for Uarte<'d> {
        // Always writes the whole buffer (inherent `write` completes fully),
        // so the reported length is `buf.len()`.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }

    impl<'d> embedded_io_async::Write for UarteTx<'d> {
        // Always writes the whole buffer (inherent `write` completes fully),
        // so the reported length is `buf.len()`.
        async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
            self.write(buf).await?;
            Ok(buf.len())
        }
    }
}