1#![macro_use]
15
16use core::future::poll_fn;
17use core::marker::PhantomData;
18use core::sync::atomic::{compiler_fence, AtomicU8, Ordering};
19use core::task::Poll;
20
21use embassy_hal_internal::drop::OnDrop;
22use embassy_hal_internal::{Peri, PeripheralType};
23use embassy_sync::waitqueue::AtomicWaker;
24pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
26
27use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
28use crate::gpio::{self, AnyPin, Pin as GpioPin, PselBits, SealedPin as _, DISCONNECTED};
29use crate::interrupt::typelevel::Interrupt;
30use crate::pac::gpio::vals as gpiovals;
31use crate::pac::uarte::vals;
32use crate::ppi::{AnyConfigurableChannel, ConfigurableChannel, Event, Ppi, Task};
33use crate::timer::{Frequency, Instance as TimerInstance, Timer};
34use crate::util::slice_in_ram_or;
35use crate::{interrupt, pac};
36
/// UARTE configuration.
#[derive(Clone)]
#[non_exhaustive]
pub struct Config {
    /// Parity bit setting (default: excluded).
    pub parity: Parity,
    /// Baud rate (default: 115200).
    pub baudrate: Baudrate,
}
46
47impl Default for Config {
48 fn default() -> Self {
49 Self {
50 parity: Parity::EXCLUDED,
51 baudrate: Baudrate::BAUD115200,
52 }
53 }
54}
55
bitflags::bitflags! {
    /// Error flags mirroring the bits of the UARTE ERRORSRC register.
    pub(crate) struct ErrorSource: u32 {
        /// Receive buffer overrun.
        const OVERRUN = 0x01;
        /// Parity error on a received byte.
        const PARITY = 0x02;
        /// Framing error (stop bit not detected).
        const FRAMING = 0x04;
        /// Break condition on the line.
        const BREAK = 0x08;
    }
}
69
70impl ErrorSource {
71 #[inline]
72 fn check(self) -> Result<(), Error> {
73 if self.contains(ErrorSource::OVERRUN) {
74 Err(Error::Overrun)
75 } else if self.contains(ErrorSource::PARITY) {
76 Err(Error::Parity)
77 } else if self.contains(ErrorSource::FRAMING) {
78 Err(Error::Framing)
79 } else if self.contains(ErrorSource::BREAK) {
80 Err(Error::Break)
81 } else {
82 Ok(())
83 }
84 }
85}
86
/// UARTE error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Buffer was too long (exceeds the EasyDMA transfer limit).
    BufferTooLong,
    /// The buffer is not in data RAM. EasyDMA can only access RAM,
    /// not e.g. flash-resident data.
    BufferNotInRAM,
    /// Framing error.
    Framing,
    /// Parity error.
    Parity,
    /// Receive buffer overrun.
    Overrun,
    /// Break condition detected on the line.
    Break,
}
105
/// Interrupt handler for the UARTE peripheral; bind it to the instance's
/// interrupt via the crate's interrupt-binding mechanism.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
110
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // Wake the rx future on ENDRX or ERROR. The event registers are left
        // set — the woken future inspects them — but the corresponding
        // interrupt enables are cleared so the IRQ does not immediately
        // re-fire before the future gets to run.
        let endrx = r.events_endrx().read();
        let error = r.events_error().read();
        if endrx != 0 || error != 0 {
            s.rx_waker.wake();
            if endrx != 0 {
                r.intenclr().write(|w| w.set_endrx(true));
            }
            if error != 0 {
                r.intenclr().write(|w| w.set_error(true));
            }
        }
        // Same pattern for the tx future on ENDTX.
        if r.events_endtx().read() != 0 {
            s.tx_waker.wake();
            r.intenclr().write(|w| w.set_endtx(true));
        }
    }
}
133
/// Interface to the UARTE peripheral (EasyDMA-driven UART).
/// Owns both the transmit and receive halves; see [`Uarte::split`].
pub struct Uarte<'d, T: Instance> {
    tx: UarteTx<'d, T>,
    rx: UarteRx<'d, T>,
}
139
/// Transmitter half of the UARTE peripheral.
pub struct UarteTx<'d, T: Instance> {
    _p: Peri<'d, T>,
}
146
/// Receiver half of the UARTE peripheral.
pub struct UarteRx<'d, T: Instance> {
    _p: Peri<'d, T>,
}
153
impl<'d, T: Instance> Uarte<'d, T> {
    /// Create a new UARTE without hardware flow control.
    pub fn new(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), txd.into(), None, None, config)
    }

    /// Create a new UARTE with hardware flow control (RTS/CTS).
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, impl GpioPin>,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        config: Config,
    ) -> Self {
        Self::new_inner(
            uarte,
            rxd.into(),
            txd.into(),
            Some(cts.into()),
            Some(rts.into()),
            config,
        )
    }

    /// Shared constructor: configures the peripheral and pins, enables the
    /// interrupt and the peripheral, and initializes the drop refcount.
    fn new_inner(
        uarte: Peri<'d, T>,
        rxd: Peri<'d, AnyPin>,
        txd: Peri<'d, AnyPin>,
        cts: Option<Peri<'d, AnyPin>>,
        rts: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Hardware flow control needs both lines; reject half-configured setups.
        let hardware_flow_control = match (rts.is_some(), cts.is_some()) {
            (false, false) => false,
            (true, true) => true,
            _ => panic!("RTS and CTS pins must be either both set or none set."),
        };
        configure(r, config, hardware_flow_control);
        configure_rx_pins(r, rxd, rts);
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        // Two live halves (tx + rx); the peripheral is disabled only once
        // both have been dropped (see `drop_tx_rx`).
        s.tx_rx_refcount.store(2, Ordering::Relaxed);

        Self {
            tx: UarteTx {
                // Both halves refer to the same peripheral; the refcount
                // above tracks their combined lifetime.
                _p: unsafe { uarte.clone_unchecked() },
            },
            rx: UarteRx { _p: uarte },
        }
    }

    /// Split the Uarte into its transmitter and receiver parts, so they can
    /// be used concurrently from independent tasks.
    pub fn split(self) -> (UarteTx<'d, T>, UarteRx<'d, T>) {
        (self.tx, self.rx)
    }

    /// Split into transmitter and receiver by mutable reference, without
    /// consuming the `Uarte`.
    pub fn split_by_ref(&mut self) -> (&mut UarteTx<'d, T>, &mut UarteRx<'d, T>) {
        (&mut self.tx, &mut self.rx)
    }

    /// Split into the transmitter and a receiver capable of detecting line
    /// idle, driven by `timer` and two PPI channels.
    pub fn split_with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> (UarteTx<'d, T>, UarteRxWithIdle<'d, T, U>) {
        (self.tx, self.rx.with_idle(timer, ppi_ch1, ppi_ch2))
    }

    /// The ENDTX event, e.g. for wiring up with PPI.
    pub fn event_endtx(&self) -> Event<'_> {
        let r = T::regs();
        Event::from_reg(r.events_endtx())
    }

    /// Read bytes until `buffer` is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.read(buffer).await
    }

    /// Write all bytes in `buffer`, copying to RAM first if needed.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write(buffer).await
    }

    /// Write all bytes in `buffer`, which must already reside in RAM.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.write_from_ram(buffer).await
    }

    /// Blocking version of [`Self::read`].
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.rx.blocking_read(buffer)
    }

    /// Blocking version of [`Self::write`].
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write(buffer)
    }

    /// Blocking version of [`Self::write_from_ram`].
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        self.tx.blocking_write_from_ram(buffer)
    }
}
283
/// Configure the TXD (and optional CTS) pins and connect them to the peripheral.
pub(crate) fn configure_tx_pins(r: pac::uarte::Uarte, txd: Peri<'_, AnyPin>, cts: Option<Peri<'_, AnyPin>>) {
    // Drive TXD high before switching it to output: UART idle level is high.
    txd.set_high();
    txd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::OUTPUT);
        w.set_input(gpiovals::Input::DISCONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().txd().write_value(txd.psel_bits());

    if let Some(pin) = &cts {
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::INPUT);
            w.set_input(gpiovals::Input::CONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    // `psel_bits()` on the Option yields the disconnected value when None.
    r.psel().cts().write_value(cts.psel_bits());
}
302
/// Configure the RXD (and optional RTS) pins and connect them to the peripheral.
pub(crate) fn configure_rx_pins(r: pac::uarte::Uarte, rxd: Peri<'_, AnyPin>, rts: Option<Peri<'_, AnyPin>>) {
    rxd.conf().write(|w| {
        w.set_dir(gpiovals::Dir::INPUT);
        w.set_input(gpiovals::Input::CONNECT);
        w.set_drive(gpiovals::Drive::H0H1);
    });
    r.psel().rxd().write_value(rxd.psel_bits());

    if let Some(pin) = &rts {
        // Drive RTS high (inactive) before switching it to output.
        pin.set_high();
        pin.conf().write(|w| {
            w.set_dir(gpiovals::Dir::OUTPUT);
            w.set_input(gpiovals::Input::DISCONNECT);
            w.set_drive(gpiovals::Drive::H0H1);
        });
    }
    // `psel_bits()` on the Option yields the disconnected value when None.
    r.psel().rts().write_value(rts.psel_bits());
}
321
/// Common peripheral setup shared by all constructors: line format, baudrate,
/// masked interrupts, cleared start-markers, all pins disconnected (callers
/// reconnect the ones they actually use), and the enable-anomaly workaround.
pub(crate) fn configure(r: pac::uarte::Uarte, config: Config, hardware_flow_control: bool) {
    r.config().write(|w| {
        w.set_hwfc(hardware_flow_control);
        w.set_parity(config.parity);
    });
    r.baudrate().write(|w| w.set_baudrate(config.baudrate));

    // Mask all interrupts; each future enables exactly the ones it waits on.
    r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

    // Clear the "transfer started" markers that the Drop impls consult.
    r.events_rxstarted().write_value(0);
    r.events_txstarted().write_value(0);

    // Disconnect all pins; pin selection is done by the configure_*_pins helpers.
    r.psel().txd().write_value(DISCONNECTED);
    r.psel().rxd().write_value(DISCONNECTED);
    r.psel().cts().write_value(DISCONNECTED);
    r.psel().rts().write_value(DISCONNECTED);

    apply_workaround_for_enable_anomaly(r);
}
345
impl<'d, T: Instance> UarteTx<'d, T> {
    /// Create a new tx-only UARTE without hardware flow control.
    pub fn new(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), None, config)
    }

    /// Create a new tx-only UARTE with hardware flow control (CTS).
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        txd: Peri<'d, impl GpioPin>,
        cts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, txd.into(), Some(cts.into()), config)
    }

    /// Shared constructor: configure, connect pins, enable interrupt and peripheral.
    fn new_inner(uarte: Peri<'d, T>, txd: Peri<'d, AnyPin>, cts: Option<Peri<'d, AnyPin>>, config: Config) -> Self {
        let r = T::regs();

        configure(r, config, cts.is_some());
        configure_tx_pins(r, txd, cts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        // Only one half is live, so a single drop releases the peripheral.
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self { _p: uarte }
    }

    /// Write all bytes in `buffer`.
    ///
    /// EasyDMA can only read from RAM, so if `buffer` is not in RAM (e.g.
    /// flash-resident), it is first copied into a stack buffer of
    /// `FORCE_COPY_BUFFER_SIZE` bytes. Note: in that case the slice below
    /// panics if `buffer.len() > FORCE_COPY_BUFFER_SIZE`.
    pub async fn write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.write_from_ram(buffer).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.write_from_ram(ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Write all bytes in `buffer`, which must already reside in RAM.
    ///
    /// Returns `Error::BufferNotInRAM` if it does not, and
    /// `Error::BufferTooLong` if it exceeds the EasyDMA limit.
    pub async fn write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        // If the future is dropped mid-transfer, stop the transmitter and
        // busy-wait until DMA is done with `buffer`, which is about to be
        // invalidated.
        let drop = OnDrop::new(move || {
            trace!("write drop: stopping");

            r.intenclr().write(|w| w.set_endtx(true));
            r.events_txstopped().write_value(0);
            r.tasks_stoptx().write_value(1);

            while r.events_endtx().read() == 0 {}
            trace!("write drop: stopped");
        });

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        r.intenset().write(|w| w.set_endtx(true));

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        // ENDTX wakes us via the interrupt handler (which masks the IRQ).
        poll_fn(|cx| {
            s.tx_waker.register(cx.waker());
            if r.events_endtx().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);
        // Completed normally: no need for the stop-on-drop cleanup.
        drop.defuse();

        Ok(())
    }

    /// Blocking variant of [`Self::write`]; copies to RAM if necessary.
    pub fn blocking_write(&mut self, buffer: &[u8]) -> Result<(), Error> {
        match self.blocking_write_from_ram(buffer) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying UARTE tx buffer into RAM for DMA");
                let ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..buffer.len()];
                ram_buf.copy_from_slice(buffer);
                self.blocking_write_from_ram(ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    /// Blocking variant of [`Self::write_from_ram`].
    pub fn blocking_write_from_ram(&mut self, buffer: &[u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }

        slice_in_ram_or(buffer, Error::BufferNotInRAM)?;
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        r.txd().ptr().write_value(ptr as u32);
        r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endtx().write_value(0);
        // Mask the ENDTX interrupt: completion is polled below, not awaited.
        r.intenclr().write(|w| w.set_endtx(true));

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("starttx");
        r.tasks_starttx().write_value(1);

        while r.events_endtx().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_txstarted().write_value(0);

        Ok(())
    }
}
503
impl<'a, T: Instance> Drop for UarteTx<'a, T> {
    fn drop(&mut self) {
        trace!("uarte tx drop");

        let r = T::regs();

        // If a transfer was ever started, a cancelled write has issued
        // STOPTX (see the OnDrop in `write_from_ram`); wait for TXSTOPPED
        // before releasing the peripheral.
        let did_stoptx = r.events_txstarted().read() != 0;
        trace!("did_stoptx {}", did_stoptx);

        while did_stoptx && r.events_txstopped().read() == 0 {}

        let s = T::state();

        // Decrement the half-refcount; the last half disables the peripheral.
        drop_tx_rx(r, s);
    }
}
521
impl<'d, T: Instance> UarteRx<'d, T> {
    /// Create a new rx-only UARTE without hardware flow control.
    pub fn new(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), None, config)
    }

    /// Create a new rx-only UARTE with hardware flow control (RTS).
    pub fn new_with_rtscts(
        uarte: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        rxd: Peri<'d, impl GpioPin>,
        rts: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(uarte, rxd.into(), Some(rts.into()), config)
    }

    /// Read ERRORSRC, clear it by writing the read bits back
    /// (write-1-to-clear), and map any set flag to an `Error`.
    fn check_and_clear_errors(&mut self) -> Result<(), Error> {
        let r = T::regs();
        let err_bits = r.errorsrc().read();
        r.errorsrc().write_value(err_bits);
        ErrorSource::from_bits_truncate(err_bits.0).check()
    }

    /// Shared constructor: configure, connect pins, enable interrupt and peripheral.
    fn new_inner(uarte: Peri<'d, T>, rxd: Peri<'d, AnyPin>, rts: Option<Peri<'d, AnyPin>>, config: Config) -> Self {
        let r = T::regs();

        configure(r, config, rts.is_some());
        configure_rx_pins(r, rxd, rts);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let s = T::state();
        // Only one half is live, so a single drop releases the peripheral.
        s.tx_rx_refcount.store(1, Ordering::Relaxed);

        Self { _p: uarte }
    }

    /// Upgrade to a receiver capable of detecting line idle, driven by
    /// `timer` and two PPI channels.
    pub fn with_idle<U: TimerInstance>(
        self,
        timer: Peri<'d, U>,
        ppi_ch1: Peri<'d, impl ConfigurableChannel + 'd>,
        ppi_ch2: Peri<'d, impl ConfigurableChannel + 'd>,
    ) -> UarteRxWithIdle<'d, T, U> {
        let timer = Timer::new(timer);

        let r = T::regs();

        // The BAUDRATE register value is approximately baud * 2^32 / 16 MHz,
        // so 0x8000_0000 / (value / 40) ~= 20 bit-times in 16 MHz timer
        // ticks — i.e. roughly two bytes worth of idle time.
        let baudrate = r.baudrate().read().baudrate();
        let timeout = 0x8000_0000 / (baudrate.to_bits() / 40);

        timer.set_frequency(Frequency::F16MHz);
        timer.cc(0).write(timeout);
        timer.cc(0).short_compare_clear();
        timer.cc(0).short_compare_stop();

        // Every received byte (RXDRDY) clears and restarts the idle timer.
        let mut ppi_ch1 = Ppi::new_one_to_two(
            ppi_ch1.into(),
            Event::from_reg(r.events_rxdrdy()),
            timer.task_clear(),
            timer.task_start(),
        );
        ppi_ch1.enable();

        // When the idle timeout elapses, stop the receiver.
        let mut ppi_ch2 = Ppi::new_one_to_one(
            ppi_ch2.into(),
            timer.cc(0).event_compare(),
            Task::from_reg(r.tasks_stoprx()),
        );
        ppi_ch2.enable();

        UarteRxWithIdle {
            rx: self,
            timer,
            ppi_ch1,
            _ppi_ch2: ppi_ch2,
        }
    }

    /// Read bytes until `buffer` is filled.
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        // If the future is dropped mid-transfer, stop the receiver and
        // busy-wait until DMA is done with `buffer`, which is about to be
        // invalidated.
        let drop = OnDrop::new(move || {
            trace!("read drop: stopping");

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}

            trace!("read drop: stopped");
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        // Completion (ENDRX) or an error wakes us via the interrupt handler.
        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.check_and_clear_errors() {
                // On error, stop the receiver and report.
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }
            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);
        // Completed normally: no need for the stop-on-drop cleanup.
        drop.defuse();

        result
    }

    /// Blocking variant of [`Self::read`].
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        if buffer.is_empty() {
            return Ok(());
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        // Mask interrupts: completion is polled below, not awaited.
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        trace!("startrx");
        r.tasks_startrx().write_value(1);

        // Spin until the transfer completes or an error is flagged.
        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        r.events_rxstarted().write_value(0);

        self.check_and_clear_errors()
    }
}
720
impl<'a, T: Instance> Drop for UarteRx<'a, T> {
    fn drop(&mut self) {
        trace!("uarte rx drop");

        let r = T::regs();

        // If a transfer was ever started, a cancelled read has issued
        // STOPRX (see the OnDrop in `read`); wait for RXTO before
        // releasing the peripheral.
        let did_stoprx = r.events_rxstarted().read() != 0;
        trace!("did_stoprx {}", did_stoprx);

        while did_stoprx && r.events_rxto().read() == 0 {}

        let s = T::state();

        // Decrement the half-refcount; the last half disables the peripheral.
        drop_tx_rx(r, s);
    }
}
738
/// Receiver half of the UARTE peripheral, with line-idle detection.
/// A timer and two PPI channels stop reception when the line has been
/// quiet for the configured timeout (see `UarteRx::with_idle`).
pub struct UarteRxWithIdle<'d, T: Instance, U: TimerInstance> {
    rx: UarteRx<'d, T>,
    timer: Timer<'d, U>,
    // RXDRDY -> timer clear+start: restarts the idle timeout on each byte.
    ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    // timer compare -> STOPRX: ends the transfer once the line is idle.
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 1>,
}
748
impl<'d, T: Instance, U: TimerInstance> UarteRxWithIdle<'d, T, U> {
    /// Read bytes until `buffer` is filled (no idle cutoff).
    pub async fn read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        // Disable the byte-activity -> idle-timer link so the timeout
        // cannot stop this fixed-length read early.
        self.ppi_ch1.disable();
        self.rx.read(buffer).await
    }

    /// Blocking variant of [`Self::read`].
    pub fn blocking_read(&mut self, buffer: &mut [u8]) -> Result<(), Error> {
        self.ppi_ch1.disable();
        self.rx.blocking_read(buffer)
    }

    /// Read bytes until `buffer` is filled or the line becomes idle.
    /// Returns the number of bytes received.
    pub async fn read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();
        let s = T::state();

        // Re-enable the idle-timeout link (a prior `read` may have disabled it).
        self.ppi_ch1.enable();

        // If the future is dropped mid-transfer, stop the timer and the
        // receiver, and wait until DMA is done with `buffer`.
        let drop = OnDrop::new(|| {
            self.timer.stop();

            r.intenclr().write(|w| {
                w.set_endrx(true);
                w.set_error(true);
            });
            r.events_rxto().write_value(0);
            r.events_error().write_value(0);
            r.tasks_stoprx().write_value(1);

            while r.events_endrx().read() == 0 {}
        });

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        r.intenset().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        // ENDRX fires either when the buffer fills or when the idle timeout
        // triggers STOPRX via PPI.
        let result = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            if let Err(e) = self.rx.check_and_clear_errors() {
                r.tasks_stoprx().write_value(1);
                return Poll::Ready(Err(e));
            }
            if r.events_endrx().read() != 0 {
                return Poll::Ready(Ok(()));
            }

            Poll::Pending
        })
        .await;

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes actually transferred by DMA.
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        // Completed normally: no need for the stop-on-drop cleanup.
        drop.defuse();

        result.map(|_| n)
    }

    /// Blocking variant of [`Self::read_until_idle`].
    pub fn blocking_read_until_idle(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
        if buffer.is_empty() {
            return Ok(0);
        }
        if buffer.len() > EASY_DMA_SIZE {
            return Err(Error::BufferTooLong);
        }

        let ptr = buffer.as_ptr();
        let len = buffer.len();

        let r = T::regs();

        // Re-enable the idle-timeout link (a prior read may have disabled it).
        self.ppi_ch1.enable();

        r.rxd().ptr().write_value(ptr as u32);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(len as _));

        r.events_endrx().write_value(0);
        r.events_error().write_value(0);
        // Mask interrupts: completion is polled below, not awaited.
        r.intenclr().write(|w| {
            w.set_endrx(true);
            w.set_error(true);
        });

        // Order the buffer/register writes before starting the DMA transfer.
        compiler_fence(Ordering::SeqCst);

        r.tasks_startrx().write_value(1);

        while r.events_endrx().read() == 0 && r.events_error().read() == 0 {}

        compiler_fence(Ordering::SeqCst);
        // AMOUNT holds the number of bytes actually transferred by DMA.
        let n = r.rxd().amount().read().0 as usize;

        self.timer.stop();
        r.events_rxstarted().write_value(0);

        self.rx.check_and_clear_errors().map(|_| n)
    }
}
878
/// No enable-anomaly workaround is required on this chip family; see the
/// nRF9160/nRF5340 variant below for the chips that need one.
#[cfg(not(any(feature = "_nrf9160", feature = "_nrf5340")))]
pub(crate) fn apply_workaround_for_enable_anomaly(_r: pac::uarte::Uarte) {}
883
/// Work around a UARTE enable anomaly present on nRF9160 and nRF5340, where
/// the internal RX/TX enable state can be left stuck from a previous session.
///
/// Panics if the receiver's internal enable flag fails to clear within the
/// bounded poll loop.
#[cfg(any(feature = "_nrf9160", feature = "_nrf5340"))]
pub(crate) fn apply_workaround_for_enable_anomaly(r: pac::uarte::Uarte) {
    // Undocumented registers exposing the internal RX/TX enable state; they
    // are not in the public register map, hence the raw pointer arithmetic.
    let rp = r.as_ptr() as *mut u32;
    let rxenable_reg = unsafe { rp.add(0x564 / 4) };
    let txenable_reg = unsafe { rp.add(0x568 / 4) };

    // A stuck transmitter only needs a STOPTX.
    if unsafe { core::ptr::read_volatile(txenable_reg) } == 1 {
        r.tasks_stoptx().write_value(1);
    }

    // A stuck receiver must be re-enabled and stopped, then polled until the
    // internal enable flag clears.
    if unsafe { core::ptr::read_volatile(rxenable_reg) } == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        r.tasks_stoprx().write_value(1);

        // Bounded busy-poll so a broken peripheral produces a diagnosable
        // panic instead of a silent hang. NOTE(review): 40000 iterations is
        // an empirical bound inherited from the original code — confirm it
        // covers the worst-case stop time at the lowest supported baud rate.
        let mut workaround_succeeded = false;
        for _ in 0..40000 {
            if unsafe { core::ptr::read_volatile(rxenable_reg) } == 0 {
                workaround_succeeded = true;
                break;
            }
        }

        if !workaround_succeeded {
            panic!("Failed to apply workaround for UART");
        }

        // Clear any errors accumulated while stuck (ERRORSRC is write-1-to-clear).
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
    }
}
930
/// Release one half (tx or rx) of the peripheral. The last half to be
/// dropped disables the UARTE and deconfigures all of its pins.
pub(crate) fn drop_tx_rx(r: pac::uarte::Uarte, s: &State) {
    // fetch_sub returns the previous value: 1 means we were the last half.
    if s.tx_rx_refcount.fetch_sub(1, Ordering::Relaxed) == 1 {
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().rxd().read());
        gpio::deconfigure_pin(r.psel().txd().read());
        gpio::deconfigure_pin(r.psel().rts().read());
        gpio::deconfigure_pin(r.psel().cts().read());

        trace!("uarte tx and rx drop: done");
    }
}
945
/// Per-instance driver state shared between the tx/rx halves and the
/// interrupt handler.
pub(crate) struct State {
    // Woken by the interrupt handler on ENDRX/ERROR.
    pub(crate) rx_waker: AtomicWaker,
    // Woken by the interrupt handler on ENDTX.
    pub(crate) tx_waker: AtomicWaker,
    // Number of live halves (tx/rx); the peripheral is disabled when it
    // reaches zero (see `drop_tx_rx`).
    pub(crate) tx_rx_refcount: AtomicU8,
}
951impl State {
952 pub(crate) const fn new() -> Self {
953 Self {
954 rx_waker: AtomicWaker::new(),
955 tx_waker: AtomicWaker::new(),
956 tx_rx_refcount: AtomicU8::new(0),
957 }
958 }
959}
960
pub(crate) trait SealedInstance {
    /// The UARTE register block for this instance.
    fn regs() -> pac::uarte::Uarte;
    /// Driver state shared between the halves and the interrupt handler.
    fn state() -> &'static State;
    /// State used by the buffered UARTE driver variant.
    fn buffered_state() -> &'static crate::buffered_uarte::State;
}
966
/// UARTE peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
973
/// Implements `Instance` (and the sealed trait) for a UARTE peripheral
/// singleton: `$type` is the peripheral type, `$pac_type` the PAC register
/// block constant, and `$irq` the interrupt type.
macro_rules! impl_uarte {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::uarte::SealedInstance for peripherals::$type {
            fn regs() -> pac::uarte::Uarte {
                pac::$pac_type
            }
            fn state() -> &'static crate::uarte::State {
                // One static State per instantiation, i.e. per peripheral.
                static STATE: crate::uarte::State = crate::uarte::State::new();
                &STATE
            }
            fn buffered_state() -> &'static crate::buffered_uarte::State {
                static STATE: crate::buffered_uarte::State = crate::buffered_uarte::State::new();
                &STATE
            }
        }
        impl crate::uarte::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
994
/// embedded-hal 0.2 blocking serial trait implementations.
mod eh02 {
    use super::*;

    impl<'d, T: Instance> embedded_hal_02::blocking::serial::Write<u8> for Uarte<'d, T> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Writes complete synchronously in `bwrite_all`, so there is
        // nothing left to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    impl<'d, T: Instance> embedded_hal_02::blocking::serial::Write<u8> for UarteTx<'d, T> {
        type Error = Error;

        fn bwrite_all(&mut self, buffer: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(buffer)
        }

        // Writes complete synchronously in `bwrite_all`, so there is
        // nothing left to flush.
        fn bflush(&mut self) -> Result<(), Self::Error> {
            Ok(())
        }
    }
}
1024
1025mod _embedded_io {
1026 use super::*;
1027
1028 impl embedded_io_async::Error for Error {
1029 fn kind(&self) -> embedded_io_async::ErrorKind {
1030 match *self {
1031 Error::BufferTooLong => embedded_io_async::ErrorKind::InvalidInput,
1032 Error::BufferNotInRAM => embedded_io_async::ErrorKind::Unsupported,
1033 Error::Framing => embedded_io_async::ErrorKind::InvalidData,
1034 Error::Parity => embedded_io_async::ErrorKind::InvalidData,
1035 Error::Overrun => embedded_io_async::ErrorKind::OutOfMemory,
1036 Error::Break => embedded_io_async::ErrorKind::ConnectionAborted,
1037 }
1038 }
1039 }
1040
1041 impl<'d, U: Instance> embedded_io_async::ErrorType for Uarte<'d, U> {
1042 type Error = Error;
1043 }
1044
1045 impl<'d, U: Instance> embedded_io_async::ErrorType for UarteTx<'d, U> {
1046 type Error = Error;
1047 }
1048
1049 impl<'d, U: Instance> embedded_io_async::Write for Uarte<'d, U> {
1050 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
1051 self.write(buf).await?;
1052 Ok(buf.len())
1053 }
1054 }
1055
1056 impl<'d: 'd, U: Instance> embedded_io_async::Write for UarteTx<'d, U> {
1057 async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
1058 self.write(buf).await?;
1059 Ok(buf.len())
1060 }
1061 }
1062}