use core::cmp::min;
use core::future::{poll_fn, Future};
use core::marker::PhantomData;
use core::slice;
use core::sync::atomic::{compiler_fence, AtomicBool, AtomicU8, AtomicUsize, Ordering};
use core::task::Poll;
use embassy_hal_internal::atomic_ring_buffer::RingBuffer;
use embassy_hal_internal::{into_ref, PeripheralRef};
use pac::uarte::vals;
pub use pac::uarte::vals::{Baudrate, ConfigParity as Parity};
use crate::gpio::{AnyPin, Pin as GpioPin};
use crate::interrupt::typelevel::Interrupt;
use crate::ppi::{
self, AnyConfigurableChannel, AnyGroup, Channel, ConfigurableChannel, Event, Group, Ppi, PpiGroup, Task,
};
use crate::timer::{Instance as TimerInstance, Timer};
use crate::uarte::{configure, configure_rx_pins, configure_tx_pins, drop_tx_rx, Config, Instance as UarteInstance};
use crate::{interrupt, pac, Peripheral, EASY_DMA_SIZE};
/// Driver state shared between the interrupt handler and the user-facing
/// halves of the driver, stored per UARTE instance (`U::buffered_state()`).
pub(crate) struct State {
    // Ring buffer holding bytes queued for transmission.
    tx_buf: RingBuffer,
    // Length in bytes of the TX DMA transfer currently in flight
    // (0 when no transfer is active).
    tx_count: AtomicUsize,

    // Ring buffer the RX DMA writes received bytes into.
    rx_buf: RingBuffer,
    // Set once the first RX transfer has been started.
    rx_started: AtomicBool,
    // Wrapping count of RX transfers started; compared against
    // `rx_ended_count` in the interrupt handler to detect whether an ENDRX
    // has already happened for every started transfer.
    rx_started_count: AtomicU8,
    // Wrapping count of ENDRX events observed.
    rx_ended_count: AtomicU8,
    // Number of the PPI channel used to restart RX DMA on ENDRX.
    rx_ppi_ch: AtomicU8,
}
/// Buffered UARTE error.
///
/// Currently uninhabited: no recoverable errors are reported (an RX overrun
/// panics in the interrupt handler instead). Marked `#[non_exhaustive]` so
/// variants can be added later without a breaking change.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
}
impl State {
    /// Builds the initial (idle) driver state: both ring buffers empty,
    /// all counters zeroed, and RX marked as not yet started.
    pub(crate) const fn new() -> Self {
        Self {
            rx_buf: RingBuffer::new(),
            rx_started: AtomicBool::new(false),
            rx_started_count: AtomicU8::new(0),
            rx_ended_count: AtomicU8::new(0),
            rx_ppi_ch: AtomicU8::new(0),
            tx_buf: RingBuffer::new(),
            tx_count: AtomicUsize::new(0),
        }
    }
}
/// Interrupt handler for the buffered UARTE driver.
///
/// Bind this to the UARTE instance's interrupt via the `_irq`
/// `interrupt::typelevel::Binding` parameter of the constructors.
pub struct InterruptHandler<U: UarteInstance> {
    _phantom: PhantomData<U>,
}
impl<U: UarteInstance> interrupt::typelevel::Handler<U::Interrupt> for InterruptHandler<U> {
    // Services both directions: keeps RX DMA chunks queued into `rx_buf`
    // and drains `tx_buf` into TX DMA transfers, waking any pending
    // read/write futures.
    unsafe fn on_interrupt() {
        let r = U::regs();
        let ss = U::state();
        let s = U::buffered_state();

        // --- RX half. Skipped if the rx ring buffer is not initialized or
        // its writer side is otherwise unavailable.
        if let Some(mut rx) = unsafe { s.rx_buf.try_writer() } {
            let buf_len = s.rx_buf.len();
            // RX DMA transfers are programmed in half-buffer chunks, so one
            // half can be filled while the other is being read out.
            let half_len = buf_len / 2;

            if r.events_error().read() != 0 {
                r.events_error().write_value(0);
                // Read the error source flags, then write them back to
                // acknowledge/clear them.
                let errs = r.errorsrc().read();
                r.errorsrc().write_value(errs);

                if errs.overrun() {
                    panic!("BufferedUarte overrun");
                }
            }

            // RXDRDY is only unmasked by a pending `fill_buf()` that needs a
            // wake-up; disable it again here so it doesn't keep firing, and
            // wake the reader.
            if r.inten().read().rxdrdy() && r.events_rxdrdy().read() != 0 {
                r.intenclr().write(|w| w.set_rxdrdy(true));
                r.events_rxdrdy().write_value(0);
                ss.rx_waker.wake();
            }

            // Count completed RX transfers (wrapping u8 counter).
            if r.events_endrx().read() != 0 {
                r.events_endrx().write_value(0);

                let val = s.rx_ended_count.load(Ordering::Relaxed);
                s.rx_ended_count.store(val.wrapping_add(1), Ordering::Relaxed);
            }

            // Program the next RX DMA chunk, either because the current one
            // has started (RXSTARTED) or because RX has never been started.
            if r.events_rxstarted().read() != 0 || !s.rx_started.load(Ordering::Relaxed) {
                let (ptr, len) = rx.push_buf();
                if len >= half_len {
                    r.events_rxstarted().write_value(0);

                    // Point the RX DMA at the next free half-buffer.
                    r.rxd().ptr().write_value(ptr as u32);
                    r.rxd().maxcnt().write(|w| w.set_maxcnt(half_len as _));

                    // Enable the PPI channel that restarts RX on ENDRX.
                    let chn = s.rx_ppi_ch.load(Ordering::Relaxed);
                    ppi::regs().chenset().write(|w| w.0 = 1 << chn);

                    // ENDRX may have fired while we were programming the
                    // buffer; fold it into the counter before the race check
                    // below.
                    if r.events_endrx().read() != 0 {
                        r.events_endrx().write_value(0);

                        let val = s.rx_ended_count.load(Ordering::Relaxed);
                        s.rx_ended_count.store(val.wrapping_add(1), Ordering::Relaxed);
                    }

                    // Race check: if every started transfer has already ended
                    // (started == ended) but the PPI channel is still enabled
                    // -- i.e. it did not get to trigger STARTRX for the
                    // transfer that just ended -- start RX manually and
                    // disable the channel so it can't trigger twice.
                    let rx_ended = s.rx_ended_count.load(Ordering::Relaxed);
                    let rx_started = s.rx_started_count.load(Ordering::Relaxed);
                    let rxend_happened = rx_started == rx_ended;

                    let ppi_ch_enabled = ppi::regs().chen().read().ch(chn as _);

                    if rxend_happened && ppi_ch_enabled {
                        ppi::regs().chenclr().write(|w| w.set_ch(chn as _, true));
                        r.tasks_startrx().write_value(1);
                    }

                    // Reserve the chunk in the ring buffer for the DMA to
                    // fill; `fill_buf()` exposes data based on the byte
                    // counter, not this reservation.
                    rx.push_done(half_len);

                    s.rx_started_count.store(rx_started.wrapping_add(1), Ordering::Relaxed);
                    s.rx_started.store(true, Ordering::Relaxed);
                } else {
                    // Not enough free space for another half-buffer: stop
                    // taking RXSTARTED interrupts until `consume()` frees
                    // space and re-enables them.
                    r.intenclr().write(|w| w.set_rxstarted(true));
                }
            }
        }

        // --- TX half.
        if let Some(mut tx) = unsafe { s.tx_buf.try_reader() } {
            // A TX DMA transfer finished: release the sent bytes from the
            // ring buffer and wake a pending `write()`/`flush()`.
            if r.events_endtx().read() != 0 {
                r.events_endtx().write_value(0);

                let n = s.tx_count.load(Ordering::Relaxed);
                tx.pop_done(n);
                ss.tx_waker.wake();
                s.tx_count.store(0, Ordering::Relaxed);
            }

            // No transfer in flight: start sending the next chunk, capped at
            // the maximum EasyDMA transfer size.
            if s.tx_count.load(Ordering::Relaxed) == 0 {
                let (ptr, len) = tx.pop_buf();
                let len = len.min(EASY_DMA_SIZE);
                if len != 0 {
                    s.tx_count.store(len, Ordering::Relaxed);
                    r.txd().ptr().write_value(ptr as u32);
                    r.txd().maxcnt().write(|w| w.set_maxcnt(len as _));
                    r.tasks_starttx().write_value(1);
                }
            }
        }
    }
}
/// Buffered, interrupt-driven UARTE driver (both directions).
///
/// Received bytes are written by DMA into a user-provided ring buffer in the
/// background; writes are staged in a second ring buffer and drained by the
/// interrupt handler.
pub struct BufferedUarte<'d, U: UarteInstance, T: TimerInstance> {
    tx: BufferedUarteTx<'d, U>,
    rx: BufferedUarteRx<'d, U, T>,
}

// The driver holds no self-references, so it can be moved freely.
impl<'d, U: UarteInstance, T: TimerInstance> Unpin for BufferedUarte<'d, U, T> {}
impl<'d, U: UarteInstance, T: TimerInstance> BufferedUarte<'d, U, T> {
    /// Create a new `BufferedUarte` without hardware flow control.
    ///
    /// In addition to the UARTE peripheral and pins, this needs a timer
    /// (used as a received-byte counter), two PPI channels and a PPI group
    /// for keeping RX DMA transfers running back-to-back.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: impl Peripheral<P = U> + 'd,
        timer: impl Peripheral<P = T> + 'd,
        ppi_ch1: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_ch2: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_group: impl Peripheral<P = impl Group> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: impl Peripheral<P = impl GpioPin> + 'd,
        txd: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, timer, rxd, txd, ppi_ch1, ppi_ch2, ppi_group);
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.map_into(),
            ppi_ch2.map_into(),
            ppi_group.map_into(),
            rxd.map_into(),
            txd.map_into(),
            None,
            None,
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    /// Create a new `BufferedUarte` with RTS/CTS hardware flow control.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rtscts(
        uarte: impl Peripheral<P = U> + 'd,
        timer: impl Peripheral<P = T> + 'd,
        ppi_ch1: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_ch2: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_group: impl Peripheral<P = impl Group> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: impl Peripheral<P = impl GpioPin> + 'd,
        txd: impl Peripheral<P = impl GpioPin> + 'd,
        cts: impl Peripheral<P = impl GpioPin> + 'd,
        rts: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, timer, rxd, txd, cts, rts, ppi_ch1, ppi_ch2, ppi_group);
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.map_into(),
            ppi_ch2.map_into(),
            ppi_group.map_into(),
            rxd.map_into(),
            txd.map_into(),
            Some(cts.map_into()),
            Some(rts.map_into()),
            config,
            rx_buffer,
            tx_buffer,
        )
    }

    /// Shared constructor body: configure the peripheral, build the TX and
    /// RX halves, then enable the UARTE and its interrupt.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: PeripheralRef<'d, U>,
        timer: PeripheralRef<'d, T>,
        ppi_ch1: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_ch2: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_group: PeripheralRef<'d, AnyGroup>,
        rxd: PeripheralRef<'d, AnyPin>,
        txd: PeripheralRef<'d, AnyPin>,
        cts: Option<PeripheralRef<'d, AnyPin>>,
        rts: Option<PeripheralRef<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
        tx_buffer: &'d mut [u8],
    ) -> Self {
        // Hardware flow control is enabled iff a CTS pin was supplied.
        configure(U::regs(), config, cts.is_some());

        // The peripheral handle is cloned so both halves can hold it; both
        // halves remain tied to the 'd lifetime.
        let tx = BufferedUarteTx::new_innerer(unsafe { peri.clone_unchecked() }, txd, cts, tx_buffer);
        let rx = BufferedUarteRx::new_innerer(peri, timer, ppi_ch1, ppi_ch2, ppi_group, rxd, rts, rx_buffer);

        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend the interrupt once so the handler kicks off the first RX/TX
        // transfers.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };

        // Both halves are alive: refcount 2. Each half's Drop calls
        // `drop_tx_rx`, which presumably decrements this and deconfigures
        // the peripheral on the last drop.
        U::state().tx_rx_refcount.store(2, Ordering::Relaxed);

        Self { tx, rx }
    }

    /// Adjust the baud rate at runtime.
    pub fn set_baudrate(&mut self, baudrate: Baudrate) {
        let r = U::regs();
        r.baudrate().write(|w| w.set_baudrate(baudrate));
    }

    /// Split the driver into independently usable RX and TX halves.
    pub fn split(self) -> (BufferedUarteRx<'d, U, T>, BufferedUarteTx<'d, U>) {
        (self.rx, self.tx)
    }

    /// Split into RX and TX halves by mutable reference, without consuming
    /// the driver.
    pub fn split_by_ref(&mut self) -> (&mut BufferedUarteRx<'d, U, T>, &mut BufferedUarteTx<'d, U>) {
        (&mut self.rx, &mut self.tx)
    }

    /// Read received bytes into `buf`, waiting until at least one byte is
    /// available. Returns the number of bytes read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        self.rx.read(buf).await
    }

    /// Wait for received data and return a slice of the internal RX buffer.
    /// Release the bytes afterwards with [`consume`](Self::consume).
    pub async fn fill_buf(&mut self) -> Result<&[u8], Error> {
        self.rx.fill_buf().await
    }

    /// Release `amt` bytes previously returned by `fill_buf`.
    pub fn consume(&mut self, amt: usize) {
        self.rx.consume(amt)
    }

    /// Queue bytes for transmission, waiting for buffer space if necessary.
    /// Returns how many bytes were accepted.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.write(buf).await
    }

    /// Queue bytes without waiting; returns `Ok(0)` if the TX buffer is full.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        self.tx.try_write(buf)
    }

    /// Wait until the internal TX buffer has been fully drained.
    pub async fn flush(&mut self) -> Result<(), Error> {
        self.tx.flush().await
    }
}
/// Transmit-only half of the buffered UARTE driver.
pub struct BufferedUarteTx<'d, U: UarteInstance> {
    _peri: PeripheralRef<'d, U>,
}
impl<'d, U: UarteInstance> BufferedUarteTx<'d, U> {
    /// Create a standalone buffered TX-only driver without flow control.
    pub fn new(
        uarte: impl Peripheral<P = U> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        txd: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, txd);
        Self::new_inner(uarte, txd.map_into(), None, config, tx_buffer)
    }

    /// Create a standalone buffered TX-only driver with CTS flow control.
    pub fn new_with_cts(
        uarte: impl Peripheral<P = U> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        txd: impl Peripheral<P = impl GpioPin> + 'd,
        cts: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, txd, cts);
        Self::new_inner(uarte, txd.map_into(), Some(cts.map_into()), config, tx_buffer)
    }

    /// Standalone constructor body: configure, set up TX, then enable the
    /// peripheral and interrupt. Refcount is 1 since only this half exists.
    fn new_inner(
        peri: PeripheralRef<'d, U>,
        txd: PeripheralRef<'d, AnyPin>,
        cts: Option<PeripheralRef<'d, AnyPin>>,
        config: Config,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, cts.is_some());
        let this = Self::new_innerer(peri, txd, cts, tx_buffer);
        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the handler starts draining the TX buffer as soon as
        // data is written.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);
        this
    }

    /// Setup shared with `BufferedUarte::new_inner`: pins, state and the
    /// ENDTX interrupt -- but does not enable the peripheral.
    fn new_innerer(
        peri: PeripheralRef<'d, U>,
        txd: PeripheralRef<'d, AnyPin>,
        cts: Option<PeripheralRef<'d, AnyPin>>,
        tx_buffer: &'d mut [u8],
    ) -> Self {
        let r = U::regs();

        configure_tx_pins(r, txd, cts);

        let s = U::buffered_state();
        s.tx_count.store(0, Ordering::Relaxed);
        let len = tx_buffer.len();
        // Hand the caller's buffer to the ring buffer; the 'd lifetime keeps
        // it alive as long as the driver.
        unsafe { s.tx_buf.init(tx_buffer.as_mut_ptr(), len) };

        r.events_txstarted().write_value(0);

        // ENDTX drives the TX state machine in the interrupt handler.
        r.intenset().write(|w| {
            w.set_endtx(true);
        });

        Self { _peri: peri }
    }

    /// Queue bytes for transmission. Resolves once at least one byte has
    /// been accepted, returning how many were copied into the TX buffer.
    pub fn write<'a>(&'a mut self, buf: &'a [u8]) -> impl Future<Output = Result<usize, Error>> + 'a {
        poll_fn(move |cx| {
            let ss = U::state();
            let s = U::buffered_state();

            let mut tx = unsafe { s.tx_buf.writer() };
            let tx_buf = tx.push_slice();
            if tx_buf.is_empty() {
                // Buffer full: wait for the interrupt handler to drain it.
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            let n = min(tx_buf.len(), buf.len());
            tx_buf[..n].copy_from_slice(&buf[..n]);
            tx.push_done(n);

            // Make the copied bytes visible before pending the interrupt,
            // which starts a TX transfer if none is in flight.
            compiler_fence(Ordering::SeqCst);
            U::Interrupt::pend();

            Poll::Ready(Ok(n))
        })
    }

    /// Non-blocking variant of [`write`](Self::write): copies what fits and
    /// returns `Ok(0)` if the TX buffer is full.
    pub fn try_write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        let s = U::buffered_state();

        let mut tx = unsafe { s.tx_buf.writer() };
        let tx_buf = tx.push_slice();
        if tx_buf.is_empty() {
            return Ok(0);
        }

        let n = min(tx_buf.len(), buf.len());
        tx_buf[..n].copy_from_slice(&buf[..n]);
        tx.push_done(n);

        // Make the copied bytes visible before pending the interrupt.
        compiler_fence(Ordering::SeqCst);
        U::Interrupt::pend();

        Ok(n)
    }

    /// Resolves once the internal TX buffer is empty (all queued bytes have
    /// been handed off by the interrupt handler).
    pub fn flush(&mut self) -> impl Future<Output = Result<(), Error>> + '_ {
        poll_fn(move |cx| {
            let ss = U::state();
            let s = U::buffered_state();
            if !s.tx_buf.is_empty() {
                ss.tx_waker.register(cx.waker());
                return Poll::Pending;
            }

            Poll::Ready(Ok(()))
        })
    }
}
impl<'a, U: UarteInstance> Drop for BufferedUarteTx<'a, U> {
    fn drop(&mut self) {
        let r = U::regs();

        // Mask all TX-related interrupts before stopping.
        r.intenclr().write(|w| {
            w.set_txdrdy(true);
            w.set_txstarted(true);
            w.set_txstopped(true);
        });
        r.events_txstopped().write_value(0);
        r.tasks_stoptx().write_value(1);
        // Busy-wait until the hardware confirms TX has stopped, so the DMA
        // no longer references the buffer we are about to release.
        while r.events_txstopped().read() == 0 {}

        let s = U::buffered_state();
        unsafe { s.tx_buf.deinit() }

        // Release this half's reference; presumably the last half to drop
        // deconfigures the peripheral (see `drop_tx_rx`).
        let s = U::state();
        drop_tx_rx(r, s);
    }
}
/// Receive-only half of the buffered UARTE driver.
///
/// Owns the counter timer and the PPI channels/group used to keep RX DMA
/// transfers running back-to-back and to count received bytes.
pub struct BufferedUarteRx<'d, U: UarteInstance, T: TimerInstance> {
    _peri: PeripheralRef<'d, U>,
    timer: Timer<'d, T>,
    _ppi_ch1: Ppi<'d, AnyConfigurableChannel, 1, 1>,
    _ppi_ch2: Ppi<'d, AnyConfigurableChannel, 1, 2>,
    _ppi_group: PpiGroup<'d, AnyGroup>,
}
impl<'d, U: UarteInstance, T: TimerInstance> BufferedUarteRx<'d, U, T> {
    /// Create a standalone buffered RX-only driver without flow control.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        uarte: impl Peripheral<P = U> + 'd,
        timer: impl Peripheral<P = T> + 'd,
        ppi_ch1: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_ch2: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_group: impl Peripheral<P = impl Group> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, timer, rxd, ppi_ch1, ppi_ch2, ppi_group);
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.map_into(),
            ppi_ch2.map_into(),
            ppi_group.map_into(),
            rxd.map_into(),
            None,
            config,
            rx_buffer,
        )
    }

    /// Create a standalone buffered RX-only driver with RTS flow control.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_rts(
        uarte: impl Peripheral<P = U> + 'd,
        timer: impl Peripheral<P = T> + 'd,
        ppi_ch1: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_ch2: impl Peripheral<P = impl ConfigurableChannel> + 'd,
        ppi_group: impl Peripheral<P = impl Group> + 'd,
        _irq: impl interrupt::typelevel::Binding<U::Interrupt, InterruptHandler<U>> + 'd,
        rxd: impl Peripheral<P = impl GpioPin> + 'd,
        rts: impl Peripheral<P = impl GpioPin> + 'd,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        into_ref!(uarte, timer, rxd, rts, ppi_ch1, ppi_ch2, ppi_group);
        Self::new_inner(
            uarte,
            timer,
            ppi_ch1.map_into(),
            ppi_ch2.map_into(),
            ppi_group.map_into(),
            rxd.map_into(),
            Some(rts.map_into()),
            config,
            rx_buffer,
        )
    }

    /// Standalone constructor body: configure, set up RX, then enable the
    /// peripheral and interrupt. Refcount is 1 since only this half exists.
    #[allow(clippy::too_many_arguments)]
    fn new_inner(
        peri: PeripheralRef<'d, U>,
        timer: PeripheralRef<'d, T>,
        ppi_ch1: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_ch2: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_group: PeripheralRef<'d, AnyGroup>,
        rxd: PeripheralRef<'d, AnyPin>,
        rts: Option<PeripheralRef<'d, AnyPin>>,
        config: Config,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        configure(U::regs(), config, rts.is_some());
        let this = Self::new_innerer(peri, timer, ppi_ch1, ppi_ch2, ppi_group, rxd, rts, rx_buffer);
        U::regs().enable().write(|w| w.set_enable(vals::Enable::ENABLED));
        // Pend once so the handler programs the first RX DMA transfer.
        U::Interrupt::pend();
        unsafe { U::Interrupt::enable() };
        U::state().tx_rx_refcount.store(1, Ordering::Relaxed);
        this
    }

    /// Setup shared with `BufferedUarte::new_inner`: pins, ring buffer,
    /// interrupts, byte-counter timer and PPI plumbing -- but does not
    /// enable the peripheral.
    #[allow(clippy::too_many_arguments)]
    fn new_innerer(
        peri: PeripheralRef<'d, U>,
        timer: PeripheralRef<'d, T>,
        ppi_ch1: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_ch2: PeripheralRef<'d, AnyConfigurableChannel>,
        ppi_group: PeripheralRef<'d, AnyGroup>,
        rxd: PeripheralRef<'d, AnyPin>,
        rts: Option<PeripheralRef<'d, AnyPin>>,
        rx_buffer: &'d mut [u8],
    ) -> Self {
        // The buffer is used as two equal halves (double buffering), so it
        // must split evenly.
        assert!(rx_buffer.len() % 2 == 0);

        let r = U::regs();

        configure_rx_pins(r, rxd, rts);

        let s = U::buffered_state();
        s.rx_started_count.store(0, Ordering::Relaxed);
        s.rx_ended_count.store(0, Ordering::Relaxed);
        s.rx_started.store(false, Ordering::Relaxed);
        // Cap the ring so each half fits in a single EasyDMA transfer.
        let rx_len = rx_buffer.len().min(EASY_DMA_SIZE * 2);
        unsafe { s.rx_buf.init(rx_buffer.as_mut_ptr(), rx_len) };

        // Clear any stale error flags and events before unmasking interrupts.
        let errors = r.errorsrc().read();
        r.errorsrc().write_value(errors);
        r.events_rxstarted().write_value(0);
        r.events_error().write_value(0);
        r.events_endrx().write_value(0);

        r.intenset().write(|w| {
            // NOTE(review): ENDTX is also unmasked here even though this is
            // the RX half -- presumably because TX and RX share one
            // interrupt handler; confirm intent.
            w.set_endtx(true);
            w.set_rxstarted(true);
            w.set_error(true);
            w.set_endrx(true);
        });

        // The timer runs as a counter, incremented on each RXDRDY via
        // `ppi_ch1`, i.e. it tracks how many bytes have been received. The
        // compare-clear at 2*rx_len makes it wrap in step with the ring
        // buffer's 0..2*len index scheme used by `fill_buf`.
        let timer = Timer::new_counter(timer);
        timer.cc(1).write(rx_len as u32 * 2);
        timer.cc(1).short_compare_clear();
        timer.clear();
        timer.start();

        let mut ppi_ch1 = Ppi::new_one_to_one(ppi_ch1, Event::from_reg(r.events_rxdrdy()), timer.task_count());
        ppi_ch1.enable();

        // Record the restart channel number for the interrupt handler.
        s.rx_ppi_ch.store(ppi_ch2.number() as u8, Ordering::Relaxed);
        let mut ppi_group = PpiGroup::new(ppi_group);
        // ENDRX -> STARTRX (seamless restart into the next programmed
        // buffer) and simultaneously disable the whole group, so the restart
        // fires at most once; the interrupt handler re-enables the channel
        // after programming the next buffer.
        let mut ppi_ch2 = Ppi::new_one_to_two(
            ppi_ch2,
            Event::from_reg(r.events_endrx()),
            Task::from_reg(r.tasks_startrx()),
            ppi_group.task_disable_all(),
        );
        ppi_ch2.disable();
        ppi_group.add_channel(&ppi_ch2);

        Self {
            _peri: peri,
            timer,
            _ppi_ch1: ppi_ch1,
            _ppi_ch2: ppi_ch2,
            _ppi_group: ppi_group,
        }
    }

    /// Read received bytes into `buf`, waiting until at least one byte is
    /// available. Returns the number of bytes read.
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        let data = self.fill_buf().await?;
        let n = data.len().min(buf.len());
        buf[..n].copy_from_slice(&data[..n]);
        self.consume(n);
        Ok(n)
    }

    /// Wait for received data and return a contiguous slice of the internal
    /// buffer. The slice stops at the physical end of the ring, so a second
    /// call may be needed to see wrapped data. Release bytes with
    /// [`consume`](Self::consume).
    pub fn fill_buf(&mut self) -> impl Future<Output = Result<&'_ [u8], Error>> {
        poll_fn(move |cx| {
            compiler_fence(Ordering::SeqCst);

            let r = U::regs();
            let s = U::buffered_state();
            let ss = U::state();

            // Capture the byte counter: total bytes received so far, modulo
            // 2*len (the timer compare-clears at 2*len).
            T::regs().tasks_capture(0).write_value(1);
            let mut end = T::regs().cc(0).read() as usize;
            // Defensive clamp: with the compare-clear in place the captured
            // value should already be < 2*len.
            if end > s.rx_buf.len() * 2 {
                end = 0
            }

            // Ring indices live in 0..2*len so that start == end
            // unambiguously means "empty".
            let mut start = s.rx_buf.start.load(Ordering::Relaxed);
            let len = s.rx_buf.len();
            if start == end {
                // Nothing buffered: arm RXDRDY so the next received byte
                // wakes us, then wait.
                ss.rx_waker.register(cx.waker());
                r.intenset().write(|w| w.set_rxdrdy(true));
                return Poll::Pending;
            }

            // Fold the 0..2*len indices down to physical 0..len offsets.
            if start >= len {
                start -= len
            }
            if end >= len {
                end -= len
            }

            // Contiguous readable span; if the data wraps, expose only the
            // tail up to the end of the buffer for now.
            let n = if end > start { end - start } else { len - start };
            assert!(n != 0);

            let buf = s.rx_buf.buf.load(Ordering::Relaxed);
            // SAFETY: [start, start+n) lies within the initialized ring
            // buffer, and covers only bytes the byte counter reports as
            // already received.
            Poll::Ready(Ok(unsafe { slice::from_raw_parts(buf.add(start), n) }))
        })
    }

    /// Release `amt` bytes previously returned by `fill_buf`, freeing ring
    /// buffer space for the RX DMA.
    pub fn consume(&mut self, amt: usize) {
        if amt == 0 {
            return;
        }

        let s = U::buffered_state();
        let mut rx = unsafe { s.rx_buf.reader() };
        rx.pop_done(amt);
        // Space was freed: re-arm RXSTARTED so the interrupt handler can
        // program the next DMA chunk (it masks this when the buffer is too
        // full).
        U::regs().intenset().write(|w| w.set_rxstarted(true));
    }

    // Non-blocking check used by the `ReadReady` impls: is any received
    // data currently buffered?
    fn read_ready() -> Result<bool, Error> {
        let state = U::buffered_state();
        Ok(!state.rx_buf.is_empty())
    }
}
impl<'a, U: UarteInstance, T: TimerInstance> Drop for BufferedUarteRx<'a, U, T> {
    fn drop(&mut self) {
        // Disable the PPI channels first so no hardware restart can fire
        // while we are tearing down.
        self._ppi_group.disable_all();

        let r = U::regs();

        self.timer.stop();

        // Mask all RX-related interrupts before stopping.
        r.intenclr().write(|w| {
            w.set_rxdrdy(true);
            w.set_rxstarted(true);
            w.set_rxto(true);
        });
        r.events_rxto().write_value(0);
        r.tasks_stoprx().write_value(1);
        // Busy-wait for the RX timeout event confirming RX has stopped, so
        // the DMA no longer references the buffer we are about to release.
        while r.events_rxto().read() == 0 {}

        let s = U::buffered_state();
        unsafe { s.rx_buf.deinit() }

        // Release this half's reference; presumably the last half to drop
        // deconfigures the peripheral (see `drop_tx_rx`).
        let s = U::state();
        drop_tx_rx(r, s);
    }
}
mod _embedded_io {
use super::*;
impl embedded_io_async::Error for Error {
fn kind(&self) -> embedded_io_async::ErrorKind {
match *self {}
}
}
impl<'d, U: UarteInstance, T: TimerInstance> embedded_io_async::ErrorType for BufferedUarte<'d, U, T> {
type Error = Error;
}
impl<'d, U: UarteInstance, T: TimerInstance> embedded_io_async::ErrorType for BufferedUarteRx<'d, U, T> {
type Error = Error;
}
impl<'d, U: UarteInstance> embedded_io_async::ErrorType for BufferedUarteTx<'d, U> {
type Error = Error;
}
impl<'d, U: UarteInstance, T: TimerInstance> embedded_io_async::Read for BufferedUarte<'d, U, T> {
async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.read(buf).await
}
}
impl<'d: 'd, U: UarteInstance, T: TimerInstance> embedded_io_async::Read for BufferedUarteRx<'d, U, T> {
async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.read(buf).await
}
}
impl<'d, U: UarteInstance, T: TimerInstance + 'd> embedded_io_async::ReadReady for BufferedUarte<'d, U, T> {
fn read_ready(&mut self) -> Result<bool, Self::Error> {
BufferedUarteRx::<'d, U, T>::read_ready()
}
}
impl<'d, U: UarteInstance, T: TimerInstance + 'd> embedded_io_async::ReadReady for BufferedUarteRx<'d, U, T> {
fn read_ready(&mut self) -> Result<bool, Self::Error> {
Self::read_ready()
}
}
impl<'d, U: UarteInstance, T: TimerInstance> embedded_io_async::BufRead for BufferedUarte<'d, U, T> {
async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
self.fill_buf().await
}
fn consume(&mut self, amt: usize) {
self.consume(amt)
}
}
impl<'d: 'd, U: UarteInstance, T: TimerInstance> embedded_io_async::BufRead for BufferedUarteRx<'d, U, T> {
async fn fill_buf(&mut self) -> Result<&[u8], Self::Error> {
self.fill_buf().await
}
fn consume(&mut self, amt: usize) {
self.consume(amt)
}
}
impl<'d, U: UarteInstance, T: TimerInstance> embedded_io_async::Write for BufferedUarte<'d, U, T> {
async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
self.write(buf).await
}
async fn flush(&mut self) -> Result<(), Self::Error> {
self.flush().await
}
}
impl<'d: 'd, U: UarteInstance> embedded_io_async::Write for BufferedUarteTx<'d, U> {
async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
self.write(buf).await
}
async fn flush(&mut self) -> Result<(), Self::Error> {
self.flush().await
}
}
}