// sifli_hal/dma/dma.rs

1// The following code is modified from embassy-stm32 under MIT license
2// https://github.com/embassy-rs/embassy/tree/main/embassy-stm32
3// Special thanks to the Embassy Project and its contributors for their work!
4
5use core::future::{poll_fn, Future};
6use core::pin::Pin;
7use core::sync::atomic::{fence, AtomicUsize, Ordering};
8use core::task::{Context, Poll, Waker};
9
10use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
11use embassy_sync::waitqueue::AtomicWaker;
12
13use super::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer};
14use super::word::{Word, WordSize};
15use super::{AnyChannel, Channel, Request, STATE};
16use crate::{interrupt, pac, peripherals};
17
18pub use pac::dmac::vals::Pl as Priority;
19pub use pac::dmac::vals::Dir as Dir;
20
/// Static description of a DMA channel: the owning DMAC register block and
/// the channel index within it.
pub(crate) struct ChannelInfo {
    // Register block of the DMAC controller this channel belongs to.
    pub(crate) dma: pac::dmac::Dmac,
    // Zero-based channel number within that controller.
    pub(crate) num: usize,
}
25
/// DMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Request priority level
    pub priority: Priority,
    /// Channel interrupt priority level
    pub interrupt_priority: interrupt::Priority,
    /// Enable circular DMA
    ///
    /// Note:
    /// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
    /// Since a circular-mode DMA transfer needs to be stopped manually, `.await`-ing it in the
    /// current task would block that task forever.
    pub circular: bool,
    /// Enable half transfer interrupt
    pub half_transfer_ir: bool,
    /// Enable transfer complete interrupt
    pub complete_transfer_ir: bool,
}
46
47impl Default for TransferOptions {
48    fn default() -> Self {
49        Self {
50            priority: Priority::VeryHigh,
51            interrupt_priority: interrupt::Priority::P1,
52            circular: false,
53            half_transfer_ir: false,
54            complete_transfer_ir: true,
55        }
56    }
57}
58
59impl From<WordSize> for pac::dmac::vals::Size {
60    fn from(raw: WordSize) -> Self {
61        match raw {
62            WordSize::OneByte => Self::Bits8,
63            WordSize::TwoBytes => Self::Bits16,
64            WordSize::FourBytes => Self::Bits32,
65        }
66    }
67}
68
/// Per-channel shared state used to communicate between the IRQ handler and
/// code awaiting the channel (futures / ring buffers).
pub(crate) struct ChannelState {
    // Woken by the IRQ handler on half-transfer and transfer-complete events.
    waker: AtomicWaker,
    // Count of transfer-complete events since last reset; the ring-buffer
    // logic swaps this back to zero to detect buffer wrap-arounds.
    complete_count: AtomicUsize,
}
73
impl ChannelState {
    /// Const initializer, used to build the static per-channel `STATE` array.
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
        complete_count: AtomicUsize::new(0),
    };
}
80
81/// safety: must be called only once
82pub(crate) unsafe fn init(
83    cs: critical_section::CriticalSection,
84) {
85    crate::rcc::enable_and_reset_with_cs::<peripherals::DMAC1>(cs);
86}
87
88impl AnyChannel {
89    /// Safety: Must be called with a matching set of parameters for a valid dma channel
90    pub(crate) unsafe fn on_irq(&self) {
91        let info = self.info();
92        let state = &STATE[self.id as usize];
93        let r = info.dma;
94        let cr = r.ccr(info.num);
95        let isr = r.isr().read();
96
97        if isr.teif(info.num) {
98            panic!("DMA: error on DMA@{:08x} channel {}", r.as_ptr() as u32, info.num);
99        }
100
101        if isr.htif(info.num) && cr.read().htie() {
102            // Acknowledge half transfer complete interrupt
103            r.ifcr().write(|w| w.set_chtif(info.num % 4, true));
104        } else if isr.tcif(info.num % 4) && cr.read().tcie() {
105            // Acknowledge transfer complete interrupt
106            r.ifcr().write(|w| w.set_ctcif(info.num % 4, true));
107
108            // stop the channel.
109            // we should set EN manually on sf32. (?)
110            if !r.ccr(info.num).read().circ() {
111                r.ccr(info.num).modify(|w| {
112                    w.set_en(false); 
113                });
114            }
115            state.complete_count.fetch_add(1, Ordering::Release);
116        } else {
117            return;
118        }
119        state.waker.wake();
120    }
121
122    unsafe fn configure(
123        &self,
124        request: Request,
125        dir: Dir,
126        peri_addr: *const u32,
127        mem_addr: *mut u32,
128        mem_len: usize,
129        incr_mem: bool,
130        mem_size: WordSize,
131        peri_size: WordSize,
132        options: TransferOptions,
133    ) {
134        // "Preceding reads and writes cannot be moved past subsequent writes."
135        fence(Ordering::SeqCst);
136
137        let info = self.info();
138        let r = info.dma;
139        let state: &ChannelState = &STATE[self.id as usize];
140        let channel_num = info.num;
141
142        state.complete_count.store(0, Ordering::Release);
143        self.clear_irqs();
144
145        // NDTR is the number of transfers in the *peripheral* word size.
146        // ex: if mem_size=1, peri_size=4 and ndtr=3 it'll do 12 mem transfers, 3 peri transfers.
147        let ndtr = match (mem_size, peri_size) {
148            (WordSize::FourBytes, WordSize::OneByte) => mem_len * 4,
149            (WordSize::FourBytes, WordSize::TwoBytes) | (WordSize::TwoBytes, WordSize::OneByte) => mem_len * 2,
150            (WordSize::FourBytes, WordSize::FourBytes)
151            | (WordSize::TwoBytes, WordSize::TwoBytes)
152            | (WordSize::OneByte, WordSize::OneByte) => mem_len,
153            (WordSize::TwoBytes, WordSize::FourBytes) | (WordSize::OneByte, WordSize::TwoBytes) => {
154                assert!(mem_len % 2 == 0);
155                mem_len / 2
156            }
157            (WordSize::OneByte, WordSize::FourBytes) => {
158                assert!(mem_len % 4 == 0);
159                mem_len / 4
160            }
161        };
162
163        assert!(ndtr > 0 && ndtr <= 0xFFFF);
164
165        
166        r.cpar(channel_num).write_value(pac::dmac::regs::Cpar(peri_addr as _));
167        
168        // r.cm0ar(channel_num).write_value(pac::dmac::regs::Cm0ar(0x2000_0100 as _));
169        // 0x1200_00AA -> 0x6200_00AA
170        let mem_addr = if mem_addr as u32 >= 0x2000_0000 {
171            mem_addr as u32
172        } else {
173            mem_addr as u32  + 0x5000_0000
174        };
175        info!("mem_addr {:X}", (mem_addr as u32 + 0x5000_0000));
176        r.cm0ar(channel_num).write_value(pac::dmac::regs::Cm0ar(mem_addr));
177        r.cndtr(channel_num).write_value(pac::dmac::regs::Cndtr(ndtr as _));
178        r.cselr(channel_num / 4)
179            .modify(|w| w.set_cs(channel_num % 4, request as u8));
180        r.ccr(channel_num).write(|w| {
181            w.set_dir(dir.into());
182            w.set_msize(mem_size.into());
183            w.set_psize(peri_size.into());
184            w.set_pl(options.priority.into());
185            w.set_minc(incr_mem);
186            w.set_pinc(false);
187            w.set_teie(true);
188            w.set_htie(options.half_transfer_ir);
189            w.set_tcie(options.complete_transfer_ir);
190            w.set_circ(options.circular);
191            w.set_en(false); // don't start yet
192        });
193
194        crate::_generated::enable_dma_channel_interrupt_priority(self.id, options.interrupt_priority);
195    }
196
197    fn start(&self) {
198        let info = self.info();
199        let r = info.dma;
200        r.ccr(info.num).modify(|w| w.set_en(true))
201    }
202
203    fn clear_irqs(&self) {
204        let info = self.info();
205        let r = info.dma;
206
207        r.ifcr().write(|w| {
208            w.set_chtif(info.num, true);
209            w.set_ctcif(info.num, true);
210            w.set_cteif(info.num, true);
211        });
212    }
213
214    fn request_stop(&self) {
215        let info = self.info();
216        let r = info.dma;
217        // Disable the channel. Keep the IEs enabled so the irqs still fire.
218        r.ccr(info.num).write(|w| {
219            w.set_teie(true);
220            w.set_tcie(true);
221        });
222    }
223
224    fn request_pause(&self) {
225        let info = self.info();
226        let r = info.dma;
227        // Disable the channel without overwriting the existing configuration
228        r.ccr(info.num).modify(|w| {
229            w.set_en(false);
230        });
231    }
232
233    fn is_running(&self) -> bool {
234        let info = self.info();
235        let r = info.dma;
236        r.ccr(info.num).read().en()
237    }
238
239    fn get_remaining_transfers(&self) -> u16 {
240        let info = self.info();
241        let r = info.dma;
242        r.cndtr(info.num).read().ndt()
243    }
244
245    fn disable_circular_mode(&self) {
246        let info = self.info();
247        let r = info.dma;
248        r.ccr(info.num).modify(|w| {
249            w.set_circ(false);
250        })
251    }
252
253    fn poll_stop(&self) -> Poll<()> {
254        use core::sync::atomic::compiler_fence;
255        compiler_fence(Ordering::SeqCst);
256
257        if !self.is_running() {
258            Poll::Ready(())
259        } else {
260            Poll::Pending
261        }
262    }
263}
264
/// DMA transfer.
///
/// Owns a configured-and-started channel; dropping it stops the transfer and
/// waits for the channel to disable. Awaiting it resolves on completion.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: PeripheralRef<'a, AnyChannel>,
}
270
impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    ///
    /// # Safety
    ///
    /// `peri_addr` must point to the data register of the peripheral matching
    /// `request`, and must remain valid for the duration of the transfer.
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    ///
    /// # Safety
    ///
    /// In addition to the requirements of [`Self::new_read`], `buf` must point
    /// to memory that stays valid (and is not otherwise accessed) until the
    /// transfer finishes or is stopped.
    pub unsafe fn new_read_raw<MW: Word, PW: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut PW,
        buf: *mut [MW],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut MW as *mut u32,
            buf.len(),
            true,
            MW::size(),
            PW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    ///
    /// # Safety
    ///
    /// `peri_addr` must point to the data register of the peripheral matching
    /// `request`, and must remain valid for the duration of the transfer.
    pub unsafe fn new_write<MW: Word, PW: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: &'a [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    ///
    /// # Safety
    ///
    /// In addition to the requirements of [`Self::new_write`], `buf` must
    /// point to memory that stays valid until the transfer finishes or is stopped.
    pub unsafe fn new_write_raw<MW: Word, PW: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: *const [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            // The engine only reads from this buffer in MemoryToPeripheral
            // mode; the *mut cast is for the shared `new_inner` signature.
            buf as *const MW as *mut u32,
            buf.len(),
            true,
            MW::size(),
            PW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    ///
    /// # Safety
    ///
    /// `peri_addr` must point to the data register of the peripheral matching
    /// `request`; `repeated` must stay valid for the duration of the transfer.
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            // Memory increment disabled: the same word is sent `count` times.
            false,
            W::size(),
            W::size(),
            options,
        )
    }

    // Shared constructor: validates the length, programs the channel, then
    // starts it immediately.
    unsafe fn new_inner(
        channel: PeripheralRef<'a, AnyChannel>,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        mem_size: WordSize,
        peri_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        // The hardware transfer counter is 16 bits wide.
        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        channel.configure(
            request, dir, peri_addr, mem_addr, mem_len, incr_mem, mem_size, peri_size, options,
        );
        channel.start();
        Self { channel }
    }

    /// Request the transfer to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // Transfer already completed: skip `Drop`, which would request a stop
        // and spin on `is_running` again.
        core::mem::forget(self);
    }
}
429
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Stop the channel and busy-wait until the hardware has actually
        // disabled it, so the borrowed buffers may be reused afterwards.
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
439
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();
    // The waker is registered *before* checking `is_running`, so a completion
    // IRQ racing with this poll still wakes the task (no lost-wakeup window).
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state: &ChannelState = &STATE[self.channel.id as usize];

        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
455// ==============================
456
457struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>);
458
459impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
460    fn get_remaining_transfers(&self) -> usize {
461        self.0.get_remaining_transfers() as _
462    }
463
464    fn reset_complete_count(&mut self) -> usize {
465        let state = &STATE[self.0.id as usize];
466        return state.complete_count.swap(0, Ordering::AcqRel);
467    }
468
469    fn set_waker(&mut self, waker: &Waker) {
470        STATE[self.0.id as usize].waker.register(waker);
471    }
472}
473
/// Ringbuffer for receiving data using DMA circular mode.
pub struct ReadableRingBuffer<'a, W: Word> {
    // Channel configured in circular peripheral-to-memory mode.
    channel: PeripheralRef<'a, AnyChannel>,
    // Software read-side bookkeeping over the DMA-written buffer.
    ringbuf: ReadableDmaRingBuffer<'a, W>,
}
479
480impl<'a, W: Word> ReadableRingBuffer<'a, W> {
481    /// Create a new ring buffer.
482    pub unsafe fn new(
483        channel: impl Peripheral<P = impl Channel> + 'a,
484        request: Request,
485        peri_addr: *mut W,
486        buffer: &'a mut [W],
487        mut options: TransferOptions,
488    ) -> Self {
489        into_ref!(channel);
490        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();
491
492        let buffer_ptr = buffer.as_mut_ptr();
493        let len = buffer.len();
494        let dir = Dir::PeripheralToMemory;
495        let data_size = W::size();
496
497        options.half_transfer_ir = true;
498        options.complete_transfer_ir = true;
499        options.circular = true;
500
501        channel.configure(
502            request,
503            dir,
504            peri_addr as *mut u32,
505            buffer_ptr as *mut u32,
506            len,
507            true,
508            data_size,
509            data_size,
510            options,
511        );
512
513        Self {
514            channel,
515            ringbuf: ReadableDmaRingBuffer::new(buffer),
516        }
517    }
518
519    /// Start the ring buffer operation.
520    ///
521    /// You must call this after creating it for it to work.
522    pub fn start(&mut self) {
523        self.channel.start();
524    }
525
526    /// Clear all data in the ring buffer.
527    pub fn clear(&mut self) {
528        self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
529    }
530
531    /// Read elements from the ring buffer
532    /// Return a tuple of the length read and the length remaining in the buffer
533    /// If not all of the elements were read, then there will be some elements in the buffer remaining
534    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
535    /// Error is returned if the portion to be read was overwritten by the DMA controller.
536    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
537        self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
538    }
539
540    /// Read an exact number of elements from the ringbuffer.
541    ///
542    /// Returns the remaining number of elements available for immediate reading.
543    /// Error is returned if the portion to be read was overwritten by the DMA controller.
544    ///
545    /// Async/Wake Behavior:
546    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
547    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
548    /// ring buffer was created with a buffer of size 'N':
549    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
550    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
551    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
552        self.ringbuf
553            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
554            .await
555    }
556
557    /// The current length of the ringbuffer
558    pub fn len(&mut self) -> Result<usize, Error> {
559        Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
560    }
561
562    /// The capacity of the ringbuffer
563    pub const fn capacity(&self) -> usize {
564        self.ringbuf.cap()
565    }
566
567    /// Set a waker to be woken when at least one byte is received.
568    pub fn set_waker(&mut self, waker: &Waker) {
569        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
570    }
571
572    /// Request the DMA to stop.
573    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
574    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
575    ///
576    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
577    pub fn request_stop(&mut self) {
578        self.channel.request_stop()
579    }
580
581    /// Request the transfer to pause, keeping the existing configuration for this channel.
582    /// To restart the transfer, call [`start`](Self::start) again.
583    ///
584    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
585    pub fn request_pause(&mut self) {
586        self.channel.request_pause()
587    }
588
589    /// Return whether DMA is still running.
590    ///
591    /// If this returns `false`, it can be because either the transfer finished, or
592    /// it was requested to stop early with [`request_stop`](Self::request_stop).
593    pub fn is_running(&mut self) -> bool {
594        self.channel.is_running()
595    }
596
597    /// Stop the DMA transfer and await until the buffer is full.
598    ///
599    /// This disables the DMA transfer's circular mode so that the transfer
600    /// stops when the buffer is full.
601    ///
602    /// This is designed to be used with streaming input data such as the
603    /// I2S/SAI or ADC.
604    ///
605    /// When using the UART, you probably want `request_stop()`.
606    pub async fn stop(&mut self) {
607        self.channel.disable_circular_mode();
608        //wait until cr.susp reads as true
609        poll_fn(|cx| {
610            self.set_waker(cx.waker());
611            self.channel.poll_stop()
612        })
613        .await
614    }
615}
616
impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
    fn drop(&mut self) {
        // Stop the channel and wait until the hardware deasserts EN before
        // releasing the buffer borrow.
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
626
/// Ringbuffer for writing data using DMA circular mode.
pub struct WritableRingBuffer<'a, W: Word> {
    // Channel configured in circular memory-to-peripheral mode.
    channel: PeripheralRef<'a, AnyChannel>,
    // Software write-side bookkeeping over the DMA-read buffer.
    ringbuf: WritableDmaRingBuffer<'a, W>,
}
632
633impl<'a, W: Word> WritableRingBuffer<'a, W> {
634    /// Create a new ring buffer.
635    pub unsafe fn new(
636        channel: impl Peripheral<P = impl Channel> + 'a,
637        request: Request,
638        peri_addr: *mut W,
639        buffer: &'a mut [W],
640        mut options: TransferOptions,
641    ) -> Self {
642        into_ref!(channel);
643        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();
644
645        let len = buffer.len();
646        let dir = Dir::MemoryToPeripheral;
647        let data_size = W::size();
648        let buffer_ptr = buffer.as_mut_ptr();
649
650        options.half_transfer_ir = true;
651        options.complete_transfer_ir = true;
652        options.circular = true;
653
654        channel.configure(
655            request,
656            dir,
657            peri_addr as *mut u32,
658            buffer_ptr as *mut u32,
659            len,
660            true,
661            data_size,
662            data_size,
663            options,
664        );
665
666        Self {
667            channel,
668            ringbuf: WritableDmaRingBuffer::new(buffer),
669        }
670    }
671
672    /// Start the ring buffer operation.
673    ///
674    /// You must call this after creating it for it to work.
675    pub fn start(&mut self) {
676        self.channel.start();
677    }
678
679    /// Clear all data in the ring buffer.
680    pub fn clear(&mut self) {
681        self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
682    }
683
684    /// Write elements directly to the raw buffer.
685    /// This can be used to fill the buffer before starting the DMA transfer.
686    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
687        self.ringbuf.write_immediate(buf)
688    }
689
690    /// Write elements from the ring buffer
691    /// Return a tuple of the length written and the length remaining in the buffer
692    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
693        self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
694    }
695
696    /// Write an exact number of elements to the ringbuffer.
697    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
698        self.ringbuf
699            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
700            .await
701    }
702
703    /// Wait for any ring buffer write error.
704    pub async fn wait_write_error(&mut self) -> Result<usize, Error> {
705        self.ringbuf
706            .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow()))
707            .await
708    }
709
710    /// The current length of the ringbuffer
711    pub fn len(&mut self) -> Result<usize, Error> {
712        Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
713    }
714
715    /// The capacity of the ringbuffer
716    pub const fn capacity(&self) -> usize {
717        self.ringbuf.cap()
718    }
719
720    /// Set a waker to be woken when at least one byte is received.
721    pub fn set_waker(&mut self, waker: &Waker) {
722        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
723    }
724
725    /// Request the DMA to stop.
726    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
727    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
728    ///
729    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
730    pub fn request_stop(&mut self) {
731        self.channel.request_stop()
732    }
733
734    /// Request the transfer to pause, keeping the existing configuration for this channel.
735    /// To restart the transfer, call [`start`](Self::start) again.
736    ///
737    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
738    pub fn request_pause(&mut self) {
739        self.channel.request_pause()
740    }
741
742    /// Return whether DMA is still running.
743    ///
744    /// If this returns `false`, it can be because either the transfer finished, or
745    /// it was requested to stop early with [`request_stop`](Self::request_stop).
746    pub fn is_running(&mut self) -> bool {
747        self.channel.is_running()
748    }
749
750    /// Stop the DMA transfer and await until the buffer is empty.
751    ///
752    /// This disables the DMA transfer's circular mode so that the transfer
753    /// stops when all available data has been written.
754    ///
755    /// This is designed to be used with streaming output data such as the
756    /// I2S/SAI or DAC.
757    pub async fn stop(&mut self) {
758        self.channel.disable_circular_mode();
759        //wait until cr.susp reads as true
760        poll_fn(|cx| {
761            self.set_waker(cx.waker());
762            self.channel.poll_stop()
763        })
764        .await
765    }
766}
767
impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
    fn drop(&mut self) {
        // Stop the channel and wait until the hardware deasserts EN before
        // releasing the buffer borrow.
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}