// py32_hal/dma/dma.rs

1// The following code is modified from embassy-stm32
2// https://github.com/embassy-rs/embassy/tree/main/embassy-stm32
3// Special thanks to the Embassy Project and its contributors for their work!
4
5use core::future::{poll_fn, Future};
6use core::pin::Pin;
7use core::sync::atomic::{fence, AtomicUsize, Ordering};
8use core::task::{Context, Poll, Waker};
9
10use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
11use embassy_sync::waitqueue::AtomicWaker;
12use py32_metapac::dma::vals;
13
14use super::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer};
15use super::word::{Word, WordSize};
16use super::{AnyChannel, Channel, Dir, Request, STATE};
17use crate::interrupt::typelevel::Interrupt;
18use crate::{interrupt, pac};
19
/// Static description of a DMA channel: the owning DMA block and the
/// channel index within it.
pub(crate) struct ChannelInfo {
    /// The DMA controller this channel belongs to.
    pub(crate) dma: DmaInfo,
    /// Channel number within the DMA block.
    pub(crate) num: usize,
    // #[cfg(feature = "_dual-core")]
    // pub(crate) irq: pac::Interrupt,
}
26
/// Identifies the DMA controller a channel belongs to.
///
/// Only one variant exists today; kept as an enum presumably to stay close
/// to the upstream embassy-stm32 structure (which has BDMA/GPDMA variants).
#[derive(Clone, Copy)]
pub(crate) enum DmaInfo {
    Dma(pac::dma::Dma),
}
31
/// DMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    // /// Peripheral burst transfer configuration
    // pub pburst: Burst,
    // /// Memory burst transfer configuration
    // pub mburst: Burst,
    // /// Flow control configuration
    // pub flow_ctrl: FlowControl,
    // /// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
    // pub fifo_threshold: Option<FifoThreshold>,
    /// Request priority level
    pub priority: Priority,
    /// Enable circular DMA
    ///
    /// Note:
    /// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
    /// Since a circular DMA transfer must be stopped manually, `.await`ing it in the current task would block that task forever.
    pub circular: bool,
    /// Enable half transfer interrupt
    pub half_transfer_ir: bool,
    /// Enable transfer complete interrupt
    pub complete_transfer_ir: bool,
}
58
59impl Default for TransferOptions {
60    fn default() -> Self {
61        Self {
62            // pburst: Burst::Single,
63            // mburst: Burst::Single,
64            // flow_ctrl: FlowControl::Dma,
65            // fifo_threshold: None,
66            priority: Priority::VeryHigh,
67            circular: false,
68            half_transfer_ir: false,
69            complete_transfer_ir: true,
70        }
71    }
72}
73
/// DMA request priority
///
/// Converted into the channel's `PL` (priority level) register field.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
    /// Low Priority
    Low,
    /// Medium Priority
    Medium,
    /// High Priority
    High,
    /// Very High Priority
    VeryHigh,
}
87
88impl From<Priority> for vals::Pl {
89    fn from(value: Priority) -> Self {
90        match value {
91            Priority::Low => pac::dma::vals::Pl::LOW,
92            Priority::Medium => pac::dma::vals::Pl::MEDIUM,
93            Priority::High => pac::dma::vals::Pl::HIGH,
94            Priority::VeryHigh => pac::dma::vals::Pl::VERYHIGH,
95        }
96    }
97}
98
99impl From<WordSize> for vals::Size {
100    fn from(raw: WordSize) -> Self {
101        match raw {
102            WordSize::OneByte => Self::BITS8,
103            WordSize::TwoBytes => Self::BITS16,
104            WordSize::FourBytes => Self::BITS32,
105        }
106    }
107}
108
109impl From<Dir> for vals::Dir {
110    fn from(raw: Dir) -> Self {
111        match raw {
112            Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
113            Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
114        }
115    }
116}
117
/// Per-channel state shared between the interrupt handler and futures.
pub(crate) struct ChannelState {
    // Woken by `on_irq` on half-transfer / transfer-complete events.
    waker: AtomicWaker,
    // Counts completed (full) transfers; read-and-reset by the ring-buffer
    // code via `DmaCtrlImpl::reset_complete_count`.
    complete_count: AtomicUsize,
}
122
impl ChannelState {
    /// Const initializer, used to build the static per-channel `STATE` array.
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
        complete_count: AtomicUsize::new(0),
    };
}
129
130/// safety: must be called only once
131pub(crate) unsafe fn init(
132    cs: critical_section::CriticalSection,
133    dma_priority: interrupt::Priority,
134) {
135    foreach_interrupt! {
136        ($peri:ident, dma, $block:ident, $signal_name:ident, $irq:ident) => {
137            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, dma_priority);
138            // #[cfg(not(feature = "_dual-core"))]
139            crate::interrupt::typelevel::$irq::enable();
140        };
141        ($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
142            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, bdma_priority);
143            // #[cfg(not(feature = "_dual-core"))]
144            crate::interrupt::typelevel::$irq::enable();
145        };
146    }
147    crate::_generated::init_dma();
148}
149
impl AnyChannel {
    /// Interrupt handler body for this channel.
    ///
    /// Panics on a transfer error, acknowledges half/complete transfer
    /// flags, bumps the completion counter on full transfers, and wakes any
    /// registered waker.
    ///
    /// Safety: Must be called with a matching set of parameters for a valid dma channel
    pub(crate) unsafe fn on_irq(&self) {
        let info = self.info();
        let state = &STATE[self.id as usize];
        match self.info().dma {
            DmaInfo::Dma(r) => {
                let cr = r.st(info.num).cr();
                let isr = r.isr().read();

                // A transfer error indicates a configuration/address bug: fail loudly.
                if isr.teif(info.num) {
                    panic!(
                        "DMA: error on DMA@{:08x} channel {}",
                        r.as_ptr() as u32,
                        info.num
                    );
                }

                if isr.htif(info.num) && cr.read().htie() {
                    // Acknowledge half transfer complete interrupt
                    r.ifcr().write(|w| w.set_htif(info.num, true));
                } else if isr.tcif(info.num) && cr.read().tcie() {
                    // Acknowledge transfer complete interrupt
                    r.ifcr().write(|w| w.set_tcif(info.num, true));

                    // Separate load + store rather than `fetch_add`: atomic RMW is
                    // not available on armv6m (see reset_complete_count below).
                    // NOTE(review): assumes this IRQ is the only incrementer.
                    let count = state.complete_count.load(Ordering::Acquire);
                    state.complete_count.store(count + 1, Ordering::Release);

                    if r.st(info.num).ndtr().read() == 0 {
                        // Disable the channel.
                        r.st(info.num).cr().modify(|w| {
                            w.set_en(false);
                        });
                    }
                } else {
                    // Flag set but corresponding interrupt not enabled: nothing to do,
                    // and nobody to wake.
                    return;
                }
                state.waker.wake();
            }
        }
    }

    /// Program the channel registers for a transfer, leaving it disabled.
    ///
    /// Routes `request` through the SYSCFG DMA request map, then writes the
    /// peripheral/memory addresses, transfer count, direction, word sizes,
    /// priority and interrupt enables. Call [`start`](Self::start) to begin.
    unsafe fn configure(
        &self,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) {
        let info = self.info();

        // #[cfg(feature = "_dual-core")]
        // {
        //     use embassy_hal_internal::interrupt::InterruptExt as _;
        //     info.irq.enable();
        // }

        // NDTR is read back as u16 elsewhere, so the length must fit in 0xFFFF.
        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        // Channels 0..=3 map their request through CFGR3; channels 4..=7
        // (py32f072 only) through CFGR4.
        match info.num / 4 {
            0 => {
                pac::SYSCFG.cfgr3().modify(|w| {
                    w.set_dma_map(info.num % 4, request);
                });
            }
            #[cfg(py32f072)]
            1 => {
                pac::SYSCFG.cfgr4().modify(|w| {
                    w.set_dma_map(info.num % 4, request);
                });
            }
            _ => panic!("Invalid DMA channel number"),
        }

        match self.info().dma {
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);

                // "Preceding reads and writes cannot be moved past subsequent writes."
                fence(Ordering::SeqCst);

                self.clear_irqs();

                ch.par().write_value(peri_addr as u32);
                ch.mar().write_value(mem_addr as u32);
                ch.ndtr().write_value(mem_len as _);

                ch.cr().write(|w| {
                    w.set_dir(dir.into());
                    w.set_msize(data_size.into());
                    w.set_psize(data_size.into());
                    w.set_pl(options.priority.into());
                    w.set_minc(incr_mem);
                    w.set_pinc(false);
                    w.set_circ(options.circular);

                    // The error interrupt is always enabled so failures surface in on_irq.
                    w.set_teie(true);
                    w.set_htie(options.half_transfer_ir);
                    w.set_tcie(options.complete_transfer_ir);

                    w.set_en(false); // don't start yet
                });
            }
        }
    }

    /// Enable the channel, starting (or resuming) the programmed transfer.
    fn start(&self) {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);
                ch.cr().modify(|w| w.set_en(true))
            }
        }
    }

    /// Clear all pending interrupt flags for this channel via the global flag.
    ///
    /// NOTE(review): assumes setting GIF clears the per-event flags too; the
    /// individual flag clears are kept commented out for reference.
    fn clear_irqs(&self) {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => {
                r.ifcr().write(|w| {
                    w.set_gif(info.num, true);
                    // w.set_htif(info.num, true);
                    // w.set_tcif(info.num, true);
                    // w.set_teif(info.num, true);
                });
            }
        }
    }

    /// Stop the channel by rewriting CR: EN and the transfer configuration are
    /// cleared, while the error/complete interrupt enables are kept set.
    fn request_stop(&self) {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => {
                // Disable the channel. Keep the IEs enabled so the irqs still fire.
                r.st(info.num).cr().write(|w| {
                    w.set_teie(true);
                    w.set_tcie(true);
                });
            }
        }
    }

    /// Pause the channel: clear only EN, preserving the rest of CR so the
    /// transfer can be resumed with [`start`](Self::start).
    fn request_pause(&self) {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => {
                // Disable the channel without overwriting the existing configuration
                r.st(info.num).cr().modify(|w| {
                    w.set_en(false);
                });
            }
        }
    }

    /// Whether the channel's EN bit is currently set.
    fn is_running(&self) -> bool {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => r.st(info.num).cr().read().en(),
        }
    }

    /// Remaining number of transfers, read from the NDTR counter.
    fn get_remaining_transfers(&self) -> u16 {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(r) => r.st(info.num).ndtr().read() as _,
        }
    }

    /// Clear the CIRC bit so a circular transfer stops at the buffer boundary.
    fn disable_circular_mode(&self) {
        let info = self.info();
        match self.info().dma {
            DmaInfo::Dma(regs) => regs.st(info.num).cr().modify(|w| {
                w.set_circ(false);
            }),
        }
    }

    /// Poll-style check used by the ring buffers' `stop()`: Ready once the
    /// channel reports not-running.
    fn poll_stop(&self) -> Poll<()> {
        use core::sync::atomic::compiler_fence;
        compiler_fence(Ordering::SeqCst);

        if !self.is_running() {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }
}
343
/// DMA transfer.
///
/// Implements [`Future`]; completes when the channel stops running.
/// Dropping it requests a stop and busy-waits until the channel is idle.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: PeripheralRef<'a, AnyChannel>,
}
349
impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// duration of the transfer.
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `buf` must remain valid for the duration of the transfer.
    pub unsafe fn new_read_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: *mut [W],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            // Discard the slice pointer's length metadata; the element count is
            // passed separately below.
            buf as *mut W as *mut u32,
            buf.len(),
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// duration of the transfer.
    pub unsafe fn new_write<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: &'a [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `buf` must remain valid for the duration of the transfer.
    pub unsafe fn new_write_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: *const [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const W as *mut u32,
            buf.len(),
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    ///
    /// # Safety
    ///
    /// `peri_addr` and `repeated` must remain valid for the duration of the transfer.
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            // MINC disabled: the memory address is never incremented, so the
            // same word is sent `count` times.
            false,
            W::size(),
            options,
        )
    }

    /// Configure the channel and immediately start the transfer.
    unsafe fn new_inner(
        channel: PeripheralRef<'a, AnyChannel>,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        // Same bound as in `configure` (NDTR is 16 bits); checked here too so
        // the panic points at the constructor.
        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        channel.configure(
            _request, dir, peri_addr, mem_addr, mem_len, incr_mem, data_size, options,
        );
        channel.start();

        Self { channel }
    }

    /// Request the transfer to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        // The transfer already finished; skip Drop's request_stop/busy-wait.
        core::mem::forget(self);
    }
}
505
506impl<'a> Drop for Transfer<'a> {
507    fn drop(&mut self) {
508        self.request_stop();
509        while self.is_running() {}
510
511        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
512        fence(Ordering::SeqCst);
513    }
514}
515
516impl<'a> Unpin for Transfer<'a> {}
517impl<'a> Future for Transfer<'a> {
518    type Output = ();
519    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
520        let state: &ChannelState = &STATE[self.channel.id as usize];
521
522        state.waker.register(cx.waker());
523
524        if self.is_running() {
525            Poll::Pending
526        } else {
527            Poll::Ready(())
528        }
529    }
530}
531
532// ==============================
533
/// Adapter exposing an `AnyChannel` through the `DmaCtrl` trait used by the ring buffers.
struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>);
535
impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
    /// Remaining transfer count, straight from the channel's NDTR.
    fn get_remaining_transfers(&self) -> usize {
        self.0.get_remaining_transfers() as _
    }

    /// Atomically read and reset the transfer-complete counter.
    fn reset_complete_count(&mut self) -> usize {
        let state = &STATE[self.0.id as usize];
        // armv6m (Cortex-M0/M0+) has no atomic read-modify-write, so the swap
        // is emulated inside a critical section on those targets.
        #[cfg(not(armv6m))]
        return state.complete_count.swap(0, Ordering::AcqRel);
        #[cfg(armv6m)]
        return critical_section::with(|_| {
            let x = state.complete_count.load(Ordering::Acquire);
            state.complete_count.store(0, Ordering::Release);
            x
        });
    }

    /// Register a waker, woken by `on_irq` on half/complete transfer events.
    fn set_waker(&mut self, waker: &Waker) {
        STATE[self.0.id as usize].waker.register(waker);
    }
}
557
/// Ringbuffer for receiving data using DMA circular mode.
pub struct ReadableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    // Software bookkeeping over the DMA-written buffer.
    ringbuf: ReadableDmaRingBuffer<'a, W>,
}
563
impl<'a, W: Word> ReadableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// Configures the channel for circular peripheral-to-memory transfers
    /// over `buffer`. The channel is left disabled; call
    /// [`start`](Self::start) to begin receiving.
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// lifetime of the ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let buffer_ptr = buffer.as_mut_ptr();
        let len = buffer.len();
        let dir = Dir::PeripheralToMemory;
        let data_size = W::size();

        // The ring buffer needs circular mode and both interrupts for its
        // bookkeeping, regardless of what the caller passed in.
        options.half_transfer_ir = true;
        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: ReadableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start();
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf
            .reset(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
        self.ringbuf
            .read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
        self.ringbuf
            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The current length of the ringbuffer
    pub fn len(&mut self) -> Result<usize, Error> {
        Ok(self
            .ringbuf
            .len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request the DMA to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Stop the DMA transfer and await until the buffer is full.
    ///
    /// This disables the DMA transfer's circular mode so that the transfer
    /// stops when the buffer is full.
    ///
    /// This is designed to be used with streaming input data such as the
    /// I2S/SAI or ADC.
    ///
    /// When using the UART, you probably want `request_stop()`.
    pub async fn stop(&mut self) {
        self.channel.disable_circular_mode();
        // Wait until the channel reports not-running (EN cleared after the
        // final transfer). The waker is re-registered on each poll so the
        // transfer-complete interrupt wakes us.
        poll_fn(|cx| {
            self.set_waker(cx.waker());
            self.channel.poll_stop()
        })
        .await
    }
}
703
704impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
705    fn drop(&mut self) {
706        self.request_stop();
707        while self.is_running() {}
708
709        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
710        fence(Ordering::SeqCst);
711    }
712}
713
/// Ringbuffer for writing data using DMA circular mode.
pub struct WritableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    // Software bookkeeping over the DMA-read buffer.
    ringbuf: WritableDmaRingBuffer<'a, W>,
}
719
impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    ///
    /// Configures the channel for circular memory-to-peripheral transfers
    /// from `buffer`. The channel is left disabled; call
    /// [`start`](Self::start) to begin transmitting.
    ///
    /// # Safety
    ///
    /// `peri_addr` must be a valid peripheral data-register address for the
    /// lifetime of the ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let len = buffer.len();
        let dir = Dir::MemoryToPeripheral;
        let data_size = W::size();
        let buffer_ptr = buffer.as_mut_ptr();

        // The ring buffer needs circular mode and both interrupts for its
        // bookkeeping, regardless of what the caller passed in.
        options.half_transfer_ir = true;
        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: WritableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start();
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf
            .reset(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Write elements directly to the raw buffer.
    /// This can be used to fill the buffer before starting the DMA transfer.
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        self.ringbuf.write_immediate(buf)
    }

    /// Write elements from the ring buffer
    /// Return a tuple of the length written and the length remaining in the buffer
    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        self.ringbuf
            .write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
        self.ringbuf
            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// Wait for any ring buffer write error.
    pub async fn wait_write_error(&mut self) -> Result<usize, Error> {
        self.ringbuf
            .wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow()))
            .await
    }

    /// The current length of the ringbuffer
    pub fn len(&mut self) -> Result<usize, Error> {
        Ok(self
            .ringbuf
            .len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request the DMA to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Stop the DMA transfer and await until the buffer is empty.
    ///
    /// This disables the DMA transfer's circular mode so that the transfer
    /// stops when all available data has been written.
    ///
    /// This is designed to be used with streaming output data such as the
    /// I2S/SAI or DAC.
    pub async fn stop(&mut self) {
        self.channel.disable_circular_mode();
        // Wait until the channel reports not-running (EN cleared after the
        // final transfer). The waker is re-registered on each poll so the
        // transfer-complete interrupt wakes us.
        poll_fn(|cx| {
            self.set_waker(cx.waker());
            self.channel.poll_stop()
        })
        .await
    }
}
857
858impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
859    fn drop(&mut self) {
860        self.request_stop();
861        while self.is_running() {}
862
863        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
864        fence(Ordering::SeqCst);
865    }
866}