stm32f1xx_hal/
dma.rs

//! # Direct Memory Access
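//!
//! A minimal sketch of the intended flow, assuming a PAC `Peripherals`
//! singleton (the peripheral names are illustrative):
//!
//! ```ignore
//! use stm32f1xx_hal::dma::DmaExt;
//!
//! let dp = pac::Peripherals::take().unwrap();
//! let mut rcc = dp.RCC;
//!
//! // Split the DMA peripheral into its independent channel singletons.
//! let channels = dp.DMA1.split(&mut rcc);
//!
//! // Channels are then handed to DMA-capable peripherals (see the
//! // `ReadDma`/`WriteDma` traits below), or driven by hand via
//! // `set_peripheral_address`, `set_memory_address`,
//! // `set_transfer_length` and `start`.
//! ```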
#![allow(dead_code)]

use crate::pac::{self, RCC};
use core::{
    convert::TryFrom,
    marker::PhantomData,
    mem, ptr,
    sync::atomic::{self, compiler_fence, Ordering},
};
use embedded_dma::{ReadBuffer, WriteBuffer};

/// DMA transfer errors
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    /// The DMA overwrote a buffer half before it was read
    Overrun,
}

/// Interrupt events a channel can listen for
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Event {
    HalfTransfer,
    TransferComplete,
}

/// Identifies one half of a double buffer
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Half {
    First,
    Second,
}

/// A circular double buffer: the DMA fills one half while the CPU reads the other
pub struct CircBuffer<BUFFER, PAYLOAD>
where
    BUFFER: 'static,
{
    buffer: &'static mut [BUFFER; 2],
    payload: PAYLOAD,
    readable_half: Half,
}

impl<BUFFER, PAYLOAD> CircBuffer<BUFFER, PAYLOAD>
where
    &'static mut [BUFFER; 2]: WriteBuffer,
    BUFFER: 'static,
{
    pub(crate) fn new(buf: &'static mut [BUFFER; 2], payload: PAYLOAD) -> Self {
        CircBuffer {
            buffer: buf,
            payload,
            readable_half: Half::Second,
        }
    }
}

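/// Extension trait to split a DMA peripheral into its independent channels
///
/// A sketch of `split`, assuming a PAC `Peripherals` singleton (variable
/// names are illustrative):
///
/// ```ignore
/// let dp = pac::Peripherals::take().unwrap();
/// let mut rcc = dp.RCC;
///
/// // `Channels` is a tuple struct; the channel singletons start at field 1.
/// let channels = dp.DMA1.split(&mut rcc);
/// let mut ch1 = channels.1;
/// ```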
pub trait DmaExt: crate::Ptr<RB = pac::dma1::RegisterBlock> {
    type Channels;

    fn split(self, rcc: &mut RCC) -> Self::Channels;
}

/// Starting and stopping the peripheral side of a DMA transfer
pub trait TransferPayload {
    fn start(&mut self);
    fn stop(&mut self);
}

/// An on-going DMA transfer; the buffer is released when the transfer finishes
pub struct Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    payload: PAYLOAD,
}

impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn r(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn w(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<MODE, BUFFER, PAYLOAD> Drop for Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    fn drop(&mut self) {
        self.payload.stop();
        compiler_fence(Ordering::SeqCst);
    }
}

/// Read transfer
pub struct R;

/// Write transfer
pub struct W;

/// A singleton that represents a single DMAx channel (channel `C`)
///
/// This singleton has exclusive access to the registers of that channel
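///
/// A sketch of driving a channel by hand for a peripheral-to-memory
/// transfer; the peripheral register address and buffer are illustrative:
///
/// ```ignore
/// let mut buf = [0u8; 16];
///
/// // Peripheral data register address is fixed; the memory address increments.
/// channel.set_peripheral_address(periph_dr_addr, false);
/// channel.set_memory_address(buf.as_mut_ptr() as u32, true);
/// channel.set_transfer_length(buf.len());
///
/// channel.start();
/// while channel.in_progress() {}
/// channel.stop();
/// ```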
#[non_exhaustive]
pub struct Ch<DMA, const C: u8>(PhantomData<DMA>);

impl<DMA: DmaExt, const C: u8> Ch<DMA, C> {
    /// Sets the peripheral `address` for the transfer
    ///
    /// `inc` indicates whether the address is incremented after each data unit is transferred
    pub fn set_peripheral_address(&mut self, address: u32, inc: bool) {
        self.ch().par().write(|w| unsafe { w.pa().bits(address) });
        self.ch().cr().modify(|_, w| w.pinc().bit(inc));
    }

    /// Sets the memory `address` that data will be read from or written to
    ///
    /// `inc` indicates whether the address is incremented after each data unit is transferred
    pub fn set_memory_address(&mut self, address: u32, inc: bool) {
        self.ch().mar().write(|w| unsafe { w.ma().bits(address) });
        self.ch().cr().modify(|_, w| w.minc().bit(inc));
    }

    /// Sets the number of data units to transfer
    ///
    /// Panics if `len` does not fit in the 16-bit NDTR register.
    pub fn set_transfer_length(&mut self, len: usize) {
        self.ch()
            .ndtr()
            .write(|w| w.ndt().set(u16::try_from(len).unwrap()));
    }

    /// Starts the DMA transfer
    pub fn start(&mut self) {
        self.ch().cr().modify(|_, w| w.en().set_bit());
    }

    /// Stops the DMA transfer
    pub fn stop(&mut self) {
        self.ifcr().write(|w| w.cgif(C).set_bit());
        self.ch().cr().modify(|_, w| w.en().clear_bit());
    }

    /// Returns `true` if there's a transfer in progress
    pub fn in_progress(&self) -> bool {
        self.isr().tcif(C).bit_is_clear()
    }

    /// Enables the interrupt for the given `event`
    pub fn listen(&mut self, event: Event) {
        match event {
            Event::HalfTransfer => self.ch().cr().modify(|_, w| w.htie().set_bit()),
            Event::TransferComplete => self.ch().cr().modify(|_, w| w.tcie().set_bit()),
        };
    }

    /// Disables the interrupt for the given `event`
    pub fn unlisten(&mut self, event: Event) {
        match event {
            Event::HalfTransfer => self.ch().cr().modify(|_, w| w.htie().clear_bit()),
            Event::TransferComplete => self.ch().cr().modify(|_, w| w.tcie().clear_bit()),
        };
    }

    /// Returns the register block of this channel
    pub fn ch(&mut self) -> &pac::dma1::CH {
        unsafe { (*DMA::ptr()).ch(C as usize) }
    }

    /// Reads the DMA interrupt status register
    pub fn isr(&self) -> pac::dma1::isr::R {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*DMA::ptr()).isr().read() }
    }

    /// Returns the DMA interrupt flag clear register
    pub fn ifcr(&self) -> &pac::dma1::IFCR {
        unsafe { (*DMA::ptr()).ifcr() }
    }

    /// Reads the number of data units left to transfer (NDTR)
    pub fn get_ndtr(&self) -> u32 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { &(*DMA::ptr()) }
            .ch(C as usize)
            .ndtr()
            .read()
            .bits()
    }
}

impl<B, PAYLOAD, DMA: DmaExt, const C: u8> CircBuffer<B, RxDma<PAYLOAD, Ch<DMA, C>>>
where
    RxDma<PAYLOAD, Ch<DMA, C>>: TransferPayload,
{
    /// Peeks into the readable half of the buffer
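    ///
    /// A sketch, assuming `circ` came from a `circ_read` call (see
    /// `CircReadDma` below) and each half is itself a buffer such as
    /// `[u8; 8]`:
    ///
    /// ```ignore
    /// let len = circ.peek(|half, _| half.len()).unwrap();
    /// ```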
    pub fn peek<R, F>(&mut self, f: F) -> Result<R, Error>
    where
        F: FnOnce(&B, Half) -> R,
    {
        let half_being_read = self.readable_half()?;

        let buf = match half_being_read {
            Half::First => &self.buffer[0],
            Half::Second => &self.buffer[1],
        };

        // XXX does this need a compiler barrier?
        let ret = f(buf, half_being_read);

        let isr = self.payload.channel.isr();
        let first_half_is_done = isr.htif(C).bit_is_set();
        let second_half_is_done = isr.tcif(C).bit_is_set();

        if (half_being_read == Half::First && second_half_is_done)
            || (half_being_read == Half::Second && first_half_is_done)
        {
            Err(Error::Overrun)
        } else {
            Ok(ret)
        }
    }

    /// Returns the `Half` of the buffer that can be read
    pub fn readable_half(&mut self) -> Result<Half, Error> {
        let isr = self.payload.channel.isr();
        let first_half_is_done = isr.htif(C).bit_is_set();
        let second_half_is_done = isr.tcif(C).bit_is_set();

        if first_half_is_done && second_half_is_done {
            return Err(Error::Overrun);
        }

        let last_read_half = self.readable_half;

        Ok(match last_read_half {
            Half::First => {
                if second_half_is_done {
                    self.payload.channel.ifcr().write(|w| w.ctcif(C).set_bit());

                    self.readable_half = Half::Second;
                    Half::Second
                } else {
                    last_read_half
                }
            }
            Half::Second => {
                if first_half_is_done {
                    self.payload.channel.ifcr().write(|w| w.chtif(C).set_bit());

                    self.readable_half = Half::First;
                    Half::First
                } else {
                    last_read_half
                }
            }
        })
    }

    /// Stops the transfer and returns the underlying buffer and RxDma
    pub fn stop(mut self) -> (&'static mut [B; 2], RxDma<PAYLOAD, Ch<DMA, C>>) {
        self.payload.stop();

        (self.buffer, self.payload)
    }
}

impl<BUFFER, PAYLOAD, MODE, DMA: DmaExt, const C: u8>
    Transfer<MODE, BUFFER, RxDma<PAYLOAD, Ch<DMA, C>>>
where
    RxDma<PAYLOAD, Ch<DMA, C>>: TransferPayload,
{
    /// Returns `true` once the transfer has completed
    pub fn is_done(&self) -> bool {
        !self.payload.channel.in_progress()
    }

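    /// Blocks until the transfer is done, then returns the buffer and payload
    ///
    /// A sketch, with `rx` and `buf` illustrative values from a `read` call
    /// (see `ReadDma` below):
    ///
    /// ```ignore
    /// let transfer = rx.read(buf);
    /// let (buf, rx) = transfer.wait();
    /// ```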
    pub fn wait(mut self) -> (BUFFER, RxDma<PAYLOAD, Ch<DMA, C>>) {
        while !self.is_done() {}

        atomic::compiler_fence(Ordering::Acquire);

        self.payload.stop();

        // we need a read here to make the Acquire fence effective
        // we do *not* need this if `dma.stop` does a RMW operation
        unsafe {
            ptr::read_volatile(&0);
        }

        // a second fence keeps later buffer accesses from being reordered
        // before the volatile read above
        atomic::compiler_fence(Ordering::Acquire);

        // `Transfer` needs to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        unsafe {
            let buffer = ptr::read(&self.buffer);
            let payload = ptr::read(&self.payload);
            mem::forget(self);
            (buffer, payload)
        }
    }
}

impl<BUFFER, PAYLOAD, MODE, DMA: DmaExt, const C: u8>
    Transfer<MODE, BUFFER, TxDma<PAYLOAD, Ch<DMA, C>>>
where
    TxDma<PAYLOAD, Ch<DMA, C>>: TransferPayload,
{
    /// Returns `true` once the transfer has completed
    pub fn is_done(&self) -> bool {
        !self.payload.channel.in_progress()
    }

    /// Blocks until the transfer is done, then returns the buffer and payload
    pub fn wait(mut self) -> (BUFFER, TxDma<PAYLOAD, Ch<DMA, C>>) {
        while !self.is_done() {}

        atomic::compiler_fence(Ordering::Acquire);

        self.payload.stop();

        // we need a read here to make the Acquire fence effective
        // we do *not* need this if `dma.stop` does a RMW operation
        unsafe {
            ptr::read_volatile(&0);
        }

        // a second fence keeps later buffer accesses from being reordered
        // before the volatile read above
        atomic::compiler_fence(Ordering::Acquire);

        // `Transfer` needs to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        unsafe {
            let buffer = ptr::read(&self.buffer);
            let payload = ptr::read(&self.payload);
            mem::forget(self);
            (buffer, payload)
        }
    }
}

impl<BUFFER, PAYLOAD, MODE, DMA: DmaExt, const C: u8, TXC>
    Transfer<MODE, BUFFER, RxTxDma<PAYLOAD, Ch<DMA, C>, TXC>>
where
    RxTxDma<PAYLOAD, Ch<DMA, C>, TXC>: TransferPayload,
{
    /// Returns `true` once the transfer has completed
    pub fn is_done(&self) -> bool {
        !self.payload.rxchannel.in_progress()
    }

    /// Blocks until the transfer is done, then returns the buffer and payload
    pub fn wait(mut self) -> (BUFFER, RxTxDma<PAYLOAD, Ch<DMA, C>, TXC>) {
        while !self.is_done() {}

        atomic::compiler_fence(Ordering::Acquire);

        self.payload.stop();

        // we need a read here to make the Acquire fence effective
        // we do *not* need this if `dma.stop` does a RMW operation
        unsafe {
            ptr::read_volatile(&0);
        }

        // a second fence keeps later buffer accesses from being reordered
        // before the volatile read above
        atomic::compiler_fence(Ordering::Acquire);

        // `Transfer` needs to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        unsafe {
            let buffer = ptr::read(&self.buffer);
            let payload = ptr::read(&self.payload);
            mem::forget(self);
            (buffer, payload)
        }
    }
}

impl<BUFFER, PAYLOAD, DMA: DmaExt, const C: u8> Transfer<W, BUFFER, RxDma<PAYLOAD, Ch<DMA, C>>>
where
    RxDma<PAYLOAD, Ch<DMA, C>>: TransferPayload,
{
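    /// Returns the portion of the buffer that has been filled so far
    ///
    /// A sketch of polling an in-flight read; `transfer` is illustrative:
    ///
    /// ```ignore
    /// let received: &[u8] = transfer.peek();
    /// ```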
    pub fn peek<T>(&self) -> &[T]
    where
        BUFFER: AsRef<[T]>,
    {
        let pending = self.payload.channel.get_ndtr() as usize;

        let slice = self.buffer.as_ref();
        let capacity = slice.len();

        &slice[..(capacity - pending)]
    }
}

impl<RXBUFFER, TXBUFFER, PAYLOAD, DMA: DmaExt, const C: u8, TXC>
    Transfer<W, (RXBUFFER, TXBUFFER), RxTxDma<PAYLOAD, Ch<DMA, C>, TXC>>
where
    RxTxDma<PAYLOAD, Ch<DMA, C>, TXC>: TransferPayload,
{
    /// Returns the portion of the receive buffer that has been filled so far
    pub fn peek<T>(&self) -> &[T]
    where
        RXBUFFER: AsRef<[T]>,
    {
        let pending = self.payload.rxchannel.get_ndtr() as usize;

        let slice = self.buffer.0.as_ref();
        let capacity = slice.len();

        &slice[..(capacity - pending)]
    }
}

macro_rules! dma {
    ($DMAX:ident: ($dmaX:ident, {
        $($CX:ident: ($ch: literal),)+
    }),) => {
        pub mod $dmaX {
            use crate::dma::DmaExt;
            use crate::pac::{$DMAX, RCC};
            use crate::rcc::Enable;

            #[non_exhaustive]
            #[allow(clippy::manual_non_exhaustive)]
            pub struct Channels((), $(pub $CX),+);

            $(
                pub type $CX = super::Ch<$DMAX, $ch>;
            )+

            impl DmaExt for $DMAX {
                type Channels = Channels;

                fn split(self, rcc: &mut RCC) -> Channels {
                    $DMAX::enable(rcc);

                    // reset the DMA control registers (stops all on-going transfers)
                    $(
                        self.ch($ch).cr().reset();
                    )+

                    Channels((), $(super::Ch::<$DMAX, $ch>(super::PhantomData)),+)
                }
            }
        }
    }
}

dma! {
    DMA1: (dma1, {
        C1: (0),
        C2: (1),
        C3: (2),
        C4: (3),
        C5: (4),
        C6: (5),
        C7: (6),
    }),
}

dma! {
    DMA2: (dma2, {
        C1: (0),
        C2: (1),
        C3: (2),
        C4: (3),
        C5: (4),
    }),
}

/// DMA Receiver
pub struct RxDma<PAYLOAD, RXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: RXCH,
}

/// DMA Transmitter
pub struct TxDma<PAYLOAD, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: TXCH,
}

/// DMA Receiver/Transmitter
pub struct RxTxDma<PAYLOAD, RXCH, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub rxchannel: RXCH,
    pub txchannel: TXCH,
}

/// Implemented by payloads that can receive via DMA
pub trait Receive {
    type RxChannel;
    type TransmittedWord;
}

/// Implemented by payloads that can transmit via DMA
pub trait Transmit {
    type TxChannel;
    type ReceivedWord;
}

/// Trait for circular DMA reads from a peripheral into memory.
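///
/// A sketch of continuous reception into a static double buffer; `rx` and
/// `buf` are illustrative:
///
/// ```ignore
/// // `buf` is a `&'static mut [[u8; 8]; 2]`, e.g. from a `singleton!`.
/// let mut circ = rx.circ_read(buf);
///
/// loop {
///     circ.peek(|half, _| {
///         // ... process the readable half ...
///     }).unwrap();
/// }
/// ```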
pub trait CircReadDma<B, RS>: Receive
where
    &'static mut [B; 2]: WriteBuffer<Word = RS>,
    B: 'static,
    Self: core::marker::Sized,
{
    fn circ_read(self, buffer: &'static mut [B; 2]) -> CircBuffer<B, Self>;
}

/// Trait for one-shot DMA reads from a peripheral into memory.
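///
/// A sketch of a one-shot read; `rx` is an illustrative DMA-capable
/// receiver implementing this trait:
///
/// ```ignore
/// let buf = some_static_buffer; // implements `WriteBuffer`
/// let transfer = rx.read(buf);
/// // ... do other work while the DMA runs ...
/// let (buf, rx) = transfer.wait();
/// ```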
pub trait ReadDma<B, RS>: Receive
where
    B: WriteBuffer<Word = RS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn read(self, buffer: B) -> Transfer<W, B, Self>;
}

/// Trait for one-shot DMA writes from memory to a peripheral.
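///
/// A sketch of a one-shot write; `tx` is an illustrative DMA-capable
/// transmitter implementing this trait:
///
/// ```ignore
/// let transfer = tx.write(b"hello");
/// let (_buf, tx) = transfer.wait();
/// ```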
pub trait WriteDma<B, TS>: Transmit
where
    B: ReadBuffer<Word = TS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn write(self, buffer: B) -> Transfer<R, B, Self>;
}

/// Trait for DMA transfers that simultaneously read and write in a single operation.
///
/// `read_write` panics if the two buffers are not of equal length.
pub trait ReadWriteDma<RXB, TXB, TS>: Transmit
where
    RXB: WriteBuffer<Word = TS>,
    TXB: ReadBuffer<Word = TS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn read_write(self, rx_buffer: RXB, tx_buffer: TXB) -> Transfer<W, (RXB, TXB), Self>;
}