va416xx_hal/
dma.rs

1//! API for the DMA peripheral
2//!
3//! ## Examples
4//!
5//! - [Simple DMA example](https://egit.irs.uni-stuttgart.de/rust/va416xx-rs/src/branch/main/examples/simple/examples/dma.rs)
6use arbitrary_int::{u10, u3};
7use vorago_shared_hal::{enable_peripheral_clock, reset_peripheral_for_cycles, PeripheralSelect};
8
9use crate::{enable_nvic_interrupt, pac};
10
/// A single DMA cycle can transfer at most 1024 units before the controller rearbitrates.
const MAX_DMA_TRANSFERS_PER_CYCLE: usize = 1024;
/// The DMA control block base pointer must be 128-byte aligned: the lower 7 address bits
/// must be zero.
const BASE_PTR_ADDR_MASK: u32 = 0b1111111;
13
/// DMA cycle control values.
///
/// Refer to chapter 6.3.1 and 6.6.3 of the datasheet for more details.
#[bitbybit::bitenum(u3, exhaustive = true)]
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum CycleControl {
    /// Indicates that the data structure is invalid.
    Stop = 0b000,
    /// The controller must receive a new request prior to entering the arbitration
    /// process, to enable the DMA cycle to complete. This means that the DMA will only
    /// continue to do transfers as long as a trigger signal is still active. Therefore,
    /// this should not be used for momentary triggers like a timer.
    Basic = 0b001,
    /// The controller automatically inserts a request for the appropriate channel during the
    /// arbitration process. This means that the initial request is sufficient to enable the
    /// DMA cycle to complete.
    Auto = 0b010,
    /// This is used to support continuous data flow. Both primary and alternate data structure
    /// are used. The primary data structure is used first. When the first transfer is complete, an
    /// interrupt can be generated, and the DMA switches to the alternate data structure. When the
    /// second transfer is complete, the primary data structure is used. This pattern continues
    /// until software disables the channel.
    PingPong = 0b011,
    /// Memory scatter-gather mode using the primary data structure. Refer to the
    /// datasheet for details.
    MemScatterGatherPrimary = 0b100,
    /// Memory scatter-gather mode using the alternate data structure. Refer to the
    /// datasheet for details.
    MemScatterGatherAlternate = 0b101,
    /// Peripheral scatter-gather mode using the primary data structure. Refer to the
    /// datasheet for details.
    PeriphScatterGatherPrimary = 0b110,
    /// Peripheral scatter-gather mode using the alternate data structure. Refer to the
    /// datasheet for details.
    PeriphScatterGatherAlternate = 0b111,
}
44
/// Address increment applied after each DMA transfer unit.
#[bitbybit::bitenum(u2, exhaustive = true)]
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum AddrIncrement {
    /// Increment the address by one byte.
    Byte = 0b00,
    /// Increment the address by one halfword (two bytes).
    Halfword = 0b01,
    /// Increment the address by one word (four bytes).
    Word = 0b10,
    /// Do not increment the address. Useful for peripheral FIFO registers.
    None = 0b11,
}
54
/// Size of a single DMA transfer unit.
///
/// The bit pattern 0b11 is reserved, which is why this enumeration is non-exhaustive.
#[bitbybit::bitenum(u2, exhaustive = false)]
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DataSize {
    /// 8-bit transfers.
    Byte = 0b00,
    /// 16-bit transfers.
    Halfword = 0b01,
    /// 32-bit transfers.
    Word = 0b10,
}
63
/// This configuration controls how many DMA transfers can occur before the controller arbitrates.
#[bitbybit::bitenum(u4, exhaustive = true)]
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum RPower {
    /// Rearbitrate after each transfer.
    EachTransfer = 0b0000,
    Every2 = 0b0001,
    Every4 = 0b0010,
    Every8 = 0b0011,
    Every16 = 0b0100,
    Every32 = 0b0101,
    Every64 = 0b0110,
    Every128 = 0b0111,
    Every256 = 0b1000,
    Every512 = 0b1001,
    Every1024 = 0b1010,
    /// Alternative encoding — also rearbitrates every 1024 transfers.
    Every1024Alt0 = 0b1011,
    /// Alternative encoding — also rearbitrates every 1024 transfers.
    Every1024Alt1 = 0b1100,
    /// Alternative encoding — also rearbitrates every 1024 transfers.
    Every1024Alt2 = 0b1101,
    /// Alternative encoding — also rearbitrates every 1024 transfers.
    Every1024Alt3 = 0b1110,
    /// Alternative encoding — also rearbitrates every 1024 transfers.
    Every1024Alt4 = 0b1111,
}
86
/// Error returned for DMA control block addresses which are not 128-byte aligned.
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
#[error("Invalid DMA control block address")]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct InvalidCtrlBlockAddrError;
91
/// Channel configuration word of a [DmaChannelControl] structure.
///
/// Refer to chapter 6.6.3 of the datasheet for the full field description.
#[bitbybit::bitfield(u32, default = 0x0, debug, defmt_fields(feature = "defmt"))]
pub struct ChannelConfig {
    /// Destination address increment applied after each transfer unit.
    #[bits(30..=31, rw)]
    dst_inc: AddrIncrement,
    /// Destination data size. `None` corresponds to the reserved encoding 0b11.
    #[bits(28..=29, rw)]
    dst_size: Option<DataSize>,
    /// Source address increment applied after each transfer unit.
    #[bits(26..=27, rw)]
    src_inc: AddrIncrement,
    /// Source data size. `None` corresponds to the reserved encoding 0b11.
    #[bits(24..=25, rw)]
    src_size: Option<DataSize>,
    /// Destination protection control bits.
    #[bits(21..=23, rw)]
    dest_prot_ctrl: u3,
    /// Source protection control bits.
    #[bits(18..=20, rw)]
    src_prot_ctrl: u3,
    /// Arbitration interval: how many transfers occur before the controller rearbitrates.
    #[bits(14..=17, rw)]
    r_power: RPower,
    /// Total number of transfers minus one (N-1 encoding).
    #[bits(4..=13, rw)]
    n_minus_1: u10,
    /// NEXT_USEBURST control bit. Refer to the datasheet for details.
    #[bit(3, rw)]
    next_useburst: bool,
    /// DMA cycle type for this channel.
    #[bits(0..=2, rw)]
    cycle_ctrl: CycleControl,
}
115
/// A single channel control structure inside the [DmaCtrlBlock].
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaChannelControl {
    /// Address of the last transfer unit of the source data (inclusive end pointer).
    pub src_end_ptr: u32,
    /// Address of the last transfer unit of the destination data (inclusive end pointer).
    pub dest_end_ptr: u32,
    /// Channel configuration word.
    pub cfg: ChannelConfig,
    // Keeps the structure size at 16 bytes (4 words), matching the control
    // structure stride used by the DMA controller.
    padding: u32,
}
125
126impl DmaChannelControl {
127    const fn new() -> Self {
128        Self {
129            src_end_ptr: 0,
130            dest_end_ptr: 0,
131            cfg: ChannelConfig::new_with_raw_value(0),
132            padding: 0,
133        }
134    }
135}
136impl Default for DmaChannelControl {
137    fn default() -> Self {
138        Self::new()
139    }
140}
/// DMA control block holding the primary and alternate channel control structures
/// for all four DMA channels.
///
/// The 128-byte alignment matches the hardware requirement that the lower 7 bits
/// of the control block base pointer are zero.
#[repr(C)]
#[repr(align(128))]
pub struct DmaCtrlBlock {
    /// Primary channel control structures for channels 0 to 3.
    pub pri: [DmaChannelControl; 4],
    /// Alternate channel control structures for channels 0 to 3.
    pub alt: [DmaChannelControl; 4],
}
147
148impl DmaCtrlBlock {
149    pub const fn new() -> Self {
150        Self {
151            pri: [DmaChannelControl::new(); 4],
152            alt: [DmaChannelControl::new(); 4],
153        }
154    }
155}
156impl Default for DmaCtrlBlock {
157    fn default() -> Self {
158        Self::new()
159    }
160}
161
162impl DmaCtrlBlock {
163    /// This function creates a DMA control block at the specified memory address.
164    ///
165    /// The passed address must be 128-byte aligned. The user must also take care of specifying
166    /// a valid memory address for the DMA control block which is accessible by the system as well.
167    /// For example, the control block can be placed in the SRAM1.
168    pub fn new_at_addr(addr: u32) -> Result<*mut DmaCtrlBlock, InvalidCtrlBlockAddrError> {
169        if addr & BASE_PTR_ADDR_MASK > 0 {
170            return Err(InvalidCtrlBlockAddrError);
171        }
172        let ctrl_block_ptr = addr as *mut DmaCtrlBlock;
173        unsafe { core::ptr::write(ctrl_block_ptr, DmaCtrlBlock::default()) }
174        Ok(ctrl_block_ptr)
175    }
176}
177
/// DMA controller driver.
///
/// Owns the PAC DMA peripheral and the pointer to the [DmaCtrlBlock].
pub struct Dma {
    dma: pac::Dma,
    ctrl_block: *mut DmaCtrlBlock,
}
182
/// Errors which can occur when preparing a DMA transfer.
#[derive(Debug, Clone, Copy, thiserror::Error)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaTransferInitError {
    /// Source and destination buffers have different lengths.
    #[error("source and destination buffer length mismatch: {src_len} != {dest_len}")]
    SourceDestLenMissmatch { src_len: usize, dest_len: usize },
    /// Overflow when calculating the source or destination end address.
    #[error("address overflow")]
    AddrOverflow,
    /// Transfer size larger than 1024 units.
    #[error("transfer size too large: {0}, 1024 is the allowed maximum")]
    TransferSizeTooLarge(usize),
}
195
/// Configuration for the channel protection control bits of the DMA CFG register.
///
/// The bit positions below follow the encoding used by [Dma::set_protection_bits].
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaConfig {
    /// Bufferable bus accesses (bit 1 of the protection control field).
    pub bufferable: bool,
    /// Cacheable bus accesses (bit 2 of the protection control field).
    pub cacheable: bool,
    /// Privileged bus accesses (bit 0 of the protection control field).
    pub privileged: bool,
}
203
/// Driver for a single DMA channel.
///
/// Instances are created by [Dma::split].
pub struct DmaChannel {
    // Channel index (0 to 3).
    channel: u8,
    // NVIC interrupt fired when a transfer on this channel is done.
    done_interrupt: pac::Interrupt,
    // NVIC interrupt fired while this channel is active.
    active_interrupt: pac::Interrupt,
    pub dma: pac::Dma,
    /// Primary channel control structure inside the [DmaCtrlBlock].
    pub ch_ctrl_pri: &'static mut DmaChannelControl,
    /// Alternate channel control structure inside the [DmaCtrlBlock].
    pub ch_ctrl_alt: &'static mut DmaChannelControl,
}
212
213impl DmaChannel {
214    #[inline(always)]
215    pub fn channel(&self) -> u8 {
216        self.channel
217    }
218
219    #[inline(always)]
220    pub fn enable(&mut self) {
221        self.dma
222            .chnl_enable_set()
223            .write(|w| unsafe { w.bits(1 << self.channel) });
224    }
225
226    #[inline(always)]
227    pub fn is_enabled(&mut self) -> bool {
228        ((self.dma.chnl_enable_set().read().bits() >> self.channel) & 0b1) != 0
229    }
230
231    #[inline(always)]
232    pub fn disable(&mut self) {
233        self.dma
234            .chnl_enable_clr()
235            .write(|w| unsafe { w.bits(1 << self.channel) });
236    }
237
238    #[inline(always)]
239    pub fn trigger_with_sw_request(&mut self) {
240        self.dma
241            .chnl_sw_request()
242            .write(|w| unsafe { w.bits(1 << self.channel) });
243    }
244
245    #[inline(always)]
246    pub fn state_raw(&self) -> u8 {
247        self.dma.status().read().state().bits()
248    }
249
250    #[inline(always)]
251    pub fn select_primary_structure(&self) {
252        self.dma
253            .chnl_pri_alt_clr()
254            .write(|w| unsafe { w.bits(1 << self.channel) });
255    }
256
257    #[inline(always)]
258    pub fn select_alternate_structure(&self) {
259        self.dma
260            .chnl_pri_alt_set()
261            .write(|w| unsafe { w.bits(1 << self.channel) });
262    }
263
264    /// Enables the DMA_DONE interrupt for the DMA channel.
265    ///
266    /// # Safety
267    ///
268    /// This function is `unsafe` because it can break mask-based critical sections.
269    pub unsafe fn enable_done_interrupt(&mut self) {
270        enable_nvic_interrupt(self.done_interrupt);
271    }
272
273    /// Enables the DMA_ACTIVE interrupt for the DMA channel.
274    ///
275    /// # Safety
276    ///
277    /// This function is `unsafe` because it can break mask-based critical sections.
278    pub unsafe fn enable_active_interrupt(&mut self) {
279        enable_nvic_interrupt(self.active_interrupt);
280    }
281
282    /// Prepares a 8-bit DMA transfer from memory to memory.
283    ///
284    /// This function does not enable the DMA channel and interrupts and only prepares
285    /// the DMA control block parameters for the transfer. It configures the primary channel control
286    /// structure to perform the transfer.
287    ///
288    /// You can use [Self::enable], [Self::enable_done_interrupt], [Self::enable_active_interrupt]
289    /// to finish the transfer preparation and then use [Self::trigger_with_sw_request] to
290    /// start the DMA transfer.
291    ///
292    /// # Safety
293    ///
294    /// You must ensure that the destination buffer is safe for DMA writes and the source buffer
295    /// is safe for DMA reads. The specific requirements can be read here:
296    ///
297    ///  - [DMA source buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.ReadBuffer.html)
298    ///  - [DMA destination buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.WriteBuffer.html)
299    ///
300    /// More specifically, you must ensure that the passed slice remains valid while the DMA is
301    /// active or until the DMA is stopped.
302    pub unsafe fn prepare_mem_to_mem_transfer_8_bit(
303        &mut self,
304        source: &[u8],
305        dest: &mut [u8],
306    ) -> Result<(), DmaTransferInitError> {
307        let len = Self::common_mem_transfer_checks(source.len(), dest.len())?;
308        self.generic_mem_to_mem_transfer_init(
309            len,
310            (source.as_ptr() as u32)
311                .checked_add(len as u32)
312                .ok_or(DmaTransferInitError::AddrOverflow)?,
313            (dest.as_ptr() as u32)
314                .checked_add(len as u32)
315                .ok_or(DmaTransferInitError::AddrOverflow)?,
316            DataSize::Byte,
317            AddrIncrement::Byte,
318        );
319        Ok(())
320    }
321
322    /// Prepares a 16-bit DMA transfer from memory to memory.
323    ///
324    /// This function does not enable the DMA channel and interrupts and only prepares
325    /// the DMA control block parameters for the transfer. It configures the primary channel control
326    /// structure to perform the transfer.
327    ///
328    /// You can use [Self::enable], [Self::enable_done_interrupt], [Self::enable_active_interrupt]
329    /// to finish the transfer preparation and then use [Self::trigger_with_sw_request] to
330    /// start the DMA transfer.
331    ///
332    /// # Safety
333    ///
334    /// You must ensure that the destination buffer is safe for DMA writes and the source buffer
335    /// is safe for DMA reads. The specific requirements can be read here:
336    ///
337    ///  - [DMA source buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.ReadBuffer.html)
338    ///  - [DMA destination buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.WriteBuffer.html)
339    ///
340    /// More specifically, you must ensure that the passed slice remains valid while the DMA is
341    /// active or until the DMA is stopped.
342    pub unsafe fn prepare_mem_to_mem_transfer_16_bit(
343        &mut self,
344        source: &[u16],
345        dest: &mut [u16],
346    ) -> Result<(), DmaTransferInitError> {
347        let len = Self::common_mem_transfer_checks(source.len(), dest.len())?;
348        self.generic_mem_to_mem_transfer_init(
349            len,
350            (source.as_ptr() as u32)
351                .checked_add(len as u32 * core::mem::size_of::<u16>() as u32)
352                .ok_or(DmaTransferInitError::AddrOverflow)?,
353            (dest.as_ptr() as u32)
354                .checked_add(len as u32 * core::mem::size_of::<u16>() as u32)
355                .ok_or(DmaTransferInitError::AddrOverflow)?,
356            DataSize::Halfword,
357            AddrIncrement::Halfword,
358        );
359        Ok(())
360    }
361
362    /// Prepares a 32-bit DMA transfer from memory to memory.
363    ///
364    /// This function does not enable the DMA channel and interrupts and only prepares
365    /// the DMA control block parameters for the transfer. It configures the primary channel control
366    /// structure to perform the transfer.
367    ///
368    /// You can use [Self::enable], [Self::enable_done_interrupt], [Self::enable_active_interrupt]
369    /// to finish the transfer preparation and then use [Self::trigger_with_sw_request] to
370    /// start the DMA transfer.
371    ///
372    /// # Safety
373    ///
374    /// You must ensure that the destination buffer is safe for DMA writes and the source buffer
375    /// is safe for DMA reads. The specific requirements can be read here:
376    ///
377    ///  - [DMA source buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.ReadBuffer.html)
378    ///  - [DMA destination buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.WriteBuffer.html)
379    ///
380    /// More specifically, you must ensure that the passed slice remains valid while the DMA is
381    /// active or until the DMA is stopped.
382    pub unsafe fn prepare_mem_to_mem_transfer_32_bit(
383        &mut self,
384        source: &[u32],
385        dest: &mut [u32],
386    ) -> Result<(), DmaTransferInitError> {
387        let len = Self::common_mem_transfer_checks(source.len(), dest.len())?;
388        self.generic_mem_to_mem_transfer_init(
389            len,
390            (source.as_ptr() as u32)
391                .checked_add(len as u32 * core::mem::size_of::<u32>() as u32)
392                .ok_or(DmaTransferInitError::AddrOverflow)?,
393            (dest.as_ptr() as u32)
394                .checked_add(len as u32 * core::mem::size_of::<u32>() as u32)
395                .ok_or(DmaTransferInitError::AddrOverflow)?,
396            DataSize::Word,
397            AddrIncrement::Word,
398        );
399        Ok(())
400    }
401
402    /// Prepares a 8-bit DMA transfer from memory to a peripheral.
403    ///
404    /// It is assumed that a peripheral with a 16-byte FIFO is used here and that the
405    /// transfer is activated by an IRQ trigger when the half-full interrupt of the peripheral
406    /// is fired. Therefore, this function configured the DMA in [CycleControl::Basic] mode with
407    /// rearbitration happening every 8 DMA cycles. It also configures the primary channel control
408    /// structure to perform the transfer.
409    ///
410    /// # Safety
411    ///
412    /// You must ensure that the source buffer is safe for DMA reads. The specific requirements
413    /// can be read here:
414    ///
415    ///  - [DMA source buffer](https://docs.rs/embedded-dma/latest/embedded_dma/trait.ReadBuffer.html)
416    ///
417    /// More specifically, you must ensure that the passed slice remains valid while the DMA is
418    /// active or until the DMA is stopped.
419    ///
420    /// The destination address must be the pointer address of a peripheral FIFO register address.
421    /// You must also ensure that the regular synchronous transfer API of the peripheral is NOT
422    /// used to perform transfers.
423    pub unsafe fn prepare_mem_to_periph_transfer_8_bit(
424        &mut self,
425        source: &[u8],
426        dest: *mut u32,
427    ) -> Result<(), DmaTransferInitError> {
428        if source.len() > MAX_DMA_TRANSFERS_PER_CYCLE {
429            return Err(DmaTransferInitError::TransferSizeTooLarge(source.len()));
430        }
431        let len = source.len() - 1;
432        self.ch_ctrl_pri.cfg = ChannelConfig::new_with_raw_value(0);
433        self.ch_ctrl_pri.src_end_ptr = (source.as_ptr() as u32)
434            .checked_add(len as u32)
435            .ok_or(DmaTransferInitError::AddrOverflow)?;
436        self.ch_ctrl_pri.dest_end_ptr = dest as u32;
437        self.ch_ctrl_pri.cfg.set_cycle_ctrl(CycleControl::Basic);
438        self.ch_ctrl_pri.cfg.set_src_size(DataSize::Byte);
439        self.ch_ctrl_pri.cfg.set_src_inc(AddrIncrement::Byte);
440        self.ch_ctrl_pri.cfg.set_dst_size(DataSize::Byte);
441        self.ch_ctrl_pri.cfg.set_dst_inc(AddrIncrement::None);
442        self.ch_ctrl_pri.cfg.set_n_minus_1(u10::new(len as u16));
443        self.ch_ctrl_pri.cfg.set_r_power(RPower::Every8);
444        self.select_primary_structure();
445        Ok(())
446    }
447
448    // This function performs common checks and returns the source length minus one which is
449    // relevant for further configuration of the DMA. This is because the DMA API expects N minus
450    // 1 and the source and end pointer need to point to the last transfer address.
451    fn common_mem_transfer_checks(
452        src_len: usize,
453        dest_len: usize,
454    ) -> Result<usize, DmaTransferInitError> {
455        if src_len != dest_len {
456            return Err(DmaTransferInitError::SourceDestLenMissmatch { src_len, dest_len });
457        }
458        if src_len > MAX_DMA_TRANSFERS_PER_CYCLE {
459            return Err(DmaTransferInitError::TransferSizeTooLarge(src_len));
460        }
461        Ok(src_len - 1)
462    }
463
464    fn generic_mem_to_mem_transfer_init(
465        &mut self,
466        n_minus_one: usize,
467        src_end_ptr: u32,
468        dest_end_ptr: u32,
469        data_size: DataSize,
470        addr_incr: AddrIncrement,
471    ) {
472        self.ch_ctrl_pri.cfg = ChannelConfig::new_with_raw_value(0);
473        self.ch_ctrl_pri.src_end_ptr = src_end_ptr;
474        self.ch_ctrl_pri.dest_end_ptr = dest_end_ptr;
475        self.ch_ctrl_pri.cfg.set_cycle_ctrl(CycleControl::Auto);
476        self.ch_ctrl_pri.cfg.set_src_size(data_size);
477        self.ch_ctrl_pri.cfg.set_src_inc(addr_incr);
478        self.ch_ctrl_pri.cfg.set_dst_size(data_size);
479        self.ch_ctrl_pri.cfg.set_dst_inc(addr_incr);
480        self.ch_ctrl_pri
481            .cfg
482            .set_n_minus_1(u10::new(n_minus_one as u16));
483        self.ch_ctrl_pri.cfg.set_r_power(RPower::Every4);
484        self.select_primary_structure();
485    }
486}
487
488impl Dma {
489    /// Create a new DMA instance.
490    ///
491    /// You can also place the [DmaCtrlBlock] statically using a global static mutable
492    /// instance and the [DmaCtrlBlock::new] const constructor This also allows to place the control
493    /// block in a memory section using the [link_section](https://doc.rust-lang.org/reference/abi.html#the-link_section-attribute)
494    /// attribute and then creating a mutable pointer to it using [core::ptr::addr_of_mut].
495    ///
496    /// Alternatively, the [DmaCtrlBlock::new_at_addr] function can be used to create the DMA
497    /// control block at a specific address.
498    pub fn new(
499        dma: pac::Dma,
500        cfg: DmaConfig,
501        ctrl_block: *mut DmaCtrlBlock,
502    ) -> Result<Self, InvalidCtrlBlockAddrError> {
503        // The conversion to u32 is safe here because we are on a 32-bit system.
504        let raw_addr = ctrl_block as u32;
505        if raw_addr & BASE_PTR_ADDR_MASK > 0 {
506            return Err(InvalidCtrlBlockAddrError);
507        }
508        enable_peripheral_clock(PeripheralSelect::Dma);
509        reset_peripheral_for_cycles(PeripheralSelect::Dma, 2);
510        let dma = Dma { dma, ctrl_block };
511        dma.dma
512            .ctrl_base_ptr()
513            .write(|w| unsafe { w.bits(raw_addr) });
514        dma.set_protection_bits(&cfg);
515        dma.enable();
516        Ok(dma)
517    }
518
519    #[inline(always)]
520    pub fn enable(&self) {
521        self.dma.cfg().write(|w| w.master_enable().set_bit());
522    }
523
524    #[inline(always)]
525    pub fn disable(&self) {
526        self.dma.cfg().write(|w| w.master_enable().clear_bit());
527    }
528
529    #[inline(always)]
530    pub fn set_protection_bits(&self, cfg: &DmaConfig) {
531        self.dma.cfg().write(|w| unsafe {
532            w.chnl_prot_ctrl().bits(
533                cfg.privileged as u8 | ((cfg.bufferable as u8) << 1) | ((cfg.cacheable as u8) << 2),
534            )
535        });
536    }
537
538    /// Split the DMA instance into four DMA channels which can be used individually. This allows
539    /// using the inidividual DMA channels in separate tasks.
540    pub fn split(self) -> (DmaChannel, DmaChannel, DmaChannel, DmaChannel) {
541        // Safety: The DMA channel API only operates on its respective channels.
542        (
543            DmaChannel {
544                channel: 0,
545                done_interrupt: pac::Interrupt::DMA_DONE0,
546                active_interrupt: pac::Interrupt::DMA_ACTIVE0,
547                dma: unsafe { pac::Dma::steal() },
548                ch_ctrl_pri: unsafe { &mut (*self.ctrl_block).pri[0] },
549                ch_ctrl_alt: unsafe { &mut (*self.ctrl_block).alt[0] },
550            },
551            DmaChannel {
552                channel: 1,
553                done_interrupt: pac::Interrupt::DMA_DONE1,
554                active_interrupt: pac::Interrupt::DMA_ACTIVE1,
555                dma: unsafe { pac::Dma::steal() },
556                ch_ctrl_pri: unsafe { &mut (*self.ctrl_block).pri[1] },
557                ch_ctrl_alt: unsafe { &mut (*self.ctrl_block).alt[1] },
558            },
559            DmaChannel {
560                channel: 2,
561                done_interrupt: pac::Interrupt::DMA_DONE2,
562                active_interrupt: pac::Interrupt::DMA_ACTIVE2,
563                dma: unsafe { pac::Dma::steal() },
564                ch_ctrl_pri: unsafe { &mut (*self.ctrl_block).pri[2] },
565                ch_ctrl_alt: unsafe { &mut (*self.ctrl_block).alt[2] },
566            },
567            DmaChannel {
568                channel: 3,
569                done_interrupt: pac::Interrupt::DMA_DONE3,
570                active_interrupt: pac::Interrupt::DMA_ACTIVE3,
571                dma: unsafe { pac::Dma::steal() },
572                ch_ctrl_pri: unsafe { &mut (*self.ctrl_block).pri[3] },
573                ch_ctrl_alt: unsafe { &mut (*self.ctrl_block).alt[3] },
574            },
575        )
576    }
577}