imxrt_dma/channel.rs
//! DMA channels
//!
//! `channel` contains the DMA [`Channel`] type, along with helper functions for
//! defining transfers.
//!
//! `Channel` methods that specify memory involved in transfers are marked `unsafe`. You must
//! be very careful when calling these methods, particularly when a channel is already
//! enabled.
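//!
//! # Example
//!
//! A sketch of a software-started, memory-to-memory transfer. It assumes you have already
//! acquired a [`Channel`] (from your HAL, or from [`Dma::channel`](crate::Dma::channel)),
//! and it skips error recovery. Treat it as an outline, not a drop-in routine.
//!
//! ```ignore
//! use imxrt_dma::channel::{self, Channel};
//!
//! fn copy(chan: &mut Channel, source: &[u32; 64], destination: &mut [u32; 64]) {
//!     chan.reset();
//!     unsafe {
//!         // Describe the memory on both sides of the transfer...
//!         channel::set_source_linear_buffer(chan, source);
//!         channel::set_destination_linear_buffer(chan, destination);
//!         // ...then move everything in a single major loop iteration.
//!         chan.set_minor_loop_bytes(core::mem::size_of_val(source) as u32);
//!         chan.set_transfer_iterations(1);
//!     }
//!     chan.start();
//!     while !chan.is_complete() {
//!         assert!(!chan.is_error(), "DMA transfer error");
//!     }
//!     chan.clear_complete();
//! }
//! ```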

use crate::{
    element::Element,
    ral::{self, dma, dmamux, tcd::BandwidthControl, Static},
    Error,
};

impl<const CHANNELS: usize> super::Dma<CHANNELS> {
    /// Creates the DMA channel described by `index`.
    ///
    /// # Safety
    ///
    /// This will create a handle that may alias global, mutable state. You should only create
    /// one channel per index. If there are multiple channels for the same index, you're
    /// responsible for ensuring synchronized access.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than or equal to the maximum number of channels.
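    ///
    /// # Example
    ///
    /// A sketch of claiming a channel. The static `DMA` handle, its construction, and the
    /// channel count of 32 are illustrative; your HAL defines the real ones.
    ///
    /// ```ignore
    /// // `DMA` is your program's DMA controller handle, typically defined by the HAL:
    /// // static DMA: imxrt_dma::Dma<32> = ...;
    ///
    /// // Safety: this is the only handle to channel 7 in the program.
    /// let mut channel = unsafe { DMA.channel(7) };
    /// channel.reset();
    /// ```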
    pub unsafe fn channel(&'static self, index: usize) -> Channel {
        assert!(index < CHANNELS);
        Channel {
            index,
            registers: self.controller,
            multiplexer: self.multiplexer,
            waker: &self.wakers[index],
        }
    }
}

/// A DMA channel
///
/// You should rely on your HAL to allocate `Channel`s. If your HAL does not allocate channels,
/// or if you're designing the HAL, use [`Dma`](crate::Dma) to create channels.
///
/// The `Channel` stores memory addresses independent of the memory lifetime. You must make
/// sure that the channel's state is valid before enabling a transfer!
pub struct Channel {
    /// Our channel number, expected to be within the half-open range `[0, 32)`
    index: usize,
    /// Reference to the DMA registers
    registers: Static<dma::RegisterBlock>,
    /// Reference to the DMA multiplexer
    multiplexer: Static<dmamux::RegisterBlock>,
    /// This channel's waker.
    pub(crate) waker: &'static super::SharedWaker,
}

impl Channel {
    /// Enable the DMA channel for transfers
    ///
    /// # Safety
    ///
    /// `enable()` allows the DMA controller to read and write from the memory
    /// addresses stored in the channel. This is very unsafe:
    ///
    /// - you must ensure that the lifetime of all memory is greater than the
    ///   lifetime of the transfer
    /// - you must ensure that no one else is using this channel for anything
    ///   else
    /// - if the transfer uses a circular buffer, you must ensure that the circular
    ///   buffer is correctly sized and aligned.
    pub unsafe fn enable(&self) {
        // Immutable write OK. No other methods directly modify ERQ.
        self.registers.SERQ.write(self.index as u8);
    }

    /// Returns the DMA channel number
    ///
    /// Channels are unique and numbered within the half-open range `[0, 32)`.
    pub fn channel(&self) -> usize {
        self.index
    }

    /// Set the channel's bandwidth control
    ///
    /// - `None` disables bandwidth control (default setting)
    /// - `Some(bwc)` sets the bandwidth control to `bwc`
    pub fn set_bandwidth_control(&mut self, bandwidth: Option<BandwidthControl>) {
        let raw = BandwidthControl::raw(bandwidth);
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, BWC: raw);
    }

    /// Reset the transfer control descriptor owned by the DMA channel
    ///
    /// `reset` should be called during channel initialization to put the
    /// channel into a known, good state.
    pub fn reset(&mut self) {
        self.tcd().reset();
    }

    /// Returns a handle to this channel's transfer control descriptor
    fn tcd(&self) -> &crate::ral::tcd::RegisterBlock {
        &self.registers.TCD[self.index]
    }

    /// Set the source address for a DMA transfer
    ///
    /// `saddr` should be a memory location that can provide the DMA controller
    /// with data.
    ///
    /// # Safety
    ///
    /// If the DMA channel is already enabled, the DMA engine may start reading this
    /// memory location. You must ensure that reads to `saddr` do not perform
    /// inappropriate side effects. You must ensure `saddr` is valid for the
    /// lifetime of the transfer.
    pub unsafe fn set_source_address<E: Element>(&self, saddr: *const E) {
        // Immutable write OK. 32-bit aligned store on SADDR.
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SADDR, saddr as u32);
    }

    /// Set the source offset *in bytes*
    ///
    /// `offset` could be negative, which would decrement the address.
    ///
    /// # Safety
    ///
    /// This method could allow a DMA engine to read beyond a buffer or
    /// address. You must ensure that the source is valid for these offsets.
    pub unsafe fn set_source_offset(&self, offset: i16) {
        // Immutable write OK. 16-bit aligned store on SOFF.
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SOFF, offset);
    }

    /// Set the destination address for a DMA transfer
    ///
    /// `daddr` should be a memory location that can store data from the
    /// DMA controller.
    ///
    /// # Safety
    ///
    /// If the DMA channel is already enabled, the DMA engine may start
    /// writing to this address. You must ensure that writes to `daddr`
    /// are safe, and that the memory is valid for the lifetime of the
    /// transfer.
    pub unsafe fn set_destination_address<E: Element>(&self, daddr: *const E) {
        // Immutable write OK. 32-bit aligned store on DADDR.
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DADDR, daddr as u32);
    }

    /// Set the destination offset *in bytes*
    ///
    /// `offset` could be negative, which would decrement the address.
    ///
    /// # Safety
    ///
    /// This method could allow a DMA engine to write beyond the range of
    /// a buffer. You must ensure that the destination is valid for these
    /// offsets.
    pub unsafe fn set_destination_offset(&self, offset: i16) {
        // Immutable write OK. 16-bit aligned store on DOFF.
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DOFF, offset);
    }

    /// Set the transfer attributes for the source
    ///
    /// # Safety
    ///
    /// An incorrect `modulo` value may allow the DMA engine to loop back
    /// to an incorrect address. You must ensure that `modulo` is valid
    /// for your source.
    pub unsafe fn set_source_attributes<E: Element>(&self, modulo: u8) {
        let tcd = self.tcd();
        ral::write_reg!(
            crate::ral::tcd,
            tcd,
            SATTR,
            MOD: modulo,
            SIZE: E::DATA_TRANSFER_ID
        );
    }

    /// Set the source last address adjustment *in bytes*
    ///
    /// # Safety
    ///
    /// This could allow the DMA engine to reference an invalid source buffer.
    /// You must ensure that the adjustment performed by the DMA engine is
    /// valid, assuming that another DMA transfer immediately runs after the
    /// current transfer completes.
    pub unsafe fn set_source_last_address_adjustment(&self, adjustment: i32) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, SLAST, adjustment);
    }

    /// Set the destination last address adjustment *in bytes*
    ///
    /// # Safety
    ///
    /// This could allow the DMA engine to reference an invalid destination address.
    /// You must ensure that the adjustment performed by the DMA engine is
    /// valid, assuming that another DMA transfer immediately runs after the
    /// current transfer completes.
    pub unsafe fn set_destination_last_address_adjustment(&self, adjustment: i32) {
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, DLAST_SGA, adjustment);
    }

    /// Set the transfer attributes for the destination
    ///
    /// # Safety
    ///
    /// An incorrect `modulo` value may allow the DMA engine to loop back
    /// to an incorrect address. You must ensure that `modulo` is valid
    /// for your destination.
    pub unsafe fn set_destination_attributes<E: Element>(&self, modulo: u8) {
        let tcd = self.tcd();
        ral::write_reg!(
            crate::ral::tcd,
            tcd,
            DATTR,
            MOD: modulo,
            SIZE: E::DATA_TRANSFER_ID
        );
    }

    /// Set the number of *bytes* to transfer per minor loop
    ///
    /// Describes how many bytes we should transfer for each DMA service request.
    /// Note that an `nbytes` of `0` is interpreted as a 4GB transfer.
    ///
    /// # Safety
    ///
    /// This might allow the DMA engine to read beyond the source, or write beyond
    /// the destination. Caller must ensure that the number of bytes per minor loop
    /// is valid for the given transfer.
    pub unsafe fn set_minor_loop_bytes(&self, nbytes: u32) {
        // Immutable write OK. 32-bit store on NBYTES.
        let tcd = self.tcd();
        ral::write_reg!(crate::ral::tcd, tcd, NBYTES, nbytes);
    }

    /// Tells the DMA channel how many transfer iterations to perform
    ///
    /// A 'transfer iteration' is a read from a source, and a write to a destination, with
    /// read and write sizes described by a minor loop. Each iteration requires a DMA
    /// service request, either from hardware or from software. The maximum number of iterations
    /// is 2^15 - 1.
    ///
    /// # Safety
    ///
    /// This may allow the DMA engine to read beyond the source, or write beyond
    /// the destination. Caller must ensure that the number of iterations is valid
    /// for the transfer.
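    ///
    /// # Example
    ///
    /// A sketch of pairing the minor loop size with the iteration count, assuming `channel`
    /// is a [`Channel`] whose source and destination already describe at least 256 bytes
    /// of `u32` data:
    ///
    /// ```ignore
    /// // 16 bytes (four u32 elements) per service request, over 16 iterations:
    /// // 16 * 16 = 256 bytes moved in total.
    /// unsafe {
    ///     channel.set_minor_loop_bytes(4 * core::mem::size_of::<u32>() as u32);
    ///     channel.set_transfer_iterations(16);
    /// }
    /// ```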
    pub unsafe fn set_transfer_iterations(&mut self, iterations: u16) {
        let tcd = self.tcd();
        // Note that this is clearing the ELINK bit. We don't have support
        // for channel-to-channel linking right now. Clearing ELINK is intentional
        // to use the whole 15 bits for iterations.
        ral::modify_reg!(crate::ral::tcd, tcd, CITER, CITER: iterations);
        ral::modify_reg!(crate::ral::tcd, tcd, BITER, BITER: iterations);
    }

    /// Returns the beginning transfer iterations setting for the channel.
    ///
    /// This reflects the last call to `set_transfer_iterations`.
    pub fn beginning_transfer_iterations(&self) -> u16 {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, BITER, BITER)
    }

    /// Set the DMAMUX channel configuration
    ///
    /// See the [`Configuration`](crate::channel::Configuration) documentation
    /// for more information.
    ///
    /// # Panics
    ///
    /// Only the first four DMA channels support periodic triggering from PIT timers. This method
    /// panics if `periodic` is set for the [`Enable`](crate::channel::Configuration)
    /// variant, but the channel does not support triggering.
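    ///
    /// # Example
    ///
    /// A sketch of routing a hardware source to this channel, assuming `channel` is a
    /// [`Channel`] you've already acquired. `SOURCE_SLOT` stands in for a chip-specific
    /// DMAMUX source number; consult your reference manual or HAL for real values.
    ///
    /// ```ignore
    /// use imxrt_dma::channel::Configuration;
    ///
    /// const SOURCE_SLOT: u32 = 42; // Illustrative slot number.
    /// channel.set_channel_configuration(Configuration::enable(SOURCE_SLOT));
    ///
    /// // Or, for memory-to-memory transfers:
    /// channel.set_channel_configuration(Configuration::AlwaysOn);
    /// ```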
    pub fn set_channel_configuration(&mut self, configuration: Configuration) {
        // Immutable write OK. 32-bit store on configuration register.
        let chcfg = &self.multiplexer.chcfg[self.index];
        match configuration {
            Configuration::Off => chcfg.write(0),
            Configuration::Enable { source, periodic } => {
                let mut v = source | dmamux::RegisterBlock::ENBL;
                if periodic {
                    assert!(
                        self.channel() < 4,
                        "Requested DMA periodic triggering on an unsupported channel."
                    );
                    v |= dmamux::RegisterBlock::TRIG;
                }
                chcfg.write(v);
            }
            Configuration::AlwaysOn => {
                // See note in reference manual: when A_ON is high, SOURCE is ignored.
                chcfg.write(dmamux::RegisterBlock::ENBL | dmamux::RegisterBlock::A_ON)
            }
        }
    }

    /// Returns `true` if the DMA channel is receiving a service signal from hardware
    pub fn is_hardware_signaling(&self) -> bool {
        self.registers.HRS.read() & (1 << self.index) != 0
    }

    /// Disable the DMA channel, preventing any DMA transfers
    pub fn disable(&self) {
        // Immutable write OK. No other methods directly modify ERQ.
        self.registers.CERQ.write(self.index as u8);
    }

    /// Returns `true` if this DMA channel generated an interrupt
    pub fn is_interrupt(&self) -> bool {
        self.registers.INT.read() & (1 << self.index) != 0
    }

    /// Clear the interrupt flag from this DMA channel
    pub fn clear_interrupt(&self) {
        // Immutable write OK. No other methods modify INT.
        self.registers.CINT.write(self.index as u8);
    }

    /// Enable or disable 'disable on completion'
    ///
    /// 'Disable on completion' lets the DMA channel automatically clear the request signal
    /// when it completes a transfer.
    pub fn set_disable_on_completion(&mut self, dreq: bool) {
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, DREQ: dreq as u16);
    }

    /// Enable or disable interrupt generation when the transfer completes
    ///
    /// You're responsible for registering your interrupt handler.
    pub fn set_interrupt_on_completion(&mut self, intr: bool) {
        let tcd = self.tcd();
        ral::modify_reg!(crate::ral::tcd, tcd, CSR, INTMAJOR: intr as u16);
    }

    /// Indicates if the DMA transfer has completed
    pub fn is_complete(&self) -> bool {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, CSR, DONE == 1)
    }

    /// Clears completion indication
    pub fn clear_complete(&self) {
        // Immutable write OK. CDNE affects a bit in TCD. But, other writes to
        // TCD require a &mut reference. Existence of a &mut reference blocks
        // clear_complete calls.
        self.registers.CDNE.write(self.index as u8);
    }

    /// Indicates if the DMA channel is in an error state
    pub fn is_error(&self) -> bool {
        self.registers.ERR.read() & (1 << self.index) != 0
    }

    /// Clears the error flag
    pub fn clear_error(&self) {
        // Immutable write OK. CERR affects a bit in ERR, which is
        // not written to elsewhere.
        self.registers.CERR.write(self.index as u8);
    }

    /// Indicates if this DMA channel is actively transferring data
    pub fn is_active(&self) -> bool {
        let tcd = self.tcd();
        ral::read_reg!(crate::ral::tcd, tcd, CSR, ACTIVE == 1)
    }

    /// Indicates if this DMA channel is enabled
    pub fn is_enabled(&self) -> bool {
        self.registers.ERQ.read() & (1 << self.index) != 0
    }

    /// Returns the value from the **global** error status register
    ///
    /// It may reflect the last channel that produced an error, and that
    /// may not be related to this channel.
    pub fn error_status(&self) -> Error {
        Error::new(self.registers.ES.read())
    }

    /// Start a DMA transfer
    ///
    /// `start()` should be used to request service from the DMA controller. It's
    /// necessary for in-memory DMA transfers. Do not use it for hardware-initiated
    /// DMA transfers; those rely on the hardware to request DMA service.
    ///
    /// The flag is automatically cleared by hardware after it's asserted.
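    ///
    /// # Example
    ///
    /// A polling sketch for a software-started transfer, assuming `channel`'s TCD already
    /// describes a valid memory-to-memory transfer:
    ///
    /// ```ignore
    /// channel.start();
    /// while !channel.is_complete() {
    ///     assert!(!channel.is_error(), "DMA transfer error");
    /// }
    /// channel.clear_complete();
    /// ```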
    pub fn start(&self) {
        // Immutable write OK. SSRT affects a bit in TCD. But, other writes to
        // TCD require a &mut reference. Existence of a &mut reference blocks
        // start calls.
        self.registers.SSRT.write(self.index as u8);
    }
}

// It's OK to send a channel across execution contexts.
// They can't be cloned or copied, so there's no chance of
// them being (mutably) shared.
unsafe impl Send for Channel {}

/// DMAMUX channel configuration
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum Configuration {
    /// The DMAMUX channel is disabled
    Off,
    /// The DMAMUX is enabled, permitting hardware triggering.
    /// See [`enable`](Configuration::enable) to enable
    /// the channel without periodic triggering.
    Enable {
        /// The DMA channel source (slot number)
        ///
        /// Specifies which DMA source is routed to the DMA channel.
        source: u32,
        /// Set the periodic triggering flag to schedule DMA transfers on PIT
        /// timer expiration.
        ///
        /// `periodic` only works for the first four DMA channels, since
        /// it corresponds to the PIT timers.
        periodic: bool,
    },
    /// The DMAMUX is always on, and there's no need for software
    /// or hardware activation
    ///
    /// Use `AlwaysOn` for
    ///
    /// - memory-to-memory transfers
    /// - memory-to-external-bus transfers
    AlwaysOn,
}

impl Configuration {
    /// Enable the channel without periodic triggering
    ///
    /// Shorthand for `Configuration::Enable { source, periodic: false }`.
    /// Use `enable()` to avoid possible panics in
    /// [`set_channel_configuration`](crate::channel::Channel::set_channel_configuration).
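    ///
    /// # Example
    ///
    /// A sketch of the equivalence (the slot number `9` is arbitrary):
    ///
    /// ```ignore
    /// use imxrt_dma::channel::Configuration;
    ///
    /// assert_eq!(
    ///     Configuration::enable(9),
    ///     Configuration::Enable { source: 9, periodic: false },
    /// );
    /// ```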
    pub const fn enable(source: u32) -> Self {
        Configuration::Enable {
            source,
            periodic: false,
        }
    }
}

/// Set a hardware peripheral as the source for a DMA transfer
///
/// `hardware_source` is expected to be a pointer to a peripheral register that
/// can provide DMA data. This function configures the DMA channel to always read
/// from this register.
///
/// # Safety
///
/// Caller must ensure that `hardware_source` is valid for the lifetime of the transfer,
/// and valid for all subsequent transfers performed by this DMA channel with this address.
pub unsafe fn set_source_hardware<E: Element>(chan: &mut Channel, hardware_source: *const E) {
    chan.set_source_address(hardware_source);
    chan.set_source_offset(0);
    chan.set_source_attributes::<E>(0);
    chan.set_source_last_address_adjustment(0);
}

/// Set a hardware peripheral as the destination for a DMA transfer
///
/// `hardware_destination` is expected to point at a peripheral register that can
/// receive DMA data. This function configures the DMA channel to always write to
/// this register.
///
/// # Safety
///
/// Caller must ensure that `hardware_destination` is valid for the lifetime of the transfer,
/// and valid for all subsequent transfers performed by this DMA channel with this address.
pub unsafe fn set_destination_hardware<E: Element>(
    chan: &mut Channel,
    hardware_destination: *const E,
) {
    chan.set_destination_address(hardware_destination);
    chan.set_destination_offset(0);
    chan.set_destination_attributes::<E>(0);
    chan.set_destination_last_address_adjustment(0);
}

/// Set a linear buffer as the source for a DMA transfer
///
/// When the transfer completes, the DMA channel will point at the
/// start of the buffer.
///
/// # Safety
///
/// Caller must ensure that the source is valid for the lifetime of the transfer,
/// and valid for all subsequent transfers performed by this DMA channel with this buffer.
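///
/// # Example
///
/// A sketch that stages a read-only buffer as the source, assuming `channel` is an
/// already-acquired [`Channel`] whose minor loop, iterations, and destination are
/// configured elsewhere:
///
/// ```ignore
/// use imxrt_dma::channel::set_source_linear_buffer;
///
/// // The buffer must outlive the transfer; a static easily satisfies that.
/// static SOURCE: [u32; 32] = [0xdeadbeef; 32];
/// unsafe { set_source_linear_buffer(&mut channel, &SOURCE) };
/// ```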
pub unsafe fn set_source_linear_buffer<E: Element>(chan: &mut Channel, source: &[E]) {
    chan.set_source_address(source.as_ptr());
    chan.set_source_offset(core::mem::size_of::<E>() as i16);
    chan.set_source_attributes::<E>(0);
    chan.set_source_last_address_adjustment(
        ((source.len() * core::mem::size_of::<E>()) as i32).wrapping_neg(),
    );
}

/// Set a linear buffer as the destination for a DMA transfer
///
/// When the transfer completes, the DMA channel will point at the
/// start of the buffer.
///
/// # Safety
///
/// Caller must ensure that the destination is valid for the lifetime of the transfer,
/// and valid for all subsequent transfers performed by this DMA channel with this buffer.
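///
/// # Example
///
/// A sketch that stages a mutable buffer as the destination, assuming `channel` is an
/// already-acquired [`Channel`]. Keeping the buffer alive for the whole transfer is the
/// caller's responsibility.
///
/// ```ignore
/// use imxrt_dma::channel::set_destination_linear_buffer;
///
/// let mut destination = [0u32; 32];
/// unsafe { set_destination_linear_buffer(&mut channel, &mut destination) };
/// // ... start the transfer, and keep `destination` alive until it completes.
/// ```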
pub unsafe fn set_destination_linear_buffer<E: Element>(chan: &mut Channel, destination: &mut [E]) {
    chan.set_destination_address(destination.as_ptr());
    chan.set_destination_offset(core::mem::size_of::<E>() as i16);
    chan.set_destination_attributes::<E>(0);
    chan.set_destination_last_address_adjustment(
        ((destination.len() * core::mem::size_of::<E>()) as i32).wrapping_neg(),
    );
}

/// Assert properties about the circular buffer
fn circular_buffer_asserts<E>(buffer: &[E]) {
    let len = buffer.len();
    assert!(
        len.is_power_of_two(),
        "DMA circular buffer size is not power of two"
    );
    let start = buffer.as_ptr();
    let size = len * core::mem::size_of::<E>();
    assert!(
        (start as usize) % size == 0,
        "DMA circular buffer is not properly aligned"
    );
}

/// Compute the circular buffer modulo value
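///
/// The modulo is the base-2 logarithm of the buffer's size in bytes. For example, a
/// 32-element buffer of `u32` values spans 128 bytes and produces a modulo of 7,
/// since 2^7 == 128.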
fn circular_buffer_modulo<E>(buffer: &[E]) -> u32 {
    31 - (buffer.len() * core::mem::size_of::<E>()).leading_zeros()
}

/// Set a circular buffer as the source for a DMA transfer
///
/// When the transfer completes, the DMA channel will remain at the
/// next element in the circular buffer.
///
/// # Safety
///
/// Caller must ensure that the source is valid for the lifetime of the transfer,
/// and for all subsequent transfers performed by this DMA channel with this buffer.
///
/// # Panics
///
/// Panics if
///
/// - the capacity is not a power of two
/// - the buffer's starting address is not a multiple of its size in bytes
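///
/// # Example
///
/// A sketch of an appropriately sized and aligned source buffer, assuming `channel` is an
/// already-acquired [`Channel`]. The `Aligned` wrapper is illustrative; use whatever
/// alignment mechanism your project already has.
///
/// ```ignore
/// use imxrt_dma::channel::set_source_circular_buffer;
///
/// // 32 u16 elements == 64 bytes, so the buffer must be 64-byte aligned.
/// #[repr(C, align(64))]
/// struct Aligned([u16; 32]);
///
/// static RING: Aligned = Aligned([0; 32]);
/// unsafe { set_source_circular_buffer(&mut channel, &RING.0) };
/// ```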
pub unsafe fn set_source_circular_buffer<E: Element>(chan: &mut Channel, source: &[E]) {
    circular_buffer_asserts(source);
    let modulo = circular_buffer_modulo(source);

    chan.set_source_address(source.as_ptr());
    chan.set_source_offset(core::mem::size_of::<E>() as i16);
    chan.set_source_attributes::<E>(modulo as u8);
    chan.set_source_last_address_adjustment(0);
}

/// Set a circular buffer as the destination for a DMA transfer
///
/// When the transfer completes, the DMA channel will remain at the
/// next element in the circular buffer.
///
/// # Safety
///
/// Caller must ensure that the destination is valid for the lifetime of the transfer,
/// and for all subsequent transfers performed by this DMA channel with this buffer.
///
/// # Panics
///
/// Panics if
///
/// - the capacity is not a power of two
/// - the buffer's starting address is not a multiple of its size in bytes
pub unsafe fn set_destination_circular_buffer<E: Element>(
    chan: &mut Channel,
    destination: &mut [E],
) {
    circular_buffer_asserts(destination);
    let modulo = circular_buffer_modulo(destination);

    chan.set_destination_address(destination.as_ptr());
    chan.set_destination_offset(core::mem::size_of::<E>() as i16);
    chan.set_destination_attributes::<E>(modulo as u8);
    chan.set_destination_last_address_adjustment(0);
}