stm32l4xx_hal/dma.rs
1//! Direct Memory Access Engine
2
3#![allow(dead_code)]
4
5use core::fmt;
6use core::marker::PhantomData;
7use core::mem::MaybeUninit;
8use core::ops::DerefMut;
9use core::ptr;
10use core::slice;
11use core::sync::atomic::{compiler_fence, Ordering};
12use embedded_dma::{StaticReadBuffer, StaticWriteBuffer};
13
14use crate::rcc::AHB1;
15use stable_deref_trait::StableDeref;
16
17#[non_exhaustive]
18#[derive(Debug)]
19pub enum Error {
20 Overrun,
21 BufferError,
22}
23
24pub enum Event {
25 HalfTransfer,
26 TransferComplete,
27}
28
29pub trait CharacterMatch {
30 /// Checks whether the peripheral has detected a character match and,
31 /// if `clear` is set, clears the flag
32 fn check_character_match(&mut self, clear: bool) -> bool;
33}
34
35pub trait ReceiverTimeout {
36 /// Checks whether the peripheral has detected a receiver
37 /// timeout and, if `clear` is set, clears the flag
38 fn check_receiver_timeout(&mut self, clear: bool) -> bool;
39}
40
41pub trait OperationError<O, E> {
42 /// Checks whether the peripheral has detected an error
43 /// while performing an operation
44 fn check_operation_error(&mut self) -> Result<O, E>;
45}
46
47/// Frame reader "worker": frame reads are accessed and handled through this structure.
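///
/// A minimal interrupt-handler sketch (not a compiled example; `frame_reader`, `alloc_frame()`
/// and `process()` are hypothetical: `frame_reader` is a `FrameReader` obtained from a serial
/// RX DMA, and `alloc_frame()` stands in for any allocator returning a buffer that derefs to a
/// [`DMAFrame`]):
///
/// ```ignore
/// // Called from the USART character-match interrupt.
/// let next = alloc_frame();
/// // Swap in the fresh frame and get back the completed one (up to and
/// // including the matching character).
/// let full = frame_reader.character_match_interrupt(next);
/// process(full.read());
/// ```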
48pub struct FrameReader<BUFFER, PAYLOAD, const N: usize>
49where
50 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
51{
52 buffer: BUFFER,
53 payload: PAYLOAD,
54 matching_character: u8,
55}
56
57impl<BUFFER, PAYLOAD, const N: usize> FrameReader<BUFFER, PAYLOAD, N>
58where
59 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
60{
61 pub(crate) fn new(
62 buffer: BUFFER,
63 payload: PAYLOAD,
64 matching_character: u8,
65 ) -> FrameReader<BUFFER, PAYLOAD, N> {
66 Self {
67 buffer,
68 payload,
69 matching_character,
70 }
71 }
72}
73
74impl<BUFFER, PAYLOAD, CHANNEL, const N: usize> FrameReader<BUFFER, RxDma<PAYLOAD, CHANNEL>, N>
75where
76 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
77 PAYLOAD: CharacterMatch,
78{
79 /// Checks whether the peripheral has detected a character match and,
80 /// if `clear` is set, clears the flag
81 pub fn check_character_match(&mut self, clear: bool) -> bool {
82 self.payload.payload.check_character_match(clear)
83 }
84}
85
86impl<BUFFER, PAYLOAD, CHANNEL, const N: usize> FrameReader<BUFFER, RxDma<PAYLOAD, CHANNEL>, N>
87where
88 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
89 PAYLOAD: ReceiverTimeout,
90{
91 pub fn check_receiver_timeout(&mut self, clear: bool) -> bool {
92 self.payload.payload.check_receiver_timeout(clear)
93 }
94}
95
96impl<BUFFER, PAYLOAD, CHANNEL, const N: usize> FrameReader<BUFFER, RxDma<PAYLOAD, CHANNEL>, N>
97where
98 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
99{
100 pub fn check_operation_error<O, E>(&mut self) -> Result<O, E>
101 where
102 PAYLOAD: OperationError<O, E>,
103 {
104 self.payload.payload.check_operation_error()
105 }
106}
107
108/// Frame sender "worker": frame transmissions are accessed and handled through this
109/// structure.
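///
/// A minimal sketch of the send flow (not a compiled example; `frame_sender` is a
/// `FrameSender` obtained from a serial TX DMA, and `alloc_frame()` is a hypothetical
/// allocator returning a buffer that derefs to a [`DMAFrame`]):
///
/// ```ignore
/// use core::fmt::Write;
///
/// let mut frame = alloc_frame();
/// write!(frame, "hello").ok();
/// // Hand the frame to the DMA; fails with `Err(frame)` if a transfer is ongoing.
/// frame_sender.send(frame).ok();
///
/// // Later, in the DMA transfer-complete interrupt, reclaim the sent frame:
/// if let Some(sent) = frame_sender.transfer_complete_interrupt() {
///     // `sent` can be reused or returned to the pool.
/// }
/// ```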
110pub struct FrameSender<BUFFER, PAYLOAD, const N: usize>
111where
112 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
113{
114 buffer: Option<BUFFER>,
115 payload: PAYLOAD,
116}
117
118impl<BUFFER, PAYLOAD, const N: usize> FrameSender<BUFFER, PAYLOAD, N>
119where
120 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
121{
122 pub(crate) fn new(payload: PAYLOAD) -> FrameSender<BUFFER, PAYLOAD, N> {
123 Self {
124 buffer: None,
125 payload,
126 }
127 }
128}
129
130/// Data type for holding data frames for the Serial.
131///
132/// Internally uses uninitialized storage, making the frame zero cost to create. It can also be
133/// used with, for example, [`heapless::pool`] to create a pool of serial frames.
134///
135/// [`heapless::pool`]: https://docs.rs/heapless/0.5.3/heapless/pool/index.html
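///
/// A small usage sketch (not compiled as a doctest; a frame capacity of 8 bytes is assumed):
///
/// ```ignore
/// let mut frame: DMAFrame<8> = DMAFrame::new();
/// // Copy as much of the input as fits and report how much was written.
/// let written = frame.write_slice(b"hello");
/// assert_eq!(written, 5);
/// assert_eq!(frame.read(), b"hello");
/// ```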
136pub struct DMAFrame<const N: usize> {
137 len: u16,
138 buf: [MaybeUninit<u8>; N],
139}
140
141impl<const N: usize> fmt::Debug for DMAFrame<N> {
142 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
143 write!(f, "{:?}", self.read())
144 }
145}
146
147impl<const N: usize> fmt::Write for DMAFrame<N> {
148 fn write_str(&mut self, s: &str) -> fmt::Result {
149 let free = self.free();
150
151 if s.len() > free {
152 Err(fmt::Error)
153 } else {
154 self.write_slice(s.as_bytes());
155 Ok(())
156 }
157 }
158}
159
160impl<const N: usize> Default for DMAFrame<N> {
161 fn default() -> Self {
162 Self::new()
163 }
164}
165
166impl<const N: usize> DMAFrame<N> {
167 const INIT: MaybeUninit<u8> = MaybeUninit::uninit();
168 /// Creates a new node for the Serial DMA
169 #[inline]
170 pub const fn new() -> Self {
171 // Create an uninitialized array of `MaybeUninit<u8>`.
172 Self {
173 len: 0,
174 buf: [Self::INIT; N],
175 }
176 }
177
178 /// Gives a `&mut [u8]` slice of the maximum size to write into; the `commit` method
179 /// must then be used to set the actual number of bytes written.
180 ///
181 /// Note that this function internally first zeros the uninitialized part of the node's buffer.
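 ///
 /// A short sketch of the `write`/`commit` pair (not compiled; a 16-byte frame is assumed):
 ///
 /// ```ignore
 /// let mut frame: DMAFrame<16> = DMAFrame::new();
 /// let buf = frame.write();           // zeroed, full-capacity slice
 /// buf[..3].copy_from_slice(b"abc");
 /// frame.commit(3);                   // record that only 3 bytes were used
 /// assert_eq!(frame.read(), b"abc");
 /// ```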
182 pub fn write(&mut self) -> &mut [u8] {
183 // Initialize remaining memory with a safe value
184 for elem in &mut self.buf[self.len as usize..] {
185 *elem = MaybeUninit::new(0);
186 }
187
188 self.len = self.max_len() as u16;
189
190 // NOTE(unsafe): This is safe as the operation above set the entire buffer to a valid state
191 unsafe { slice::from_raw_parts_mut(self.buf.as_mut_ptr() as *mut _, self.max_len()) }
192 }
193
194 /// Shrinks the current size of the frame; used in conjunction with `write`.
195 #[inline]
196 pub fn commit(&mut self, shrink_to: usize) {
197 // Only shrinking is allowed to remain safe with the `MaybeUninit`
198 if shrink_to < self.len as _ {
199 self.len = shrink_to as _;
200 }
201 }
202
203 /// Gives an uninitialized `&mut [MaybeUninit<u8>; N]` buffer to write into; the `set_len` method
204 /// must then be used to set the actual number of bytes written.
205 #[inline]
206 pub fn write_uninit(&mut self) -> &mut [MaybeUninit<u8>; N] {
207 &mut self.buf
208 }
209
210 /// Sets the current size of the frame; used in conjunction with `write_uninit` to provide an
211 /// interface for uninitialized memory. Use with care!
212 ///
213 /// # Safety
214 ///
215 /// NOTE(unsafe): This must be set so that the final buffer is only referencing initialized
216 /// memory.
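 ///
 /// A sketch of the `write_uninit`/`set_len` pair (not compiled; a 16-byte frame is assumed):
 ///
 /// ```ignore
 /// let mut frame: DMAFrame<16> = DMAFrame::new();
 /// let buf = frame.write_uninit();
 /// buf[0].write(0x42);
 /// // Safety: exactly one byte was initialized above.
 /// unsafe { frame.set_len(1) };
 /// assert_eq!(frame.read(), &[0x42]);
 /// ```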
217 #[inline]
218 pub unsafe fn set_len(&mut self, len: usize) {
219 assert!(len <= self.max_len());
220 self.len = len as _;
221 }
222
223 /// Writes data into the node and returns how many bytes were written from `buf`.
224 ///
225 /// If the node is already partially filled, this will continue filling the node.
226 pub fn write_slice(&mut self, buf: &[u8]) -> usize {
227 let count = buf.len().min(self.free());
228
229 // Used to write data into the `MaybeUninit`
230 // NOTE(unsafe): Safe based on the size check above
231 unsafe {
232 ptr::copy_nonoverlapping(
233 buf.as_ptr(),
234 (self.buf.as_mut_ptr() as *mut u8).add(self.len.into()),
235 count,
236 );
237 }
238
239 self.len += count as u16;
240
241 count
242 }
243
244 /// Clears the node of all data, making it empty
245 #[inline]
246 pub fn clear(&mut self) {
247 self.len = 0;
248 }
249
250 /// Returns a readable slice which maps to the buffer's internal data
251 #[inline]
252 pub fn read(&self) -> &[u8] {
253 // NOTE(unsafe): Safe as it uses the internal length of valid data
254 unsafe { slice::from_raw_parts(self.buf.as_ptr() as *const _, self.len as usize) }
255 }
256
257 /// Returns a mutable slice which maps to the buffer's internal data
258 #[inline]
259 pub fn read_mut(&mut self) -> &mut [u8] {
260 // NOTE(unsafe): Safe as it uses the internal length of valid data
261 unsafe { slice::from_raw_parts_mut(self.buf.as_mut_ptr() as *mut _, self.len as usize) }
262 }
263
264 /// Returns how many bytes are available
265 #[inline]
266 pub fn len(&self) -> usize {
267 self.len as usize
268 }
269
270 /// Returns how many bytes are free
271 #[inline]
272 pub fn free(&self) -> usize {
273 self.max_len() - self.len as usize
274 }
275
276 /// Get the max length of the frame
277 #[inline]
278 pub fn max_len(&self) -> usize {
279 N
280 }
281
282 /// Checks if the frame is empty
283 #[inline]
284 pub fn is_empty(&self) -> bool {
285 self.len == 0
286 }
287
288 #[inline]
289 pub(crate) unsafe fn buffer_address_for_dma(&self) -> u32 {
290 self.buf.as_ptr() as u32
291 }
292
293 #[inline]
294 pub(crate) fn buffer_as_ptr(&self) -> *const MaybeUninit<u8> {
295 self.buf.as_ptr()
296 }
297
298 #[inline]
299 pub(crate) fn buffer_as_mut_ptr(&mut self) -> *mut MaybeUninit<u8> {
300 self.buf.as_mut_ptr()
301 }
302}
303
304impl<const N: usize> AsRef<[u8]> for DMAFrame<N> {
305 #[inline]
306 fn as_ref(&self) -> &[u8] {
307 self.read()
308 }
309}
310
311pub struct CircBuffer<BUFFER, PAYLOAD>
312where
313 BUFFER: 'static,
314{
315 buffer: &'static mut BUFFER,
316 payload: PAYLOAD,
317 read_index: usize,
318 write_previous: usize,
319}
320
321impl<BUFFER, PAYLOAD> CircBuffer<BUFFER, PAYLOAD>
322where
323 &'static mut BUFFER: StaticWriteBuffer,
324 BUFFER: 'static,
325{
326 pub(crate) fn new(buf: &'static mut BUFFER, payload: PAYLOAD) -> Self {
327 CircBuffer {
328 buffer: buf,
329 payload,
330 read_index: 0,
331 write_previous: 0,
332 }
333 }
334}
335
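/// Extension trait to split a DMA peripheral into independent channels.
///
/// A typical setup sketch (not compiled; `dp` holds the device peripherals and `rcc` the
/// constrained RCC, both hypothetical names here):
///
/// ```ignore
/// let channels = dp.DMA1.split(&mut rcc.ahb1);
/// let mut ch = channels.4; // take one channel, e.g. for a serial TX transfer
/// ```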
336pub trait DmaExt {
337 type Channels;
338
339 fn split(self, ahb: &mut AHB1) -> Self::Channels;
340}
341
342pub trait TransferPayload {
343 fn start(&mut self);
344 fn stop(&mut self);
345}
346
347pub struct Transfer<MODE, BUFFER, PAYLOAD>
348where
349 PAYLOAD: TransferPayload,
350{
351 _mode: PhantomData<MODE>,
352 buffer: BUFFER,
353 payload: PAYLOAD,
354}
355
356impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, PAYLOAD>
357where
358 PAYLOAD: TransferPayload,
359{
360 pub(crate) fn r(buffer: BUFFER, payload: PAYLOAD) -> Self {
361 Transfer {
362 _mode: PhantomData,
363 buffer,
364 payload,
365 }
366 }
367}
368
369impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, PAYLOAD>
370where
371 PAYLOAD: TransferPayload,
372{
373 pub(crate) fn w(buffer: BUFFER, payload: PAYLOAD) -> Self {
374 Transfer {
375 _mode: PhantomData,
376 buffer,
377 payload,
378 }
379 }
380}
381
382impl<BUFFER, PAYLOAD> Transfer<RW, BUFFER, PAYLOAD>
383where
384 PAYLOAD: TransferPayload,
385{
386 pub(crate) fn rw(buffer: BUFFER, payload: PAYLOAD) -> Self {
387 Transfer {
388 _mode: PhantomData,
389 buffer,
390 payload,
391 }
392 }
393}
394
395impl<MODE, BUFFER, PAYLOAD> Drop for Transfer<MODE, BUFFER, PAYLOAD>
396where
397 PAYLOAD: TransferPayload,
398{
399 fn drop(&mut self) {
400 self.payload.stop();
401 compiler_fence(Ordering::SeqCst);
402 }
403}
404
405impl<MODE, BUFFER, PAYLOAD> Transfer<MODE, BUFFER, PAYLOAD>
406where
407 PAYLOAD: TransferPayload,
408{
409 pub(crate) fn extract_inner_without_drop(self) -> (BUFFER, PAYLOAD) {
410 // `Transfer` needs to have a `Drop` implementation, because we accept
411 // managed buffers that can free their memory on drop. Because of that
412 // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
413 // and `mem::forget`.
414 //
415 // NOTE(unsafe) There is no panic branch between getting the resources
416 // and forgetting `self`.
417 unsafe {
418 // We cannot use mem::replace as we do not have valid objects to replace with
419 let buffer = ptr::read(&self.buffer);
420 let payload = ptr::read(&self.payload);
421 core::mem::forget(self);
422 (buffer, payload)
423 }
424 }
425}
426
427/// Read transfer
428pub struct R;
429
430/// Write transfer
431pub struct W;
432
433/// Read/Write transfer
434pub struct RW;
435
436macro_rules! for_all_pairs {
437 ($mac:ident: $($x:ident)*) => {
438 // Duplicate the list
439 for_all_pairs!(@inner $mac: $($x)*; $($x)*);
440 };
441
442 // The end of iteration: we exhausted the list
443 (@inner $mac:ident: ; $($x:ident)*) => {};
444
445 // The head/tail recursion: pick the first element of the first list
446 // and recursively do it for the tail.
447 (@inner $mac:ident: $head:ident $($tail:ident)*; $($x:ident)*) => {
448 $(
449 $mac!($head $x);
450 )*
451 for_all_pairs!(@inner $mac: $($tail)*; $($x)*);
452 };
453}
454
455macro_rules! rx_tx_channel_mapping {
456 ($CH_A:ident $CH_B:ident) => {
457 impl<BUFFER, PAYLOAD> Transfer<RW, BUFFER, RxTxDma<PAYLOAD, $CH_A, $CH_B>>
458 where
459 RxTxDma<PAYLOAD, $CH_A, $CH_B>: TransferPayload,
460 {
461 pub fn is_done(&self) -> bool {
462 !self.payload.rx_channel.in_progress() && !self.payload.tx_channel.in_progress()
463 }
464
465 pub fn wait(mut self) -> (BUFFER, RxTxDma<PAYLOAD, $CH_A, $CH_B>) {
466 // XXX should we check for transfer errors here?
467 // The manual says "A DMA transfer error can be generated by reading
468 // from or writing to a reserved address space". I think it's impossible
469 // to get to that state with our type safe API and *safe* Rust.
470 while !self.is_done() {}
471
472 self.payload.stop();
473
474 // TODO can we weaken this compiler barrier?
475 // NOTE(compiler_fence) operations on `buffer` should not be reordered
476 // before the previous statement, which marks the DMA transfer as done
477 atomic::compiler_fence(Ordering::SeqCst);
478
479 // `Transfer` has a `Drop` implementation because we accept
480 // managed buffers that can free their memory on drop. Because of that
481 // we can't move out of the `Transfer`'s fields directly.
482 self.extract_inner_without_drop()
483 }
484 }
485
486 impl<BUFFER, PAYLOAD> Transfer<RW, BUFFER, RxTxDma<PAYLOAD, $CH_A, $CH_B>>
487 where
488 RxTxDma<PAYLOAD, $CH_A, $CH_B>: TransferPayload,
489 {
490 pub fn peek<T>(&self) -> &[T]
491 where
492 BUFFER: AsRef<[T]>,
493 {
494 let pending = self.payload.rx_channel.get_cndtr() as usize;
495
496 let capacity = self.buffer.as_ref().len();
497
498 &self.buffer.as_ref()[..(capacity - pending)]
499 }
500 }
501 };
502}
503
504macro_rules! dma {
505 ($($DMAX:ident: ($dmaX:ident, {
506 $($CX:ident: (
507 $ccrX:ident,
508 $CCRX:ident,
509 $cndtrX:ident,
510 $CNDTRX:ident,
511 $cparX:ident,
512 $CPARX:ident,
513 $cmarX:ident,
514 $CMARX:ident,
515 $htifX:ident,
516 $tcifX:ident,
517 $chtifX:ident,
518 $ctcifX:ident,
519 $cgifX:ident,
520 $teifX:ident,
521 $cteifX:ident
522 ),)+
523 }),)+) => {
524 $(
525 pub mod $dmaX {
526 use core::sync::atomic::{self, Ordering};
527 use crate::stm32::{$DMAX, dma1};
528 use core::ops::DerefMut;
529 use core::ptr;
530 use stable_deref_trait::StableDeref;
531
532 use crate::dma::{CircBuffer, FrameReader, FrameSender, DMAFrame, DmaExt, Error, Event, Transfer, W, R, RW, RxDma, RxTxDma, TxDma, TransferPayload};
533 use crate::rcc::{AHB1, Enable};
534
535 #[allow(clippy::manual_non_exhaustive)]
536 pub struct Channels((), $(pub $CX),+);
537
538 for_all_pairs!(rx_tx_channel_mapping: $($CX)+);
539
540 $(
541 /// A singleton that represents a single DMAx channel (channel X in this case)
542 ///
543 /// This singleton has exclusive access to the registers of the DMAx channel X
544 pub struct $CX;
545
546 impl $CX {
547 /// Set the peripheral `address` for the DMA transfer
548 ///
549 /// `inc` indicates whether the address will be incremented after every transfer
550 #[inline]
551 pub fn set_peripheral_address(&mut self, address: u32, inc: bool) {
552 self.cpar().write(|w|
553 unsafe { w.pa().bits(address) }
554 );
555 self.ccr().modify(|_, w| w.pinc().bit(inc) );
556 }
557
558 /// Set the memory `address` where data will be read from or written to
559 ///
560 /// `inc` indicates whether the address will be incremented after every transfer
561 #[inline]
562 pub fn set_memory_address(&mut self, address: u32, inc: bool) {
563 self.cmar().write(|w|
564 unsafe { w.ma().bits(address) }
565 );
566 self.ccr().modify(|_, w| w.minc().bit(inc) );
567 }
568
569 /// Set the number of transfers that make up one transaction
570 #[inline]
571 pub fn set_transfer_length(&mut self, len: u16) {
572 self.cndtr().write(|w| w.ndt().bits(len));
573 }
574
575 /// Starts the DMA transfer
576 #[inline]
577 pub fn start(&mut self) {
578 self.ccr().modify(|_, w| w.en().set_bit() );
579 }
580
581 /// Stops the DMA transfer
582 #[inline]
583 pub fn stop(&mut self) {
584 self.ifcr().write(|w| w.$cgifX().set_bit());
585 self.ccr().modify(|_, w| w.en().clear_bit() );
586 }
587
588 /// Returns `true` if there's a transfer in progress
589 #[inline]
590 pub fn in_progress(&self) -> bool {
591 self.isr().$tcifX().bit_is_clear()
592 }
593
594 #[inline]
595 pub fn listen(&mut self, event: Event) {
596 match event {
597 Event::HalfTransfer => self.ccr().modify(|_, w| w.htie().set_bit()),
598 Event::TransferComplete => {
599 self.ccr().modify(|_, w| w.tcie().set_bit())
600 }
601 }
602 }
603
604 #[inline]
605 pub fn unlisten(&mut self, event: Event) {
606 match event {
607 Event::HalfTransfer => {
608 self.ccr().modify(|_, w| w.htie().clear_bit())
609 },
610 Event::TransferComplete => {
611 self.ccr().modify(|_, w| w.tcie().clear_bit())
612 }
613 }
614 }
615
616 #[inline]
617 pub(crate) fn isr(&self) -> dma1::isr::R {
618 // NOTE(unsafe) atomic read with no side effects
619 unsafe { (*$DMAX::ptr()).isr.read() }
620 }
621
622 #[inline]
623 pub(crate) fn ifcr(&self) -> &dma1::IFCR {
624 unsafe { &(*$DMAX::ptr()).ifcr }
625 }
626
627 #[inline]
628 pub(crate) fn ccr(&mut self) -> &dma1::$CCRX {
629 unsafe { &(*$DMAX::ptr()).$ccrX }
630 }
631
632 #[inline]
633 pub(crate) fn cndtr(&mut self) -> &dma1::$CNDTRX {
634 unsafe { &(*$DMAX::ptr()).$cndtrX }
635 }
636
637 #[inline]
638 pub(crate) fn cpar(&mut self) -> &dma1::$CPARX {
639 unsafe { &(*$DMAX::ptr()).$cparX }
640 }
641
642 #[inline]
643 pub(crate) fn cmar(&mut self) -> &dma1::$CMARX {
644 unsafe { &(*$DMAX::ptr()).$cmarX }
645 }
646
647 #[cfg(not(any(
648 // feature = "stm32l4p5",
649 // feature = "stm32l4q5",
650 // feature = "stm32l4r5",
651 // feature = "stm32l4s5",
652 // feature = "stm32l4r7",
653 // feature = "stm32l4s7",
654 feature = "stm32l4r9",
655 feature = "stm32l4s9"
656 )))]
657 #[inline]
658 pub(crate) fn cselr(&mut self) -> &dma1::CSELR {
659 unsafe { &(*$DMAX::ptr()).cselr }
660 }
661
662 #[inline]
663 pub(crate) fn get_cndtr(&self) -> u32 {
664 // NOTE(unsafe) atomic read with no side effects
665 unsafe { (*$DMAX::ptr()).$cndtrX.read().bits() }
666 }
667
668 }
669
670 impl<BUFFER, PAYLOAD, const N: usize> FrameSender<BUFFER, TxDma<PAYLOAD, $CX>, N>
671 where
672 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
673 TxDma<PAYLOAD, $CX>: TransferPayload,
674 {
675 /// This method should be called from the transfer complete interrupt of the
676 /// DMA; it will return the sent frame if the transfer was truly completed.
677 pub fn transfer_complete_interrupt(
678 &mut self,
679 ) -> Option<BUFFER> {
680
681 // Clear ISR flag (Transfer Complete)
682 if !self.payload.channel.in_progress() {
683 self.payload.channel.ifcr().write(|w| w.$ctcifX().set_bit());
684 } else {
685 // The old transfer is not complete
686 return None;
687 }
688
689 self.payload.channel.stop();
690
691 // NOTE(compiler_fence) operations on the DMA should not be reordered
692 // before the next statement, which takes the buffer from the DMA transfer.
693 atomic::compiler_fence(Ordering::SeqCst);
694
695 // Return the old buffer for the user to do what they want with it
696 self.buffer.take()
697 }
698
699 /// Returns `true` if there is an ongoing transfer.
700 #[inline]
701 pub fn ongoing_transfer(&self) -> bool {
702 self.buffer.is_some()
703 }
704
705 /// Send a frame. Will return `Err(frame)` if there was already an ongoing
706 /// transaction or if the buffer has not been read out.
707 pub fn send(
708 &mut self,
709 frame: BUFFER,
710 ) -> Result<(), BUFFER> {
711 if self.ongoing_transfer() {
712 // The old transfer is not complete
713 return Err(frame);
714 }
715
716 let new_buf = &*frame;
717 self.payload.channel.set_memory_address(new_buf.buffer_as_ptr() as u32, true);
718 self.payload.channel.set_transfer_length(new_buf.len() as u16);
719
720 // If there has been an error, clear the error flag to let the next
721 // transaction start
722 if self.payload.channel.isr().$teifX().bit_is_set() {
723 self.payload.channel.ifcr().write(|w| w.$cteifX().set_bit());
724 }
725
726 // NOTE(compiler_fence) operations on `buffer` should not be reordered after
727 // the next statement, which starts the DMA transfer
728 atomic::compiler_fence(Ordering::Release);
729
730 self.payload.channel.start();
731
732 self.buffer = Some(frame);
733
734 Ok(())
735 }
736 }
737
738 impl<BUFFER, PAYLOAD, const N: usize> FrameReader<BUFFER, RxDma<PAYLOAD, $CX>, N>
739 where
740 BUFFER: Sized + StableDeref<Target = DMAFrame<N>> + DerefMut + 'static,
741 RxDma<PAYLOAD, $CX>: TransferPayload,
742 {
743 /// This function should be called from the transfer complete interrupt of
744 /// the corresponding DMA channel.
745 ///
746 /// Returns the full buffer received by the USART.
747 #[inline]
748 pub fn transfer_complete_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
749 self.internal_interrupt(next_frame, false)
750 }
751
752 /// This function should be called from the character match interrupt of
753 /// the corresponding USART
754 ///
755 /// Returns the buffer received by the USART, including the matching
756 /// character.
757 #[inline]
758 pub fn character_match_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
759 self.internal_interrupt(next_frame, true)
760 }
761
762 /// This function should be called from the receiver timeout interrupt of
763 /// the corresponding USART
764 ///
765 /// Returns the buffer received by the USART.
766 #[inline]
767 pub fn receiver_timeout_interrupt(&mut self, next_frame: BUFFER) -> BUFFER {
768 self.internal_interrupt(next_frame, false)
769 }
770
771 fn internal_interrupt(
772 &mut self,
773 mut next_frame: BUFFER,
774 character_match_interrupt: bool,
775 ) -> BUFFER {
776 let old_buf = &mut *self.buffer;
777 let new_buf = &mut *next_frame;
778 new_buf.clear();
779
780 // Clear ISR flag (Transfer Complete)
781 if !self.payload.channel.in_progress() {
782 self.payload.channel.ifcr().write(|w| w.$ctcifX().set_bit());
783 } else if character_match_interrupt {
784 // 1. If DMA not done and there was a character match interrupt,
785 // let the DMA flush a little and then halt transfer.
786 //
787 // This is to alleviate the race condition between the character
788 // match interrupt and the DMA memory transfer.
789 let left_in_buffer = self.payload.channel.get_cndtr() as usize;
790
791 for _ in 0..5 {
792 let now_left = self.payload.channel.get_cndtr() as usize;
793
794 if left_in_buffer - now_left >= 4 {
795 // We have gotten 4 extra characters flushed
796 break;
797 }
798 }
799 }
800
801 self.payload.channel.stop();
802
803 // NOTE(compiler_fence) operations on `buffer` should not be reordered before
804 // the previous statement, which stops the DMA transfer
805 atomic::compiler_fence(Ordering::SeqCst);
806
807 let left_in_buffer = self.payload.channel.get_cndtr() as usize;
808 let got_data_len = old_buf.max_len() - left_in_buffer; // How many transfers were completed = how many bytes are available
809 unsafe {
810 old_buf.set_len(got_data_len);
811 }
812
813 // 2. Check DMA race condition by finding matched character, and that
814 // the length is larger than 0
815 let len = if character_match_interrupt && got_data_len > 0 {
816 let search_buf = old_buf.read();
817
818 // Search from the end
819 let ch = self.matching_character;
820 if let Some(pos) = search_buf.iter().rposition(|&x| x == ch) {
821 pos+1
822 } else {
823 // No character match found
824 0
825 }
826 } else {
827 old_buf.len()
828 };
829
830 // 3. Start DMA again
831 let diff = if len < got_data_len {
832 // We received some extra characters that belong to the next frame; move
833 // them into the new buffer
834 let diff = got_data_len - len;
835
836 let new_buf_ptr = new_buf.buffer_as_mut_ptr();
837 let old_buf_ptr = old_buf.buffer_as_ptr();
838
839 // new_buf[0..diff].copy_from_slice(&old_buf[len..got_data_len]);
840 unsafe {
841 ptr::copy_nonoverlapping(old_buf_ptr.add(len), new_buf_ptr, diff);
842 }
843
844 diff
845 } else {
846 0
847 };
848
849 self.payload.channel.set_memory_address(unsafe { new_buf.buffer_as_ptr().add(diff) } as u32, true);
850 self.payload.channel.set_transfer_length((new_buf.max_len() - diff) as u16);
851 unsafe { old_buf.set_len(got_data_len - diff) };
852 let received_buffer = core::mem::replace(&mut self.buffer, next_frame);
853
854 // NOTE(compiler_fence) operations on `buffer` should not be reordered after
855 // the next statement, which starts the DMA transfer
856 atomic::compiler_fence(Ordering::Release);
857
858 self.payload.channel.start();
859
860 // 4. Return full frame
861 received_buffer
862 }
863 }
864
865 impl<B, PAYLOAD> CircBuffer<B, RxDma<PAYLOAD, $CX>>
866 where
867 RxDma<PAYLOAD, $CX>: TransferPayload,
868 {
869 /// Determines if the write index passed the given `mark` when it moved
870 /// from `previous` to `current` with wrapping behaviour at `wrap`.
871 /// When `current` reaches `mark` (`current == mark`), this method already
872 /// returns `true`.
873 fn passed_mark(&self, mut previous: usize, mut current: usize, mark: usize, wrap: usize) -> bool {
874 // We have three indexes mark (m), previous (p) and current (c) so
875 // there are fac(3)=6 possibilities how those can be ordered. For both
876 // cases (passed, !passed), there are three wrapped variations each:
877 // !passed: 1. m <= p <= c, 2. c < m <= p, 3. p <= c < m
878 // passed: 1. m <= c < p, 2. p < m <= c, 3. c < p < m
879 // By enforcing p >= m and c >= m (reverting the wrap), we only have to
880 // check the first case.
881 if previous < mark {
882 previous += wrap;
883 }
884 if current < mark {
885 current += wrap;
886 }
887 current < previous && current >= mark
888 }
889
890 /// Reads and removes the available contents of the DMA buffer into `buf`.
891 /// Returns `Err(Error::Overrun)` if an overrun is detected, but there is no
892 /// guarantee that every overrun can be detected.
893 /// On success, returns the number of words written to `buf`.
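 ///
 /// A polling sketch (names hypothetical; `circ` is the `CircBuffer` returned by
 /// `circ_read`):
 ///
 /// ```ignore
 /// let mut scratch = [0u8; 32];
 /// match circ.read(&mut scratch) {
 ///     Ok(n) => process(&scratch[..n]),
 ///     Err(Error::Overrun) => { /* data was lost; resynchronize */ }
 ///     Err(_) => {}
 /// }
 /// ```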
894 pub fn read<T>(&mut self, buf: &mut [T]) -> Result<usize, Error>
895 where
896 B: AsRef<[T]>,
897 T: Copy,
898 {
899 // We do our best to figure out when an overrun occurred but without
900 // risking false positives.
901 //
902 // One possibility to detect an overrun is by examining the read- and
903 // write-indexes: If the write-index passes the read-index, that is an
904 // overrun condition because an unread value is overwritten. This check
905 // can fail if the write-index passed the read-index but due to
906 // wrapping, this can not be detected. For example, this can happen if
907 // `capacity` many words were written since the last read which looks
908 // like no word has been written.
909 //
910 // Another possibility to detect overruns is by checking the
911 // TransferComplete and HalfTransferComplete flags. For example, the
912 // TransferComplete flag is set when the write-index wraps around so
913 // whenever we encounter this flag the new write-index should be
914 // smaller than the previous one. If it is not, more than `capacity`
915 // many words must have been written which definitely must be an
916 // overrun. Another possibility to formulate this condition is to check
917 // wheter the write-index passed index 0. A similar condition can be
918 // formulated for the HalfTransferComplete flag, i.e. check whether the
919 // write-index passed index capacity/2.
920 //
921 // Unfortunately, even both checks together cannot guarantee that we
922 // detect all overruns.
923 // Example:
924 // read = 2, write = 3, 2*capacity-2 words written => write = 1.
925 let capacity = self.buffer.as_ref().len();
926 // We read the flags before reading the current write-index because if
927 // another word is written between those two accesses, this ordering
928 // prevents a false positive overrun error.
929 let isr = self.payload.channel.isr();
930 let half_complete_flag = isr.$htifX().bit_is_set();
931 if half_complete_flag {
932 self.payload.channel.ifcr().write(|w| w.$chtifX().set_bit());
933 }
934 let transfer_complete_flag = isr.$tcifX().bit_is_set();
935 if transfer_complete_flag {
936 self.payload.channel.ifcr().write(|w| w.$ctcifX().set_bit());
937 }
938 let write_current = capacity - self.payload.channel.get_cndtr() as usize;
939 // Copy the data before examining the overrun conditions. If the
940 // overrun happens shortly after the flags and write-index were read,
941 // we cannot detect it anyway. So we can only hope that we have
942 // already read the word(s) that will be overwritten.
943 let available = if write_current >= self.read_index {
944 write_current - self.read_index
945 } else {
946 capacity + write_current - self.read_index
947 };
948 let read_len = core::cmp::min(available, buf.len());
949 if self.read_index + read_len <= capacity {
950 // non-wrapping read
951 buf[..read_len].copy_from_slice(&self.buffer.as_ref()[self.read_index..self.read_index+read_len]);
952 } else {
953 // wrapping read
954 let first_read_len = capacity - self.read_index;
955 let second_read_len = read_len - first_read_len;
956 buf[..first_read_len].copy_from_slice(&self.buffer.as_ref()[self.read_index..]);
957 buf[first_read_len..read_len].copy_from_slice(&self.buffer.as_ref()[..second_read_len]);
958 }
959 // For checking the overrun conditions, it is important that we use the
960 // old read_index so do not increment it yet but check overrun
961 // conditions first.
962 // For odd buffer sizes, the half-complete flag is set at
963 // ceil(capacity/2).
964 let overrun =
965 self.passed_mark(self.write_previous, write_current, self.read_index, capacity)
966 || (transfer_complete_flag && !self.passed_mark(self.write_previous, write_current, 0, capacity))
967 || (half_complete_flag && !self.passed_mark(self.write_previous, write_current, (capacity+1)/2, capacity));
968 self.write_previous = write_current;
969 if overrun {
970 self.read_index = write_current;
971 Err(Error::Overrun)
972 } else {
973 self.read_index += read_len;
974 if self.read_index >= capacity {
975 self.read_index -= capacity;
976 }
977 Ok(read_len)
978 }
979 }
980
981 /// Stops the transfer and returns the underlying buffer and RxDma
982 pub fn stop(mut self) -> (&'static mut B, RxDma<PAYLOAD, $CX>) {
983 self.payload.stop();
984
985 (self.buffer, self.payload)
986 }
987 }
988
989 impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, RxDma<PAYLOAD, $CX>>
990 where
991 RxDma<PAYLOAD, $CX>: TransferPayload,
992 {
993 pub fn is_done(&self) -> bool {
994 !self.payload.channel.in_progress()
995 }
996
997 pub fn wait(mut self) -> (BUFFER, RxDma<PAYLOAD, $CX>) {
998 // XXX should we check for transfer errors here?
999 // The manual says "A DMA transfer error can be generated by reading
1000 // from or writing to a reserved address space". I think it's impossible
1001 // to get to that state with our type safe API and *safe* Rust.
1002 while !self.is_done() {}
1003
1004 self.payload.stop();
1005
1006 // TODO can we weaken this compiler barrier?
1007 // NOTE(compiler_fence) operations on `buffer` should not be reordered
1008 // before the previous statement, which marks the DMA transfer as done
1009 atomic::compiler_fence(Ordering::SeqCst);
1010
1011 // `Transfer` has a `Drop` implementation because we accept
1012 // managed buffers that can free their memory on drop. Because of that
1013 // we can't move out of the `Transfer`'s fields directly.
1014 self.extract_inner_without_drop()
1015 }
1016 }
1017
1018 impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, TxDma<PAYLOAD, $CX>>
1019 where
1020 TxDma<PAYLOAD, $CX>: TransferPayload,
1021 {
1022 pub fn is_done(&self) -> bool {
1023 !self.payload.channel.in_progress()
1024 }
1025
1026 pub fn wait(mut self) -> (BUFFER, TxDma<PAYLOAD, $CX>) {
1027 // XXX should we check for transfer errors here?
1028 // The manual says "A DMA transfer error can be generated by reading
1029 // from or writing to a reserved address space". I think it's impossible
1030 // to get to that state with our type safe API and *safe* Rust.
1031 while !self.is_done() {}
1032
1033 self.payload.stop();
1034
1035 // TODO can we weaken this compiler barrier?
1036 // NOTE(compiler_fence) operations on `buffer` should not be reordered
1037 // before the previous statement, which marks the DMA transfer as done
1038 atomic::compiler_fence(Ordering::SeqCst);
1039
1040 // `Transfer` has a `Drop` implementation because we accept
1041 // managed buffers that can free their memory on drop. Because of that
1042 // we can't move out of the `Transfer`'s fields directly.
1043 self.extract_inner_without_drop()
1044 }
1045 }
1046
1047 impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, RxDma<PAYLOAD, $CX>>
1048 where
1049 RxDma<PAYLOAD, $CX>: TransferPayload,
1050 {
1051 pub fn peek<T>(&self) -> &[T]
1052 where
1053 BUFFER: AsRef<[T]>,
1054 {
1055 let pending = self.payload.channel.get_cndtr() as usize;
1056
1057 let capacity = self.buffer.as_ref().len();
1058
1059 &self.buffer.as_ref()[..(capacity - pending)]
1060 }
1061 }
1062
1063 impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, TxDma<PAYLOAD, $CX>>
1064 where
1065 TxDma<PAYLOAD, $CX>: TransferPayload,
1066 {
1067 pub fn peek<T>(&self) -> &[T]
1068 where
1069 BUFFER: AsRef<[T]>
1070 {
1071 let pending = self.payload.channel.get_cndtr() as usize;
1072
1073 let capacity = self.buffer.as_ref().len();
1074
1075 &self.buffer.as_ref()[..(capacity - pending)]
1076 }
1077 }
1078 )+
1079
1080 impl DmaExt for $DMAX {
1081 type Channels = Channels;
1082
1083 fn split(self, ahb: &mut AHB1) -> Channels {
1084 <$DMAX>::enable(ahb);
1085
1086 #[cfg(any(
1087 // feature = "stm32l4p5",
1088 // feature = "stm32l4q5",
1089 // feature = "stm32l4r5",
1090 // feature = "stm32l4s5",
1091 // feature = "stm32l4r7",
1092 // feature = "stm32l4s7",
1093 feature = "stm32l4r9",
1094 feature = "stm32l4s9"
1095 ))]
1096 ahb.enr().modify(|_, w| w.dmamux1en().set_bit());
1097
1098 // reset the DMA control registers (stops all on-going transfers)
1099 $(
1100 self.$ccrX.reset();
1101 )+
1102
1103 Channels((), $($CX { }),+)
1104 }
1105 }
1106 }
1107 )+
1108 }
1109}
1110
1111dma! {
1112 DMA1: (dma1, {
1113 C1: (
1114 ccr1, CCR1,
1115 cndtr1, CNDTR1,
1116 cpar1, CPAR1,
1117 cmar1, CMAR1,
1118 htif1, tcif1,
1119 chtif1, ctcif1, cgif1,
1120 teif1, cteif1
1121 ),
1122 C2: (
1123 ccr2, CCR2,
1124 cndtr2, CNDTR2,
1125 cpar2, CPAR2,
1126 cmar2, CMAR2,
1127 htif2, tcif2,
1128 chtif2, ctcif2, cgif2,
1129 teif2, cteif2
1130 ),
1131 C3: (
1132 ccr3, CCR3,
1133 cndtr3, CNDTR3,
1134 cpar3, CPAR3,
1135 cmar3, CMAR3,
1136 htif3, tcif3,
1137 chtif3, ctcif3, cgif3,
1138 teif3, cteif3
1139 ),
1140 C4: (
1141 ccr4, CCR4,
1142 cndtr4, CNDTR4,
1143 cpar4, CPAR4,
1144 cmar4, CMAR4,
1145 htif4, tcif4,
1146 chtif4, ctcif4, cgif4,
1147 teif4, cteif4
1148 ),
1149 C5: (
1150 ccr5, CCR5,
1151 cndtr5, CNDTR5,
1152 cpar5, CPAR5,
1153 cmar5, CMAR5,
1154 htif5, tcif5,
1155 chtif5, ctcif5, cgif5,
1156 teif5, cteif5
1157 ),
1158 C6: (
1159 ccr6, CCR6,
1160 cndtr6, CNDTR6,
1161 cpar6, CPAR6,
1162 cmar6, CMAR6,
1163 htif6, tcif6,
1164 chtif6, ctcif6, cgif6,
1165 teif6, cteif6
1166 ),
1167 C7: (
1168 ccr7, CCR7,
1169 cndtr7, CNDTR7,
1170 cpar7, CPAR7,
1171 cmar7, CMAR7,
1172 htif7, tcif7,
1173 chtif7, ctcif7, cgif7,
1174 teif7, cteif7
1175 ),
1176 }),
1177 DMA2: (dma2, {
1178 C1: (
1179 ccr1, CCR1,
1180 cndtr1, CNDTR1,
1181 cpar1, CPAR1,
1182 cmar1, CMAR1,
1183 htif1, tcif1,
1184 chtif1, ctcif1, cgif1,
1185 teif1, cteif1
1186 ),
1187 C2: (
1188 ccr2, CCR2,
1189 cndtr2, CNDTR2,
1190 cpar2, CPAR2,
1191 cmar2, CMAR2,
1192 htif2, tcif2,
1193 chtif2, ctcif2, cgif2,
1194 teif2, cteif2
1195 ),
1196 C3: (
1197 ccr3, CCR3,
1198 cndtr3, CNDTR3,
1199 cpar3, CPAR3,
1200 cmar3, CMAR3,
1201 htif3, tcif3,
1202 chtif3, ctcif3, cgif3,
1203 teif3, cteif3
1204 ),
1205 C4: (
1206 ccr4, CCR4,
1207 cndtr4, CNDTR4,
1208 cpar4, CPAR4,
1209 cmar4, CMAR4,
1210 htif4, tcif4,
1211 chtif4, ctcif4, cgif4,
1212 teif4, cteif4
1213 ),
1214 C5: (
1215 ccr5, CCR5,
1216 cndtr5, CNDTR5,
1217 cpar5, CPAR5,
1218 cmar5, CMAR5,
1219 htif5, tcif5,
1220 chtif5, ctcif5, cgif5,
1221 teif5, cteif5
1222 ),
1223 C6: (
1224 ccr6, CCR6,
1225 cndtr6, CNDTR6,
1226 cpar6, CPAR6,
1227 cmar6, CMAR6,
1228 htif6, tcif6,
1229 chtif6, ctcif6, cgif6,
1230 teif6, cteif6
1231 ),
1232 C7: (
1233 ccr7, CCR7,
1234 cndtr7, CNDTR7,
1235 cpar7, CPAR7,
1236 cmar7, CMAR7,
1237 htif7, tcif7,
1238 chtif7, ctcif7, cgif7,
1239 teif7, cteif7
1240 ),
1241 }),
1242}
1243
1244/// DMA Receiver
1245pub struct RxDma<PAYLOAD, RXCH> {
1246 pub(crate) payload: PAYLOAD,
1247 pub channel: RXCH,
1248}
1249
1250/// DMA Transmitter
1251pub struct TxDma<PAYLOAD, TXCH> {
1252 pub(crate) payload: PAYLOAD,
1253 pub channel: TXCH,
1254}
1255
1256/// DMA Receiver/Transmitter
1257pub struct RxTxDma<PAYLOAD, RXCH, TXCH> {
1258 pub(crate) payload: PAYLOAD,
1259 pub rx_channel: RXCH,
1260 pub tx_channel: TXCH,
1261}
1262
1263pub trait Receive {
1264 type RxChannel;
1265 type TransmittedWord;
1266}
1267
1268pub trait Transmit {
1269 type TxChannel;
1270 type ReceivedWord;
1271}
1272
1273pub trait ReceiveTransmit {
1274 type RxChannel;
1275 type TxChannel;
1276 type TransferedWord;
1277}
1278
1279/// Trait for circular DMA reads from a peripheral to memory.
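///
/// A setup sketch (not compiled; `rx_dma` and the static buffer are hypothetical):
///
/// ```ignore
/// static mut RX_BUF: [u8; 64] = [0; 64];
///
/// // Start a circular DMA transfer; received data is drained later with `CircBuffer::read`.
/// let mut circ = rx_dma.circ_read(unsafe { &mut RX_BUF });
/// ```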
1280pub trait CircReadDma<B, RS>: Receive
1281where
1282 &'static mut B: StaticWriteBuffer<Word = RS>,
1283 B: 'static,
1284 Self: core::marker::Sized,
1285{
1286 fn circ_read(self, buffer: &'static mut B) -> CircBuffer<B, Self>;
1287}
1288
1289/// Trait for DMA reads from a peripheral to memory.
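///
/// A one-shot read sketch (not compiled; `rx_dma` is a peripheral RX half bundled with a
/// DMA channel, and the static buffer is hypothetical):
///
/// ```ignore
/// static mut BUF: [u8; 16] = [0; 16];
///
/// let transfer = rx_dma.read(unsafe { &mut BUF });
/// // ... do other work, or poll `transfer.is_done()` ...
/// let (buf, rx_dma) = transfer.wait(); // blocks until the transfer is complete
/// ```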
1290pub trait ReadDma<B, RS>: Receive
1291where
1292 B: StaticWriteBuffer<Word = RS>,
1293 Self: core::marker::Sized + TransferPayload,
1294{
1295 fn read(self, buffer: B) -> Transfer<W, B, Self>;
1296}
1297
1298/// Trait for DMA writes from memory to a peripheral.
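///
/// A one-shot write sketch (not compiled; `tx_dma` is a peripheral TX half bundled with a
/// DMA channel):
///
/// ```ignore
/// static DATA: [u8; 5] = *b"hello";
///
/// let transfer = tx_dma.write(&DATA);
/// let (_data, tx_dma) = transfer.wait(); // returns the buffer and the DMA handle
/// ```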
1299pub trait WriteDma<B, TS>: Transmit
1300where
1301 B: StaticReadBuffer<Word = TS>,
1302 Self: core::marker::Sized + TransferPayload,
1303{
1304 fn write(self, buffer: B) -> Transfer<R, B, Self>;
1305}
1306
1307/// Trait for DMA transfers that simultaneously write and read between memory and a peripheral.
1308pub trait TransferDma<B, TS>: ReceiveTransmit
1309where
1310 B: StaticWriteBuffer<Word = TS>,
1311 Self: core::marker::Sized + TransferPayload,
1312{
1313 fn transfer(self, buffer: B) -> Transfer<RW, B, Self>;
1314}