stm32f1_hal/common/dma/
use super::*;
use crate::common::{os::*, ringbuf::*};
use core::cell::RefCell;
use critical_section::Mutex;

/// Builder for a DMA-driven transmit ring buffer. `new` returns a
/// writer/loader pair sharing one DMA channel behind a critical-section
/// mutex: the writer pushes data from task context, the loader re-arms
/// the channel, typically from the DMA interrupt.
pub struct DmaRingbufTx {}

impl DmaRingbufTx {
    #[allow(clippy::new_ret_no_self)]
    pub fn new<T, CH>(
        mut ch: CH,
        peripheral_addr: usize,
        buf_size: usize,
    ) -> (DmaRingbufTxWriter<T, CH>, DmaRingbufTxLoader<T, CH>)
    where
        T: Sized + Copy,
        CH: DmaChannel,
    {
        // Point the channel at the peripheral's data register.
        ch.set_peripheral_address::<T>(peripheral_addr, true, false, false);
        let (w, r) = RingBuffer::<T>::new(buf_size);
        let dma = Arc::new(Mutex::new(RefCell::new(DmaHolder::new(ch, r))));
        (
            DmaRingbufTxWriter {
                w,
                dma: Arc::clone(&dma),
            },
            DmaRingbufTxLoader { dma },
        )
    }
}
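
// Construction sketch (hypothetical values): `uart1_tx_ch` stands in for
// whatever `DmaChannel` implementation this HAL provides for the UART TX
// channel, and 0x4001_3804 is the USART1 data-register address on the
// STM32F1; substitute the handle and address for your target.
//
//     let (mut writer, loader) =
//         DmaRingbufTx::new::<u8, _>(uart1_tx_ch, 0x4001_3804, 256);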

/// Producer half: pushes data into the ring buffer and kicks the DMA
/// transfer when the channel is idle.
pub struct DmaRingbufTxWriter<T, CH> {
    w: Producer<T>,
    dma: Arc<Mutex<RefCell<DmaHolder<T, CH>>>>,
}

impl<T, CH> DmaRingbufTxWriter<T, CH>
where
    T: Sized + Copy,
    CH: DmaChannel,
{
    /// Copies as much of `data` as fits into the ring buffer, chains the
    /// DMA if it is idle, and returns the number of items accepted.
    #[inline]
    pub fn write(&mut self, data: &[T]) -> usize {
        let ret = self.w.push_slice(data);
        self.reload();
        ret
    }

    /// Returns `true` while the stream is active and data is still queued.
    #[inline]
    pub fn in_progress(&self) -> bool {
        critical_section::with(|cs| self.dma.borrow_ref(cs).in_progress())
    }

    /// Stops the DMA channel and marks the stream idle; the next `write`
    /// reactivates it.
    #[inline]
    pub fn cancel(&mut self) {
        critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            dma.work = false;
            dma.ch.stop();
        });
    }

    /// Marks the stream active and chains the next chunk if the channel
    /// is idle.
    #[inline]
    fn reload(&mut self) {
        critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            dma.work = true;
            dma.reload();
        });
    }
}

/// Consumer half intended for interrupt or polling context: re-arms the
/// DMA channel as transfers complete.
pub struct DmaRingbufTxLoader<T, CH> {
    dma: Arc<Mutex<RefCell<DmaHolder<T, CH>>>>,
}

impl<T, CH> DmaRingbufTxLoader<T, CH>
where
    T: Sized + Copy,
    CH: DmaChannel,
{
    /// Chains the next chunk if the channel is idle; suitable for polling.
    pub fn try_reload(&mut self) {
        critical_section::with(|cs| {
            self.dma.borrow_ref_mut(cs).reload();
        });
    }

    /// Call from the DMA channel's interrupt handler: on a transfer-complete
    /// event, commits the finished chunk and chains the next one.
    pub fn interrupt_reload(&mut self) {
        critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            if dma.ch.is_interrupted(DmaEvent::TransferComplete) {
                dma.reload();
            }
        });
    }
}

/// State shared by the writer and loader halves: the DMA channel, the
/// ring-buffer consumer, the size of the chunk currently owned by the DMA
/// (`busy_len`), and whether the stream is active (`work`).
struct DmaHolder<T, CH> {
    ch: CH,
    r: Consumer<T>,
    busy_len: usize,
    work: bool,
}

impl<T, CH> DmaHolder<T, CH>
where
    T: Sized + Copy,
    CH: DmaChannel,
{
    fn new(ch: CH, r: Consumer<T>) -> Self {
        Self {
            ch,
            r,
            busy_len: 0,
            work: false,
        }
    }

    fn in_progress(&self) -> bool {
        self.work && !self.r.is_empty()
    }

    fn reload(&mut self) {
        // Only chain when the stream is active and the channel has finished
        // (or never started) its current transfer.
        if self.work && !self.ch.in_progress() {
            // Release the chunk the DMA has just finished sending.
            if self.busy_len > 0 {
                let chunk = self.r.read_chunk(self.busy_len).unwrap();
                chunk.commit_all();
                self.busy_len = 0;
            }

            // Hand the next readable region to the DMA and restart the
            // channel; the chunk is committed on the next reload, once the
            // transfer has completed.
            let n = self.r.slots();
            if n > 0 {
                let chunk = self.r.read_chunk(n).unwrap();
                let data = chunk.get_slice();
                self.ch.stop();
                self.ch.set_memory_buf_for_peripheral(data);
                self.busy_len = data.len();
                self.ch.start();
            }
        }
    }
}
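
// Usage sketch (hypothetical wiring, not part of this module): the writer
// half lives in task context while the loader half is driven from the DMA
// interrupt, e.g. both stored in `static` cells set up at init. Assuming
// the USART1 TX pair from the constructor example above:
//
//     // Task context: enqueue bytes; the DMA starts if it was idle and the
//     // return value says how many items actually fit in the ring.
//     let accepted = writer.write(b"hello");
//
//     // DMA1 channel 4 interrupt handler: check TransferComplete, commit
//     // the finished chunk, and chain the next one.
//     fn dma1_channel4_irq() {
//         loader.interrupt_reload();
//     }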