stm32f1_hal/common/dma/ringbuf_tx.rs

use super::*;
use crate::common::{critical_section::Mutex, ringbuf::*};
use core::cell::RefCell;
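// Note: `Arc`, `DmaChannel`, `DmaEvent`, and `OsInterface` are assumed to be
// re-exported through the `super::*` glob, and `Producer`, `Consumer`, and
// `ChunkError` through `ringbuf::*` (an SPSC ring buffer with an rtrb-style
// chunk API).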

/// Splits a DMA channel and a ring buffer into a writer half (task side)
/// and a loader half (interrupt side) for DMA-driven transmission.
pub struct DmaRingbufTx {}

impl DmaRingbufTx {
    #[allow(clippy::new_ret_no_self)]
    pub fn new<T, CH, OS: OsInterface>(
        mut ch: CH,
        peripheral_addr: usize,
        buf_size: usize,
        notifier: OS::Notifier,
    ) -> (DmaRingbufTxWriter<T, CH>, DmaRingbufTxLoader<T, CH, OS>)
    where
        T: Sized + Copy,
        CH: DmaChannel,
    {
        let (w, r) = RingBuffer::<T>::new(buf_size);
        // Bind the channel to the peripheral data register and enable the
        // transfer-complete interrupt that drives the loader half.
        ch.set_peripheral_address::<T>(peripheral_addr, true, false, false);
        ch.set_interrupt(DmaEvent::TransferComplete, true);
        // Shared state behind a critical-section mutex so the writer (task
        // context) and loader (interrupt context) can both touch the channel.
        let dma = Arc::new(Mutex::new(RefCell::new(DmaHolder::new(ch, r))));
        (
            DmaRingbufTxWriter {
                w,
                dma: Arc::clone(&dma),
            },
            DmaRingbufTxLoader { dma, notifier },
        )
    }
}
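// A minimal usage sketch (`dma_ch`, `USART1_DR`, `MyOs`, and `notifier` are
// assumed names for illustration, not part of this module). The writer half
// lives in task code; the loader half is driven from the DMA ISR.
//
// let (mut tx, mut loader) = DmaRingbufTx::new::<u8, _, MyOs>(
//     dma_ch,              // channel routed to the peripheral's TX request
//     USART1_DR as usize,  // peripheral data-register address
//     256,                 // ring buffer capacity in elements
//     notifier,            // wakes waiting tasks after each reload
// );
// let queued = tx.write(b"hello"); // queues bytes, starts DMA if idle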

/// Task-side handle: pushes data into the ring buffer and (re)starts the
/// DMA transfer when the channel is idle.
pub struct DmaRingbufTxWriter<T, CH> {
    w: Producer<T>,
    dma: Arc<Mutex<RefCell<DmaHolder<T, CH>>>>,
}

impl<T, CH> DmaRingbufTxWriter<T, CH>
where
    T: Sized + Copy,
    CH: DmaChannel,
{
    /// Pushes as much of `data` as fits and starts a transfer if none is
    /// running. Returns the number of items actually queued.
    #[inline]
    pub fn write(&mut self, data: &[T]) -> usize {
        let ret = self.w.push_slice(data);
        self.reload();
        ret
    }

    /// Returns `true` while queued data is still being drained by DMA.
    #[inline]
    pub fn in_progress(&self) -> bool {
        critical_section::with(|cs| self.dma.borrow_ref(cs).in_progress())
    }

    /// Stops the channel and marks the transfer as inactive.
    #[inline]
    pub fn cancel(&mut self) {
        critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            dma.work = false;
            dma.ch.stop();
        });
    }

    #[inline]
    fn reload(&mut self) {
        critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            dma.work = true;
            dma.reload();
        });
    }
}
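// Backpressure sketch (an assumed usage pattern, not part of this module):
// `write` returns how many items fit, so a task can loop, parking on the OS
// notifier until the ISR-side loader frees space. `wait_for_notify()` is a
// hypothetical blocking wait paired with `OS::Notifier`.
//
// let mut sent = 0;
// while sent < data.len() {
//     sent += tx.write(&data[sent..]);
//     if sent < data.len() {
//         wait_for_notify();
//     }
// }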

/// Interrupt-side handle: refills the DMA channel from the ring buffer on
/// transfer-complete events and notifies waiting tasks.
pub struct DmaRingbufTxLoader<T, CH, OS: OsInterface> {
    dma: Arc<Mutex<RefCell<DmaHolder<T, CH>>>>,
    notifier: OS::Notifier,
}

impl<T, CH, OS> DmaRingbufTxLoader<T, CH, OS>
where
    T: Sized + Copy,
    CH: DmaChannel,
    OS: OsInterface,
{
    /// Unconditionally tries to queue the next chunk into the channel.
    pub fn reload(&mut self) {
        critical_section::with(|cs| {
            self.dma.borrow_ref_mut(cs).reload();
        });
    }

    /// Called from the DMA interrupt handler: on a transfer-complete event,
    /// commits the finished chunk, queues the next one, and notifies any
    /// task waiting for buffer space.
    pub fn interrupt_reload(&mut self) {
        let reloaded = critical_section::with(|cs| {
            let mut dma = self.dma.borrow_ref_mut(cs);
            if dma.ch.is_interrupted(DmaEvent::TransferComplete) {
                dma.reload();
                true
            } else {
                false
            }
        });
        if reloaded {
            self.notifier.notify();
        }
    }
}
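// Hedged sketch of the interrupt-side wiring (the `DMA1_CHANNEL4` vector
// name, the `interrupt` attribute, and the `LOADER` static are assumptions
// for illustration; the actual wiring depends on the target PAC and OS):
//
// static LOADER: Mutex<RefCell<Option<DmaRingbufTxLoader<u8, TxCh, MyOs>>>> =
//     Mutex::new(RefCell::new(None));
//
// #[interrupt]
// fn DMA1_CHANNEL4() {
//     critical_section::with(|cs| {
//         if let Some(loader) = LOADER.borrow_ref_mut(cs).as_mut() {
//             loader.interrupt_reload();
//         }
//     });
// }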

/// Shared state guarded by the critical-section mutex.
struct DmaHolder<T, CH> {
    ch: CH,
    r: Consumer<T>,
    /// Length of the chunk currently owned by the DMA channel; committed
    /// (freed from the ring buffer) once the transfer completes.
    busy_len: usize,
    /// `false` after `cancel`; gates all reload activity.
    work: bool,
}

impl<T, CH> DmaHolder<T, CH>
where
    T: Sized + Copy,
    CH: DmaChannel,
{
    fn new(ch: CH, r: Consumer<T>) -> Self {
        Self {
            ch,
            r,
            busy_len: 0,
            work: false,
        }
    }

    fn in_progress(&self) -> bool {
        // The in-flight chunk is not committed until the transfer completes,
        // so the buffer stays non-empty while DMA is still draining it.
        self.work && !self.r.is_empty()
    }

    fn reload(&mut self) {
        if self.work && !self.ch.in_progress() {
            self.ch.stop();

            // Commit (free) the chunk the DMA just finished sending.
            if self.busy_len > 0 {
                let chunk = self.r.read_chunk(self.busy_len).unwrap();
                chunk.commit_all();
            }

            // Transfer at most half the buffer per DMA run so the producer
            // can keep filling the other half while this chunk drains; if
            // fewer slots are available, take what is there.
            let n = self.r.buffer().capacity() / 2;
            let chunk = match self.r.read_chunk(n) {
                Ok(chunk) => chunk,
                Err(ChunkError::TooFewSlots(n)) => self.r.read_chunk(n).unwrap(),
            };

            let data = chunk.get_slice();
            self.busy_len = data.len();
            if self.busy_len > 0 {
                self.ch.set_memory_buf_for_peripheral(data);
                self.ch.start();
            }
        }
    }
}