//! Direct Memory Access Engine

#![allow(dead_code)]

use core::marker::PhantomData;
use core::ops;

use crate::rcc::AHB1;

#[derive(Debug)]
pub enum Error {
    Overrun,
    BufferError,
    #[doc(hidden)]
    _Extensible,
}

pub enum Event {
    HalfTransfer,
    TransferComplete,
}

#[derive(Clone, Copy, PartialEq)]
pub enum Half {
    First,
    Second,
}

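/// A circular double buffer that is continuously filled by an on-going DMA
/// transfer; each half becomes readable while the other half is being written.
///
/// A sketch of typical use, assuming a `circ_read`-style constructor on a
/// DMA-capable peripheral (the constructor and `process` are hypothetical):
///
/// ```ignore
/// let mut circ = serial_rx.circ_read(channel, buffer);
/// loop {
///     circ.peek(|half, _| process(half)).ok();
/// }
/// ```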
pub struct CircBuffer<BUFFER, CHANNEL>
where
    BUFFER: 'static,
{
    buffer: &'static mut [BUFFER; 2],
    channel: CHANNEL,
    readable_half: Half,
    consumed_offset: usize,
}

impl<BUFFER, CHANNEL> CircBuffer<BUFFER, CHANNEL> {
    pub(crate) fn new(buf: &'static mut [BUFFER; 2], chan: CHANNEL) -> Self {
        CircBuffer {
            buffer: buf,
            channel: chan,
            readable_half: Half::Second,
            consumed_offset: 0,
        }
    }
}

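/// A reference that stays valid for the remainder of the program, either
/// shared (`&'static B`) or exclusive (`&'static mut B`); DMA transfers need
/// this because the engine keeps reading or writing after the call returns.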
pub trait Static<B> {
    fn borrow(&self) -> &B;
}

impl<B> Static<B> for &'static B {
    fn borrow(&self) -> &B {
        *self
    }
}

impl<B> Static<B> for &'static mut B {
    fn borrow(&self) -> &B {
        *self
    }
}

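/// Extension trait that splits a DMA peripheral into its independent channels.
///
/// A sketch of typical use; the peripheral and RCC names come from the
/// surrounding HAL and are assumptions here:
///
/// ```ignore
/// let dp = stm32::Peripherals::take().unwrap();
/// let mut rcc = dp.RCC.constrain();
/// let channels = dp.DMA1.split(&mut rcc.ahb1);
/// ```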
pub trait DmaExt {
    type Channels;

    fn split(self, ahb: &mut AHB1) -> Self::Channels;
}

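/// An on-going DMA transfer that owns the buffer, the channel and the
/// peripheral payload until it completes.
///
/// A sketch of typical use, assuming a `read_exact`-style constructor on some
/// DMA-capable peripheral (hypothetical):
///
/// ```ignore
/// let transfer = serial_rx.read_exact(channel, buffer);
/// // ... do other work while the DMA engine runs ...
/// let (buffer, channel, serial_rx) = transfer.wait(); // blocks until done
/// ```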
pub struct Transfer<MODE, BUFFER, CHANNEL, PAYLOAD> {
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    channel: CHANNEL,
    payload: PAYLOAD,
}

impl<BUFFER, CHANNEL, PAYLOAD> Transfer<R, BUFFER, CHANNEL, PAYLOAD> {
    pub(crate) fn r(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            channel,
            payload,
        }
    }
}

impl<BUFFER, CHANNEL, PAYLOAD> Transfer<W, BUFFER, CHANNEL, PAYLOAD> {
    pub(crate) fn w(buffer: BUFFER, channel: CHANNEL, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            channel,
            payload,
        }
    }
}

impl<BUFFER, CHANNEL, PAYLOAD> ops::Deref for Transfer<R, BUFFER, CHANNEL, PAYLOAD> {
    type Target = BUFFER;

    fn deref(&self) -> &BUFFER {
        &self.buffer
    }
}

/// Read transfer
pub struct R;

/// Write transfer
pub struct W;

macro_rules! dma {
    ($($DMAX:ident: ($dmaX:ident, $dmaXen:ident, $dmaXrst:ident, {
        $($CX:ident: (
            $ccrX:ident,
            $CCRX:ident,
            $cndtrX:ident,
            $CNDTRX:ident,
            $cparX:ident,
            $CPARX:ident,
            $cmarX:ident,
            $CMARX:ident,
            $htifX:ident,
            $tcifX:ident,
            $chtifX:ident,
            $ctcifX:ident,
            $cgifX:ident
        ),)+
    }),)+) => {
        $(
            pub mod $dmaX {
                use core::marker::Unsize;
                use core::sync::atomic::{self, Ordering};

                use crate::stm32::{$DMAX, dma1};

                use crate::dma::{CircBuffer, DmaExt, Error, Event, Half, Transfer, W};
                use crate::rcc::AHB1;

                // NOTE the private unit field keeps code outside this module
                // from constructing `Channels` out of thin air
                pub struct Channels((), $(pub $CX),+);

                $(
                    pub struct $CX { _0: () }

                    impl $CX {
                        pub fn listen(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => self.ccr().modify(|_, w| w.htie().set_bit()),
                                Event::TransferComplete => {
                                    self.ccr().modify(|_, w| w.tcie().set_bit())
                                }
                            }
                        }

                        pub fn unlisten(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => {
                                    self.ccr().modify(|_, w| w.htie().clear_bit())
                                }
                                Event::TransferComplete => {
                                    self.ccr().modify(|_, w| w.tcie().clear_bit())
                                }
                            }
                        }

                        pub(crate) fn isr(&self) -> dma1::isr::R {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { (*$DMAX::ptr()).isr.read() }
                        }

                        pub(crate) fn ifcr(&self) -> &dma1::IFCR {
                            unsafe { &(*$DMAX::ptr()).ifcr }
                        }

                        pub(crate) fn ccr(&mut self) -> &dma1::$CCRX {
                            unsafe { &(*$DMAX::ptr()).$ccrX }
                        }

                        pub(crate) fn cndtr(&mut self) -> &dma1::$CNDTRX {
                            unsafe { &(*$DMAX::ptr()).$cndtrX }
                        }

                        pub(crate) fn cpar(&mut self) -> &dma1::$CPARX {
                            unsafe { &(*$DMAX::ptr()).$cparX }
                        }

                        pub(crate) fn cmar(&mut self) -> &dma1::$CMARX {
                            unsafe { &(*$DMAX::ptr()).$cmarX }
                        }

                        pub(crate) fn cselr(&mut self) -> &dma1::CSELR {
                            unsafe { &(*$DMAX::ptr()).cselr }
                        }

                        pub(crate) fn get_cndtr(&self) -> u32 {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { (*$DMAX::ptr()).$cndtrX.read().bits() }
                        }
                    }

                    impl<B> CircBuffer<B, $CX> {
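                        /// Peeks into the half of the buffer that the DMA engine is
                        /// currently writing, passing the closure the data received
                        /// so far; the closure reports how many elements it consumed.
                        ///
                        /// A sketch of typical use; `circ` is a `CircBuffer` returned
                        /// by some DMA read constructor, and `find_frame` is a
                        /// hypothetical parser that may need more data:
                        ///
                        /// ```ignore
                        /// let frame = circ.partial_peek(|received, _half| {
                        ///     match find_frame(received) {
                        ///         Some((frame, len)) => Ok((len, Some(frame))), // consume `len` elements
                        ///         None => Ok((0, None)), // consume nothing; poll again later
                        ///     }
                        /// });
                        /// ```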
                        pub fn partial_peek<R, F, T>(&mut self, f: F) -> Result<R, Error>
                        where
                            F: FnOnce(&[T], Half) -> Result<(usize, R), ()>,
                            B: Unsize<[T]>,
                        {
                            let buf = match self.readable_half {
                                // NOTE inverted: peek into the half being _written_
                                Half::First => &self.buffer[1],
                                Half::Second => &self.buffer[0],
                            };
                            // NDTR counts down over the full double buffer, so
                            // `pending` can refer to either half
                            let pending = self.channel.get_cndtr() as usize;
                            let slice: &[T] = buf;
                            // `capacity` is the length of a single half
                            let capacity = slice.len();
                            let pending = if pending > capacity {
                                pending - capacity
                            } else {
                                pending
                            };
                            // index of the first element not yet written by the DMA engine
                            let end = capacity - pending;
                            // everything between the last partial read and the write
                            // position is readable
                            let slice = &slice[self.consumed_offset..end];
                            match f(slice, self.readable_half) {
                                Ok((l, r)) => {
                                    self.consumed_offset += l;
                                    Ok(r)
                                }
                                Err(_) => Err(Error::BufferError),
                            }
                        }

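                        /// Peeks into the readable half of the buffer, i.e. the half
                        /// that the DMA engine has finished filling.
                        ///
                        /// A sketch of typical use; `circ` is a `CircBuffer` and
                        /// `process` is a hypothetical handler:
                        ///
                        /// ```ignore
                        /// circ.peek(|half, _| process(half)).ok();
                        /// ```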
                        pub fn peek<R, F, T>(&mut self, f: F) -> Result<R, Error>
                        where
                            F: FnOnce(&[T], Half) -> R,
                            B: Unsize<[T]>,
                        {
                            let half_being_read = self.readable_half()?;
                            let buf = match half_being_read {
                                Half::First => &self.buffer[0],
                                Half::Second => &self.buffer[1],
                            };
                            let slice: &[T] = buf;
                            // skip whatever `partial_peek` already consumed, then
                            // reset the offset for the next half
                            let slice = &slice[self.consumed_offset..];
                            self.consumed_offset = 0;
                            Ok(f(slice, half_being_read))
                        }

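                        /// Returns the `Half` of the buffer that can currently be
                        /// read, or `Error::Overrun` if both halves have completed
                        /// since the last call, meaning data was lost.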
                        pub fn readable_half(&mut self) -> Result<Half, Error> {
                            let isr = self.channel.isr();
                            let first_half_is_done = isr.$htifX().bit_is_set();
                            let second_half_is_done = isr.$tcifX().bit_is_set();

                            if first_half_is_done && second_half_is_done {
                                return Err(Error::Overrun);
                            }

                            let last_read_half = self.readable_half;

                            Ok(match last_read_half {
                                Half::First => {
                                    if second_half_is_done {
                                        self.channel.ifcr().write(|w| w.$ctcifX().set_bit());

                                        self.readable_half = Half::Second;
                                        Half::Second
                                    } else {
                                        last_read_half
                                    }
                                }
                                Half::Second => {
                                    if first_half_is_done {
                                        self.channel.ifcr().write(|w| w.$chtifX().set_bit());

                                        self.readable_half = Half::First;
                                        Half::First
                                    } else {
                                        last_read_half
                                    }
                                }
                            })
                        }
                    }

                    impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, $CX, PAYLOAD> {
                        pub fn is_done(&self) -> bool {
                            self.channel.isr().$tcifX().bit_is_set()
                        }

                        pub fn wait(mut self) -> (BUFFER, $CX, PAYLOAD) {
                            // XXX should transfer errors be checked here? On error the
                            // hardware disables the channel, so the transfer-complete
                            // flag would never be set and this loop would not terminate
                            while !self.is_done() {}

                            self.channel.ifcr().write(|w| w.$cgifX().set_bit());

                            self.channel.ccr().modify(|_, w| w.en().clear_bit());

                            // NOTE(compiler_fence) accesses to `buffer` must not be
                            // reordered before the transfer-complete check above
                            atomic::compiler_fence(Ordering::SeqCst);

                            (self.buffer, self.channel, self.payload)
                        }
                    }

                    impl<BUFFER, PAYLOAD> Transfer<W, &'static mut BUFFER, $CX, PAYLOAD> {
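                        /// Returns the part of the buffer that the DMA engine has
                        /// filled so far (a `W` transfer writes from the peripheral
                        /// into memory).
                        ///
                        /// `CNDTR` holds the number of transfers still pending, so
                        /// the first `capacity - pending` elements are valid.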
                        pub fn peek<T>(&self) -> &[T]
                        where
                            BUFFER: Unsize<[T]>,
                        {
                            let pending = self.channel.get_cndtr() as usize;

                            let slice: &[T] = self.buffer;
                            let capacity = slice.len();

                            &slice[..(capacity - pending)]
                        }
                    }
                )+

                impl DmaExt for $DMAX {
                    type Channels = Channels;

                    fn split(self, ahb: &mut AHB1) -> Channels {
                        ahb.enr().modify(|_, w| w.$dmaXen().set_bit());

                        // reset the control registers of every channel, which stops
                        // any on-going transfer
                        $(
                            self.$ccrX.reset();
                        )+

                        Channels((), $($CX { _0: () }),+)
                    }
                }
            }
        )+
    }
}

dma! {
    DMA1: (dma1, dma1en, dma1rst, {
        C1: (
            ccr1, CCR1,
            cndtr1, CNDTR1,
            cpar1, CPAR1,
            cmar1, CMAR1,
            htif1, tcif1,
            chtif1, ctcif1, cgif1
        ),
        C2: (
            ccr2, CCR2,
            cndtr2, CNDTR2,
            cpar2, CPAR2,
            cmar2, CMAR2,
            htif2, tcif2,
            chtif2, ctcif2, cgif2
        ),
        C3: (
            ccr3, CCR3,
            cndtr3, CNDTR3,
            cpar3, CPAR3,
            cmar3, CMAR3,
            htif3, tcif3,
            chtif3, ctcif3, cgif3
        ),
        C4: (
            ccr4, CCR4,
            cndtr4, CNDTR4,
            cpar4, CPAR4,
            cmar4, CMAR4,
            htif4, tcif4,
            chtif4, ctcif4, cgif4
        ),
        C5: (
            ccr5, CCR5,
            cndtr5, CNDTR5,
            cpar5, CPAR5,
            cmar5, CMAR5,
            htif5, tcif5,
            chtif5, ctcif5, cgif5
        ),
        C6: (
            ccr6, CCR6,
            cndtr6, CNDTR6,
            cpar6, CPAR6,
            cmar6, CMAR6,
            htif6, tcif6,
            chtif6, ctcif6, cgif6
        ),
        C7: (
            ccr7, CCR7,
            cndtr7, CNDTR7,
            cpar7, CPAR7,
            cmar7, CMAR7,
            htif7, tcif7,
            chtif7, ctcif7, cgif7
        ),
    }),
}