lpc8xx_hal/spi/dma.rs

use core::marker::PhantomData;

use void::Void;

use crate::{
    dma::{
        self,
        transfer::state::{Ready, Started},
    },
    init_state::Enabled,
    pac::dma0::channel::xfercfg::{DSTINC_A, SRCINC_A},
};

use super::{Instance, Master, SPI};

/// An SPI/DMA transfer
///
/// Since the SPI peripheral is capable of sending and receiving at the same
/// time, using the same buffer, it needs this bespoke `Transfer` struct, which
/// wraps and manages two [`dma::Transfer`] structs under the hood.
///
/// [`dma::Transfer`]: ../dma/struct.Transfer.html
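///
/// # Example
///
/// A minimal sketch of the intended flow, assuming a `Transfer<Ready, I>` has
/// already been constructed through the SPI API (the constructor here is
/// `pub(super)`, so the exact entry point is not shown):
///
/// ```ignore
/// // Start both underlying DMA transfers.
/// let transfer = transfer.start();
///
/// // Block until both transfers have finished, then recover the SPI
/// // instance, the shared buffer, and the two DMA channels.
/// let (spi, buffer, rx_channel, tx_channel) = transfer.wait();
/// ```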
pub struct Transfer<State, I: Instance> {
    spi: SPI<I, Enabled<Master>>,
    buffer: &'static mut [u8],
    rx_transfer: dma::Transfer<State, I::RxChannel, Rx<I>, dma::Buffer>,
    tx_transfer: dma::Transfer<State, I::TxChannel, dma::Buffer, Tx<I>>,
}

impl<I> Transfer<Ready, I>
where
    I: Instance,
{
    pub(super) fn new(
        spi: SPI<I, Enabled<Master>>,
        buffer: &'static mut [u8],
        rx_channel: dma::Channel<I::RxChannel, Enabled>,
        tx_channel: dma::Channel<I::TxChannel, Enabled>,
    ) -> Self {
        let ptr = buffer.as_mut_ptr();
        let len = buffer.len();

        // This is sound, since we know that the SPI peripheral will not access
        // the buffers concurrently, due to the way the protocol works:
        // - An SPI master will never receive a word unless it sends a word at
        //   the same time. That means the peripheral will always be ready to
        //   send a word _before_ it has received one.
        // - Once a word has been received, it will overwrite the word in the
        //   buffer that was sent during the same clock cycle. At that point,
        //   that part of the buffer will no longer be relevant for the sending
        //   side.
        let rx_buffer = unsafe { dma::Buffer::new(ptr, len) };
        let tx_buffer = unsafe { dma::Buffer::new(ptr, len) };

        let rx_transfer =
            dma::Transfer::new(rx_channel, Rx(PhantomData), rx_buffer);
        let tx_transfer =
            dma::Transfer::new(tx_channel, tx_buffer, Tx(PhantomData));

        Self {
            spi,
            buffer,
            rx_transfer,
            tx_transfer,
        }
    }

    /// Start the transfer
    ///
    /// Starts both DMA transfers that are part of this SPI transfer.
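    ///
    /// # Example
    ///
    /// A sketch, assuming `transfer` is a `Transfer<Ready, I>`:
    ///
    /// ```ignore
    /// // Starting consumes this `Transfer<Ready, I>` and returns a
    /// // `Transfer<Started, I>`, on which `wait` can then be called.
    /// let transfer = transfer.start();
    /// ```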
    pub fn start(self) -> Transfer<Started, I> {
        Transfer {
            spi: self.spi,
            buffer: self.buffer,
            rx_transfer: self.rx_transfer.start(),
            tx_transfer: self.tx_transfer.start(),
        }
    }
}

impl<I> Transfer<Started, I>
where
    I: Instance,
{
    /// Wait for the transfer to finish
    ///
    /// Waits until both underlying DMA transfers have finished.
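    ///
    /// # Example
    ///
    /// A sketch, assuming `transfer` is a started transfer; once `wait`
    /// returns, the received data has overwritten the sent data in the buffer:
    ///
    /// ```ignore
    /// let (spi, buffer, rx_channel, tx_channel) = transfer.wait();
    ///
    /// // `buffer` now holds the received words; `spi` and the two DMA
    /// // channels can be reused for further transfers.
    /// ```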
    pub fn wait(
        self,
    ) -> (
        SPI<I, Enabled<Master>>,
        &'static mut [u8],
        dma::Channel<I::RxChannel, Enabled>,
        dma::Channel<I::TxChannel, Enabled>,
    ) {
        let rx_payload = match self.rx_transfer.wait() {
            Ok(payload) => payload,
            // can't happen, as error type is `Void`
            Err(_) => unreachable!(),
        };
        let tx_payload = match self.tx_transfer.wait() {
            Ok(payload) => payload,
            // can't happen, as error type is `Void`
            Err(_) => unreachable!(),
        };

        (
            self.spi,
            self.buffer,
            rx_payload.channel,
            tx_payload.channel,
        )
    }
}

/// Represents the receive half of the SPI peripheral, used as the source of a
/// DMA transfer
struct Rx<I>(PhantomData<I>);

impl<I> crate::private::Sealed for Rx<I> {}

impl<I> dma::Source for Rx<I>
where
    I: Instance,
{
    type Error = Void;

    fn is_valid(&self) -> bool {
        true
    }

    fn is_empty(&self) -> bool {
        false
    }

    fn increment(&self) -> SRCINC_A {
        // The source is the peripheral's receive register, so the source
        // address must not be incremented.
        SRCINC_A::NO_INCREMENT
    }

    fn transfer_count(&self) -> Option<u16> {
        None
    }

    fn end_addr(&self) -> *const u8 {
        // Sound, because we're dereferencing a register address that is always
        // valid on the target hardware.
        (unsafe { &(*I::REGISTERS).rxdat }) as *const _ as *const u8
    }

    fn finish(&mut self) -> nb::Result<(), Self::Error> {
        Ok(())
    }
}

/// Represents the transmit half of the SPI peripheral, used as the destination
/// of a DMA transfer
struct Tx<I>(PhantomData<I>);

impl<I> crate::private::Sealed for Tx<I> {}

impl<I> dma::Dest for Tx<I>
where
    I: Instance,
{
    type Error = Void;

    fn is_valid(&self) -> bool {
        true
    }

    fn is_full(&self) -> bool {
        false
    }

    fn increment(&self) -> DSTINC_A {
        // The destination is the peripheral's transmit register, so the
        // destination address must not be incremented.
        DSTINC_A::NO_INCREMENT
    }

    fn transfer_count(&self) -> Option<u16> {
        None
    }

    fn end_addr(&mut self) -> *mut u8 {
        // Sound, because we're dereferencing a register address that is always
        // valid on the target hardware.
        (unsafe { &(*I::REGISTERS).txdat }) as *const _ as *mut u8
    }

    fn finish(&mut self) -> nb::Result<(), Self::Error> {
        Ok(())
    }
}