#![macro_use]

//! Serial Peripheral Instance in slave mode (SPIS) driver.

use core::future::poll_fn;
use core::marker::PhantomData;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll;

use embassy_embedded_hal::SetConfig;
use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
pub use embedded_hal_02::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3};
pub use pac::spis::vals::Order as BitOrder;

use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
use crate::gpio::{self, convert_drive, AnyPin, OutputDrive, Pin as GpioPin, SealedPin as _};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::spis::vals;
use crate::util::slice_in_ram_or;
use crate::{interrupt, pac};

/// SPIS error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// TX buffer was too long.
    TxBufferTooLong,
    /// RX buffer was too long.
    RxBufferTooLong,
    /// EasyDMA can only read from data RAM, so transmit buffers located in flash will fail.
    BufferNotInRAM,
}

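/// SPIS peripheral configuration.
///
/// A minimal usage sketch for overriding a few fields; the values below are illustrative,
/// not recommendations:
///
/// ```ignore
/// let mut config = Config::default();
/// config.mode = MODE_1;        // CPOL = 0, CPHA = 1
/// config.orc = 0xFF;           // byte clocked out once the TX buffer is exhausted
/// config.auto_acquire = false; // don't re-acquire the semaphore automatically at transfer end
/// ```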
#[non_exhaustive]
pub struct Config {
    /// SPI mode.
    pub mode: Mode,

    /// Bit order.
    pub bit_order: BitOrder,

    /// Over-read character.
    ///
    /// If the master keeps clocking after the TX buffer has been fully transmitted,
    /// this byte is repeatedly clocked out on the MISO line.
    pub orc: u8,

    /// Default character.
    ///
    /// This byte is clocked out on the MISO line for ignored transactions, i.e. when the
    /// master selects the chip while the firmware still owns the semaphore.
    pub def: u8,

    /// Automatically acquire the semaphore on transfer end (the `END_ACQUIRE` shortcut).
    pub auto_acquire: bool,

    /// Drive strength for the MISO line.
    pub miso_drive: OutputDrive,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            mode: MODE_0,
            bit_order: BitOrder::MSB_FIRST,
            orc: 0x00,
            def: 0x00,
            auto_acquire: true,
            miso_drive: OutputDrive::HighDrive,
        }
    }
}

/// Interrupt handler.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // Wake on 'transfer end' and disable the interrupt; the future re-enables it as needed.
        if r.events_end().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_end(true));
        }

        // Wake on 'semaphore acquired' and disable the interrupt.
        if r.events_acquired().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_acquired(true));
        }
    }
}

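/// SPIS (SPI slave) driver.
///
/// A hedged construction sketch: the interrupt name, peripheral and pin assignments below are
/// illustrative and chip-dependent, they are not taken from this module:
///
/// ```ignore
/// use embassy_nrf::{bind_interrupts, peripherals, spis};
///
/// bind_interrupts!(struct Irqs {
///     SPI2 => spis::InterruptHandler<peripherals::SPI2>;
/// });
///
/// let p = embassy_nrf::init(Default::default());
/// // new(spis, irq, cs, sck, miso, mosi, config)
/// let mut spis = spis::Spis::new(p.SPI2, Irqs, p.P0_25, p.P0_24, p.P0_16, p.P0_14, spis::Config::default());
/// ```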
pub struct Spis<'d, T: Instance> {
    _p: Peri<'d, T>,
}

impl<'d, T: Instance> Spis<'d, T> {
    /// Create a new SPIS driver.
    pub fn new(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(
            spis,
            cs.into(),
            Some(sck.into()),
            Some(miso.into()),
            Some(mosi.into()),
            config,
        )
    }

    /// Create a new SPIS driver, capable of TX only (MISO only).
    pub fn new_txonly(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), Some(sck.into()), Some(miso.into()), None, config)
    }

    /// Create a new SPIS driver, capable of RX only (MOSI only).
    pub fn new_rxonly(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), Some(sck.into()), None, Some(mosi.into()), config)
    }

    /// Create a new SPIS driver, capable of TX only (MISO only), without an SCK pin.
    pub fn new_txonly_nosck(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), None, Some(miso.into()), None, config)
    }

    fn new_inner(
        spis: Peri<'d, T>,
        cs: Peri<'d, AnyPin>,
        sck: Option<Peri<'d, AnyPin>>,
        miso: Option<Peri<'d, AnyPin>>,
        mosi: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        compiler_fence(Ordering::SeqCst);

        let r = T::regs();

        // Configure pins.
        cs.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
        r.psel().csn().write_value(cs.psel_bits());
        if let Some(sck) = &sck {
            sck.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
            r.psel().sck().write_value(sck.psel_bits());
        }
        if let Some(mosi) = &mosi {
            mosi.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
            r.psel().mosi().write_value(mosi.psel_bits());
        }
        if let Some(miso) = &miso {
            miso.conf().write(|w| {
                w.set_dir(gpiovals::Dir::OUTPUT);
                convert_drive(w, config.miso_drive);
            });
            r.psel().miso().write_value(miso.psel_bits());
        }

        // Enable the SPIS instance.
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let mut spis = Self { _p: spis };

        // Apply the runtime peripheral configuration.
        Self::set_config(&mut spis, &config).unwrap();

        // Disable all event interrupts.
        r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        spis
    }

    fn prepare(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
        // EasyDMA can only transmit from RAM; a mutable `rx` slice is necessarily in RAM already.
        slice_in_ram_or(tx, Error::BufferNotInRAM)?;
        compiler_fence(Ordering::SeqCst);

        let r = T::regs();

        // Set up the DMA write.
        if tx.len() > EASY_DMA_SIZE {
            return Err(Error::TxBufferTooLong);
        }
        r.txd().ptr().write_value(tx as *const u8 as _);
        r.txd().maxcnt().write(|w| w.set_maxcnt(tx.len() as _));

        // Set up the DMA read.
        if rx.len() > EASY_DMA_SIZE {
            return Err(Error::RxBufferTooLong);
        }
        r.rxd().ptr().write_value(rx as *mut u8 as _);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(rx.len() as _));

        // Reset the 'end' event.
        r.events_end().write_value(0);

        // Release the semaphore so the SPIS peripheral can use the buffers.
        r.tasks_release().write_value(1);

        Ok(())
    }

    fn blocking_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(usize, usize), Error> {
        compiler_fence(Ordering::SeqCst);
        let r = T::regs();

        // Acquire the semaphore if the CPU does not already own it.
        if r.semstat().read().0 != 1 {
            r.events_acquired().write_value(0);
            r.tasks_acquire().write_value(1);
            // Wait until the semaphore has been acquired.
            while r.semstat().read().0 != 1 {}
        }

        self.prepare(rx, tx)?;

        // Wait for the 'end' event.
        while r.events_end().read() == 0 {}

        let n_rx = r.rxd().amount().read().0 as usize;
        let n_tx = r.txd().amount().read().0 as usize;

        compiler_fence(Ordering::SeqCst);

        Ok((n_rx, n_tx))
    }

    fn blocking_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(usize, usize), Error> {
        match self.blocking_inner_from_ram(rx, tx) {
            Ok(n) => Ok(n),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying SPIS tx buffer into RAM for DMA");
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.blocking_inner_from_ram(rx, tx_ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    async fn async_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(usize, usize), Error> {
        let r = T::regs();
        let s = T::state();

        // Clear the status register.
        r.status().write(|w| {
            w.set_overflow(true);
            w.set_overread(true);
        });

        // Acquire the semaphore if the CPU does not already own it.
        if r.semstat().read().0 != 1 {
            // Reset the 'acquired' event and enable its interrupt.
            r.events_acquired().write_value(0);
            r.intenset().write(|w| w.set_acquired(true));

            r.tasks_acquire().write_value(1);

            // Wait until the semaphore has been acquired.
            poll_fn(|cx| {
                s.waker.register(cx.waker());
                if r.events_acquired().read() == 1 {
                    r.events_acquired().write_value(0);
                    return Poll::Ready(());
                }
                Poll::Pending
            })
            .await;
        }

        self.prepare(rx, tx)?;

        // Wait for the 'end' event.
        r.intenset().write(|w| w.set_end(true));
        poll_fn(|cx| {
            s.waker.register(cx.waker());
            if r.events_end().read() != 0 {
                r.events_end().write_value(0);
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        let n_rx = r.rxd().amount().read().0 as usize;
        let n_tx = r.txd().amount().read().0 as usize;

        compiler_fence(Ordering::SeqCst);

        Ok((n_rx, n_tx))
    }

    async fn async_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(usize, usize), Error> {
        match self.async_inner_from_ram(rx, tx).await {
            Ok(n) => Ok(n),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying SPIS tx buffer into RAM for DMA");
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.async_inner_from_ram(rx, tx_ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Reads data from the SPI bus without sending anything. Blocks until `cs` is deasserted.
    /// Returns the number of bytes read.
    pub fn blocking_read(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.blocking_inner(data, &[]).map(|n| n.0)
    }

    /// Simultaneously sends and receives data. Blocks until the transmission is completed.
    /// If necessary, the write buffer is copied into RAM first.
    /// Returns the number of bytes transferred as `(n_rx, n_tx)`.
    pub fn blocking_transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.blocking_inner(read, write)
    }

    /// Same as [`blocking_transfer`](Spis::blocking_transfer) except it fails instead of copying data into RAM.
    pub fn blocking_transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.blocking_inner_from_ram(read, write)
    }

    /// Simultaneously sends and receives data, placing the received data into the same buffer.
    /// Blocks until the transmission is completed. Returns the number of bytes transferred.
    pub fn blocking_transfer_in_place(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.blocking_inner_from_ram(data, data).map(|n| n.0)
    }

    /// Sends data, discarding any received data. Blocks until the transmission is completed.
    /// If necessary, the write buffer is copied into RAM first.
    /// Returns the number of bytes written.
    pub fn blocking_write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.blocking_inner(&mut [], data).map(|n| n.1)
    }

    /// Same as [`blocking_write`](Spis::blocking_write) except it fails instead of copying data into RAM.
    pub fn blocking_write_from_ram(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.blocking_inner_from_ram(&mut [], data).map(|n| n.1)
    }

    /// Reads data from the SPI bus without sending anything.
    /// Returns the number of bytes read.
    pub async fn read(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.async_inner(data, &[]).await.map(|n| n.0)
    }

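    /// Simultaneously sends and receives data.
    /// If necessary, the write buffer is copied into RAM first.
    /// Returns the number of bytes transferred as `(n_rx, n_tx)`.
    ///
    /// A hedged usage sketch (buffer sizes and contents are illustrative):
    ///
    /// ```ignore
    /// let mut rx = [0u8; 64];
    /// let tx = [1u8, 2, 3, 4];
    /// // Completes once the master ends the transaction; `n_rx`/`n_tx` report how many
    /// // bytes were actually received into `rx` and clocked out of `tx`.
    /// let (n_rx, n_tx) = spis.transfer(&mut rx, &tx).await?;
    /// ```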
    pub async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.async_inner(read, write).await
    }

    /// Same as [`transfer`](Spis::transfer) except it fails instead of copying data into RAM.
    pub async fn transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.async_inner_from_ram(read, write).await
    }

    /// Simultaneously sends and receives data, placing the received data into the same buffer.
    /// Returns the number of bytes transferred.
    pub async fn transfer_in_place(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.async_inner_from_ram(data, data).await.map(|n| n.0)
    }

    /// Sends data, discarding any received data.
    /// If necessary, the write buffer is copied into RAM first.
    /// Returns the number of bytes written.
    pub async fn write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.async_inner(&mut [], data).await.map(|n| n.1)
    }

    /// Same as [`write`](Spis::write) except it fails instead of copying data into RAM.
    pub async fn write_from_ram(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.async_inner_from_ram(&mut [], data).await.map(|n| n.1)
    }

    /// Checks if the last transaction overread.
    pub fn is_overread(&mut self) -> bool {
        T::regs().status().read().overread()
    }

    /// Checks if the last transaction overflowed.
    pub fn is_overflow(&mut self) -> bool {
        T::regs().status().read().overflow()
    }
}

impl<'d, T: Instance> Drop for Spis<'d, T> {
    fn drop(&mut self) {
        trace!("spis drop");

        // Disable the peripheral and deconfigure the pins.
        let r = T::regs();
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        gpio::deconfigure_pin(r.psel().sck().read());
        gpio::deconfigure_pin(r.psel().csn().read());
        gpio::deconfigure_pin(r.psel().miso().read());
        gpio::deconfigure_pin(r.psel().mosi().read());

        trace!("spis drop: done");
    }
}

pub(crate) struct State {
    waker: AtomicWaker,
}

impl State {
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::spis::Spis;
    fn state() -> &'static State;
}

/// SPIS peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_spis {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::spis::SealedInstance for peripherals::$type {
            fn regs() -> pac::spis::Spis {
                pac::$pac_type
            }
            fn state() -> &'static crate::spis::State {
                static STATE: crate::spis::State = crate::spis::State::new();
                &STATE
            }
        }
        impl crate::spis::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}

impl<'d, T: Instance> SetConfig for Spis<'d, T> {
    type Config = Config;
    type ConfigError = ();
    fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
        let r = T::regs();

        // Configure SPI mode and bit order.
        let mode = config.mode;
        r.config().write(|w| {
            w.set_order(config.bit_order);
            match mode {
                MODE_0 => {
                    w.set_cpol(vals::Cpol::ACTIVE_HIGH);
                    w.set_cpha(vals::Cpha::LEADING);
                }
                MODE_1 => {
                    w.set_cpol(vals::Cpol::ACTIVE_HIGH);
                    w.set_cpha(vals::Cpha::TRAILING);
                }
                MODE_2 => {
                    w.set_cpol(vals::Cpol::ACTIVE_LOW);
                    w.set_cpha(vals::Cpha::LEADING);
                }
                MODE_3 => {
                    w.set_cpol(vals::Cpol::ACTIVE_LOW);
                    w.set_cpha(vals::Cpha::TRAILING);
                }
            }
        });

        // Set the over-read character.
        let orc = config.orc;
        r.orc().write(|w| w.set_orc(orc));

        // Set the default character.
        let def = config.def;
        r.def().write(|w| w.set_def(def));

        // Configure auto-acquire on 'transfer end' event.
        let auto_acquire = config.auto_acquire;
        r.shorts().write(|w| w.set_end_acquire(auto_acquire));

        Ok(())
    }
}