1#![macro_use]
4use core::future::poll_fn;
5use core::marker::PhantomData;
6use core::sync::atomic::{compiler_fence, Ordering};
7use core::task::Poll;
8
9use embassy_embedded_hal::SetConfig;
10use embassy_hal_internal::{Peri, PeripheralType};
11use embassy_sync::waitqueue::AtomicWaker;
12pub use embedded_hal_02::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3};
13pub use pac::spis::vals::Order as BitOrder;
14
15use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
16use crate::gpio::{self, convert_drive, AnyPin, OutputDrive, Pin as GpioPin, SealedPin as _};
17use crate::interrupt::typelevel::Interrupt;
18use crate::pac::gpio::vals as gpiovals;
19use crate::pac::spis::vals;
20use crate::util::slice_in_ram_or;
21use crate::{interrupt, pac};
22
/// SPIS error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// TX buffer is longer than the chip's `EASY_DMA_SIZE` limit.
    TxBufferTooLong,
    /// RX buffer is longer than the chip's `EASY_DMA_SIZE` limit.
    RxBufferTooLong,
    /// The TX buffer is not in data RAM, which EasyDMA requires for reads.
    /// (The `*_from_ram` methods return this instead of copying.)
    BufferNotInRAM,
}
35
/// SPIS configuration.
#[non_exhaustive]
pub struct Config {
    /// SPI mode (clock polarity and phase). Defaults to `MODE_0`.
    pub mode: Mode,

    /// Bit order of data on the wire. Defaults to MSB first.
    pub bit_order: BitOrder,

    /// Over-read character, written to the SPIS `ORC` register.
    /// NOTE(review): per the register name this is presumably the byte clocked
    /// out once the TX buffer is exhausted — confirm against the datasheet.
    pub orc: u8,

    /// Default character, written to the SPIS `DEF` register.
    /// NOTE(review): presumably the byte clocked out while the peripheral does
    /// not own the transaction buffers — confirm against the datasheet.
    pub def: u8,

    /// If true, enables the END_ACQUIRE shortcut so the semaphore is
    /// re-acquired automatically after each transfer. Defaults to true.
    pub auto_acquire: bool,

    /// Drive strength for the MISO output pin. Defaults to `HighDrive`.
    pub miso_drive: OutputDrive,
}
63
64impl Default for Config {
65 fn default() -> Self {
66 Self {
67 mode: MODE_0,
68 bit_order: BitOrder::MSB_FIRST,
69 orc: 0x00,
70 def: 0x00,
71 auto_acquire: true,
72 miso_drive: OutputDrive::HighDrive,
73 }
74 }
75}
76
/// Interrupt handler for the SPIS peripheral; bind it to `T::Interrupt`
/// with the `Binding` argument of the constructors.
pub struct InterruptHandler<T: Instance> {
    // Zero-sized: only carries the instance type for the trait impl below.
    _phantom: PhantomData<T>,
}
81
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // Transfer finished: wake the waiting task, then mask the END
        // interrupt so it doesn't refire. The event register itself is
        // cleared by the woken task in its poll function, not here.
        if r.events_end().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_end(true));
        }

        // Semaphore acquired: same wake-then-mask pattern as above.
        if r.events_acquired().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_acquired(true));
        }
    }
}
98
/// SPIS (SPI slave) driver.
pub struct Spis<'d> {
    // Register block of the owned SPIS instance.
    r: pac::spis::Spis,
    // Per-instance shared state (waker) used by the interrupt handler.
    state: &'static State,
    // Ties the driver's lifetime to the borrowed peripheral and pins.
    _p: PhantomData<&'d ()>,
}
105
impl<'d> Spis<'d> {
    /// Create a new SPIS driver with CS, SCK, MISO and MOSI pins.
    pub fn new<T: Instance>(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(
            spis,
            cs.into(),
            Some(sck.into()),
            Some(miso.into()),
            Some(mosi.into()),
            config,
        )
    }

    /// Create a new SPIS driver, capable of TX only (MISO only).
    pub fn new_txonly<T: Instance>(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), Some(sck.into()), Some(miso.into()), None, config)
    }

    /// Create a new SPIS driver, capable of RX only (MOSI only).
    pub fn new_rxonly<T: Instance>(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        sck: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), Some(sck.into()), None, Some(mosi.into()), config)
    }

    /// Create a new SPIS driver, capable of TX only (MISO only), without an SCK pin.
    pub fn new_txonly_nosck<T: Instance>(
        spis: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        cs: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spis, cs.into(), None, Some(miso.into()), None, config)
    }

    // Shared constructor: routes the pins to the peripheral via PSEL, applies
    // `config`, enables the peripheral, and enables its interrupt.
    fn new_inner<T: Instance>(
        _spis: Peri<'d, T>,
        cs: Peri<'d, AnyPin>,
        sck: Option<Peri<'d, AnyPin>>,
        miso: Option<Peri<'d, AnyPin>>,
        mosi: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        compiler_fence(Ordering::SeqCst);

        let r = T::regs();

        // CS, SCK and MOSI are driven by the master: connect their input
        // buffers and select them in the peripheral's PSEL registers.
        cs.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
        r.psel().csn().write_value(cs.psel_bits());
        if let Some(sck) = &sck {
            sck.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
            r.psel().sck().write_value(sck.psel_bits());
        }
        if let Some(mosi) = &mosi {
            mosi.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
            r.psel().mosi().write_value(mosi.psel_bits());
        }
        // MISO is the only pin this slave drives: configure it as an output
        // with the requested drive strength.
        if let Some(miso) = &miso {
            miso.conf().write(|w| {
                w.set_dir(gpiovals::Dir::OUTPUT);
                convert_drive(w, config.miso_drive);
            });
            r.psel().miso().write_value(miso.psel_bits());
        }

        // Enable the peripheral.
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let mut spis = Self {
            r: T::regs(),
            state: T::state(),
            _p: PhantomData,
        };

        // Apply mode, bit order, ORC/DEF and auto-acquire settings.
        // `set_config` never fails (ConfigError is `()`), so unwrap is safe.
        spis.set_config(&config).unwrap();

        // Disable all event interrupts; they are enabled on demand by the
        // async methods.
        r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        spis
    }

    // Programs the EasyDMA RX/TX buffer pointers and lengths, clears the END
    // event, and releases the semaphore so the peripheral can use the buffers.
    // The TX buffer must be located in data RAM.
    fn prepare(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
        slice_in_ram_or(tx, Error::BufferNotInRAM)?;
        // NOTE: a RAM check for `rx` is unnecessary — a mutable slice can
        // only point to RAM in the first place.
        compiler_fence(Ordering::SeqCst);

        let r = self.r;

        // EasyDMA transfer length is limited per chip.
        if tx.len() > EASY_DMA_SIZE {
            return Err(Error::TxBufferTooLong);
        }
        r.txd().ptr().write_value(tx as *const u8 as _);
        r.txd().maxcnt().write(|w| w.set_maxcnt(tx.len() as _));

        if rx.len() > EASY_DMA_SIZE {
            return Err(Error::RxBufferTooLong);
        }
        r.rxd().ptr().write_value(rx as *mut u8 as _);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(rx.len() as _));

        // Reset the end-of-transaction event before handing the buffers over.
        r.events_end().write_value(0);

        // Release the semaphore: the peripheral may now use the buffers.
        r.tasks_release().write_value(1);

        Ok(())
    }

    // Blocking transfer; `tx` must already be in data RAM.
    // Returns (bytes received, bytes transmitted).
    fn blocking_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(usize, usize), Error> {
        compiler_fence(Ordering::SeqCst);
        let r = self.r;

        // Acquire the semaphore unless already held (the driver treats
        // SEMSTAT == 1 as "held by CPU"), busy-waiting until granted.
        if r.semstat().read().0 != 1 {
            r.events_acquired().write_value(0);
            r.tasks_acquire().write_value(1);
            while r.semstat().read().0 != 1 {}
        }

        self.prepare(rx, tx)?;

        // Busy-wait for the transaction to end.
        while r.events_end().read() == 0 {}

        // The AMOUNT registers report how many bytes were actually moved.
        let n_rx = r.rxd().amount().read().0 as usize;
        let n_tx = r.txd().amount().read().0 as usize;

        compiler_fence(Ordering::SeqCst);

        Ok((n_rx, n_tx))
    }

    // Blocking transfer; if `tx` is not in RAM (e.g. in flash), it is copied
    // into a temporary stack buffer first. Panics if `tx` is longer than
    // FORCE_COPY_BUFFER_SIZE in that fallback path (slicing panics).
    fn blocking_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(usize, usize), Error> {
        match self.blocking_inner_from_ram(rx, tx) {
            Ok(n) => Ok(n),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying SPIS tx buffer into RAM for DMA");
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.blocking_inner_from_ram(rx, tx_ram_buf)
            }
            Err(error) => Err(error),
        }
    }

    // Async transfer; `tx` must already be in data RAM.
    // Returns (bytes received, bytes transmitted).
    async fn async_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(usize, usize), Error> {
        let r = self.r;
        let s = self.state;

        // Acknowledge/clear the OVERFLOW and OVERREAD status flags from any
        // previous transaction.
        r.status().write(|w| {
            w.set_overflow(true);
            w.set_overread(true);
        });

        // Acquire the semaphore unless already held; instead of busy-waiting,
        // enable the ACQUIRED interrupt and await the event.
        if r.semstat().read().0 != 1 {
            r.events_acquired().write_value(0);
            r.intenset().write(|w| w.set_acquired(true));

            r.tasks_acquire().write_value(1);

            poll_fn(|cx| {
                // Register first, then check: avoids a lost-wakeup race with
                // the interrupt handler.
                s.waker.register(cx.waker());
                if r.events_acquired().read() == 1 {
                    r.events_acquired().write_value(0);
                    return Poll::Ready(());
                }
                Poll::Pending
            })
            .await;
        }

        self.prepare(rx, tx)?;

        // Enable the END interrupt and await the end of the transaction.
        r.intenset().write(|w| w.set_end(true));
        poll_fn(|cx| {
            s.waker.register(cx.waker());
            if r.events_end().read() != 0 {
                r.events_end().write_value(0);
                return Poll::Ready(());
            }
            Poll::Pending
        })
        .await;

        // The AMOUNT registers report how many bytes were actually moved.
        let n_rx = r.rxd().amount().read().0 as usize;
        let n_tx = r.txd().amount().read().0 as usize;

        compiler_fence(Ordering::SeqCst);

        Ok((n_rx, n_tx))
    }

    // Async transfer; if `tx` is not in RAM, it is copied into a temporary
    // stack buffer first (see blocking_inner for the size caveat).
    async fn async_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(usize, usize), Error> {
        match self.async_inner_from_ram(rx, tx).await {
            Ok(n) => Ok(n),
            Err(Error::BufferNotInRAM) => {
                trace!("Copying SPIS tx buffer into RAM for DMA");
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.async_inner_from_ram(rx, tx_ram_buf).await
            }
            Err(error) => Err(error),
        }
    }

    /// Reads data from the SPI bus without sending anything.
    /// Blocks until the transaction ends. Returns the number of bytes read.
    pub fn blocking_read(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.blocking_inner(data, &[]).map(|n| n.0)
    }

    /// Simultaneously sends and receives data.
    /// Blocks until the transaction ends. Returns (bytes read, bytes written).
    pub fn blocking_transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.blocking_inner(read, write)
    }

    /// Same as [`blocking_transfer`](Self::blocking_transfer), but skips the
    /// copy-to-RAM fallback: `write` must already be in data RAM or
    /// [`Error::BufferNotInRAM`] is returned.
    pub fn blocking_transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.blocking_inner_from_ram(read, write)
    }

    /// Simultaneously sends and receives data using a single buffer for both.
    /// Blocks until the transaction ends. Returns the number of bytes read.
    pub fn blocking_transfer_in_place(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.blocking_inner_from_ram(data, data).map(|n| n.0)
    }

    /// Sends data, discarding whatever is received.
    /// Blocks until the transaction ends. Returns the number of bytes written.
    pub fn blocking_write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.blocking_inner(&mut [], data).map(|n| n.1)
    }

    /// Same as [`blocking_write`](Self::blocking_write), but skips the
    /// copy-to-RAM fallback: `data` must already be in data RAM.
    pub fn blocking_write_from_ram(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.blocking_inner_from_ram(&mut [], data).map(|n| n.1)
    }

    /// Reads data from the SPI bus without sending anything.
    /// Returns the number of bytes read.
    pub async fn read(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.async_inner(data, &[]).await.map(|n| n.0)
    }

    /// Simultaneously sends and receives data.
    /// Returns (bytes read, bytes written).
    pub async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.async_inner(read, write).await
    }

    /// Same as [`transfer`](Self::transfer), but skips the copy-to-RAM
    /// fallback: `write` must already be in data RAM.
    pub async fn transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(usize, usize), Error> {
        self.async_inner_from_ram(read, write).await
    }

    /// Simultaneously sends and receives data using a single buffer for both.
    /// Returns the number of bytes read.
    pub async fn transfer_in_place(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        self.async_inner_from_ram(data, data).await.map(|n| n.0)
    }

    /// Sends data, discarding whatever is received.
    /// Returns the number of bytes written.
    pub async fn write(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.async_inner(&mut [], data).await.map(|n| n.1)
    }

    /// Same as [`write`](Self::write), but skips the copy-to-RAM fallback:
    /// `data` must already be in data RAM.
    pub async fn write_from_ram(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.async_inner_from_ram(&mut [], data).await.map(|n| n.1)
    }

    /// Returns the OVERREAD flag from the STATUS register (set when the
    /// master read past the end of the TX buffer).
    pub fn is_overread(&mut self) -> bool {
        self.r.status().read().overread()
    }

    /// Returns the OVERFLOW flag from the STATUS register (set when the
    /// master wrote past the end of the RX buffer).
    pub fn is_overflow(&mut self) -> bool {
        self.r.status().read().overflow()
    }
}
437
438impl<'d> Drop for Spis<'d> {
439 fn drop(&mut self) {
440 trace!("spis drop");
441
442 let r = self.r;
444 r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));
445
446 gpio::deconfigure_pin(r.psel().sck().read());
447 gpio::deconfigure_pin(r.psel().csn().read());
448 gpio::deconfigure_pin(r.psel().miso().read());
449 gpio::deconfigure_pin(r.psel().mosi().read());
450
451 trace!("spis drop: done");
452 }
453}
454
/// Per-instance state shared between the driver and its interrupt handler.
pub(crate) struct State {
    // Waker registered by pending futures; woken from the ISR on END/ACQUIRED.
    waker: AtomicWaker,
}
458
impl State {
    /// Creates the initial state (no waker registered).
    /// `const` so it can back the per-peripheral `static` in `impl_spis!`.
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}
466
// Crate-private half of the sealed-trait pattern: implemented only via
// `impl_spis!`, so downstream crates cannot implement `Instance`.
pub(crate) trait SealedInstance {
    /// Returns the PAC register block for this SPIS instance.
    fn regs() -> pac::spis::Spis;
    /// Returns the per-instance shared state (waker).
    fn state() -> &'static State;
}
471
/// SPIS peripheral instance. Sealed: only implementable inside this crate.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
478
// Implements `SealedInstance` + `Instance` for a concrete SPIS peripheral,
// wiring it to its PAC register block and its interrupt type.
macro_rules! impl_spis {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::spis::SealedInstance for peripherals::$type {
            fn regs() -> pac::spis::Spis {
                pac::$pac_type
            }
            fn state() -> &'static crate::spis::State {
                // One statically-allocated waker state per peripheral instance.
                static STATE: crate::spis::State = crate::spis::State::new();
                &STATE
            }
        }
        impl crate::spis::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
495
496impl<'d> SetConfig for Spis<'d> {
499 type Config = Config;
500 type ConfigError = ();
501 fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
502 let r = self.r;
503 let mode = config.mode;
505 r.config().write(|w| {
506 w.set_order(config.bit_order);
507 match mode {
508 MODE_0 => {
509 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
510 w.set_cpha(vals::Cpha::LEADING);
511 }
512 MODE_1 => {
513 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
514 w.set_cpha(vals::Cpha::TRAILING);
515 }
516 MODE_2 => {
517 w.set_cpol(vals::Cpol::ACTIVE_LOW);
518 w.set_cpha(vals::Cpha::LEADING);
519 }
520 MODE_3 => {
521 w.set_cpol(vals::Cpol::ACTIVE_LOW);
522 w.set_cpha(vals::Cpha::TRAILING);
523 }
524 }
525 });
526
527 let orc = config.orc;
529 r.orc().write(|w| w.set_orc(orc));
530
531 let def = config.def;
533 r.def().write(|w| w.set_def(def));
534
535 let auto_acquire = config.auto_acquire;
537 r.shorts().write(|w| w.set_end_acquire(auto_acquire));
538
539 Ok(())
540 }
541}