1#![macro_use]
4
5use core::future::poll_fn;
6use core::marker::PhantomData;
7#[cfg(feature = "_nrf52832_anomaly_109")]
8use core::sync::atomic::AtomicU8;
9use core::sync::atomic::{compiler_fence, Ordering};
10use core::task::Poll;
11
12use embassy_embedded_hal::SetConfig;
13use embassy_hal_internal::{Peri, PeripheralType};
14use embassy_sync::waitqueue::AtomicWaker;
15pub use embedded_hal_02::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3};
16pub use pac::spim::vals::{Frequency, Order as BitOrder};
17
18use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
19use crate::gpio::{self, convert_drive, AnyPin, OutputDrive, Pin as GpioPin, PselBits, SealedPin as _};
20use crate::interrupt::typelevel::Interrupt;
21use crate::pac::gpio::vals as gpiovals;
22use crate::pac::spim::vals;
23use crate::util::slice_in_ram_or;
24use crate::{interrupt, pac};
25
/// SPIM error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// EasyDMA can only read from data RAM; the supplied TX buffer is not
    /// located in RAM (e.g. it lives in flash).
    BufferNotInRAM,
}
34
/// SPIM driver configuration.
#[non_exhaustive]
#[derive(Clone)]
pub struct Config {
    /// SPI clock frequency.
    pub frequency: Frequency,

    /// SPI mode (clock polarity and phase).
    pub mode: Mode,

    /// Bit order (MSB-first or LSB-first).
    pub bit_order: BitOrder,

    /// Over-read character: the byte clocked out on MOSI when the TX buffer
    /// is shorter than the RX buffer.
    pub orc: u8,

    /// Drive strength for the SCK pin.
    pub sck_drive: OutputDrive,

    /// Drive strength for the MOSI pin.
    pub mosi_drive: OutputDrive,
}
60
61impl Default for Config {
62 fn default() -> Self {
63 Self {
64 frequency: Frequency::M1,
65 mode: MODE_0,
66 bit_order: BitOrder::MSB_FIRST,
67 orc: 0x00,
68 sck_drive: OutputDrive::HighDrive,
69 mosi_drive: OutputDrive::HighDrive,
70 }
71 }
72}
73
/// Interrupt handler for a SPIM instance.
///
/// Bind it to the instance's interrupt (see the `Binding` bound on
/// [`Spim::new`]) so waiting tasks get woken when a transfer completes.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
78
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // nRF52832 anomaly 109 workaround: the first chunk is started with
        // zeroed MAXCNT registers; on STARTED, wake the waiting task so it
        // can program the real lengths and restart the transfer (see
        // `Spim::nrf52832_dma_workaround_status`).
        #[cfg(feature = "_nrf52832_anomaly_109")]
        {
            if r.events_started().read() != 0 {
                s.waker.wake();
                // Mask STARTED again; it is re-armed per transfer.
                r.intenclr().write(|w| w.set_started(true));
            }
        }

        // Transfer complete: wake the waiter and mask END until the next
        // transfer arms it again. The event itself is cleared by the driver.
        if r.events_end().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_end(true));
        }
    }
}
100
/// SPIM driver.
pub struct Spim<'d> {
    // Peripheral register block.
    r: pac::spim::Spim,
    // IRQ number; masked again in `Drop`.
    irq: interrupt::Interrupt,
    // Per-instance shared state (waker + anomaly-109 bookkeeping).
    state: &'static State,
    // Ties the driver to the borrowed peripheral's lifetime.
    _p: PhantomData<&'d ()>,
}
108
109impl<'d> Spim<'d> {
110 pub fn new<T: Instance>(
112 spim: Peri<'d, T>,
113 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
114 sck: Peri<'d, impl GpioPin>,
115 miso: Peri<'d, impl GpioPin>,
116 mosi: Peri<'d, impl GpioPin>,
117 config: Config,
118 ) -> Self {
119 Self::new_inner(spim, Some(sck.into()), Some(miso.into()), Some(mosi.into()), config)
120 }
121
122 pub fn new_txonly<T: Instance>(
124 spim: Peri<'d, T>,
125 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
126 sck: Peri<'d, impl GpioPin>,
127 mosi: Peri<'d, impl GpioPin>,
128 config: Config,
129 ) -> Self {
130 Self::new_inner(spim, Some(sck.into()), None, Some(mosi.into()), config)
131 }
132
133 pub fn new_rxonly<T: Instance>(
135 spim: Peri<'d, T>,
136 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
137 sck: Peri<'d, impl GpioPin>,
138 miso: Peri<'d, impl GpioPin>,
139 config: Config,
140 ) -> Self {
141 Self::new_inner(spim, Some(sck.into()), Some(miso.into()), None, config)
142 }
143
144 pub fn new_txonly_nosck<T: Instance>(
146 spim: Peri<'d, T>,
147 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
148 mosi: Peri<'d, impl GpioPin>,
149 config: Config,
150 ) -> Self {
151 Self::new_inner(spim, None, None, Some(mosi.into()), config)
152 }
153
154 fn new_inner<T: Instance>(
155 _spim: Peri<'d, T>,
156 sck: Option<Peri<'d, AnyPin>>,
157 miso: Option<Peri<'d, AnyPin>>,
158 mosi: Option<Peri<'d, AnyPin>>,
159 config: Config,
160 ) -> Self {
161 let r = T::regs();
162
163 if let Some(sck) = &sck {
165 sck.conf().write(|w| {
166 w.set_dir(gpiovals::Dir::OUTPUT);
167 convert_drive(w, config.sck_drive);
168 });
169 }
170 if let Some(mosi) = &mosi {
171 mosi.conf().write(|w| {
172 w.set_dir(gpiovals::Dir::OUTPUT);
173 convert_drive(w, config.mosi_drive);
174 });
175 }
176 if let Some(miso) = &miso {
177 miso.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
178 }
179
180 match config.mode.polarity {
181 Polarity::IdleHigh => {
182 if let Some(sck) = &sck {
183 sck.set_high();
184 }
185 if let Some(mosi) = &mosi {
186 mosi.set_high();
187 }
188 }
189 Polarity::IdleLow => {
190 if let Some(sck) = &sck {
191 sck.set_low();
192 }
193 if let Some(mosi) = &mosi {
194 mosi.set_low();
195 }
196 }
197 }
198
199 r.psel().sck().write_value(sck.psel_bits());
201 r.psel().mosi().write_value(mosi.psel_bits());
202 r.psel().miso().write_value(miso.psel_bits());
203
204 r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));
206
207 let mut spim = Self {
208 r: T::regs(),
209 irq: T::Interrupt::IRQ,
210 state: T::state(),
211 _p: PhantomData {},
212 };
213
214 Self::set_config(&mut spim, &config).unwrap();
216
217 r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);
219
220 T::Interrupt::unpend();
221 unsafe { T::Interrupt::enable() };
222
223 spim
224 }
225
226 fn prepare_dma_transfer(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
227 compiler_fence(Ordering::SeqCst);
228
229 let r = self.r;
230
231 fn xfer_params(ptr: u32, total: usize, offset: usize, length: usize) -> (u32, usize) {
232 if total > offset {
233 (ptr.wrapping_add(offset as _), core::cmp::min(total - offset, length))
234 } else {
235 (ptr, 0)
236 }
237 }
238
239 let (rx_ptr, rx_len) = xfer_params(rx as *mut u8 as _, rx.len() as _, offset, length);
241 r.rxd().ptr().write_value(rx_ptr);
242 r.rxd().maxcnt().write(|w| w.set_maxcnt(rx_len as _));
243
244 let (tx_ptr, tx_len) = xfer_params(tx as *const u8 as _, tx.len() as _, offset, length);
246 r.txd().ptr().write_value(tx_ptr);
247 r.txd().maxcnt().write(|w| w.set_maxcnt(tx_len as _));
248
249 #[cfg(feature = "_nrf52832_anomaly_109")]
256 if offset == 0 {
257 let s = self.state;
258
259 r.events_started().write_value(0);
260
261 r.txd().maxcnt().write(|_| ());
263 r.rxd().maxcnt().write(|_| ());
264
265 s.tx.store(tx_len as _, Ordering::Relaxed);
267 s.rx.store(rx_len as _, Ordering::Relaxed);
268
269 r.intenset().write(|w| w.set_started(true));
271 }
272
273 r.events_end().write_value(0);
275 r.intenset().write(|w| w.set_end(true));
276
277 r.tasks_start().write_value(1);
279 }
280
281 fn blocking_inner_from_ram_chunk(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
282 self.prepare_dma_transfer(rx, tx, offset, length);
283
284 #[cfg(feature = "_nrf52832_anomaly_109")]
285 if offset == 0 {
286 while self.nrf52832_dma_workaround_status().is_pending() {}
287 }
288
289 while self.r.events_end().read() == 0 {}
291
292 compiler_fence(Ordering::SeqCst);
293 }
294
295 fn blocking_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
296 slice_in_ram_or(tx, Error::BufferNotInRAM)?;
297 let xfer_len = core::cmp::max(rx.len(), tx.len());
301 for offset in (0..xfer_len).step_by(EASY_DMA_SIZE) {
302 let length = core::cmp::min(xfer_len - offset, EASY_DMA_SIZE);
303 self.blocking_inner_from_ram_chunk(rx, tx, offset, length);
304 }
305 Ok(())
306 }
307
308 fn blocking_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
309 match self.blocking_inner_from_ram(rx, tx) {
310 Ok(_) => Ok(()),
311 Err(Error::BufferNotInRAM) => {
312 let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
314 tx_ram_buf.copy_from_slice(tx);
315 self.blocking_inner_from_ram(rx, tx_ram_buf)
316 }
317 }
318 }
319
320 async fn async_inner_from_ram_chunk(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
321 self.prepare_dma_transfer(rx, tx, offset, length);
322
323 #[cfg(feature = "_nrf52832_anomaly_109")]
324 if offset == 0 {
325 poll_fn(|cx| {
326 let s = self.state;
327
328 s.waker.register(cx.waker());
329
330 self.nrf52832_dma_workaround_status()
331 })
332 .await;
333 }
334
335 poll_fn(|cx| {
337 self.state.waker.register(cx.waker());
338 if self.r.events_end().read() != 0 {
339 return Poll::Ready(());
340 }
341
342 Poll::Pending
343 })
344 .await;
345
346 compiler_fence(Ordering::SeqCst);
347 }
348
349 async fn async_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
350 slice_in_ram_or(tx, Error::BufferNotInRAM)?;
351 let xfer_len = core::cmp::max(rx.len(), tx.len());
355 for offset in (0..xfer_len).step_by(EASY_DMA_SIZE) {
356 let length = core::cmp::min(xfer_len - offset, EASY_DMA_SIZE);
357 self.async_inner_from_ram_chunk(rx, tx, offset, length).await;
358 }
359 Ok(())
360 }
361
362 async fn async_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
363 match self.async_inner_from_ram(rx, tx).await {
364 Ok(_) => Ok(()),
365 Err(Error::BufferNotInRAM) => {
366 let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
368 tx_ram_buf.copy_from_slice(tx);
369 self.async_inner_from_ram(rx, tx_ram_buf).await
370 }
371 }
372 }
373
374 pub fn blocking_read(&mut self, data: &mut [u8]) -> Result<(), Error> {
376 self.blocking_inner(data, &[])
377 }
378
379 pub fn blocking_transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
382 self.blocking_inner(read, write)
383 }
384
385 pub fn blocking_transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
387 self.blocking_inner(read, write)
388 }
389
390 pub fn blocking_transfer_in_place(&mut self, data: &mut [u8]) -> Result<(), Error> {
393 self.blocking_inner_from_ram(data, data)
394 }
395
396 pub fn blocking_write(&mut self, data: &[u8]) -> Result<(), Error> {
399 self.blocking_inner(&mut [], data)
400 }
401
402 pub fn blocking_write_from_ram(&mut self, data: &[u8]) -> Result<(), Error> {
404 self.blocking_inner(&mut [], data)
405 }
406
407 pub async fn read(&mut self, data: &mut [u8]) -> Result<(), Error> {
409 self.async_inner(data, &[]).await
410 }
411
412 pub async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
415 self.async_inner(read, write).await
416 }
417
418 pub async fn transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
420 self.async_inner_from_ram(read, write).await
421 }
422
423 pub async fn transfer_in_place(&mut self, data: &mut [u8]) -> Result<(), Error> {
425 self.async_inner_from_ram(data, data).await
426 }
427
428 pub async fn write(&mut self, data: &[u8]) -> Result<(), Error> {
431 self.async_inner(&mut [], data).await
432 }
433
434 pub async fn write_from_ram(&mut self, data: &[u8]) -> Result<(), Error> {
436 self.async_inner_from_ram(&mut [], data).await
437 }
438
439 #[cfg(feature = "_nrf52832_anomaly_109")]
440 fn nrf52832_dma_workaround_status(&mut self) -> Poll<()> {
441 let r = self.r;
442 if r.events_started().read() != 0 {
443 let s = self.state;
444
445 r.events_started().write_value(0);
447 r.events_end().write_value(0);
448
449 r.rxd().maxcnt().write(|w| w.set_maxcnt(s.rx.load(Ordering::Relaxed)));
451 r.txd().maxcnt().write(|w| w.set_maxcnt(s.tx.load(Ordering::Relaxed)));
452
453 r.intenset().write(|w| w.set_end(true));
454 r.tasks_start().write_value(1);
456 return Poll::Ready(());
457 }
458 Poll::Pending
459 }
460}
461
impl<'d> Drop for Spim<'d> {
    fn drop(&mut self) {
        trace!("spim drop");

        // Disable the peripheral before releasing its pins.
        let r = self.r;
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        // Return every routed pin (read back from PSEL) to its reset state.
        gpio::deconfigure_pin(r.psel().sck().read());
        gpio::deconfigure_pin(r.psel().miso().read());
        gpio::deconfigure_pin(r.psel().mosi().read());

        // Mask the IRQ in the NVIC; nothing will service it anymore.
        cortex_m::peripheral::NVIC::mask(self.irq);

        trace!("spim drop: done");
    }
}
482
// Per-instance state shared between the driver and its interrupt handler.
pub(crate) struct State {
    // Wakes the task awaiting END (or STARTED with the anomaly-109 workaround).
    waker: AtomicWaker,
    // Real RX length to program after the anomaly-109 dummy start.
    #[cfg(feature = "_nrf52832_anomaly_109")]
    rx: AtomicU8,
    // Real TX length to program after the anomaly-109 dummy start.
    #[cfg(feature = "_nrf52832_anomaly_109")]
    tx: AtomicU8,
}
490
impl State {
    /// Const initializer, so a `State` can live in a `static`
    /// (see the `impl_spim!` macro).
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            #[cfg(feature = "_nrf52832_anomaly_109")]
            rx: AtomicU8::new(0),
            #[cfg(feature = "_nrf52832_anomaly_109")]
            tx: AtomicU8::new(0),
        }
    }
}
502
// Crate-private half of `Instance`; seals the trait against downstream impls.
pub(crate) trait SealedInstance {
    // Register block of this SPIM instance.
    fn regs() -> pac::spim::Spim;
    // Per-instance driver state.
    fn state() -> &'static State;
}
507
/// SPIM peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
514
// Implements `SealedInstance` + `Instance` for one SPIM peripheral, wiring
// it to its PAC register block, a dedicated static `State` and its interrupt.
macro_rules! impl_spim {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::spim::SealedInstance for peripherals::$type {
            fn regs() -> pac::spim::Spim {
                pac::$pac_type
            }
            fn state() -> &'static crate::spim::State {
                // One static `State` per peripheral instance.
                static STATE: crate::spim::State = crate::spim::State::new();
                &STATE
            }
        }
        impl crate::spim::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
531
532mod eh02 {
535 use super::*;
536
537 impl<'d> embedded_hal_02::blocking::spi::Transfer<u8> for Spim<'d> {
538 type Error = Error;
539 fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> {
540 self.blocking_transfer_in_place(words)?;
541 Ok(words)
542 }
543 }
544
545 impl<'d> embedded_hal_02::blocking::spi::Write<u8> for Spim<'d> {
546 type Error = Error;
547
548 fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
549 self.blocking_write(words)
550 }
551 }
552}
553
554impl embedded_hal_1::spi::Error for Error {
555 fn kind(&self) -> embedded_hal_1::spi::ErrorKind {
556 match *self {
557 Self::BufferNotInRAM => embedded_hal_1::spi::ErrorKind::Other,
558 }
559 }
560}
561
impl<'d> embedded_hal_1::spi::ErrorType for Spim<'d> {
    /// All bus operations report the driver's own [`Error`] type.
    type Error = Error;
}
565
566impl<'d> embedded_hal_1::spi::SpiBus<u8> for Spim<'d> {
567 fn flush(&mut self) -> Result<(), Self::Error> {
568 Ok(())
569 }
570
571 fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
572 self.blocking_transfer(words, &[])
573 }
574
575 fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
576 self.blocking_write(words)
577 }
578
579 fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
580 self.blocking_transfer(read, write)
581 }
582
583 fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
584 self.blocking_transfer_in_place(words)
585 }
586}
587
588impl<'d> embedded_hal_async::spi::SpiBus<u8> for Spim<'d> {
589 async fn flush(&mut self) -> Result<(), Error> {
590 Ok(())
591 }
592
593 async fn read(&mut self, words: &mut [u8]) -> Result<(), Error> {
594 self.read(words).await
595 }
596
597 async fn write(&mut self, data: &[u8]) -> Result<(), Error> {
598 self.write(data).await
599 }
600
601 async fn transfer(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
602 self.transfer(rx, tx).await
603 }
604
605 async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Error> {
606 self.transfer_in_place(words).await
607 }
608}
609
610impl<'d> SetConfig for Spim<'d> {
611 type Config = Config;
612 type ConfigError = ();
613 fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
614 let r = self.r;
615 let mode = config.mode;
617 r.config().write(|w| {
618 w.set_order(config.bit_order);
619 match mode {
620 MODE_0 => {
621 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
622 w.set_cpha(vals::Cpha::LEADING);
623 }
624 MODE_1 => {
625 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
626 w.set_cpha(vals::Cpha::TRAILING);
627 }
628 MODE_2 => {
629 w.set_cpol(vals::Cpol::ACTIVE_LOW);
630 w.set_cpha(vals::Cpha::LEADING);
631 }
632 MODE_3 => {
633 w.set_cpol(vals::Cpol::ACTIVE_LOW);
634 w.set_cpha(vals::Cpha::TRAILING);
635 }
636 }
637 });
638
639 let frequency = config.frequency;
641 r.frequency().write(|w| w.set_frequency(frequency));
642
643 let orc = config.orc;
645 r.orc().write(|w| w.set_orc(orc));
646
647 Ok(())
648 }
649}