1#![macro_use]
4
5use core::future::poll_fn;
6use core::marker::PhantomData;
7#[cfg(feature = "_nrf52832_anomaly_109")]
8use core::sync::atomic::AtomicU8;
9use core::sync::atomic::{compiler_fence, Ordering};
10use core::task::Poll;
11
12use embassy_embedded_hal::SetConfig;
13use embassy_hal_internal::{Peri, PeripheralType};
14use embassy_sync::waitqueue::AtomicWaker;
15pub use embedded_hal_02::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3};
16pub use pac::spim::vals::{Frequency, Order as BitOrder};
17
18use crate::chip::{EASY_DMA_SIZE, FORCE_COPY_BUFFER_SIZE};
19use crate::gpio::{self, convert_drive, AnyPin, OutputDrive, Pin as GpioPin, PselBits, SealedPin as _};
20use crate::interrupt::typelevel::Interrupt;
21use crate::pac::gpio::vals as gpiovals;
22use crate::pac::spim::vals;
23use crate::util::slice_in_ram_or;
24use crate::{interrupt, pac};
25
/// SPIM error.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// EasyDMA can only transfer to/from RAM; the supplied TX buffer is not in RAM
    /// (e.g. a `&'static` slice placed in flash).
    BufferNotInRAM,
}
34
/// SPIM driver configuration.
#[non_exhaustive]
#[derive(Clone)]
pub struct Config {
    /// SCK frequency.
    pub frequency: Frequency,

    /// SPI mode (clock polarity + phase).
    pub mode: Mode,

    /// Bit order of each transferred byte (MSB-first or LSB-first).
    pub bit_order: BitOrder,

    /// Over-read character: the byte clocked out on MOSI when the TX buffer
    /// is shorter than the RX buffer.
    pub orc: u8,

    /// Drive strength for the SCK output pin.
    pub sck_drive: OutputDrive,

    /// Drive strength for the MOSI output pin.
    pub mosi_drive: OutputDrive,
}
60
61impl Default for Config {
62 fn default() -> Self {
63 Self {
64 frequency: Frequency::M1,
65 mode: MODE_0,
66 bit_order: BitOrder::MSB_FIRST,
67 orc: 0x00,
68 sck_drive: OutputDrive::HighDrive,
69 mosi_drive: OutputDrive::HighDrive,
70 }
71 }
72}
73
/// Interrupt handler for the SPIM peripheral; bind it to `T::Interrupt`
/// when constructing the driver.
pub struct InterruptHandler<T: Instance> {
    // Zero-sized marker tying the handler to a specific peripheral instance.
    _phantom: PhantomData<T>,
}
78
impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    /// ISR: wake the waiting task and mask the event interrupt that fired.
    /// The event flags themselves are cleared by the task, not here.
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        // STARTED is only used by the nRF52832 anomaly 109 workaround
        // (see `nrf52832_dma_workaround_status`).
        #[cfg(feature = "_nrf52832_anomaly_109")]
        {
            if r.events_started().read() != 0 {
                s.waker.wake();
                // Mask STARTED so this ISR doesn't re-fire until re-armed.
                r.intenclr().write(|w| w.set_started(true));
            }
        }

        if r.events_end().read() != 0 {
            s.waker.wake();
            // Mask END so this ISR doesn't re-fire until re-armed.
            r.intenclr().write(|w| w.set_end(true));
        }
    }
}
100
/// SPIM (SPI master with EasyDMA) driver.
pub struct Spim<'d, T: Instance> {
    // Exclusive ownership of the peripheral for lifetime 'd.
    _p: Peri<'d, T>,
}
105
impl<'d, T: Instance> Spim<'d, T> {
    /// Create a full-duplex SPIM driver using SCK, MISO and MOSI pins.
    pub fn new(
        spim: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spim, Some(sck.into()), Some(miso.into()), Some(mosi.into()), config)
    }

    /// Create a transmit-only SPIM driver (SCK + MOSI; no MISO pin is claimed).
    pub fn new_txonly(
        spim: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sck: Peri<'d, impl GpioPin>,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spim, Some(sck.into()), None, Some(mosi.into()), config)
    }

    /// Create a receive-only SPIM driver (SCK + MISO; no MOSI pin is claimed).
    pub fn new_rxonly(
        spim: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sck: Peri<'d, impl GpioPin>,
        miso: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spim, Some(sck.into()), Some(miso.into()), None, config)
    }

    /// Create a transmit-only SPIM driver with no clock pin (MOSI only).
    ///
    /// NOTE(review): presumably intended for clockless bit-stream output
    /// (e.g. addressable LEDs) — confirm the target chip supports SPIM
    /// operation without SCK connected.
    pub fn new_txonly_nosck(
        spim: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        mosi: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        Self::new_inner(spim, None, None, Some(mosi.into()), config)
    }

    /// Shared constructor: configures the GPIOs, routes them to the
    /// peripheral via PSEL, enables the peripheral, applies `config`,
    /// and arms the interrupt.
    fn new_inner(
        spim: Peri<'d, T>,
        sck: Option<Peri<'d, AnyPin>>,
        miso: Option<Peri<'d, AnyPin>>,
        mosi: Option<Peri<'d, AnyPin>>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Configure outputs (SCK/MOSI) with the requested drive strength,
        // and connect the MISO input buffer.
        if let Some(sck) = &sck {
            sck.conf().write(|w| {
                w.set_dir(gpiovals::Dir::OUTPUT);
                convert_drive(w, config.sck_drive);
            });
        }
        if let Some(mosi) = &mosi {
            mosi.conf().write(|w| {
                w.set_dir(gpiovals::Dir::OUTPUT);
                convert_drive(w, config.mosi_drive);
            });
        }
        if let Some(miso) = &miso {
            miso.conf().write(|w| w.set_input(gpiovals::Input::CONNECT));
        }

        // Pre-set SCK/MOSI to the idle level matching the configured clock
        // polarity so the bus doesn't glitch when the peripheral takes over.
        match config.mode.polarity {
            Polarity::IdleHigh => {
                if let Some(sck) = &sck {
                    sck.set_high();
                }
                if let Some(mosi) = &mosi {
                    mosi.set_high();
                }
            }
            Polarity::IdleLow => {
                if let Some(sck) = &sck {
                    sck.set_low();
                }
                if let Some(mosi) = &mosi {
                    mosi.set_low();
                }
            }
        }

        // Route the pins to the peripheral; unused pins map to "disconnected".
        r.psel().sck().write_value(sck.psel_bits());
        r.psel().mosi().write_value(mosi.psel_bits());
        r.psel().miso().write_value(miso.psel_bits());

        // Enable the peripheral before writing the runtime configuration.
        r.enable().write(|w| w.set_enable(vals::Enable::ENABLED));

        let mut spim = Self { _p: spim };

        // Apply mode/frequency/bit-order/ORC via the SetConfig impl.
        // Infallible here (ConfigError is `()` and set_config always Ok's).
        Self::set_config(&mut spim, &config).unwrap();

        // Disable all event interrupts until a transfer arms the ones it needs.
        r.intenclr().write(|w| w.0 = 0xFFFF_FFFF);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        spim
    }

    /// Program one EasyDMA chunk (RXD/TXD pointer + length) and start the
    /// transfer. `rx`/`tx` are the full buffers; `offset`/`length` select
    /// the chunk to transfer.
    fn prepare_dma_transfer(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
        // Ensure buffer contents written by the CPU are visible before DMA starts.
        compiler_fence(Ordering::SeqCst);

        let r = T::regs();

        // Chunk pointer/length for one direction. If this buffer is already
        // exhausted (offset past its end), its length clamps to 0 so only the
        // other direction transfers during this chunk.
        fn xfer_params(ptr: u32, total: usize, offset: usize, length: usize) -> (u32, usize) {
            if total > offset {
                (ptr.wrapping_add(offset as _), core::cmp::min(total - offset, length))
            } else {
                (ptr, 0)
            }
        }

        let (rx_ptr, rx_len) = xfer_params(rx as *mut u8 as _, rx.len() as _, offset, length);
        r.rxd().ptr().write_value(rx_ptr);
        r.rxd().maxcnt().write(|w| w.set_maxcnt(rx_len as _));

        let (tx_ptr, tx_len) = xfer_params(tx as *const u8 as _, tx.len() as _, offset, length);
        r.txd().ptr().write_value(tx_ptr);
        r.txd().maxcnt().write(|w| w.set_maxcnt(tx_len as _));

        // nRF52832 anomaly 109 workaround (first chunk only): start with
        // zeroed MAXCNT values, stash the real lengths in `State`, and restore
        // them once STARTED fires (see `nrf52832_dma_workaround_status`).
        #[cfg(feature = "_nrf52832_anomaly_109")]
        if offset == 0 {
            let s = T::state();

            r.events_started().write_value(0);

            // `write(|_| ())` leaves the register at its reset value, i.e.
            // MAXCNT = 0 for the dummy start.
            r.txd().maxcnt().write(|_| ());
            r.rxd().maxcnt().write(|_| ());

            s.tx.store(tx_len as _, Ordering::Relaxed);
            s.rx.store(rx_len as _, Ordering::Relaxed);

            // Arm the STARTED interrupt so the workaround can continue.
            r.intenset().write(|w| w.set_started(true));
        }

        // Clear any stale END event, arm the END interrupt, and go.
        r.events_end().write_value(0);
        r.intenset().write(|w| w.set_end(true));

        r.tasks_start().write_value(1);
    }

    /// Blocking transfer of one chunk: start it, then busy-wait for END.
    fn blocking_inner_from_ram_chunk(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
        self.prepare_dma_transfer(rx, tx, offset, length);

        // Anomaly 109: spin until the STARTED-based restart has happened.
        #[cfg(feature = "_nrf52832_anomaly_109")]
        if offset == 0 {
            while self.nrf52832_dma_workaround_status().is_pending() {}
        }

        // Busy-wait for the transfer-complete event.
        while T::regs().events_end().read() == 0 {}

        // Ensure DMA-written RX data is visible to subsequent CPU reads.
        compiler_fence(Ordering::SeqCst);
    }

    /// Blocking transfer from RAM buffers, split into `EASY_DMA_SIZE` chunks
    /// (the peripheral's per-transfer limit).
    fn blocking_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
        // EasyDMA can only read from RAM; reject flash TX buffers up front.
        // NOTE(review): `rx` is not checked — a `&mut [u8]` is assumed to
        // always live in RAM.
        slice_in_ram_or(tx, Error::BufferNotInRAM)?;
        let xfer_len = core::cmp::max(rx.len(), tx.len());
        for offset in (0..xfer_len).step_by(EASY_DMA_SIZE) {
            let length = core::cmp::min(xfer_len - offset, EASY_DMA_SIZE);
            self.blocking_inner_from_ram_chunk(rx, tx, offset, length);
        }
        Ok(())
    }

    /// Blocking transfer; if `tx` is not in RAM (e.g. flash), it is copied
    /// to a stack buffer first.
    fn blocking_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
        match self.blocking_inner_from_ram(rx, tx) {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                // Fall back to copying TX through a RAM buffer.
                // NOTE: the slice below panics if tx.len() > FORCE_COPY_BUFFER_SIZE.
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.blocking_inner_from_ram(rx, tx_ram_buf)
            }
        }
    }

    /// Async transfer of one chunk: start it, then await the END event.
    async fn async_inner_from_ram_chunk(&mut self, rx: *mut [u8], tx: *const [u8], offset: usize, length: usize) {
        self.prepare_dma_transfer(rx, tx, offset, length);

        // Anomaly 109: await STARTED, then restart with the real lengths.
        #[cfg(feature = "_nrf52832_anomaly_109")]
        if offset == 0 {
            poll_fn(|cx| {
                let s = T::state();

                s.waker.register(cx.waker());

                self.nrf52832_dma_workaround_status()
            })
            .await;
        }

        // Await the transfer-complete event (woken by the interrupt handler).
        poll_fn(|cx| {
            T::state().waker.register(cx.waker());
            if T::regs().events_end().read() != 0 {
                return Poll::Ready(());
            }

            Poll::Pending
        })
        .await;

        // Ensure DMA-written RX data is visible to subsequent CPU reads.
        compiler_fence(Ordering::SeqCst);
    }

    /// Async transfer from RAM buffers, split into `EASY_DMA_SIZE` chunks.
    async fn async_inner_from_ram(&mut self, rx: *mut [u8], tx: *const [u8]) -> Result<(), Error> {
        // EasyDMA can only read from RAM; reject flash TX buffers up front.
        slice_in_ram_or(tx, Error::BufferNotInRAM)?;
        let xfer_len = core::cmp::max(rx.len(), tx.len());
        for offset in (0..xfer_len).step_by(EASY_DMA_SIZE) {
            let length = core::cmp::min(xfer_len - offset, EASY_DMA_SIZE);
            self.async_inner_from_ram_chunk(rx, tx, offset, length).await;
        }
        Ok(())
    }

    /// Async transfer; if `tx` is not in RAM, it is copied to a stack buffer.
    async fn async_inner(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
        match self.async_inner_from_ram(rx, tx).await {
            Ok(_) => Ok(()),
            Err(Error::BufferNotInRAM) => {
                // Fall back to copying TX through a RAM buffer.
                // NOTE: the slice below panics if tx.len() > FORCE_COPY_BUFFER_SIZE.
                let tx_ram_buf = &mut [0; FORCE_COPY_BUFFER_SIZE][..tx.len()];
                tx_ram_buf.copy_from_slice(tx);
                self.async_inner_from_ram(rx, tx_ram_buf).await
            }
        }
    }

    /// Blocking read: fills `data` from MISO while clocking out the ORC byte.
    pub fn blocking_read(&mut self, data: &mut [u8]) -> Result<(), Error> {
        self.blocking_inner(data, &[])
    }

    /// Blocking full-duplex transfer; buffers may have different lengths.
    pub fn blocking_transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
        self.blocking_inner(read, write)
    }

    /// Same as [`blocking_transfer`](Self::blocking_transfer) but `write`
    /// must already be in RAM (currently shares the copying fallback).
    pub fn blocking_transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
        self.blocking_inner(read, write)
    }

    /// Blocking in-place full-duplex transfer: `data` is transmitted and
    /// overwritten with the received bytes.
    pub fn blocking_transfer_in_place(&mut self, data: &mut [u8]) -> Result<(), Error> {
        self.blocking_inner_from_ram(data, data)
    }

    /// Blocking write; received bytes are discarded.
    pub fn blocking_write(&mut self, data: &[u8]) -> Result<(), Error> {
        self.blocking_inner(&mut [], data)
    }

    /// Same as [`blocking_write`](Self::blocking_write) but `data`
    /// must already be in RAM (currently shares the copying fallback).
    pub fn blocking_write_from_ram(&mut self, data: &[u8]) -> Result<(), Error> {
        self.blocking_inner(&mut [], data)
    }

    /// Async read: fills `data` from MISO while clocking out the ORC byte.
    pub async fn read(&mut self, data: &mut [u8]) -> Result<(), Error> {
        self.async_inner(data, &[]).await
    }

    /// Async full-duplex transfer; buffers may have different lengths.
    pub async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
        self.async_inner(read, write).await
    }

    /// Async transfer that requires `write` to be in RAM; fails with
    /// [`Error::BufferNotInRAM`] instead of copying.
    pub async fn transfer_from_ram(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
        self.async_inner_from_ram(read, write).await
    }

    /// Async in-place full-duplex transfer: `data` is transmitted and
    /// overwritten with the received bytes.
    pub async fn transfer_in_place(&mut self, data: &mut [u8]) -> Result<(), Error> {
        self.async_inner_from_ram(data, data).await
    }

    /// Async write; received bytes are discarded.
    pub async fn write(&mut self, data: &[u8]) -> Result<(), Error> {
        self.async_inner(&mut [], data).await
    }

    /// Async write that requires `data` to be in RAM; fails with
    /// [`Error::BufferNotInRAM`] instead of copying.
    pub async fn write_from_ram(&mut self, data: &[u8]) -> Result<(), Error> {
        self.async_inner_from_ram(&mut [], data).await
    }

    /// Anomaly 109 workaround, second half: once STARTED fires for the
    /// zero-length dummy start, restore the real MAXCNT values stashed in
    /// `State` and restart the transfer.
    #[cfg(feature = "_nrf52832_anomaly_109")]
    fn nrf52832_dma_workaround_status(&mut self) -> Poll<()> {
        let r = T::regs();
        if r.events_started().read() != 0 {
            let s = T::state();

            // Acknowledge STARTED and clear any stale END before re-starting.
            r.events_started().write_value(0);
            r.events_end().write_value(0);

            r.rxd().maxcnt().write(|w| w.set_maxcnt(s.rx.load(Ordering::Relaxed)));
            r.txd().maxcnt().write(|w| w.set_maxcnt(s.tx.load(Ordering::Relaxed)));

            r.intenset().write(|w| w.set_end(true));
            r.tasks_start().write_value(1);
            return Poll::Ready(());
        }
        Poll::Pending
    }
}
453
impl<'d, T: Instance> Drop for Spim<'d, T> {
    /// Disable the peripheral, release the pins back to their default state,
    /// and disable the interrupt.
    fn drop(&mut self) {
        trace!("spim drop");

        let r = T::regs();
        r.enable().write(|w| w.set_enable(vals::Enable::DISABLED));

        // Deconfigure whatever pins were routed via PSEL (disconnected
        // selections are handled by `deconfigure_pin`).
        gpio::deconfigure_pin(r.psel().sck().read());
        gpio::deconfigure_pin(r.psel().miso().read());
        gpio::deconfigure_pin(r.psel().mosi().read());

        T::Interrupt::disable();

        trace!("spim drop: done");
    }
}
474
/// Per-instance state shared between the driver and its interrupt handler.
pub(crate) struct State {
    // Wakes the task awaiting a transfer event.
    waker: AtomicWaker,
    // Pending RXD.MAXCNT value stashed for the anomaly 109 restart.
    #[cfg(feature = "_nrf52832_anomaly_109")]
    rx: AtomicU8,
    // Pending TXD.MAXCNT value stashed for the anomaly 109 restart.
    #[cfg(feature = "_nrf52832_anomaly_109")]
    tx: AtomicU8,
}
482
impl State {
    /// Const constructor so the state can live in a `static` (see `impl_spim!`).
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            #[cfg(feature = "_nrf52832_anomaly_109")]
            rx: AtomicU8::new(0),
            #[cfg(feature = "_nrf52832_anomaly_109")]
            tx: AtomicU8::new(0),
        }
    }
}
494
/// Crate-private half of [`Instance`]; seals the trait so only this crate's
/// peripherals can implement it.
pub(crate) trait SealedInstance {
    // Register block of this SPIM instance.
    fn regs() -> pac::spim::Spim;
    // Shared driver/ISR state for this instance.
    fn state() -> &'static State;
}
499
/// SPIM peripheral instance (implemented via `impl_spim!`).
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}
506
// Implements `SealedInstance`/`Instance` for a concrete peripheral:
// `impl_spim!(SPI2, SPIM2, SPIM2_SPIS2_SPI2);` wires the peripheral type to
// its PAC register block and its interrupt, and allocates its static State.
macro_rules! impl_spim {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::spim::SealedInstance for peripherals::$type {
            fn regs() -> pac::spim::Spim {
                pac::$pac_type
            }
            fn state() -> &'static crate::spim::State {
                static STATE: crate::spim::State = crate::spim::State::new();
                &STATE
            }
        }
        impl crate::spim::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}
523
/// Adapters for the embedded-hal 0.2 blocking SPI traits.
mod eh02 {
    use super::*;

    impl<'d, T: Instance> embedded_hal_02::blocking::spi::Transfer<u8> for Spim<'d, T> {
        type Error = Error;
        // Full-duplex in-place transfer; returns the buffer, now holding the
        // received bytes.
        fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> {
            self.blocking_transfer_in_place(words)?;
            Ok(words)
        }
    }

    impl<'d, T: Instance> embedded_hal_02::blocking::spi::Write<u8> for Spim<'d, T> {
        type Error = Error;

        fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
            self.blocking_write(words)
        }
    }
}
545
546impl embedded_hal_1::spi::Error for Error {
547 fn kind(&self) -> embedded_hal_1::spi::ErrorKind {
548 match *self {
549 Self::BufferNotInRAM => embedded_hal_1::spi::ErrorKind::Other,
550 }
551 }
552}
553
impl<'d, T: Instance> embedded_hal_1::spi::ErrorType for Spim<'d, T> {
    type Error = Error;
}
557
558impl<'d, T: Instance> embedded_hal_1::spi::SpiBus<u8> for Spim<'d, T> {
559 fn flush(&mut self) -> Result<(), Self::Error> {
560 Ok(())
561 }
562
563 fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
564 self.blocking_transfer(words, &[])
565 }
566
567 fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
568 self.blocking_write(words)
569 }
570
571 fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
572 self.blocking_transfer(read, write)
573 }
574
575 fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
576 self.blocking_transfer_in_place(words)
577 }
578}
579
580impl<'d, T: Instance> embedded_hal_async::spi::SpiBus<u8> for Spim<'d, T> {
581 async fn flush(&mut self) -> Result<(), Error> {
582 Ok(())
583 }
584
585 async fn read(&mut self, words: &mut [u8]) -> Result<(), Error> {
586 self.read(words).await
587 }
588
589 async fn write(&mut self, data: &[u8]) -> Result<(), Error> {
590 self.write(data).await
591 }
592
593 async fn transfer(&mut self, rx: &mut [u8], tx: &[u8]) -> Result<(), Error> {
594 self.transfer(rx, tx).await
595 }
596
597 async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Error> {
598 self.transfer_in_place(words).await
599 }
600}
601
602impl<'d, T: Instance> SetConfig for Spim<'d, T> {
603 type Config = Config;
604 type ConfigError = ();
605 fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
606 let r = T::regs();
607 let mode = config.mode;
609 r.config().write(|w| {
610 w.set_order(config.bit_order);
611 match mode {
612 MODE_0 => {
613 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
614 w.set_cpha(vals::Cpha::LEADING);
615 }
616 MODE_1 => {
617 w.set_cpol(vals::Cpol::ACTIVE_HIGH);
618 w.set_cpha(vals::Cpha::TRAILING);
619 }
620 MODE_2 => {
621 w.set_cpol(vals::Cpol::ACTIVE_LOW);
622 w.set_cpha(vals::Cpha::LEADING);
623 }
624 MODE_3 => {
625 w.set_cpol(vals::Cpol::ACTIVE_LOW);
626 w.set_cpha(vals::Cpha::TRAILING);
627 }
628 }
629 });
630
631 let frequency = config.frequency;
633 r.frequency().write(|w| w.set_frequency(frequency));
634
635 let orc = config.orc;
637 r.orc().write(|w| w.set_orc(orc));
638
639 Ok(())
640 }
641}