#![macro_use]

//! Quad Serial Peripheral Interface (QSPI) flash driver.

use core::future::{poll_fn, Future};
use core::marker::PhantomData;
use core::ptr;
use core::task::Poll;

use embassy_hal_internal::drop::OnDrop;
use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash};

use crate::gpio::{self, Pin as GpioPin};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::qspi::vals;
pub use crate::pac::qspi::vals::{
    Addrmode as AddressMode, Ppsize as WritePageSize, Readoc as ReadOpcode, Spimode as SpiMode, Writeoc as WriteOpcode,
};
use crate::{interrupt, pac};

/// Deep power-down mode (DPM) configuration for the external flash.
pub struct DeepPowerDownConfig {
    /// Time the flash needs to enter deep power-down (written to DPMDUR.ENTER).
    pub enter_time: u16,
    /// Time the flash needs to exit deep power-down (written to DPMDUR.EXIT).
    pub exit_time: u16,
}

/// QSPI SCK frequency, selected as a divider of the 32 MHz base clock
/// (the discriminant `n` gives 32 MHz / (n + 1), e.g. `M10_7` is roughly 10.7 MHz).
pub enum Frequency {
    M32 = 0,
    M16 = 1,
    M10_7 = 2,
    M8 = 3,
    M6_4 = 4,
    M5_3 = 5,
    M4_6 = 6,
    M4 = 7,
    M3_6 = 8,
    M3_2 = 9,
    M2_9 = 10,
    M2_7 = 11,
    M2_5 = 12,
    M2_3 = 13,
    M2_1 = 14,
    M2 = 15,
}

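/// QSPI driver configuration.
///
/// A minimal sketch of overriding a few fields before handing the config to
/// [`Qspi::new`] (the capacity, frequency and deep power-down values here are
/// illustrative, not defaults):
///
/// ```ignore
/// let mut config = Config::default();
/// config.capacity = 8 * 1024 * 1024; // 8 MiB external flash
/// config.frequency = Frequency::M32;
/// config.deep_power_down = Some(DeepPowerDownConfig {
///     enter_time: 3,
///     exit_time: 3,
/// });
/// ```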
#[non_exhaustive]
pub struct Config {
    /// Address offset used for execute-in-place (XIP) accesses; written to XIPOFFSET.
    pub xip_offset: u32,
    /// Opcode used for read operations.
    pub read_opcode: ReadOpcode,
    /// Opcode used for write operations.
    pub write_opcode: WriteOpcode,
    /// Page size used for write operations.
    pub write_page_size: WritePageSize,
    /// Deep power-down configuration; `None` disables deep power-down.
    pub deep_power_down: Option<DeepPowerDownConfig>,
    /// SCK frequency.
    pub frequency: Frequency,
    /// Value written to IFCONFIG1.SCKDELAY: minimum time CSN must stay high
    /// between transactions, in units of 62.5 ns.
    pub sck_delay: u8,
    /// Value written to IFTIMING.RXDELAY (only the low 3 bits are used).
    pub rx_delay: u8,
    /// SPI mode (CPOL/CPHA).
    pub spi_mode: SpiMode,
    /// Addressing mode (24-bit or 32-bit).
    pub address_mode: AddressMode,
    /// Flash capacity in bytes, used for bounds checks and for the
    /// `embedded-storage` `capacity()` implementations.
    pub capacity: u32,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            read_opcode: ReadOpcode::READ4IO,
            write_opcode: WriteOpcode::PP4IO,
            xip_offset: 0,
            write_page_size: WritePageSize::_256BYTES,
            deep_power_down: None,
            frequency: Frequency::M8,
            sck_delay: 80,
            rx_delay: 2,
            spi_mode: SpiMode::MODE0,
            address_mode: AddressMode::_24BIT,
            capacity: 0,
        }
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// The requested operation falls outside the configured flash capacity.
    OutOfBounds,
}

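/// Interrupt handler to bind to the QSPI peripheral's interrupt.
///
/// A sketch of binding it with `bind_interrupts!` (the interrupt and peripheral
/// names assume an nRF chip where the QSPI instance and its interrupt are both
/// called `QSPI`):
///
/// ```ignore
/// use embassy_nrf::{bind_interrupts, peripherals, qspi};
///
/// bind_interrupts!(struct Irqs {
///     QSPI => qspi::InterruptHandler<peripherals::QSPI>;
/// });
/// ```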
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        if r.events_ready().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_ready(true));
        }
    }
}

/// QSPI flash driver.
pub struct Qspi<'d> {
    r: pac::qspi::Qspi,
    state: &'static State,
    dpm_enabled: bool,
    capacity: u32,
    _phantom: PhantomData<&'d ()>,
}

impl<'d> Qspi<'d> {
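    /// Create a new QSPI driver.
    ///
    /// A sketch of bringing the driver up (assuming the `Irqs` binding shown on
    /// [`InterruptHandler`], an nRF52840-DK style pin assignment, and an
    /// illustrative 8 MiB flash capacity):
    ///
    /// ```ignore
    /// let mut config = qspi::Config::default();
    /// config.capacity = 8 * 1024 * 1024;
    ///
    /// let mut q = qspi::Qspi::new(
    ///     p.QSPI, Irqs, p.P0_19, p.P0_17, p.P0_20, p.P0_21, p.P0_22, p.P0_23, config,
    /// );
    /// ```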
    pub fn new<T: Instance>(
        _qspi: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sck: Peri<'d, impl GpioPin>,
        csn: Peri<'d, impl GpioPin>,
        io0: Peri<'d, impl GpioPin>,
        io1: Peri<'d, impl GpioPin>,
        io2: Peri<'d, impl GpioPin>,
        io3: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        // Drive each pin high, configure it as a high-drive output and route it to the QSPI peripheral.
        macro_rules! config_pin {
            ($pin:ident) => {
                $pin.set_high();
                $pin.conf().write(|w| {
                    w.set_dir(gpiovals::Dir::OUTPUT);
                    w.set_drive(gpiovals::Drive::H0H1);
                    #[cfg(all(feature = "_nrf5340", feature = "_s"))]
                    w.set_mcusel(gpiovals::Mcusel::PERIPHERAL);
                });
                r.psel().$pin().write_value($pin.psel_bits());
            };
        }

        config_pin!(sck);
        config_pin!(csn);
        config_pin!(io0);
        config_pin!(io1);
        config_pin!(io2);
        config_pin!(io3);

        r.ifconfig0().write(|w| {
            w.set_addrmode(config.address_mode);
            w.set_dpmenable(config.deep_power_down.is_some());
            w.set_ppsize(config.write_page_size);
            w.set_readoc(config.read_opcode);
            w.set_writeoc(config.write_opcode);
        });

        if let Some(dpd) = &config.deep_power_down {
            r.dpmdur().write(|w| {
                w.set_enter(dpd.enter_time);
                w.set_exit(dpd.exit_time);
            })
        }

        r.ifconfig1().write(|w| {
            w.set_sckdelay(config.sck_delay);
            w.set_dpmen(false);
            w.set_spimode(config.spi_mode);
            w.set_sckfreq(config.frequency as u8);
        });

        r.iftiming().write(|w| {
            w.set_rxdelay(config.rx_delay & 0b111);
        });

        r.xipoffset().write_value(config.xip_offset);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        r.enable().write(|w| w.set_enable(true));

        let res = Self {
            r: T::regs(),
            state: T::state(),
            dpm_enabled: config.deep_power_down.is_some(),
            capacity: config.capacity,
            _phantom: PhantomData,
        };

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));

        // Activate the peripheral and block until it reports ready.
        r.tasks_activate().write_value(1);

        Self::blocking_wait_ready();

        res
    }

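    /// Run a custom QSPI instruction: send `opcode` plus the bytes in `req`, then
    /// read the response bytes into `resp`.
    ///
    /// `req` must be at most 8 bytes, and at most 8 response bytes can be read
    /// back (the size of the CINSTRDAT0/1 registers). A sketch of reading a
    /// 3-byte JEDEC ID, assuming the flash understands the common `0x9F` opcode:
    ///
    /// ```ignore
    /// let mut id = [0u8; 3];
    /// q.custom_instruction(0x9F, &[], &mut id).await?;
    /// ```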
    pub async fn custom_instruction(&mut self, opcode: u8, req: &[u8], resp: &mut [u8]) -> Result<(), Error> {
        // If the future is dropped mid-operation, block until the peripheral is idle again.
        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        let len = core::cmp::max(req.len(), resp.len()) as u8;
        self.custom_instruction_start(opcode, req, len)?;

        self.wait_ready().await;

        self.custom_instruction_finish(resp)?;

        ondrop.defuse();

        Ok(())
    }

    /// Run a custom QSPI instruction, blocking version.
    pub fn blocking_custom_instruction(&mut self, opcode: u8, req: &[u8], resp: &mut [u8]) -> Result<(), Error> {
        let len = core::cmp::max(req.len(), resp.len()) as u8;
        self.custom_instruction_start(opcode, req, len)?;

        Self::blocking_wait_ready();

        self.custom_instruction_finish(resp)?;

        Ok(())
    }

    fn custom_instruction_start(&mut self, opcode: u8, req: &[u8], len: u8) -> Result<(), Error> {
        assert!(req.len() <= 8);

        // Pack the request bytes into the two 32-bit CINSTRDAT registers, little-endian.
        let mut dat0: u32 = 0;
        let mut dat1: u32 = 0;

        for i in 0..4 {
            if i < req.len() {
                dat0 |= (req[i] as u32) << (i * 8);
            }
        }
        for i in 0..4 {
            if i + 4 < req.len() {
                dat1 |= (req[i + 4] as u32) << (i * 8);
            }
        }

        self.r.cinstrdat0().write(|w| w.0 = dat0);
        self.r.cinstrdat1().write(|w| w.0 = dat1);

        self.r.events_ready().write_value(0);
        self.r.intenset().write(|w| w.set_ready(true));

        self.r.cinstrconf().write(|w| {
            w.set_opcode(opcode);
            w.set_length(vals::Length::from_bits(len + 1));
            w.set_lio2(true);
            w.set_lio3(true);
            w.set_wipwait(true);
            w.set_wren(true);
            w.set_lfen(false);
            w.set_lfstop(false);
        });
        Ok(())
    }

    fn custom_instruction_finish(&mut self, resp: &mut [u8]) -> Result<(), Error> {
        // Unpack the response bytes from the two 32-bit CINSTRDAT registers.
        let dat0 = self.r.cinstrdat0().read().0;
        let dat1 = self.r.cinstrdat1().read().0;
        for i in 0..4 {
            if i < resp.len() {
                resp[i] = (dat0 >> (i * 8)) as u8;
            }
        }
        for i in 0..4 {
            if i + 4 < resp.len() {
                resp[i + 4] = (dat1 >> (i * 8)) as u8;
            }
        }
        Ok(())
    }

    fn wait_ready(&mut self) -> impl Future<Output = ()> {
        let r = self.r;
        let s = self.state;
        poll_fn(move |cx| {
            s.waker.register(cx.waker());
            if r.events_ready().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
    }

    fn blocking_wait_ready() {
        loop {
            let r = pac::QSPI;
            if r.events_ready().read() != 0 {
                break;
            }
        }
    }

    fn start_read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        assert_eq!(data.as_ptr() as u32 % 4, 0);
        assert_eq!(data.len() as u32 % 4, 0);
        assert_eq!(address % 4, 0);

        self.r.read().src().write_value(address);
        self.r.read().dst().write_value(data.as_ptr() as u32);
        self.r.read().cnt().write(|w| w.set_cnt(data.len() as u32));

        self.r.events_ready().write_value(0);
        self.r.intenset().write(|w| w.set_ready(true));
        self.r.tasks_readstart().write_value(1);

        Ok(())
    }

    fn start_write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        assert_eq!(data.as_ptr() as u32 % 4, 0);
        assert_eq!(data.len() as u32 % 4, 0);
        assert_eq!(address % 4, 0);

        self.r.write().src().write_value(data.as_ptr() as u32);
        self.r.write().dst().write_value(address);
        self.r.write().cnt().write(|w| w.set_cnt(data.len() as u32));

        self.r.events_ready().write_value(0);
        self.r.intenset().write(|w| w.set_ready(true));
        self.r.tasks_writestart().write_value(1);

        Ok(())
    }

    fn start_erase(&mut self, address: u32) -> Result<(), Error> {
        assert_eq!(address % 4096, 0);

        self.r.erase().ptr().write_value(address);
        self.r.erase().len().write(|w| w.set_len(vals::Len::_4KB));

        self.r.events_ready().write_value(0);
        self.r.intenset().write(|w| w.set_ready(true));
        self.r.tasks_erasestart().write_value(1);

        Ok(())
    }

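    /// Raw QSPI DMA read, without checking the address range against the
    /// configured `capacity`.
    ///
    /// The buffer address, buffer length and flash address must all be multiples
    /// of 4, otherwise the driver panics. A sketch of borrowing a word-aligned
    /// byte buffer from a `u32` array (illustrative only):
    ///
    /// ```ignore
    /// let mut words = [0u32; 16]; // 64 bytes, guaranteed 4-byte aligned
    /// let buf: &mut [u8] =
    ///     unsafe { core::slice::from_raw_parts_mut(words.as_mut_ptr() as *mut u8, 64) };
    /// q.read_raw(0, buf).await?;
    /// ```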
    pub async fn read_raw(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_read(address, data)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Raw QSPI DMA write, without checking the address range against the
    /// configured `capacity`. The same alignment rules as [`Self::read_raw`] apply.
    pub async fn write_raw(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_write(address, data)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Blocking version of [`Self::read_raw`].
    pub fn blocking_read_raw(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        self.start_read(address, data)?;
        Self::blocking_wait_ready();
        Ok(())
    }

    /// Blocking version of [`Self::write_raw`].
    pub fn blocking_write_raw(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        self.start_write(address, data)?;
        Self::blocking_wait_ready();
        Ok(())
    }

    /// Read from the flash, checking that the whole range lies within the
    /// configured `capacity`.
    pub async fn read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.read_raw(address, data).await
    }

    /// Write to the flash, checking that the whole range lies within the
    /// configured `capacity`.
    pub async fn write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.write_raw(address, data).await
    }

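    /// Erase the 4 KiB sector starting at `address` (which must be
    /// sector-aligned and within the configured `capacity`).
    ///
    /// A sketch of an erase/write/read round trip on the first sector; the data
    /// buffers are borrowed from `u32` arrays so they satisfy the 4-byte
    /// alignment the DMA transfers require:
    ///
    /// ```ignore
    /// let tx = [0xDEADBEEFu32];
    /// let tx_bytes = unsafe { core::slice::from_raw_parts(tx.as_ptr() as *const u8, 4) };
    /// let mut rx = [0u32; 1];
    /// let rx_bytes = unsafe { core::slice::from_raw_parts_mut(rx.as_mut_ptr() as *mut u8, 4) };
    ///
    /// q.erase(0).await?;
    /// q.write(0, tx_bytes).await?;
    /// q.read(0, rx_bytes).await?;
    /// ```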
    pub async fn erase(&mut self, address: u32) -> Result<(), Error> {
        if address >= self.capacity {
            return Err(Error::OutOfBounds);
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_erase(address)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Blocking version of [`Self::read`].
    pub fn blocking_read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.blocking_read_raw(address, data)
    }

    /// Blocking version of [`Self::write`].
    pub fn blocking_write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.blocking_write_raw(address, data)
    }

    /// Blocking version of [`Self::erase`].
    pub fn blocking_erase(&mut self, address: u32) -> Result<(), Error> {
        if address >= self.capacity {
            return Err(Error::OutOfBounds);
        }

        self.start_erase(address)?;
        Self::blocking_wait_ready();
        Ok(())
    }

    /// Check that `address..address + len` lies within the configured capacity.
    fn bounds_check(&self, address: u32, len: usize) -> Result<(), Error> {
        let len_u32: u32 = len.try_into().map_err(|_| Error::OutOfBounds)?;
        let end_address = address.checked_add(len_u32).ok_or(Error::OutOfBounds)?;
        if end_address > self.capacity {
            return Err(Error::OutOfBounds);
        }
        Ok(())
    }
}

impl<'d> Drop for Qspi<'d> {
    fn drop(&mut self) {
        if self.dpm_enabled {
            trace!("qspi: doing deep powerdown...");

            self.r.ifconfig1().modify(|w| w.set_dpmen(true));

            // Wait for the peripheral to report that deep power-down is active.
            while !self.r.status().read().dpm() {}

            // Give the flash some extra time to actually enter deep power-down.
            cortex_m::asm::delay(4096);
        }

        self.r.tasks_deactivate().write_value(1);

        // Workaround for a QSPI anomaly on the nRF52840: an extra write to an
        // undocumented register is needed so the peripheral fully powers down.
        unsafe { ptr::write_volatile(0x40029054 as *mut u32, 1) }

        self.r.enable().write(|w| w.set_enable(false));

        // Release the clock and data pins. CSN is left as configured.
        gpio::deconfigure_pin(self.r.psel().sck().read());
        gpio::deconfigure_pin(self.r.psel().io0().read());
        gpio::deconfigure_pin(self.r.psel().io1().read());
        gpio::deconfigure_pin(self.r.psel().io2().read());
        gpio::deconfigure_pin(self.r.psel().io3().read());

        trace!("qspi: dropped");
    }
}

impl<'d> ErrorType for Qspi<'d> {
    type Error = Error;
}

impl NorFlashError for Error {
    fn kind(&self) -> NorFlashErrorKind {
        NorFlashErrorKind::Other
    }
}

impl<'d> ReadNorFlash for Qspi<'d> {
    const READ_SIZE: usize = 4;

    fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
        self.blocking_read(offset, bytes)?;
        Ok(())
    }

    fn capacity(&self) -> usize {
        self.capacity as usize
    }
}

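/// Blocking `embedded-storage` `NorFlash` implementation (4 KiB erase sectors,
/// 4-byte write granularity).
///
/// A sketch of erasing the first sector through the trait (assuming `q` is an
/// initialized [`Qspi`] with a non-zero configured capacity; read/write buffers
/// must follow the same 4-byte alignment rules as [`Qspi::read_raw`]):
///
/// ```ignore
/// use embedded_storage::nor_flash::NorFlash;
///
/// NorFlash::erase(&mut q, 0, 4096)?;
/// ```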
impl<'d> NorFlash for Qspi<'d> {
    const WRITE_SIZE: usize = 4;
    const ERASE_SIZE: usize = 4096;

    fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
        for address in (from..to).step_by(<Self as NorFlash>::ERASE_SIZE) {
            self.blocking_erase(address)?;
        }
        Ok(())
    }

    fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
        self.blocking_write(offset, bytes)?;
        Ok(())
    }
}

#[cfg(feature = "qspi-multiwrite-flash")]
impl<'d> embedded_storage::nor_flash::MultiwriteNorFlash for Qspi<'d> {}

mod _eh1 {
    use embedded_storage_async::nor_flash::{NorFlash as AsyncNorFlash, ReadNorFlash as AsyncReadNorFlash};

    use super::*;

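    /// Async `embedded-storage-async` `NorFlash` implementation.
    ///
    /// A sketch of using it through the async traits (assuming `q` is an
    /// initialized [`Qspi`] and the buffers follow the 4-byte alignment rules
    /// described on [`Qspi::read_raw`]):
    ///
    /// ```ignore
    /// use embedded_storage_async::nor_flash::{NorFlash, ReadNorFlash};
    ///
    /// NorFlash::erase(&mut q, 0, 4096).await?;
    /// let mut words = [0u32; 1];
    /// let buf = unsafe { core::slice::from_raw_parts_mut(words.as_mut_ptr() as *mut u8, 4) };
    /// ReadNorFlash::read(&mut q, 0, buf).await?;
    /// ```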
    impl<'d> AsyncNorFlash for Qspi<'d> {
        const WRITE_SIZE: usize = <Self as NorFlash>::WRITE_SIZE;
        const ERASE_SIZE: usize = <Self as NorFlash>::ERASE_SIZE;

        async fn write(&mut self, offset: u32, data: &[u8]) -> Result<(), Self::Error> {
            self.write(offset, data).await
        }

        async fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
            for address in (from..to).step_by(<Self as AsyncNorFlash>::ERASE_SIZE) {
                self.erase(address).await?
            }
            Ok(())
        }
    }

    impl<'d> AsyncReadNorFlash for Qspi<'d> {
        const READ_SIZE: usize = 4;
        async fn read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Self::Error> {
            self.read(address, data).await
        }

        fn capacity(&self) -> usize {
            self.capacity as usize
        }
    }

    #[cfg(feature = "qspi-multiwrite-flash")]
    impl<'d> embedded_storage_async::nor_flash::MultiwriteNorFlash for Qspi<'d> {}
}

/// Peripheral static state: the waker registered by `wait_ready` and woken from
/// the interrupt handler.
pub(crate) struct State {
    waker: AtomicWaker,
}

impl State {
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::qspi::Qspi;
    fn state() -> &'static State;
}

/// QSPI peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_qspi {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::qspi::SealedInstance for peripherals::$type {
            fn regs() -> pac::qspi::Qspi {
                pac::$pac_type
            }
            fn state() -> &'static crate::qspi::State {
                static STATE: crate::qspi::State = crate::qspi::State::new();
                &STATE
            }
        }
        impl crate::qspi::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}