#![macro_use]

//! Quad Serial Peripheral Interface (QSPI) flash driver.

use core::future::{poll_fn, Future};
use core::marker::PhantomData;
use core::ptr;
use core::task::Poll;

use embassy_hal_internal::drop::OnDrop;
use embassy_hal_internal::{Peri, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash};

use crate::gpio::{self, Pin as GpioPin};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::gpio::vals as gpiovals;
use crate::pac::qspi::vals;
pub use crate::pac::qspi::vals::{
    Addrmode as AddressMode, Ppsize as WritePageSize, Readoc as ReadOpcode, Spimode as SpiMode, Writeoc as WriteOpcode,
};
use crate::{interrupt, pac};

/// Deep power-down mode (DPM) configuration.
pub struct DeepPowerDownConfig {
    /// Time the flash needs to enter deep power-down, in units of 16 µs.
    pub enter_time: u16,
    /// Time the flash needs to exit deep power-down, in units of 16 µs.
    pub exit_time: u16,
}

/// QSPI bus frequency.
pub enum Frequency {
    /// 32 MHz
    M32 = 0,
    /// 16 MHz
    M16 = 1,
    /// 10.7 MHz
    M10_7 = 2,
    /// 8 MHz
    M8 = 3,
    /// 6.4 MHz
    M6_4 = 4,
    /// 5.3 MHz
    M5_3 = 5,
    /// 4.6 MHz
    M4_6 = 6,
    /// 4 MHz
    M4 = 7,
    /// 3.6 MHz
    M3_6 = 8,
    /// 3.2 MHz
    M3_2 = 9,
    /// 2.9 MHz
    M2_9 = 10,
    /// 2.7 MHz
    M2_7 = 11,
    /// 2.5 MHz
    M2_5 = 12,
    /// 2.3 MHz
    M2_3 = 13,
    /// 2.1 MHz
    M2_1 = 14,
    /// 2 MHz
    M2 = 15,
}

/// QSPI driver configuration.
#[non_exhaustive]
pub struct Config {
    /// XIP offset.
    pub xip_offset: u32,
    /// Opcode used for read operations.
    pub read_opcode: ReadOpcode,
    /// Opcode used for write operations.
    pub write_opcode: WriteOpcode,
    /// Page size for write operations.
    pub write_page_size: WritePageSize,
    /// Configuration for deep power-down mode. If `None`, deep power-down is not used.
    pub deep_power_down: Option<DeepPowerDownConfig>,
    /// QSPI bus frequency.
    pub frequency: Frequency,
    /// Minimum time CSN must stay high before going low again, in units of 62.5 ns (one 16 MHz period).
    pub sck_delay: u8,
    /// Delay from the rising SCK edge until input data is sampled (RXDELAY), 0..=7.
    pub rx_delay: u8,
    /// SPI mode (clock polarity/phase).
    pub spi_mode: SpiMode,
    /// Addressing mode (24-bit or 32-bit).
    pub address_mode: AddressMode,
    /// Flash memory capacity in bytes. Used only for bounds checking.
    pub capacity: u32,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            read_opcode: ReadOpcode::READ4IO,
            write_opcode: WriteOpcode::PP4IO,
            xip_offset: 0,
            write_page_size: WritePageSize::_256BYTES,
            deep_power_down: None,
            frequency: Frequency::M8,
            sck_delay: 80,
            rx_delay: 2,
            spi_mode: SpiMode::MODE0,
            address_mode: AddressMode::_24BIT,
            capacity: 0,
        }
    }
}

/// QSPI error.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
    /// Operation address or length was out of bounds.
    OutOfBounds,
}

/// Interrupt handler.
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}

impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
    unsafe fn on_interrupt() {
        let r = T::regs();
        let s = T::state();

        if r.events_ready().read() != 0 {
            s.waker.wake();
            r.intenclr().write(|w| w.set_ready(true));
        }
    }
}

/// QSPI flash driver.
pub struct Qspi<'d, T: Instance> {
    _peri: Peri<'d, T>,
    dpm_enabled: bool,
    capacity: u32,
}

impl<'d, T: Instance> Qspi<'d, T> {
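    /// Create a new QSPI driver.
    ///
    /// Illustrative setup sketch. The peripheral name `QSPI`, the `Irqs` binding and
    /// the pin assignment (`P0_19`/`P0_17`/`P0_20`..`P0_23`, as on an nRF52840-DK-style
    /// board) are assumptions for the example, not requirements of the driver:
    ///
    /// ```ignore
    /// use embassy_nrf::qspi::{self, Qspi};
    /// use embassy_nrf::{bind_interrupts, peripherals};
    ///
    /// bind_interrupts!(struct Irqs {
    ///     QSPI => qspi::InterruptHandler<peripherals::QSPI>;
    /// });
    ///
    /// let p = embassy_nrf::init(Default::default());
    ///
    /// let mut config = qspi::Config::default();
    /// config.capacity = 8 * 1024 * 1024; // 8 MiB external flash (example value)
    ///
    /// let mut q = Qspi::new(
    ///     p.QSPI, Irqs, p.P0_19, p.P0_17, p.P0_20, p.P0_21, p.P0_22, p.P0_23, config,
    /// );
    /// ```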
    pub fn new(
        qspi: Peri<'d, T>,
        _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
        sck: Peri<'d, impl GpioPin>,
        csn: Peri<'d, impl GpioPin>,
        io0: Peri<'d, impl GpioPin>,
        io1: Peri<'d, impl GpioPin>,
        io2: Peri<'d, impl GpioPin>,
        io3: Peri<'d, impl GpioPin>,
        config: Config,
    ) -> Self {
        let r = T::regs();

        macro_rules! config_pin {
            ($pin:ident) => {
                $pin.set_high();
                $pin.conf().write(|w| {
                    w.set_dir(gpiovals::Dir::OUTPUT);
                    w.set_drive(gpiovals::Drive::H0H1);
                    #[cfg(all(feature = "_nrf5340", feature = "_s"))]
                    w.set_mcusel(gpiovals::Mcusel::PERIPHERAL);
                });
                r.psel().$pin().write_value($pin.psel_bits());
            };
        }

        config_pin!(sck);
        config_pin!(csn);
        config_pin!(io0);
        config_pin!(io1);
        config_pin!(io2);
        config_pin!(io3);

        r.ifconfig0().write(|w| {
            w.set_addrmode(config.address_mode);
            w.set_dpmenable(config.deep_power_down.is_some());
            w.set_ppsize(config.write_page_size);
            w.set_readoc(config.read_opcode);
            w.set_writeoc(config.write_opcode);
        });

        if let Some(dpd) = &config.deep_power_down {
            r.dpmdur().write(|w| {
                w.set_enter(dpd.enter_time);
                w.set_exit(dpd.exit_time);
            })
        }

        r.ifconfig1().write(|w| {
            w.set_sckdelay(config.sck_delay);
            w.set_dpmen(false);
            w.set_spimode(config.spi_mode);
            w.set_sckfreq(config.frequency as u8);
        });

        r.iftiming().write(|w| {
            w.set_rxdelay(config.rx_delay & 0b111);
        });

        r.xipoffset().write_value(config.xip_offset);

        T::Interrupt::unpend();
        unsafe { T::Interrupt::enable() };

        // Enable the peripheral before activating it.
        r.enable().write(|w| w.set_enable(true));

        let res = Self {
            _peri: qspi,
            dpm_enabled: config.deep_power_down.is_some(),
            capacity: config.capacity,
        };

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));

        r.tasks_activate().write_value(1);

        Self::blocking_wait_ready();

        res
    }

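    /// Issue a custom flash instruction.
    ///
    /// Up to 8 data bytes can be sent after `opcode`, and the response (up to 8
    /// bytes) is written into `resp`. Illustrative sketch reading a JEDEC ID
    /// (opcode `0x9F` and the 3-byte response are properties of the attached flash
    /// chip, shown here only as an example):
    ///
    /// ```ignore
    /// let mut id = [0u8; 3];
    /// q.custom_instruction(0x9F, &[], &mut id).await?;
    /// ```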
    pub async fn custom_instruction(&mut self, opcode: u8, req: &[u8], resp: &mut [u8]) -> Result<(), Error> {
        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        let len = core::cmp::max(req.len(), resp.len()) as u8;
        self.custom_instruction_start(opcode, req, len)?;

        self.wait_ready().await;

        self.custom_instruction_finish(resp)?;

        ondrop.defuse();

        Ok(())
    }

    /// Issue a custom flash instruction, blocking version.
    pub fn blocking_custom_instruction(&mut self, opcode: u8, req: &[u8], resp: &mut [u8]) -> Result<(), Error> {
        let len = core::cmp::max(req.len(), resp.len()) as u8;
        self.custom_instruction_start(opcode, req, len)?;

        Self::blocking_wait_ready();

        self.custom_instruction_finish(resp)?;

        Ok(())
    }

    fn custom_instruction_start(&mut self, opcode: u8, req: &[u8], len: u8) -> Result<(), Error> {
        // Custom instructions can transfer at most 8 data bytes.
        assert!(req.len() <= 8);

        // Pack the request bytes into the two 32-bit CINSTRDAT registers.
        let mut dat0: u32 = 0;
        let mut dat1: u32 = 0;

        for i in 0..4 {
            if i < req.len() {
                dat0 |= (req[i] as u32) << (i * 8);
            }
        }
        for i in 0..4 {
            if i + 4 < req.len() {
                dat1 |= (req[i + 4] as u32) << (i * 8);
            }
        }

        let r = T::regs();
        r.cinstrdat0().write(|w| w.0 = dat0);
        r.cinstrdat1().write(|w| w.0 = dat1);

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));

        r.cinstrconf().write(|w| {
            w.set_opcode(opcode);
            // LENGTH counts the opcode plus the data bytes.
            w.set_length(vals::Length::from_bits(len + 1));
            w.set_lio2(true);
            w.set_lio3(true);
            w.set_wipwait(true);
            w.set_wren(true);
            w.set_lfen(false);
            w.set_lfstop(false);
        });
        Ok(())
    }

    fn custom_instruction_finish(&mut self, resp: &mut [u8]) -> Result<(), Error> {
        let r = T::regs();

        // Unpack the response bytes from the two 32-bit CINSTRDAT registers.
        let dat0 = r.cinstrdat0().read().0;
        let dat1 = r.cinstrdat1().read().0;
        for i in 0..4 {
            if i < resp.len() {
                resp[i] = (dat0 >> (i * 8)) as u8;
            }
        }
        for i in 0..4 {
            if i + 4 < resp.len() {
                resp[i + 4] = (dat1 >> (i * 8)) as u8;
            }
        }
        Ok(())
    }

    fn wait_ready(&mut self) -> impl Future<Output = ()> {
        poll_fn(move |cx| {
            let r = T::regs();
            let s = T::state();
            s.waker.register(cx.waker());
            if r.events_ready().read() != 0 {
                return Poll::Ready(());
            }
            Poll::Pending
        })
    }

    fn blocking_wait_ready() {
        loop {
            let r = T::regs();
            if r.events_ready().read() != 0 {
                break;
            }
        }
    }

    fn start_read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        // EasyDMA requires the buffer, its length and the flash address to be 4-byte aligned.
        assert_eq!(data.as_ptr() as u32 % 4, 0);
        assert_eq!(data.len() as u32 % 4, 0);
        assert_eq!(address % 4, 0);

        let r = T::regs();

        r.read().src().write_value(address);
        r.read().dst().write_value(data.as_ptr() as u32);
        r.read().cnt().write(|w| w.set_cnt(data.len() as u32));

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));
        r.tasks_readstart().write_value(1);

        Ok(())
    }

    fn start_write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        // EasyDMA requires the buffer, its length and the flash address to be 4-byte aligned.
        assert_eq!(data.as_ptr() as u32 % 4, 0);
        assert_eq!(data.len() as u32 % 4, 0);
        assert_eq!(address % 4, 0);

        let r = T::regs();
        r.write().src().write_value(data.as_ptr() as u32);
        r.write().dst().write_value(address);
        r.write().cnt().write(|w| w.set_cnt(data.len() as u32));

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));
        r.tasks_writestart().write_value(1);

        Ok(())
    }

    fn start_erase(&mut self, address: u32) -> Result<(), Error> {
        // Erases operate on whole 4 KiB sectors.
        assert_eq!(address % 4096, 0);

        let r = T::regs();
        r.erase().ptr().write_value(address);
        r.erase().len().write(|w| w.set_len(vals::Len::_4KB));

        r.events_ready().write_value(0);
        r.intenset().write(|w| w.set_ready(true));
        r.tasks_erasestart().write_value(1);

        Ok(())
    }

    /// Raw QSPI read.
    ///
    /// Unlike [`read`](Self::read), this does not bounds-check the address against
    /// the configured flash capacity.
    pub async fn read_raw(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_read(address, data)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Raw QSPI write.
    ///
    /// Unlike [`write`](Self::write), this does not bounds-check the address against
    /// the configured flash capacity.
    pub async fn write_raw(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_write(address, data)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Raw QSPI read, blocking version.
    ///
    /// Unlike [`blocking_read`](Self::blocking_read), this does not bounds-check the
    /// address against the configured flash capacity.
    pub fn blocking_read_raw(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        self.start_read(address, data)?;
        Self::blocking_wait_ready();
        Ok(())
    }

    /// Raw QSPI write, blocking version.
    ///
    /// Unlike [`blocking_write`](Self::blocking_write), this does not bounds-check the
    /// address against the configured flash capacity.
    pub fn blocking_write_raw(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        if data.is_empty() {
            return Ok(());
        }

        self.start_write(address, data)?;
        Self::blocking_wait_ready();
        Ok(())
    }

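    /// Read data from the flash memory, bounds-checked against the configured `capacity`.
    ///
    /// The buffer, its length and the address must all be 4-byte aligned. A small
    /// illustrative sketch (the `AlignedBuf` wrapper is a hypothetical helper used
    /// here only to guarantee alignment of the byte buffer):
    ///
    /// ```ignore
    /// #[repr(C, align(4))]
    /// struct AlignedBuf([u8; 64]);
    ///
    /// let mut buf = AlignedBuf([0; 64]);
    /// q.read(0, &mut buf.0).await?;
    /// ```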
    pub async fn read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.read_raw(address, data).await
    }

    /// Write data to the flash memory, bounds-checked against the configured `capacity`.
    pub async fn write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.write_raw(address, data).await
    }

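    /// Erase the 4 KiB sector starting at `address`.
    ///
    /// `address` must be sector-aligned and within the configured `capacity`.
    /// A typical erase-then-program sequence might look like this (the address and
    /// the `data` buffer are placeholders; `data` must be 4-byte aligned and a
    /// multiple of 4 bytes long):
    ///
    /// ```ignore
    /// q.erase(0x0000).await?;
    /// q.write(0x0000, &data).await?;
    /// ```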
    pub async fn erase(&mut self, address: u32) -> Result<(), Error> {
        if address >= self.capacity {
            return Err(Error::OutOfBounds);
        }

        let ondrop = OnDrop::new(Self::blocking_wait_ready);

        self.start_erase(address)?;
        self.wait_ready().await;

        ondrop.defuse();

        Ok(())
    }

    /// Read data from the flash memory, blocking version.
    pub fn blocking_read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.blocking_read_raw(address, data)
    }

    /// Write data to the flash memory, blocking version.
    pub fn blocking_write(&mut self, address: u32, data: &[u8]) -> Result<(), Error> {
        self.bounds_check(address, data.len())?;
        self.blocking_write_raw(address, data)
    }

    /// Erase a 4 KiB sector, blocking version.
    pub fn blocking_erase(&mut self, address: u32) -> Result<(), Error> {
        if address >= self.capacity {
            return Err(Error::OutOfBounds);
        }

        self.start_erase(address)?;
        Self::blocking_wait_ready();
        Ok(())
    }

    fn bounds_check(&self, address: u32, len: usize) -> Result<(), Error> {
        let len_u32: u32 = len.try_into().map_err(|_| Error::OutOfBounds)?;
        let end_address = address.checked_add(len_u32).ok_or(Error::OutOfBounds)?;
        if end_address > self.capacity {
            return Err(Error::OutOfBounds);
        }
        Ok(())
    }
}

impl<'d, T: Instance> Drop for Qspi<'d, T> {
    fn drop(&mut self) {
        let r = T::regs();

        if self.dpm_enabled {
            trace!("qspi: doing deep powerdown...");

            r.ifconfig1().modify(|w| w.set_dpmen(true));

            // The READY event does not fire on DPM entry, so spin until the
            // status register reports DPM.
            while !r.status().read().dpm() {}

            // Give the flash some extra time to settle after entering DPM.
            cortex_m::asm::delay(4096);
        }

        r.tasks_deactivate().write_value(1);

        // Workaround for nRF52840 anomaly 122: an extra register write is needed
        // after TASKS_DEACTIVATE to fully disable the peripheral.
        unsafe { ptr::write_volatile(0x40029054 as *mut u32, 1) }

        r.enable().write(|w| w.set_enable(false));

        // CSN is deliberately left configured: if it floated low while the flash is
        // in deep power-down, the flash could spuriously wake up.
        gpio::deconfigure_pin(r.psel().sck().read());
        gpio::deconfigure_pin(r.psel().io0().read());
        gpio::deconfigure_pin(r.psel().io1().read());
        gpio::deconfigure_pin(r.psel().io2().read());
        gpio::deconfigure_pin(r.psel().io3().read());

        trace!("qspi: dropped");
    }
}

impl<'d, T: Instance> ErrorType for Qspi<'d, T> {
    type Error = Error;
}

impl NorFlashError for Error {
    fn kind(&self) -> NorFlashErrorKind {
        NorFlashErrorKind::Other
    }
}

impl<'d, T: Instance> ReadNorFlash for Qspi<'d, T> {
    const READ_SIZE: usize = 4;

    fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
        self.blocking_read(offset, bytes)?;
        Ok(())
    }

    fn capacity(&self) -> usize {
        self.capacity as usize
    }
}

impl<'d, T: Instance> NorFlash for Qspi<'d, T> {
    const WRITE_SIZE: usize = 4;
    const ERASE_SIZE: usize = 4096;

    fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
        for address in (from..to).step_by(<Self as NorFlash>::ERASE_SIZE) {
            self.blocking_erase(address)?;
        }
        Ok(())
    }

    fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
        self.blocking_write(offset, bytes)?;
        Ok(())
    }
}

#[cfg(feature = "qspi-multiwrite-flash")]
impl<'d, T: Instance> embedded_storage::nor_flash::MultiwriteNorFlash for Qspi<'d, T> {}

mod _eh1 {
    use embedded_storage_async::nor_flash::{NorFlash as AsyncNorFlash, ReadNorFlash as AsyncReadNorFlash};

    use super::*;

    impl<'d, T: Instance> AsyncNorFlash for Qspi<'d, T> {
        const WRITE_SIZE: usize = <Self as NorFlash>::WRITE_SIZE;
        const ERASE_SIZE: usize = <Self as NorFlash>::ERASE_SIZE;

        async fn write(&mut self, offset: u32, data: &[u8]) -> Result<(), Self::Error> {
            self.write(offset, data).await
        }

        async fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
            for address in (from..to).step_by(<Self as AsyncNorFlash>::ERASE_SIZE) {
                self.erase(address).await?
            }
            Ok(())
        }
    }

    impl<'d, T: Instance> AsyncReadNorFlash for Qspi<'d, T> {
        const READ_SIZE: usize = 4;
        async fn read(&mut self, address: u32, data: &mut [u8]) -> Result<(), Self::Error> {
            self.read(address, data).await
        }

        fn capacity(&self) -> usize {
            self.capacity as usize
        }
    }

    #[cfg(feature = "qspi-multiwrite-flash")]
    impl<'d, T: Instance> embedded_storage_async::nor_flash::MultiwriteNorFlash for Qspi<'d, T> {}
}

pub(crate) struct State {
    waker: AtomicWaker,
}

impl State {
    pub(crate) const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
        }
    }
}

pub(crate) trait SealedInstance {
    fn regs() -> pac::qspi::Qspi;
    fn state() -> &'static State;
}

/// QSPI peripheral instance.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + PeripheralType + 'static + Send {
    /// Interrupt for this peripheral.
    type Interrupt: interrupt::typelevel::Interrupt;
}

macro_rules! impl_qspi {
    ($type:ident, $pac_type:ident, $irq:ident) => {
        impl crate::qspi::SealedInstance for peripherals::$type {
            fn regs() -> pac::qspi::Qspi {
                pac::$pac_type
            }
            fn state() -> &'static crate::qspi::State {
                static STATE: crate::qspi::State = crate::qspi::State::new();
                &STATE
            }
        }
        impl crate::qspi::Instance for peripherals::$type {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }
    };
}