1use core::marker::PhantomData;
2use core::sync::atomic::{fence, Ordering};
3
4use crate::pac::rcc::vals::HsiFs;
5use embassy_hal_internal::drop::OnDrop;
6use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
7use embedded_storage::nor_flash::{NorFlashError, NorFlashErrorKind};
8
9use crate::mode::{Async, Blocking};
10use crate::peripherals::FLASH;
11
12mod low_level;
13
/// Flash geometry and access-granularity constants, re-exported from the PAC.
pub mod values {
    /// Size in bytes of one flash page (the smallest programmable unit here).
    pub const PAGE_SIZE: usize = crate::pac::PAGE_SIZE;
    /// Size in bytes of one flash sector (the larger erase unit).
    pub const SECTOR_SIZE: usize = crate::pac::SECTOR_SIZE;

    /// Writes must be aligned to and sized in whole pages.
    pub const WRITE_SIZE: usize = PAGE_SIZE;
    /// Reads may start at any byte offset.
    pub const READ_SIZE: usize = 1;

    /// Total flash capacity in bytes.
    pub const FLASH_SIZE: usize = crate::pac::FLASH_SIZE;
    /// Base address of the memory-mapped flash region.
    pub const FLASH_BASE: usize = crate::pac::FLASH_BASE;
}
24use values::*;
25
/// Errors returned by flash operations.
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    // Hardware-reported programming failure — presumably mirrors a FLASH
    // status-register flag; verify against `low_level`/the reference manual.
    Prog,
    // Requested range extends past the end of flash (see `blocking_read`/`blocking_write`).
    Size,
    // Hardware-reported flag — NOTE(review): exact meaning not visible here.
    Miss,
    // Hardware-reported sequence error — NOTE(review): exact meaning not visible here.
    Seq,
    // Target region is write-protected.
    Protected,
    // Offset or length not aligned to the required write/erase granularity.
    Unaligned,
    // Hardware-reported parallelism error — NOTE(review): exact meaning not visible here.
    Parallelism,
}
38
/// A flash sector, identified by its absolute start address.
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FlashSector {
    /// Absolute start address of the sector (aligned to `SECTOR_SIZE`
    /// relative to `FLASH_BASE`; see `get_sector`).
    pub start: u32,
}
45
/// A flash page, identified by its absolute start address.
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FlashPage {
    /// Absolute start address of the page (aligned to `PAGE_SIZE`
    /// relative to `FLASH_BASE`; see `get_page`).
    pub start: u32,
}
52
/// An erasable unit of flash: either a single page or a whole sector.
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FlashUnit {
    /// Erase one page.
    Page(FlashPage),
    /// Erase one sector.
    Sector(FlashSector),
}
60
/// Flash memory driver.
///
/// `MODE` selects blocking vs. async operation and defaults to [`Async`];
/// a blocking instance is created with [`Flash::new_blocking`].
pub struct Flash<'d, MODE = Async> {
    // Exclusive ownership of the FLASH peripheral for lifetime 'd.
    pub(crate) _inner: PeripheralRef<'d, FLASH>,
    // Zero-sized marker carrying the mode type parameter.
    pub(crate) _mode: PhantomData<MODE>,
    // HSI frequency to use for flash timing configuration, if one has been
    // set; passed through to `low_level::timing_sequence_config`.
    pub(crate) timing_configured: Option<HsiFs>,
}
69
70impl<'d> Flash<'d, Blocking> {
71 pub fn new_blocking(p: impl Peripheral<P = FLASH> + 'd) -> Self {
73 into_ref!(p);
74
75 Self {
80 _inner: p,
81 _mode: PhantomData,
82 timing_configured: None,
83 }
84 }
85}
86
87impl<'d, MODE> Flash<'d, MODE> {
88 pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
93 if offset as usize + bytes.len() > FLASH_SIZE {
94 return Err(Error::Size);
95 }
96
97 let start_address = FLASH_BASE as u32 + offset;
98 let flash_data =
99 unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
100 bytes.copy_from_slice(flash_data);
101 Ok(())
102 }
103
104 pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
109 if offset as usize + bytes.len() > FLASH_SIZE {
110 return Err(Error::Size);
111 }
112
113 if offset % WRITE_SIZE as u32 != 0 || bytes.len() % WRITE_SIZE != 0 {
114 return Err(Error::Unaligned);
115 }
116
117 let mut address = FLASH_BASE as u32 + offset;
118 trace!("Writing {} bytes at 0x{:x}", bytes.len(), address);
119 for chunk in bytes.chunks(WRITE_SIZE) {
120 unsafe { write_chunk_with_critical_section(address, chunk, self.timing_configured) }?;
121 address += WRITE_SIZE as u32;
122 }
123 Ok(())
124 }
125
126 pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
131 let start_address = FLASH_BASE as u32 + from;
132 let end_address = FLASH_BASE as u32 + to;
133
134 let sector_ret = ensure_sector_aligned(start_address, end_address);
135 let page_ret = ensure_page_aligned(start_address, end_address);
136 let use_sector = match (sector_ret, page_ret) {
137 (Err(_), Err(_)) => return Err(Error::Unaligned),
138 (Ok(_), _) => true,
139 (Err(_), Ok(_)) => false,
140 };
141
142 trace!(
143 "Erasing from 0x{:x} to 0x{:x}, use_sector: {}",
144 start_address,
145 end_address,
146 use_sector
147 );
148
149 let mut address = start_address;
150 while address < end_address {
151 if use_sector {
152 let sector = get_sector(address);
153 trace!("Erasing sector: {:?}", sector);
154 unsafe {
155 erase_unit_with_critical_section(
156 &FlashUnit::Sector(sector),
157 self.timing_configured,
158 )
159 }?;
160 address += SECTOR_SIZE as u32;
161 } else {
162 let page = get_page(address);
163 trace!("Erasing page: {:?}", page);
164 unsafe {
165 erase_unit_with_critical_section(&FlashUnit::Page(page), self.timing_configured)
166 }?;
167 address += PAGE_SIZE as u32;
168 }
169 }
170 Ok(())
171 }
172}
173
/// Program one chunk of flash at `address` without taking a critical section.
///
/// Sequence: clear error flags → unlock the flash → apply timing
/// configuration → enable programming mode → perform the blocking write.
/// The `OnDrop` guard disables programming mode and re-locks the flash even
/// when the write returns early with an error.
///
/// # Safety
///
/// - `address` must be a valid flash address suitable for
///   `low_level::blocking_write`.
/// - `chunk` must be exactly the length `low_level::blocking_write` expects
///   (the `try_into` conversion panics otherwise).
/// - The caller must prevent concurrent flash access; use
///   [`write_chunk_with_critical_section`] unless interrupts are already
///   masked.
pub(super) unsafe fn write_chunk_unlocked(
    address: u32,
    chunk: &[u8],
    timing_configured: Option<HsiFs>,
) -> Result<(), Error> {
    low_level::clear_all_err();
    fence(Ordering::SeqCst);
    low_level::unlock();
    fence(Ordering::SeqCst);
    low_level::timing_sequence_config(timing_configured);
    fence(Ordering::SeqCst);
    low_level::enable_blocking_write();
    fence(Ordering::SeqCst);

    // Re-lock the flash no matter how this function exits.
    let _on_drop = OnDrop::new(|| {
        low_level::disable_blocking_write();
        fence(Ordering::SeqCst);
        low_level::lock();
    });

    // Callers hand us a fixed-size chunk (see `blocking_write`), so this
    // slice-to-array conversion cannot fail there.
    low_level::blocking_write(address, unwrap!(chunk.try_into()))
}
196
197pub(super) unsafe fn write_chunk_with_critical_section(
198 address: u32,
199 chunk: &[u8],
200 timing_configured: Option<HsiFs>,
201) -> Result<(), Error> {
202 critical_section::with(|_| write_chunk_unlocked(address, chunk, timing_configured))
203}
204
/// Erase one page or sector without taking a critical section.
///
/// Sequence: clear error flags → unlock the flash → apply timing
/// configuration → perform the blocking erase. The `OnDrop` guard re-locks
/// the flash even when the erase returns early with an error.
///
/// # Safety
///
/// - `unit` must describe a valid page/sector of this device's flash.
/// - The caller must prevent concurrent flash access; use
///   [`erase_unit_with_critical_section`] unless interrupts are already
///   masked.
pub(super) unsafe fn erase_unit_unlocked(
    unit: &FlashUnit,
    timing_configured: Option<HsiFs>,
) -> Result<(), Error> {
    low_level::clear_all_err();
    fence(Ordering::SeqCst);
    low_level::unlock();
    fence(Ordering::SeqCst);
    low_level::timing_sequence_config(timing_configured);
    fence(Ordering::SeqCst);

    // Re-lock the flash no matter how this function exits.
    let _on_drop = OnDrop::new(|| low_level::lock());

    low_level::blocking_erase_unit(unit)
}
220
221pub(super) unsafe fn erase_unit_with_critical_section(
222 unit: &FlashUnit,
223 timing_configured: Option<HsiFs>,
224) -> Result<(), Error> {
225 critical_section::with(|_| erase_unit_unlocked(unit, timing_configured))
226}
227
228pub(super) fn get_sector(address: u32) -> FlashSector {
229 let index = (address - FLASH_BASE as u32) / SECTOR_SIZE as u32;
230 FlashSector {
231 start: FLASH_BASE as u32 + index * SECTOR_SIZE as u32,
232 }
233}
234
235pub(super) fn get_page(address: u32) -> FlashPage {
236 let index = (address - FLASH_BASE as u32) / PAGE_SIZE as u32;
237 FlashPage {
238 start: FLASH_BASE as u32 + index * PAGE_SIZE as u32,
239 }
240}
241
242pub(super) fn ensure_sector_aligned(start_address: u32, end_address: u32) -> Result<(), Error> {
243 let mut address = start_address;
244 while address < end_address {
245 let sector = get_sector(address);
246 if sector.start != address {
247 return Err(Error::Unaligned);
248 }
249 address += SECTOR_SIZE as u32;
250 }
251 if address != end_address {
252 return Err(Error::Unaligned);
253 }
254 Ok(())
255}
256
257pub(super) fn ensure_page_aligned(start_address: u32, end_address: u32) -> Result<(), Error> {
258 let mut address = start_address;
259 while address < end_address {
260 let page = get_page(address);
261 if page.start != address {
262 return Err(Error::Unaligned);
263 }
264 address += PAGE_SIZE as u32;
265 }
266 if address != end_address {
267 return Err(Error::Unaligned);
268 }
269 Ok(())
270}
271
// `embedded-storage` integration: all trait operations report this driver's
// own `Error` type.
impl<MODE> embedded_storage::nor_flash::ErrorType for Flash<'_, MODE> {
    type Error = Error;
}
275
// `embedded-storage` read interface: delegates to the blocking read path.
impl<MODE> embedded_storage::nor_flash::ReadNorFlash for Flash<'_, MODE> {
    // Reads may start at any byte (READ_SIZE == 1).
    const READ_SIZE: usize = READ_SIZE;

    fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
        self.blocking_read(offset, bytes)
    }

    fn capacity(&self) -> usize {
        FLASH_SIZE
    }
}
287
// `embedded-storage` write/erase interface: delegates to the blocking paths.
impl<MODE> embedded_storage::nor_flash::NorFlash for Flash<'_, MODE> {
    const WRITE_SIZE: usize = WRITE_SIZE;
    // Advertise the finer-grained erase unit; `blocking_erase` will still
    // upgrade to sector erase when the range is sector-aligned.
    const ERASE_SIZE: usize = PAGE_SIZE;

    fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
        self.blocking_write(offset, bytes)
    }

    fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
        self.blocking_erase(from, to)
    }
}
300
301impl NorFlashError for Error {
302 fn kind(&self) -> NorFlashErrorKind {
303 match self {
304 Self::Size => NorFlashErrorKind::OutOfBounds,
305 Self::Unaligned => NorFlashErrorKind::NotAligned,
306 _ => NorFlashErrorKind::Other,
307 }
308 }
309}