//! `embedded-storage` NOR-flash trait implementations (`ReadNorFlash`,
//! `NorFlash`, `MultiwriteNorFlash`) for [`FlashStorage`].
1use embedded_storage::nor_flash::{
2    ErrorType,
3    MultiwriteNorFlash,
4    NorFlash,
5    NorFlashError,
6    NorFlashErrorKind,
7    ReadNorFlash,
8};
9
10#[cfg(feature = "bytewise-read")]
11use crate::buffer::FlashWordBuffer;
12use crate::{
13    FlashStorage,
14    FlashStorageError,
15    buffer::{FlashSectorBuffer, uninit_slice, uninit_slice_mut},
16};
17
18impl FlashStorage<'_> {
19    #[inline(always)]
20    fn is_word_aligned(bytes: &[u8]) -> bool {
21        // TODO: Use is_aligned_to when stabilized (see `pointer_is_aligned`)
22        (bytes.as_ptr() as usize) % (Self::WORD_SIZE as usize) == 0
23    }
24}
25
26impl NorFlashError for FlashStorageError {
27    fn kind(&self) -> NorFlashErrorKind {
28        match self {
29            Self::NotAligned => NorFlashErrorKind::NotAligned,
30            Self::OutOfBounds => NorFlashErrorKind::OutOfBounds,
31            _ => NorFlashErrorKind::Other,
32        }
33    }
34}
35
// Ties `FlashStorage` to its error type for all `embedded-storage` traits.
impl ErrorType for FlashStorage<'_> {
    type Error = FlashStorageError;
}
39
impl ReadNorFlash for FlashStorage<'_> {
    // Without `bytewise-read`, offsets and lengths must be word-aligned.
    #[cfg(not(feature = "bytewise-read"))]
    const READ_SIZE: usize = Self::WORD_SIZE as _;

    // With `bytewise-read`, arbitrary offsets/lengths are accepted; partial
    // words are assembled through a temporary word buffer below.
    #[cfg(feature = "bytewise-read")]
    const READ_SIZE: usize = 1;

    /// Reads `bytes.len()` bytes starting at `offset` into `bytes`.
    ///
    /// Fails with `NotAligned` if `offset`/length violate `READ_SIZE`
    /// alignment, or `OutOfBounds` if the range exceeds the capacity.
    /// Word-aligned destination buffers are read into directly, sector by
    /// sector; unaligned destinations go through an aligned sector buffer.
    fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
        const RS: u32 = FlashStorage::READ_SIZE as u32;
        self.check_alignment::<{ RS }>(offset, bytes.len())?;
        self.check_bounds(offset, bytes.len())?;

        // Handle a leading partial word: read the containing word into a
        // scratch buffer, copy out the requested tail, then continue with a
        // word-aligned `offset` and the remaining `bytes`.
        #[cfg(feature = "bytewise-read")]
        let (offset, bytes) = {
            let byte_offset = (offset % Self::WORD_SIZE) as usize;
            if byte_offset > 0 {
                let mut word_buffer = FlashWordBuffer::uninit();

                // Round `offset` down to the word boundary; `length` is how
                // many of the requested bytes live in this first word.
                let offset = offset - byte_offset as u32;
                let length = bytes.len().min(Self::WORD_SIZE as usize - byte_offset);

                self.internal_read(offset, word_buffer.as_bytes_mut())?;
                // SAFETY-relevant: the whole word was just filled by
                // `internal_read` above.
                let word_buffer = unsafe { word_buffer.assume_init_bytes_mut() };
                bytes[..length].copy_from_slice(&word_buffer[byte_offset..][..length]);

                (offset + Self::WORD_SIZE, &mut bytes[length..])
            } else {
                (offset, bytes)
            }
        };

        if Self::is_word_aligned(bytes) {
            // Bytes buffer is word-aligned so we can read directly to it
            for (offset, chunk) in (offset..)
                .step_by(Self::SECTOR_SIZE as _)
                .zip(bytes.chunks_mut(Self::SECTOR_SIZE as _))
            {
                // Chunk already is word aligned so we can read directly to it
                #[cfg(not(feature = "bytewise-read"))]
                self.internal_read(offset, uninit_slice_mut(chunk))?;

                #[cfg(feature = "bytewise-read")]
                {
                    // Split the chunk into a word-aligned prefix (read in
                    // place) and a sub-word tail (read via scratch buffer).
                    let length = chunk.len();
                    let byte_length = length % Self::WORD_SIZE as usize;
                    let length = length - byte_length;

                    self.internal_read(offset, &mut uninit_slice_mut(chunk)[..length])?;

                    // Read not aligned rest of data
                    if byte_length > 0 {
                        let mut word_buffer = FlashWordBuffer::uninit();

                        self.internal_read(offset + length as u32, word_buffer.as_bytes_mut())?;
                        let word_buffer = unsafe { word_buffer.assume_init_bytes_mut() };
                        chunk[length..].copy_from_slice(&word_buffer[..byte_length]);
                    }
                }
            }
        } else {
            // Bytes buffer isn't word-aligned so we might read only via aligned buffer
            let mut buffer = FlashSectorBuffer::uninit();

            for (offset, chunk) in (offset..)
                .step_by(Self::SECTOR_SIZE as _)
                .zip(bytes.chunks_mut(Self::SECTOR_SIZE as _))
            {
                // Read to temporary buffer first (chunk length is aligned)
                #[cfg(not(feature = "bytewise-read"))]
                self.internal_read(offset, &mut buffer.as_bytes_mut()[..chunk.len()])?;

                // Read to temporary buffer first (chunk length is not aligned)
                #[cfg(feature = "bytewise-read")]
                {
                    // Round the read length up to a whole word so the flash
                    // access stays word-granular.
                    let length = chunk.len();
                    let byte_length = length % Self::WORD_SIZE as usize;
                    let length = if byte_length > 0 {
                        length - byte_length + Self::WORD_SIZE as usize
                    } else {
                        length
                    };

                    self.internal_read(offset, &mut buffer.as_bytes_mut()[..length])?;
                }
                // NOTE(review): this asserts the *entire* sector buffer is
                // initialized although only `chunk.len()` (or `length`) bytes
                // were read into it; only the read prefix is used below, but
                // confirm `assume_init_bytes` is sound for the tail here.
                let buffer = unsafe { buffer.assume_init_bytes() };

                // Copy to bytes buffer
                chunk.copy_from_slice(&buffer[..chunk.len()]);
            }
        }

        Ok(())
    }

    /// Total usable flash capacity in bytes.
    fn capacity(&self) -> usize {
        self.capacity
    }
}
138
139impl NorFlash for FlashStorage<'_> {
140    const WRITE_SIZE: usize = Self::WORD_SIZE as _;
141    const ERASE_SIZE: usize = Self::SECTOR_SIZE as _;
142
143    fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
144        const WS: u32 = FlashStorage::WORD_SIZE;
145        self.check_alignment::<{ WS }>(offset, bytes.len())?;
146        self.check_bounds(offset, bytes.len())?;
147
148        if Self::is_word_aligned(bytes) {
149            // Bytes buffer is word-aligned so we can write directly from it
150            for (offset, chunk) in (offset..)
151                .step_by(Self::SECTOR_SIZE as _)
152                .zip(bytes.chunks(Self::SECTOR_SIZE as _))
153            {
154                // Chunk already is word aligned so we can write directly from it
155                self.internal_write(offset, chunk)?;
156            }
157        } else {
158            // Bytes buffer isn't word-aligned so we might write only via aligned buffer
159            let mut buffer = FlashSectorBuffer::uninit();
160
161            for (offset, chunk) in (offset..)
162                .step_by(Self::SECTOR_SIZE as _)
163                .zip(bytes.chunks(Self::SECTOR_SIZE as _))
164            {
165                // Copy to temporary buffer first
166                buffer.as_bytes_mut()[..chunk.len()].copy_from_slice(uninit_slice(chunk));
167                // Write from temporary buffer
168                self.internal_write(offset, unsafe {
169                    &buffer.assume_init_bytes()[..chunk.len()]
170                })?;
171            }
172        }
173
174        Ok(())
175    }
176
177    fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
178        let len = (to - from) as _;
179        const SZ: u32 = FlashStorage::SECTOR_SIZE;
180        self.check_alignment::<{ SZ }>(from, len)?;
181        self.check_bounds(from, len)?;
182
183        // First erase by sector up to the block boundary.
184        let mut address = from;
185        while address < to && (address % Self::BLOCK_SIZE) != 0 {
186            self.internal_erase_sector(address / Self::SECTOR_SIZE)?;
187            address += Self::SECTOR_SIZE;
188        }
189
190        // Then erase as much as possible by blocks.
191        while (to - address) >= Self::BLOCK_SIZE {
192            self.internal_erase_block(address / Self::BLOCK_SIZE)?;
193            address += Self::BLOCK_SIZE;
194        }
195
196        // Finally, erase any remaining sectors.
197        while address < to {
198            self.internal_erase_sector(address / Self::SECTOR_SIZE)?;
199            address += Self::SECTOR_SIZE;
200        }
201
202        Ok(())
203    }
204}
205
// NOTE(review): this marker asserts the flash tolerates multiple writes to
// the same word between erases — confirm for the targeted flash hardware.
impl MultiwriteNorFlash for FlashStorage<'_> {}
207
// Run the tests with `--test-threads=1` - the emulation is not multithread safe
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::Flash;

    // Geometry of the emulated flash: 4-byte words, 4 KiB sectors,
    // 64 KiB blocks, 3 blocks total.
    const WORD_SIZE: u32 = 4;
    const SECTOR_SIZE: u32 = 4 << 10;
    const BLOCK_SIZE: u32 = 4 << 14;
    const NUM_BLOCKS: u32 = 3;
    const FLASH_SIZE: u32 = BLOCK_SIZE * NUM_BLOCKS;
    // Bounds of the exhaustive (offset, length) sweep produced by `range_gen`.
    const MAX_OFFSET: u32 = SECTOR_SIZE * 1;
    const MAX_LENGTH: u32 = SECTOR_SIZE * 2;

    // Word-aligned backing buffer. Alignment matters: the driver takes
    // different code paths for aligned vs. unaligned user buffers, and the
    // tests slice at `[1..]` to deliberately force the unaligned path.
    #[repr(C, align(4))]
    struct TestBuffer {
        data: [u8; FLASH_SIZE as _],
    }

    impl TestBuffer {
        // Buffer filled with the repeating byte pattern 0x00..=0xFF, used as
        // a recognizable source for writes and as the expected read value.
        const fn seq() -> Self {
            let mut data = [0u8; FLASH_SIZE as _];
            let mut index = 0;
            while index < FLASH_SIZE {
                data[index as usize] = (index & 0xff) as u8;
                index += 1;
            }
            Self { data }
        }
    }

    impl Default for TestBuffer {
        // All-zero buffer, used as the destination of reads.
        fn default() -> Self {
            Self {
                data: [0u8; FLASH_SIZE as usize],
            }
        }
    }

    // Yields (offset, length) pairs with offset <= MAX_OFF and
    // offset + length <= MAX_LEN. `aligned`: Some(true) keeps only pairs
    // where both values are ALIGN-aligned, Some(false) only pairs where at
    // least one is unaligned, None keeps everything.
    #[cfg(not(miri))]
    fn range_gen<const ALIGN: u32, const MAX_OFF: u32, const MAX_LEN: u32>(
        aligned: Option<bool>,
    ) -> impl Iterator<Item = (u32, u32)> {
        (0..=MAX_OFF).flat_map(move |off| {
            (0..=MAX_LEN - off)
                .filter(move |len| {
                    aligned
                        .map(|aligned| aligned == (off % ALIGN == 0 && len % ALIGN == 0))
                        .unwrap_or(true)
                })
                .map(move |len| (off, len))
        })
    }

    #[cfg(miri)]
    fn range_gen<const ALIGN: u32, const MAX_OFF: u32, const MAX_LEN: u32>(
        aligned: Option<bool>,
    ) -> impl Iterator<Item = (u32, u32)> {
        // MIRI is very slow - just use a couple of combinations
        match aligned {
            Some(true) => vec![(0, 4), (0, 8), (0, 16), (0, 32), (0, 1024)],
            Some(false) => vec![(3, 7), (11, 11)],
            None => vec![
                (0, 4),
                (0, 8),
                (0, 16),
                (0, 32),
                (0, 1024),
                (3, 7),
                (11, 11),
                (0, 4098),
            ],
        }
        .into_iter()
    }

    // Aligned offsets/lengths into an aligned destination round-trip exactly.
    #[test]
    #[cfg(not(feature = "bytewise-read"))]
    fn aligned_read() {
        let mut flash = FlashStorage::new(Flash::new());
        flash.capacity = FLASH_SIZE as usize;
        let src = TestBuffer::seq();
        let mut data = TestBuffer::default();

        flash.erase(0, FLASH_SIZE).unwrap();
        flash.write(0, &src.data).unwrap();

        for (off, len) in range_gen::<WORD_SIZE, MAX_OFFSET, MAX_LENGTH>(Some(true)) {
            flash.read(off, &mut data.data[..len as usize]).unwrap();
            assert_eq!(
                data.data[..len as usize],
                src.data[off as usize..][..len as usize]
            );
        }
    }

    // Without `bytewise-read`, unaligned offsets/lengths must be rejected.
    #[test]
    #[cfg(not(feature = "bytewise-read"))]
    fn not_aligned_read_aligned_buffer() {
        let mut flash = FlashStorage::new(Flash::new());
        flash.capacity = FLASH_SIZE as usize;
        let mut data = TestBuffer::default();

        for (off, len) in range_gen::<WORD_SIZE, MAX_OFFSET, MAX_LENGTH>(Some(false)) {
            flash.read(off, &mut data.data[..len as usize]).unwrap_err();
        }
    }

    // Aligned flash range, but the destination slice starts at `[1..]` and is
    // therefore not word-aligned — exercises the sector-buffer read path.
    #[test]
    #[cfg(not(feature = "bytewise-read"))]
    fn aligned_read_not_aligned_buffer() {
        let mut flash = FlashStorage::new(Flash::new());
        flash.capacity = FLASH_SIZE as usize;
        let src = TestBuffer::seq();
        let mut data = TestBuffer::default();

        flash.erase(0, FLASH_SIZE).unwrap();
        flash.write(0, &src.data).unwrap();

        for (off, len) in range_gen::<WORD_SIZE, MAX_OFFSET, MAX_LENGTH>(Some(true)) {
            flash
                .read(off, &mut data.data[1..][..len as usize])
                .unwrap();
            assert_eq!(
                data.data[1..][..len as usize],
                src.data[off as usize..][..len as usize]
            );
        }
    }

    // With `bytewise-read`, all offset/length combinations (aligned or not)
    // into an aligned destination must round-trip.
    #[test]
    #[cfg(feature = "bytewise-read")]
    fn bytewise_read_aligned_buffer() {
        let mut flash = FlashStorage::new(Flash::new());

        flash.capacity = FLASH_SIZE as usize;
        let src = TestBuffer::seq();
        let mut data = TestBuffer::default();

        flash.erase(0, FLASH_SIZE).unwrap();
        flash.write(0, &src.data).unwrap();

        for (off, len) in range_gen::<WORD_SIZE, MAX_OFFSET, MAX_LENGTH>(None) {
            flash.read(off, &mut data.data[..len as usize]).unwrap();
            assert_eq!(
                data.data[..len as usize],
                src.data[off as usize..][..len as usize]
            );
        }
    }

    // Same as above, but the destination slice is forced unaligned via `[1..]`.
    #[test]
    #[cfg(feature = "bytewise-read")]
    fn bytewise_read_not_aligned_buffer() {
        let mut flash = FlashStorage::new(Flash::new());

        flash.capacity = FLASH_SIZE as usize;
        let src = TestBuffer::seq();
        let mut data = TestBuffer::default();

        flash.erase(0, FLASH_SIZE).unwrap();
        flash.write(0, &src.data).unwrap();

        for (off, len) in range_gen::<WORD_SIZE, MAX_OFFSET, MAX_LENGTH>(None) {
            flash
                .read(off, &mut data.data[1..][..len as usize])
                .unwrap();
            assert_eq!(
                data.data[1..][..len as usize],
                src.data[off as usize..][..len as usize]
            );
        }
    }

    // Writing from an unaligned source slice (`[1..129]`, 128 bytes) must
    // stage through the aligned buffer and still land the right bytes.
    #[test]
    fn write_not_aligned_buffer() {
        let mut flash = FlashStorage::new(Flash::new());
        flash.capacity = FLASH_SIZE as usize;
        let mut read_data = TestBuffer::default();
        let write_data = TestBuffer::seq();

        flash.erase(0, FLASH_SIZE).unwrap();
        flash.write(0, &write_data.data[1..129]).unwrap();

        flash.read(0, &mut read_data.data[..128]).unwrap();

        assert_eq!(&read_data.data[..128], &write_data.data[1..129]);
    }

    // Erase range spanning sector-aligned edges plus a full block in the
    // middle; verifies erased bytes are 0xFF and neighbors are untouched.
    #[test]
    fn erase_across_blocks() {
        let mut flash = FlashStorage::new(Flash::new());
        flash.capacity = FLASH_SIZE as usize;
        let mut read_data = TestBuffer::default();
        let write_data = [0u8; SECTOR_SIZE as usize];

        // An entire block and some sectors before and after
        let from = BLOCK_SIZE - (2 * SECTOR_SIZE);
        let to = (2 * BLOCK_SIZE) + (1 * SECTOR_SIZE);

        // Clear area before and after erase
        flash.erase(from - SECTOR_SIZE, from).unwrap();
        flash.write(from - SECTOR_SIZE, &write_data).unwrap();
        flash.erase(to, to + SECTOR_SIZE).unwrap();
        flash.write(to, &write_data).unwrap();

        // Erase and verify that only the desired parts were touched
        flash.erase(from, to).unwrap();
        flash.read(0, &mut read_data.data).unwrap();
        for i in from..to {
            assert_eq!(read_data.data[i as usize], 0xFF);
        }
        assert_eq!(read_data.data[(from - 1) as usize], 0x0);
        assert_eq!(read_data.data[to as usize], 0x0);
    }
}