sequential_storage/
queue.rs

1//! A queue (fifo) implementation for storing arbitrary data in flash memory.
2//!
3//! Use [push] to add data to the fifo and use [peek] and [pop] to get the data back.
4//!
5//! ```rust
6//! # use sequential_storage::queue::{push, peek, pop};
7//! # use sequential_storage::cache::NoCache;
8//! # use mock_flash::MockFlashBase;
9//! # use futures::executor::block_on;
10//! # type Flash = MockFlashBase<10, 1, 4096>;
11//! # mod mock_flash {
12//! #   include!("mock_flash.rs");
13//! # }
14//! #
15//! # fn init_flash() -> Flash {
16//! #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
17//! # }
18//! #
19//! # block_on(async {
20//!
21//! // Initialize the flash. This can be internal or external
22//! let mut flash = init_flash();
23//! // These are the flash addresses in which the crate will operate.
24//! // The crate will not read, write or erase outside of this range.
25//! let flash_range = 0x1000..0x3000;
26//! // We need to give the crate a buffer to work with.
27//! // It must be big enough to serialize the biggest value of your storage type in.
28//! let mut data_buffer = [0; 128];
29//!
30//! let my_data = [10, 47, 29];
31//!
32//! // We can push some data to the queue
33//! push(&mut flash, flash_range.clone(), &mut NoCache::new(), &my_data, false).await.unwrap();
34//!
35//! // We can peek at the oldest data
36//!
37//! assert_eq!(
38//!     &peek(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..],
39//!     &my_data[..]
40//! );
41//!
42//! // With popping we get back the oldest data, but that data is now also removed
43//!
44//! assert_eq!(
45//!     &pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..],
46//!     &my_data[..]
47//! );
48//!
49//! // If we pop again, we find there's no data anymore
50//!
51//! assert_eq!(
52//!     pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await,
53//!     Ok(None)
54//! );
55//! # });
56//! ```
57
58use crate::item::{find_next_free_item_spot, is_page_empty, Item, ItemHeader, ItemHeaderIter};
59
60use self::{cache::CacheImpl, item::ItemUnborrowed};
61
62use super::*;
63use embedded_storage_async::nor_flash::MultiwriteNorFlash;
64
/// Push data into the queue in the given flash memory with the given range.
/// The data can only be taken out with the [pop] function.
///
/// Old data will not be overwritten unless `allow_overwrite_old_data` is true.
/// If it is, then if the queue is full, the oldest data is removed to make space for the new data.
///
/// *Note: If a page is already used and you push more data than the remaining capacity of the page,
/// the entire remaining capacity will go unused because the data is stored on the next page.*
pub async fn push<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data: &[u8],
    allow_overwrite_old_data: bool,
) -> Result<(), Error<S::Error>> {
    // Thin wrapper around [push_inner]: the macro attempts `try_repair` and
    // retries when the inner call reports corruption.
    run_with_auto_repair!(
        function = push_inner(
            flash,
            flash_range.clone(),
            cache,
            data,
            allow_overwrite_old_data
        )
        .await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}
92
/// Implementation of [push]; the public wrapper adds automatic corruption repair.
async fn push_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data: &[u8],
    allow_overwrite_old_data: bool,
) -> Result<(), Error<S::Error>> {
    // The given range must cover whole erase pages.
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    // A dirty cache means a previous operation was interrupted; its state can't be trusted.
    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    // Data must fit in a single page
    if data.len() > u16::MAX as usize
        || data.len()
            > calculate_page_size::<S>().saturating_sub(ItemHeader::data_address::<S>(0) as usize)
    {
        cache.unmark_dirty();
        return Err(Error::ItemTooBig);
    }

    // New data is appended on the youngest page.
    let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?;

    // The first and last word of each page are reserved, so the usable data
    // region is the page minus one word on each side.
    let page_data_start_address =
        calculate_page_address::<S>(flash_range.clone(), current_page) + S::WORD_SIZE as u32;
    let page_data_end_address =
        calculate_page_end_address::<S>(flash_range.clone(), current_page) - S::WORD_SIZE as u32;

    partial_close_page(flash, flash_range.clone(), cache, current_page).await?;

    // Find the last item on the page so we know where we need to write

    let mut next_address = find_next_free_item_spot(
        flash,
        flash_range.clone(),
        cache,
        page_data_start_address,
        page_data_end_address,
        data.len() as u32,
    )
    .await?;

    if next_address.is_none() {
        // No cap left on this page, move to the next page
        let next_page = next_page::<S>(flash_range.clone(), current_page);
        match get_page_state(flash, flash_range.clone(), cache, next_page).await? {
            PageState::Open => {
                // Next page is open: close the current page and continue writing there.
                close_page(flash, flash_range.clone(), cache, current_page).await?;
                partial_close_page(flash, flash_range.clone(), cache, next_page).await?;
                next_address = Some(
                    calculate_page_address::<S>(flash_range.clone(), next_page)
                        + S::WORD_SIZE as u32,
                );
            }
            state @ PageState::Closed => {
                let next_page_data_start_address =
                    calculate_page_address::<S>(flash_range.clone(), next_page)
                        + S::WORD_SIZE as u32;

                // The next page is closed and may still hold data. Refuse unless
                // the caller explicitly allowed overwriting the oldest data.
                if !allow_overwrite_old_data
                    && !is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state))
                        .await?
                {
                    cache.unmark_dirty();
                    return Err(Error::FullStorage);
                }

                // Make the next page open again and move the write head onto it.
                open_page(flash, flash_range.clone(), cache, next_page).await?;
                close_page(flash, flash_range.clone(), cache, current_page).await?;
                partial_close_page(flash, flash_range.clone(), cache, next_page).await?;
                next_address = Some(next_page_data_start_address);
            }
            PageState::PartialOpen => {
                // This should never happen: only one page may be partial open at a time.
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            }
        }
    }

    // `next_address` is guaranteed Some here: either the initial search found a
    // spot or one of the match arms above set it (the remaining arm returns).
    Item::write_new(
        flash,
        flash_range.clone(),
        cache,
        next_address.unwrap(),
        data,
    )
    .await?;

    cache.unmark_dirty();
    Ok(())
}
192
193/// Get an iterator-like interface to iterate over the items stored in the queue.
194/// This goes from oldest to newest.
195///
196/// The iteration happens non-destructively, or in other words it peeks at every item.
197/// The returned entry has a [QueueIteratorEntry::pop] function with which you can decide to pop the item
198/// after you've seen the contents.
199pub async fn iter<'s, S: NorFlash, CI: CacheImpl>(
200    flash: &'s mut S,
201    flash_range: Range<u32>,
202    cache: &'s mut CI,
203) -> Result<QueueIterator<'s, S, CI>, Error<S::Error>> {
204    // Note: Corruption repair is done in these functions already
205    QueueIterator::new(flash, flash_range, cache).await
206}
207
208/// Peek at the oldest data.
209///
210/// If you also want to remove the data use [pop].
211///
212/// The data is written to the given `data_buffer` and the part that was written is returned.
213/// It is valid to only use the length of the returned slice and use the original `data_buffer`.
214/// The `data_buffer` may contain extra data on ranges after the returned slice.
215/// You should not depend on that data.
216///
217/// If the data buffer is not big enough an error is returned.
218pub async fn peek<'d, S: NorFlash>(
219    flash: &mut S,
220    flash_range: Range<u32>,
221    cache: &mut impl CacheImpl,
222    data_buffer: &'d mut [u8],
223) -> Result<Option<&'d mut [u8]>, Error<S::Error>> {
224    // Note: Corruption repair is done in these functions already
225    let mut iterator = iter(flash, flash_range, cache).await?;
226
227    let next_value = iterator.next(data_buffer).await?;
228
229    match next_value {
230        Some(entry) => Ok(Some(entry.into_buf())),
231        None => Ok(None),
232    }
233}
234
235/// Pop the oldest data from the queue.
236///
237/// If you don't want to remove the data use [peek].
238///
239/// The data is written to the given `data_buffer` and the part that was written is returned.
240/// It is valid to only use the length of the returned slice and use the original `data_buffer`.
241/// The `data_buffer` may contain extra data on ranges after the returned slice.
242/// You should not depend on that data.
243///
244/// If the data buffer is not big enough an error is returned.
245pub async fn pop<'d, S: MultiwriteNorFlash>(
246    flash: &mut S,
247    flash_range: Range<u32>,
248    cache: &mut impl CacheImpl,
249    data_buffer: &'d mut [u8],
250) -> Result<Option<&'d mut [u8]>, Error<S::Error>> {
251    let mut iterator = iter(flash, flash_range, cache).await?;
252
253    let next_value = iterator.next(data_buffer).await?;
254
255    match next_value {
256        Some(entry) => Ok(Some(entry.pop().await?)),
257        None => Ok(None),
258    }
259}
260
/// An iterator-like interface for peeking into data stored in flash with the option to pop it.
pub struct QueueIterator<'s, S: NorFlash, CI: CacheImpl> {
    // Exclusive access to the flash driver for the iterator's lifetime.
    flash: &'s mut S,
    // The flash address range the queue lives in.
    flash_range: Range<u32>,
    // Cache used to speed up page/item lookups.
    cache: &'s mut CI,
    // Where the next call to `next` continues reading.
    next_address: NextAddress,
}
268
// Manual Debug impl: the flash, range and cache fields are omitted
// (hence `finish_non_exhaustive`); only the iteration position is shown.
impl<S: NorFlash, CI: CacheImpl> Debug for QueueIterator<'_, S, CI> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("QueueIterator")
            .field("current_address", &self.next_address)
            .finish_non_exhaustive()
    }
}
276
/// Bookkeeping for where a [QueueIterator] continues reading.
#[derive(Debug, Clone)]
enum NextAddress {
    /// Continue at this exact flash address.
    Address(u32),
    /// Continue on the page after the page with this index.
    PageAfter(usize),
}
282
impl<'s, S: NorFlash, CI: CacheImpl> QueueIterator<'s, S, CI> {
    /// Create a new iterator positioned at the first item of the oldest page.
    async fn new(
        flash: &'s mut S,
        flash_range: Range<u32>,
        cache: &'s mut CI,
    ) -> Result<Self, Error<S::Error>> {
        // Finding the start address may detect corruption; the macro runs
        // `try_repair` and retries in that case.
        let start_address = run_with_auto_repair!(
            function = Self::find_start_address(flash, flash_range.clone(), cache).await,
            repair = try_repair(flash, flash_range.clone(), cache).await?
        )?;

        Ok(Self {
            flash,
            flash_range,
            cache,
            next_address: start_address,
        })
    }

    /// Determine the address of the first (oldest) item to visit.
    async fn find_start_address(
        flash: &mut S,
        flash_range: Range<u32>,
        cache: &mut CI,
    ) -> Result<NextAddress, Error<S::Error>> {
        // The given range must cover whole erase pages.
        assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
        assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

        assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
        assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

        // A dirty cache means a previous operation was interrupted; drop its state.
        if cache.is_dirty() {
            cache.invalidate_cache_state();
        }

        let oldest_page = find_oldest_page(flash, flash_range.clone(), cache).await?;

        // We start at the start of the oldest page, or — if the cache knows it —
        // directly at the first item after the already-erased ones on that page.
        let current_address = match cache.first_item_after_erased(oldest_page) {
            Some(address) => address,
            None => {
                calculate_page_address::<S>(flash_range.clone(), oldest_page) + S::WORD_SIZE as u32
            }
        };

        Ok(NextAddress::Address(current_address))
    }

    /// Get the next entry.
    ///
    /// If there are no more entries, None is returned.
    ///
    /// The `data_buffer` has to be large enough to be able to hold the largest item in flash.
    pub async fn next<'d, 'q>(
        &'q mut self,
        data_buffer: &'d mut [u8],
    ) -> Result<Option<QueueIteratorEntry<'s, 'd, 'q, S, CI>>, Error<S::Error>> {
        // Auto-repair wrapper around the actual search logic in `next_inner`.
        let value = run_with_auto_repair!(
            function = self.next_inner(data_buffer).await,
            repair = try_repair(self.flash, self.flash_range.clone(), self.cache).await?
        );

        // Re-borrow the buffer into the found item and pair it with this iterator
        // so the entry can later pop the exact item it points at.
        value.map(|v| {
            v.map(|(item, address)| QueueIteratorEntry {
                iter: self,
                item: item.reborrow(data_buffer),
                address,
            })
        })
    }

    /// Search for the next present item, skipping corrupted ones.
    /// Returns the (unborrowed) item and the flash address it was found at.
    async fn next_inner(
        &mut self,
        data_buffer: &mut [u8],
    ) -> Result<Option<(ItemUnborrowed, u32)>, Error<S::Error>> {
        // The buffer is moved into `read_item` and handed back on the corrupted
        // path, hence the Option wrapper.
        let mut data_buffer = Some(data_buffer);

        if self.cache.is_dirty() {
            self.cache.invalidate_cache_state();
        }

        loop {
            // Get the current page and address based on what was stored
            let (current_page, current_address) = match self.next_address {
                NextAddress::PageAfter(previous_page) => {
                    let next_page = next_page::<S>(self.flash_range.clone(), previous_page);
                    // Stop iterating when the next page is open, or when we have
                    // wrapped all the way around back to the oldest page.
                    if get_page_state(
                        self.flash,
                        self.flash_range.clone(),
                        &mut self.cache,
                        next_page,
                    )
                    .await?
                    .is_open()
                        || next_page
                            == find_oldest_page(
                                self.flash,
                                self.flash_range.clone(),
                                &mut self.cache,
                            )
                            .await?
                    {
                        self.cache.unmark_dirty();
                        return Ok(None);
                    }

                    let current_address =
                        calculate_page_address::<S>(self.flash_range.clone(), next_page)
                            + S::WORD_SIZE as u32;

                    self.next_address = NextAddress::Address(current_address);

                    (next_page, current_address)
                }
                NextAddress::Address(address) => (
                    calculate_page_index::<S>(self.flash_range.clone(), address),
                    address,
                ),
            };

            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range.clone(), current_page)
                    - S::WORD_SIZE as u32;

            // Search for the first item with data
            let mut it = ItemHeaderIter::new(current_address, page_data_end_address);
            // No need to worry about cache here since that has been dealt with at the creation of this iterator
            if let (Some(found_item_header), found_item_address) = it
                .traverse(self.flash, |header, _| header.crc.is_none())
                .await?
            {
                let maybe_item = found_item_header
                    .read_item(
                        self.flash,
                        data_buffer.take().unwrap(),
                        found_item_address,
                        page_data_end_address,
                    )
                    .await?;

                match maybe_item {
                    item::MaybeItem::Corrupted(header, db) => {
                        // Skip past the corrupted item and reclaim the buffer
                        // for the next read attempt.
                        let next_address = header.next_item_address::<S>(found_item_address);
                        self.next_address = if next_address >= page_data_end_address {
                            NextAddress::PageAfter(current_page)
                        } else {
                            NextAddress::Address(next_address)
                        };
                        data_buffer.replace(db);
                    }
                    item::MaybeItem::Erased(_, _) => unreachable!("Item is already erased"),
                    item::MaybeItem::Present(item) => {
                        let next_address = item.header.next_item_address::<S>(found_item_address);
                        self.next_address = if next_address >= page_data_end_address {
                            NextAddress::PageAfter(current_page)
                        } else {
                            NextAddress::Address(next_address)
                        };
                        // Return the item we found
                        self.cache.unmark_dirty();
                        return Ok(Some((item.unborrow(), found_item_address)));
                    }
                }
            } else {
                // No further items on this page; continue on the next one.
                self.next_address = NextAddress::PageAfter(current_page);
            }
        }
    }
}
451
/// An entry in the iteration over the queue flash
pub struct QueueIteratorEntry<'s, 'd, 'q, S: NorFlash, CI: CacheImpl> {
    // The iterator this entry came from; needed so [Self::pop] can erase the item.
    iter: &'q mut QueueIterator<'s, S, CI>,
    // Flash address where this entry's item was found.
    address: u32,
    // The item, borrowing the caller-provided data buffer.
    item: Item<'d>,
}
458
// Dereferences to the entry's data bytes, so the entry can be used like a `&[u8]`.
impl<S: NorFlash, CI: CacheImpl> Deref for QueueIteratorEntry<'_, '_, '_, S, CI> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.item.data()
    }
}
466
// Mutable access to the entry's data bytes (the caller-provided buffer).
impl<S: NorFlash, CI: CacheImpl> DerefMut for QueueIteratorEntry<'_, '_, '_, S, CI> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.item.data_mut()
    }
}
472
impl<'d, S: NorFlash, CI: CacheImpl> QueueIteratorEntry<'_, 'd, '_, S, CI> {
    /// Get a mutable reference to the data of this entry, but consume the entry too.
    /// This function has some relaxed lifetime constraints compared to the deref impls.
    pub fn into_buf(self) -> &'d mut [u8] {
        // Only the first `header.length` bytes of the buffer hold this item's data.
        let (header, data) = self.item.destruct();
        &mut data[..header.length as usize]
    }

    /// Pop the data in flash that corresponds to this entry. This makes it so
    /// future peeks won't find this data anymore.
    pub async fn pop(self) -> Result<&'d mut [u8], Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        let (header, data_buffer) = self.item.destruct();
        let ret = &mut data_buffer[..header.length as usize];

        // Erase this item's data in flash so iteration skips it from now on.
        header
            .erase_data(
                self.iter.flash,
                self.iter.flash_range.clone(),
                &mut self.iter.cache,
                self.address,
            )
            .await?;

        self.iter.cache.unmark_dirty();
        Ok(ret)
    }
}
503
/// Find the largest size of data that can be stored.
///
/// This will read through the entire flash to find the largest chunk of
/// data that can be stored, taking alignment requirements of the item into account.
///
/// If there is no space left, `None` is returned.
pub async fn find_max_fit<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<Option<u32>, Error<S::Error>> {
    // Thin wrapper around the inner function with automatic corruption repair.
    run_with_auto_repair!(
        function = find_max_fit_inner(flash, flash_range.clone(), cache).await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}
520
/// Implementation of [find_max_fit]; the public wrapper adds automatic corruption repair.
async fn find_max_fit_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<Option<u32>, Error<S::Error>> {
    // The given range must cover whole erase pages.
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    // A dirty cache means a previous operation was interrupted; drop its state.
    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?;

    // Check if we have space on the next page
    let next_page = next_page::<S>(flash_range.clone(), current_page);
    match get_page_state(flash, flash_range.clone(), cache, next_page).await? {
        state @ PageState::Closed => {
            // A closed-but-empty next page can be reused in full, so the max fit
            // is a whole page minus the two reserved words.
            if is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)).await? {
                cache.unmark_dirty();
                return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32));
            }
        }
        PageState::Open => {
            // An open next page is fully available.
            cache.unmark_dirty();
            return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32));
        }
        PageState::PartialOpen => {
            // This should never happen: only one page may be partial open at a time.
            return Err(Error::Corrupted {
                #[cfg(feature = "_test")]
                backtrace: std::backtrace::Backtrace::capture(),
            });
        }
    };

    // See how much space we can find in the current page.
    let page_data_start_address =
        calculate_page_address::<S>(flash_range.clone(), current_page) + S::WORD_SIZE as u32;
    let page_data_end_address =
        calculate_page_end_address::<S>(flash_range.clone(), current_page) - S::WORD_SIZE as u32;

    // Use the cached write head if available, otherwise walk the item headers
    // to find the end of the written data.
    let next_item_address = match cache.first_item_after_written(current_page) {
        Some(next_item_address) => next_item_address,
        None => {
            ItemHeaderIter::new(
                cache
                    .first_item_after_erased(current_page)
                    .unwrap_or(page_data_start_address),
                page_data_end_address,
            )
            .traverse(flash, |_, _| true)
            .await?
            .1
        }
    };

    cache.unmark_dirty();
    // Convert the raw remaining byte count into usable item-data bytes
    // (None if not even a header would fit).
    Ok(ItemHeader::available_data_bytes::<S>(
        page_data_end_address - next_item_address,
    ))
}
586
/// Calculate how much space is left free in the queue (in bytes).
///
/// The number given back is accurate, however there are lots of things that add overhead and padding.
/// Every push is an item with its own overhead. You can check the overhead per item with [crate::item_overhead_size].
///
/// Furthermore, every item has to fully fit in a page. So if a page has 50 bytes left and you push an item of 60 bytes,
/// the current page is closed and the item is stored on the next page, 'wasting' the 50 you had.
///
/// So unless you're tracking all this, the returned number should only be used as a rough indication.
pub async fn space_left<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<u32, Error<S::Error>> {
    // Thin wrapper around the inner function with automatic corruption repair.
    run_with_auto_repair!(
        function = space_left_inner(flash, flash_range.clone(), cache).await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}
606
/// Implementation of [space_left]; the public wrapper adds automatic corruption repair.
async fn space_left_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<u32, Error<S::Error>> {
    // The given range must cover whole erase pages.
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    // A dirty cache means a previous operation was interrupted; drop its state.
    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    // Sum the free bytes over all pages in the range.
    let mut total_free_space = 0;

    for page in get_pages::<S>(flash_range.clone(), 0) {
        let state = get_page_state(flash, flash_range.clone(), cache, page).await?;
        let page_empty =
            is_page_empty(flash, flash_range.clone(), cache, page, Some(state)).await?;

        // A closed page that still holds data contributes no free space.
        if state.is_closed() && !page_empty {
            continue;
        }

        // See how much space we can find in the current page.
        let page_data_start_address =
            calculate_page_address::<S>(flash_range.clone(), page) + S::WORD_SIZE as u32;
        let page_data_end_address =
            calculate_page_end_address::<S>(flash_range.clone(), page) - S::WORD_SIZE as u32;

        // Empty pages contribute their entire data region.
        if page_empty {
            total_free_space += page_data_end_address - page_data_start_address;
            continue;
        }

        // Partial open page
        // Use the cached write head if available, otherwise walk the item headers.
        let next_item_address = match cache.first_item_after_written(page) {
            Some(next_item_address) => next_item_address,
            None => {
                ItemHeaderIter::new(
                    cache
                        .first_item_after_erased(page)
                        .unwrap_or(page_data_start_address),
                    page_data_end_address,
                )
                .traverse(flash, |_, _| true)
                .await?
                .1
            }
        };

        if ItemHeader::available_data_bytes::<S>(page_data_end_address - next_item_address)
            .is_none()
        {
            // No data fits on this partial open page anymore.
            // So if all data on this is already erased, then this page might as well be counted as empty.
            // We can use [is_page_empty] and lie to it so it checks the items.
            if is_page_empty(
                flash,
                flash_range.clone(),
                cache,
                page,
                Some(PageState::Closed),
            )
            .await?
            {
                total_free_space += page_data_end_address - page_data_start_address;
                continue;
            }
        }

        total_free_space += page_data_end_address - next_item_address;
    }

    cache.unmark_dirty();
    Ok(total_free_space)
}
686
687async fn find_youngest_page<S: NorFlash>(
688    flash: &mut S,
689    flash_range: Range<u32>,
690    cache: &mut impl PrivateCacheImpl,
691) -> Result<usize, Error<S::Error>> {
692    let last_used_page =
693        find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?;
694
695    if let Some(last_used_page) = last_used_page {
696        return Ok(last_used_page);
697    }
698
699    // We have no partial open page. Search for an open page to start in
700    let first_open_page = find_first_page(flash, flash_range, cache, 0, PageState::Open).await?;
701
702    if let Some(first_open_page) = first_open_page {
703        return Ok(first_open_page);
704    }
705
706    // All pages are closed... This is not correct.
707    Err(Error::Corrupted {
708        #[cfg(feature = "_test")]
709        backtrace: std::backtrace::Backtrace::capture(),
710    })
711}
712
713async fn find_oldest_page<S: NorFlash>(
714    flash: &mut S,
715    flash_range: Range<u32>,
716    cache: &mut impl PrivateCacheImpl,
717) -> Result<usize, Error<S::Error>> {
718    let youngest_page = find_youngest_page(flash, flash_range.clone(), cache).await?;
719
720    // The oldest page is the first non-open page after the youngest page
721    let oldest_closed_page =
722        find_first_page(flash, flash_range, cache, youngest_page, PageState::Closed).await?;
723
724    Ok(oldest_closed_page.unwrap_or(youngest_page))
725}
726
727/// Try to repair the state of the flash to hopefull get back everything in working order.
728/// Care is taken that no data is lost, but this depends on correctly repairing the state and
729/// so is only best effort.
730///
731/// This function might be called after a different function returned the [Error::Corrupted] error.
732/// There's no guarantee it will work.
733///
734/// If this function or the function call after this crate returns [Error::Corrupted], then it's unlikely
735/// that the state can be recovered. To at least make everything function again at the cost of losing the data,
736/// erase the flash range.
737async fn try_repair<S: NorFlash>(
738    flash: &mut S,
739    flash_range: Range<u32>,
740    cache: &mut impl CacheImpl,
741) -> Result<(), Error<S::Error>> {
742    cache.invalidate_cache_state();
743
744    crate::try_general_repair(flash, flash_range.clone(), cache).await?;
745    Ok(())
746}
747
748#[cfg(test)]
749mod tests {
750    use crate::mock_flash::{FlashAverageStatsResult, FlashStatsResult, WriteCountCheck};
751
752    use super::*;
753    use futures_test::test;
754
755    type MockFlashBig = mock_flash::MockFlashBase<4, 4, 256>;
756    type MockFlashTiny = mock_flash::MockFlashBase<2, 1, 32>;
757
758    #[test]
759    async fn peek_and_overwrite_old_data() {
760        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
761        const FLASH_RANGE: Range<u32> = 0x00..0x40;
762        let mut data_buffer = AlignedBuf([0; 1024]);
763        const DATA_SIZE: usize = 22;
764
765        assert_eq!(
766            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
767                .await
768                .unwrap(),
769            60
770        );
771
772        assert_eq!(
773            peek(
774                &mut flash,
775                FLASH_RANGE,
776                &mut cache::NoCache::new(),
777                &mut data_buffer
778            )
779            .await
780            .unwrap(),
781            None
782        );
783
784        data_buffer[..DATA_SIZE].copy_from_slice(&[0xAA; DATA_SIZE]);
785        push(
786            &mut flash,
787            FLASH_RANGE,
788            &mut cache::NoCache::new(),
789            &data_buffer[..DATA_SIZE],
790            false,
791        )
792        .await
793        .unwrap();
794
795        assert_eq!(
796            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
797                .await
798                .unwrap(),
799            30
800        );
801
802        assert_eq!(
803            peek(
804                &mut flash,
805                FLASH_RANGE,
806                &mut cache::NoCache::new(),
807                &mut data_buffer
808            )
809            .await
810            .unwrap()
811            .unwrap(),
812            &[0xAA; DATA_SIZE]
813        );
814        data_buffer[..DATA_SIZE].copy_from_slice(&[0xBB; DATA_SIZE]);
815        push(
816            &mut flash,
817            FLASH_RANGE,
818            &mut cache::NoCache::new(),
819            &data_buffer[..DATA_SIZE],
820            false,
821        )
822        .await
823        .unwrap();
824
825        assert_eq!(
826            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
827                .await
828                .unwrap(),
829            0
830        );
831
832        assert_eq!(
833            peek(
834                &mut flash,
835                FLASH_RANGE,
836                &mut cache::NoCache::new(),
837                &mut data_buffer
838            )
839            .await
840            .unwrap()
841            .unwrap(),
842            &[0xAA; DATA_SIZE]
843        );
844
845        // Flash is full, this should fail
846        data_buffer[..DATA_SIZE].copy_from_slice(&[0xCC; DATA_SIZE]);
847        push(
848            &mut flash,
849            FLASH_RANGE,
850            &mut cache::NoCache::new(),
851            &data_buffer[..DATA_SIZE],
852            false,
853        )
854        .await
855        .unwrap_err();
856        // Now we allow overwrite, so it should work
857        data_buffer[..DATA_SIZE].copy_from_slice(&[0xDD; DATA_SIZE]);
858        push(
859            &mut flash,
860            FLASH_RANGE,
861            &mut cache::NoCache::new(),
862            &data_buffer[..DATA_SIZE],
863            true,
864        )
865        .await
866        .unwrap();
867
868        assert_eq!(
869            peek(
870                &mut flash,
871                FLASH_RANGE,
872                &mut cache::NoCache::new(),
873                &mut data_buffer
874            )
875            .await
876            .unwrap()
877            .unwrap(),
878            &[0xBB; DATA_SIZE]
879        );
880        assert_eq!(
881            pop(
882                &mut flash,
883                FLASH_RANGE,
884                &mut cache::NoCache::new(),
885                &mut data_buffer
886            )
887            .await
888            .unwrap()
889            .unwrap(),
890            &[0xBB; DATA_SIZE]
891        );
892
893        assert_eq!(
894            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
895                .await
896                .unwrap(),
897            30
898        );
899
900        assert_eq!(
901            peek(
902                &mut flash,
903                FLASH_RANGE,
904                &mut cache::NoCache::new(),
905                &mut data_buffer
906            )
907            .await
908            .unwrap()
909            .unwrap(),
910            &[0xDD; DATA_SIZE]
911        );
912        assert_eq!(
913            pop(
914                &mut flash,
915                FLASH_RANGE,
916                &mut cache::NoCache::new(),
917                &mut data_buffer
918            )
919            .await
920            .unwrap()
921            .unwrap(),
922            &[0xDD; DATA_SIZE]
923        );
924
925        assert_eq!(
926            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
927                .await
928                .unwrap(),
929            60
930        );
931
932        assert_eq!(
933            peek(
934                &mut flash,
935                FLASH_RANGE,
936                &mut cache::NoCache::new(),
937                &mut data_buffer
938            )
939            .await
940            .unwrap(),
941            None
942        );
943        assert_eq!(
944            pop(
945                &mut flash,
946                FLASH_RANGE,
947                &mut cache::NoCache::new(),
948                &mut data_buffer
949            )
950            .await
951            .unwrap(),
952            None
953        );
954    }
955
956    #[test]
957    async fn push_pop() {
958        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
959        let flash_range = 0x000..0x1000;
960        let mut data_buffer = AlignedBuf([0; 1024]);
961
962        for i in 0..2000 {
963            println!("{i}");
964            let data = vec![i as u8; i % 512 + 1];
965
966            push(
967                &mut flash,
968                flash_range.clone(),
969                &mut cache::NoCache::new(),
970                &data,
971                true,
972            )
973            .await
974            .unwrap();
975            assert_eq!(
976                peek(
977                    &mut flash,
978                    flash_range.clone(),
979                    &mut cache::NoCache::new(),
980                    &mut data_buffer
981                )
982                .await
983                .unwrap()
984                .unwrap(),
985                &data,
986                "At {i}"
987            );
988            assert_eq!(
989                pop(
990                    &mut flash,
991                    flash_range.clone(),
992                    &mut cache::NoCache::new(),
993                    &mut data_buffer
994                )
995                .await
996                .unwrap()
997                .unwrap(),
998                &data,
999                "At {i}"
1000            );
1001            assert_eq!(
1002                peek(
1003                    &mut flash,
1004                    flash_range.clone(),
1005                    &mut cache::NoCache::new(),
1006                    &mut data_buffer
1007                )
1008                .await
1009                .unwrap(),
1010                None,
1011                "At {i}"
1012            );
1013        }
1014    }
1015
1016    #[test]
1017    async fn push_pop_tiny() {
1018        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
1019        let flash_range = 0x00..0x40;
1020        let mut data_buffer = AlignedBuf([0; 1024]);
1021
1022        for i in 0..2000 {
1023            println!("{i}");
1024            let data = vec![i as u8; i % 20 + 1];
1025
1026            println!("PUSH");
1027            push(
1028                &mut flash,
1029                flash_range.clone(),
1030                &mut cache::NoCache::new(),
1031                &data,
1032                true,
1033            )
1034            .await
1035            .unwrap();
1036            assert_eq!(
1037                peek(
1038                    &mut flash,
1039                    flash_range.clone(),
1040                    &mut cache::NoCache::new(),
1041                    &mut data_buffer
1042                )
1043                .await
1044                .unwrap()
1045                .unwrap(),
1046                &data,
1047                "At {i}"
1048            );
1049            println!("POP");
1050            assert_eq!(
1051                pop(
1052                    &mut flash,
1053                    flash_range.clone(),
1054                    &mut cache::NoCache::new(),
1055                    &mut data_buffer
1056                )
1057                .await
1058                .unwrap()
1059                .unwrap(),
1060                &data,
1061                "At {i}"
1062            );
1063            println!("PEEK");
1064            assert_eq!(
1065                peek(
1066                    &mut flash,
1067                    flash_range.clone(),
1068                    &mut cache::NoCache::new(),
1069                    &mut data_buffer
1070                )
1071                .await
1072                .unwrap(),
1073                None,
1074                "At {i}"
1075            );
1076            println!("DONE");
1077        }
1078    }
1079
    #[test]
    /// Same as [push_lots_then_pop_lots], except with added peeking and using the iterator style
    async fn push_peek_pop_many() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x000..0x1000;
        let mut data_buffer = AlignedBuf([0; 1024]);

        // Flash-operation statistics accumulated per operation kind.
        // They are averaged and asserted against fixed numbers at the end,
        // so any change in flash access patterns shows up as a test failure.
        let mut push_stats = FlashStatsResult::default();
        let mut pushes = 0;
        let mut peek_stats = FlashStatsResult::default();
        let mut peeks = 0;
        let mut pop_stats = FlashStatsResult::default();
        let mut pops = 0;

        let mut cache = cache::NoCache::new();

        for loop_index in 0..100 {
            println!("Loop index: {loop_index}");

            // Push 20 items of 50 bytes each (fill byte = item index).
            for i in 0..20 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(&mut flash, flash_range.clone(), &mut cache, &data, false)
                    .await
                    .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            // Peek at the first 5 items via the iterator, without popping.
            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            // Iterator construction reads flash; attribute that cost to the peeks.
            peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 0..5 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = [i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .deref(),
                    &data[..],
                    "At {i}"
                );
                peeks += 1;
                peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            // Pop those same 5 items through a fresh iterator.
            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 0..5 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .pop()
                        .await
                        .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            // Push 5 more items while 15 older ones are still queued.
            for i in 20..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(&mut flash, flash_range.clone(), &mut cache, &data, false)
                    .await
                    .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            // Peek the remaining 20 items (5..25) in FIFO order.
            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 5..25 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .deref(),
                    &data,
                    "At {i}"
                );
                peeks += 1;
                peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            // Pop the remaining 20 items, leaving the queue empty for the next loop.
            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 5..25 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .pop()
                        .await
                        .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }
        }

        // Assert the performance. These numbers can be changed if acceptable.
        approx::assert_relative_eq!(
            push_stats.take_average(pushes),
            FlashAverageStatsResult {
                avg_erases: 0.0612,
                avg_reads: 17.902,
                avg_writes: 3.1252,
                avg_bytes_read: 113.7248,
                avg_bytes_written: 60.5008
            }
        );
        approx::assert_relative_eq!(
            peek_stats.take_average(peeks),
            FlashAverageStatsResult {
                avg_erases: 0.0,
                avg_reads: 8.0188,
                avg_writes: 0.0,
                avg_bytes_read: 96.4224,
                avg_bytes_written: 0.0
            }
        );
        approx::assert_relative_eq!(
            pop_stats.take_average(pops),
            FlashAverageStatsResult {
                avg_erases: 0.0,
                avg_reads: 8.0188,
                avg_writes: 1.0,
                avg_bytes_read: 96.4224,
                avg_bytes_written: 8.0
            }
        );
    }
1244
    #[test]
    /// Interleaves batches of pushes and pops over many loops and asserts the
    /// average flash-operation cost of each push and pop stays at known values.
    async fn push_lots_then_pop_lots() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x000..0x1000;
        let mut data_buffer = AlignedBuf([0; 1024]);

        // Accumulated flash statistics, averaged and asserted at the end.
        let mut push_stats = FlashStatsResult::default();
        let mut pushes = 0;
        let mut pop_stats = FlashStatsResult::default();
        let mut pops = 0;

        for loop_index in 0..100 {
            println!("Loop index: {loop_index}");

            // Push 20 items of 50 bytes each (fill byte = item index).
            for i in 0..20 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &data,
                    false,
                )
                .await
                .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            // Pop the first 5 items and check FIFO order.
            for i in 0..5 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    pop(
                        &mut flash,
                        flash_range.clone(),
                        &mut cache::NoCache::new(),
                        &mut data_buffer
                    )
                    .await
                    .unwrap()
                    .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            // Push 5 more items while 15 older ones are still queued.
            for i in 20..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &data,
                    false,
                )
                .await
                .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            // Pop the remaining 20 items (5..25), emptying the queue for the next loop.
            for i in 5..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    pop(
                        &mut flash,
                        flash_range.clone(),
                        &mut cache::NoCache::new(),
                        &mut data_buffer
                    )
                    .await
                    .unwrap()
                    .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }
        }

        // Assert the performance. These numbers can be changed if acceptable.
        approx::assert_relative_eq!(
            push_stats.take_average(pushes),
            FlashAverageStatsResult {
                avg_erases: 0.0612,
                avg_reads: 17.902,
                avg_writes: 3.1252,
                avg_bytes_read: 113.7248,
                avg_bytes_written: 60.5008
            }
        );
        approx::assert_relative_eq!(
            pop_stats.take_average(pops),
            FlashAverageStatsResult {
                avg_erases: 0.0,
                avg_reads: 82.618,
                avg_writes: 1.0,
                avg_bytes_read: 567.9904,
                avg_bytes_written: 8.0
            }
        );
    }
1354
1355    #[test]
1356    async fn pop_with_empty_section() {
1357        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
1358        let flash_range = 0x00..0x40;
1359        let mut data_buffer = AlignedBuf([0; 1024]);
1360
1361        data_buffer[..20].copy_from_slice(&[0xAA; 20]);
1362        push(
1363            &mut flash,
1364            flash_range.clone(),
1365            &mut cache::NoCache::new(),
1366            &data_buffer[0..20],
1367            false,
1368        )
1369        .await
1370        .unwrap();
1371        data_buffer[..20].copy_from_slice(&[0xBB; 20]);
1372        push(
1373            &mut flash,
1374            flash_range.clone(),
1375            &mut cache::NoCache::new(),
1376            &data_buffer[0..20],
1377            false,
1378        )
1379        .await
1380        .unwrap();
1381
1382        // There's now an unused gap at the end of the first page
1383
1384        assert_eq!(
1385            pop(
1386                &mut flash,
1387                flash_range.clone(),
1388                &mut cache::NoCache::new(),
1389                &mut data_buffer
1390            )
1391            .await
1392            .unwrap()
1393            .unwrap(),
1394            &[0xAA; 20]
1395        );
1396
1397        assert_eq!(
1398            pop(
1399                &mut flash,
1400                flash_range.clone(),
1401                &mut cache::NoCache::new(),
1402                &mut data_buffer
1403            )
1404            .await
1405            .unwrap()
1406            .unwrap(),
1407            &[0xBB; 20]
1408        );
1409    }
1410
1411    #[test]
1412    async fn search_pages() {
1413        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
1414
1415        const FLASH_RANGE: Range<u32> = 0x000..0x1000;
1416
1417        close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 0)
1418            .await
1419            .unwrap();
1420        close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 1)
1421            .await
1422            .unwrap();
1423        partial_close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 2)
1424            .await
1425            .unwrap();
1426
1427        assert_eq!(
1428            find_youngest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new())
1429                .await
1430                .unwrap(),
1431            2
1432        );
1433        assert_eq!(
1434            find_oldest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new())
1435                .await
1436                .unwrap(),
1437            0
1438        );
1439    }
1440
1441    #[test]
1442    async fn store_too_big_item() {
1443        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
1444        const FLASH_RANGE: Range<u32> = 0x000..0x1000;
1445
1446        push(
1447            &mut flash,
1448            FLASH_RANGE,
1449            &mut cache::NoCache::new(),
1450            &AlignedBuf([0; 1024 - 4 * 2 - 8]),
1451            false,
1452        )
1453        .await
1454        .unwrap();
1455
1456        assert_eq!(
1457            push(
1458                &mut flash,
1459                FLASH_RANGE,
1460                &mut cache::NoCache::new(),
1461                &AlignedBuf([0; 1024 - 4 * 2 - 8 + 1]),
1462                false,
1463            )
1464            .await,
1465            Err(Error::ItemTooBig)
1466        );
1467    }
1468}