sequential_storage/
queue.rs

//! A queue (FIFO) implementation for storing arbitrary data in flash memory.
//!
//! Use [push] to add data to the FIFO and use [peek] and [pop] to get the data back.
//!
//! ```rust
//! # use sequential_storage::queue::{push, peek, pop};
//! # use sequential_storage::cache::NoCache;
//! # use mock_flash::MockFlashBase;
//! # use futures::executor::block_on;
//! # type Flash = MockFlashBase<10, 1, 4096>;
//! # mod mock_flash {
//! #   include!("mock_flash.rs");
//! # }
//! #
//! # fn init_flash() -> Flash {
//! #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
//! # }
//! #
//! # block_on(async {
//!
//! // Initialize the flash. This can be internal or external
//! let mut flash = init_flash();
//! // These are the flash addresses in which the crate will operate.
//! // The crate will not read, write or erase outside of this range.
//! let flash_range = 0x1000..0x3000;
//! // We need to give the crate a buffer to work with.
//! // It must be big enough to hold the biggest value of your storage type.
//! let mut data_buffer = [0; 128];
//!
//! let my_data = [10, 47, 29];
//!
//! // We can push some data to the queue
//! push(&mut flash, flash_range.clone(), &mut NoCache::new(), &my_data, false).await.unwrap();
//!
//! // We can peek at the oldest data
//!
//! assert_eq!(
//!     &peek(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..],
//!     &my_data[..]
//! );
//!
//! // With popping we get back the oldest data, but that data is now also removed
//!
//! assert_eq!(
//!     &pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..],
//!     &my_data[..]
//! );
//!
//! // If we pop again, we find there's no data anymore
//!
//! assert_eq!(
//!     pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await,
//!     Ok(None)
//! );
//! # });
//! ```

use crate::item::{Item, ItemHeader, ItemHeaderIter, find_next_free_item_spot, is_page_empty};

use self::{cache::CacheImpl, item::ItemUnborrowed};

use super::*;
use embedded_storage_async::nor_flash::MultiwriteNorFlash;

/// Push data into the queue in the given flash memory with the given range.
/// The data can only be taken out with the [pop] function.
///
/// Old data will not be overwritten unless `allow_overwrite_old_data` is true.
/// If it is and the queue is full, the oldest data is removed to make space for the new data.
///
/// *Note: If a page is already used and you push more data than the remaining capacity of the page,
/// the entire remaining capacity will go unused because the data is stored on the next page.*
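///
/// A minimal sketch of the overwrite behaviour (assuming `flash`, `flash_range` and a `data`
/// slice are set up as in the module-level example; illustrative only, not compiled here):
///
/// ```rust,ignore
/// // With `allow_overwrite_old_data` set to false, pushing into a full queue fails
/// push(&mut flash, flash_range.clone(), &mut NoCache::new(), &data, false).await.unwrap_err();
/// // With it set to true, the push succeeds by erasing the page holding the oldest data
/// push(&mut flash, flash_range.clone(), &mut NoCache::new(), &data, true).await.unwrap();
/// ```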
pub async fn push<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data: &[u8],
    allow_overwrite_old_data: bool,
) -> Result<(), Error<S::Error>> {
    run_with_auto_repair!(
        function = push_inner(
            flash,
            flash_range.clone(),
            cache,
            data,
            allow_overwrite_old_data
        )
        .await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}

async fn push_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data: &[u8],
    allow_overwrite_old_data: bool,
) -> Result<(), Error<S::Error>> {
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    // Data must fit in a single page
    if data.len() > u16::MAX as usize
        || data.len()
            > calculate_page_size::<S>().saturating_sub(ItemHeader::data_address::<S>(0) as usize)
    {
        cache.unmark_dirty();
        return Err(Error::ItemTooBig);
    }

    let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?;

    let page_data_start_address =
        calculate_page_address::<S>(flash_range.clone(), current_page) + S::WORD_SIZE as u32;
    let page_data_end_address =
        calculate_page_end_address::<S>(flash_range.clone(), current_page) - S::WORD_SIZE as u32;

    partial_close_page(flash, flash_range.clone(), cache, current_page).await?;

    // Find the last item on the page so we know where we need to write

    let mut next_address = find_next_free_item_spot(
        flash,
        flash_range.clone(),
        cache,
        page_data_start_address,
        page_data_end_address,
        data.len() as u32,
    )
    .await?;

    if next_address.is_none() {
        // No capacity left on this page, move to the next page
        let next_page = next_page::<S>(flash_range.clone(), current_page);
        let next_page_state = get_page_state(flash, flash_range.clone(), cache, next_page).await?;
        let single_page = next_page == current_page;

        match (next_page_state, single_page) {
            (PageState::Open, _) => {
                close_page(flash, flash_range.clone(), cache, current_page).await?;
                partial_close_page(flash, flash_range.clone(), cache, next_page).await?;
                next_address = Some(
                    calculate_page_address::<S>(flash_range.clone(), next_page)
                        + S::WORD_SIZE as u32,
                );
            }
            (PageState::Closed, _) | (PageState::PartialOpen, true) => {
                let next_page_data_start_address =
                    calculate_page_address::<S>(flash_range.clone(), next_page)
                        + S::WORD_SIZE as u32;

                if !allow_overwrite_old_data
                    && !is_page_empty(
                        flash,
                        flash_range.clone(),
                        cache,
                        next_page,
                        Some(next_page_state),
                    )
                    .await?
                {
                    cache.unmark_dirty();
                    return Err(Error::FullStorage);
                }

                open_page(flash, flash_range.clone(), cache, next_page).await?;
                if !single_page {
                    close_page(flash, flash_range.clone(), cache, current_page).await?;
                }
                partial_close_page(flash, flash_range.clone(), cache, next_page).await?;
                next_address = Some(next_page_data_start_address);
            }
            (PageState::PartialOpen, false) => {
                // This should never happen
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            }
        }
    }

    Item::write_new(
        flash,
        flash_range.clone(),
        cache,
        next_address.unwrap(),
        data,
    )
    .await?;

    cache.unmark_dirty();
    Ok(())
}

/// Get an iterator-like interface to iterate over the items stored in the queue.
/// This goes from oldest to newest.
///
/// The iteration is non-destructive; in other words, it peeks at every item.
/// The returned entry has a [QueueIteratorEntry::pop] function with which you can decide to pop the item
/// after you've seen the contents.
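///
/// A minimal usage sketch (assuming `flash`, `flash_range` and `data_buffer` are set up as in
/// the module-level example; the filter on the first byte is purely illustrative):
///
/// ```rust,ignore
/// let mut iterator = iter(&mut flash, flash_range.clone(), &mut NoCache::new()).await?;
///
/// while let Some(entry) = iterator.next(&mut data_buffer).await? {
///     // The entry derefs to the data slice of the item
///     if entry.first() == Some(&0xAA) {
///         // Remove this item from the queue; iteration can continue afterwards
///         entry.pop().await?;
///     }
/// }
/// ```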
pub async fn iter<'s, S: NorFlash, CI: CacheImpl>(
    flash: &'s mut S,
    flash_range: Range<u32>,
    cache: &'s mut CI,
) -> Result<QueueIterator<'s, S, CI>, Error<S::Error>> {
    // Note: Corruption repair is done in these functions already
    QueueIterator::new(flash, flash_range, cache).await
}

/// Peek at the oldest data.
///
/// If you also want to remove the data, use [pop].
///
/// The data is written to the given `data_buffer` and the part that was written is returned.
/// It is valid to use only the length of the returned slice and read from the original `data_buffer`.
/// The `data_buffer` may contain extra data in the ranges after the returned slice.
/// You should not depend on that data.
///
/// If the data buffer is not big enough an error is returned.
pub async fn peek<'d, S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data_buffer: &'d mut [u8],
) -> Result<Option<&'d mut [u8]>, Error<S::Error>> {
    // Note: Corruption repair is done in these functions already
    let mut iterator = iter(flash, flash_range, cache).await?;

    let next_value = iterator.next(data_buffer).await?;

    match next_value {
        Some(entry) => Ok(Some(entry.into_buf())),
        None => Ok(None),
    }
}

/// Pop the oldest data from the queue.
///
/// If you don't want to remove the data, use [peek].
///
/// The data is written to the given `data_buffer` and the part that was written is returned.
/// It is valid to use only the length of the returned slice and read from the original `data_buffer`.
/// The `data_buffer` may contain extra data in the ranges after the returned slice.
/// You should not depend on that data.
///
/// If the data buffer is not big enough an error is returned.
pub async fn pop<'d, S: MultiwriteNorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
    data_buffer: &'d mut [u8],
) -> Result<Option<&'d mut [u8]>, Error<S::Error>> {
    let mut iterator = iter(flash, flash_range, cache).await?;

    let next_value = iterator.next(data_buffer).await?;

    match next_value {
        Some(entry) => Ok(Some(entry.pop().await?)),
        None => Ok(None),
    }
}

#[derive(PartialEq, Eq, Clone, Copy, Debug)]
enum PreviousItemStates {
    AllPopped,
    AllButCurrentPopped,
    Unpopped,
}

/// An iterator-like interface for peeking into data stored in flash with the option to pop it.
pub struct QueueIterator<'s, S: NorFlash, CI: CacheImpl> {
    flash: &'s mut S,
    flash_range: Range<u32>,
    cache: &'s mut CI,
    next_address: NextAddress,
    previous_item_states: PreviousItemStates,
    oldest_page: usize,
}

impl<S: NorFlash, CI: CacheImpl> Debug for QueueIterator<'_, S, CI> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("QueueIterator")
            .field("current_address", &self.next_address)
            .finish_non_exhaustive()
    }
}

#[derive(Debug, Clone, Copy)]
enum NextAddress {
    Address(u32),
    PageAfter(usize),
}

impl<'s, S: NorFlash, CI: CacheImpl> QueueIterator<'s, S, CI> {
    async fn new(
        flash: &'s mut S,
        flash_range: Range<u32>,
        cache: &'s mut CI,
    ) -> Result<Self, Error<S::Error>> {
        let start_address = run_with_auto_repair!(
            function = Self::find_start_address(flash, flash_range.clone(), cache).await,
            repair = try_repair(flash, flash_range.clone(), cache).await?
        )?;

        Ok(Self {
            flash,
            flash_range: flash_range.clone(),
            cache,
            next_address: start_address,
            previous_item_states: PreviousItemStates::AllPopped,
            oldest_page: match start_address {
                NextAddress::Address(address) => {
                    calculate_page_index::<S>(flash_range.clone(), address)
                }
                NextAddress::PageAfter(index) => next_page::<S>(flash_range.clone(), index),
            },
        })
    }

    async fn find_start_address(
        flash: &mut S,
        flash_range: Range<u32>,
        cache: &mut CI,
    ) -> Result<NextAddress, Error<S::Error>> {
        assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
        assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

        assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
        assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

        if cache.is_dirty() {
            cache.invalidate_cache_state();
        }

        let oldest_page = find_oldest_page(flash, flash_range.clone(), cache).await?;

        // We start at the start of the oldest page
        let current_address = match cache.first_item_after_erased(oldest_page) {
            Some(address) => address,
            None => {
                calculate_page_address::<S>(flash_range.clone(), oldest_page) + S::WORD_SIZE as u32
            }
        };

        Ok(NextAddress::Address(current_address))
    }

    /// Get the next entry.
    ///
    /// If there are no more entries, None is returned.
    ///
    /// The `data_buffer` has to be large enough to hold the largest item in flash.
    pub async fn next<'d, 'q>(
        &'q mut self,
        data_buffer: &'d mut [u8],
    ) -> Result<Option<QueueIteratorEntry<'s, 'd, 'q, S, CI>>, Error<S::Error>> {
        // We're continuing from a point where the current item wasn't popped.
        // That means that from now on, the next item will have unpopped items behind it.
        if self.previous_item_states == PreviousItemStates::AllButCurrentPopped {
            self.previous_item_states = PreviousItemStates::Unpopped;
        }

        let value = run_with_auto_repair!(
            function = self.next_inner(data_buffer).await,
            repair = try_repair(self.flash, self.flash_range.clone(), self.cache).await?
        );

        value.map(|v| {
            v.map(|(item, address)| QueueIteratorEntry {
                iter: self,
                item: item.reborrow(data_buffer),
                address,
            })
        })
    }

    async fn next_inner(
        &mut self,
        data_buffer: &mut [u8],
    ) -> Result<Option<(ItemUnborrowed, u32)>, Error<S::Error>> {
        let mut data_buffer = Some(data_buffer);

        if self.cache.is_dirty() {
            self.cache.invalidate_cache_state();
        }

        loop {
            // Get the current page and address based on what was stored
            let (current_page, current_address) = match self.next_address {
                NextAddress::PageAfter(previous_page) => {
                    let next_page = next_page::<S>(self.flash_range.clone(), previous_page);
                    if get_page_state(
                        self.flash,
                        self.flash_range.clone(),
                        &mut self.cache,
                        next_page,
                    )
                    .await?
                    .is_open()
                        || next_page == self.oldest_page
                    {
                        self.cache.unmark_dirty();
                        return Ok(None);
                    }

                    // We now know the previous page was left because there were no items on there anymore
                    // If we know all those items were popped, we can proactively open the previous page
                    // This is amazing for performance
                    if self.previous_item_states == PreviousItemStates::AllPopped {
                        open_page(
                            self.flash,
                            self.flash_range.clone(),
                            self.cache,
                            previous_page,
                        )
                        .await?;
                    }

                    let current_address =
                        calculate_page_address::<S>(self.flash_range.clone(), next_page)
                            + S::WORD_SIZE as u32;

                    self.next_address = NextAddress::Address(current_address);

                    (next_page, current_address)
                }
                NextAddress::Address(address) => (
                    calculate_page_index::<S>(self.flash_range.clone(), address),
                    address,
                ),
            };

            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range.clone(), current_page)
                    - S::WORD_SIZE as u32;

            // Search for the first item with data
            let mut it = ItemHeaderIter::new(current_address, page_data_end_address);
            // No need to worry about cache here since that has been dealt with at the creation of this iterator
            if let (Some(found_item_header), found_item_address) = it
                .traverse(self.flash, |header, _| header.crc.is_none())
                .await?
            {
                let maybe_item = found_item_header
                    .read_item(
                        self.flash,
                        data_buffer.take().unwrap(),
                        found_item_address,
                        page_data_end_address,
                    )
                    .await?;

                match maybe_item {
                    item::MaybeItem::Corrupted(header, db) => {
                        let next_address = header.next_item_address::<S>(found_item_address);
                        self.next_address = if next_address >= page_data_end_address {
                            NextAddress::PageAfter(current_page)
                        } else {
                            NextAddress::Address(next_address)
                        };
                        data_buffer.replace(db);
                    }
                    item::MaybeItem::Erased(_, _) => unreachable!("Item is already erased"),
                    item::MaybeItem::Present(item) => {
                        let next_address = item.header.next_item_address::<S>(found_item_address);
                        self.next_address = if next_address >= page_data_end_address {
                            NextAddress::PageAfter(current_page)
                        } else {
                            NextAddress::Address(next_address)
                        };

                        // Record that the current item hasn't been popped (yet)
                        if self.previous_item_states == PreviousItemStates::AllPopped {
                            self.previous_item_states = PreviousItemStates::AllButCurrentPopped;
                        }

                        // Return the item we found
                        self.cache.unmark_dirty();
                        return Ok(Some((item.unborrow(), found_item_address)));
                    }
                }
            } else {
                self.next_address = NextAddress::PageAfter(current_page);
            }
        }
    }
}

/// An entry in the iteration over the queue flash
pub struct QueueIteratorEntry<'s, 'd, 'q, S: NorFlash, CI: CacheImpl> {
    iter: &'q mut QueueIterator<'s, S, CI>,
    address: u32,
    item: Item<'d>,
}

impl<S: NorFlash, CI: CacheImpl> Deref for QueueIteratorEntry<'_, '_, '_, S, CI> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.item.data()
    }
}

impl<S: NorFlash, CI: CacheImpl> DerefMut for QueueIteratorEntry<'_, '_, '_, S, CI> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.item.data_mut()
    }
}

impl<'d, S: NorFlash, CI: CacheImpl> QueueIteratorEntry<'_, 'd, '_, S, CI> {
    /// Get a mutable reference to the data of this entry, but consume the entry too.
    /// This function has some relaxed lifetime constraints compared to the deref impls.
    pub fn into_buf(self) -> &'d mut [u8] {
        let (header, data) = self.item.destruct();
        &mut data[..header.length as usize]
    }

    /// Pop the data in flash that corresponds to this entry. This makes it so
    /// future peeks won't find this data anymore.
    pub async fn pop(self) -> Result<&'d mut [u8], Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        let (header, data_buffer) = self.item.destruct();
        let ret = &mut data_buffer[..header.length as usize];

        // We're popping ourselves, so if all previous items but us were popped, then now all are popped again
        if self.iter.previous_item_states == PreviousItemStates::AllButCurrentPopped {
            self.iter.previous_item_states = PreviousItemStates::AllPopped;
        }

        header
            .erase_data(
                self.iter.flash,
                self.iter.flash_range.clone(),
                &mut self.iter.cache,
                self.address,
            )
            .await?;

        self.iter.cache.unmark_dirty();
        Ok(ret)
    }

    /// Get the flash address of the item
    #[cfg(feature = "_test")]
    pub fn address(&self) -> u32 {
        self.address
    }
}

/// Find the largest size of data that can be stored.
///
/// This will read through the entire flash to find the largest chunk of
/// data that can be stored, taking alignment requirements of the item into account.
///
/// If there is no space left, `None` is returned.
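///
/// A minimal usage sketch (setup as in the module-level example; `payload` is a hypothetical
/// slice you want to store):
///
/// ```rust,ignore
/// let max_fit = find_max_fit(&mut flash, flash_range.clone(), &mut NoCache::new()).await?;
///
/// match max_fit {
///     Some(max) if payload.len() as u32 <= max => {
///         push(&mut flash, flash_range.clone(), &mut NoCache::new(), payload, false).await?;
///     }
///     _ => {
///         // Not enough contiguous space right now; pop some items first or try again later
///     }
/// }
/// ```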
pub async fn find_max_fit<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<Option<u32>, Error<S::Error>> {
    run_with_auto_repair!(
        function = find_max_fit_inner(flash, flash_range.clone(), cache).await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}

async fn find_max_fit_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<Option<u32>, Error<S::Error>> {
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?;

    // Check if we have space on the next page
    let next_page = next_page::<S>(flash_range.clone(), current_page);
    match get_page_state(flash, flash_range.clone(), cache, next_page).await? {
        state @ PageState::Closed => {
            if is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)).await? {
                cache.unmark_dirty();
                return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32));
            }
        }
        PageState::Open => {
            cache.unmark_dirty();
            return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32));
        }
        PageState::PartialOpen => {
            // This should never happen
            return Err(Error::Corrupted {
                #[cfg(feature = "_test")]
                backtrace: std::backtrace::Backtrace::capture(),
            });
        }
    };

    // See how much space we can find in the current page.
    let page_data_start_address =
        calculate_page_address::<S>(flash_range.clone(), current_page) + S::WORD_SIZE as u32;
    let page_data_end_address =
        calculate_page_end_address::<S>(flash_range.clone(), current_page) - S::WORD_SIZE as u32;

    let next_item_address = match cache.first_item_after_written(current_page) {
        Some(next_item_address) => next_item_address,
        None => {
            ItemHeaderIter::new(
                cache
                    .first_item_after_erased(current_page)
                    .unwrap_or(page_data_start_address),
                page_data_end_address,
            )
            .traverse(flash, |_, _| true)
            .await?
            .1
        }
    };

    cache.unmark_dirty();
    Ok(ItemHeader::available_data_bytes::<S>(
        page_data_end_address - next_item_address,
    ))
}

/// Calculate how much space is left free in the queue (in bytes).
///
/// The number given back is accurate; however, there are lots of things that add overhead and padding.
/// Every push is an item with its own overhead. You can check the overhead per item with [crate::item_overhead_size].
///
/// Furthermore, every item has to fully fit in a page. So if a page has 50 bytes left and you push an item of 60 bytes,
/// the current page is closed and the item is stored on the next page, 'wasting' the 50 bytes you had.
///
/// So unless you're tracking all this, the returned number should only be used as a rough indication.
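///
/// A minimal usage sketch (setup as in the module-level example):
///
/// ```rust,ignore
/// let free = space_left(&mut flash, flash_range.clone(), &mut NoCache::new()).await?;
///
/// // `free` is in raw bytes; every pushed item also pays the per-item overhead described by
/// // [crate::item_overhead_size], so treat this as a rough indication rather than an exact fit.
/// if free < 128 {
///     // e.g. start draining the queue before pushing more
/// }
/// ```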
pub async fn space_left<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<u32, Error<S::Error>> {
    run_with_auto_repair!(
        function = space_left_inner(flash, flash_range.clone(), cache).await,
        repair = try_repair(flash, flash_range.clone(), cache).await?
    )
}

async fn space_left_inner<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<u32, Error<S::Error>> {
    assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0);
    assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0);

    assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4);
    assert!(S::WORD_SIZE <= MAX_WORD_SIZE);

    if cache.is_dirty() {
        cache.invalidate_cache_state();
    }

    let mut total_free_space = 0;

    for page in get_pages::<S>(flash_range.clone(), 0) {
        let state = get_page_state(flash, flash_range.clone(), cache, page).await?;
        let page_empty =
            is_page_empty(flash, flash_range.clone(), cache, page, Some(state)).await?;

        if state.is_closed() && !page_empty {
            continue;
        }

        // See how much space we can find in the current page.
        let page_data_start_address =
            calculate_page_address::<S>(flash_range.clone(), page) + S::WORD_SIZE as u32;
        let page_data_end_address =
            calculate_page_end_address::<S>(flash_range.clone(), page) - S::WORD_SIZE as u32;

        if page_empty {
            total_free_space += page_data_end_address - page_data_start_address;
            continue;
        }

        // Partial open page
        let next_item_address = match cache.first_item_after_written(page) {
            Some(next_item_address) => next_item_address,
            None => {
                ItemHeaderIter::new(
                    cache
                        .first_item_after_erased(page)
                        .unwrap_or(page_data_start_address),
                    page_data_end_address,
                )
                .traverse(flash, |_, _| true)
                .await?
                .1
            }
        };

        if ItemHeader::available_data_bytes::<S>(page_data_end_address - next_item_address)
            .is_none()
        {
            // No data fits on this partial open page anymore.
            // So if all data on this page is already erased, then this page might as well be counted as empty.
            // We can use [is_page_empty] and lie to it so it checks the items.
            if is_page_empty(
                flash,
                flash_range.clone(),
                cache,
                page,
                Some(PageState::Closed),
            )
            .await?
            {
                total_free_space += page_data_end_address - page_data_start_address;
                continue;
            }
        }

        total_free_space += page_data_end_address - next_item_address;
    }

    cache.unmark_dirty();
    Ok(total_free_space)
}

async fn find_youngest_page<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl PrivateCacheImpl,
) -> Result<usize, Error<S::Error>> {
    let last_used_page =
        find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?;

    if let Some(last_used_page) = last_used_page {
        return Ok(last_used_page);
    }

    // We have no partial open page. Search for a closed page to anchor ourselves to
    let first_closed_page =
        find_first_page(flash, flash_range.clone(), cache, 0, PageState::Closed).await?;

    let first_open_page = match first_closed_page {
        Some(anchor) => {
            // We have at least one closed page
            // The first one after it is the page we need to use
            find_first_page(flash, flash_range, cache, anchor, PageState::Open).await?
        }
        None => {
            // No closed pages and no partial open pages, so all pages should be open
            // Might as well start at page 0
            Some(0)
        }
    };

    if let Some(first_open_page) = first_open_page {
        return Ok(first_open_page);
    }

    // All pages are closed... This is not correct.
    Err(Error::Corrupted {
        #[cfg(feature = "_test")]
        backtrace: std::backtrace::Backtrace::capture(),
    })
}

async fn find_oldest_page<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl PrivateCacheImpl,
) -> Result<usize, Error<S::Error>> {
    let youngest_page = find_youngest_page(flash, flash_range.clone(), cache).await?;

    // The oldest page is the first non-open page after the youngest page
    let oldest_closed_page =
        find_first_page(flash, flash_range, cache, youngest_page, PageState::Closed).await?;

    Ok(oldest_closed_page.unwrap_or(youngest_page))
}

/// Try to repair the state of the flash to hopefully get everything back in working order.
/// Care is taken that no data is lost, but this depends on correctly repairing the state and
/// so is only best effort.
///
/// This function might be called after a different function returned the [Error::Corrupted] error.
/// There's no guarantee it will work.
///
/// If this function, or the crate function you call after it, returns [Error::Corrupted], then it's unlikely
/// that the state can be recovered. To at least make everything function again at the cost of losing the data,
/// erase the flash range.
async fn try_repair<S: NorFlash>(
    flash: &mut S,
    flash_range: Range<u32>,
    cache: &mut impl CacheImpl,
) -> Result<(), Error<S::Error>> {
    cache.invalidate_cache_state();

    crate::try_general_repair(flash, flash_range.clone(), cache).await?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use crate::mock_flash::{FlashAverageStatsResult, FlashStatsResult, WriteCountCheck};

    use super::*;
    use futures_test::test;

    type MockFlashBig = mock_flash::MockFlashBase<4, 4, 256>;
    type MockFlashTiny = mock_flash::MockFlashBase<2, 1, 32>;

    #[test]
    async fn peek_and_overwrite_old_data() {
        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
        const FLASH_RANGE: Range<u32> = 0x00..0x40;
        let mut data_buffer = AlignedBuf([0; 1024]);
        const DATA_SIZE: usize = 22;

        assert_eq!(
            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
                .await
                .unwrap(),
            60
        );

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap(),
            None
        );

        data_buffer[..DATA_SIZE].copy_from_slice(&[0xAA; DATA_SIZE]);
        push(
            &mut flash,
            FLASH_RANGE,
            &mut cache::NoCache::new(),
            &data_buffer[..DATA_SIZE],
            false,
        )
        .await
        .unwrap();

        assert_eq!(
            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
                .await
                .unwrap(),
            30
        );

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xAA; DATA_SIZE]
        );
        data_buffer[..DATA_SIZE].copy_from_slice(&[0xBB; DATA_SIZE]);
        push(
            &mut flash,
            FLASH_RANGE,
            &mut cache::NoCache::new(),
            &data_buffer[..DATA_SIZE],
            false,
        )
        .await
        .unwrap();

        assert_eq!(
            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
                .await
                .unwrap(),
            0
        );

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xAA; DATA_SIZE]
        );

        // Flash is full, this should fail
        data_buffer[..DATA_SIZE].copy_from_slice(&[0xCC; DATA_SIZE]);
        push(
            &mut flash,
            FLASH_RANGE,
            &mut cache::NoCache::new(),
            &data_buffer[..DATA_SIZE],
            false,
        )
        .await
        .unwrap_err();
        // Now we allow overwrite, so it should work
        data_buffer[..DATA_SIZE].copy_from_slice(&[0xDD; DATA_SIZE]);
        push(
            &mut flash,
            FLASH_RANGE,
            &mut cache::NoCache::new(),
            &data_buffer[..DATA_SIZE],
            true,
        )
        .await
        .unwrap();

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xBB; DATA_SIZE]
        );
        assert_eq!(
            pop(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xBB; DATA_SIZE]
        );

        assert_eq!(
            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
                .await
                .unwrap(),
            30
        );

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xDD; DATA_SIZE]
        );
        assert_eq!(
            pop(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xDD; DATA_SIZE]
        );

        assert_eq!(
            space_left(&mut flash, FLASH_RANGE, &mut cache::NoCache::new())
                .await
                .unwrap(),
            60
        );

        assert_eq!(
            peek(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap(),
            None
        );
        assert_eq!(
            pop(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap(),
            None
        );
    }

    #[test]
    async fn push_pop() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x000..0x1000;
        let mut data_buffer = AlignedBuf([0; 1024]);

        for i in 0..2000 {
            println!("{i}");
            let data = vec![i as u8; i % 512 + 1];

            push(
                &mut flash,
                flash_range.clone(),
                &mut cache::NoCache::new(),
                &data,
                true,
            )
            .await
            .unwrap();
            assert_eq!(
                peek(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap()
                .unwrap(),
                &data,
                "At {i}"
            );
            assert_eq!(
                pop(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap()
                .unwrap(),
                &data,
                "At {i}"
            );
            assert_eq!(
                peek(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap(),
                None,
                "At {i}"
            );
        }
    }

    #[test]
    async fn push_pop_tiny() {
        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x00..0x40;
        let mut data_buffer = AlignedBuf([0; 1024]);

        for i in 0..2000 {
            println!("{i}");
            let data = vec![i as u8; i % 20 + 1];

            println!("PUSH");
            push(
                &mut flash,
                flash_range.clone(),
                &mut cache::NoCache::new(),
                &data,
                true,
            )
            .await
            .unwrap();
            assert_eq!(
                peek(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap()
                .unwrap(),
                &data,
                "At {i}"
            );
            println!("POP");
            assert_eq!(
                pop(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap()
                .unwrap(),
                &data,
                "At {i}"
            );
            println!("PEEK");
            assert_eq!(
                peek(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &mut data_buffer
                )
                .await
                .unwrap(),
                None,
                "At {i}"
            );
            println!("DONE");
        }
    }

    #[test]
    /// Same as [push_lots_then_pop_lots], except with added peeking and using the iterator style
    async fn push_peek_pop_many() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x000..0x1000;
        let mut data_buffer = AlignedBuf([0; 1024]);

        let mut push_stats = FlashStatsResult::default();
        let mut pushes = 0;
        let mut peek_stats = FlashStatsResult::default();
        let mut peeks = 0;
        let mut pop_stats = FlashStatsResult::default();
        let mut pops = 0;

        let mut cache = cache::NoCache::new();

        for loop_index in 0..100 {
            println!("Loop index: {loop_index}");

            for i in 0..20 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(&mut flash, flash_range.clone(), &mut cache, &data, false)
                    .await
                    .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 0..5 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = [i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .deref(),
                    &data[..],
                    "At {i}"
                );
                peeks += 1;
                peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 0..5 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .pop()
                        .await
                        .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            for i in 20..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(&mut flash, flash_range.clone(), &mut cache, &data, false)
                    .await
                    .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 5..25 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .deref(),
                    &data,
                    "At {i}"
                );
                peeks += 1;
                peek_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }

            let start_snapshot = flash.stats_snapshot();
            let mut iterator = iter(&mut flash, flash_range.clone(), &mut cache)
                .await
                .unwrap();
            pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            for i in 5..25 {
                let start_snapshot = iterator.flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    iterator
                        .next(&mut data_buffer)
                        .await
                        .unwrap()
                        .unwrap()
                        .pop()
                        .await
                        .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(iterator.flash.stats_snapshot());
            }
        }

        // Assert the performance. These numbers can be changed if acceptable.
        approx::assert_relative_eq!(
            push_stats.take_average(pushes),
            FlashAverageStatsResult {
                avg_erases: 0.0,
                avg_reads: 16.864,
                avg_writes: 3.1252,
                avg_bytes_read: 105.4112,
                avg_bytes_written: 60.5008
            }
        );
        approx::assert_relative_eq!(
            peek_stats.take_average(peeks),
            FlashAverageStatsResult {
                avg_erases: 0.0052,
                avg_reads: 3.8656,
                avg_writes: 0.0,
                avg_bytes_read: 70.4256,
                avg_bytes_written: 0.0
            }
        );
        approx::assert_relative_eq!(
            pop_stats.take_average(pops),
            FlashAverageStatsResult {
                avg_erases: 0.0572,
                avg_reads: 3.7772,
                avg_writes: 1.0,
                avg_bytes_read: 69.7184,
                avg_bytes_written: 8.0
            }
        );
    }

    #[test]
    async fn push_lots_then_pop_lots() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x000..0x1000;
        let mut data_buffer = AlignedBuf([0; 1024]);

        let mut push_stats = FlashStatsResult::default();
        let mut pushes = 0;
        let mut pop_stats = FlashStatsResult::default();
        let mut pops = 0;

        for loop_index in 0..100 {
            println!("Loop index: {loop_index}");

            for i in 0..20 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &data,
                    false,
                )
                .await
                .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            for i in 0..5 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    pop(
                        &mut flash,
                        flash_range.clone(),
                        &mut cache::NoCache::new(),
                        &mut data_buffer
                    )
                    .await
                    .unwrap()
                    .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            for i in 20..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                push(
                    &mut flash,
                    flash_range.clone(),
                    &mut cache::NoCache::new(),
                    &data,
                    false,
                )
                .await
                .unwrap();
                pushes += 1;
                push_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }

            for i in 5..25 {
                let start_snapshot = flash.stats_snapshot();
                let data = vec![i as u8; 50];
                assert_eq!(
                    pop(
                        &mut flash,
                        flash_range.clone(),
                        &mut cache::NoCache::new(),
                        &mut data_buffer
                    )
                    .await
                    .unwrap()
                    .unwrap(),
                    &data,
                    "At {i}"
                );
                pops += 1;
                pop_stats += start_snapshot.compare_to(flash.stats_snapshot());
            }
        }

        // Assert the performance. These numbers can be changed if acceptable.
        approx::assert_relative_eq!(
            push_stats.take_average(pushes),
            FlashAverageStatsResult {
                avg_erases: 0.0,
                avg_reads: 16.864,
                avg_writes: 3.1252,
                avg_bytes_read: 105.4112,
                avg_bytes_written: 60.5008
            }
        );
        approx::assert_relative_eq!(
            pop_stats.take_average(pops),
            FlashAverageStatsResult {
                avg_erases: 0.0624,
                avg_reads: 23.5768,
                avg_writes: 1.0,
                avg_bytes_read: 180.512,
                avg_bytes_written: 8.0
            }
        );
    }

    #[test]
    async fn pop_with_empty_section() {
        let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true);
        let flash_range = 0x00..0x40;
        let mut data_buffer = AlignedBuf([0; 1024]);

        data_buffer[..20].copy_from_slice(&[0xAA; 20]);
        push(
            &mut flash,
            flash_range.clone(),
            &mut cache::NoCache::new(),
            &data_buffer[0..20],
            false,
        )
        .await
        .unwrap();
        data_buffer[..20].copy_from_slice(&[0xBB; 20]);
        push(
            &mut flash,
            flash_range.clone(),
            &mut cache::NoCache::new(),
            &data_buffer[0..20],
            false,
        )
        .await
        .unwrap();

        // There's now an unused gap at the end of the first page

        assert_eq!(
            pop(
                &mut flash,
                flash_range.clone(),
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xAA; 20]
        );

        assert_eq!(
            pop(
                &mut flash,
                flash_range.clone(),
                &mut cache::NoCache::new(),
                &mut data_buffer
            )
            .await
            .unwrap()
            .unwrap(),
            &[0xBB; 20]
        );
    }

    #[test]
    async fn search_pages() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);

        const FLASH_RANGE: Range<u32> = 0x000..0x1000;

        close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 0)
            .await
            .unwrap();
        close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 1)
            .await
            .unwrap();
        partial_close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 2)
            .await
            .unwrap();

        assert_eq!(
            find_youngest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new())
                .await
                .unwrap(),
            2
        );
        assert_eq!(
            find_oldest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new())
                .await
                .unwrap(),
            0
        );
    }

    #[test]
    async fn store_too_big_item() {
        let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true);
        const FLASH_RANGE: Range<u32> = 0x000..0x1000;

        push(
            &mut flash,
            FLASH_RANGE,
            &mut cache::NoCache::new(),
            &AlignedBuf([0; 1024 - 4 * 2 - 8]),
            false,
        )
        .await
        .unwrap();

        assert_eq!(
            push(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &AlignedBuf([0; 1024 - 4 * 2 - 8 + 1]),
                false,
            )
            .await,
            Err(Error::ItemTooBig)
        );
    }

    #[test]
    async fn push_on_single_page() {
        let mut flash =
            mock_flash::MockFlashBase::<1, 4, 256>::new(WriteCountCheck::Twice, None, true);
        const FLASH_RANGE: Range<u32> = 0x000..0x400;

        for _ in 0..100 {
            match push(
                &mut flash,
                FLASH_RANGE,
                &mut cache::NoCache::new(),
                &[0, 1, 2, 3, 4],
                true,
            )
            .await
            {
                Ok(_) => {}
                Err(e) => {
                    println!("{}", flash.print_items().await);
                    panic!("{e}");
                }
            }
        }
    }
1564}