// sequential_storage/map.rs

1//! Implementation of the map logic.
2
3use core::{marker::PhantomData, mem::size_of};
4use embedded_storage_async::nor_flash::MultiwriteNorFlash;
5
6#[cfg(feature = "postcard")]
7use serde::{Deserialize, Serialize};
8
9use crate::item::{Item, ItemHeader, ItemIter};
10
11use self::{
12    cache::KeyCacheImpl,
13    item::{ItemHeaderIter, ItemUnborrowed},
14};
15
16use super::{
17    Debug, Error, GenericStorage, MAX_WORD_SIZE, NorFlash, NorFlashExt, PageState, Range, cache,
18    calculate_page_address, calculate_page_end_address, calculate_page_index, calculate_page_size,
19    item, run_with_auto_repair,
20};
21
/// Configuration for a map
pub struct MapConfig<S> {
    // Absolute flash address range the map operates in.
    // Validated in `try_new`: erase-size aligned and at least two pages long.
    flash_range: Range<u32>,
    // Ties this config to a specific flash type `S` without storing an instance,
    // so a config can't accidentally be used with a different flash layout.
    _phantom: PhantomData<S>,
}
27
28impl<S: NorFlash> MapConfig<S> {
29    /// Create a new map configuration. Will panic if the data is invalid.
30    /// If you want a fallible version, use [`Self::try_new`].
31    #[must_use]
32    pub const fn new(flash_range: Range<u32>) -> Self {
33        Self::try_new(flash_range).expect("Map config must be correct")
34    }
35
36    /// Create a new map configuration. Will return None if the data is invalid
37    #[must_use]
38    pub const fn try_new(flash_range: Range<u32>) -> Option<Self> {
39        if !flash_range.start.is_multiple_of(S::ERASE_SIZE as u32) {
40            return None;
41        }
42        if !flash_range.end.is_multiple_of(S::ERASE_SIZE as u32) {
43            return None;
44        }
45        // At least 2 pages are used
46        if flash_range.end - flash_range.start < S::ERASE_SIZE as u32 * 2 {
47            return None;
48        }
49
50        if S::ERASE_SIZE < S::WORD_SIZE * 3 {
51            return None;
52        }
53        if S::WORD_SIZE > MAX_WORD_SIZE {
54            return None;
55        }
56
57        Some(Self {
58            flash_range,
59            _phantom: PhantomData,
60        })
61    }
62}
63
64/// A map-like storage
65///
/// When a key-value is stored, it overwrites any old items with the same key.
67///
68/// ## Basic API
69///
70/// ```rust
71/// # use sequential_storage::cache::NoCache;
72/// # use sequential_storage::map::{MapConfig, MapStorage};
73/// # use mock_flash::MockFlashBase;
74/// # use futures::executor::block_on;
75/// # type Flash = MockFlashBase<10, 1, 4096>;
76/// # mod mock_flash {
77/// #   include!("mock_flash.rs");
78/// # }
79/// # fn init_flash() -> Flash {
80/// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
81/// # }
82///
83/// # block_on(async {
84/// // Initialize the flash. This can be internal or external
85/// let mut flash = init_flash();
86///
87/// // Create the storage instance
88///
89/// // We provide the flash addresses in which it will operate.
90/// // The storage will not read, write or erase outside of this range.
91///
92/// // We also put the config in a const block so if the config is bad we'll get a compile time error
93///
94/// // With the generics we specify that this is a map with `u8` as the key
95/// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
96///
97/// // We need to give the crate a buffer to work with.
98/// // It must be big enough to serialize the biggest value of your storage type in,
/// // rounded up to word alignment of the flash. Some kinds of internal flash may require
100/// // this buffer to be aligned in RAM as well.
101/// let mut data_buffer = [0; 128];
102///
103/// // We can fetch an item from the flash. We're using `u8` as our key type and `u32` as our value type.
104/// // Nothing is stored in it yet, so it will return None.
105///
106/// assert_eq!(
107///     storage.fetch_item::<u32>(
108///         &mut data_buffer,
109///         &42,
110///     ).await.unwrap(),
111///     None
112/// );
113///
114/// // Now we store an item the flash with key 42.
115/// // Again we make sure we pass the correct key and value types, u8 and u32.
116/// // It is important to do this consistently.
117///
118/// storage.store_item(
119///     &mut data_buffer,
120///     &42u8,
121///     &104729u32,
122/// ).await.unwrap();
123///
124/// // When we ask for key 42, we now get back a Some with the correct value
125///
126/// assert_eq!(
127///     storage.fetch_item::<u32>(
128///         &mut data_buffer,
129///         &42,
130///     ).await.unwrap(),
131///     Some(104729)
132/// );
133/// # });
134/// ```
135///
136/// For your convenience there are premade implementations for the [Key] and [Value] traits.
pub struct MapStorage<K: Key, S: NorFlash, C: KeyCacheImpl<K>> {
    // Shared page/flash bookkeeping (flash handle, address range and cache).
    inner: GenericStorage<S, C>,
    // The key type only appears in method signatures; nothing of it is stored.
    _phantom: PhantomData<K>,
}
141
142impl<S: NorFlash, C: KeyCacheImpl<K>, K: Key> MapStorage<K, S, C> {
    /// Create a new map instance
    ///
    /// The provided cache instance must be new or must be in the exact correct state for the current flash contents.
    /// If the cache is bad, undesirable things will happen.
    /// So, it's ok to reuse the cache gotten from the [`Self::destroy`] method when the flash hasn't changed since calling destroy.
    pub const fn new(storage: S, config: MapConfig<S>, cache: C) -> Self {
        Self {
            inner: GenericStorage {
                flash: storage,
                // The range was already validated when the `MapConfig` was constructed,
                // so no further checks are needed here.
                flash_range: config.flash_range,
                cache,
            },
            _phantom: PhantomData,
        }
    }
158
159    /// Get the last stored value from the flash that is associated with the given key.
160    /// If no value with the key is found, None is returned.
161    ///
162    /// You must make sure the used type as [Value] is correct for the key used.
163    /// If not, there can be wrong values or deserialization errors.
164    ///
165    /// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined,
166    /// rounded up to flash word alignment.
167    pub async fn fetch_item<'d, V: Value<'d>>(
168        &mut self,
169        data_buffer: &'d mut [u8],
170        search_key: &K,
171    ) -> Result<Option<V>, Error<S::Error>> {
172        let result = run_with_auto_repair!(
173            function = self.fetch_item_with_location(data_buffer, search_key).await,
174            repair = self.try_repair(data_buffer).await?
175        );
176
177        let Some((item, _, item_key_len)) = result? else {
178            return Ok(None);
179        };
180
181        let data_len = item.header.length as usize;
182        let item_key_len = match item_key_len {
183            Some(item_key_len) => item_key_len,
184            None => K::get_len(&data_buffer[..data_len])?,
185        };
186
187        let (value, _size) =
188            V::deserialize_from(&data_buffer[item_key_len..][..data_len - item_key_len])
189                .map_err(Error::SerializationError)?;
190        Ok(Some(value))
191    }
192
    /// Fetch the item, but with the item unborrowed, the address of the item and the length of the key
    ///
    /// Returns `Ok(None)` when no live item with the key exists.
    /// The returned key length is `None` when the item was found through the key
    /// cache, since in that case the key was never deserialized here.
    #[allow(clippy::type_complexity)]
    async fn fetch_item_with_location(
        &mut self,
        data_buffer: &mut [u8],
        search_key: &K,
    ) -> Result<Option<(ItemUnborrowed, u32, Option<usize>)>, Error<S::Error>> {
        // A dirty cache may be out of sync with flash, so drop its state rather than trust it.
        if self.inner.cache.is_dirty() {
            self.inner.cache.invalidate_cache_state();
        }

        // Fast path: the cache may know exactly where the newest item for this key lives.
        // Any inconsistency falls through (`break 'cache`) to the full flash search below.
        'cache: {
            if let Some(cached_location) = self.inner.cache.key_location(search_key) {
                let page_index = calculate_page_index::<S>(self.flash_range(), cached_location);
                let page_data_end_address =
                    calculate_page_end_address::<S>(self.flash_range(), page_index)
                        - S::WORD_SIZE as u32;

                let Some(header) = ItemHeader::read_new(
                    &mut self.inner.flash,
                    cached_location,
                    page_data_end_address,
                )
                .await?
                else {
                    // The cache points to a non-existing item?
                    #[expect(clippy::assertions_on_constants, reason = "Clippy is wrong here")]
                    {
                        assert!(
                            !cfg!(feature = "_test"),
                            "Wrong cache value. Addr: {cached_location}"
                        );
                    }
                    self.inner.cache.invalidate_cache_state();
                    break 'cache;
                };

                let item = header
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        cached_location,
                        page_data_end_address,
                    )
                    .await?;

                match item {
                    item::MaybeItem::Corrupted(_, _) | item::MaybeItem::Erased(_, _) => {
                        #[expect(clippy::assertions_on_constants, reason = "Clippy is wrong here")]
                        {
                            assert!(
                                !cfg!(feature = "_test"),
                                "Wrong cache value. Addr: {cached_location}"
                            );
                        }

                        // The cache points to a corrupted or erased item?
                        self.inner.cache.invalidate_cache_state();
                        break 'cache;
                    }
                    item::MaybeItem::Present(item) => {
                        // Cache hit: the key length is unknown here, hence `None`.
                        return Ok(Some((item.unborrow(), cached_location, None)));
                    }
                }
            }
        }

        // We need to find the page we were last using. This should be the only partial open page.
        let mut last_used_page = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?;

        if last_used_page.is_none() {
            // In the event that all pages are still open or the last used page was just closed, we search for the first open page.
            // If the page one before that is closed, then that's the last used page.
            if let Some(first_open_page) = self.inner.find_first_page(0, PageState::Open).await? {
                let previous_page = self.inner.previous_page(first_open_page);
                if self.inner.get_page_state(previous_page).await?.is_closed() {
                    last_used_page = Some(previous_page);
                } else {
                    // The page before the open page is not closed, so it must be open.
                    // This means that all pages are open and that we don't have any items yet.
                    self.inner.cache.unmark_dirty();
                    return Ok(None);
                }
            } else {
                // There are no open pages, so everything must be closed.
                // Something is up and this should never happen.
                // To recover, we will just erase all the flash.
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            }
        }

        // We must now find the most recent storage item with the key that was asked for.
        // If we don't find it in the current page, then we check again in the previous page if that page is closed.

        let mut current_page_to_check = last_used_page.unwrap();
        let mut newest_found_item_data = None;

        loop {
            let page_data_start_address =
                calculate_page_address::<S>(self.flash_range(), current_page_to_check)
                    + S::WORD_SIZE as u32;
            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range(), current_page_to_check)
                    - S::WORD_SIZE as u32;

            // Scan the whole page; since items are written in order, the *last*
            // match inside a page is the newest one for this key.
            let mut it = ItemIter::new(page_data_start_address, page_data_end_address);
            while let Some((item, address)) = it.next(&mut self.inner.flash, data_buffer).await? {
                let (found_key, found_key_len) = K::deserialize_from(item.data())?;
                if found_key == *search_key {
                    newest_found_item_data = Some((address, found_key_len));
                }
            }

            // We've found the item! We can stop searching
            if let Some((newest_found_item_address, _)) = newest_found_item_data.as_ref() {
                self.inner
                    .cache
                    .notice_key_location(search_key, *newest_found_item_address, false);

                break;
            }

            // We have not found the item. We've got to look in the previous page, but only if that page is closed and contains data.
            let previous_page = self.inner.previous_page(current_page_to_check);

            if self.inner.get_page_state(previous_page).await? != PageState::Closed {
                // We've looked through all the pages with data and couldn't find the item
                self.inner.cache.unmark_dirty();
                return Ok(None);
            }

            current_page_to_check = previous_page;
        }

        self.inner.cache.unmark_dirty();

        // We now need to reread the item because we lost all its data other than its address

        if let Some((newest_found_item_address, newest_found_item_key_len)) = newest_found_item_data
        {
            let item =
                ItemHeader::read_new(&mut self.inner.flash, newest_found_item_address, u32::MAX)
                    .await?
                    .ok_or_else(|| {
                        // How come there's no item header here!? We just found it!
                        Error::Corrupted {
                            #[cfg(feature = "_test")]
                            backtrace: std::backtrace::Backtrace::capture(),
                        }
                    })?
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        newest_found_item_address,
                        u32::MAX,
                    )
                    .await?;

            Ok(Some((
                item.unwrap()?.unborrow(),
                newest_found_item_address,
                Some(newest_found_item_key_len),
            )))
        } else {
            Ok(None)
        }
    }
366
    /// Store a key-value pair into flash memory.
    /// It will (logically) overwrite the last value that has the same key.
    ///
    /// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined,
    /// rounded up to flash word alignment.
    pub async fn store_item<'d, V: Value<'d>>(
        &mut self,
        data_buffer: &mut [u8],
        key: &K,
        item: &V,
    ) -> Result<(), Error<S::Error>> {
        // Thin wrapper: if the inner store fails with a corruption error,
        // repair the storage once and retry.
        run_with_auto_repair!(
            function = self.store_item_inner(data_buffer, key, item).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
383
    /// Inner implementation of [`Self::store_item`], without the auto-repair wrapper.
    ///
    /// NOTE(review): takes the value as `&dyn Value<'_>` while the public API is
    /// generic — presumably to avoid monomorphizing this large function per value
    /// type; confirm before relying on that.
    async fn store_item_inner(
        &mut self,
        data_buffer: &mut [u8],
        key: &K,
        item: &dyn Value<'_>,
    ) -> Result<(), Error<S::Error>> {
        // A dirty cache may be out of sync with flash, so drop its state rather than trust it.
        if self.inner.cache.is_dirty() {
            self.inner.cache.invalidate_cache_state();
        }

        let mut recursion_level = 0;
        loop {
            // Check if we're in an infinite recursion which happens when we don't have enough space to store the new data
            if recursion_level == self.inner.get_pages(0).count() {
                self.inner.cache.unmark_dirty();
                return Err(Error::FullStorage);
            }

            // If there is a partial open page, we try to write in that first if there is enough space
            let next_page_to_use = if let Some(partial_open_page) = self
                .inner
                .find_first_page(0, PageState::PartialOpen)
                .await?
            {
                // We found a partial open page, but at this point it's relatively cheap to do a consistency check
                if !self
                    .inner
                    .get_page_state(self.inner.next_page(partial_open_page))
                    .await?
                    .is_open()
                {
                    // Oh oh, the next page which serves as the buffer page is not open. We're corrupt.
                    // This likely happened because of an unexpected shutdown during data migration from the
                    // then new buffer page to the new partial open page.
                    // The repair function should be able to repair this.
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                }

                // We've got to search where the free space is since the page starts with items present already

                let page_data_start_address =
                    calculate_page_address::<S>(self.flash_range(), partial_open_page)
                        + S::WORD_SIZE as u32;
                let page_data_end_address =
                    calculate_page_end_address::<S>(self.flash_range(), partial_open_page)
                        - S::WORD_SIZE as u32;

                // Serialize key + value back-to-back into the caller's buffer.
                let key_len = key.serialize_into(data_buffer)?;
                let item_data_length = key_len
                    + item
                        .serialize_into(&mut data_buffer[key_len..])
                        .map_err(Error::SerializationError)?;

                // The item length field is a u16, and an item must fit in a single page.
                if item_data_length > u16::MAX as usize
                    || item_data_length
                        > calculate_page_size::<S>()
                            .saturating_sub(ItemHeader::data_address::<S>(0) as usize)
                {
                    self.inner.cache.unmark_dirty();
                    return Err(Error::ItemTooBig);
                }

                let free_spot_address = self
                    .inner
                    .find_next_free_item_spot(
                        page_data_start_address,
                        page_data_end_address,
                        item_data_length as u32,
                    )
                    .await?;

                if let Some(free_spot_address) = free_spot_address {
                    self.inner
                        .cache
                        .notice_key_location(key, free_spot_address, true);
                    Item::write_new(
                        &mut self.inner.flash,
                        self.inner.flash_range.clone(),
                        &mut self.inner.cache,
                        free_spot_address,
                        &data_buffer[..item_data_length],
                    )
                    .await?;

                    self.inner.cache.unmark_dirty();
                    return Ok(());
                }

                // The item doesn't fit here, so we need to close this page and move to the next
                self.inner.close_page(partial_open_page).await?;
                Some(self.inner.next_page(partial_open_page))
            } else {
                None
            };

            // If we get here, there was no partial page found or the partial page has now been closed because the item didn't fit.
            // If there was a partial page, then we need to look at the next page. It's supposed to be open since it was the previous empty buffer page.
            // The new buffer page has to be emptied if it was closed.
            // If there was no partial page, we just use the first open page.

            if let Some(next_page_to_use) = next_page_to_use {
                let next_page_state = self.inner.get_page_state(next_page_to_use).await?;

                if !next_page_state.is_open() {
                    // What was the previous buffer page was not open...
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                }

                // Since we're gonna write data here, let's already partially close the page
                // This could be done after moving the data, but this is more robust in the
                // face of shutdowns and cancellations
                self.inner.partial_close_page(next_page_to_use).await?;

                let next_buffer_page = self.inner.next_page(next_page_to_use);
                let next_buffer_page_state = self.inner.get_page_state(next_buffer_page).await?;

                if !next_buffer_page_state.is_open() {
                    self.migrate_items(data_buffer, next_buffer_page, next_page_to_use)
                        .await?;
                }
            } else {
                // There's no partial open page, so we just gotta turn the first open page into a partial open one
                let Some(first_open_page) = self.inner.find_first_page(0, PageState::Open).await?
                else {
                    // Uh oh, no open pages.
                    // Something has gone wrong.
                    // We should never get here.
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                };

                self.inner.partial_close_page(first_open_page).await?;
            }

            // If we get here, we just freshly partially closed a new page, so the next loop iteration should succeed.
            recursion_level += 1;
        }
    }
530
    /// Fully remove an item. Additional calls to fetch with the same key will return None until
    /// a new one is stored again.
    ///
    /// <div class="warning">
    /// This is really slow!
    ///
    /// All items in flash have to be read and deserialized to find the items with the key.
    /// This is unlikely to be cached well.
    ///
    /// Alternatively, e.g. when you don't have a [`MultiwriteNorFlash`] flash, you could store your value inside an Option
    /// and store the value `None` to mark it as erased.
    /// </div>
    pub async fn remove_item(
        &mut self,
        data_buffer: &mut [u8],
        search_key: &K,
    ) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // `Some(search_key)` limits the erase pass to items with just this key;
        // corruption triggers a single repair + retry via the macro.
        run_with_auto_repair!(
            function = self.remove_item_inner(data_buffer, Some(search_key)).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
556
    /// Fully remove all stored items. Additional calls to fetch with any key will return None until
    /// new items are stored again.
    ///
    /// <div class="warning">
    /// This might be really slow! This doesn't simply erase flash, but goes through all items and marks them as deleted.
    /// This is better for flash endurance.
    ///
    /// You might want to simply erase the flash range, e.g. if your flash does not implement [`MultiwriteNorFlash`].
    /// Consider using the helper method for that: [`Self::erase_all`].
    /// </div>
    pub async fn remove_all_items(&mut self, data_buffer: &mut [u8]) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // Passing `None` as the search key makes the inner routine erase every item.
        run_with_auto_repair!(
            function = self.remove_item_inner(data_buffer, None).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
576
    /// If `search_key` is None, then all items will be removed
    ///
    /// Walks every closed/partial page and marks matching items as erased in place,
    /// rather than erasing whole pages.
    async fn remove_item_inner(
        &mut self,
        data_buffer: &mut [u8],
        search_key: Option<&K>,
    ) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // Keep the cache consistent with what we're about to do: either note the
        // single erased key, or drop all cached state for a full wipe.
        if let Some(key) = &search_key {
            self.inner.cache.notice_key_erased(key);
        } else {
            self.inner.cache.invalidate_cache_state();
        }

        // Search for the last used page. We're gonna erase from the one after this first.
        // If we get an early shutoff or cancellation, this will make it so that we don't return
        // an old version of the key on the next fetch.
        let last_used_page = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?
            .unwrap_or_default(); // no partial-open page: fall back to page 0 as the starting point

        // Go through all the pages
        for page_index in self.inner.get_pages(self.inner.next_page(last_used_page)) {
            if self.inner.get_page_state(page_index).await?.is_open() {
                // This page is open, we don't have to check it
                continue;
            }

            let page_data_start_address =
                calculate_page_address::<S>(self.flash_range(), page_index) + S::WORD_SIZE as u32;
            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range(), page_index)
                    - S::WORD_SIZE as u32;

            // Go through all items on the page
            let mut item_headers =
                ItemHeaderIter::new(page_data_start_address, page_data_end_address);

            while let (Some(item_header), item_address) =
                item_headers.next(&mut self.inner.flash).await?
            {
                let item = item_header
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        item_address,
                        page_data_end_address,
                    )
                    .await?;

                match item {
                    item::MaybeItem::Corrupted(_, _) | item::MaybeItem::Erased(_, _) => continue,
                    item::MaybeItem::Present(item) => {
                        let item_match = match search_key {
                            Some(search_key) => K::deserialize_from(item.data())?.0 == *search_key,
                            _ => true,
                        };
                        // If this item has the same key as the key we're trying to erase, then erase the item.
                        // But keep going! We need to erase everything.
                        if item_match {
                            item.header
                                .erase_data(
                                    &mut self.inner.flash,
                                    self.inner.flash_range.clone(),
                                    &mut self.inner.cache,
                                    item_address,
                                )
                                .await?;
                        }
                    }
                }
            }
        }

        // We're done, we now know the cache is in a good state
        self.inner.cache.unmark_dirty();

        Ok(())
    }
659
660    /// Get an iterator that iterates over all non-erased & non-corrupted items in the map.
661    ///
662    /// <div class="warning">
663    /// You should be very careful when using the map item iterator:
664    /// <ul>
665    /// <li>
666    /// Because map doesn't erase the items when you insert a new one with the same key,
667    /// so it's expected that the iterator returns items with the same key multiple times.
668    /// Only the last returned one is the `active` one.
669    /// </li>
670    /// <li>
671    /// The iterator requires ALL items in the storage have the SAME Value type.
672    /// If you have different types of items in your map, the iterator might return incorrect data or error.
673    /// </li>
674    /// </ul>
675    /// </div>
676    ///
677    /// The following is a simple example of how to use the iterator:
678    /// ```rust
679    /// # use sequential_storage::cache::NoCache;
680    /// # use sequential_storage::map::{MapConfig, MapStorage};
681    /// # use mock_flash::MockFlashBase;
682    /// # use futures::executor::block_on;
683    /// # use std::collections::HashMap;
684    /// # type Flash = MockFlashBase<10, 1, 4096>;
685    /// # mod mock_flash {
686    /// #   include!("mock_flash.rs");
687    /// # }
688    /// # fn init_flash() -> Flash {
689    /// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
690    /// # }
691    ///
692    /// # block_on(async {
693    /// let mut flash = init_flash();
694    ///
695    /// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
696    /// let mut data_buffer = [0; 128];
697    ///
698    /// // Create the iterator of map items
699    /// let mut iterator = storage.fetch_all_items(
700    ///     &mut data_buffer
701    /// )
702    /// .await
703    /// .unwrap();
704    ///
705    /// let mut all_items = HashMap::new();
706    ///
707    /// // Iterate through all items, suppose the Key and Value types are u8, u32
708    /// while let Some((key, value)) = iterator
709    ///     .next::<u32>(&mut data_buffer)
710    ///     .await
711    ///     .unwrap()
712    /// {
713    ///     // Do something with the item.
714    ///     // Please note that for the same key there might be multiple items returned,
715    ///     // the last one is the current active one.
716    ///     all_items.insert(key, value);
717    /// }
718    /// # })
719    /// ```
    pub async fn fetch_all_items(
        &mut self,
        data_buffer: &mut [u8],
    ) -> Result<MapItemIter<'_, K, S, C>, Error<S::Error>> {
        // Get the first page index.
        // The first page used by the map is the next page of the `PartialOpen` page or the last `Closed` page
        let first_page = run_with_auto_repair!(
            function = {
                match self
                    .inner
                    .find_first_page(0, PageState::PartialOpen)
                    .await?
                {
                    Some(last_used_page) => {
                        // The next page of the `PartialOpen` page is the first page
                        Ok(self.inner.next_page(last_used_page))
                    }
                    None => {
                        // In the event that all pages are still open or the last used page was just closed, we search for the first open page.
                        // If the page one before that is closed, then that's the last used page.
                        if let Some(first_open_page) =
                            self.inner.find_first_page(0, PageState::Open).await?
                        {
                            let previous_page = self.inner.previous_page(first_open_page);
                            if self.inner.get_page_state(previous_page).await?.is_closed() {
                                // The previous page is closed, so the first_open_page is what we want
                                Ok(first_open_page)
                            } else {
                                // The page before the open page is not closed, so it must be open.
                                // This means that all pages are open and that we don't have any items yet.
                                self.inner.cache.unmark_dirty();
                                Ok(0)
                            }
                        } else {
                            // There are no open pages, so everything must be closed.
                            // Something is up and this should never happen.
                            // To recover, we will just erase all the flash.
                            Err(Error::Corrupted {
                                #[cfg(feature = "_test")]
                                backtrace: std::backtrace::Backtrace::capture(),
                            })
                        }
                    }
                }
            },
            repair = self.try_repair(data_buffer).await?
        )?;

        // The iterator walks items within the page between the page's status words.
        let start_address =
            calculate_page_address::<S>(self.flash_range(), first_page) + S::WORD_SIZE as u32;
        let end_address =
            calculate_page_end_address::<S>(self.flash_range(), first_page) - S::WORD_SIZE as u32;

        Ok(MapItemIter {
            storage: self,
            first_page,
            // Iteration starts at the oldest page and advances in write order.
            current_page_index: first_page,
            current_iter: ItemIter::new(start_address, end_address),
            _key: PhantomData,
        })
    }
781
    /// Move all still-current items from `source_page` into `target_page`.
    ///
    /// An item is only copied when the version found on the source page is the
    /// newest one for its key; superseded versions are dropped. When done, the
    /// source page is erased (re-opened) so it can be reused.
    async fn migrate_items(
        &mut self,
        data_buffer: &mut [u8],
        source_page: usize,
        target_page: usize,
    ) -> Result<(), Error<S::Error>> {
        // We need to move the data from the next buffer page to the next_page_to_use, but only if that data
        // doesn't have a newer value somewhere else.

        // First writable address on the target page (skip the page-state word).
        let mut next_page_write_address =
            calculate_page_address::<S>(self.flash_range(), target_page) + S::WORD_SIZE as u32;

        let mut it = ItemIter::new(
            calculate_page_address::<S>(self.flash_range(), source_page) + S::WORD_SIZE as u32,
            calculate_page_end_address::<S>(self.flash_range(), source_page) - S::WORD_SIZE as u32,
        );
        while let Some((item, item_address)) = it.next(&mut self.inner.flash, data_buffer).await? {
            let (key, _) = K::deserialize_from(item.data())?;

            // We're in a decent state here
            self.inner.cache.unmark_dirty();

            // Search for the newest item with the key we found
            let Some((found_item, found_address, _)) =
                self.fetch_item_with_location(data_buffer, &key).await?
            else {
                // We couldn't even find our own item?
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            };

            // The fetch reused `data_buffer`, so re-borrow the found item from it.
            let found_item = found_item
                .reborrow(data_buffer)
                .ok_or_else(|| Error::LogicBug {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                })?;

            // Only copy when the item we just iterated IS the newest version of its key.
            if found_address == item_address {
                self.inner
                    .cache
                    .notice_key_location(&key, next_page_write_address, true);
                found_item
                    .write(
                        &mut self.inner.flash,
                        self.inner.flash_range.clone(),
                        &mut self.inner.cache,
                        next_page_write_address,
                    )
                    .await?;
                next_page_write_address = found_item
                    .header
                    .next_item_address::<S>(next_page_write_address);
            }
        }

        // All live data has been copied; erase the source page for reuse.
        self.inner.open_page(source_page).await?;

        Ok(())
    }
844
    /// Try to repair the state of the flash to hopefully get back everything in working order.
    /// Care is taken that no data is lost, but this depends on correctly repairing the state and
    /// so is only best effort.
    ///
    /// This function might be called after a different function returned the [`Error::Corrupted`] error.
    /// There's no guarantee it will work.
    ///
    /// If this function or the function call after this crate returns [`Error::Corrupted`], then it's unlikely
    /// that the state can be recovered. To at least make everything function again at the cost of losing the data,
    /// erase the flash range.
    async fn try_repair(&mut self, data_buffer: &mut [u8]) -> Result<(), Error<S::Error>> {
        // The cache may describe the broken state, so start from a clean slate.
        self.inner.cache.invalidate_cache_state();

        self.inner.try_general_repair().await?;

        // Let's check if we corrupted in the middle of a migration
        if let Some(partial_open_page) = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?
        {
            let buffer_page = self.inner.next_page(partial_open_page);
            if !self.inner.get_page_state(buffer_page).await?.is_open() {
                // Yes, the migration got interrupted. Let's redo it.
                // To do that, we erase the partial open page first because it contains incomplete data.
                self.inner.open_page(partial_open_page).await?;

                // Then partially close it again
                self.inner.partial_close_page(partial_open_page).await?;

                self.migrate_items(data_buffer, buffer_page, partial_open_page)
                    .await?;
            }
        }

        Ok(())
    }
882
    /// Resets the flash in the entire given flash range.
    ///
    /// This is just a thin helper function as it just calls the flash's erase function.
    pub fn erase_all(&mut self) -> impl Future<Output = Result<(), Error<S::Error>>> {
        // Thin delegation to the shared storage implementation.
        self.inner.erase_all()
    }
889
    /// Get the minimal overhead size per stored item for the given flash type.
    ///
    /// The associated data of each item is additionally padded to a full flash word size, but that's not part of this number.\
    /// This means the full item length is `returned number + (data length).next_multiple_of(S::WORD_SIZE)`.
    #[must_use]
    pub const fn item_overhead_size() -> u32 {
        // The overhead is defined by the generic item format, not by the map layer.
        GenericStorage::<S, C>::item_overhead_size()
    }
898
    /// Destroy the instance to get back the flash and the cache.
    ///
    /// The cache can be passed to a new storage instance, but only for the same flash region and if nothing has changed in flash.
    pub fn destroy(self) -> (S, C) {
        // Consumes `self`; delegation to the inner storage.
        self.inner.destroy()
    }
905
    /// Get a reference to the flash. Mutating the memory is at your own risk.
    pub const fn flash(&mut self) -> &mut S {
        // Direct access to the underlying flash driver.
        self.inner.flash()
    }
910
    /// Get the flash range being used
    pub const fn flash_range(&self) -> Range<u32> {
        // The range this map operates in, as configured at construction.
        self.inner.flash_range()
    }
915
    #[cfg(any(test, feature = "std"))]
    /// Print all items in flash to the returned string
    ///
    /// This is meant as a debugging utility. The string format is not stable.
    pub fn print_items(&mut self) -> impl Future<Output = String> {
        // Only available with `std` (or in tests) since it allocates a String.
        self.inner.print_items()
    }
923}
924
925/// Iterator which iterates all non-erased & non-corrupted items in the map.
926///
927/// The iterator will return the (Key, Value) tuple when calling `next()`.
928/// If the iterator ends, it will return `Ok(None)`.
929///
930/// The following is a simple example of how to use the iterator:
931/// ```rust
932/// # use sequential_storage::cache::NoCache;
933/// # use sequential_storage::map::{MapConfig, MapStorage};
934/// # use mock_flash::MockFlashBase;
935/// # use futures::executor::block_on;
936/// # use std::collections::HashMap;
937/// # type Flash = MockFlashBase<10, 1, 4096>;
938/// # mod mock_flash {
939/// #   include!("mock_flash.rs");
940/// # }
941/// # fn init_flash() -> Flash {
942/// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
943/// # }
944///
945/// # block_on(async {
946/// let mut flash = init_flash();
947///
948/// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
949/// let mut data_buffer = [0; 128];
950///
951/// // Create the iterator of map items
952/// let mut iterator = storage.fetch_all_items(
953///     &mut data_buffer
954/// )
955/// .await
956/// .unwrap();
957///
958/// let mut all_items = HashMap::new();
959///
960/// // Iterate through all items, suppose the Key and Value types are u8, u32
961/// while let Some((key, value)) = iterator
962///     .next::<u32>(&mut data_buffer)
963///     .await
964///     .unwrap()
965/// {
966///     // Do something with the item.
967///     // Please note that for the same key there might be multiple items returned,
968///     // the last one is the current active one.
969///     all_items.insert(key, value);
970/// }
971/// # })
972/// ```
pub struct MapItemIter<'s, K: Key, S: NorFlash, C: KeyCacheImpl<K>> {
    // Exclusive access to the storage for the whole duration of the iteration.
    storage: &'s mut MapStorage<K, S, C>,
    // Page the iteration started on; arriving back here means iteration is done.
    first_page: usize,
    // Page currently being iterated.
    current_page_index: usize,
    // Item iterator over the current page's data region.
    pub(crate) current_iter: ItemIter,
    _key: PhantomData<K>,
}
980
impl<K: Key, S: NorFlash, C: KeyCacheImpl<K>> MapItemIter<'_, K, S, C> {
    /// Get the next item in the iterator. Be careful that the given `data_buffer` should be large enough to contain the serialized key and value.
    pub async fn next<'a, V: Value<'a>>(
        &mut self,
        data_buffer: &'a mut [u8],
    ) -> Result<Option<(K, V)>, Error<S::Error>> {
        // Find the next item
        let item = loop {
            if let Some((item, _address)) = self
                .current_iter
                .next(&mut self.storage.inner.flash, data_buffer)
                .await?
            {
                // We've found the next item, quit the loop
                break item;
            }

            // The current page is done, we need to find the next page
            // Find next page which is not open, update `self.current_iter`
            loop {
                self.current_page_index = self.storage.inner.next_page(self.current_page_index);

                // We've looped back to the first page, which means all pages are checked, there's nothing left so we return None
                if self.current_page_index == self.first_page {
                    return Ok(None);
                }

                match self
                    .storage
                    .inner
                    .get_page_state(self.current_page_index)
                    .await
                {
                    Ok(PageState::Closed | PageState::PartialOpen) => {
                        // Point the item iterator at this page's data region,
                        // skipping the page-state word at each end.
                        self.current_iter = ItemIter::new(
                            calculate_page_address::<S>(
                                self.storage.inner.flash_range.clone(),
                                self.current_page_index,
                            ) + S::WORD_SIZE as u32,
                            calculate_page_end_address::<S>(
                                self.storage.inner.flash_range.clone(),
                                self.current_page_index,
                            ) - S::WORD_SIZE as u32,
                        );
                        break;
                    }
                    // NOTE(review): this arm also swallows `Err` from `get_page_state`
                    // and skips the page like an open one — confirm that's intended.
                    _ => continue,
                }
            }
        };

        // The item data is the serialized key immediately followed by the serialized value.
        let data_len = item.header.length as usize;
        let (key, key_len) = K::deserialize_from(item.data())?;
        let (value, _value_len) = V::deserialize_from(&data_buffer[..data_len][key_len..])
            .map_err(Error::SerializationError)?;

        Ok(Some((key, value)))
    }
}
1040
/// Anything implementing this trait can be used as a key in the map functions.
///
/// It provides a way to serialize and deserialize the key.
///
/// The `Eq` bound is used because we need to be able to compare keys and the
/// `Clone` bound helps us pass the key around.
///
/// The key cannot have a lifetime like the [Value]
pub trait Key: Eq + Clone + Sized {
    /// Serialize the key into the given buffer.
    /// The serialized size is returned.
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError>;
    /// Deserialize the key from the given buffer.
    /// The key is returned together with the serialized length.
    fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError>;
    /// Get the length of the key from the buffer.
    /// This is an optimized version of [`Self::deserialize_from`] that doesn't have to deserialize everything.
    fn get_len(buffer: &[u8]) -> Result<usize, SerializationError> {
        // Default: fully deserialize and keep only the length.
        // Implementations with a fixed-size encoding should override this.
        Self::deserialize_from(buffer).map(|(_, len)| len)
    }
}
1062
1063macro_rules! impl_key_num {
1064    ($int:ty) => {
1065        impl Key for $int {
1066            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1067                let self_bytes = self.to_le_bytes();
1068
1069                match buffer.get_mut(..self_bytes.len()) {
1070                    Some(buffer) => {
1071                        buffer.copy_from_slice(&self_bytes);
1072                        Ok(buffer.len())
1073                    }
1074                    None => Err(SerializationError::BufferTooSmall),
1075                }
1076            }
1077
1078            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1079                let value = Self::from_le_bytes(
1080                    buffer
1081                        .get(..size_of::<Self>())
1082                        .ok_or(SerializationError::BufferTooSmall)?
1083                        .try_into()
1084                        .unwrap(),
1085                );
1086                Ok((value, size_of::<Self>()))
1087            }
1088
1089            fn get_len(_buffer: &[u8]) -> Result<usize, SerializationError> {
1090                Ok(size_of::<Self>())
1091            }
1092        }
1093    };
1094}
1095
1096impl_key_num!(u8);
1097impl_key_num!(u16);
1098impl_key_num!(u32);
1099impl_key_num!(u64);
1100impl_key_num!(u128);
1101impl_key_num!(i8);
1102impl_key_num!(i16);
1103impl_key_num!(i32);
1104impl_key_num!(i64);
1105impl_key_num!(i128);
1106
1107impl<const N: usize> Key for [u8; N] {
1108    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1109        buffer
1110            .get_mut(..N)
1111            .ok_or(SerializationError::BufferTooSmall)?
1112            .copy_from_slice(self);
1113        Ok(N)
1114    }
1115
1116    fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1117        Ok((
1118            buffer
1119                .get(..N)
1120                .ok_or(SerializationError::BufferTooSmall)?
1121                .try_into()
1122                .unwrap(),
1123            N,
1124        ))
1125    }
1126
1127    fn get_len(_buffer: &[u8]) -> Result<usize, SerializationError> {
1128        Ok(N)
1129    }
1130}
1131
1132impl Key for () {
1133    fn serialize_into(&self, _buffer: &mut [u8]) -> Result<usize, SerializationError> {
1134        Ok(0)
1135    }
1136
1137    fn deserialize_from(_buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1138        Ok(((), 0))
1139    }
1140}
1141
/// The trait that defines how map values are serialized and deserialized.
///
/// It also carries a lifetime so that zero-copy deserialization is supported.
/// Zero-copy serialization is not supported due to technical restrictions.
///
/// Implementations are provided for the integer and float primitives, arrays
/// of them, `bool`, `Option<T>` and `&[u8]`.
pub trait Value<'a> {
    /// Serialize the value into the given buffer. If everything went ok, this function returns the length
    /// of the used part of the buffer.
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError>;
    /// Deserialize the value from the buffer. Because of the added lifetime, the implementation can borrow from the
    /// buffer which opens up some zero-copy possibilities.
    ///
    /// The buffer will be the same length as the serialize function returned for this value. Though note that the length
    /// is written from flash, so bitflips can affect that (though the length is separately crc-protected) and the key deserialization might
    /// return a wrong length.
    ///
    /// Returns the decoded value and the amount of bytes used from the buffer.
    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
    where
        Self: Sized;
}
1162
1163impl<'a> Value<'a> for bool {
1164    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1165        <u8 as Value>::serialize_into(&u8::from(*self), buffer)
1166    }
1167
1168    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1169    where
1170        Self: Sized,
1171    {
1172        let (value, size) = <u8 as Value>::deserialize_from(buffer)?;
1173        Ok((value != 0, size))
1174    }
1175}
1176
1177impl<'a, T: Value<'a>> Value<'a> for Option<T> {
1178    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1179        if let Some(val) = self {
1180            let mut size = 0;
1181            size += <bool as Value>::serialize_into(&true, buffer)?;
1182            size += <T as Value>::serialize_into(
1183                val,
1184                buffer
1185                    .get_mut(size..)
1186                    .ok_or(SerializationError::BufferTooSmall)?,
1187            )?;
1188            Ok(size)
1189        } else {
1190            <bool as Value>::serialize_into(&false, buffer)
1191        }
1192    }
1193
1194    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1195    where
1196        Self: Sized,
1197    {
1198        let (is_some, tag_size) = <bool as Value>::deserialize_from(buffer)?;
1199        if is_some {
1200            let (value, value_size) = <T as Value>::deserialize_from(
1201                buffer
1202                    .get(tag_size..)
1203                    .ok_or(SerializationError::BufferTooSmall)?,
1204            )?;
1205            Ok((Some(value), tag_size + value_size))
1206        } else {
1207            Ok((None, tag_size))
1208        }
1209    }
1210}
1211
1212impl<'a> Value<'a> for &'a [u8] {
1213    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1214        buffer
1215            .get_mut(..self.len())
1216            .ok_or(SerializationError::BufferTooSmall)?
1217            .copy_from_slice(self);
1218        Ok(self.len())
1219    }
1220
1221    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1222    where
1223        Self: Sized,
1224    {
1225        Ok((buffer, buffer.len()))
1226    }
1227}
1228
#[cfg(feature = "postcard")]
/// Marker trait for types that will be serialized using [postcard].
///
/// Implement this trait on a type to enable automatically using postcard to serialize and
/// deserialize it.
///
/// Opting in via this marker (rather than a blanket impl on `Serialize + Deserialize`)
/// avoids conflicting with the manual [`Value`] implementations in this module.
///
/// [postcard]: https://crates.io/crates/postcard
pub trait PostcardValue<'a>: Serialize + Deserialize<'a> {}
1237
#[cfg(feature = "postcard")]
impl<'a, T> Value<'a> for T
where
    T: PostcardValue<'a>,
{
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
        // postcard returns the used prefix of the buffer; its length is the serialized size.
        Ok(postcard::to_slice(self, buffer).map(|s| s.len())?)
    }

    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
    where
        Self: Sized,
    {
        // The whole buffer is reported as consumed (postcard doesn't tell us how much it read).
        Ok((postcard::from_bytes(buffer)?, buffer.len()))
    }
}
1254
#[cfg(feature = "postcard")]
impl From<postcard::Error> for SerializationError {
    fn from(error: postcard::Error) -> SerializationError {
        use postcard::Error::*;
        match error {
            SerializeBufferFull => SerializationError::BufferTooSmall,
            SerializeSeqLengthUnknown => SerializationError::InvalidData,
            DeserializeUnexpectedEnd
            | DeserializeBadVarint
            | DeserializeBadBool
            | DeserializeBadChar
            | DeserializeBadUtf8
            | DeserializeBadOption
            | DeserializeBadEnum
            | DeserializeBadEncoding
            | DeserializeBadCrc => SerializationError::InvalidFormat,
            // Any other (future) postcard error is preserved via its discriminant.
            _ => SerializationError::Custom(error as i32),
        }
    }
}
1275
1276macro_rules! impl_map_item_num {
1277    ($num:ty) => {
1278        impl<'a> Value<'a> for $num {
1279            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1280                let self_bytes = self.to_le_bytes();
1281
1282                match buffer.get_mut(..self_bytes.len()) {
1283                    Some(buffer) => {
1284                        buffer.copy_from_slice(&self_bytes);
1285                        Ok(buffer.len())
1286                    }
1287                    None => Err(SerializationError::BufferTooSmall),
1288                }
1289            }
1290
1291            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1292                let value = Self::from_le_bytes(
1293                    buffer
1294                        .get(..size_of::<Self>())
1295                        .ok_or(SerializationError::BufferTooSmall)?
1296                        .try_into()
1297                        .unwrap(),
1298                );
1299                Ok((value, size_of::<Self>()))
1300            }
1301        }
1302
1303        // Also implement `Value` for arrays of numbers.
1304        impl<'a, const N: usize> Value<'a> for [$num; N] {
1305            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1306                let elem_size = size_of::<$num>();
1307
1308                let buffer = buffer
1309                    .get_mut(0..elem_size * N)
1310                    .ok_or(SerializationError::BufferTooSmall)?;
1311
1312                for (chunk, number) in buffer.chunks_exact_mut(elem_size).zip(self.iter()) {
1313                    chunk.copy_from_slice(&number.to_le_bytes())
1314                }
1315                Ok(elem_size * N)
1316            }
1317
1318            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1319                let elem_size = size_of::<$num>();
1320                if buffer.len() < elem_size * N {
1321                    return Err(SerializationError::BufferTooSmall);
1322                }
1323
1324                let mut array = [0 as $num; N];
1325                for (chunk, number) in buffer.chunks_exact(elem_size).zip(array.iter_mut()) {
1326                    *number = <$num>::from_le_bytes(chunk.try_into().unwrap());
1327                }
1328                Ok((array, elem_size * N))
1329            }
1330        }
1331    };
1332}
1333
1334impl_map_item_num!(u8);
1335impl_map_item_num!(u16);
1336impl_map_item_num!(u32);
1337impl_map_item_num!(u64);
1338impl_map_item_num!(u128);
1339impl_map_item_num!(i8);
1340impl_map_item_num!(i16);
1341impl_map_item_num!(i32);
1342impl_map_item_num!(i64);
1343impl_map_item_num!(i128);
1344impl_map_item_num!(f32);
1345impl_map_item_num!(f64);
1346
/// Error for map value (de)serialization.
///
/// This error type is predefined (in contrast to using generics) to save many kilobytes of binary size.
///
/// The enum is `#[non_exhaustive]`, so match it with a wildcard arm.
#[non_exhaustive]
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SerializationError {
    /// The provided buffer was too small.
    BufferTooSmall,
    /// The serialization could not succeed because the data was not in order. (e.g. too big to fit)
    InvalidData,
    /// The deserialization could not succeed because the bytes are in an invalid format.
    InvalidFormat,
    /// An implementation defined error that might contain more information than the other predefined
    /// error variants.
    Custom(i32),
}
1364
1365impl core::fmt::Display for SerializationError {
1366    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1367        match self {
1368            SerializationError::BufferTooSmall => write!(f, "Buffer too small"),
1369            SerializationError::InvalidData => write!(f, "Invalid data"),
1370            SerializationError::InvalidFormat => write!(f, "Invalid format"),
1371            SerializationError::Custom(val) => write!(f, "Custom error: {val}"),
1372        }
1373    }
1374}
1375
1376#[cfg(test)]
1377mod tests {
1378    use crate::{AlignedBuf, cache::NoCache, mock_flash};
1379
1380    use super::*;
1381    use futures_test::test;
1382
1383    type MockFlashBig = mock_flash::MockFlashBase<4, 4, 256>;
1384    type MockFlashTiny = mock_flash::MockFlashBase<2, 1, 32>;
1385
    #[test]
    async fn store_and_fetch() {
        let mut storage = MapStorage::<u8, _, _>::new(
            MockFlashBig::default(),
            MapConfig::new(0x000..0x1000),
            cache::NoCache::new(),
        );

        let mut data_buffer = AlignedBuf([0; 128]);

        let start_snapshot = storage.flash().stats_snapshot();

        // Fetching from an empty map must yield None for any key.
        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap();
        assert_eq!(item, None);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &60)
            .await
            .unwrap();
        assert_eq!(item, None);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0xFF)
            .await
            .unwrap();
        assert_eq!(item, None);

        // Storing the same key twice: the newest value must win.
        storage
            .store_item(&mut data_buffer, &0u8, &[5u8])
            .await
            .unwrap();
        storage
            .store_item(&mut data_buffer, &0u8, &[5u8, 6])
            .await
            .unwrap();

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[5, 6]);

        // A second key must not disturb the first one.
        storage
            .store_item(&mut data_buffer, &1u8, &[2u8, 2, 2, 2, 2, 2])
            .await
            .unwrap();

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[5, 6]);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &1)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[2, 2, 2, 2, 2, 2]);

        // Hammer 10 keys with thousands of overwrites to force many page migrations.
        for index in 0..4000 {
            storage
                .store_item(
                    &mut data_buffer,
                    &((index % 10) as u8),
                    &vec![(index % 10) as u8 * 2; index % 10].as_slice(),
                )
                .await
                .unwrap();
        }

        for i in 0..10 {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]);
        }

        // Repeatedly overwriting an unrelated key must leave the other keys intact.
        for _ in 0..4000 {
            storage
                .store_item(&mut data_buffer, &11u8, &[0; 10])
                .await
                .unwrap();
        }

        for i in 0..10 {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]);
        }

        println!(
            "{:?}",
            start_snapshot.compare_to(storage.flash().stats_snapshot()),
        );
    }
1492
1493    #[test]
1494    async fn store_too_many_items() {
1495        const UPPER_BOUND: u8 = 3;
1496
1497        let mut storage = MapStorage::new(
1498            MockFlashTiny::default(),
1499            const { MapConfig::new(0x00..0x40) },
1500            NoCache::new(),
1501        );
1502        let mut data_buffer = AlignedBuf([0; 128]);
1503
1504        for i in 0..UPPER_BOUND {
1505            println!("Storing {i:?}");
1506
1507            storage
1508                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
1509                .await
1510                .unwrap();
1511        }
1512
1513        assert_eq!(
1514            storage
1515                .store_item(
1516                    &mut data_buffer,
1517                    &UPPER_BOUND,
1518                    &vec![0; UPPER_BOUND as usize].as_slice(),
1519                )
1520                .await,
1521            Err(Error::FullStorage)
1522        );
1523
1524        for i in 0..UPPER_BOUND {
1525            let item = storage
1526                .fetch_item::<&[u8]>(&mut data_buffer, &i)
1527                .await
1528                .unwrap()
1529                .unwrap();
1530
1531            println!("Fetched {item:?}");
1532
1533            assert_eq!(item, vec![i; i as usize]);
1534        }
1535    }
1536
1537    #[test]
1538    async fn store_too_many_items_big() {
1539        const UPPER_BOUND: u8 = 68;
1540
1541        let mut storage = MapStorage::new(
1542            MockFlashBig::default(),
1543            const { MapConfig::new(0x0000..0x1000) },
1544            NoCache::new(),
1545        );
1546        let mut data_buffer = AlignedBuf([0; 128]);
1547
1548        for i in 0..UPPER_BOUND {
1549            println!("Storing {i:?}");
1550
1551            storage
1552                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
1553                .await
1554                .unwrap();
1555        }
1556
1557        assert_eq!(
1558            storage
1559                .store_item(
1560                    &mut data_buffer,
1561                    &UPPER_BOUND,
1562                    &vec![0; UPPER_BOUND as usize].as_slice(),
1563                )
1564                .await,
1565            Err(Error::FullStorage)
1566        );
1567
1568        for i in 0..UPPER_BOUND {
1569            let item = storage
1570                .fetch_item::<&[u8]>(&mut data_buffer, &i)
1571                .await
1572                .unwrap()
1573                .unwrap();
1574
1575            println!("Fetched {item:?}");
1576
1577            assert_eq!(item, vec![i; i as usize]);
1578        }
1579    }
1580
1581    #[test]
1582    async fn store_many_items_big() {
1583        let mut storage = MapStorage::new(
1584            mock_flash::MockFlashBase::<4, 1, 4096>::default(),
1585            const { MapConfig::new(0x0000..0x4000) },
1586            NoCache::new(),
1587        );
1588        let mut data_buffer = AlignedBuf([0; 128]);
1589
1590        const LENGHT_PER_KEY: [usize; 24] = [
1591            11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5,
1592        ];
1593
1594        for _ in 0..100 {
1595            #[allow(clippy::needless_range_loop)]
1596            for i in 0..24 {
1597                storage
1598                    .store_item(
1599                        &mut data_buffer,
1600                        &(i as u16),
1601                        &vec![i as u8; LENGHT_PER_KEY[i]].as_slice(),
1602                    )
1603                    .await
1604                    .unwrap();
1605            }
1606        }
1607
1608        #[allow(clippy::needless_range_loop)]
1609        for i in 0..24 {
1610            let item = storage
1611                .fetch_item::<&[u8]>(&mut data_buffer, &(i as u16))
1612                .await
1613                .unwrap()
1614                .unwrap();
1615
1616            println!("Fetched {item:?}");
1617
1618            assert_eq!(item, vec![i as u8; LENGHT_PER_KEY[i]]);
1619        }
1620    }
1621
1622    #[test]
1623    async fn remove_items() {
1624        let mut storage = MapStorage::new(
1625            mock_flash::MockFlashBase::<4, 1, 4096>::new(
1626                mock_flash::WriteCountCheck::Twice,
1627                None,
1628                true,
1629            ),
1630            const { MapConfig::new(0x0000..0x4000) },
1631            NoCache::new(),
1632        );
1633        let mut data_buffer = AlignedBuf([0; 128]);
1634
1635        // Add some data to flash
1636        for j in 0..10 {
1637            for i in 0..24 {
1638                storage
1639                    .store_item(
1640                        &mut data_buffer,
1641                        &(i as u8),
1642                        &vec![i as u8; j + 2].as_slice(),
1643                    )
1644                    .await
1645                    .unwrap();
1646            }
1647        }
1648
1649        for j in (0..24).rev() {
1650            // Are all things still in flash that we expect?
1651            for i in 0..=j {
1652                assert!(
1653                    storage
1654                        .fetch_item::<&[u8]>(&mut data_buffer, &i)
1655                        .await
1656                        .unwrap()
1657                        .is_some()
1658                );
1659            }
1660
1661            // Remove the item
1662            storage.remove_item(&mut data_buffer, &j).await.unwrap();
1663
1664            // Are all things still in flash that we expect?
1665            for i in 0..j {
1666                assert!(
1667                    storage
1668                        .fetch_item::<&[u8]>(&mut data_buffer, &i)
1669                        .await
1670                        .unwrap()
1671                        .is_some()
1672                );
1673            }
1674
1675            assert!(
1676                storage
1677                    .fetch_item::<&[u8]>(&mut data_buffer, &j)
1678                    .await
1679                    .unwrap()
1680                    .is_none()
1681            );
1682        }
1683    }
1684
1685    #[test]
1686    async fn remove_all() {
1687        let mut storage = MapStorage::new(
1688            mock_flash::MockFlashBase::<4, 1, 4096>::new(
1689                mock_flash::WriteCountCheck::Twice,
1690                None,
1691                true,
1692            ),
1693            const { MapConfig::new(0x0000..0x4000) },
1694            NoCache::new(),
1695        );
1696        let mut data_buffer = AlignedBuf([0; 128]);
1697
1698        // Add some data to flash
1699        for value in 0..10 {
1700            for key in 0..24u8 {
1701                storage
1702                    .store_item(&mut data_buffer, &key, &vec![key; value + 2].as_slice())
1703                    .await
1704                    .unwrap();
1705            }
1706        }
1707
1708        // Sanity check that we can find all the keys we just added.
1709        for key in 0..24u8 {
1710            assert!(
1711                storage
1712                    .fetch_item::<&[u8]>(&mut data_buffer, &key)
1713                    .await
1714                    .unwrap()
1715                    .is_some()
1716            );
1717        }
1718
1719        // Remove all the items
1720        storage.remove_all_items(&mut data_buffer).await.unwrap();
1721
1722        // Verify that none of the keys are present in flash.
1723        for key in 0..24 {
1724            assert!(
1725                storage
1726                    .fetch_item::<&[u8]>(&mut data_buffer, &key)
1727                    .await
1728                    .unwrap()
1729                    .is_none()
1730            );
1731        }
1732    }
1733
1734    #[test]
1735    async fn store_too_big_item() {
1736        let mut storage = MapStorage::new(
1737            MockFlashBig::new(mock_flash::WriteCountCheck::Twice, None, true),
1738            const { MapConfig::new(0x000..0x1000) },
1739            NoCache::new(),
1740        );
1741
1742        storage
1743            .store_item(&mut [0; 1024], &0u8, &[0u8; 1024 - 4 * 2 - 8 - 1])
1744            .await
1745            .unwrap();
1746
1747        assert_eq!(
1748            storage
1749                .store_item(&mut [0; 1024], &0u8, &[0u8; 1024 - 4 * 2 - 8 - 1 + 1],)
1750                .await,
1751            Err(Error::ItemTooBig)
1752        );
1753    }
1754
1755    #[test]
1756    async fn item_iterator() {
1757        const UPPER_BOUND: u8 = 64;
1758        let mut storage = MapStorage::new(
1759            MockFlashBig::default(),
1760            const { MapConfig::new(0x000..0x1000) },
1761            NoCache::new(),
1762        );
1763
1764        let mut data_buffer = AlignedBuf([0; 128]);
1765
1766        for i in 0..UPPER_BOUND {
1767            storage
1768                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
1769                .await
1770                .unwrap();
1771        }
1772
1773        // Save 10 times for key 1
1774        for i in 0..10 {
1775            storage
1776                .store_item(&mut data_buffer, &1u8, &vec![i; i as usize].as_slice())
1777                .await
1778                .unwrap();
1779        }
1780
1781        let mut map_iter = storage.fetch_all_items(&mut data_buffer).await.unwrap();
1782
1783        let mut count = 0;
1784        let mut last_value_buffer = [0u8; 64];
1785        let mut last_value_length = 0;
1786        while let Ok(Some((key, value))) = map_iter.next::<&[u8]>(&mut data_buffer).await {
1787            if key == 1 {
1788                // This is the key we stored multiple times, record the last value
1789                last_value_length = value.len();
1790                last_value_buffer[..value.len()].copy_from_slice(value);
1791            } else {
1792                assert_eq!(value, vec![key; key as usize]);
1793                count += 1;
1794            }
1795        }
1796
1797        assert_eq!(last_value_length, 9);
1798        assert_eq!(
1799            &last_value_buffer[..last_value_length],
1800            vec![9u8; 9].as_slice()
1801        );
1802
1803        // Check total number of fetched items, +1 since we didn't count key 1
1804        assert_eq!(count + 1, UPPER_BOUND);
1805    }
1806
1807    #[test]
1808    async fn store_unit_key() {
1809        let mut storage = MapStorage::new(
1810            MockFlashBig::default(),
1811            const { MapConfig::new(0x000..0x1000) },
1812            NoCache::new(),
1813        );
1814
1815        let mut data_buffer = AlignedBuf([0; 128]);
1816
1817        let item = storage
1818            .fetch_item::<&[u8]>(&mut data_buffer, &())
1819            .await
1820            .unwrap();
1821        assert_eq!(item, None);
1822
1823        storage
1824            .store_item(&mut data_buffer, &(), &[5u8])
1825            .await
1826            .unwrap();
1827        storage
1828            .store_item(&mut data_buffer, &(), &[5u8, 6])
1829            .await
1830            .unwrap();
1831
1832        let item = storage
1833            .fetch_item::<&[u8]>(&mut data_buffer, &())
1834            .await
1835            .unwrap()
1836            .unwrap();
1837        assert_eq!(item, &[5, 6]);
1838    }
1839
1840    #[test]
1841    async fn option_value() {
1842        let mut buffer = [0; 2];
1843
1844        assert_eq!(Some(42u8).serialize_into(&mut buffer), Ok(2));
1845        assert_eq!(Option::<u8>::deserialize_from(&buffer), Ok((Some(42u8), 2)));
1846        assert_eq!(buffer, [1, 42]);
1847
1848        let mut buffer = [0; 1];
1849
1850        assert_eq!(Option::<u8>::None.serialize_into(&mut buffer), Ok(1));
1851        assert_eq!(Option::<u8>::deserialize_from(&buffer), Ok((None, 1)));
1852        assert_eq!(buffer, [0]);
1853    }
1854
1855    #[test]
1856    async fn array_value() {
1857        let mut buffer = [0; 3];
1858        assert_eq!(Value::serialize_into(&[1u8, 2, 3], &mut buffer), Ok(3));
1859        assert_eq!(buffer, [1, 2, 3]);
1860        assert_eq!(
1861            <[u8; 3] as Value>::deserialize_from(&buffer),
1862            Ok(([1, 2, 3], 3))
1863        );
1864
1865        let mut buffer = [0; 4];
1866        assert_eq!(
1867            Value::serialize_into(&[0x1234u16, 0x5678], &mut buffer),
1868            Ok(4)
1869        );
1870        assert_eq!(buffer, [0x34, 0x12, 0x78, 0x56]);
1871        assert_eq!(
1872            <[u16; 2]>::deserialize_from(&buffer),
1873            Ok(([0x1234, 0x5678], 4))
1874        );
1875    }
1876
1877    #[cfg(feature = "postcard")]
1878    #[test]
1879    async fn postcard_value() {
1880        #[derive(PartialEq, Debug, serde::Serialize, serde::Deserialize)]
1881        struct Foo(u32);
1882        impl crate::map::PostcardValue<'_> for Foo {}
1883        let mut buffer = [0; 8];
1884        assert_eq!(Value::serialize_into(&Foo(123), &mut buffer), Ok(1));
1885        assert_eq!(
1886            <Foo as Value>::deserialize_from(&buffer[..1]),
1887            Ok((Foo(123), 1))
1888        );
1889    }
1890}