sequential_storage/
map.rs

1//! Implementation of the map logic.
2
3use core::{marker::PhantomData, mem::size_of};
4use embedded_storage_async::nor_flash::MultiwriteNorFlash;
5
6use crate::item::{Item, ItemHeader, ItemIter};
7
8use self::{
9    cache::KeyCacheImpl,
10    item::{ItemHeaderIter, ItemUnborrowed},
11};
12
13use super::{
14    Debug, Error, GenericStorage, MAX_WORD_SIZE, NorFlash, NorFlashExt, PageState, Range, cache,
15    calculate_page_address, calculate_page_end_address, calculate_page_index, calculate_page_size,
16    item, run_with_auto_repair,
17};
18
/// Configuration for a map
///
/// Construct with [`MapConfig::new`] (panicking) or [`MapConfig::try_new`] (fallible),
/// which validate the flash range against the flash geometry of `S`.
pub struct MapConfig<S> {
    /// Byte address range in flash the map may use. Validated to be erase-size
    /// aligned on both ends and at least two pages long.
    flash_range: Range<u32>,
    /// Ties this config to one flash type without storing a flash instance.
    _phantom: PhantomData<S>,
}
24
25impl<S: NorFlash> MapConfig<S> {
26    /// Create a new map configuration. Will panic if the data is invalid.
27    /// If you want a fallible version, use [`Self::try_new`].
28    #[must_use]
29    pub const fn new(flash_range: Range<u32>) -> Self {
30        Self::try_new(flash_range).expect("Map config must be correct")
31    }
32
33    /// Create a new map configuration. Will return None if the data is invalid
34    #[must_use]
35    pub const fn try_new(flash_range: Range<u32>) -> Option<Self> {
36        if !flash_range.start.is_multiple_of(S::ERASE_SIZE as u32) {
37            return None;
38        }
39        if !flash_range.end.is_multiple_of(S::ERASE_SIZE as u32) {
40            return None;
41        }
42        // At least 2 pages are used
43        if flash_range.end - flash_range.start < S::ERASE_SIZE as u32 * 2 {
44            return None;
45        }
46
47        if S::ERASE_SIZE < S::WORD_SIZE * 3 {
48            return None;
49        }
50        if S::WORD_SIZE > MAX_WORD_SIZE {
51            return None;
52        }
53
54        Some(Self {
55            flash_range,
56            _phantom: PhantomData,
57        })
58    }
59}
60
/// A map-like storage
///
/// When a key-value is stored, it overwrites any old items with the same key.
///
/// ## Basic API
///
/// ```rust
/// # use sequential_storage::cache::NoCache;
/// # use sequential_storage::map::{MapConfig, MapStorage};
/// # use mock_flash::MockFlashBase;
/// # use futures::executor::block_on;
/// # type Flash = MockFlashBase<10, 1, 4096>;
/// # mod mock_flash {
/// #   include!("mock_flash.rs");
/// # }
/// # fn init_flash() -> Flash {
/// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
/// # }
///
/// # block_on(async {
/// // Initialize the flash. This can be internal or external
/// let mut flash = init_flash();
///
/// // Create the storage instance
///
/// // We provide the flash addresses in which it will operate.
/// // The storage will not read, write or erase outside of this range.
///
/// // We also put the config in a const block so if the config is bad we'll get a compile time error
///
/// // With the generics we specify that this is a map with `u8` as the key
/// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
///
/// // We need to give the crate a buffer to work with.
/// // It must be big enough to serialize the biggest value of your storage type in,
/// // rounded up to the word alignment of the flash. Some kinds of internal flash may require
/// // this buffer to be aligned in RAM as well.
/// let mut data_buffer = [0; 128];
///
/// // We can fetch an item from the flash. We're using `u8` as our key type and `u32` as our value type.
/// // Nothing is stored in it yet, so it will return None.
///
/// assert_eq!(
///     storage.fetch_item::<u32>(
///         &mut data_buffer,
///         &42,
///     ).await.unwrap(),
///     None
/// );
///
/// // Now we store an item into the flash with key 42.
/// // Again we make sure we pass the correct key and value types, u8 and u32.
/// // It is important to do this consistently.
///
/// storage.store_item(
///     &mut data_buffer,
///     &42u8,
///     &104729u32,
/// ).await.unwrap();
///
/// // When we ask for key 42, we now get back a Some with the correct value
///
/// assert_eq!(
///     storage.fetch_item::<u32>(
///         &mut data_buffer,
///         &42,
///     ).await.unwrap(),
///     Some(104729)
/// );
/// # });
/// ```
///
/// For your convenience there are premade implementations for the [Key] and [Value] traits.
pub struct MapStorage<K: Key, S: NorFlash, C: KeyCacheImpl<K>> {
    /// Shared storage core: the flash driver, its usable address range and the cache.
    inner: GenericStorage<S, C>,
    /// Ties the storage to a single key type without storing a key.
    _phantom: PhantomData<K>,
}
138
139impl<S: NorFlash, C: KeyCacheImpl<K>, K: Key> MapStorage<K, S, C> {
140    /// Create a new map instance
141    ///
142    /// The provided cache instance must be new or must be in the exact correct state for the current flash contents.
143    /// If the cache is bad, undesirable things will happen.
144    /// So, it's ok to reuse the cache gotten from the [`Self::destroy`] method when the flash hasn't changed since calling destroy.
145    pub const fn new(storage: S, config: MapConfig<S>, cache: C) -> Self {
146        Self {
147            inner: GenericStorage {
148                flash: storage,
149                flash_range: config.flash_range,
150                cache,
151            },
152            _phantom: PhantomData,
153        }
154    }
155
    /// Get the last stored value from the flash that is associated with the given key.
    /// If no value with the key is found, None is returned.
    ///
    /// You must make sure the used type as [Value] is correct for the key used.
    /// If not, there can be wrong values or deserialization errors.
    ///
    /// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined,
    /// rounded up to flash word alignment.
    pub async fn fetch_item<'d, V: Value<'d>>(
        &mut self,
        data_buffer: &'d mut [u8],
        search_key: &K,
    ) -> Result<Option<V>, Error<S::Error>> {
        // Locate the newest item for this key; on corruption the macro runs the
        // repair routine and retries the lookup.
        let result = run_with_auto_repair!(
            function = self.fetch_item_with_location(data_buffer, search_key).await,
            repair = self.try_repair(data_buffer).await?
        );

        let Some((item, _, item_key_len)) = result? else {
            return Ok(None);
        };

        let data_len = item.header.length as usize;
        // The key length is only known up front when the item was found by scanning.
        // When it came from the cache the key length is None and must be re-derived
        // from the serialized bytes at the start of the buffer.
        let item_key_len = match item_key_len {
            Some(item_key_len) => item_key_len,
            None => K::get_len(&data_buffer[..data_len])?,
        };

        // The value bytes sit directly after the serialized key within the item data.
        let (value, _size) =
            V::deserialize_from(&data_buffer[item_key_len..][..data_len - item_key_len])
                .map_err(Error::SerializationError)?;
        Ok(Some(value))
    }
189
    /// Fetch the item, but with the item unborrowed, the address of the item and the length of the key
    ///
    /// Returns `Ok(None)` when the key is not present. The `Option<usize>` is the key length:
    /// `None` when the item was served from the cache (the key was never deserialized),
    /// `Some` when it was found by scanning the pages.
    #[allow(clippy::type_complexity)]
    async fn fetch_item_with_location(
        &mut self,
        data_buffer: &mut [u8],
        search_key: &K,
    ) -> Result<Option<(ItemUnborrowed, u32, Option<usize>)>, Error<S::Error>> {
        // A dirty cache may hold stale state from an interrupted operation; drop it.
        if self.inner.cache.is_dirty() {
            self.inner.cache.invalidate_cache_state();
        }

        // Fast path: if the cache knows where this key lives, try to read it directly.
        // Any inconsistency falls through (via `break 'cache`) to the full page scan below.
        'cache: {
            if let Some(cached_location) = self.inner.cache.key_location(search_key) {
                let page_index = calculate_page_index::<S>(self.flash_range(), cached_location);
                let page_data_end_address =
                    calculate_page_end_address::<S>(self.flash_range(), page_index)
                        - S::WORD_SIZE as u32;

                let Some(header) = ItemHeader::read_new(
                    &mut self.inner.flash,
                    cached_location,
                    page_data_end_address,
                )
                .await?
                else {
                    // The cache points to a non-existing item?
                    #[expect(clippy::assertions_on_constants, reason = "Clippy is wrong here")]
                    {
                        assert!(
                            !cfg!(feature = "_test"),
                            "Wrong cache value. Addr: {cached_location}"
                        );
                    }
                    self.inner.cache.invalidate_cache_state();
                    break 'cache;
                };

                let item = header
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        cached_location,
                        page_data_end_address,
                    )
                    .await?;

                match item {
                    item::MaybeItem::Corrupted(_, _) | item::MaybeItem::Erased(_, _) => {
                        #[expect(clippy::assertions_on_constants, reason = "Clippy is wrong here")]
                        {
                            assert!(
                                !cfg!(feature = "_test"),
                                "Wrong cache value. Addr: {cached_location}"
                            );
                        }

                        // The cache points to a corrupted or erased item?
                        self.inner.cache.invalidate_cache_state();
                        break 'cache;
                    }
                    item::MaybeItem::Present(item) => {
                        // Key length is unknown here because the key was never deserialized.
                        return Ok(Some((item.unborrow(), cached_location, None)));
                    }
                }
            }
        }

        // We need to find the page we were last using. This should be the only partial open page.
        let mut last_used_page = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?;

        if last_used_page.is_none() {
            // In the event that all pages are still open or the last used page was just closed, we search for the first open page.
            // If the page one before that is closed, then that's the last used page.
            if let Some(first_open_page) = self.inner.find_first_page(0, PageState::Open).await? {
                let previous_page = self.inner.previous_page(first_open_page);
                if self.inner.get_page_state(previous_page).await?.is_closed() {
                    last_used_page = Some(previous_page);
                } else {
                    // The page before the open page is not closed, so it must be open.
                    // This means that all pages are open and that we don't have any items yet.
                    self.inner.cache.unmark_dirty();
                    return Ok(None);
                }
            } else {
                // There are no open pages, so everything must be closed.
                // Something is up and this should never happen.
                // Return Corrupted so the auto-repair wrapper can recover.
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            }
        }

        // We must now find the most recent storage item with the key that was asked for.
        // If we don't find it in the current page, then we check again in the previous page if that page is closed.

        let mut current_page_to_check = last_used_page.unwrap();
        let mut newest_found_item_data = None;

        loop {
            // Skip the page-state words at both ends of the page.
            let page_data_start_address =
                calculate_page_address::<S>(self.flash_range(), current_page_to_check)
                    + S::WORD_SIZE as u32;
            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range(), current_page_to_check)
                    - S::WORD_SIZE as u32;

            // Walk every item on the page; later matches overwrite earlier ones,
            // so we end up with the newest item for the key on this page.
            let mut it = ItemIter::new(page_data_start_address, page_data_end_address);
            while let Some((item, address)) = it.next(&mut self.inner.flash, data_buffer).await? {
                let (found_key, found_key_len) = K::deserialize_from(item.data())?;
                if found_key == *search_key {
                    newest_found_item_data = Some((address, found_key_len));
                }
            }

            // We've found the item! We can stop searching
            if let Some((newest_found_item_address, _)) = newest_found_item_data.as_ref() {
                self.inner
                    .cache
                    .notice_key_location(search_key, *newest_found_item_address, false);

                break;
            }

            // We have not found the item. We've got to look in the previous page, but only if that page is closed and contains data.
            let previous_page = self.inner.previous_page(current_page_to_check);

            if self.inner.get_page_state(previous_page).await? != PageState::Closed {
                // We've looked through all the pages with data and couldn't find the item
                self.inner.cache.unmark_dirty();
                return Ok(None);
            }

            current_page_to_check = previous_page;
        }

        self.inner.cache.unmark_dirty();

        // We now need to reread the item because we lost all its data other than its address

        if let Some((newest_found_item_address, newest_found_item_key_len)) = newest_found_item_data
        {
            let item =
                ItemHeader::read_new(&mut self.inner.flash, newest_found_item_address, u32::MAX)
                    .await?
                    .ok_or_else(|| {
                        // How come there's no item header here!? We just found it!
                        Error::Corrupted {
                            #[cfg(feature = "_test")]
                            backtrace: std::backtrace::Backtrace::capture(),
                        }
                    })?
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        newest_found_item_address,
                        u32::MAX,
                    )
                    .await?;

            Ok(Some((
                item.unwrap()?.unborrow(),
                newest_found_item_address,
                Some(newest_found_item_key_len),
            )))
        } else {
            Ok(None)
        }
    }
363
    /// Store a key-value pair into flash memory.
    /// It will (logically) overwrite the last value that has the same key.
    ///
    /// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined,
    /// rounded up to flash word alignment.
    pub async fn store_item<'d, V: Value<'d>>(
        &mut self,
        data_buffer: &mut [u8],
        key: &K,
        item: &V,
    ) -> Result<(), Error<S::Error>> {
        // Delegate to the inner implementation, wrapped so a corruption error
        // triggers the repair routine (see `run_with_auto_repair`).
        run_with_auto_repair!(
            function = self.store_item_inner(data_buffer, key, item).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
380
    /// Actual store implementation, called through the auto-repair wrapper.
    ///
    /// Loops at most once per page: each iteration either writes the item into the
    /// partial-open page, or rotates to a fresh page and tries again.
    async fn store_item_inner(
        &mut self,
        data_buffer: &mut [u8],
        key: &K,
        item: &dyn Value<'_>,
    ) -> Result<(), Error<S::Error>> {
        // A dirty cache may hold stale state from an interrupted operation; drop it.
        if self.inner.cache.is_dirty() {
            self.inner.cache.invalidate_cache_state();
        }

        let mut recursion_level = 0;
        loop {
            // Check if we're in an infinite recursion which happens when we don't have enough space to store the new data
            if recursion_level == self.inner.get_pages(0).count() {
                self.inner.cache.unmark_dirty();
                return Err(Error::FullStorage);
            }

            // If there is a partial open page, we try to write in that first if there is enough space
            let next_page_to_use = if let Some(partial_open_page) = self
                .inner
                .find_first_page(0, PageState::PartialOpen)
                .await?
            {
                // We found a partial open page, but at this point it's relatively cheap to do a consistency check
                if !self
                    .inner
                    .get_page_state(self.inner.next_page(partial_open_page))
                    .await?
                    .is_open()
                {
                    // Oh oh, the next page which serves as the buffer page is not open. We're corrupt.
                    // This likely happened because of an unexpected shutdown during data migration from the
                    // then new buffer page to the new partial open page.
                    // The repair function should be able to repair this.
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                }

                // We've got to search where the free space is since the page starts with items present already

                let page_data_start_address =
                    calculate_page_address::<S>(self.flash_range(), partial_open_page)
                        + S::WORD_SIZE as u32;
                let page_data_end_address =
                    calculate_page_end_address::<S>(self.flash_range(), partial_open_page)
                        - S::WORD_SIZE as u32;

                // Serialize key followed by value into the caller's buffer.
                let key_len = key.serialize_into(data_buffer)?;
                let item_data_length = key_len
                    + item
                        .serialize_into(&mut data_buffer[key_len..])
                        .map_err(Error::SerializationError)?;

                // The length must fit in the item header (u16) and within one page's data area.
                if item_data_length > u16::MAX as usize
                    || item_data_length
                        > calculate_page_size::<S>()
                            .saturating_sub(ItemHeader::data_address::<S>(0) as usize)
                {
                    self.inner.cache.unmark_dirty();
                    return Err(Error::ItemTooBig);
                }

                let free_spot_address = self
                    .inner
                    .find_next_free_item_spot(
                        page_data_start_address,
                        page_data_end_address,
                        item_data_length as u32,
                    )
                    .await?;

                if let Some(free_spot_address) = free_spot_address {
                    self.inner
                        .cache
                        .notice_key_location(key, free_spot_address, true);
                    Item::write_new(
                        &mut self.inner.flash,
                        self.inner.flash_range.clone(),
                        &mut self.inner.cache,
                        free_spot_address,
                        &data_buffer[..item_data_length],
                    )
                    .await?;

                    self.inner.cache.unmark_dirty();
                    return Ok(());
                }

                // The item doesn't fit here, so we need to close this page and move to the next
                self.inner.close_page(partial_open_page).await?;
                Some(self.inner.next_page(partial_open_page))
            } else {
                None
            };

            // If we get here, there was no partial page found or the partial page has now been closed because the item didn't fit.
            // If there was a partial page, then we need to look at the next page. It's supposed to be open since it was the previous empty buffer page.
            // The new buffer page has to be emptied if it was closed.
            // If there was no partial page, we just use the first open page.

            if let Some(next_page_to_use) = next_page_to_use {
                let next_page_state = self.inner.get_page_state(next_page_to_use).await?;

                if !next_page_state.is_open() {
                    // What was the previous buffer page was not open...
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                }

                // Since we're gonna write data here, let's already partially close the page
                // This could be done after moving the data, but this is more robust in the
                // face of shutdowns and cancellations
                self.inner.partial_close_page(next_page_to_use).await?;

                let next_buffer_page = self.inner.next_page(next_page_to_use);
                let next_buffer_page_state = self.inner.get_page_state(next_buffer_page).await?;

                if !next_buffer_page_state.is_open() {
                    // The upcoming buffer page still holds data; move it into the page
                    // we just partially closed so the buffer page can be erased.
                    self.migrate_items(data_buffer, next_buffer_page, next_page_to_use)
                        .await?;
                }
            } else {
                // There's no partial open page, so we just gotta turn the first open page into a partial open one
                let Some(first_open_page) = self.inner.find_first_page(0, PageState::Open).await?
                else {
                    // Uh oh, no open pages.
                    // Something has gone wrong.
                    // We should never get here.
                    return Err(Error::Corrupted {
                        #[cfg(feature = "_test")]
                        backtrace: std::backtrace::Backtrace::capture(),
                    });
                };

                self.inner.partial_close_page(first_open_page).await?;
            }

            // If we get here, we just freshly partially closed a new page, so the next loop iteration should succeed.
            recursion_level += 1;
        }
    }
527
    /// Fully remove an item. Additional calls to fetch with the same key will return None until
    /// a new one is stored again.
    ///
    /// <div class="warning">
    /// This is really slow!
    ///
    /// All items in flash have to be read and deserialized to find the items with the key.
    /// This is unlikely to be cached well.
    ///
    /// Alternatively, e.g. when you don't have a [`MultiwriteNorFlash`] flash, you could store your value inside an Option
    /// and store the value `None` to mark it as erased.
    /// </div>
    pub async fn remove_item(
        &mut self,
        data_buffer: &mut [u8],
        search_key: &K,
    ) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // `Some(search_key)` removes only items matching this key
        // (`remove_item_inner` with `None` removes everything).
        run_with_auto_repair!(
            function = self.remove_item_inner(data_buffer, Some(search_key)).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
553
    /// Fully remove all stored items. Additional calls to fetch with any key will return None until
    /// new items are stored again.
    ///
    /// <div class="warning">
    /// This might be really slow! This doesn't simply erase flash, but goes through all items and marks them as deleted.
    /// This is better for flash endurance.
    ///
    /// You might want to simply erase the flash range, e.g. if your flash does not implement [`MultiwriteNorFlash`].
    /// Consider using the helper method for that: [`Self::erase_all`].
    /// </div>
    pub async fn remove_all_items(&mut self, data_buffer: &mut [u8]) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // `None` as the search key removes every item regardless of key.
        run_with_auto_repair!(
            function = self.remove_item_inner(data_buffer, None).await,
            repair = self.try_repair(data_buffer).await?
        )
    }
573
    /// If `search_key` is None, then all items will be removed
    ///
    /// Walks every non-open page and erases (marks deleted) each present item
    /// whose key matches. Requires [`MultiwriteNorFlash`] because the erase marker
    /// is written over already-programmed words.
    async fn remove_item_inner(
        &mut self,
        data_buffer: &mut [u8],
        search_key: Option<&K>,
    ) -> Result<(), Error<S::Error>>
    where
        S: MultiwriteNorFlash,
    {
        // Keep the cache consistent with what we're about to do: either forget
        // the one key, or drop all cached state for a full wipe.
        if let Some(key) = &search_key {
            self.inner.cache.notice_key_erased(key);
        } else {
            self.inner.cache.invalidate_cache_state();
        }

        // Search for the last used page. We're gonna erase from the one after this first.
        // If we get an early shutoff or cancellation, this will make it so that we don't return
        // an old version of the key on the next fetch.
        let last_used_page = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?
            .unwrap_or_default();

        // Go through all the pages
        for page_index in self.inner.get_pages(self.inner.next_page(last_used_page)) {
            if self.inner.get_page_state(page_index).await?.is_open() {
                // This page is open, we don't have to check it
                continue;
            }

            // Skip the page-state words at both ends of the page.
            let page_data_start_address =
                calculate_page_address::<S>(self.flash_range(), page_index) + S::WORD_SIZE as u32;
            let page_data_end_address =
                calculate_page_end_address::<S>(self.flash_range(), page_index)
                    - S::WORD_SIZE as u32;

            // Go through all items on the page
            let mut item_headers =
                ItemHeaderIter::new(page_data_start_address, page_data_end_address);

            while let (Some(item_header), item_address) =
                item_headers.next(&mut self.inner.flash).await?
            {
                let item = item_header
                    .read_item(
                        &mut self.inner.flash,
                        data_buffer,
                        item_address,
                        page_data_end_address,
                    )
                    .await?;

                match item {
                    // Already erased or unreadable items need no work.
                    item::MaybeItem::Corrupted(_, _) | item::MaybeItem::Erased(_, _) => continue,
                    item::MaybeItem::Present(item) => {
                        let item_match = match search_key {
                            Some(search_key) => K::deserialize_from(item.data())?.0 == *search_key,
                            _ => true,
                        };
                        // If this item has the same key as the key we're trying to erase, then erase the item.
                        // But keep going! We need to erase everything.
                        if item_match {
                            item.header
                                .erase_data(
                                    &mut self.inner.flash,
                                    self.inner.flash_range.clone(),
                                    &mut self.inner.cache,
                                    item_address,
                                )
                                .await?;
                        }
                    }
                }
            }
        }

        // We're done, we now know the cache is in a good state
        self.inner.cache.unmark_dirty();

        Ok(())
    }
656
    /// Get an iterator that iterates over all non-erased & non-corrupted items in the map.
    ///
    /// <div class="warning">
    /// You should be very careful when using the map item iterator:
    /// <ul>
    /// <li>
    /// Because the map doesn't erase items when you insert a new one with the same key,
    /// it's expected that the iterator returns items with the same key multiple times.
    /// Only the last returned one is the `active` one.
    /// </li>
    /// <li>
    /// The iterator requires ALL items in the storage have the SAME Value type.
    /// If you have different types of items in your map, the iterator might return incorrect data or error.
    /// </li>
    /// </ul>
    /// </div>
    ///
    /// The following is a simple example of how to use the iterator:
    /// ```rust
    /// # use sequential_storage::cache::NoCache;
    /// # use sequential_storage::map::{MapConfig, MapStorage};
    /// # use mock_flash::MockFlashBase;
    /// # use futures::executor::block_on;
    /// # use std::collections::HashMap;
    /// # type Flash = MockFlashBase<10, 1, 4096>;
    /// # mod mock_flash {
    /// #   include!("mock_flash.rs");
    /// # }
    /// # fn init_flash() -> Flash {
    /// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
    /// # }
    ///
    /// # block_on(async {
    /// let mut flash = init_flash();
    ///
    /// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
    /// let mut data_buffer = [0; 128];
    ///
    /// // Create the iterator of map items
    /// let mut iterator = storage.fetch_all_items(
    ///     &mut data_buffer
    /// )
    /// .await
    /// .unwrap();
    ///
    /// let mut all_items = HashMap::new();
    ///
    /// // Iterate through all items, suppose the Key and Value types are u8, u32
    /// while let Some((key, value)) = iterator
    ///     .next::<u32>(&mut data_buffer)
    ///     .await
    ///     .unwrap()
    /// {
    ///     // Do something with the item.
    ///     // Please note that for the same key there might be multiple items returned,
    ///     // the last one is the current active one.
    ///     all_items.insert(key, value);
    /// }
    /// # })
    /// ```
    pub async fn fetch_all_items(
        &mut self,
        data_buffer: &mut [u8],
    ) -> Result<MapItemIter<'_, K, S, C>, Error<S::Error>> {
        // Get the first page index.
        // The first page used by the map is the next page of the `PartialOpen` page or the last `Closed` page
        let first_page = run_with_auto_repair!(
            function = {
                match self
                    .inner
                    .find_first_page(0, PageState::PartialOpen)
                    .await?
                {
                    Some(last_used_page) => {
                        // The next page of the `PartialOpen` page is the first page
                        Ok(self.inner.next_page(last_used_page))
                    }
                    None => {
                        // In the event that all pages are still open or the last used page was just closed, we search for the first open page.
                        // If the page one before that is closed, then that's the last used page.
                        if let Some(first_open_page) =
                            self.inner.find_first_page(0, PageState::Open).await?
                        {
                            let previous_page = self.inner.previous_page(first_open_page);
                            if self.inner.get_page_state(previous_page).await?.is_closed() {
                                // The previous page is closed, so the first_open_page is what we want
                                Ok(first_open_page)
                            } else {
                                // The page before the open page is not closed, so it must be open.
                                // This means that all pages are open and that we don't have any items yet.
                                // Starting at page 0 then yields an (empty) iterator.
                                self.inner.cache.unmark_dirty();
                                Ok(0)
                            }
                        } else {
                            // There are no open pages, so everything must be closed.
                            // Something is up and this should never happen.
                            // Return Corrupted so the auto-repair wrapper can recover.
                            Err(Error::Corrupted {
                                #[cfg(feature = "_test")]
                                backtrace: std::backtrace::Backtrace::capture(),
                            })
                        }
                    }
                }
            },
            repair = self.try_repair(data_buffer).await?
        )?;

        // Skip the page-state words at both ends of the first page.
        let start_address =
            calculate_page_address::<S>(self.flash_range(), first_page) + S::WORD_SIZE as u32;
        let end_address =
            calculate_page_end_address::<S>(self.flash_range(), first_page) - S::WORD_SIZE as u32;

        Ok(MapItemIter {
            storage: self,
            first_page,
            current_page_index: first_page,
            current_iter: ItemIter::new(start_address, end_address),
            _key: PhantomData,
        })
    }
778
    /// Copy every still-current item from `source_page` to `target_page`, then
    /// open (`open_page`) the source page again.
    ///
    /// An item is only migrated when the newest item for its key is the one found on
    /// the source page (`found_address == item_address`); superseded items are skipped.
    async fn migrate_items(
        &mut self,
        data_buffer: &mut [u8],
        source_page: usize,
        target_page: usize,
    ) -> Result<(), Error<S::Error>> {
        // We need to move the data from the next buffer page to the next_page_to_use, but only if that data
        // doesn't have a newer value somewhere else.

        // Next free write location on the target page (the page's first word is skipped).
        let mut next_page_write_address =
            calculate_page_address::<S>(self.flash_range(), target_page) + S::WORD_SIZE as u32;

        let mut it = ItemIter::new(
            calculate_page_address::<S>(self.flash_range(), source_page) + S::WORD_SIZE as u32,
            calculate_page_end_address::<S>(self.flash_range(), source_page) - S::WORD_SIZE as u32,
        );
        while let Some((item, item_address)) = it.next(&mut self.inner.flash, data_buffer).await? {
            let (key, _) = K::deserialize_from(item.data())?;

            // We're in a decent state here
            self.inner.cache.unmark_dirty();

            // Search for the newest item with the key we found
            let Some((found_item, found_address, _)) =
                self.fetch_item_with_location(data_buffer, &key).await?
            else {
                // We couldn't even find our own item?
                return Err(Error::Corrupted {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                });
            };

            // The fetched item comes back "unborrowed"; re-attach it to `data_buffer`
            // before it can be written out.
            let found_item = found_item
                .reborrow(data_buffer)
                .ok_or_else(|| Error::LogicBug {
                    #[cfg(feature = "_test")]
                    backtrace: std::backtrace::Backtrace::capture(),
                })?;

            if found_address == item_address {
                // The item on the source page is the newest for this key, so carry it over.
                self.inner
                    .cache
                    .notice_key_location(&key, next_page_write_address, true);
                found_item
                    .write(
                        &mut self.inner.flash,
                        self.inner.flash_range.clone(),
                        &mut self.inner.cache,
                        next_page_write_address,
                    )
                    .await?;
                next_page_write_address = found_item
                    .header
                    .next_item_address::<S>(next_page_write_address);
            }
        }

        self.inner.open_page(source_page).await?;

        Ok(())
    }
841
    /// Try to repair the state of the flash to hopefully get back everything in working order.
    /// Care is taken that no data is lost, but this depends on correctly repairing the state and
    /// so is only best effort.
    ///
    /// This function might be called after a different function returned the [`Error::Corrupted`] error.
    /// There's no guarantee it will work.
    ///
    /// If this function, or the next call into this crate after it, returns [`Error::Corrupted`], then it's unlikely
    /// that the state can be recovered. To at least make everything function again at the cost of losing the data,
    /// erase the flash range.
    async fn try_repair(&mut self, data_buffer: &mut [u8]) -> Result<(), Error<S::Error>> {
        // The cache may describe the corrupted state, so start from a clean slate.
        self.inner.cache.invalidate_cache_state();

        self.inner.try_general_repair().await?;

        // Let's check if we corrupted in the middle of a migration
        if let Some(partial_open_page) = self
            .inner
            .find_first_page(0, PageState::PartialOpen)
            .await?
        {
            let buffer_page = self.inner.next_page(partial_open_page);
            if !self.inner.get_page_state(buffer_page).await?.is_open() {
                // Yes, the migration got interrupted. Let's redo it.
                // To do that, we erase the partial open page first because it contains incomplete data.
                self.inner.open_page(partial_open_page).await?;

                // Then partially close it again
                self.inner.partial_close_page(partial_open_page).await?;

                // Redo the interrupted migration from the buffer page into the reset page.
                self.migrate_items(data_buffer, buffer_page, partial_open_page)
                    .await?;
            }
        }

        Ok(())
    }
879
    /// Resets the flash in the entire given flash range.
    ///
    /// This is just a thin helper function as it just calls the flash's erase function.
    /// Delegates directly to the inner generic storage.
    pub fn erase_all(&mut self) -> impl Future<Output = Result<(), Error<S::Error>>> {
        self.inner.erase_all()
    }
886
    /// Get the minimal overhead size per stored item for the given flash type.
    ///
    /// The associated data of each item is additionally padded to a full flash word size, but that's not part of this number.\
    /// This means the full item length is `returned number + (data length).next_multiple_of(S::WORD_SIZE)`.
    ///
    /// Delegates to [`GenericStorage::item_overhead_size`].
    #[must_use]
    pub const fn item_overhead_size() -> u32 {
        GenericStorage::<S, C>::item_overhead_size()
    }
895
    /// Destroy the instance to get back the flash and the cache.
    ///
    /// The cache can be passed to a new storage instance, but only for the same flash region and if nothing has changed in flash.
    pub fn destroy(self) -> (S, C) {
        // Consumes `self`; the inner storage hands back its parts.
        self.inner.destroy()
    }
902
    /// Get a reference to the flash. Mutating the memory is at your own risk.
    pub const fn flash(&mut self) -> &mut S {
        // Exposes the inner flash driver directly; no invariants are re-checked.
        self.inner.flash()
    }
907
    /// Get the flash range being used
    pub const fn flash_range(&self) -> Range<u32> {
        // Returns a copy of the configured address range.
        self.inner.flash_range()
    }
912
    #[cfg(any(test, feature = "std"))]
    /// Print all items in flash to the returned string
    ///
    /// This is meant as a debugging utility. The string format is not stable.
    /// Only available in test builds or with the `std` feature enabled.
    pub fn print_items(&mut self) -> impl Future<Output = String> {
        self.inner.print_items()
    }
920}
921
/// Iterator which iterates all non-erased & non-corrupted items in the map.
///
/// The iterator will return the (Key, Value) tuple when calling `next()`.
/// If the iterator ends, it will return `Ok(None)`.
///
/// The following is a simple example of how to use the iterator:
/// ```rust
/// # use sequential_storage::cache::NoCache;
/// # use sequential_storage::map::{MapConfig, MapStorage};
/// # use mock_flash::MockFlashBase;
/// # use futures::executor::block_on;
/// # use std::collections::HashMap;
/// # type Flash = MockFlashBase<10, 1, 4096>;
/// # mod mock_flash {
/// #   include!("mock_flash.rs");
/// # }
/// # fn init_flash() -> Flash {
/// #     Flash::new(mock_flash::WriteCountCheck::Twice, None, false)
/// # }
///
/// # block_on(async {
/// let mut flash = init_flash();
///
/// let mut storage = MapStorage::<u8, _, _>::new(flash, const { MapConfig::new(0x1000..0x3000) }, NoCache::new());
/// let mut data_buffer = [0; 128];
///
/// // Create the iterator of map items
/// let mut iterator = storage.fetch_all_items(
///     &mut data_buffer
/// )
/// .await
/// .unwrap();
///
/// let mut all_items = HashMap::new();
///
/// // Iterate through all items, suppose the Key and Value types are u8, u32
/// while let Some((key, value)) = iterator
///     .next::<u32>(&mut data_buffer)
///     .await
///     .unwrap()
/// {
///     // Do something with the item.
///     // Please note that for the same key there might be multiple items returned,
///     // the last one is the current active one.
///     all_items.insert(key, value);
/// }
/// # })
/// ```
pub struct MapItemIter<'s, K: Key, S: NorFlash, C: KeyCacheImpl<K>> {
    // The storage being iterated; exclusively borrowed for the iterator's lifetime.
    storage: &'s mut MapStorage<K, S, C>,
    // Page where iteration started; reaching it again means every page was visited.
    first_page: usize,
    // Page currently being iterated.
    current_page_index: usize,
    // Item iterator over the current page's data area.
    pub(crate) current_iter: ItemIter,
    _key: PhantomData<K>,
}
977
impl<K: Key, S: NorFlash, C: KeyCacheImpl<K>> MapItemIter<'_, K, S, C> {
    /// Get the next item in the iterator. Be careful that the given `data_buffer` should be large enough to contain the serialized key and value.
    pub async fn next<'a, V: Value<'a>>(
        &mut self,
        data_buffer: &'a mut [u8],
    ) -> Result<Option<(K, V)>, Error<S::Error>> {
        // Find the next item
        let item = loop {
            if let Some((item, _address)) = self
                .current_iter
                .next(&mut self.storage.inner.flash, data_buffer)
                .await?
            {
                // We've found the next item, quit the loop
                break item;
            }

            // The current page is done, we need to find the next page
            // Find next page which is not open, update `self.current_iter`
            loop {
                self.current_page_index = self.storage.inner.next_page(self.current_page_index);

                // We've looped back to the first page, which means all pages are checked, there's nothing left so we return None
                if self.current_page_index == self.first_page {
                    return Ok(None);
                }

                match self
                    .storage
                    .inner
                    .get_page_state(self.current_page_index)
                    .await
                {
                    Ok(PageState::Closed | PageState::PartialOpen) => {
                        // Point the item iterator at this page's data area
                        // (the first and last word of the page are skipped).
                        self.current_iter = ItemIter::new(
                            calculate_page_address::<S>(
                                self.storage.inner.flash_range.clone(),
                                self.current_page_index,
                            ) + S::WORD_SIZE as u32,
                            calculate_page_end_address::<S>(
                                self.storage.inner.flash_range.clone(),
                                self.current_page_index,
                            ) - S::WORD_SIZE as u32,
                        );
                        break;
                    }
                    // Open pages hold no items; pages whose state could not be
                    // read (`Err`) are silently skipped as well.
                    _ => continue,
                }
            }
        };

        // NOTE(review): if the deserialized key length exceeds the item's data length,
        // the slice below panics rather than erroring — assumes well-formed items.
        let data_len = item.header.length as usize;
        let (key, key_len) = K::deserialize_from(item.data())?;
        let (value, _value_len) = V::deserialize_from(&data_buffer[..data_len][key_len..])
            .map_err(Error::SerializationError)?;

        Ok(Some((key, value)))
    }
}
1037
/// Anything implementing this trait can be used as a key in the map functions.
///
/// It provides a way to serialize and deserialize the key.
///
/// The `Eq` bound is used because we need to be able to compare keys and the
/// `Clone` bound helps us pass the key around.
///
/// The key cannot have a lifetime like the [Value]
pub trait Key: Eq + Clone + Sized {
    /// Serialize the key into the given buffer.
    /// The serialized size is returned.
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError>;
    /// Deserialize the key from the given buffer.
    /// The key is returned together with the serialized length.
    fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError>;
    /// Get the length of the key from the buffer.
    /// This is an optimized version of [`Self::deserialize_from`] that doesn't have to deserialize everything.
    ///
    /// The default implementation deserializes fully and discards the key.
    fn get_len(buffer: &[u8]) -> Result<usize, SerializationError> {
        Self::deserialize_from(buffer).map(|(_, len)| len)
    }
}
1059
/// Implements [`Key`] for a fixed-size integer type using its little-endian encoding.
macro_rules! impl_key_num {
    ($int:ty) => {
        impl Key for $int {
            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
                let bytes = self.to_le_bytes();
                let target = buffer
                    .get_mut(..bytes.len())
                    .ok_or(SerializationError::BufferTooSmall)?;
                target.copy_from_slice(&bytes);
                Ok(bytes.len())
            }

            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
                let raw = buffer
                    .get(..size_of::<Self>())
                    .ok_or(SerializationError::BufferTooSmall)?;
                // The slice is exactly `size_of::<Self>()` bytes, so this cannot fail.
                Ok((Self::from_le_bytes(raw.try_into().unwrap()), size_of::<Self>()))
            }

            fn get_len(_buffer: &[u8]) -> Result<usize, SerializationError> {
                // Fixed-size type: the length never depends on the buffer contents.
                Ok(size_of::<Self>())
            }
        }
    };
}
1092
// Implement `Key` for all fixed-size integer primitives (little-endian encoding).
impl_key_num!(u8);
impl_key_num!(u16);
impl_key_num!(u32);
impl_key_num!(u64);
impl_key_num!(u128);
impl_key_num!(i8);
impl_key_num!(i16);
impl_key_num!(i32);
impl_key_num!(i64);
impl_key_num!(i128);
1103
1104impl<const N: usize> Key for [u8; N] {
1105    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1106        buffer
1107            .get_mut(..N)
1108            .ok_or(SerializationError::BufferTooSmall)?
1109            .copy_from_slice(self);
1110        Ok(N)
1111    }
1112
1113    fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
1114        Ok((
1115            buffer
1116                .get(..N)
1117                .ok_or(SerializationError::BufferTooSmall)?
1118                .try_into()
1119                .unwrap(),
1120            N,
1121        ))
1122    }
1123
1124    fn get_len(_buffer: &[u8]) -> Result<usize, SerializationError> {
1125        Ok(N)
1126    }
1127}
1128
// The unit key occupies zero bytes; useful when the map only needs to store a single value.
impl Key for () {
    fn serialize_into(&self, _buffer: &mut [u8]) -> Result<usize, SerializationError> {
        Ok(0)
    }

    // `get_len` is inherited from the trait default and also yields 0.
    fn deserialize_from(_buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
        Ok(((), 0))
    }
}
1138
/// The trait that defines how map values are serialized and deserialized.
///
/// It also carries a lifetime so that zero-copy deserialization is supported.
/// Zero-copy serialization is not supported due to technical restrictions.
pub trait Value<'a> {
    /// Serialize the value into the given buffer. If everything went ok, this function returns the length
    /// of the used part of the buffer.
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError>;
    /// Deserialize the value from the buffer. Because of the added lifetime, the implementation can borrow from the
    /// buffer which opens up some zero-copy possibilities.
    ///
    /// The buffer will be the same length as the serialize function returned for this value. Though note that the length
    /// is written from flash, so bitflips can affect that (though the length is separately crc-protected) and the key deserialization might
    /// return a wrong length.
    ///
    /// Returns the decoded value and the amount of bytes used from the buffer.
    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
    where
        Self: Sized;
}
1159
1160impl<'a> Value<'a> for bool {
1161    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1162        <u8 as Value>::serialize_into(&u8::from(*self), buffer)
1163    }
1164
1165    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1166    where
1167        Self: Sized,
1168    {
1169        let (value, size) = <u8 as Value>::deserialize_from(buffer)?;
1170        Ok((value != 0, size))
1171    }
1172}
1173
1174impl<'a, T: Value<'a>> Value<'a> for Option<T> {
1175    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1176        if let Some(val) = self {
1177            let mut size = 0;
1178            size += <bool as Value>::serialize_into(&true, buffer)?;
1179            size += <T as Value>::serialize_into(
1180                val,
1181                buffer
1182                    .get_mut(size..)
1183                    .ok_or(SerializationError::BufferTooSmall)?,
1184            )?;
1185            Ok(size)
1186        } else {
1187            <bool as Value>::serialize_into(&false, buffer)
1188        }
1189    }
1190
1191    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1192    where
1193        Self: Sized,
1194    {
1195        let (is_some, tag_size) = <bool as Value>::deserialize_from(buffer)?;
1196        if is_some {
1197            let (value, value_size) = <T as Value>::deserialize_from(
1198                buffer
1199                    .get(tag_size..)
1200                    .ok_or(SerializationError::BufferTooSmall)?,
1201            )?;
1202            Ok((Some(value), tag_size + value_size))
1203        } else {
1204            Ok((None, tag_size))
1205        }
1206    }
1207}
1208
1209impl<'a> Value<'a> for &'a [u8] {
1210    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
1211        buffer
1212            .get_mut(..self.len())
1213            .ok_or(SerializationError::BufferTooSmall)?
1214            .copy_from_slice(self);
1215        Ok(self.len())
1216    }
1217
1218    fn deserialize_from(buffer: &'a [u8]) -> Result<(Self, usize), SerializationError>
1219    where
1220        Self: Sized,
1221    {
1222        Ok((buffer, buffer.len()))
1223    }
1224}
1225
/// Implements [`Value`] for a numeric primitive and for arrays of that primitive,
/// using the little-endian encoding of each element.
macro_rules! impl_map_item_num {
    ($num:ty) => {
        impl<'a> Value<'a> for $num {
            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
                let bytes = self.to_le_bytes();
                let target = buffer
                    .get_mut(..bytes.len())
                    .ok_or(SerializationError::BufferTooSmall)?;
                target.copy_from_slice(&bytes);
                Ok(bytes.len())
            }

            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
                let raw = buffer
                    .get(..size_of::<Self>())
                    .ok_or(SerializationError::BufferTooSmall)?;
                // The slice is exactly `size_of::<Self>()` bytes, so this cannot fail.
                Ok((Self::from_le_bytes(raw.try_into().unwrap()), size_of::<Self>()))
            }
        }

        // Also implement `Value` for arrays of numbers.
        impl<'a, const N: usize> Value<'a> for [$num; N] {
            fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, SerializationError> {
                let elem_size = size_of::<$num>();
                let total = elem_size * N;

                let target = buffer
                    .get_mut(..total)
                    .ok_or(SerializationError::BufferTooSmall)?;

                for (slot, value) in target.chunks_exact_mut(elem_size).zip(self.iter()) {
                    slot.copy_from_slice(&value.to_le_bytes());
                }
                Ok(total)
            }

            fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), SerializationError> {
                let elem_size = size_of::<$num>();
                let total = elem_size * N;

                let raw = buffer
                    .get(..total)
                    .ok_or(SerializationError::BufferTooSmall)?;

                let mut out = [0 as $num; N];
                for (slot, chunk) in out.iter_mut().zip(raw.chunks_exact(elem_size)) {
                    *slot = <$num>::from_le_bytes(chunk.try_into().unwrap());
                }
                Ok((out, total))
            }
        }
    };
}
1283
// Implement `Value` for all integer and float primitives (little-endian encoding).
impl_map_item_num!(u8);
impl_map_item_num!(u16);
impl_map_item_num!(u32);
impl_map_item_num!(u64);
impl_map_item_num!(u128);
impl_map_item_num!(i8);
impl_map_item_num!(i16);
impl_map_item_num!(i32);
impl_map_item_num!(i64);
impl_map_item_num!(i128);
impl_map_item_num!(f32);
impl_map_item_num!(f64);
1296
/// Error for map value (de)serialization.
///
/// This error type is predefined (in contrast to using generics) to save many kilobytes of binary size.
#[non_exhaustive]
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SerializationError {
    /// The provided buffer was too small.
    BufferTooSmall,
    /// The serialization could not succeed because the data was not in order. (e.g. too big to fit)
    InvalidData,
    /// The deserialization could not succeed because the bytes are in an invalid format.
    InvalidFormat,
    /// An implementation defined error that might contain more information than the other predefined
    /// error variants.
    Custom(i32),
}

impl core::fmt::Display for SerializationError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // The fixed messages avoid formatting machinery; only `Custom` interpolates.
        match self {
            Self::BufferTooSmall => f.write_str("Buffer too small"),
            Self::InvalidData => f.write_str("Invalid data"),
            Self::InvalidFormat => f.write_str("Invalid format"),
            Self::Custom(code) => write!(f, "Custom error: {code}"),
        }
    }
}
1325
1326#[cfg(test)]
1327mod tests {
1328    use crate::{AlignedBuf, cache::NoCache, mock_flash};
1329
1330    use super::*;
1331    use futures_test::test;
1332
1333    type MockFlashBig = mock_flash::MockFlashBase<4, 4, 256>;
1334    type MockFlashTiny = mock_flash::MockFlashBase<2, 1, 32>;
1335
    // Basic round-trip test: missing keys return None, overwrites win, and
    // values survive thousands of stores that wrap the flash pages.
    #[test]
    async fn store_and_fetch() {
        let mut storage = MapStorage::<u8, _, _>::new(
            MockFlashBig::default(),
            MapConfig::new(0x000..0x1000),
            cache::NoCache::new(),
        );

        let mut data_buffer = AlignedBuf([0; 128]);

        let start_snapshot = storage.flash().stats_snapshot();

        // Fetching from an empty map yields None for any key.
        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap();
        assert_eq!(item, None);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &60)
            .await
            .unwrap();
        assert_eq!(item, None);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0xFF)
            .await
            .unwrap();
        assert_eq!(item, None);

        // Store twice under the same key; the second value must win.
        storage
            .store_item(&mut data_buffer, &0u8, &[5u8])
            .await
            .unwrap();
        storage
            .store_item(&mut data_buffer, &0u8, &[5u8, 6])
            .await
            .unwrap();

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[5, 6]);

        // A different key must not disturb the existing entry.
        storage
            .store_item(&mut data_buffer, &1u8, &[2u8, 2, 2, 2, 2, 2])
            .await
            .unwrap();

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &0)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[5, 6]);

        let item = storage
            .fetch_item::<&[u8]>(&mut data_buffer, &1)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(item, &[2, 2, 2, 2, 2, 2]);

        // Hammer keys 0..10 so the storage wraps around many times.
        for index in 0..4000 {
            storage
                .store_item(
                    &mut data_buffer,
                    &((index % 10) as u8),
                    &vec![(index % 10) as u8 * 2; index % 10].as_slice(),
                )
                .await
                .unwrap();
        }

        for i in 0..10 {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]);
        }

        // Writing an unrelated key many times must not corrupt the others.
        for _ in 0..4000 {
            storage
                .store_item(&mut data_buffer, &11u8, &[0; 10])
                .await
                .unwrap();
        }

        for i in 0..10 {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]);
        }

        println!(
            "{:?}",
            start_snapshot.compare_to(storage.flash().stats_snapshot()),
        );
    }
1442
    // On a tiny two-page flash, storing one item more than fits must return
    // `Error::FullStorage` while leaving the already-stored items readable.
    #[test]
    async fn store_too_many_items() {
        const UPPER_BOUND: u8 = 3;

        let mut storage = MapStorage::new(
            MockFlashTiny::default(),
            const { MapConfig::new(0x00..0x40) },
            NoCache::new(),
        );
        let mut data_buffer = AlignedBuf([0; 128]);

        for i in 0..UPPER_BOUND {
            println!("Storing {i:?}");

            storage
                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
                .await
                .unwrap();
        }

        // The next distinct key no longer fits.
        assert_eq!(
            storage
                .store_item(
                    &mut data_buffer,
                    &UPPER_BOUND,
                    &vec![0; UPPER_BOUND as usize].as_slice(),
                )
                .await,
            Err(Error::FullStorage)
        );

        // The failed store must not have damaged the existing items.
        for i in 0..UPPER_BOUND {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();

            println!("Fetched {item:?}");

            assert_eq!(item, vec![i; i as usize]);
        }
    }
1486
    // Same overflow scenario as `store_too_many_items`, but on the larger
    // four-page flash with a correspondingly larger key count.
    #[test]
    async fn store_too_many_items_big() {
        const UPPER_BOUND: u8 = 68;

        let mut storage = MapStorage::new(
            MockFlashBig::default(),
            const { MapConfig::new(0x0000..0x1000) },
            NoCache::new(),
        );
        let mut data_buffer = AlignedBuf([0; 128]);

        for i in 0..UPPER_BOUND {
            println!("Storing {i:?}");

            storage
                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
                .await
                .unwrap();
        }

        // The next distinct key no longer fits.
        assert_eq!(
            storage
                .store_item(
                    &mut data_buffer,
                    &UPPER_BOUND,
                    &vec![0; UPPER_BOUND as usize].as_slice(),
                )
                .await,
            Err(Error::FullStorage)
        );

        // The failed store must not have damaged the existing items.
        for i in 0..UPPER_BOUND {
            let item = storage
                .fetch_item::<&[u8]>(&mut data_buffer, &i)
                .await
                .unwrap()
                .unwrap();

            println!("Fetched {item:?}");

            assert_eq!(item, vec![i; i as usize]);
        }
    }
1530
1531    #[test]
1532    async fn store_many_items_big() {
1533        let mut storage = MapStorage::new(
1534            mock_flash::MockFlashBase::<4, 1, 4096>::default(),
1535            const { MapConfig::new(0x0000..0x4000) },
1536            NoCache::new(),
1537        );
1538        let mut data_buffer = AlignedBuf([0; 128]);
1539
1540        const LENGHT_PER_KEY: [usize; 24] = [
1541            11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5,
1542        ];
1543
1544        for _ in 0..100 {
1545            #[allow(clippy::needless_range_loop)]
1546            for i in 0..24 {
1547                storage
1548                    .store_item(
1549                        &mut data_buffer,
1550                        &(i as u16),
1551                        &vec![i as u8; LENGHT_PER_KEY[i]].as_slice(),
1552                    )
1553                    .await
1554                    .unwrap();
1555            }
1556        }
1557
1558        #[allow(clippy::needless_range_loop)]
1559        for i in 0..24 {
1560            let item = storage
1561                .fetch_item::<&[u8]>(&mut data_buffer, &(i as u16))
1562                .await
1563                .unwrap()
1564                .unwrap();
1565
1566            println!("Fetched {item:?}");
1567
1568            assert_eq!(item, vec![i as u8; LENGHT_PER_KEY[i]]);
1569        }
1570    }
1571
    // Store 24 keys, then remove them one by one (highest key first), checking
    // after each removal that only the removed key is gone.
    #[test]
    async fn remove_items() {
        let mut storage = MapStorage::new(
            mock_flash::MockFlashBase::<4, 1, 4096>::new(
                mock_flash::WriteCountCheck::Twice,
                None,
                true,
            ),
            const { MapConfig::new(0x0000..0x4000) },
            NoCache::new(),
        );
        let mut data_buffer = AlignedBuf([0; 128]);

        // Add some data to flash
        for j in 0..10 {
            for i in 0..24 {
                storage
                    .store_item(
                        &mut data_buffer,
                        &(i as u8),
                        &vec![i as u8; j + 2].as_slice(),
                    )
                    .await
                    .unwrap();
            }
        }

        for j in (0..24).rev() {
            // Are all things still in flash that we expect?
            for i in 0..=j {
                assert!(
                    storage
                        .fetch_item::<&[u8]>(&mut data_buffer, &i)
                        .await
                        .unwrap()
                        .is_some()
                );
            }

            // Remove the item
            storage.remove_item(&mut data_buffer, &j).await.unwrap();

            // Are all things still in flash that we expect?
            for i in 0..j {
                assert!(
                    storage
                        .fetch_item::<&[u8]>(&mut data_buffer, &i)
                        .await
                        .unwrap()
                        .is_some()
                );
            }

            // The removed key itself must be gone.
            assert!(
                storage
                    .fetch_item::<&[u8]>(&mut data_buffer, &j)
                    .await
                    .unwrap()
                    .is_none()
            );
        }
    }
1634
1635    #[test]
1636    async fn remove_all() {
1637        let mut storage = MapStorage::new(
1638            mock_flash::MockFlashBase::<4, 1, 4096>::new(
1639                mock_flash::WriteCountCheck::Twice,
1640                None,
1641                true,
1642            ),
1643            const { MapConfig::new(0x0000..0x4000) },
1644            NoCache::new(),
1645        );
1646        let mut data_buffer = AlignedBuf([0; 128]);
1647
1648        // Add some data to flash
1649        for value in 0..10 {
1650            for key in 0..24u8 {
1651                storage
1652                    .store_item(&mut data_buffer, &key, &vec![key; value + 2].as_slice())
1653                    .await
1654                    .unwrap();
1655            }
1656        }
1657
1658        // Sanity check that we can find all the keys we just added.
1659        for key in 0..24u8 {
1660            assert!(
1661                storage
1662                    .fetch_item::<&[u8]>(&mut data_buffer, &key)
1663                    .await
1664                    .unwrap()
1665                    .is_some()
1666            );
1667        }
1668
1669        // Remove all the items
1670        storage.remove_all_items(&mut data_buffer).await.unwrap();
1671
1672        // Verify that none of the keys are present in flash.
1673        for key in 0..24 {
1674            assert!(
1675                storage
1676                    .fetch_item::<&[u8]>(&mut data_buffer, &key)
1677                    .await
1678                    .unwrap()
1679                    .is_none()
1680            );
1681        }
1682    }
1683
1684    #[test]
1685    async fn store_too_big_item() {
1686        let mut storage = MapStorage::new(
1687            MockFlashBig::new(mock_flash::WriteCountCheck::Twice, None, true),
1688            const { MapConfig::new(0x000..0x1000) },
1689            NoCache::new(),
1690        );
1691
1692        storage
1693            .store_item(&mut [0; 1024], &0u8, &[0u8; 1024 - 4 * 2 - 8 - 1])
1694            .await
1695            .unwrap();
1696
1697        assert_eq!(
1698            storage
1699                .store_item(&mut [0; 1024], &0u8, &[0u8; 1024 - 4 * 2 - 8 - 1 + 1],)
1700                .await,
1701            Err(Error::ItemTooBig)
1702        );
1703    }
1704
1705    #[test]
1706    async fn item_iterator() {
1707        const UPPER_BOUND: u8 = 64;
1708        let mut storage = MapStorage::new(
1709            MockFlashBig::default(),
1710            const { MapConfig::new(0x000..0x1000) },
1711            NoCache::new(),
1712        );
1713
1714        let mut data_buffer = AlignedBuf([0; 128]);
1715
1716        for i in 0..UPPER_BOUND {
1717            storage
1718                .store_item(&mut data_buffer, &i, &vec![i; i as usize].as_slice())
1719                .await
1720                .unwrap();
1721        }
1722
1723        // Save 10 times for key 1
1724        for i in 0..10 {
1725            storage
1726                .store_item(&mut data_buffer, &1u8, &vec![i; i as usize].as_slice())
1727                .await
1728                .unwrap();
1729        }
1730
1731        let mut map_iter = storage.fetch_all_items(&mut data_buffer).await.unwrap();
1732
1733        let mut count = 0;
1734        let mut last_value_buffer = [0u8; 64];
1735        let mut last_value_length = 0;
1736        while let Ok(Some((key, value))) = map_iter.next::<&[u8]>(&mut data_buffer).await {
1737            if key == 1 {
1738                // This is the key we stored multiple times, record the last value
1739                last_value_length = value.len();
1740                last_value_buffer[..value.len()].copy_from_slice(value);
1741            } else {
1742                assert_eq!(value, vec![key; key as usize]);
1743                count += 1;
1744            }
1745        }
1746
1747        assert_eq!(last_value_length, 9);
1748        assert_eq!(
1749            &last_value_buffer[..last_value_length],
1750            vec![9u8; 9].as_slice()
1751        );
1752
1753        // Check total number of fetched items, +1 since we didn't count key 1
1754        assert_eq!(count + 1, UPPER_BOUND);
1755    }
1756
1757    #[test]
1758    async fn store_unit_key() {
1759        let mut storage = MapStorage::new(
1760            MockFlashBig::default(),
1761            const { MapConfig::new(0x000..0x1000) },
1762            NoCache::new(),
1763        );
1764
1765        let mut data_buffer = AlignedBuf([0; 128]);
1766
1767        let item = storage
1768            .fetch_item::<&[u8]>(&mut data_buffer, &())
1769            .await
1770            .unwrap();
1771        assert_eq!(item, None);
1772
1773        storage
1774            .store_item(&mut data_buffer, &(), &[5u8])
1775            .await
1776            .unwrap();
1777        storage
1778            .store_item(&mut data_buffer, &(), &[5u8, 6])
1779            .await
1780            .unwrap();
1781
1782        let item = storage
1783            .fetch_item::<&[u8]>(&mut data_buffer, &())
1784            .await
1785            .unwrap()
1786            .unwrap();
1787        assert_eq!(item, &[5, 6]);
1788    }
1789
1790    #[test]
1791    async fn option_value() {
1792        let mut buffer = [0; 2];
1793
1794        assert_eq!(Some(42u8).serialize_into(&mut buffer), Ok(2));
1795        assert_eq!(Option::<u8>::deserialize_from(&buffer), Ok((Some(42u8), 2)));
1796        assert_eq!(buffer, [1, 42]);
1797
1798        let mut buffer = [0; 1];
1799
1800        assert_eq!(Option::<u8>::None.serialize_into(&mut buffer), Ok(1));
1801        assert_eq!(Option::<u8>::deserialize_from(&buffer), Ok((None, 1)));
1802        assert_eq!(buffer, [0]);
1803    }
1804
1805    #[test]
1806    async fn array_value() {
1807        let mut buffer = [0; 3];
1808        assert_eq!(Value::serialize_into(&[1u8, 2, 3], &mut buffer), Ok(3));
1809        assert_eq!(buffer, [1, 2, 3]);
1810        assert_eq!(
1811            <[u8; 3] as Value>::deserialize_from(&buffer),
1812            Ok(([1, 2, 3], 3))
1813        );
1814
1815        let mut buffer = [0; 4];
1816        assert_eq!(
1817            Value::serialize_into(&[0x1234u16, 0x5678], &mut buffer),
1818            Ok(4)
1819        );
1820        assert_eq!(buffer, [0x34, 0x12, 0x78, 0x56]);
1821        assert_eq!(
1822            <[u16; 2]>::deserialize_from(&buffer),
1823            Ok(([0x1234, 0x5678], 4))
1824        );
1825    }
1826}