1#![doc = include_str! ("../README.md")]
2#![cfg_attr(not(target_arch = "x86_64"), no_std)]
3
4pub mod error;
5pub mod mem_flash;
6pub mod platform;
7pub mod raw;
8
9mod get;
10mod internal;
11mod set;
12mod u24;
13
14pub use raw::{
15 ENTRIES_PER_PAGE,
16 ENTRY_STATE_BITMAP_SIZE,
17 FLASH_SECTOR_SIZE,
18 ITEM_SIZE,
19 ItemType,
20 MAX_BLOB_DATA_PER_PAGE,
21 MAX_BLOB_SIZE,
22 PAGE_HEADER_SIZE,
23 PageState,
24};
25
26pub const MAX_KEY_LENGTH: usize = 15;
28const MAX_KEY_NUL_TERMINATED_LENGTH: usize = MAX_KEY_LENGTH + 1;
29
30#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
32pub struct Key([u8; MAX_KEY_NUL_TERMINATED_LENGTH]);
33
34impl Key {
35 pub const fn from_array<const M: usize>(src: &[u8; M]) -> Self {
42 assert!(M <= MAX_KEY_LENGTH);
43 let mut dst = [0u8; MAX_KEY_NUL_TERMINATED_LENGTH];
44 let mut i = 0;
45 while i < M {
46 dst[i] = src[i];
47 i += 1;
48 }
49 Self(dst)
50 }
51
52 pub const fn from_slice(src: &[u8]) -> Self {
59 assert!(src.len() <= MAX_KEY_LENGTH);
60 let mut dst = [0u8; MAX_KEY_NUL_TERMINATED_LENGTH];
61 let mut i = 0;
62 while i < src.len() {
63 dst[i] = src[i];
64 i += 1;
65 }
66 Self(dst)
67 }
68
69 pub const fn from_str(s: &str) -> Self {
76 let bytes = s.as_bytes();
77 Self::from_slice(bytes)
78 }
79
80 pub const fn as_bytes(&self) -> &[u8; MAX_KEY_NUL_TERMINATED_LENGTH] {
82 &self.0
83 }
84
85 pub fn as_str(&self) -> &str {
87 let len = self.0.iter().position(|&b| b == 0).unwrap_or(self.0.len());
88 unsafe { core::str::from_utf8_unchecked(&self.0[..len]) }
90 }
91}
92
93impl fmt::Debug for Key {
94 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
95 write!(f, "Key(b\"")?;
97
98 for &byte in &self.0[..self.0.len() - 1] {
102 if byte == 0 {
104 write!(f, "\\0")?;
105 continue;
106 }
107
108 write!(f, "{}", core::ascii::escape_default(byte))?;
109 }
110
111 write!(f, "\")")
112 }
113}
114
#[cfg(feature = "defmt")]
impl defmt::Format for Key {
    // Mirrors the `fmt::Debug` byte-string rendering for defmt targets.
    // NOTE(review): escaping is hand-rolled here, presumably because
    // `core::ascii::escape_default` cannot be fed to `defmt::write!` — confirm.
    fn format(&self, f: defmt::Formatter) {
        defmt::write!(f, "Key(b\"");

        // Skip the final byte: it is the reserved NUL-terminator slot.
        for &byte in &self.0[..self.0.len() - 1] {
            match byte {
                b'\t' => defmt::write!(f, "\\t"),
                b'\n' => defmt::write!(f, "\\n"),
                b'\r' => defmt::write!(f, "\\r"),
                b'\\' => defmt::write!(f, "\\\\"),
                b'"' => defmt::write!(f, "\\\""),
                // Printable ASCII passes through unescaped.
                0x20..=0x7e => defmt::write!(f, "{}", byte as char),
                // Everything else (including NUL padding) as a hex escape.
                _ => defmt::write!(f, "\\x{:02x}", byte),
            }
        }

        defmt::write!(f, "\")");
    }
}
140
141impl AsRef<[u8]> for Key {
142 fn as_ref(&self) -> &[u8] {
143 self.as_bytes()
144 }
145}
146
147pub use get::Get;
148pub use set::Set;
149
150extern crate alloc;
151
152use alloc::collections::{
153 BTreeMap,
154 BinaryHeap,
155};
156use alloc::vec::Vec;
157use core::fmt;
158
159use crate::error::Error;
160use crate::internal::{
161 ChunkIndex,
162 IterPageItems,
163 ThinPage,
164 VersionOffset,
165};
166use crate::platform::Platform;
167use crate::raw::Item;
168
/// Snapshot of page- and entry-level usage counters for the whole partition.
#[derive(Debug, Clone, PartialEq)]
pub struct NvsStatistics {
    /// Page counts, bucketed by page state.
    pub pages: PageStatistics,
    /// Entry counters for each page, ordered by the page's flash address.
    pub entries_per_page: Vec<EntryStatistics>,
    /// Element-wise sum of `entries_per_page`.
    pub entries_overall: EntryStatistics,
}
175
/// Number of pages in each page state.
///
/// All-integer counters, so `Eq` and `Default` (all zeros) are derived in
/// addition to the original traits.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct PageStatistics {
    /// Pages in the uninitialized state.
    pub empty: u16,
    /// Pages in the active (writable) state.
    pub active: u16,
    /// Pages with no remaining free entries.
    pub full: u16,
    /// Pages in the freeing state (being reclaimed/erased).
    pub erasing: u16,
    /// Pages whose state is corrupt or invalid.
    pub corrupted: u16,
}
184
/// Per-entry-state counters, for one page or aggregated over the partition.
///
/// All-integer counters, so `Eq` and `Default` (all zeros) are derived in
/// addition to the original traits.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct EntryStatistics {
    /// Entries still in the empty (never written) state.
    pub empty: u32,
    /// Entries in the written state.
    pub written: u32,
    /// Entries in the erased state.
    pub erased: u32,
    /// Entries in an illegal/unrecognized state.
    pub illegal: u32,
}
192
/// An NVS store backed by a sector-aligned flash partition accessed through a
/// [`Platform`] HAL.
pub struct Nvs<T: Platform> {
    /// Flash-access HAL all reads and writes go through.
    pub(crate) hal: T,
    /// Byte offset of the partition in flash; sector-aligned (checked in `new`).
    pub(crate) base_address: usize,
    /// Partition length in flash sectors.
    pub(crate) sectors: u16,
    /// Latched on the first `Error::FlashError`; mutating entry points fail
    /// fast once set.
    pub(crate) faulted: bool,

    /// Namespace name -> namespace index table.
    pub(crate) namespaces: BTreeMap<Key, u8>,
    /// Heap of reusable pages (ordering per `ThinPage`'s `Ord` — see
    /// `internal`; presumably prioritizes the best page to recycle).
    pub(crate) free_pages: BinaryHeap<ThinPage>,
    /// Pages currently holding data, in load order.
    pub(crate) pages: Vec<ThinPage>,
}
206
207impl<T: Platform> Nvs<T> {
208 pub fn new(partition_offset: usize, partition_size: usize, hal: T) -> Result<Nvs<T>, Error> {
217 if !partition_offset.is_multiple_of(FLASH_SECTOR_SIZE) {
218 return Err(Error::InvalidPartitionOffset);
219 }
220
221 if !partition_size.is_multiple_of(FLASH_SECTOR_SIZE) {
222 return Err(Error::InvalidPartitionSize);
223 }
224
225 let sectors = partition_size / FLASH_SECTOR_SIZE;
226 if sectors > u16::MAX as usize {
227 return Err(Error::InvalidPartitionSize);
228 }
229
230 let mut nvs: Nvs<T> = Self {
231 hal,
232 base_address: partition_offset,
233 sectors: sectors as u16,
234 namespaces: BTreeMap::new(),
235 free_pages: Default::default(),
236 pages: Default::default(),
237 faulted: false,
238 };
239
240 match nvs.load_sectors() {
241 Ok(()) => Ok(nvs),
242 Err(Error::FlashError) => {
243 nvs.faulted = true;
244 Err(Error::FlashError)
245 }
246 Err(e) => Err(e),
247 }
248 }
249
250 pub fn get<R>(&mut self, namespace: &Key, key: &Key) -> Result<R, Error>
256 where
257 Nvs<T>: Get<R>,
258 {
259 match Get::get(self, namespace, key) {
260 Ok(val) => Ok(val),
261 Err(Error::FlashError) => {
262 self.faulted = true;
263 Err(Error::FlashError)
264 }
265 Err(e) => Err(e),
266 }
267 }
268
269 pub fn set<R>(&mut self, namespace: &Key, key: &Key, value: R) -> Result<(), Error>
277 where
278 Nvs<T>: Set<R>,
279 {
280 if self.faulted {
281 return Err(Error::FlashError);
282 }
283
284 match Set::set(self, namespace, key, value) {
285 Ok(()) => Ok(()),
286 Err(Error::FlashError) => {
287 self.faulted = true;
288 Err(Error::FlashError)
289 }
290 Err(e) => Err(e),
291 }
292 }
293
294 pub fn namespaces(&self) -> impl Iterator<Item = &Key> {
296 self.namespaces.keys()
297 }
298
299 pub fn keys(&mut self) -> impl Iterator<Item = Result<(Key, Key), Error>> {
305 IterKeys::new(&self.pages, &mut self.hal, &self.namespaces)
306 }
307
308 pub fn typed_entries(&mut self) -> impl Iterator<Item = Result<(Key, Key, ItemType), Error>> {
319 IterTypedEntries::new(&self.pages, &mut self.hal, &self.namespaces)
320 }
321
322 pub fn delete(&mut self, namespace: &Key, key: &Key) -> Result<(), Error> {
326 if self.faulted {
327 return Err(Error::FlashError);
328 }
329
330 if key.0[MAX_KEY_LENGTH] != b'\0' {
331 return Err(Error::KeyMalformed);
332 }
333 if namespace.0[MAX_KEY_LENGTH] != b'\0' {
334 return Err(Error::NamespaceMalformed);
335 }
336
337 let namespace_index = match self.namespaces.get(namespace) {
338 Some(&idx) => idx,
339 None => return Ok(()), };
341 let result = self.delete_key(namespace_index, key, ChunkIndex::Any);
342 match result {
343 Err(Error::KeyNotFound) => Ok(()),
344 Err(Error::FlashError) => {
345 self.faulted = true;
346 Err(Error::FlashError)
347 }
348 other => other,
349 }
350 }
351
352 pub fn into_inner(self) -> T {
357 self.hal
358 }
359
360 pub fn statistics(&mut self) -> Result<NvsStatistics, Error> {
362 if self.faulted {
363 return Err(Error::FlashError);
364 }
365
366 let mut page_stats = PageStatistics {
367 empty: 0,
368 active: 0,
369 full: 0,
370 erasing: 0,
371 corrupted: 0,
372 };
373
374 let mut all_pages: Vec<&ThinPage> = Vec::with_capacity(self.sectors as _);
375 all_pages.extend(self.pages.iter());
376 all_pages.extend(self.free_pages.iter());
377 all_pages.sort_by_key(|page| page.address);
379
380 let entries_per_page = all_pages
381 .into_iter()
382 .map(|page| {
383 match page.get_state() {
384 internal::ThinPageState::Active => page_stats.active += 1,
385 internal::ThinPageState::Full => page_stats.full += 1,
386 internal::ThinPageState::Freeing => page_stats.erasing += 1,
387 internal::ThinPageState::Corrupt => page_stats.corrupted += 1,
388 internal::ThinPageState::Invalid => page_stats.corrupted += 1,
389 internal::ThinPageState::Uninitialized => page_stats.empty += 1,
390 }
391
392 if *page.get_state() == internal::ThinPageState::Corrupt {
393 EntryStatistics {
394 empty: 0,
395 written: 0,
396 erased: 0,
397 illegal: ENTRIES_PER_PAGE as _,
398 }
399 } else {
400 let (empty, written, erased, illegal) = page.get_entry_statistics();
401 EntryStatistics {
402 empty,
403 written,
404 erased,
405 illegal,
406 }
407 }
408 })
409 .collect::<Vec<_>>();
410
411 let entries_overall = entries_per_page.iter().fold(
412 EntryStatistics {
413 empty: 0,
414 written: 0,
415 erased: 0,
416 illegal: 0,
417 },
418 |acc, x| EntryStatistics {
419 empty: acc.empty + x.empty,
420 written: acc.written + x.written,
421 erased: acc.erased + x.erased,
422 illegal: acc.illegal + x.illegal,
423 },
424 );
425
426 Ok(NvsStatistics {
427 pages: page_stats,
428 entries_per_page,
429 entries_overall,
430 })
431 }
432}
433
434struct IterLoadedItems<'a, T: Platform> {
435 pages: &'a [ThinPage],
436 current: Option<IterPageItems<'a, T>>,
437}
438
439impl<'a, T: Platform> IterLoadedItems<'a, T> {
440 fn new(mut pages: &'a [ThinPage], hal: &'a mut T) -> Self {
441 let first = pages.split_off_first();
442
443 Self {
444 pages,
445 current: first.map(|page| page.items(hal)),
446 }
447 }
448}
449
impl<'a, T: Platform> Iterator for IterLoadedItems<'a, T> {
    type Item = Result<Item, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        // `current` is None only when the page list was empty to begin with.
        let current = self.current.as_mut()?;

        // Advance past exhausted pages. `is_empty` presumably means "no items
        // left on the current page" — TODO confirm against `IterPageItems`.
        // Once the page list runs out, `?` ends the iteration, and it stays
        // ended on later calls because the slice remains empty.
        while current.is_empty() {
            let next_page = self.pages.split_off_first()?;

            current.switch_to_page(next_page);
        }

        current.next()
    }
}
468
469struct IterKeys<'a, T: Platform> {
470 items: IterLoadedItems<'a, T>,
471 namespaces: &'a BTreeMap<Key, u8>,
472}
473
474impl<'a, T: Platform> IterKeys<'a, T> {
475 fn new(pages: &'a [ThinPage], hal: &'a mut T, namespaces: &'a BTreeMap<Key, u8>) -> Self {
476 Self {
477 items: IterLoadedItems::new(pages, hal),
478 namespaces,
479 }
480 }
481
482 fn item_to_keys(&self, item: Item) -> (Key, Key) {
483 let (namespace_key, _) = self
484 .namespaces
485 .iter()
486 .find(|(_, idx)| **idx == item.namespace_index)
487 .unwrap();
489
490 (*namespace_key, item.key)
491 }
492}
493
494impl<'a, T: Platform> Iterator for IterKeys<'a, T> {
495 type Item = Result<(Key, Key), Error>;
496
497 fn next(&mut self) -> Option<Self::Item> {
498 loop {
499 return match self.items.next()? {
500 Ok(item) => {
501 if item.namespace_index == 0 || item.type_ == ItemType::Blob || item.type_ == ItemType::BlobIndex {
504 continue;
505 }
506
507 if item.type_ == ItemType::BlobData
508 && item.chunk_index != VersionOffset::V0 as u8
509 && item.chunk_index != VersionOffset::V1 as u8
510 {
511 continue;
512 }
513
514 Some(Ok(self.item_to_keys(item)))
515 }
516 Err(err) => Some(Err(err)),
517 };
518 }
519 }
520}
521
522struct IterTypedEntries<'a, T: Platform> {
523 items: IterLoadedItems<'a, T>,
524 namespaces: &'a BTreeMap<Key, u8>,
525}
526
527impl<'a, T: Platform> IterTypedEntries<'a, T> {
528 fn new(pages: &'a [ThinPage], hal: &'a mut T, namespaces: &'a BTreeMap<Key, u8>) -> Self {
529 Self {
530 items: IterLoadedItems::new(pages, hal),
531 namespaces,
532 }
533 }
534
535 fn item_to_entry(&self, item: Item) -> (Key, Key, ItemType) {
536 let (namespace_key, _) = self
537 .namespaces
538 .iter()
539 .find(|(_, idx)| **idx == item.namespace_index)
540 .unwrap();
541
542 (*namespace_key, item.key, item.type_)
543 }
544}
545
546impl<'a, T: Platform> Iterator for IterTypedEntries<'a, T> {
547 type Item = Result<(Key, Key, ItemType), Error>;
548
549 fn next(&mut self) -> Option<Self::Item> {
550 loop {
551 return match self.items.next()? {
552 Ok(item) => {
553 if item.namespace_index == 0 {
555 continue;
556 }
557
558 if item.type_ == ItemType::BlobData {
560 continue;
561 }
562
563 Some(Ok(self.item_to_entry(item)))
565 }
566 Err(err) => Some(Err(err)),
567 };
568 }
569 }
570}