use crate::Key;
use crate::error::Error;
#[cfg(feature = "debug-logs")]
use crate::raw::slice_with_nullbytes_to_str;
use crate::raw::{
ENTRIES_PER_PAGE, ENTRY_STATE_BITMAP_SIZE, EntryMapState, FLASH_SECTOR_SIZE, Item, ItemData,
ItemDataBlobIndex, ItemType, MAX_BLOB_DATA_PER_PAGE, MAX_BLOB_SIZE, PageHeader, PageHeaderRaw,
PageState, RawItem, RawPage, write_aligned,
};
use crate::u24::u24;
use crate::{Nvs, raw};
use alloc::string::{String, ToString};
use alloc::vec;
use alloc::vec::Vec;
use core::cmp;
use core::cmp::Ordering;
#[cfg(feature = "debug-logs")]
use core::fmt::{Debug, Formatter};
use core::mem::{self, offset_of, size_of};
use core::ops::Range;
use crate::error::Error::{ItemTypeMismatch, KeyNotFound, PageFull};
use crate::platform::{AlignedOps, Platform};
use alloc::collections::BTreeMap;
use core::ops::Not;
#[cfg(feature = "defmt")]
use defmt::trace;
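/// Maximum key length in bytes; the final byte of a `Key` must be the NUL
/// terminator.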
pub(crate) const MAX_KEY_LENGTH: usize = 15;
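// Blob bookkeeping collected while scanning sectors: per (namespace, version
// offset, key), the blob-index item seen so far (if any) plus the data chunks
// actually observed, grouped by page.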
type BlobIndexKey = (NamespaceIndex, VersionOffset, Key);
type BlobIndexValue = (Option<BlobIndexEntryBlobIndexData>, BlobObservedData);
type BlobIndex = BTreeMap<BlobIndexKey, BlobIndexValue>;
pub(crate) struct ItemIndex(pub(crate) u8);
struct PageSequence(u32);
#[derive(Ord, PartialOrd, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "debug-logs", derive(Debug))]
struct NamespaceIndex(u8);
impl From<u8> for ItemIndex {
fn from(value: u8) -> Self {
Self(value)
}
}
impl From<ItemIndex> for u8 {
fn from(val: ItemIndex) -> Self {
val.0
}
}
pub(crate) struct PageIndex(pub(crate) usize);
impl From<usize> for PageIndex {
fn from(value: usize) -> Self {
Self(value)
}
}
impl From<PageIndex> for usize {
fn from(val: PageIndex) -> Self {
val.0
}
}
#[derive(Clone)]
#[cfg_attr(feature = "debug-logs", derive(Debug))]
pub(crate) enum ChunkIndex {
Any,
BlobIndex,
BlobData(u8),
}
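/// Base offset for a blob's chunk indices. Rewrites alternate between V0
/// (0x00) and V1 (0x80) so a new version can be written out completely before
/// the old one is deleted.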
#[derive(PartialEq, Ord, PartialOrd, Eq, Clone)]
#[cfg_attr(feature = "debug-logs", derive(Debug))]
pub(crate) enum VersionOffset {
V0 = 0x00,
V1 = 0x80,
}
impl VersionOffset {
fn invert(&self) -> VersionOffset {
if *self == VersionOffset::V0 {
VersionOffset::V1
} else {
VersionOffset::V0
}
}
}
impl From<u8> for VersionOffset {
fn from(value: u8) -> Self {
if value < VersionOffset::V1 as u8 {
VersionOffset::V0
} else {
VersionOffset::V1
}
}
}
#[cfg_attr(feature = "debug-logs", derive(Debug))]
struct ChunkData {
page_sequence: u32,
chunk_count: u8,
data_size: u32,
}
#[cfg_attr(feature = "debug-logs", derive(Debug))]
struct BlobObservedData {
chunks_by_page: Vec<ChunkData>,
}
#[cfg_attr(feature = "debug-logs", derive(Debug))]
struct BlobIndexEntryBlobIndexData {
item_index: u8,
page_sequence: u32,
size: u32,
chunk_count: u8,
}
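/// Lightweight in-RAM view of one flash page (sector): parsed header fields,
/// the two-bit-per-entry state bitmap, and a hash list for key lookups. Item
/// payloads themselves stay in flash and are read on demand.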
pub(crate) struct ThinPage {
pub(crate) address: usize,
header: ThinPageHeader,
entry_state_bitmap: [u8; ENTRY_STATE_BITMAP_SIZE],
item_hash_list: Vec<ItemHashListEntry>,
erased_entry_count: u8,
used_entry_count: u8,
}
impl ThinPage {
pub(crate) fn uninitialized(address: usize) -> Self {
Self {
address,
            header: ThinPageHeader::uninitialized(),
            entry_state_bitmap: [0xFF; ENTRY_STATE_BITMAP_SIZE],
item_hash_list: vec![],
erased_entry_count: 0,
used_entry_count: 0,
}
}
pub(crate) fn initialize<T: Platform>(
&mut self,
hal: &mut T,
next_sequence: u32,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("initialize: @{:#08x}", self.address);
#[cfg(feature = "debug-logs")]
println!(" ThinPage: initialize");
let mut raw_header = PageHeader {
state: PageState::Active as u32,
sequence: next_sequence,
version: 0xFE,
_unused: [0xFF; 19],
crc: 0,
};
let crc = raw_header.calculate_crc32(T::crc32);
raw_header.crc = crc;
let raw_header = PageHeaderRaw {
page_header: raw_header,
};
write_aligned::<T>(hal, self.address as u32, unsafe { &raw_header.raw })
.map_err(|_| Error::FlashError)?;
self.header.state = ThinPageState::Active;
self.header.version = 0xFE;
self.header.sequence = next_sequence;
self.header.crc = crc;
Ok(())
}
pub(crate) fn mark_as_full<T: Platform>(&mut self, hal: &mut T) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("mark_as_full: @{:#08x}", self.address);
let raw = (PageState::Full as u32).to_le_bytes();
write_aligned(hal, self.address as u32, &raw).map_err(|_| Error::FlashError)?;
self.header.state = ThinPageState::Full;
Ok(())
}
pub(crate) fn load_item<T: Platform>(
&self,
hal: &mut T,
item_index: u8,
) -> Result<Item, Error> {
#[cfg(feature = "defmt")]
trace!("load_item: @{:#08x}[{}]", self.address, item_index);
let mut buf = [0u8; size_of::<Item>()];
hal.read(
(self.address + offset_of!(RawPage, items) + size_of::<Item>() * item_index as usize)
as _,
&mut buf,
)
.map_err(|_| Error::FlashError)?;
if buf.iter().all(|&it| it == 0xFF) {
return Err(KeyNotFound);
}
let item = unsafe { mem::transmute::<[u8; 32], Item>(buf) };
if item.crc != item.calculate_crc32(T::crc32) {
return Err(KeyNotFound);
}
Ok(item)
}
}
impl ThinPage {
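    /// Writes a single 32-byte item (header plus inline data) into the next
    /// free entry, updates the entry-state bitmap and hash list, and marks the
    /// page Full once the last entry is consumed. Namespace entries
    /// (namespace_index 0) are tracked separately and skip the hash list.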
#[allow(clippy::too_many_arguments)]
pub(crate) fn write_item<T: Platform>(
&mut self,
hal: &mut T,
namespace_index: u8,
key: Key,
type_: ItemType,
chunk_index: Option<u8>,
span: u8,
item_data: ItemData,
) -> Result<(), Error> {
let mut item = Item {
namespace_index,
type_,
span,
chunk_index: chunk_index.unwrap_or(u8::MAX),
crc: 0,
key,
data: item_data,
};
item.crc = item.calculate_crc32(T::crc32);
let item_index = self.get_next_free_entry();
let target_addr =
self.address + offset_of!(RawPage, items) + size_of::<Item>() * item_index;
        #[cfg(feature = "defmt")]
        trace!("write_item: @{:#08x}[{}]", self.address, item_index);
#[cfg(feature = "debug-logs")]
println!(" internal: write_item: target_addr: 0x{target_addr:0>8x}");
let raw_item = RawItem { item };
write_aligned(hal, target_addr as _, unsafe { &raw_item.raw })
.map_err(|_| Error::FlashError)?;
self.set_entry_state(hal, item_index, EntryMapState::Written)?;
self.used_entry_count += span;
if namespace_index != 0 {
self.item_hash_list.push(ItemHashListEntry {
hash: item.calculate_hash(T::crc32),
index: item_index as u8,
});
}
if self.get_next_free_entry() == ENTRIES_PER_PAGE {
self.mark_as_full::<T>(hal)?;
}
Ok(())
}
pub(crate) fn write_namespace<T: Platform>(
&mut self,
hal: &mut T,
key: Key,
value: u8,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("write_namespace: @{:#08x}", self.address);
        let mut buf = [u8::MAX; 8];
        buf[0] = value;
self.write_item::<T>(hal, 0, key, ItemType::U8, None, 1, ItemData { raw: buf })
}
pub(crate) fn write_variable_sized_item<T: Platform>(
&mut self,
hal: &mut T,
namespace_index: u8,
key: Key,
type_: ItemType,
chunk_index: Option<u8>,
data: &[u8],
) -> Result<(), Error> {
#[cfg(feature = "debug-logs")]
println!("internal: write_variable_sized_item");
        // Number of 32-byte entries the payload occupies, rounded up.
        let data_entries = data.len().div_ceil(size_of::<Item>());
let span = data_entries + 1;
if span > ENTRIES_PER_PAGE {
return Err(Error::ValueTooLong);
}
if span > self.get_free_entry_count() {
return Err(PageFull);
}
let start_index = self.get_next_free_entry();
let item_data = ItemData {
sized: raw::ItemDataSized::new(data.len() as _, T::crc32(u32::MAX, data)),
};
let mut item = Item {
namespace_index,
type_,
span: span as u8,
chunk_index: chunk_index.unwrap_or(u8::MAX),
crc: 0,
key,
data: item_data,
};
item.crc = item.calculate_crc32(T::crc32);
#[cfg(feature = "defmt")]
trace!(
"write_variable_sized_item: @{:#08x}[{}-{}]",
self.address,
start_index,
start_index + span - 1
);
let header_addr =
self.address + offset_of!(RawPage, items) + size_of::<Item>() * start_index;
let raw_item = RawItem { item };
write_aligned(hal, header_addr as _, unsafe { &raw_item.raw })
.map_err(|_| Error::FlashError)?;
let data_addr = header_addr + size_of::<Item>();
write_aligned(hal, data_addr as _, data).map_err(|_| Error::FlashError)?;
self.set_entry_state_range(
hal,
start_index as u8..(start_index + span) as u8,
EntryMapState::Written,
)?;
self.item_hash_list.push(ItemHashListEntry {
hash: item.calculate_hash(T::crc32),
index: start_index as u8,
});
self.used_entry_count += span as u8;
if start_index + span == ENTRIES_PER_PAGE {
self.mark_as_full::<T>(hal)?;
}
Ok(())
}
fn load_referenced_data<T: Platform>(
&self,
hal: &mut T,
item_index: u8,
item: &Item,
) -> Result<Vec<u8>, Error> {
#[cfg(feature = "defmt")]
trace!(
"load_referenced_data: @{:#08x}[{}-{}]",
self.address,
item_index + 1,
item_index + item.span
);
#[cfg(feature = "debug-logs")]
println!("internal: load_item_data");
match item.type_ {
ItemType::Sized | ItemType::BlobData => {}
_ => return Err(ItemTypeMismatch(item.type_)),
}
let size = unsafe { item.data.sized.size } as usize;
let aligned_size = T::align_read(size);
        // Read into a zero-initialized buffer; handing `hal.read` a
        // `set_len`-extended, uninitialized Vec would be unsound.
        let mut buf = vec![0u8; aligned_size];
hal.read(
(self.address
+ offset_of!(RawPage, items)
+ size_of::<Item>() * (item_index as usize + 1)) as _,
&mut buf,
)
.map_err(|_| Error::FlashError)?;
        buf.truncate(size);
Ok(buf)
}
fn set_entry_state<T: Platform>(
&mut self,
hal: &mut T,
item_index: usize,
state: EntryMapState,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!(
"set_entry_state: @{:#08x}[{}]: {}",
self.address, item_index, state
);
#[cfg(feature = "debug-logs")]
println!("internal: set_entry_state");
self.set_entry_state_range(hal, (item_index as u8)..(item_index as u8 + 1), state)
}
fn get_entry_state(&self, item_index: u8) -> EntryMapState {
let idx = item_index / 4;
let byte = self.entry_state_bitmap[idx as usize];
let two_bits = (byte >> ((item_index % 4) * 2)) & 0b11;
let state = EntryMapState::from_repr(two_bits).unwrap();
#[cfg(feature = "defmt")]
trace!(
"get_entry_state: @{:#08x}[{}]: {}",
self.address, item_index, state
);
#[cfg(feature = "debug-logs")]
println!(
"internal: get_item_state @{:#08x}[{item_index}]: {state:?}",
self.address,
);
state
}
fn set_entry_state_range<T: Platform>(
&mut self,
hal: &mut T,
indices: Range<u8>,
state: EntryMapState,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!(
"set_entry_state_range: @{:#08x}[{}-{}]: {}",
self.address, indices.start, indices.end, state
);
let raw_state = state as u8;
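        // Two bits encode each entry's state. NOR flash writes can only clear
        // bits, so the new state is AND-ed into the shadow bitmap and the
        // touched bytes are rewritten in place.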
for item_index in indices.clone() {
let mask = 0b11u8 << ((item_index % 4) * 2);
let bits = raw_state << ((item_index % 4) * 2);
let masked_bits = bits | !mask;
let offset_in_map = item_index / 4;
self.entry_state_bitmap[offset_in_map as usize] &= masked_bits;
}
let start_byte = (indices.start / 4) as usize;
let end_byte = ((indices.end - 1) / 4) as usize;
let aligned_start_byte = T::align_write_floor(start_byte);
let aligned_end_byte = T::align_write_ceil(end_byte + 1);
let offset_in_raw_flash =
self.address + offset_of!(RawPage, entry_state_bitmap) + start_byte;
let aligned_offset_in_raw_flash = T::align_write_floor(offset_in_raw_flash) as _;
#[cfg(feature = "debug-logs")]
println!(
" internal: set_entry_state_range: {:>3}..<{:>3} [0x{offset_in_raw_flash:0>4x}]",
indices.start, indices.end
);
write_aligned(
hal,
aligned_offset_in_raw_flash,
&self.entry_state_bitmap[aligned_start_byte..aligned_end_byte],
)
.map_err(|_| Error::FlashError)
}
fn get_next_free_entry(&self) -> usize {
self.used_entry_count as usize + self.erased_entry_count as usize
}
fn get_free_entry_count(&self) -> usize {
ENTRIES_PER_PAGE - self.get_next_free_entry()
}
fn is_full(&self) -> bool {
self.get_next_free_entry() == ENTRIES_PER_PAGE
}
pub(crate) fn get_state(&self) -> &ThinPageState {
&self.header.state
}
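    /// Tallies the entry-state bitmap; returns `(empty, written, erased,
    /// illegal)` counts.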
pub(crate) fn get_entry_statistics(&self) -> (u32, u32, u32, u32) {
let mut empty = 0u32;
let mut written = 0u32;
let mut erased = 0u32;
let mut illegal = 0u32;
for i in 0..ENTRIES_PER_PAGE as u8 {
match self.get_entry_state(i) {
EntryMapState::Empty => empty += 1,
EntryMapState::Written => written += 1,
EntryMapState::Erased => erased += 1,
EntryMapState::Illegal => illegal += 1,
}
}
(empty, written, erased, illegal)
}
pub(crate) fn erase_item<T: Platform>(
&mut self,
hal: &mut T,
item_index: u8,
span: u8,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!(
"erase_item: @{:#08x}[{}-{}]",
self.address,
item_index,
item_index + span
);
#[cfg(feature = "debug-logs")]
println!("internal: erase_item");
self.set_entry_state_range(hal, item_index..(item_index + span), EntryMapState::Erased)?;
self.erased_entry_count += span;
self.used_entry_count -= span;
self.item_hash_list
.retain(|entry| entry.index != item_index);
Ok(())
}
}
impl PartialEq<Self> for ThinPage {
fn eq(&self, other: &Self) -> bool {
self.address == other.address
}
}
impl PartialOrd<Self> for ThinPage {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for ThinPage {}
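// Ordering drives page selection: uninitialized pages compare greater than all
// others (by descending address among themselves), and initialized pages sort
// by descending sequence, presumably so a sorted list pops the preferred page
// first. Note this deliberately ignores the address-based `PartialEq` above.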
impl Ord for ThinPage {
fn cmp(&self, other: &Self) -> Ordering {
match (&self.header.state, &other.header.state) {
(ThinPageState::Uninitialized, ThinPageState::Uninitialized) => {
other.address.cmp(&self.address)
}
(ThinPageState::Uninitialized, _) => Ordering::Greater,
(_, ThinPageState::Uninitialized) => Ordering::Less,
(_, _) => other.header.sequence.cmp(&self.header.sequence),
}
}
}
#[cfg(feature = "debug-logs")]
impl Debug for ThinPage {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
let address = self.address;
let header = &self.header;
f.write_fmt(format_args!(
"Page {{ address: 0x{address:0>8x} {header:?} "
))?;
match header.state {
ThinPageState::Full | ThinPageState::Active => (),
_ => {
return f.write_fmt(format_args!("}}"));
}
}
let erased_entry_count = self.erased_entry_count;
let used_entry_count = self.used_entry_count;
let entry_hash_list_len = self.item_hash_list.len();
f.write_fmt(format_args!("erased_entry_count: {erased_entry_count}, used_entry_count: {used_entry_count}, entry_hash_list_len: {entry_hash_list_len}}}"))
}
}
pub(crate) struct ThinPageHeader {
pub(crate) state: ThinPageState,
pub(crate) sequence: u32,
pub(crate) version: u8,
pub(crate) crc: u32,
}
impl ThinPageHeader {
    fn uninitialized() -> Self {
Self {
state: ThinPageState::Uninitialized,
sequence: 0,
version: 0,
crc: 0,
}
}
}
#[cfg(feature = "debug-logs")]
impl Debug for ThinPageHeader {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
let state = &self.state;
let sequence = self.sequence;
let version = self.version;
let crc = self.crc;
match state {
ThinPageState::Full | ThinPageState::Active => {
f.write_fmt(format_args!("PageHeader {{ state: {state:>13}, sequence: {sequence:>4}, version: 0x{version:0>2x}, crc: 0x{crc:0>4x}}}"))
}
_ => f.write_fmt(format_args!("PageHeader {{ state: {state:>13} }}"))
}
}
}
#[derive(strum::Display, PartialEq)]
pub(crate) enum ThinPageState {
Uninitialized,
Active,
Full,
Freeing,
Corrupt,
Invalid,
}
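/// Maps a truncated item-identity hash to its entry index so keys can be
/// located without re-reading every item from flash.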
struct ItemHashListEntry {
pub(crate) hash: u24,
pub(crate) index: u8,
}
enum LoadPageResult {
Empty(ThinPage),
Used(ThinPage, Vec<Namespace>, BlobIndex),
}
struct Namespace {
name: Key,
index: u8,
}
impl<T> Nvs<'_, T>
where
T: Platform,
{
pub(crate) fn get_primitive(
&mut self,
namespace: &Key,
key: &Key,
type_: ItemType,
) -> Result<u64, Error> {
#[cfg(feature = "defmt")]
trace!("get_primitive");
#[cfg(feature = "debug-logs")]
println!("internal: get_primitive");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
let namespace_index = *self
.namespaces
.get(namespace)
.ok_or(Error::NamespaceNotFound)?;
let (_, _, item) = self.load_item(namespace_index, ChunkIndex::Any, key)?;
if item.type_ != type_ {
return Err(ItemTypeMismatch(item.type_));
}
Ok(u64::from_le_bytes(unsafe { item.data.raw }))
}
pub(crate) fn get_string(&mut self, namespace: &Key, key: &Key) -> Result<String, Error> {
#[cfg(feature = "defmt")]
trace!("get_string");
#[cfg(feature = "debug-logs")]
println!("internal: get_string");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
let namespace_index = *self
.namespaces
.get(namespace)
.ok_or(Error::NamespaceNotFound)?;
let (page_index, item_index, item) =
self.load_item(namespace_index, ChunkIndex::Any, key)?;
if item.type_ != ItemType::Sized {
return Err(ItemTypeMismatch(item.type_));
}
let page = &self.pages[page_index.0];
let data = page.load_referenced_data(self.hal, item_index.0, &item)?;
let crc = unsafe { item.data.sized.crc };
if crc != T::crc32(u32::MAX, &data) {
            return Err(Error::CorruptedData);
}
        if data.is_empty() {
            return Err(Error::CorruptedData);
        }
        let s = core::str::from_utf8(&data[..data.len() - 1]).map_err(|_| Error::CorruptedData)?;
        Ok(s.to_string())
}
pub(crate) fn get_blob(&mut self, namespace: &Key, key: &Key) -> Result<Vec<u8>, Error> {
#[cfg(feature = "defmt")]
trace!("get_blob");
#[cfg(feature = "debug-logs")]
println!("internal: get_blob");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
let namespace_index = *self
.namespaces
.get(namespace)
.ok_or(Error::NamespaceNotFound)?;
let (_page_index, _item_index, item) =
self.load_item(namespace_index, ChunkIndex::Any, key)?;
if item.type_ != ItemType::BlobIndex {
return Err(ItemTypeMismatch(item.type_));
}
let size = unsafe { item.data.blob_index.size };
if size as usize > MAX_BLOB_SIZE {
return Err(Error::CorruptedData);
}
let chunk_count = unsafe { item.data.blob_index.chunk_count };
let chunk_start = unsafe { item.data.blob_index.chunk_start };
let mut buf = vec![0u8; size as usize];
let mut offset = 0usize;
for chunk in chunk_start..chunk_start + chunk_count {
            if offset >= buf.len() {
                return Err(Error::CorruptedData);
            }
let (page_index, item_index, item) =
self.load_item(namespace_index, ChunkIndex::BlobData(chunk), key)?;
if item.type_ != ItemType::BlobData {
return Err(ItemTypeMismatch(item.type_));
}
let page = &self.pages[page_index.0];
let data = page.load_referenced_data(self.hal, item_index.0, &item)?;
let data_crc = unsafe { item.data.sized.crc };
if data_crc != T::crc32(u32::MAX, &data) {
return Err(Error::CorruptedData);
}
let read_bytes = data.len().min(buf.len() - offset);
buf[offset..offset + read_bytes].copy_from_slice(&data[..read_bytes]);
offset += read_bytes;
}
Ok(buf)
}
pub(crate) fn delete_key(
&mut self,
namespace_index: u8,
key: &Key,
chunk_index: ChunkIndex,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("delete_key");
#[cfg(feature = "debug-logs")]
println!("internal: delete_key");
let (page_index, item_index, item) =
self.load_item(namespace_index, chunk_index.clone(), key)?;
let page = self.pages.get_mut(page_index.0).unwrap();
page.erase_item::<T>(self.hal, item_index.0, item.span)?;
if item.type_ == ItemType::BlobIndex {
self.delete_blob_data(item.namespace_index, key, unsafe {
VersionOffset::from(item.data.blob_index.chunk_start)
})?;
}
Ok(())
}
fn delete_blob_data(
&mut self,
namespace_index: u8,
key: &Key,
chunk_start: VersionOffset,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("delete_blob_data");
#[cfg(feature = "debug-logs")]
println!("internal: delete_blob_data");
let raw_chunk_start = chunk_start.clone() as u8;
for chunk in raw_chunk_start..(raw_chunk_start + (VersionOffset::V1 as u8 - 1)) {
match self.delete_key(namespace_index, key, ChunkIndex::BlobData(chunk)) {
Ok(_) => continue,
Err(Error::KeyNotFound) => {
#[cfg(feature = "debug-logs")]
println!("internal: delete_blob_data: chunk {} not found", chunk);
continue;
}
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
fn blob_is_equal(
&mut self,
namespace_index: u8,
key: &Key,
blob_item: &Item,
data: &[u8],
) -> Result<bool, Error> {
#[cfg(feature = "defmt")]
trace!("blob_is_equal");
#[cfg(feature = "debug-logs")]
println!("internal: blob_is_equal");
let blob_index_data = unsafe { blob_item.data.blob_index };
if blob_index_data.size as usize != data.len() {
return Ok(false);
}
let mut to_be_compared = data;
let chunks = blob_index_data.chunk_count;
let chunk_start = blob_index_data.chunk_start;
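        // Walk the chunks back to front: each chunk's stored size says how
        // long a tail of `data` it must match, so the comparison window
        // shrinks from the end.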
for chunk_index in (chunk_start..chunk_start + chunks).rev() {
            let (page_index, item_index, item) =
                self.load_item(namespace_index, ChunkIndex::BlobData(chunk_index), key)?;
if item.type_ != ItemType::BlobData {
return Ok(false);
}
let sized = unsafe { item.data.sized };
if sized.size as usize > to_be_compared.len() {
return Ok(false);
}
            let page = &self.pages[page_index.0];
let chunk_data = page.load_referenced_data(self.hal, item_index.0, &item)?;
if sized.crc != T::crc32(u32::MAX, &chunk_data) {
return Ok(false);
}
let offset = to_be_compared.len() - sized.size as usize;
let expected_chunk_data = &to_be_compared[offset..];
if chunk_data != expected_chunk_data {
return Ok(false);
}
to_be_compared = &to_be_compared[..offset];
}
Ok(true)
}
fn find_existing_blob_version(&mut self, namespace: &Key, key: &Key) -> Option<VersionOffset> {
#[cfg(feature = "defmt")]
trace!("find_existing_blob_version");
#[cfg(feature = "debug-logs")]
println!("internal: find_existing_blob_version");
        let namespace_index = *self.namespaces.get(namespace)?;
match self.load_item(namespace_index, ChunkIndex::Any, key) {
Ok((_page_index, _item_index, item)) => {
if item.type_ == ItemType::BlobIndex {
Some(VersionOffset::from(unsafe {
item.data.blob_index.chunk_start
}))
} else {
None
}
}
Err(_) => None,
}
}
pub(crate) fn set_primitive(
&mut self,
namespace: &Key,
key: Key,
type_: ItemType,
value: u64,
) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("set_primitive");
#[cfg(feature = "debug-logs")]
println!("internal: set_primitive");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
let width = type_.get_primitive_bytes_width()?;
let mut raw_value = [0xFF; 8];
raw_value[..width].copy_from_slice(&value.to_le_bytes()[..width]);
let mut page = self.get_active_page()?;
let namespace_index = self.get_or_create_namespace(namespace, &mut page)?;
        if page.is_full() {
            page.mark_as_full(self.hal)?;
            // Keep the full page in the page list before fetching a new one.
            self.pages.push(page);
            page = self.get_active_page()?;
        }
self.pages.push(page);
let old_entry_location = if let Ok((page_index, item_index, item)) =
self.load_item(namespace_index, ChunkIndex::Any, &key)
{
if unsafe { item.data.raw } == raw_value {
#[cfg(feature = "debug-logs")]
println!("internal: set_primitive: entry already exists and matches");
return Ok(());
}
#[cfg(feature = "debug-logs")]
println!("internal: set_primitive: entry already exists and needs to be removed");
Some((page_index, item_index))
} else {
None
};
page = self.pages.pop().unwrap();
page.write_item::<T>(
self.hal,
namespace_index,
key,
type_,
None,
1,
ItemData { raw: raw_value },
)?;
self.pages.push(page);
if let Some((page_index, item_index)) = old_entry_location {
let old_page = self.pages.get_mut(page_index.0).unwrap();
old_page.erase_item(self.hal, item_index.0, 1)?;
}
Ok(())
}
pub(crate) fn set_str(&mut self, namespace: &Key, key: Key, value: &str) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("set_str");
#[cfg(feature = "debug-logs")]
println!("internal: set_str");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
if value.len() + 1 > MAX_BLOB_DATA_PER_PAGE {
return Err(Error::ValueTooLong);
}
let mut buf = Vec::with_capacity(value.len() + 1);
buf.extend_from_slice(value.as_bytes());
buf.push(b'\0');
let old_entry_location = if let Some(&namespace_index) = self.namespaces.get(namespace) {
match self.load_item(namespace_index, ChunkIndex::Any, &key) {
Ok((page_index, item_index, item)) => {
if item.type_ != ItemType::Sized {
Some((page_index, item_index))
} else {
let page = &self.pages[page_index.0];
let data = page.load_referenced_data(self.hal, item_index.0, &item)?;
let crc = unsafe { item.data.sized.crc };
if crc == T::crc32(u32::MAX, &buf) && data == buf {
return Ok(());
}
Some((page_index, item_index))
}
}
Err(Error::FlashError) => return Err(Error::FlashError),
Err(_) => None,
}
} else {
None
};
let mut page = self.get_active_page()?;
let namespace_index = self.get_or_create_namespace(namespace, &mut page)?;
match page.write_variable_sized_item::<T>(
self.hal,
namespace_index,
key,
ItemType::Sized,
None,
&buf,
) {
Ok(_) => {}
Err(Error::PageFull) => {
page.mark_as_full::<T>(self.hal)?;
self.pages.push(page);
page = self.get_active_page()?;
page.write_variable_sized_item::<T>(
self.hal,
namespace_index,
key,
ItemType::Sized,
None,
&buf,
)?;
}
Err(e) => return Err(e),
}
self.pages.push(page);
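        // Delete by key instead of the recorded indices: page positions may
        // have shifted while swapping the active page. The scan order in
        // load_item() reaches the older duplicate first (earlier page, or an
        // earlier hash-list slot), so the entry just written survives.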
if let Some((_page_index, _item_index)) = old_entry_location {
self.delete_key(namespace_index, &key, ChunkIndex::Any)?;
}
Ok(())
}
pub(crate) fn set_blob(&mut self, namespace: &Key, key: Key, data: &[u8]) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("set_blob");
#[cfg(feature = "debug-logs")]
println!("internal: set_blob");
if key.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::KeyMalformed);
}
if namespace.0[MAX_KEY_LENGTH] != b'\0' {
return Err(Error::NamespaceMalformed);
}
        if data.len() > MAX_BLOB_SIZE {
return Err(Error::ValueTooLong);
}
let old_blob_version = self.find_existing_blob_version(namespace, &key);
        let should_write = if let Some(&namespace_index) = self.namespaces.get(namespace) {
            match self.load_item(namespace_index, ChunkIndex::Any, &key) {
                Ok((_page_index, _item_index, item)) => {
                    if item.type_ != ItemType::BlobIndex {
                        true
                    } else {
                        !self.blob_is_equal(namespace_index, &key, &item, data)?
                    }
                }
                Err(_) => true,
            }
        } else {
            true
        };
if !should_write {
return Ok(());
}
let mut page = self.get_active_page()?;
let namespace_index = self.get_or_create_namespace(namespace, &mut page)?;
self.pages.push(page);
let new_version_offset = match &old_blob_version {
Some(old_offset) => old_offset.invert(),
None => VersionOffset::V0,
};
let version_base = new_version_offset.clone() as u8;
let mut chunk_count = 0u8;
let mut offset = 0usize;
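        // Stream the blob out in chunks sized to the active page's remaining
        // entries; one entry per chunk is reserved for its header item.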
while offset < data.len() {
let mut page = self.get_active_page()?;
let free_entries = page.get_free_entry_count();
if free_entries <= 1 {
page.mark_as_full::<T>(self.hal)?;
self.pages.push(page);
continue;
}
let data_len = cmp::min((free_entries - 1) * size_of::<Item>(), data.len() - offset);
match page.write_variable_sized_item::<T>(
self.hal,
namespace_index,
key,
ItemType::BlobData,
Some(version_base + chunk_count),
&data[offset..offset + data_len],
) {
Ok(_) => {
offset += data_len;
chunk_count += 1;
self.pages.push(page);
}
Err(Error::PageFull) => {
page.mark_as_full::<T>(self.hal)?;
self.pages.push(page);
continue;
}
Err(e) => return Err(e),
}
}
let mut page = self.get_active_page()?;
let item_data = raw::ItemData {
blob_index: ItemDataBlobIndex {
size: data.len() as u32,
chunk_count,
chunk_start: version_base,
},
};
page.write_item::<T>(
self.hal,
namespace_index,
key,
ItemType::BlobIndex,
None,
1,
item_data,
)?;
self.pages.push(page);
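        // As in set_str(), the hash scan finds the older blob index first;
        // delete_key() then cascades into deleting that version's data chunks.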
if let Some(_old_version) = old_blob_version {
self.delete_key(namespace_index, &key, ChunkIndex::BlobIndex)?;
}
Ok(())
}
pub(crate) fn get_active_page(&mut self) -> Result<ThinPage, Error> {
#[cfg(feature = "defmt")]
trace!("get_active_page");
let page = self
.pages
.pop_if(|page| page.header.state == ThinPageState::Active);
if let Some(page) = page {
return Ok(page);
}
if self.free_pages.len() == 1 {
self.defragment()?;
}
let page = self
.pages
.pop_if(|page| page.header.state == ThinPageState::Active);
if let Some(page) = page {
return Ok(page);
}
if self.free_pages.len() == 1 {
return Err(Error::FlashFull);
}
        let mut page = self.free_pages.pop().ok_or(Error::FlashFull)?;
if page.header.state != ThinPageState::Uninitialized {
self.hal
.erase(
page.address as _,
(page.address + raw::FLASH_SECTOR_SIZE) as _,
)
.map_err(|_| Error::FlashError)?;
}
let next_sequence = self.get_next_sequence();
page.initialize(self.hal, next_sequence)?;
Ok(page)
}
fn get_next_sequence(&self) -> u32 {
match self.pages.iter().map(|page| page.header.sequence).max() {
Some(current) => current + 1,
None => 0,
}
}
fn get_or_create_namespace(
&mut self,
namespace: &Key,
page: &mut ThinPage,
) -> Result<u8, Error> {
#[cfg(feature = "defmt")]
trace!("get_or_create_namespace");
#[cfg(feature = "debug-logs")]
println!("internal: get_or_create_namespace");
let namespace_index = match self.namespaces.get(namespace) {
Some(ns_idx) => *ns_idx,
None => {
let namespace_index = self
.namespaces
.iter()
.max_by_key(|(_, idx)| **idx)
.map_or(1, |(_, idx)| idx + 1);
page.write_namespace(self.hal, *namespace, namespace_index)?;
self.namespaces.insert(*namespace, namespace_index);
namespace_index
}
};
Ok(namespace_index)
}
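    /// Locates an item by its identity hash across all pages. Hash collisions
    /// are resolved by re-checking namespace, key, and chunk index against the
    /// item loaded from flash.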
pub(crate) fn load_item(
&mut self,
namespace_index: u8,
chunk_index: ChunkIndex,
key: &Key,
) -> Result<(PageIndex, ItemIndex, Item), Error> {
#[cfg(feature = "defmt")]
trace!("load_item");
#[cfg(feature = "debug-logs")]
println!("internal: load_item {chunk_index:?}");
        let item_chunk_index = match chunk_index {
            ChunkIndex::Any | ChunkIndex::BlobIndex => 0xFF,
            ChunkIndex::BlobData(idx) => idx,
        };
let hash = Item::calculate_hash_ref(T::crc32, namespace_index, key, item_chunk_index);
#[cfg(feature = "debug-logs")]
println!("looking for hash {hash:?}");
for (page_index, page) in self.pages.iter().enumerate() {
for cache_entry in &page.item_hash_list {
if cache_entry.hash == hash {
let item: Item = page.load_item(self.hal, cache_entry.index)?;
if item.namespace_index != namespace_index
|| item.key != *key
|| item.chunk_index != item_chunk_index
{
continue;
}
return Ok((page_index.into(), cache_entry.index.into(), item));
}
}
}
Err(KeyNotFound)
}
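    /// Scans every sector, rebuilding the page list, namespace map, and blob
    /// index, then finishes any interrupted page-freeing operation and cleans
    /// up duplicate entries and inconsistent blobs.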
pub(crate) fn load_sectors(&mut self) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("load_sectors");
#[cfg(feature = "debug-logs")]
println!("internal: load_sectors");
let mut blob_index = BlobIndex::new();
let sectors = self.sectors as usize;
for sector_idx in 0..sectors {
let sector_addr = self.base_address + sector_idx * FLASH_SECTOR_SIZE;
match self.load_sector(sector_addr)? {
LoadPageResult::Empty(page) => self.free_pages.push(page),
LoadPageResult::Used(page, new_namespaces, new_blob_index) => {
self.pages.push(page);
new_namespaces.into_iter().for_each(|ns| {
self.namespaces.insert(ns.name, ns.index);
});
new_blob_index.into_iter().for_each(|(key, val)| {
match blob_index.get_mut(&key) {
Some(existing) => {
if let Some(index) = val.0 {
existing.0 = Some(index);
}
existing.1.chunks_by_page.extend(val.1.chunks_by_page);
}
None => {
blob_index.insert(key, val);
}
}
})
}
};
}
#[cfg(feature = "debug-logs")]
println!("internal: load_sectors: blob_index: {:?}", blob_index);
self.continue_free_page()?;
self.cleanup_duplicate_entries()?;
self.cleanup_dirty_blobs(blob_index)?;
Ok(())
}
fn cleanup_dirty_blobs(&mut self, mut blob_index: BlobIndex) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("cleanup_dirty_blobs");
while let Some(((namespace_index, chunk_start, key), (index, observed))) =
blob_index.pop_first()
{
if let Some(index) = index {
let (chunk_count, data_size) = observed.chunks_by_page.iter().fold(
(0u8, 0u32),
|(count, size), chunk_data| {
(count + chunk_data.chunk_count, size + chunk_data.data_size)
},
);
if index.chunk_count != chunk_count || index.size != data_size {
#[cfg(feature = "debug-logs")]
println!(
"internal: load_sectors: blob index data doesn't match observed data {index:?} (expected: chunk_count={}, data_size={}, got: chunk_count={}, data_size={})",
index.chunk_count, index.size, chunk_count, data_size
);
self.delete_key(namespace_index.0, &key, ChunkIndex::BlobIndex)?;
self.delete_blob_data(namespace_index.0, &key, chunk_start)?;
continue;
            } else if let Some(other) =
                blob_index.get(&(namespace_index, chunk_start.invert(), key))
                && let Some(other_index) = &other.0
            {
                let other_is_newer = other_index.page_sequence > index.page_sequence
                    || (index.page_sequence == other_index.page_sequence
                        && other_index.item_index > index.item_index);
                let (other_sequence, other_item_index) =
                    (other_index.page_sequence, other_index.item_index);
                // Both duplicate blob indices hash identically (namespace,
                // key, chunk index 0xFF), so a hash-based delete_key() could
                // remove either one. Erase the stale (older) index directly
                // via its page sequence and item index instead.
                let (stale_sequence, stale_item_index, stale_version) = if other_is_newer {
                    #[cfg(feature = "debug-logs")]
                    println!(
                        "internal: load_sectors: found two blob indices for the same key, deleting the older current one (seq: {} vs {})",
                        index.page_sequence, other_sequence
                    );
                    (index.page_sequence, index.item_index, chunk_start)
                } else {
                    #[cfg(feature = "debug-logs")]
                    println!(
                        "internal: load_sectors: found two blob indices for the same key, deleting the older other one (seq: {} vs {})",
                        other_sequence, index.page_sequence
                    );
                    // The other entry is removed from flash here, so drop its
                    // pending map entry as well.
                    blob_index.remove(&(namespace_index, chunk_start.invert(), key));
                    (other_sequence, other_item_index, chunk_start.invert())
                };
                if let Some(page) = self
                    .pages
                    .iter_mut()
                    .find(|page| page.header.sequence == stale_sequence)
                {
                    page.erase_item::<T>(self.hal, stale_item_index, 1)?;
                }
                self.delete_blob_data(namespace_index.0, &key, stale_version)?;
            }
} else {
#[cfg(feature = "debug-logs")]
println!(
"internal: load_sectors: found orphaned blob data. key: '{}', chunk_start: {}",
slice_with_nullbytes_to_str(&key.0),
chunk_start.clone() as u8
);
self.delete_blob_data(namespace_index.0, &key, chunk_start)?;
}
}
Ok(())
}
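    /// Resumes a page-freeing operation that was interrupted by a reset: any
    /// page still in the `Freeing` state has its items copied to an active (or
    /// freshly initialized) page and is then erased.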
fn continue_free_page(&mut self) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("continue_free_page");
let source_page = match self
.pages
.iter()
.position(|it| it.header.state == ThinPageState::Freeing)
{
None => return Ok(()),
Some(idx) => self.pages.swap_remove(idx),
};
let target_page = match self
.pages
.iter()
.position(|it| it.header.state == ThinPageState::Active)
{
Some(idx) => self.pages.swap_remove(idx),
            None => {
                let mut page = self.free_pages.pop().ok_or(Error::FlashFull)?;
                if page.header.state != ThinPageState::Uninitialized {
                    // erase_page() pushes a fresh uninitialized page onto
                    // free_pages; take that one as the target.
                    self.erase_page(page)?;
                    page = self.free_pages.pop().ok_or(Error::FlashFull)?;
                }
                // Write a header in either case so the copied items land on a
                // valid Active page.
                page.initialize(self.hal, self.get_next_sequence())?;
                page
            }
};
self.copy_items(&source_page, target_page)?;
self.erase_page(source_page)?;
Ok(())
}
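    /// Removes stale duplicates of scalar and sized entries that can remain
    /// after an interrupted overwrite; for each key the entry with the highest
    /// (page sequence, entry index) is kept.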
fn cleanup_duplicate_entries(&mut self) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("cleanup_duplicate_entries");
#[cfg(feature = "debug-logs")]
println!("internal: cleanup_duplicate_entries");
let mut hash_to_item: BTreeMap<u24, Vec<(PageIndex, ItemIndex, PageSequence)>> =
BTreeMap::new();
for (page_idx, page) in self.pages.iter().enumerate() {
for hash_entry in &page.item_hash_list {
hash_to_item.entry(hash_entry.hash).or_default().push((
PageIndex(page_idx),
ItemIndex(hash_entry.index),
PageSequence(page.header.sequence),
));
}
}
for (_hash, entries) in hash_to_item {
            if entries.len() <= 1 {
                continue;
            }
let mut items: Vec<_> = Vec::with_capacity(entries.len());
for (page_idx, item_index, page_seq) in entries {
let page = &self.pages[page_idx.0];
let item = page.load_item(self.hal, item_index.0)?;
if item.namespace_index == 0
|| item.type_ == ItemType::BlobIndex
|| item.type_ == ItemType::BlobData
{
continue;
}
items.push((
(NamespaceIndex(item.namespace_index), item.key),
(page_idx, item_index, page_seq, item.span),
));
}
let mut key_groups = BTreeMap::<_, Vec<_>>::new();
for (key, val) in items {
key_groups.entry(key).or_default().push(val);
}
for (_key, mut group) in key_groups {
if group.len() <= 1 {
continue;
}
group.sort_by_key(|(_, ItemIndex(idx), PageSequence(seq), _)| (*seq, *idx));
                // Keep only the newest entry (highest sequence, then index);
                // erase all older duplicates.
                let erase_count = group.len() - 1;
                for (PageIndex(page_index), ItemIndex(item_index), _, span) in
                    group.into_iter().take(erase_count)
                {
let page = self.pages.get_mut(page_index).unwrap();
page.erase_item::<T>(self.hal, item_index, span)?;
}
}
}
Ok(())
}
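    /// Reclaims space by relocating the live items of the page with the most
    /// erased entries (weighted by page age) and erasing it afterwards.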
fn defragment(&mut self) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("defragment");
#[cfg(feature = "debug-logs")]
println!("internal: defragment");
let next_sequence = self.get_next_sequence();
let next_page = self
.pages
.iter()
.enumerate()
            .filter(|(_idx, page)| page.erased_entry_count != 0)
            .map(|(idx, page)| {
                // Weight reclaimable entries heavily, with page age as a
                // tie-breaker so older pages get recycled first.
                let points =
                    page.erased_entry_count as u32 * 10 + (next_sequence - page.header.sequence);
                (points, idx)
            })
            .max_by_key(|(points, _idx)| *points)
            .map(|(_, idx)| idx)
            .ok_or(Error::FlashFull)?;
let page = self.pages.swap_remove(next_page);
match page.header.state {
ThinPageState::Uninitialized => unreachable!(),
ThinPageState::Active => unreachable!(),
ThinPageState::Full => {
if page.erased_entry_count != ENTRIES_PER_PAGE as _ {
self.free_page(&page, next_sequence)?;
}
self.erase_page(page)?;
}
            ThinPageState::Freeing => unreachable!(),
            ThinPageState::Corrupt | ThinPageState::Invalid => {
                self.erase_page(page)?;
            }
}
Ok(())
}
fn erase_page(&mut self, page: ThinPage) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("erase_page");
#[cfg(feature = "debug-logs")]
println!("internal: erase_page");
self.hal
.erase(page.address as _, (page.address + FLASH_SECTOR_SIZE) as _)
.map_err(|_| Error::FlashError)?;
self.free_pages.push(ThinPage::uninitialized(page.address));
Ok(())
}
fn free_page(&mut self, source: &ThinPage, next_sequence: u32) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("free_page");
#[cfg(feature = "debug-logs")]
println!("internal: copy_entries_to_reserve_page");
let raw = (PageState::Freeing as u32).to_le_bytes();
write_aligned(self.hal, source.address as u32, &raw).map_err(|_| Error::FlashError)?;
let mut target = self.free_pages.pop().ok_or(Error::FlashFull)?;
if target.header.state != ThinPageState::Uninitialized {
self.hal
.erase(
target.address as _,
(target.address + FLASH_SECTOR_SIZE) as _,
)
.map_err(|_| Error::FlashError)?;
}
target.initialize(self.hal, next_sequence)?;
self.copy_items(source, target)?;
Ok(())
}
fn copy_items(&mut self, source: &ThinPage, mut target: ThinPage) -> Result<(), Error> {
#[cfg(feature = "defmt")]
trace!("copy_items");
let mut last_copied_entry = match target.item_hash_list.iter().max_by_key(|it| it.index) {
Some(hash_entry) => Some(target.load_item(self.hal, hash_entry.index)?),
None => None,
};
let mut item_index = 0u8;
while item_index < ENTRIES_PER_PAGE as u8 {
if source.get_entry_state(item_index) != EntryMapState::Written {
item_index += 1;
continue;
}
let item = source.load_item(self.hal, item_index)?;
            if let Some(last) = last_copied_entry {
                // Skip items up to and including the last one that already
                // reached the target page.
                if item == last {
                    last_copied_entry = None;
                }
                item_index += item.span;
                continue;
            }
match item.type_ {
ItemType::U8
| ItemType::I8
| ItemType::U16
| ItemType::I16
| ItemType::U32
| ItemType::I32
| ItemType::U64
| ItemType::I64
| ItemType::BlobIndex => {
target.write_item::<T>(
self.hal,
item.namespace_index,
item.key,
item.type_,
if item.chunk_index == u8::MAX {
None
} else {
Some(item.chunk_index)
},
item.span,
item.data,
)?;
}
ItemType::Sized | ItemType::BlobData => {
let data = source.load_referenced_data(self.hal, item_index, &item)?;
target.write_variable_sized_item::<T>(
self.hal,
item.namespace_index,
item.key,
item.type_,
if item.chunk_index == u8::MAX {
None
} else {
Some(item.chunk_index)
},
&data,
)?;
}
ItemType::Blob => {
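                    // Legacy whole-blob items (pre-chunking format) are not
                    // carried over.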
}
ItemType::Any => {
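                    // Wildcard/erased marker; nothing to copy.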
}
}
item_index += item.span;
}
self.pages.push(target);
Ok(())
}
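    /// Parses one sector into a `ThinPage`: validates the header, walks the
    /// entry bitmap, repairs half-written entries, and collects namespaces and
    /// blob bookkeeping for the caller.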
fn load_sector(&mut self, sector_address: usize) -> Result<LoadPageResult, Error> {
#[cfg(feature = "defmt")]
trace!("load_sector: @{:#08x}", sector_address);
#[cfg(feature = "debug-logs")]
println!(" raw: load page: 0x{sector_address:04X}");
let mut buf = [0u8; FLASH_SECTOR_SIZE];
self.hal
.read(sector_address as _, &mut buf)
.map_err(|_| Error::FlashError)?;
if buf[..size_of::<PageHeader>()] == [0xFFu8; size_of::<PageHeader>()] {
return Ok(LoadPageResult::Empty(ThinPage::uninitialized(
sector_address,
)));
}
let raw_page: RawPage = unsafe { core::mem::transmute(buf) };
let mut page = ThinPage {
address: sector_address,
header: raw_page.header.into(),
entry_state_bitmap: raw_page.entry_state_bitmap,
erased_entry_count: 0,
used_entry_count: 0,
item_hash_list: vec![],
};
match page.header.state {
ThinPageState::Corrupt | ThinPageState::Invalid => {
return Ok(LoadPageResult::Empty(page));
}
ThinPageState::Uninitialized => {
if buf.iter().all(|it| *it == 0xFF).not() {
page.header.state = ThinPageState::Corrupt;
};
return Ok(LoadPageResult::Empty(page));
}
ThinPageState::Freeing => (),
ThinPageState::Active => (),
ThinPageState::Full => (),
}
if raw_page.header.crc != raw_page.header.calculate_crc32(T::crc32) {
page.header.state = ThinPageState::Corrupt;
return Ok(LoadPageResult::Empty(page));
};
let mut blob_index = BlobIndex::new();
let mut namespaces: Vec<Namespace> = vec![];
let items = &raw_page.items;
let mut item_iter = unsafe { items.entries.iter().zip(u8::MIN..u8::MAX) };
'item_iter: while let Some((item, item_index)) = item_iter.next() {
let state = page.get_entry_state(item_index);
match state {
EntryMapState::Illegal => {
page.erased_entry_count += 1;
continue 'item_iter;
}
EntryMapState::Erased => {
page.erased_entry_count += 1;
continue 'item_iter;
}
EntryMapState::Empty => {
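                    // An entry can be fully written yet still flagged Empty if
                    // power was lost between the item write and the bitmap
                    // update; adopt it when its CRC checks out.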
let calculated_crc = item.calculate_crc32(T::crc32);
if item.crc == calculated_crc
&& item.type_ != ItemType::Any
&& item.span != u8::MAX
{
match item.type_ {
ItemType::U8
| ItemType::I8
| ItemType::U16
| ItemType::I16
| ItemType::U32
| ItemType::I32
| ItemType::U64
| ItemType::I64
| ItemType::BlobIndex => {
#[cfg(feature = "debug-logs")]
println!("encountered valid but empty scalar item at {item_index}");
page.set_entry_state(
self.hal,
item_index as _,
EntryMapState::Written,
)?;
page.used_entry_count += 1;
}
ItemType::Blob => {
page.used_entry_count += 1;
continue 'item_iter;
}
ItemType::Sized | ItemType::BlobData => {
#[cfg(feature = "debug-logs")]
println!(
"encountered valid but EMPTY variable sized item at {item_index}"
);
let data = page.load_referenced_data(self.hal, item_index, item)?;
let data_crc = T::crc32(u32::MAX, &data);
if data_crc != unsafe { item.data.sized.crc } {
page.set_entry_state_range(
self.hal,
item_index..item_index + item.span,
EntryMapState::Erased,
)?;
page.erased_entry_count += item.span;
continue 'item_iter;
}
page.set_entry_state_range(
self.hal,
item_index..item_index + item.span,
EntryMapState::Written,
)?;
page.used_entry_count += item.span;
}
ItemType::Any => {
continue 'item_iter;
}
}
} else {
continue 'item_iter;
}
}
EntryMapState::Written => {
let calculated_crc = item.calculate_crc32(T::crc32);
if item.crc != calculated_crc {
#[cfg(feature = "debug-logs")]
println!(
"CRC mismatch for item '{}', marking as erased",
slice_with_nullbytes_to_str(&item.key.0)
);
page.set_entry_state_range(
self.hal,
item_index..(item_index + item.span),
EntryMapState::Erased,
)?;
page.erased_entry_count += item.span;
continue 'item_iter;
}
page.used_entry_count += item.span;
}
}
#[cfg(feature = "debug-logs")]
println!("item: {:?}", item);
if item.namespace_index == 0 {
namespaces.push(Namespace {
name: item.key,
index: unsafe { item.data.raw[0] },
});
continue 'item_iter;
}
if item.type_ == ItemType::BlobIndex || item.type_ == ItemType::BlobData {
let chunk_start = if item.type_ == ItemType::BlobIndex {
unsafe { VersionOffset::from(item.data.blob_index.chunk_start) }
} else {
VersionOffset::from(item.chunk_index)
};
let key = (NamespaceIndex(item.namespace_index), chunk_start, item.key);
let existing = blob_index.get_mut(&key);
if let Some(existing) = existing {
if item.type_ == ItemType::BlobIndex {
existing.0 = Some(BlobIndexEntryBlobIndexData {
item_index,
page_sequence: page.header.sequence,
size: unsafe { item.data.blob_index.size },
chunk_count: unsafe { item.data.blob_index.chunk_count },
});
} else {
let chunk_size = unsafe { item.data.sized.size } as u32;
let page_seq = page.header.sequence;
if let Some(entry) = existing
.1
.chunks_by_page
.iter_mut()
.find(|chunk| chunk.page_sequence == page_seq)
{
entry.chunk_count += 1;
entry.data_size += chunk_size;
} else {
existing.1.chunks_by_page.push(ChunkData {
page_sequence: page_seq,
chunk_count: 1,
data_size: chunk_size,
});
}
}
} else if item.type_ == ItemType::BlobIndex {
blob_index.insert(
key,
(
Some(BlobIndexEntryBlobIndexData {
item_index,
page_sequence: page.header.sequence,
size: unsafe { item.data.blob_index.size },
chunk_count: unsafe { item.data.blob_index.chunk_count },
}),
BlobObservedData {
chunks_by_page: vec![],
},
),
);
} else {
blob_index.insert(
key,
(
None,
BlobObservedData {
chunks_by_page: vec![ChunkData {
page_sequence: page.header.sequence,
chunk_count: 1,
data_size: unsafe { item.data.sized.size } as u32,
}],
},
),
);
}
}
page.item_hash_list.push(ItemHashListEntry {
hash: item.calculate_hash(T::crc32),
index: item_index,
});
if item.span >= 2 {
item_iter.nth((item.span - 2) as usize);
}
}
#[cfg(feature = "debug-logs")]
println!("PGE {page:?}");
Ok(LoadPageResult::Used(page, namespaces, blob_index))
}
}