use ledger_secure_sdk_sys::nvm_write;
use AtomicStorageElem::{StorageA, StorageB};
/// Error returned when attempting to add to a full `Collection`.
// Derive `Debug` so the error works with `unwrap`/`expect`/`{:?}` at call sites.
#[derive(Debug)]
pub struct StorageFullError;
/// Abstraction over a non-volatile storage cell holding one value of type `T`.
pub trait SingleStorage<T> {
/// Returns a read-only reference to the stored value.
fn get_ref(&self) -> &T;
/// Overwrites the stored value with `value`.
fn update(&mut self, value: &T);
}
/// Wrapper forcing the contained value to 64-byte alignment so it can be
/// placed in NVM and programmed via `nvm_write`.
// NOTE(review): the 64-byte figure is presumably a flash-programming
// constraint of the target devices — confirm against the SDK documentation.
#[repr(align(64))]
#[derive(Copy, Clone)]
pub struct AlignedStorage<T> {
value: T,
}
impl<T> AlignedStorage<T> {
pub const fn new(value: T) -> AlignedStorage<T> {
AlignedStorage { value }
}
}
impl<T> SingleStorage<T> for AlignedStorage<T> {
/// Borrows the value in place (reads straight from the NVM-resident cell).
fn get_ref(&self) -> &T {
&self.value
}
/// Overwrites the cell by programming `size_of::<T>()` bytes from `value`
/// into NVM at this cell's address.
fn update(&mut self, value: &T) {
// SAFETY: both pointers are derived from live references to `T`, so each
// is valid for `size_of::<T>()` bytes; the destination cast to `*mut` is
// required by the `nvm_write` FFI signature.
unsafe {
nvm_write(
&self.value as *const T as *const core::ffi::c_void as *mut core::ffi::c_void,
value as *const T as *const core::ffi::c_void as *mut core::ffi::c_void,
core::mem::size_of::<T>() as u32,
);
// NOTE(review): presumably re-reads the freshly programmed cell (or keeps
// the write from being treated as dead by the optimizer) — confirm intent.
let mut _dummy = &self.value;
}
}
}
/// Marker byte stored in a cell's flag to indicate its value is valid.
const STORAGE_VALID: u8 = 0xa5;
/// A value paired with a validity flag, both NVM-resident. Updates clear the
/// flag before rewriting the value and set it again afterwards, so an
/// interrupted write is detectable (`is_valid` returns `false`).
pub struct SafeStorage<T> {
// Holds `STORAGE_VALID` when `value` is intact.
flag: AlignedStorage<u8>,
value: AlignedStorage<T>,
}
impl<T> SafeStorage<T> {
    /// Builds a cell holding `value` and marked valid from the start.
    pub const fn new(value: T) -> SafeStorage<T> {
        SafeStorage {
            value: AlignedStorage::new(value),
            flag: AlignedStorage::new(STORAGE_VALID),
        }
    }

    /// Clears the validity flag; `get_ref` will panic until the cell is
    /// rewritten via `update`.
    pub fn invalidate(&mut self) {
        let cleared: u8 = 0;
        self.flag.update(&cleared);
    }

    /// True when the flag still carries the `STORAGE_VALID` marker.
    pub fn is_valid(&self) -> bool {
        matches!(*self.flag.get_ref(), STORAGE_VALID)
    }
}
impl<T> SingleStorage<T> for SafeStorage<T> {
/// Returns the stored value.
///
/// Panics (via the assert) if the cell was invalidated or a previous
/// update never completed.
fn get_ref(&self) -> &T {
assert_eq!(*self.flag.get_ref(), STORAGE_VALID);
self.value.get_ref()
}
/// Rewrites the value. The flag is cleared first and restored only after
/// the value write completes, so an interruption mid-write leaves the cell
/// detectably invalid rather than silently corrupt. Do not reorder.
fn update(&mut self, value: &T) {
self.flag.update(&0);
self.value.update(value);
self.flag.update(&STORAGE_VALID);
}
}
/// Declares the `AtomicStorage` type with an alignment `$n` chosen per
/// target device.
macro_rules! atomic_storage {
    ($n:expr) => {
        /// Two `SafeStorage` cells used in alternation so that at least one
        /// copy of the value stays intact even if an update is interrupted.
        #[repr(align($n))]
        pub struct AtomicStorage<T> {
            storage_a: SafeStorage<T>,
            storage_b: SafeStorage<T>,
        }
    };
}
// Per-device alignment for `AtomicStorage` — presumably matching each
// target's flash page size so the two copies land in distinct pages;
// confirm against the device datasheets.
#[cfg(target_os = "nanox")]
atomic_storage!(256);
#[cfg(any(
target_os = "nanosplus",
target_os = "stax",
target_os = "flex",
target_os = "apex_p"
))]
atomic_storage!(512);
/// Identifies which of the two `AtomicStorage` copies currently holds the
/// valid value.
pub enum AtomicStorageElem {
StorageA,
StorageB,
}
impl<T> AtomicStorage<T>
where
    T: Copy,
{
    /// Builds an atomic storage with both copies initialized to `*value`
    /// and both marked valid.
    pub const fn new(value: &T) -> AtomicStorage<T> {
        AtomicStorage {
            storage_a: SafeStorage::new(*value),
            storage_b: SafeStorage::new(*value),
        }
    }

    /// Reports which copy is currently valid, preferring A when both are
    /// (the state right after `new`).
    ///
    /// # Panics
    /// Panics when neither copy is valid — a state the alternating write
    /// scheme is designed to make unreachable.
    fn which(&self) -> AtomicStorageElem {
        match (self.storage_a.is_valid(), self.storage_b.is_valid()) {
            (true, _) => StorageA,
            (false, true) => StorageB,
            (false, false) => panic!("invalidated atomic storage"),
        }
    }
}
impl<T> SingleStorage<T> for AtomicStorage<T>
where
T: Copy,
{
/// Returns a reference to the value held by whichever copy is valid.
fn get_ref(&self) -> &T {
match self.which() {
StorageA => self.storage_a.get_ref(),
StorageB => self.storage_b.get_ref(),
}
}
/// Writes `value` into the currently *inactive* copy, then invalidates the
/// active one. The new copy is fully written before the old one is
/// invalidated, so at every instant at least one copy is intact. The
/// statement order in each arm is load-bearing — do not swap it.
fn update(&mut self, value: &T) {
match self.which() {
StorageA => {
self.storage_b.update(value);
self.storage_a.invalidate();
}
StorageB => {
self.storage_a.update(value);
self.storage_b.invalidate();
}
}
}
}
/// Error returned when a `Collection` slot key is outside `0..N`.
// Derive `Debug` so the error works with `unwrap`/`expect`/`{:?}` at call sites.
#[derive(Debug)]
pub struct KeyOutOfRange;
/// Fixed-capacity collection stored in NVM. A slot is live when its byte in
/// `flags` equals `STORAGE_VALID`; the flag array itself is kept in an
/// `AtomicStorage` so flag updates survive interruption.
pub struct Collection<T, const N: usize> {
flags: AtomicStorage<[u8; N]>,
slots: [AlignedStorage<T>; N],
}
impl<T, const N: usize> Collection<T, N>
where
    T: Copy,
{
    /// Creates a collection with every slot free and pre-initialized to `value`.
    pub const fn new(value: T) -> Collection<T, N> {
        Collection {
            flags: AtomicStorage::new(&[0; N]),
            slots: [AlignedStorage::new(value); N],
        }
    }

    /// Returns the key of the first free slot, or `None` when full.
    fn find_free_slot(&self) -> Option<usize> {
        self.flags
            .get_ref()
            .iter()
            .position(|&e| e != STORAGE_VALID)
    }

    /// Stores `value` in the first free slot.
    ///
    /// The slot is written first and its allocation flag committed second,
    /// so an interrupted add leaves the collection observably unchanged.
    ///
    /// # Errors
    /// Returns `StorageFullError` when no slot is free.
    pub fn add(&mut self, value: &T) -> Result<(), StorageFullError> {
        match self.find_free_slot() {
            Some(i) => {
                self.slots[i].update(value);
                let mut new_flags = *self.flags.get_ref();
                new_flags[i] = STORAGE_VALID;
                self.flags.update(&new_flags);
                Ok(())
            }
            None => Err(StorageFullError),
        }
    }

    /// Whether slot `key` currently holds a live element.
    ///
    /// # Errors
    /// Returns `KeyOutOfRange` when `key >= N`.
    fn is_allocated(&self, key: usize) -> Result<bool, KeyOutOfRange> {
        match self.flags.get_ref().get(key) {
            // Direct boolean instead of the verbose `if { Ok(true) } else { Ok(false) }`.
            Some(&byte) => Ok(byte == STORAGE_VALID),
            None => Err(KeyOutOfRange),
        }
    }

    /// Number of live elements.
    pub fn len(&self) -> usize {
        self.count_allocated(N)
    }

    /// True when no slot is allocated.
    pub fn is_empty(&self) -> bool {
        !self.flags.get_ref().iter().any(|v| *v == STORAGE_VALID)
    }

    /// Maximum number of elements the collection can hold.
    pub const fn capacity(&self) -> usize {
        N
    }

    /// Number of free slots remaining.
    pub fn remaining(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Counts allocated slots among the first `len` keys.
    fn count_allocated(&self, len: usize) -> usize {
        // `filter().count()` replaces a manual fold with a `u32` accumulator
        // (and its usize->u32->usize round-trip).
        self.flags
            .get_ref()
            .iter()
            .take(len)
            .filter(|&&byte| byte == STORAGE_VALID)
            .count()
    }

    /// Maps a logical index (position among live elements) to the physical
    /// slot key, skipping free slots. Returns `None` when `index` is past
    /// the last live element.
    fn index_to_key(&self, index: usize) -> Option<usize> {
        // The index-th live element can never sit before slot `index`, so
        // start scanning there, seeded with the live count before that key.
        let mut key = index;
        let mut allocated_count = self.count_allocated(index);
        loop {
            // `ok()?` terminates the scan when `key` runs past the last slot.
            let is_allocated = self.is_allocated(key).ok()?;
            if is_allocated {
                if allocated_count == index {
                    return Some(key);
                }
                allocated_count += 1;
            }
            key += 1;
        }
    }

    /// Returns the `index`-th live element, or `None` when out of range.
    pub fn get(&self, index: usize) -> Option<&T> {
        self.index_to_key(index)
            .map(|key| self.slots[key].get_ref())
    }

    /// Removes the `index`-th live element by clearing its allocation flag;
    /// the slot contents are left in place but become unreachable.
    ///
    /// # Panics
    /// Panics when `index` is out of range.
    pub fn remove(&mut self, index: usize) {
        let key = self.index_to_key(index).unwrap();
        let mut new_flags = *self.flags.get_ref();
        new_flags[key] = 0;
        self.flags.update(&new_flags);
    }

    /// Frees every slot in a single atomic flag write.
    pub fn clear(&mut self) {
        self.flags.update(&[0; N]);
    }
}
/// Enables `for item in &collection` iteration over the live elements.
impl<'a, T, const N: usize> IntoIterator for &'a Collection<T, N>
where
    T: Copy,
{
    type Item = &'a T;
    type IntoIter = CollectionIterator<'a, T, N>;

    /// Builds an iterator positioned before the first slot.
    fn into_iter(self) -> CollectionIterator<'a, T, N> {
        CollectionIterator {
            next_key: 0,
            container: self,
        }
    }
}
/// Iterator over the live elements of a `Collection`, in slot order.
pub struct CollectionIterator<'a, T, const N: usize>
where
T: Copy,
{
// Collection being iterated.
container: &'a Collection<T, N>,
// Next physical slot key to examine (not a logical index).
next_key: usize,
}
impl<'a, T, const N: usize> Iterator for CollectionIterator<'a, T, N>
where
    T: Copy,
{
    type Item = &'a T;

    /// Yields the next live element, skipping free slots; ends once the key
    /// runs past the last slot (`is_allocated` returns `Err`).
    fn next(&mut self) -> Option<&'a T> {
        loop {
            let key = self.next_key;
            let allocated = self.container.is_allocated(key).ok()?;
            self.next_key = key + 1;
            if allocated {
                return Some(self.container.slots[key].get_ref());
            }
        }
    }
}