pub mod backend;
mod error;
pub mod migration;
mod models;
mod operations;
mod schema;
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use diesel::Connection as _;
use crate::error::{InternalError, InvalidStateError};
use crate::state::error::{StatePruneError, StateReadError, StateWriteError};
#[cfg(feature = "state-merkle-leaf-reader")]
use crate::state::merkle::{MerkleRadixLeafReadError, MerkleRadixLeafReader};
use crate::state::{Prune, Read, StateChange, Write};
use super::node::Node;
use backend::{Backend, Connection};
pub use error::SqlMerkleStateBuildError;
use operations::get_leaves::MerkleRadixGetLeavesOperation as _;
use operations::get_or_create_tree::MerkleRadixGetOrCreateTreeOperation as _;
use operations::get_path::MerkleRadixGetPathOperation as _;
use operations::get_tree_by_name::MerkleRadixGetTreeByNameOperation as _;
use operations::has_root::MerkleRadixHasRootOperation as _;
use operations::insert_nodes::MerkleRadixInsertNodesOperation as _;
use operations::list_leaves::MerkleRadixListLeavesOperation as _;
use operations::prune_entries::MerkleRadixPruneEntriesOperation as _;
use operations::update_change_log::MerkleRadixUpdateUpdateChangeLogOperation as _;
use operations::MerkleRadixOperations;
/// Number of address characters consumed per tree level; addresses are walked
/// and split in steps of this size (see `parent_and_branch` and `get_path`).
const TOKEN_SIZE: usize = 2;
/// Builds a [`SqlMerkleState`] instance backed by a SQL database.
///
/// A backend and a tree name are required at build time; optionally the
/// builder can be told to create the tree when it does not already exist.
#[derive(Default)]
pub struct SqlMerkleStateBuilder<B: Backend + Clone> {
    // Database backend to operate against; required by `build`.
    backend: Option<B>,
    // Name of the merkle tree to operate on; required by `build`.
    tree_name: Option<String>,
    // When true, `build` creates the tree if it is missing.
    create_tree: bool,
}
impl<B: Backend + Clone> SqlMerkleStateBuilder<B> {
pub fn new() -> Self {
Self {
backend: None,
tree_name: None,
create_tree: false,
}
}
pub fn with_backend(mut self, backend: B) -> Self {
self.backend = Some(backend);
self
}
pub fn with_tree<S: Into<String>>(mut self, tree_name: S) -> Self {
self.tree_name = Some(tree_name.into());
self
}
pub fn create_tree_if_necessary(mut self) -> Self {
self.create_tree = true;
self
}
}
#[cfg(feature = "sqlite")]
impl SqlMerkleStateBuilder<backend::SqliteBackend> {
    /// Builds the configured [`SqlMerkleState`] against a SQLite backend.
    ///
    /// # Errors
    ///
    /// Returns an error if no backend or tree name was provided, or if the
    /// named tree does not exist and creation was not requested.
    pub fn build(self) -> Result<SqlMerkleState<backend::SqliteBackend>, SqlMerkleStateBuildError> {
        let tree_name = self
            .tree_name
            .ok_or_else(|| InvalidStateError::with_message("must provide a tree name".into()))?;
        let backend = self
            .backend
            .ok_or_else(|| InvalidStateError::with_message("must provide a backend".into()))?;

        let connection = backend.connection()?;
        let operations = MerkleRadixOperations::new(connection.as_inner());

        // The empty tree's root hash, used to seed a newly created tree.
        let (root_hash, _) = encode_and_hash(Node::default())?;
        let initial_root = hex::encode(&root_hash);

        let tree_id: i64 = if self.create_tree {
            operations.get_or_create_tree(&tree_name, &initial_root)?
        } else {
            match operations.get_tree_id_by_name(&tree_name)? {
                Some(id) => id,
                None => {
                    return Err(InvalidStateError::with_message(
                        "must provide the name of an existing tree".into(),
                    )
                    .into())
                }
            }
        };

        Ok(SqlMerkleState { backend, tree_id })
    }
}
#[cfg(feature = "postgres")]
impl SqlMerkleStateBuilder<backend::PostgresBackend> {
    /// Builds the configured [`SqlMerkleState`] against a PostgreSQL backend.
    ///
    /// # Errors
    ///
    /// Returns an error if no backend or tree name was provided, or if the
    /// named tree does not exist and creation was not requested.
    pub fn build(
        self,
    ) -> Result<SqlMerkleState<backend::PostgresBackend>, SqlMerkleStateBuildError> {
        let tree_name = self
            .tree_name
            .ok_or_else(|| InvalidStateError::with_message("must provide a tree name".into()))?;
        let backend = self
            .backend
            .ok_or_else(|| InvalidStateError::with_message("must provide a backend".into()))?;

        let connection = backend.connection()?;
        let operations = MerkleRadixOperations::new(connection.as_inner());

        // The empty tree's root hash, used to seed a newly created tree.
        let (root_hash, _) = encode_and_hash(Node::default())?;
        let initial_root = hex::encode(&root_hash);

        let tree_id: i64 = if self.create_tree {
            operations.get_or_create_tree(&tree_name, &initial_root)?
        } else {
            match operations.get_tree_id_by_name(&tree_name)? {
                Some(id) => id,
                None => {
                    return Err(InvalidStateError::with_message(
                        "must provide the name of an existing tree".into(),
                    )
                    .into())
                }
            }
        };

        Ok(SqlMerkleState { backend, tree_id })
    }
}
/// A merkle-radix tree state representation stored in a SQL database.
#[derive(Clone)]
pub struct SqlMerkleState<B: Backend + Clone> {
    // Backend providing database connections.
    backend: B,
    // Database id of the tree this instance operates on.
    tree_id: i64,
}
impl<B: Backend + Clone> SqlMerkleState<B> {
    /// Returns the hex-encoded state root hash of an empty tree (the hash of
    /// the default node).
    ///
    /// # Errors
    ///
    /// Returns an [`InternalError`] if the empty node cannot be encoded and
    /// hashed.
    pub fn initial_state_root_hash(&self) -> Result<String, InternalError> {
        encode_and_hash(Node::default()).map(|(hash, _)| hex::encode(&hash))
    }
}
#[cfg(feature = "sqlite")]
impl Write for SqlMerkleState<backend::SqliteBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Applies the given state changes on top of `state_id` and persists the
    /// resulting tree nodes, returning the new state root hash.
    fn commit(
        &self,
        state_id: &Self::StateId,
        state_changes: &[StateChange],
    ) -> Result<Self::StateId, StateWriteError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        // NOTE(review): generate_updates already yields a StateWriteError;
        // the extra StorageError wrapping below is preserved for compatibility.
        let (next_state_id, tree_update) = overlay
            .generate_updates(state_changes)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        overlay
            .write_updates(&next_state_id, tree_update)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        Ok(next_state_id)
    }

    /// Computes the state root hash that `commit` would produce for the given
    /// changes, without writing anything.
    fn compute_state_id(
        &self,
        state_id: &Self::StateId,
        state_changes: &[StateChange],
    ) -> Result<Self::StateId, StateWriteError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        let (next_state_id, _) = overlay
            .generate_updates(state_changes)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        Ok(next_state_id)
    }
}
#[cfg(feature = "sqlite")]
impl Read for SqlMerkleState<backend::SqliteBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Reads the values stored at the given keys under the given state root.
    /// Returns `InvalidStateId` if the state root is unknown to this tree.
    fn get(
        &self,
        state_id: &Self::StateId,
        keys: &[Self::Key],
    ) -> Result<HashMap<Self::Key, Self::Value>, StateReadError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        let root_exists = overlay
            .has_root()
            .map_err(|err| StateReadError::StorageError(Box::new(err)))?;
        if !root_exists {
            Err(StateReadError::InvalidStateId(state_id.into()))
        } else {
            overlay
                .get_entries(keys)
                .map_err(|err| StateReadError::StorageError(Box::new(err)))
        }
    }

    /// Returns a boxed clone of this reader.
    fn clone_box(
        &self,
    ) -> Box<dyn Read<StateId = Self::StateId, Key = Self::Key, Value = Self::Value>> {
        Box::new(self.clone())
    }
}
#[cfg(feature = "sqlite")]
impl Prune for SqlMerkleState<backend::SqliteBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Prunes the given state roots from the tree, returning the hashes of
    /// the removed nodes.
    fn prune(&self, state_ids: Vec<Self::StateId>) -> Result<Vec<Self::Key>, StatePruneError> {
        MerkleRadixPruner::new(self.tree_id, SqlOverlay::new(&self.backend))
            .prune(&state_ids)
            .map_err(|err| StatePruneError::StorageError(Box::new(err)))
    }
}
#[cfg(feature = "postgres")]
impl Write for SqlMerkleState<backend::PostgresBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Applies the given state changes on top of `state_id` and persists the
    /// resulting tree nodes, returning the new state root hash.
    fn commit(
        &self,
        state_id: &Self::StateId,
        state_changes: &[StateChange],
    ) -> Result<Self::StateId, StateWriteError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        // NOTE(review): generate_updates already yields a StateWriteError;
        // the extra StorageError wrapping below is preserved for compatibility.
        let (next_state_id, tree_update) = overlay
            .generate_updates(state_changes)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        overlay
            .write_updates(&next_state_id, tree_update)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        Ok(next_state_id)
    }

    /// Computes the state root hash that `commit` would produce for the given
    /// changes, without writing anything.
    fn compute_state_id(
        &self,
        state_id: &Self::StateId,
        state_changes: &[StateChange],
    ) -> Result<Self::StateId, StateWriteError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        let (next_state_id, _) = overlay
            .generate_updates(state_changes)
            .map_err(|err| StateWriteError::StorageError(Box::new(err)))?;
        Ok(next_state_id)
    }
}
#[cfg(feature = "postgres")]
impl Prune for SqlMerkleState<backend::PostgresBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Prunes the given state roots from the tree, returning the hashes of
    /// the removed nodes.
    fn prune(&self, state_ids: Vec<Self::StateId>) -> Result<Vec<Self::Key>, StatePruneError> {
        MerkleRadixPruner::new(self.tree_id, SqlOverlay::new(&self.backend))
            .prune(&state_ids)
            .map_err(|err| StatePruneError::StorageError(Box::new(err)))
    }
}
#[cfg(feature = "postgres")]
impl Read for SqlMerkleState<backend::PostgresBackend> {
    type StateId = String;
    type Key = String;
    type Value = Vec<u8>;

    /// Reads the values stored at the given keys under the given state root.
    /// Returns `InvalidStateId` if the state root is unknown to this tree.
    fn get(
        &self,
        state_id: &Self::StateId,
        keys: &[Self::Key],
    ) -> Result<HashMap<Self::Key, Self::Value>, StateReadError> {
        let overlay =
            MerkleRadixOverlay::new(self.tree_id, state_id.as_str(), SqlOverlay::new(&self.backend));
        let root_exists = overlay
            .has_root()
            .map_err(|err| StateReadError::StorageError(Box::new(err)))?;
        if !root_exists {
            Err(StateReadError::InvalidStateId(state_id.into()))
        } else {
            overlay
                .get_entries(keys)
                .map_err(|err| StateReadError::StorageError(Box::new(err)))
        }
    }

    /// Returns a boxed clone of this reader.
    fn clone_box(
        &self,
    ) -> Box<dyn Read<StateId = Self::StateId, Key = Self::Key, Value = Self::Value>> {
        Box::new(self.clone())
    }
}
// Result type used by the leaf-reader API.
#[cfg(feature = "state-merkle-leaf-reader")]
type IterResult<T> = Result<T, MerkleRadixLeafReadError>;
// Boxed iterator of fallible leaf entries returned by `leaves`.
#[cfg(feature = "state-merkle-leaf-reader")]
type LeafIter<T> = Box<dyn Iterator<Item = IterResult<T>>>;
#[cfg(all(feature = "state-merkle-leaf-reader", feature = "sqlite"))]
impl MerkleRadixLeafReader for SqlMerkleState<backend::SqliteBackend> {
    /// Returns an iterator over the leaves stored under the given state root,
    /// optionally limited to addresses under `subtree`.
    fn leaves(
        &self,
        state_id: &Self::StateId,
        subtree: Option<&str>,
    ) -> IterResult<LeafIter<(Self::Key, Self::Value)>> {
        let conn = self.backend.connection()?;

        // The initial (empty-tree) root has no leaves by definition.
        if *state_id == self.initial_state_root_hash()? {
            return Ok(Box::new(std::iter::empty()));
        }

        let operations = MerkleRadixOperations::new(conn.as_inner());
        let leaves = operations.list_leaves(self.tree_id, state_id, subtree)?;
        Ok(Box::new(leaves.into_iter().map(Ok)))
    }
}
#[cfg(all(feature = "state-merkle-leaf-reader", feature = "postgres"))]
impl MerkleRadixLeafReader for SqlMerkleState<backend::PostgresBackend> {
    /// Returns an iterator over the leaves stored under the given state root,
    /// optionally limited to addresses under `subtree`.
    fn leaves(
        &self,
        state_id: &Self::StateId,
        subtree: Option<&str>,
    ) -> IterResult<LeafIter<(Self::Key, Self::Value)>> {
        let conn = self.backend.connection()?;

        // The initial (empty-tree) root has no leaves by definition.
        if *state_id == self.initial_state_root_hash()? {
            return Ok(Box::new(std::iter::empty()));
        }

        let operations = MerkleRadixOperations::new(conn.as_inner());
        let leaves = operations.list_leaves(self.tree_id, state_id, subtree)?;
        Ok(Box::new(leaves.into_iter().map(Ok)))
    }
}
/// A view of the merkle-radix tree rooted at a particular state root hash,
/// through which reads and tree updates are performed.
struct MerkleRadixOverlay<'s, O> {
    // Database id of the tree being operated on.
    tree_id: i64,
    // The state root hash this overlay is anchored at.
    state_root_hash: &'s str,
    // Storage layer implementing OverlayReader/OverlayWriter.
    inner: O,
}
// The set of changed nodes as (hash, node, address) triples.
type NodeChanges = Vec<(String, Node, String)>;

/// The complete set of tree modifications produced by applying a batch of
/// state changes: nodes to insert plus hashes of nodes superseded or removed.
#[derive(Default)]
struct TreeUpdate {
    // Nodes to write, as (hash, node, address) triples.
    node_changes: NodeChanges,
    // Hashes of nodes replaced or detached by this update.
    deletions: HashSet<String>,
}
impl<'s, O> MerkleRadixOverlay<'s, O>
where
    O: OverlayReader + OverlayWriter,
{
    /// Creates an overlay anchored at `state_root_hash` in tree `tree_id`.
    fn new(tree_id: i64, state_root_hash: &'s str, inner: O) -> Self {
        Self {
            tree_id,
            state_root_hash,
            inner,
        }
    }

    /// Persists a previously generated `TreeUpdate` as the new state root.
    ///
    /// A no-op when the update contains no node changes (e.g. an empty batch
    /// of state changes).
    fn write_updates(
        &self,
        new_state_root: &str,
        tree_update: TreeUpdate,
    ) -> Result<(), InternalError> {
        if tree_update.node_changes.is_empty() {
            return Ok(());
        }
        self.inner.write_changes(
            self.tree_id,
            new_state_root,
            self.state_root_hash,
            tree_update,
        )?;
        Ok(())
    }

    /// Returns true if this overlay's anchor state root exists in the tree.
    fn has_root(&self) -> Result<bool, InternalError> {
        self.inner.has_root(self.tree_id, self.state_root_hash)
    }

    /// Reads the leaf values for the given keys under the anchor state root.
    /// Keys with no stored value are absent from the returned map.
    fn get_entries(&self, keys: &[String]) -> Result<HashMap<String, Vec<u8>>, InternalError> {
        let keys = keys.iter().map(|k| &**k).collect::<Vec<_>>();
        self.inner
            .get_entries(self.tree_id, self.state_root_hash, keys)
            .map(|result| result.into_iter().collect::<HashMap<_, _>>())
    }

    /// Computes the new state root and the full `TreeUpdate` that would
    /// result from applying `state_changes` on top of the anchor root.
    ///
    /// Nothing is written; `write_updates` persists the result. An empty
    /// change set returns the anchor root unchanged with an empty update.
    fn generate_updates(
        &self,
        state_changes: &[StateChange],
    ) -> Result<(String, TreeUpdate), StateWriteError> {
        if state_changes.is_empty() {
            return Ok((self.state_root_hash.to_string(), TreeUpdate::default()));
        }

        // path_map: address -> node for every node touched by any change.
        // additions: addresses touched by a Set (must not be pruned below).
        // deletions: hashes of nodes superseded/detached by this update.
        let mut path_map = HashMap::new();
        let mut deletions = HashSet::new();
        let mut additions = HashSet::new();
        let mut delete_items = vec![];
        for state_change in state_changes {
            match state_change {
                StateChange::Set { key, value } => {
                    // Load every node on the path to `key` (missing nodes
                    // default), then store the new value on the leaf.
                    let mut set_path_map = self
                        .get_path(key)
                        .map_err(|e| StateWriteError::StorageError(Box::new(e)))?;
                    {
                        let node = set_path_map
                            .get_mut(key)
                            .expect("Path map not correctly generated");
                        node.value = Some(value.to_vec());
                    }
                    for pkey in set_path_map.keys() {
                        additions.insert(pkey.clone());
                    }
                    path_map.extend(set_path_map);
                }
                StateChange::Delete { key } => {
                    // Load the path for bookkeeping; actual removal happens in
                    // the pruning pass below.
                    let del_path_map = self
                        .get_path(key)
                        .map_err(|e| StateWriteError::StorageError(Box::new(e)))?;
                    path_map.extend(del_path_map);
                    delete_items.push(key);
                }
            }
        }

        // Pruning pass: for each deleted address, walk up the tree removing
        // ancestors that become childless, unless a Set in this same batch
        // re-added them. Stops at the first ancestor that still has children.
        for del_address in delete_items.iter() {
            path_map.remove(*del_address);
            let (mut parent_address, mut path_branch) = parent_and_branch(del_address);
            while !parent_address.is_empty() {
                let remove_parent = {
                    let parent_node = path_map
                        .get_mut(parent_address)
                        .expect("Path map not correctly generated or entry is deleted");
                    // Detach the removed child and record its old hash.
                    if let Some(old_hash_key) = parent_node.children.remove(path_branch) {
                        deletions.insert(old_hash_key);
                    }
                    parent_node.children.is_empty()
                };
                if remove_parent && !additions.contains(parent_address) {
                    path_map.remove(parent_address);
                } else {
                    break;
                }
                let (next_parent, next_branch) = parent_and_branch(parent_address);
                parent_address = next_parent;
                path_branch = next_branch;
                // Reached the root: detach the pruned subtree from it too.
                if parent_address.is_empty() {
                    let parent_node = path_map
                        .get_mut(parent_address)
                        .expect("Path map not correctly generated");
                    if let Some(old_hash_key) = parent_node.children.remove(path_branch) {
                        deletions.insert(old_hash_key);
                    }
                }
            }
        }

        // Rehash bottom-up: longest addresses first so every child's new hash
        // is known before its parent is hashed. After the loop, key_hash_hex
        // holds the hash of the last (shortest, i.e. root) path processed —
        // the new state root.
        let mut sorted_paths: Vec<_> = path_map.keys().cloned().collect();
        sorted_paths.sort_by_key(|a| Reverse(a.len()));
        let mut key_hash_hex = String::new();
        let mut batch = Vec::with_capacity(sorted_paths.len());
        for path in sorted_paths {
            let node = path_map
                .remove(&path)
                // (message typo "sink" preserved; it is a runtime string)
                .expect("Path map keys are out of sink");
            let (hash_key, _) = encode_and_hash(node.clone())
                .map_err(|e| StateWriteError::StorageError(Box::new(e)))?;
            key_hash_hex = hex::encode(&hash_key);
            if !path.is_empty() {
                // Point the parent at this node's new hash; the replaced hash
                // (if any) is recorded as a deletion.
                let (parent_address, path_branch) = parent_and_branch(&path);
                let parent = path_map
                    .get_mut(parent_address)
                    .expect("Path map not correctly generated");
                if let Some(old_hash_key) = parent
                    .children
                    .insert(path_branch.to_string(), key_hash_hex.clone())
                {
                    deletions.insert(old_hash_key);
                }
            }
            batch.push((key_hash_hex.clone(), node, path));
        }

        Ok((
            key_hash_hex,
            TreeUpdate {
                node_changes: batch,
                deletions,
            },
        ))
    }

    /// Returns a map of address -> node for every prefix along the path to
    /// `address` (including the root "" and `address` itself). Prefixes with
    /// no stored node are filled in with default (empty) nodes.
    fn get_path(&self, address: &str) -> Result<HashMap<String, Node>, InternalError> {
        // All prefixes of `address` in TOKEN_SIZE steps, plus the address.
        let addresses_along_path = (0..address.len())
            .step_by(2)
            .map(|i| address[0..i].to_string())
            .chain(std::iter::once(address.to_string()));
        // Stored nodes along the path, padded with defaults for the portion
        // of the path that does not exist yet.
        let node_path_iter = self
            .inner
            .get_path(self.tree_id, self.state_root_hash, address)?
            .into_iter()
            .map(|(_, node)| node)
            .chain(std::iter::repeat(Node::default()));
        Ok(addresses_along_path
            .zip(node_path_iter)
            .collect::<HashMap<_, _>>())
    }
}
/// Prunes state roots from a tree via an `OverlayWriter`.
struct MerkleRadixPruner<O> {
    // Database id of the tree being pruned.
    tree_id: i64,
    // Storage layer implementing OverlayWriter.
    inner: O,
}
impl<O> MerkleRadixPruner<O>
where
O: OverlayWriter,
{
fn new(tree_id: i64, inner: O) -> Self {
Self { tree_id, inner }
}
fn prune(&self, state_ids: &[String]) -> Result<Vec<String>, InternalError> {
let mut removed_hashes = vec![];
for state_id in state_ids {
let pruned = self.inner.prune(self.tree_id, state_id)?;
removed_hashes.extend(pruned.into_iter());
}
Ok(removed_hashes)
}
}
/// Read operations the merkle-radix overlay requires from its storage layer.
trait OverlayReader {
    /// Returns true if `state_root_hash` exists in tree `tree_id`.
    fn has_root(&self, tree_id: i64, state_root_hash: &str) -> Result<bool, InternalError>;
    /// Returns the (address, node) pairs stored along the path to `address`
    /// under the given state root.
    fn get_path(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        address: &str,
    ) -> Result<Vec<(String, Node)>, InternalError>;
    /// Returns the (key, value) leaf entries for the given keys under the
    /// given state root.
    fn get_entries(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        keys: Vec<&str>,
    ) -> Result<Vec<(String, Vec<u8>)>, InternalError>;
}
/// Write operations the merkle-radix overlay requires from its storage layer.
trait OverlayWriter {
    /// Persists a `TreeUpdate` as `state_root_hash`, recorded as a child of
    /// `parent_state_root_hash` in the change log.
    fn write_changes(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        parent_state_root_hash: &str,
        tree_update: TreeUpdate,
    ) -> Result<(), InternalError>;
    /// Removes the entries belonging to `state_root`, returning the hashes of
    /// the removed nodes.
    fn prune(&self, tree_id: i64, state_root: &str) -> Result<Vec<String>, InternalError>;
}
/// SQL-backed implementation of `OverlayReader`/`OverlayWriter`, borrowing a
/// backend for the duration of an operation.
struct SqlOverlay<'b, B: Backend> {
    backend: &'b B,
}
impl<'b, B: Backend> SqlOverlay<'b, B> {
    /// Wraps a borrowed backend.
    fn new(backend: &'b B) -> Self {
        Self { backend }
    }
}
#[cfg(feature = "sqlite")]
impl<'b> OverlayReader for SqlOverlay<'b, backend::SqliteBackend> {
    /// Delegates to the `has_root` operation on a fresh connection.
    fn has_root(&self, tree_id: i64, state_root_hash: &str) -> Result<bool, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).has_root(tree_id, state_root_hash)
    }

    /// Delegates to the `get_path` operation on a fresh connection.
    fn get_path(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        address: &str,
    ) -> Result<Vec<(String, Node)>, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).get_path(tree_id, state_root_hash, address)
    }

    /// Delegates to the `get_leaves` operation on a fresh connection.
    fn get_entries(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        keys: Vec<&str>,
    ) -> Result<Vec<(String, Vec<u8>)>, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).get_leaves(tree_id, state_root_hash, keys)
    }
}
#[cfg(feature = "sqlite")]
impl<'b> OverlayWriter for SqlOverlay<'b, backend::SqliteBackend> {
    /// Writes the changed nodes and the change-log entry for the new state
    /// root in a single database transaction, so the new root becomes visible
    /// atomically or not at all.
    fn write_changes(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        parent_state_root_hash: &str,
        tree_update: TreeUpdate,
    ) -> Result<(), InternalError> {
        let conn = self.backend.connection()?;
        conn.as_inner().transaction(|| {
            let operations = MerkleRadixOperations::new(conn.as_inner());
            let TreeUpdate {
                node_changes,
                deletions,
            } = tree_update;
            // Convert the (hash, node, address) triples into insertable rows.
            let insertable_changes = node_changes
                .into_iter()
                .map(
                    |(hash, node, address)| operations::insert_nodes::InsertableNode {
                        hash,
                        node,
                        address,
                    },
                )
                .collect::<Vec<_>>();
            // Nodes must exist before the change log references them.
            operations.insert_nodes(tree_id, &insertable_changes)?;
            let additions = insertable_changes
                .iter()
                .map(|insertable| insertable.hash.as_ref())
                .collect::<Vec<_>>();
            let deletions = deletions.iter().map(|s| s.as_ref()).collect::<Vec<_>>();
            operations.update_change_log(
                tree_id,
                state_root_hash,
                parent_state_root_hash,
                &additions,
                &deletions,
            )?;
            Ok(())
        })
    }

    /// Removes the entries belonging to `state_root`, returning the removed
    /// node hashes.
    fn prune(&self, tree_id: i64, state_root: &str) -> Result<Vec<String>, InternalError> {
        let conn = self.backend.connection()?;
        let operations = MerkleRadixOperations::new(conn.as_inner());
        operations.prune_entries(tree_id, state_root)
    }
}
#[cfg(feature = "postgres")]
impl<'b> OverlayReader for SqlOverlay<'b, backend::PostgresBackend> {
    /// Delegates to the `has_root` operation on a fresh connection.
    fn has_root(&self, tree_id: i64, state_root_hash: &str) -> Result<bool, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).has_root(tree_id, state_root_hash)
    }

    /// Delegates to the `get_path` operation on a fresh connection.
    fn get_path(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        address: &str,
    ) -> Result<Vec<(String, Node)>, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).get_path(tree_id, state_root_hash, address)
    }

    /// Delegates to the `get_leaves` operation on a fresh connection.
    fn get_entries(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        keys: Vec<&str>,
    ) -> Result<Vec<(String, Vec<u8>)>, InternalError> {
        let connection = self.backend.connection()?;
        MerkleRadixOperations::new(connection.as_inner()).get_leaves(tree_id, state_root_hash, keys)
    }
}
#[cfg(feature = "postgres")]
impl<'b> OverlayWriter for SqlOverlay<'b, backend::PostgresBackend> {
    /// Writes the changed nodes and the change-log entry for the new state
    /// root in a single database transaction, so the new root becomes visible
    /// atomically or not at all.
    fn write_changes(
        &self,
        tree_id: i64,
        state_root_hash: &str,
        parent_state_root_hash: &str,
        tree_update: TreeUpdate,
    ) -> Result<(), InternalError> {
        let conn = self.backend.connection()?;
        conn.as_inner().transaction(|| {
            let operations = MerkleRadixOperations::new(conn.as_inner());
            let TreeUpdate {
                node_changes,
                deletions,
            } = tree_update;
            // Convert the (hash, node, address) triples into insertable rows.
            let insertable_changes = node_changes
                .into_iter()
                .map(
                    |(hash, node, address)| operations::insert_nodes::InsertableNode {
                        hash,
                        node,
                        address,
                    },
                )
                .collect::<Vec<_>>();
            // Nodes must exist before the change log references them.
            operations.insert_nodes(tree_id, &insertable_changes)?;
            let additions = insertable_changes
                .iter()
                .map(|insertable| insertable.hash.as_ref())
                .collect::<Vec<_>>();
            let deletions = deletions.iter().map(|s| s.as_ref()).collect::<Vec<_>>();
            operations.update_change_log(
                tree_id,
                state_root_hash,
                parent_state_root_hash,
                &additions,
                &deletions,
            )?;
            Ok(())
        })
    }

    /// Removes the entries belonging to `state_root`, returning the removed
    /// node hashes.
    fn prune(&self, tree_id: i64, state_root: &str) -> Result<Vec<String>, InternalError> {
        let conn = self.backend.connection()?;
        let operations = MerkleRadixOperations::new(conn.as_inner());
        operations.prune_entries(tree_id, state_root)
    }
}
fn parent_and_branch(path: &str) -> (&str, &str) {
let parent_address = if !path.is_empty() {
&path[..path.len() - TOKEN_SIZE]
} else {
""
};
let path_branch = if !path.is_empty() {
&path[(path.len() - TOKEN_SIZE)..]
} else {
""
};
(parent_address, path_branch)
}
/// Serializes a node and hashes the serialized bytes.
///
/// Returns `(hash, packed_bytes)`; fails if the node cannot be serialized.
fn encode_and_hash(node: Node) -> Result<(Vec<u8>, Vec<u8>), InternalError> {
    let packed = node.into_bytes()?;
    Ok((hash(&packed), packed))
}
/// Hashes the input with SHA-512 and returns the first half (32 bytes) of the
/// digest.
///
/// The previous implementation built an intermediate `Vec` byte-by-byte and
/// then copied its first half again via `split_at` + `to_vec`; slicing the
/// digest array directly produces the identical value with one allocation.
fn hash(input: &[u8]) -> Vec<u8> {
    let digest = openssl::sha::sha512(input);
    digest[..digest.len() / 2].to_vec()
}
#[cfg(feature = "sqlite")]
#[cfg(test)]
mod test {
    use super::*;
    use crate::state::merkle::sql::backend::SqliteBackendBuilder;
    use crate::state::merkle::sql::migration::MigrationManager;

    /// Verifies that two trees on the same backend are isolated: a state root
    /// committed to one tree is not readable from the other.
    #[test]
    fn test_multiple_trees() -> Result<(), Box<dyn std::error::Error>> {
        let backend = SqliteBackendBuilder::new().with_memory_database().build()?;
        backend.run_migrations()?;
        let tree_1 = SqlMerkleStateBuilder::new()
            .with_backend(backend.clone())
            .with_tree("test-1")
            .create_tree_if_necessary()
            .build()?;
        let initial_state_root_hash = tree_1.initial_state_root_hash()?;
        // Commit a single value to tree 1 and confirm it is readable there.
        let state_change_set = StateChange::Set {
            key: "1234".to_string(),
            value: "state_value".as_bytes().to_vec(),
        };
        let new_root = tree_1
            .commit(&initial_state_root_hash, &[state_change_set])
            .unwrap();
        assert_read_value_at_address(&tree_1, &new_root, "1234", Some("state_value"));
        // A second tree on the same backend must not see tree 1's root.
        let tree_2 = SqlMerkleStateBuilder::new()
            .with_backend(backend)
            .with_tree("test-2")
            .create_tree_if_necessary()
            .build()?;
        assert!(tree_2.get(&new_root, &["1234".to_string()]).is_err());
        Ok(())
    }

    /// Verifies that `build` fails for a nonexistent tree unless
    /// `create_tree_if_necessary` was requested.
    #[test]
    fn test_build_fails_without_explicit_create() -> Result<(), Box<dyn std::error::Error>> {
        let backend = SqliteBackendBuilder::new().with_memory_database().build()?;
        backend.run_migrations()?;
        assert!(SqlMerkleStateBuilder::new()
            .with_backend(backend.clone())
            .with_tree("test-1")
            .build()
            .is_err());
        Ok(())
    }

    /// Asserts that reading `address` at `root_hash` yields `expected_value`
    /// (compared as a UTF-8 string), panicking on any read error.
    fn assert_read_value_at_address<R>(
        merkle_read: &R,
        root_hash: &str,
        address: &str,
        expected_value: Option<&str>,
    ) where
        R: Read<StateId = String, Key = String, Value = Vec<u8>>,
    {
        let value = merkle_read
            .get(&root_hash.to_string(), &[address.to_string()])
            .and_then(|mut values| {
                Ok(values.remove(address).map(|value| {
                    String::from_utf8(value).expect("could not convert bytes to string")
                }))
            });
        match value {
            Ok(value) => assert_eq!(expected_value, value.as_deref()),
            Err(err) => panic!("value at address {} produced an error: {}", address, err),
        }
    }
}