use crate::serialization::to_digest;
use crate::storage::manager::StorageManager;
use crate::{
errors::TreeNodeError,
proof_structs::{AppendOnlyProof, MembershipProof, NonMembershipProof, SingleAppendOnlyProof},
storage::{Database, SizeOf, Storable},
tree_node::*,
};
use crate::storage::types::StorageType;
use crate::{errors::*, node_label::*, tree_node::TreeNode, ARITY, *};
use async_recursion::async_recursion;
use log::{debug, info};
use std::marker::{Send, Sync};
use std::time::Instant;
use winter_crypto::Hasher;
use keyed_priority_queue::{Entry, KeyedPriorityQueue};
use std::collections::HashSet;
/// Storage key under which the single [`Azks`] record is persisted.
pub const DEFAULT_AZKS_KEY: u8 = 1u8;
/// An append-only zero-knowledge set (aZKS): the bookkeeping state for the
/// underlying Merkle tree, tracking the latest epoch and total node count.
#[derive(Debug, Eq, PartialEq, Hash)]
#[cfg_attr(
feature = "serde_serialization",
derive(serde::Deserialize, serde::Serialize)
)]
#[cfg_attr(feature = "serde_serialization", serde(bound = ""))]
pub struct Azks {
/// The most recent epoch this tree was updated at.
pub latest_epoch: u64,
/// Total number of nodes currently in the tree.
pub num_nodes: u64, }
impl SizeOf for Azks {
    /// In-memory footprint: exactly the two `u64` fields.
    fn size_of(&self) -> usize {
        2 * std::mem::size_of::<u64>()
    }
}
impl Storable for Azks {
    type StorageKey = u8;

    /// Identifies AZKS records in storage.
    fn data_type() -> StorageType {
        StorageType::Azks
    }

    /// There is only ever one AZKS record, stored under a fixed key.
    fn get_id(&self) -> u8 {
        DEFAULT_AZKS_KEY
    }

    /// Full binary key layout: `[StorageType::Azks as u8, key]` (2 bytes).
    fn get_full_binary_key_id(key: &u8) -> Vec<u8> {
        vec![StorageType::Azks as u8, *key]
    }

    /// Parses a full binary key produced by [`Self::get_full_binary_key_id`].
    ///
    /// Errors if the buffer is shorter than the 2-byte layout above or is
    /// tagged with a different storage type. The trailing key byte is ignored:
    /// the single AZKS record always lives under [`DEFAULT_AZKS_KEY`].
    fn key_from_full_binary(bin: &[u8]) -> Result<u8, String> {
        // Previously only emptiness was checked; a 1-byte buffer is also
        // malformed with respect to the 2-byte layout written above.
        if bin.len() < 2 || bin[0] != StorageType::Azks as u8 {
            return Err("Not an AZKS key".to_string());
        }
        Ok(DEFAULT_AZKS_KEY)
    }
}
// SAFETY: `Azks` contains only plain `u64` fields with no interior mutability,
// so sharing `&Azks` across threads is sound.
// NOTE(review): the compiler would auto-implement `Sync` for this type anyway;
// this manual impl looks redundant — confirm before removing.
unsafe impl Sync for Azks {}
impl Clone for Azks {
fn clone(&self) -> Self {
Self {
latest_epoch: self.latest_epoch,
num_nodes: self.num_nodes,
}
}
}
impl Azks {
pub async fn new<S: Database + Sync + Send, H: Hasher>(
    storage: &StorageManager<S>,
) -> Result<Self, AkdError> {
    // Seed the tree with an empty root at epoch 0, then hand back the
    // bookkeeping struct: one node exists (the root), no epochs yet.
    create_empty_root::<H, S>(storage, Some(0), Some(0)).await?;
    Ok(Azks {
        latest_epoch: 0,
        num_nodes: 1,
    })
}
#[cfg(test)]
pub async fn insert_leaf<S: Database + Sync + Send, H: Hasher>(
    &mut self,
    storage: &StorageManager<S>,
    node: Node<H>,
    epoch: u64,
) -> Result<(), AkdError> {
    // Test-only single insertion: materialize the leaf, then splice it into
    // the tree starting from the root, hashing along the insertion path.
    let leaf =
        create_leaf_node::<H, S>(storage, node.label, &node.hash, NodeLabel::root(), epoch)
            .await?;
    let mut root = TreeNode::get_from_storage(
        storage,
        &NodeKey(NodeLabel::root()),
        self.get_latest_epoch(),
    )
    .await?;
    root.insert_single_leaf_and_hash::<S, H>(storage, leaf, epoch, &mut self.num_nodes, None)
        .await?;
    Ok(())
}
/// Inserts a batch of leaves without the append-only usage exclusion.
pub async fn batch_insert_leaves<S: Database + Sync + Send, H: Hasher>(
    &mut self,
    storage: &StorageManager<S>,
    insertion_set: Vec<Node<H>>,
) -> Result<(), AkdError> {
    // Thin wrapper over the helper with the exclusion flag disabled.
    self.batch_insert_leaves_helper::<_, H>(storage, insertion_set, false)
        .await
}
/// Preloads into the storage cache every node the upcoming insertion will
/// touch, returning the number of records fetched.
async fn preload_nodes_for_insertion<S: Database + Sync + Send, H: Hasher>(
    &self,
    storage: &StorageManager<S>,
    insertion_set: &[Node<H>],
) -> Result<u64, AkdError> {
    // Every prefix of every inserted label is a node the insertion pass may
    // visit; preload exactly that set.
    let labels: Vec<NodeLabel> = insertion_set.iter().map(|n| n.label).collect();
    let prefixes = crate::utils::build_prefixes_set(labels.as_ref());
    self.bfs_preload_nodes::<S, H>(storage, prefixes).await
}
/// Breadth-first preloads into the storage cache every node whose label is in
/// `nodes_to_load`, starting from the root.
///
/// A loaded node's children are only followed when the node itself is in the
/// requested set, so the traversal stays inside the supplied prefix set.
/// Returns the number of nodes actually fetched from storage.
pub async fn bfs_preload_nodes<S: Database + Sync + Send, H: Hasher>(
    &self,
    storage: &StorageManager<S>,
    nodes_to_load: HashSet<NodeLabel>,
) -> Result<u64, AkdError> {
    let mut load_count: u64 = 0;
    let mut current_nodes = vec![NodeKey(NodeLabel::root())];
    while !current_nodes.is_empty() {
        // Fetch the whole frontier in a single storage round-trip.
        // (Fix: this borrow was mangled to `¤t_nodes` — `&curren` was
        // swallowed as an HTML entity — which does not compile; it must
        // borrow `current_nodes`.)
        let nodes =
            TreeNode::batch_get_from_storage(storage, &current_nodes, self.get_latest_epoch())
                .await?;
        load_count += nodes.len() as u64;
        // Build the next frontier from the children of the relevant nodes.
        current_nodes = Vec::<NodeKey>::new();
        for node in &nodes {
            if !nodes_to_load.contains(&node.label) {
                // Not a prefix we were asked to load; do not descend.
                continue;
            }
            for dir in 0..ARITY {
                if let Some(child_label) = node.get_child_label(Direction::Some(dir)) {
                    current_nodes.push(NodeKey(child_label));
                }
            }
        }
    }
    Ok(load_count)
}
/// Inserts a batch of leaves and recomputes the affected hashes.
///
/// Flow: preload the nodes the insertion will touch, bump the epoch, insert
/// every leaf (hashing deferred), then propagate hash updates bottom-up via a
/// max-priority queue keyed by node label.
pub async fn batch_insert_leaves_helper<S: Database + Sync + Send, H: Hasher>(
&mut self,
storage: &StorageManager<S>,
insertion_set: Vec<Node<H>>,
append_only_exclude_usage: bool,
) -> Result<(), AkdError> {
// Warm the cache with all prefixes of the labels being inserted, and log
// how long the preload took.
let tic = Instant::now();
let load_count = self
.preload_nodes_for_insertion::<S, H>(storage, &insertion_set)
.await?;
let toc = Instant::now() - tic;
info!(
"Preload of tree ({} objects loaded), took {} s",
load_count,
toc.as_secs_f64()
);
// All writes below happen at the new (incremented) epoch.
self.increment_epoch();
// Max-priority queue: priorities strictly decrease as nodes are pushed, so
// leaves (pushed first, higher priority) pop before the ancestors scheduled
// later with more-negative priorities.
let mut hash_q = KeyedPriorityQueue::<NodeLabel, i32>::new();
let mut priorities: i32 = 0;
let mut root_node = TreeNode::get_from_storage(
storage,
&NodeKey(NodeLabel::root()),
self.get_latest_epoch(),
)
.await?;
// Phase 1: insert every leaf; hashing is deferred to phase 2.
for node in insertion_set {
let new_leaf = create_leaf_node::<H, S>(
storage,
node.label,
&node.hash,
NodeLabel::root(),
self.latest_epoch,
)
.await?;
debug!("BEGIN insert leaf");
root_node
.insert_leaf::<_, H>(
storage,
new_leaf,
self.latest_epoch,
&mut self.num_nodes,
Some(append_only_exclude_usage),
)
.await?;
debug!("END insert leaf");
// Queue the leaf for the bottom-up hashing pass.
hash_q.push(node.label, priorities);
priorities -= 1;
}
// Phase 2: pop nodes in priority order, rehash each, and schedule its
// parent after it so parents are hashed only once children are current.
while let Some((next_node_label, _)) = hash_q.pop() {
let mut next_node: TreeNode = TreeNode::get_from_storage(
storage,
&NodeKey(next_node_label),
self.get_latest_epoch(),
)
.await?;
next_node
.update_node_hash::<_, H>(
storage,
self.latest_epoch,
Some(append_only_exclude_usage),
)
.await?;
// The root has no parent; everything else defers its parent's rehash.
if !next_node.is_root() {
// Insert the parent, or push its priority further down so it is
// processed after all of its pending children.
match hash_q.entry(next_node.parent) {
Entry::Vacant(entry) => {
entry.set_priority(priorities);
}
Entry::Occupied(entry) => {
entry.set_priority(priorities);
}
};
priorities -= 1;
}
}
Ok(())
}
/// Builds a membership proof for `label` at the latest epoch.
pub async fn get_membership_proof<S: Database + Sync + Send, H: Hasher>(
    &self,
    storage: &StorageManager<S>,
    label: NodeLabel,
    _epoch: u64,
) -> Result<MembershipProof<H>, AkdError> {
    // Delegate and discard the located node's label; only the proof matters.
    // (`_epoch` is currently unused: the proof is built at the latest epoch.)
    let (proof, _lcp_label) = self.get_membership_proof_and_node(storage, label).await?;
    Ok(proof)
}
/// Builds a non-membership proof for `label`: a membership proof for the
/// longest-common-prefix (LCP) node on `label`'s path, plus the labels and
/// hashes of all of that node's children.
#[allow(clippy::needless_range_loop)]
pub async fn get_non_membership_proof<S: Database + Sync + Send, H: Hasher>(
&self,
storage: &StorageManager<S>,
label: NodeLabel,
) -> Result<NonMembershipProof<H>, AkdError> {
// The membership walk stops at the deepest node sharing a prefix with
// `label`; that node (via its parent label) anchors the proof.
let (longest_prefix_membership_proof, lcp_node_label) =
self.get_membership_proof_and_node(storage, label).await?;
let lcp_node: TreeNode =
TreeNode::get_from_storage(storage, &NodeKey(lcp_node_label), self.get_latest_epoch())
.await?;
let longest_prefix = lcp_node.label;
// Default every child slot to the empty node; absent children keep it.
let mut longest_prefix_children = [Node {
label: EMPTY_LABEL,
hash: crate::utils::empty_node_hash::<H>(),
}; ARITY];
for i in 0..ARITY {
let child = lcp_node
.get_child_state(storage, Some(i), self.latest_epoch)
.await?;
match child {
None => {
debug!("i = {}, empty", i);
continue;
}
Some(child) => {
// Load the full child node to record its label and hash.
let unwrapped_child: TreeNode = TreeNode::get_from_storage(
storage,
&NodeKey(child.label),
self.get_latest_epoch(),
)
.await?;
debug!("Label of child {} is {:?}", i, unwrapped_child.label);
longest_prefix_children[i] = Node {
label: unwrapped_child.label,
hash: optional_child_state_hash::<H>(&Some(unwrapped_child))?,
};
}
}
}
debug!("Lcp label = {:?}", longest_prefix);
Ok(NonMembershipProof {
label,
longest_prefix,
longest_prefix_children,
longest_prefix_membership_proof,
})
}
/// Builds an append-only proof covering every single-epoch transition in
/// `[start_epoch, end_epoch)`, one [`SingleAppendOnlyProof`] per step.
pub async fn get_append_only_proof<S: Database + Sync + Send, H: Hasher>(
&self,
storage: &StorageManager<S>,
start_epoch: u64,
end_epoch: u64,
) -> Result<AppendOnlyProof<H>, AkdError> {
let mut proofs = Vec::<SingleAppendOnlyProof<H>>::new();
let mut epochs = Vec::<u64>::new();
// Root node at the latest epoch; the recursion below starts here for
// every epoch step.
let node = TreeNode::get_from_storage(
storage,
&NodeKey(NodeLabel::root()),
self.get_latest_epoch(),
)
.await?;
for ep in start_epoch..end_epoch {
// Preload the nodes this epoch step's audit pass will traverse, and
// log the preload timing.
let tic = Instant::now();
let num_records = self
.gather_audit_proof_nodes::<_, H>(vec![node.clone()], storage, ep, ep + 1)
.await?;
let toc = Instant::now() - tic;
info!(
"Preload of nodes for audit ({} objects loaded), took {} s",
num_records,
toc.as_secs_f64()
);
storage.log_metrics(log::Level::Info).await;
// Partition the tree into nodes unchanged since `ep` and leaves
// inserted in (ep, ep + 1].
let (unchanged, leaves) = self
.get_append_only_proof_helper::<_, H>(storage, node.clone(), ep, ep + 1)
.await?;
proofs.push(SingleAppendOnlyProof {
inserted: leaves,
unchanged_nodes: unchanged,
});
epochs.push(ep);
}
Ok(AppendOnlyProof { proofs, epochs })
}
/// Decides which children of `node` an audit pass over
/// `(start_epoch, end_epoch]` still needs to fetch.
fn determine_retrieval_nodes(
    node: &TreeNode,
    start_epoch: u64,
    end_epoch: u64,
) -> Vec<NodeLabel> {
    // Leaves have no children; subtrees unchanged since `start_epoch` or
    // whose earliest descendant epoch lies past `end_epoch` need no descent.
    if node.is_leaf()
        || node.get_latest_epoch() <= start_epoch
        || node.least_descendant_ep > end_epoch
    {
        return Vec::new();
    }
    // Collect whichever of the two children are present.
    [node.left_child, node.right_child]
        .into_iter()
        .flatten()
        .collect()
}
/// Breadth-first preloads every node an audit proof over
/// `(start_epoch, end_epoch]` will visit, returning the fetch count.
async fn gather_audit_proof_nodes<S: Database + Sync + Send, H: Hasher>(
    &self,
    nodes: Vec<TreeNode>,
    storage: &StorageManager<S>,
    start_epoch: u64,
    end_epoch: u64,
) -> Result<u64, AkdError> {
    // Maps a batch of nodes to the child keys the audit still needs.
    let expand = |batch: &[TreeNode]| -> Vec<NodeKey> {
        batch
            .iter()
            .flat_map(|n| Self::determine_retrieval_nodes(n, start_epoch, end_epoch))
            .map(NodeKey)
            .collect()
    };
    let mut frontier = expand(&nodes);
    let mut fetched = 0u64;
    while !frontier.is_empty() {
        // One storage round-trip per tree level.
        let batch =
            TreeNode::batch_get_from_storage(storage, &frontier, self.get_latest_epoch())
                .await?;
        fetched += batch.len() as u64;
        frontier = expand(&batch);
    }
    Ok(fetched)
}
/// Recursively partitions the subtree under `node` into nodes unchanged since
/// `start_epoch` and leaves belonging to the window ending at `end_epoch`,
/// accumulating the `(unchanged, inserted)` pair for a single-epoch proof.
#[async_recursion]
async fn get_append_only_proof_helper<S: Database + Sync + Send, H: Hasher>(
&self,
storage: &StorageManager<S>,
node: TreeNode,
start_epoch: u64,
end_epoch: u64,
) -> Result<AppendOnlyHelper<H>, AkdError> {
let mut unchanged = Vec::<Node<H>>::new();
let mut leaves = Vec::<Node<H>>::new();
// Unchanged since the window started: record the whole subtree by its
// hash (except the root, which is never listed as an unchanged node).
if node.get_latest_epoch() <= start_epoch {
if node.is_root() {
return Ok((unchanged, leaves));
}
unchanged.push(Node::<H> {
label: node.label,
hash: optional_child_state_hash::<H>(&Some(node))?,
});
return Ok((unchanged, leaves));
}
// Earliest descendant change lies past the window's end: nothing here
// belongs to this proof step.
if node.least_descendant_ep > end_epoch {
return Ok((unchanged, leaves));
}
if node.is_leaf() {
// A changed leaf inside the window is an insertion.
leaves.push(Node::<H> {
label: node.label,
hash: to_digest::<H>(&node.hash)?,
});
} else {
// Interior node changed in the window: recurse into both children
// and merge their contributions.
for child_label in [node.left_child, node.right_child] {
match child_label {
None => {
continue;
}
Some(label) => {
let child_node = TreeNode::get_from_storage(
storage,
&NodeKey(label),
self.get_latest_epoch(),
)
.await?;
let (mut inner_unchanged, mut inner_leaf) = self
.get_append_only_proof_helper::<_, H>(
storage,
child_node,
start_epoch,
end_epoch,
)
.await?;
unchanged.append(&mut inner_unchanged);
leaves.append(&mut inner_leaf);
}
}
}
}
Ok((unchanged, leaves))
}
/// Returns the root hash at the latest epoch.
pub async fn get_root_hash<S: Database + Sync + Send, H: Hasher>(
    &self,
    storage: &StorageManager<S>,
) -> Result<H::Digest, AkdError> {
    // Delegate to the epoch-specific accessor with the current epoch.
    let epoch = self.get_latest_epoch();
    self.get_root_hash_at_epoch::<_, H>(storage, epoch).await
}
/// Returns the root hash as of `epoch`.
///
/// Errors with [`TreeNodeError::NonexistentAtEpoch`] when `epoch` lies in the
/// future relative to the tree's latest epoch.
pub async fn get_root_hash_at_epoch<S: Database + Sync + Send, H: Hasher>(
&self,
storage: &StorageManager<S>,
epoch: u64,
) -> Result<H::Digest, AkdError> {
if self.latest_epoch < epoch {
return Err(AkdError::TreeNode(TreeNodeError::NonexistentAtEpoch(
NodeLabel::root(),
epoch,
)));
}
// NOTE(review): the root is loaded at the *latest* epoch, not at `epoch`;
// confirm the stored hash is the correct value for the requested
// historical epoch.
let root_node: TreeNode = TreeNode::get_from_storage(
storage,
&NodeKey(NodeLabel::root()),
self.get_latest_epoch(),
)
.await?;
hash_u8_with_label::<H>(&root_node.hash, root_node.label)
}
/// Returns the latest epoch of this aZKS.
pub fn get_latest_epoch(&self) -> u64 {
self.latest_epoch
}
/// Advances the epoch counter by one; invoked once per batch insertion.
fn increment_epoch(&mut self) {
    self.latest_epoch += 1;
}
/// Walks from the root toward `label`, collecting one layer of sibling hashes
/// per step, and returns the membership proof for the deepest node reached
/// together with the label of that node's parent on the path.
///
/// When the walk cannot reach `label` itself, it backtracks one step so the
/// proof is anchored at the longest-common-prefix node.
pub async fn get_membership_proof_and_node<S: Database + Sync + Send, H: Hasher>(
&self,
storage: &StorageManager<S>,
label: NodeLabel,
) -> Result<(MembershipProof<H>, NodeLabel), AkdError> {
let mut layer_proofs = Vec::new();
let mut curr_node: TreeNode = TreeNode::get_from_storage(
storage,
&NodeKey(NodeLabel::root()),
self.get_latest_epoch(),
)
.await?;
// Direction from the current node toward the target label; `None` means
// the label diverges from (or equals) the current node's label.
let mut dir = curr_node.label.get_dir(label);
let mut equal = label == curr_node.label;
let mut prev_node = NodeLabel::root();
while !equal && dir.is_some() {
prev_node = curr_node.label;
// Sibling slots for this layer, pre-filled with the empty node.
let mut nodes = [Node::<H> {
label: EMPTY_LABEL,
hash: crate::utils::empty_node_hash::<H>(),
}; ARITY - 1];
let mut count = 0;
let direction = dir.ok_or(AkdError::TreeNode(TreeNodeError::NoDirection(
curr_node.label,
None,
)))?;
let next_state = curr_node
.get_child_state(storage, Some(direction), self.latest_epoch)
.await?;
if next_state.is_some() {
// Record every sibling (all children except the one we descend to).
for i in 0..ARITY {
let no_direction_error =
AkdError::TreeNode(TreeNodeError::NoDirection(curr_node.label, None));
if i != dir.ok_or(no_direction_error)? {
let sibling = curr_node
.get_child_state(storage, Direction::Some(i), self.latest_epoch)
.await?;
nodes[count] = Node::<H> {
label: optional_child_state_to_label(&sibling),
hash: optional_child_state_hash::<H>(&sibling)?,
};
count += 1;
}
}
} else {
// No child in the target direction: the walk cannot continue.
break;
}
layer_proofs.push(proof_structs::LayerProof {
label: curr_node.label,
siblings: nodes,
direction: dir,
});
// Descend to the child in the target direction.
let new_curr_node: TreeNode = TreeNode::get_from_storage(
storage,
&NodeKey(curr_node.get_child(dir).unwrap().unwrap()),
self.get_latest_epoch(),
)
.await?;
curr_node = new_curr_node;
dir = curr_node.label.get_dir(label);
equal = label == curr_node.label;
}
// Target not reached: back up to the parent (the LCP node) and drop the
// layer proof recorded for the overshot step.
if !equal {
let new_curr_node: TreeNode =
TreeNode::get_from_storage(storage, &NodeKey(prev_node), self.get_latest_epoch())
.await?;
curr_node = new_curr_node;
layer_proofs.pop();
}
// Leaves commit to their last-modified epoch; interior nodes use the raw hash.
let hash_val = if curr_node.is_leaf() {
H::merge_with_int(to_digest::<H>(&curr_node.hash)?, curr_node.last_epoch)
} else {
to_digest::<H>(&curr_node.hash)?
};
Ok((
MembershipProof::<H> {
label: curr_node.label,
hash_val,
layer_proofs,
},
prev_node,
))
}
}
/// `(unchanged nodes, inserted leaves)` pair accumulated by
/// `get_append_only_proof_helper`.
type AppendOnlyHelper<H> = (Vec<Node<H>>, Vec<Node<H>>);
#[cfg(test)]
mod tests {
use super::*;
use crate::{
auditor::audit_verify,
client::{verify_membership, verify_nonmembership},
storage::memory::AsyncInMemoryDatabase,
};
use rand::{rngs::OsRng, seq::SliceRandom, RngCore};
use winter_crypto::hashers::Blake3_256;
use winter_math::fields::f128::BaseElement;
type Blake3 = Blake3_256<BaseElement>;
type Blake3Digest = <Blake3_256<winter_math::fields::f128::BaseElement> as Hasher>::Digest;
// Batch insertion must yield the same root hash as inserting the same
// leaves one at a time.
#[tokio::test]
async fn test_batch_insert_basic() -> Result<(), AkdError> {
let mut rng = OsRng;
let num_nodes = 10;
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks1 = Azks::new::<_, Blake3>(&db).await?;
azks1.increment_epoch();
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3::hash(&input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
azks1.insert_leaf::<_, Blake3>(&db, node, 1).await?;
}
let database2 = AsyncInMemoryDatabase::new();
let db2 = StorageManager::new_no_cache(&database2);
let mut azks2 = Azks::new::<_, Blake3>(&db2).await?;
azks2
.batch_insert_leaves::<_, Blake3>(&db2, insertion_set)
.await?;
assert_eq!(
azks1.get_root_hash::<_, Blake3>(&db).await?,
azks2.get_root_hash::<_, Blake3>(&db2).await?,
"Batch insert doesn't match individual insert"
);
Ok(())
}
// Same as the basic test, but the batch is shuffled before insertion:
// insertion order must not affect the resulting root hash.
#[tokio::test]
async fn test_insert_permuted() -> Result<(), AkdError> {
let num_nodes = 10;
let mut rng = OsRng;
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks1 = Azks::new::<_, Blake3>(&db).await?;
azks1.increment_epoch();
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
azks1.insert_leaf::<_, Blake3>(&db, node, 1).await?;
}
insertion_set.shuffle(&mut rng);
let database2 = AsyncInMemoryDatabase::new();
let db2 = StorageManager::new_no_cache(&database2);
let mut azks2 = Azks::new::<_, Blake3>(&db2).await?;
azks2
.batch_insert_leaves::<_, Blake3>(&db2, insertion_set)
.await?;
assert_eq!(
azks1.get_root_hash::<_, Blake3>(&db).await?,
azks2.get_root_hash::<_, Blake3>(&db2).await?,
"Batch insert doesn't match individual insert"
);
Ok(())
}
// A membership proof for an inserted label must verify against the root hash.
#[tokio::test]
async fn test_membership_proof_permuted() -> Result<(), AkdError> {
let num_nodes = 10;
let mut rng = OsRng;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
insertion_set.shuffle(&mut rng);
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set.clone())
.await?;
let proof = azks
.get_membership_proof(&db, insertion_set[0].label, 1)
.await?;
verify_membership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)?;
Ok(())
}
// Membership proof verifies on a minimal two-leaf tree with fixed labels.
#[tokio::test]
async fn test_membership_proof_small() -> Result<(), AkdError> {
let num_nodes = 2;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for i in 0..num_nodes {
let mut label_arr = [0u8; 32];
label_arr[0] = i;
let label = NodeLabel::new(label_arr, 256u32);
let input = [0u8; 32];
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set.clone())
.await?;
let proof = azks
.get_membership_proof(&db, insertion_set[0].label, 1)
.await?;
verify_membership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)?;
Ok(())
}
// A proof with a tampered hash value must fail verification.
#[tokio::test]
async fn test_membership_proof_failing() -> Result<(), AkdError> {
let num_nodes = 10;
let mut rng = OsRng;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
insertion_set.shuffle(&mut rng);
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set.clone())
.await?;
let mut proof = azks
.get_membership_proof(&db, insertion_set[0].label, 1)
.await?;
// Swap in an unrelated hash to corrupt the proof.
let hash_val = Blake3::hash(&EMPTY_VALUE);
proof = MembershipProof::<Blake3> {
label: proof.label,
hash_val,
layer_proofs: proof.layer_proofs,
};
assert!(
verify_membership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)
.is_err(),
"Membership proof does verify, despite being wrong"
);
Ok(())
}
// Non-membership proof where the longest common prefix is an interior node.
#[tokio::test]
async fn test_membership_proof_intermediate() -> Result<(), AkdError> {
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut insertion_set: Vec<Node<Blake3>> = vec![];
insertion_set.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b0), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b1 << 63), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b11 << 62), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b01 << 62), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b111 << 61), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
let mut azks = Azks::new::<_, Blake3>(&db).await?;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set)
.await?;
// 0b1111... was never inserted; its LCP with the tree is interior.
let search_label = NodeLabel::new(byte_arr_from_u64(0b1111 << 60), 64);
let proof = azks.get_non_membership_proof(&db, search_label).await?;
assert!(
verify_nonmembership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)
.is_ok(),
"Nonmembership proof does not verify"
);
Ok(())
}
// Non-membership proof on a single-leaf tree (only one of two candidate
// labels is inserted).
#[tokio::test]
async fn test_nonmembership_proof_very_small() -> Result<(), AkdError> {
let num_nodes = 2;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for i in 0..num_nodes {
let mut label_arr = [0u8; 32];
label_arr[31] = i;
let label = NodeLabel::new(label_arr, 256u32);
let mut input = [0u8; 32];
input[31] = i;
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
let search_label = insertion_set[0].label;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set.clone()[1..2].to_vec())
.await?;
let proof = azks.get_non_membership_proof(&db, search_label).await?;
verify_nonmembership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)?;
Ok(())
}
// Non-membership proof for a label withheld from a small random insertion set.
#[tokio::test]
async fn test_nonmembership_proof_small() -> Result<(), AkdError> {
let num_nodes = 3;
let mut rng = OsRng;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
let search_label = insertion_set[num_nodes - 1].label;
azks.batch_insert_leaves::<_, Blake3>(
&db,
insertion_set.clone()[0..num_nodes - 1].to_vec(),
)
.await?;
let proof = azks.get_non_membership_proof(&db, search_label).await?;
verify_nonmembership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)?;
Ok(())
}
// Non-membership proof for a withheld label on a larger random set.
#[tokio::test]
async fn test_nonmembership_proof() -> Result<(), AkdError> {
let num_nodes = 10;
let mut rng = OsRng;
let mut insertion_set: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set.push(node);
}
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
let search_label = insertion_set[num_nodes - 1].label;
azks.batch_insert_leaves::<_, Blake3>(
&db,
insertion_set.clone()[0..num_nodes - 1].to_vec(),
)
.await?;
let proof = azks.get_non_membership_proof(&db, search_label).await?;
verify_nonmembership::<Blake3>(azks.get_root_hash::<_, Blake3>(&db).await?, &proof)?;
Ok(())
}
// Append-only proof across one epoch transition with a single leaf per epoch.
#[tokio::test]
async fn test_append_only_proof_very_tiny() -> Result<(), AkdError> {
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
let mut insertion_set_1: Vec<Node<Blake3>> = vec![];
insertion_set_1.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b0), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_1)
.await?;
let start_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let mut insertion_set_2: Vec<Node<Blake3>> = vec![];
insertion_set_2.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b01 << 62), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_2)
.await?;
let end_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let proof = azks.get_append_only_proof(&db, 1, 2).await?;
audit_verify::<Blake3>(vec![start_hash, end_hash], proof).await?;
Ok(())
}
// Append-only proof across one epoch transition with two leaves per epoch.
#[tokio::test]
async fn test_append_only_proof_tiny() -> Result<(), AkdError> {
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
let mut insertion_set_1: Vec<Node<Blake3>> = vec![];
insertion_set_1.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b0), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set_1.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b1 << 63), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_1)
.await?;
let start_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let mut insertion_set_2: Vec<Node<Blake3>> = vec![];
insertion_set_2.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b1 << 62), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
insertion_set_2.push(Node {
label: NodeLabel::new(byte_arr_from_u64(0b111 << 61), 64),
hash: Blake3::hash(&EMPTY_VALUE),
});
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_2)
.await?;
let end_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let proof = azks.get_append_only_proof(&db, 1, 2).await?;
audit_verify::<Blake3>(vec![start_hash, end_hash], proof).await?;
Ok(())
}
// Append-only proof across three epochs of random batch insertions.
#[tokio::test]
async fn test_append_only_proof() -> Result<(), AkdError> {
let num_nodes = 10;
let mut rng = OsRng;
let mut insertion_set_1: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set_1.push(node);
}
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let mut azks = Azks::new::<_, Blake3>(&db).await?;
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_1.clone())
.await?;
let start_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let mut insertion_set_2: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set_2.push(node);
}
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_2.clone())
.await?;
let middle_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let mut insertion_set_3: Vec<Node<Blake3>> = vec![];
for _ in 0..num_nodes {
let label = NodeLabel::random(&mut rng);
let mut input = [0u8; 32];
rng.fill_bytes(&mut input);
let hash = Blake3Digest::new(input);
let node = Node::<Blake3> { label, hash };
insertion_set_3.push(node);
}
azks.batch_insert_leaves::<_, Blake3>(&db, insertion_set_3.clone())
.await?;
let end_hash = azks.get_root_hash::<_, Blake3>(&db).await?;
let proof = azks.get_append_only_proof(&db, 1, 3).await?;
let hashes = vec![start_hash, middle_hash, end_hash];
audit_verify::<Blake3>(hashes, proof).await?;
Ok(())
}
// Requesting a root hash at a future epoch must error with NonexistentAtEpoch.
#[tokio::test]
async fn future_epoch_throws_error() -> Result<(), AkdError> {
let database = AsyncInMemoryDatabase::new();
let db = StorageManager::new_no_cache(&database);
let azks = Azks::new::<_, Blake3>(&db).await?;
let out = azks.get_root_hash_at_epoch::<_, Blake3>(&db, 123).await;
let expected = Err::<_, AkdError>(AkdError::TreeNode(TreeNodeError::NonexistentAtEpoch(
NodeLabel::root(),
123,
)));
assert_eq!(expected, out);
Ok(())
}
}