use crate::{
index::ordered::Index,
journal::contiguous::fixed::Journal,
merkle::{self, Location},
qmdb::{
any::{ordered, value::FixedEncoding, FixedConfig as Config, FixedValue},
Error,
},
translator::Translator,
Context,
};
use commonware_cryptography::Hasher;
use commonware_utils::Array;
/// Update operation specialized to fixed-size values.
pub type Update<K, V> = ordered::Update<K, FixedEncoding<V>>;
/// Log operation specialized to fixed-size values.
pub type Operation<F, K, V> = ordered::Operation<F, K, FixedEncoding<V>>;
/// Ordered "any" database over a contiguous fixed-length journal and an
/// ordered in-memory index.
pub type Db<F, E, K, V, H, T> =
    super::Db<F, E, Journal<E, Operation<F, K, V>>, Index<T, Location<F>>, H, Update<K, V>>;
impl<F: merkle::Family, E: Context, K: Array, V: FixedValue, H: Hasher, T: Translator>
    Db<F, E, K, V, H, T>
{
    /// Open (or create) the database described by `cfg`.
    pub async fn init(context: E, cfg: Config<T>) -> Result<Self, Error<F>> {
        // Delegate with no known inactivity floor and a no-op replay callback.
        Self::init_with_callback(context, cfg, None, |_, _| {}).await
    }

    /// Open the database, optionally providing a known inactivity floor and a
    /// callback invoked during initialization (see `crate::qmdb::any::init`).
    pub(crate) async fn init_with_callback(
        context: E,
        cfg: Config<T>,
        known_inactivity_floor: Option<Location<F>>,
        callback: impl FnMut(bool, Option<Location<F>>),
    ) -> Result<Self, Error<F>> {
        crate::qmdb::any::init(context, cfg, known_inactivity_floor, callback).await
    }
}
/// Variants of [`Db`] backed by a partitioned ordered index.
pub mod partitioned {
    pub use super::{Operation, Update};
    use crate::{
        index::partitioned::ordered::Index,
        journal::contiguous::fixed::Journal,
        merkle::{self, Location},
        qmdb::{
            any::{FixedConfig as Config, FixedValue},
            Error,
        },
        translator::Translator,
        Context,
    };
    use commonware_cryptography::Hasher;
    use commonware_utils::Array;

    /// Ordered "any" database whose index is partitioned with const parameter `P`.
    pub type Db<F, E, K, V, H, T, const P: usize> = crate::qmdb::any::ordered::Db<
        F,
        E,
        Journal<E, Operation<F, K, V>>,
        Index<T, Location<F>, P>,
        H,
        Update<K, V>,
    >;

    impl<
            F: merkle::Family,
            E: Context,
            K: Array,
            V: FixedValue,
            H: Hasher,
            T: Translator,
            const P: usize,
        > Db<F, E, K, V, H, T, P>
    {
        /// Open (or create) the database described by `cfg`.
        pub async fn init(context: E, cfg: Config<T>) -> Result<Self, Error<F>> {
            Self::init_with_callback(context, cfg, None, |_, _| {}).await
        }

        /// Open the database, optionally providing a known inactivity floor
        /// and an initialization callback (see `crate::qmdb::any::init`).
        pub(crate) async fn init_with_callback(
            context: E,
            cfg: Config<T>,
            known_inactivity_floor: Option<Location<F>>,
            callback: impl FnMut(bool, Option<Location<F>>),
        ) -> Result<Self, Error<F>> {
            crate::qmdb::any::init(context, cfg, known_inactivity_floor, callback).await
        }
    }

    /// Alias with `P = 1` (presumably 256 partitions, one prefix byte — confirm
    /// against `index::partitioned`).
    pub mod p256 {
        pub type Db<F, E, K, V, H, T> = super::Db<F, E, K, V, H, T, 1>;
    }
    /// Alias with `P = 2` (presumably 65536 partitions, two prefix bytes —
    /// confirm against `index::partitioned`).
    pub mod p64k {
        pub type Db<F, E, K, V, H, T> = super::Db<F, E, K, V, H, T, 2>;
    }
}
#[cfg(test)]
pub(crate) mod test {
use super::*;
use crate::{
index::Unordered as _,
merkle::{
mmr::{self, Location, StandardHasher as Standard},
Location as GenericLocation,
},
qmdb::{
any::{
ordered::{
test::{
test_ordered_any_db_basic, test_ordered_any_db_empty,
test_ordered_any_update_collision_edge_case,
},
Update,
},
test::fixed_db_config,
},
verify_proof,
},
translator::{OneCap, TwoCap},
};
use commonware_cryptography::{sha256::Digest, Hasher, Sha256};
use commonware_macros::test_traced;
use commonware_math::algebra::Random;
use commonware_runtime::{
deterministic::{self, Context},
Metrics, Runner as _,
};
use commonware_utils::{sequence::FixedBytes, test_rng_seeded, NZU64};
use futures::StreamExt as _;
use rand::{rngs::StdRng, seq::IteratorRandom, RngCore, SeedableRng};
use std::collections::{BTreeMap, HashMap};
// Fully spelled-out alias of the ordered any-db, generic over the merkle
// family, used by the `_inner` helpers below.
type AnyTestGeneric<F> = crate::qmdb::any::db::Db<
    F,
    deterministic::Context,
    Journal<
        deterministic::Context,
        crate::qmdb::any::operation::Ordered<F, Digest, FixedEncoding<Digest>>,
    >,
    Index<TwoCap, GenericLocation<F>>,
    Sha256,
    crate::qmdb::any::operation::update::Ordered<Digest, FixedEncoding<Digest>>,
>;
// Concrete test db: MMR family, digest keys/values, two-byte translator.
pub(crate) type AnyTest =
    Db<mmr::Family, deterministic::Context, Digest, Digest, Sha256, TwoCap>;
/// Open (or reopen) a generic test db on the shared "partition" config.
async fn open_db_generic<F: crate::merkle::Family>(
    context: deterministic::Context,
) -> AnyTestGeneric<F> {
    let cfg = fixed_db_config::<TwoCap>("partition", &context);
    crate::qmdb::any::init(context, cfg, None, |_, _| {})
        .await
        .unwrap()
}
/// Open (or reopen) the concrete [`AnyTest`] db on the shared "partition" config.
async fn open_db(context: deterministic::Context) -> AnyTest {
    let cfg = fixed_db_config("partition", &context);
    AnyTest::init(context, cfg).await.unwrap()
}
/// Create a test db in a partition named from the context's RNG so repeated
/// calls don't collide.
pub(crate) async fn create_test_db(mut context: Context) -> AnyTest {
    let seed = context.next_u64();
    let cfg = fixed_db_config::<TwoCap>(&seed.to_string(), &context);
    AnyTest::init(context, cfg).await.unwrap()
}
/// Create `n` test operations using the default seed (0).
pub(crate) fn create_test_ops(n: usize) -> Vec<Operation<mmr::Family, Digest, Digest>> {
    create_test_ops_seeded(n, 0)
}
/// Deterministically generate `n` test operations from `seed`: mostly random
/// updates, with every tenth operation (after the first) deleting the most
/// recently inserted key.
pub(crate) fn create_test_ops_seeded(
    n: usize,
    seed: u64,
) -> Vec<Operation<mmr::Family, Digest, Digest>> {
    let mut rng = test_rng_seeded(seed);
    let mut last_inserted = Digest::random(&mut rng);
    let mut ops = Vec::with_capacity(n);
    for i in 0..n {
        // Guard: every tenth slot (excluding i == 0) becomes a delete.
        if i > 0 && i % 10 == 0 {
            ops.push(Operation::Delete(last_inserted));
            continue;
        }
        // Draw key, next_key, value in this exact order so the RNG stream
        // (and therefore the generated ops) matches across refactors.
        let key = Digest::random(&mut rng);
        let next_key = Digest::random(&mut rng);
        let value = Digest::random(&mut rng);
        last_inserted = key;
        ops.push(Operation::Update(Update {
            key,
            value,
            next_key,
        }));
    }
    ops
}
/// Apply `ops` to `db` as a single merkleized batch. `Update` stages a write,
/// `Delete` stages a removal; `CommitFloor` is not representable here.
pub(crate) async fn apply_ops(
    db: &mut AnyTest,
    ops: Vec<Operation<mmr::Family, Digest, Digest>>,
) {
    let mut batch = db.new_batch();
    for op in ops {
        batch = match op {
            Operation::Update(update) => batch.write(update.key, Some(update.value)),
            Operation::Delete(key) => batch.write(key, None),
            Operation::CommitFloor(_, _) => {
                panic!("CommitFloor not supported in apply_ops")
            }
        };
    }
    // Merkleize once and apply the whole batch atomically.
    let merkleized = batch.merkleize(db, None).await.unwrap();
    db.apply_batch(merkleized).await.unwrap();
}
#[test_traced("WARN")]
fn test_ordered_any_fixed_db_translated_key_collision_edge_case() {
    // OneCap translates keys down to a single byte, so key1, key2, and
    // middle_key all collide in the snapshot; this exercises span (next_key)
    // maintenance when colliding keys are inserted, deleted, and reinserted.
    let executor = deterministic::Runner::default();
    executor.start(|mut context| async move {
        let seed = context.next_u64();
        let config = fixed_db_config::<OneCap>(&seed.to_string(), &context);
        let mut db = Db::<mmr::Family, Context, FixedBytes<2>, i32, Sha256, OneCap>::init(
            context, config,
        )
        .await
        .unwrap();
        // Two keys sharing the translated prefix byte 1.
        let key1 = FixedBytes::<2>::new([1u8, 1u8]);
        let key2 = FixedBytes::<2>::new([1u8, 3u8]);
        // Probe keys ordered before, between, and after the colliding pair.
        let early_key = FixedBytes::<2>::new([0u8, 2u8]);
        let late_key = FixedBytes::<2>::new([3u8, 0u8]);
        let middle_key = FixedBytes::<2>::new([1u8, 2u8]);
        // Phase 1: insert both keys; spans must form a two-element cycle.
        let merkleized = db
            .new_batch()
            .write(key1.clone(), Some(1))
            .write(key2.clone(), Some(2))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert_eq!(db.get_all(&key1).await.unwrap().unwrap(), (1, key2.clone()));
        assert_eq!(db.get_all(&key2).await.unwrap().unwrap(), (2, key1.clone()));
        assert!(db.get_span(&key1).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&key2).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&early_key).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&middle_key).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&late_key).await.unwrap().unwrap().1.next_key == key1.clone());
        // Phase 2: delete key1; key2 becomes the sole key (self-cycle).
        let merkleized = db
            .new_batch()
            .write(key1.clone(), None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert!(db.get_span(&key1).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&key2).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&early_key).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&middle_key).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&late_key).await.unwrap().unwrap().1.next_key == key2.clone());
        // Phase 3: delete key2; db is empty, spans vanish.
        let merkleized = db
            .new_batch()
            .write(key2.clone(), None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert!(db.get_span(&key1).await.unwrap().is_none());
        assert!(db.get_span(&key2).await.unwrap().is_none());
        assert!(db.is_empty());
        // Phase 4: reinsert in the opposite order; state matches phase 1.
        let merkleized = db
            .new_batch()
            .write(key2.clone(), Some(2))
            .write(key1.clone(), Some(1))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert_eq!(db.get_all(&key1).await.unwrap().unwrap(), (1, key2.clone()));
        assert_eq!(db.get_all(&key2).await.unwrap().unwrap(), (2, key1.clone()));
        assert!(db.get_span(&key1).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&key2).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&early_key).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&middle_key).await.unwrap().unwrap().1.next_key == key2.clone());
        assert!(db.get_span(&late_key).await.unwrap().unwrap().1.next_key == key1.clone());
        // Phase 5: delete key2 this time; key1 self-cycles.
        let merkleized = db
            .new_batch()
            .write(key2.clone(), None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert!(db.get_span(&key1).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&key2).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&early_key).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&middle_key).await.unwrap().unwrap().1.next_key == key1.clone());
        assert!(db.get_span(&late_key).await.unwrap().unwrap().1.next_key == key1.clone());
        // Phase 6: delete key1; empty again.
        let merkleized = db
            .new_batch()
            .write(key1.clone(), None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert!(db.get_span(&key1).await.unwrap().is_none());
        assert!(db.get_span(&key2).await.unwrap().is_none());
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_fixed_db_build_and_authenticate() {
    // Build a db of ELEMENTS entries with overwrites and deletes, verify its
    // contents against a shadow map across a restart, then prove and verify
    // every retained operation against the root.
    let executor = deterministic::Runner::default();
    const ELEMENTS: u64 = 1000;
    executor.start(|context| async move {
        let hasher = Standard::<Sha256>::new();
        let mut db = open_db(context.with_label("first")).await;
        // Shadow map mirroring the expected db contents.
        let mut map = HashMap::<Digest, Digest>::default();
        {
            let mut batch = db.new_batch();
            // Insert ELEMENTS entries.
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&(i * 1000).to_be_bytes());
                batch = batch.write(k, Some(v));
                map.insert(k, v);
            }
            // Overwrite every third entry with a new value.
            for i in 0u64..ELEMENTS {
                if i % 3 != 0 {
                    continue;
                }
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                batch = batch.write(k, Some(v));
                map.insert(k, v);
            }
            // Delete entries where i % 7 == 1 (143 of them).
            for i in 0u64..ELEMENTS {
                if i % 7 != 1 {
                    continue;
                }
                let k = Sha256::hash(&i.to_be_bytes());
                batch = batch.write(k, None);
                map.remove(&k);
            }
            let merkleized = batch.merkleize(&db, None).await.unwrap();
            db.apply_batch(merkleized).await.unwrap();
        }
        // 1000 inserted - 143 deleted = 857 active keys; pruning to the
        // inactivity floor must not change that.
        assert_eq!(db.snapshot.items(), 857);
        db.sync().await.unwrap();
        db.prune(db.inactivity_floor_loc()).await.unwrap();
        assert_eq!(db.snapshot.items(), 857);
        let root = db.root();
        db.sync().await.unwrap();
        drop(db);
        // Reopen: root and snapshot must survive the restart.
        let mut db = open_db(context.with_label("second")).await;
        assert_eq!(root, db.root());
        assert_eq!(db.snapshot.items(), 857);
        // Fix: scan bound was the magic literal `1000`; use the ELEMENTS
        // constant so this stays in sync with the insert loops above.
        for i in 0u64..ELEMENTS {
            let k = Sha256::hash(&i.to_be_bytes());
            if let Some(map_value) = map.get(&k) {
                let Some(db_value) = db.get(&k).await.unwrap() else {
                    panic!("key not found in db: {k}");
                };
                assert_eq!(*map_value, db_value);
            } else {
                assert!(db.get(&k).await.unwrap().is_none());
            }
        }
        // Prove every operation between the pruned start and the end of the
        // log, and verify each proof against the current root.
        let max_ops = NZU64!(4);
        let end_loc = db.bounds().await.end;
        let start_loc = db.log.merkle.bounds().start;
        let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
        db.apply_batch(merkleized).await.unwrap();
        let root = db.root();
        assert!(start_loc < db.inactivity_floor_loc());
        for i in start_loc.as_u64()..end_loc.as_u64() {
            let loc = Location::from(i);
            let (proof, log) = db.proof(loc, max_ops).await.unwrap();
            assert!(verify_proof(&hasher, &proof, loc, &log, &root));
        }
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_fixed_non_empty_db_recovery() {
    // Verify that batches which are built but never merkleized/applied leave
    // no trace after restart, while committed state is always recovered.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = open_db(context.with_label("first")).await;
        const ELEMENTS: u64 = 1000;
        // Commit an initial set of ELEMENTS entries.
        {
            let mut batch = db.new_batch();
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&(i * 1000).to_be_bytes());
                batch = batch.write(k, Some(v));
            }
            let merkleized = batch.merkleize(&db, None).await.unwrap();
            db.apply_batch(merkleized).await.unwrap();
            db.commit().await.unwrap();
        }
        db.prune(db.inactivity_floor_loc()).await.unwrap();
        // Record the committed fingerprint to compare against after reopens.
        let root = db.root();
        let op_count = db.bounds().await.end;
        let inactivity_floor_loc = db.inactivity_floor_loc();
        let mut db = open_db(context.with_label("second")).await;
        assert_eq!(db.bounds().await.end, op_count);
        assert_eq!(db.inactivity_floor_loc(), inactivity_floor_loc);
        assert_eq!(db.root(), root);
        // Build a batch of writes and drop it unapplied — simulates work in
        // flight that never reached merkleize/apply.
        fn write_unapplied_batch(db: &mut AnyTest) {
            let mut batch = db.new_batch();
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                batch = batch.write(k, Some(v));
            }
            // `batch` is intentionally dropped here without being applied.
        }
        write_unapplied_batch(&mut db);
        drop(db);
        let mut db = open_db(context.with_label("third")).await;
        assert_eq!(db.bounds().await.end, op_count);
        assert_eq!(db.inactivity_floor_loc(), inactivity_floor_loc);
        assert_eq!(db.root(), root);
        write_unapplied_batch(&mut db);
        drop(db);
        let mut db = open_db(context.with_label("fourth")).await;
        assert_eq!(db.bounds().await.end, op_count);
        assert_eq!(db.root(), root);
        write_unapplied_batch(&mut db);
        write_unapplied_batch(&mut db);
        write_unapplied_batch(&mut db);
        // NOTE(review): unlike the earlier reopens, the "fourth" handle is only
        // shadowed (not dropped) before "fifth" opens — confirm intentional.
        let mut db = open_db(context.with_label("fifth")).await;
        assert_eq!(db.bounds().await.end, op_count);
        assert_eq!(db.root(), root);
        // Now actually apply and commit new values; recovery must observe them.
        {
            let mut batch = db.new_batch();
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                batch = batch.write(k, Some(v));
            }
            let merkleized = batch.merkleize(&db, None).await.unwrap();
            db.apply_batch(merkleized).await.unwrap();
            db.commit().await.unwrap();
        }
        let db = open_db(context.with_label("sixth")).await;
        assert!(db.bounds().await.end > op_count);
        assert_ne!(db.inactivity_floor_loc(), inactivity_floor_loc);
        assert_ne!(db.root(), root);
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_fixed_empty_db_recovery() {
    // Same recovery scenario as the non-empty test, but starting from a fresh
    // db: op count stays at 1 and the root unchanged until a real commit.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let db = open_db(context.with_label("first")).await;
        let root = db.root();
        let mut db = open_db(context.with_label("second")).await;
        assert_eq!(db.bounds().await.end, 1);
        assert_eq!(db.root(), root);
        // Build a batch of writes and drop it unapplied.
        fn write_unapplied_batch(db: &mut AnyTest) {
            let mut batch = db.new_batch();
            for i in 0u64..1000 {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                batch = batch.write(k, Some(v));
            }
            // `batch` is intentionally dropped here without being applied.
        }
        write_unapplied_batch(&mut db);
        drop(db);
        let mut db = open_db(context.with_label("third")).await;
        assert_eq!(db.bounds().await.end, 1);
        assert_eq!(db.root(), root);
        write_unapplied_batch(&mut db);
        drop(db);
        let mut db = open_db(context.with_label("fourth")).await;
        assert_eq!(db.bounds().await.end, 1);
        assert_eq!(db.root(), root);
        write_unapplied_batch(&mut db);
        write_unapplied_batch(&mut db);
        write_unapplied_batch(&mut db);
        // NOTE(review): the "fourth" handle is only shadowed (not dropped)
        // before "fifth" opens — confirm intentional.
        let mut db = open_db(context.with_label("fifth")).await;
        assert_eq!(db.bounds().await.end, 1);
        assert_eq!(db.root(), root);
        // A real apply + commit must finally change the recovered state.
        {
            let mut batch = db.new_batch();
            for i in 0u64..1000 {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                batch = batch.write(k, Some(v));
            }
            let merkleized = batch.merkleize(&db, None).await.unwrap();
            db.apply_batch(merkleized).await.unwrap();
            db.commit().await.unwrap();
        }
        let db = open_db(context.with_label("sixth")).await;
        assert!(db.bounds().await.end > 1);
        assert_ne!(db.root(), root);
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_fixed_db_multiple_commits_delete_gets_replayed() {
    // A delete committed after several earlier commits must remain in effect
    // after restart, and metadata must reset to None when a commit omits it.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = open_db(context.with_label("first")).await;
        let mut map = HashMap::<Digest, Digest>::default();
        const ELEMENTS: u64 = 10;
        let metadata = Sha256::hash(&42u64.to_be_bytes());
        // ELEMENTS commits of ELEMENTS writes each, all tagged with metadata.
        for j in 0u64..ELEMENTS {
            let mut batch = db.new_batch();
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&(j * 1000 + i).to_be_bytes());
                let v = Sha256::hash(&(i * 1000).to_be_bytes());
                batch = batch.write(k, Some(v));
                map.insert(k, v);
            }
            let merkleized = batch.merkleize(&db, Some(metadata)).await.unwrap();
            db.apply_batch(merkleized).await.unwrap();
            db.commit().await.unwrap();
        }
        assert_eq!(db.get_metadata().await.unwrap(), Some(metadata));
        // Delete the last key written and commit without metadata.
        let k = Sha256::hash(&((ELEMENTS - 1) * 1000 + (ELEMENTS - 1)).to_be_bytes());
        let merkleized = db
            .new_batch()
            .write(k, None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        db.commit().await.unwrap();
        assert_eq!(db.get_metadata().await.unwrap(), None);
        assert!(db.get(&k).await.unwrap().is_none());
        // One more empty commit, then restart and verify the delete replayed.
        let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
        db.apply_batch(merkleized).await.unwrap();
        db.commit().await.unwrap();
        let root = db.root();
        drop(db);
        let db = open_db(context.with_label("second")).await;
        assert_eq!(root, db.root());
        assert_eq!(db.get_metadata().await.unwrap(), None);
        assert!(db.get(&k).await.unwrap().is_none());
        db.destroy().await.unwrap();
    });
}
#[test]
fn test_ordered_any_fixed_db_historical_proof_basic() {
    // historical_proof at the current size must equal a regular proof, and
    // must keep yielding the same verifiable proof after more ops are applied.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = create_test_db(context.clone()).await;
        let ops = create_test_ops(20);
        apply_ops(&mut db, ops.clone()).await;
        let hasher = Standard::<Sha256>::new();
        let root_hash = db.root();
        let original_op_count = db.bounds().await.end;
        let max_ops = NZU64!(10);
        // At the current size, historical and regular proofs are identical.
        let (historical_proof, historical_ops) = db
            .historical_proof(original_op_count, Location::new(5), max_ops)
            .await
            .unwrap();
        let (regular_proof, regular_ops) = db.proof(Location::new(5), max_ops).await.unwrap();
        assert_eq!(historical_proof.leaves, regular_proof.leaves);
        assert_eq!(historical_proof.digests, regular_proof.digests);
        assert_eq!(historical_ops, regular_ops);
        assert!(verify_proof(
            &hasher,
            &historical_proof,
            Location::new(5),
            &historical_ops,
            &root_hash
        ));
        // Grow the db, then re-request the proof anchored at the old size:
        // it must be unchanged and still verify against the old root.
        let more_ops = create_test_ops_seeded(5, 1);
        apply_ops(&mut db, more_ops.clone()).await;
        let (historical_proof, historical_ops) = db
            .historical_proof(original_op_count, Location::new(5), NZU64!(10))
            .await
            .unwrap();
        assert_eq!(historical_proof.leaves, original_op_count);
        assert_eq!(historical_ops.len(), 10);
        assert_eq!(historical_proof.digests, regular_proof.digests);
        assert_eq!(historical_ops, regular_ops);
        assert!(verify_proof(
            &hasher,
            &historical_proof,
            Location::new(5),
            &historical_ops,
            &root_hash
        ));
        db.destroy().await.unwrap();
    });
}
#[test]
fn test_ordered_any_fixed_db_historical_proof_edge_cases() {
    // Boundary conditions for historical_proof: single-op proofs, op limits
    // clamped by the historical size, and a minimal historical size.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let hasher = Standard::<Sha256>::new();
        let ops = create_test_ops(50);
        let mut db = create_test_db(context.with_label("first")).await;
        apply_ops(&mut db, ops.clone()).await;
        let root = db.root();
        let full_size = db.bounds().await.end;
        // Single-op regular proof at location 1 verifies.
        let (proof, proof_ops) = db.proof(Location::new(1), NZU64!(1)).await.unwrap();
        assert_eq!(proof_ops.len(), 1);
        assert!(verify_proof(
            &hasher,
            &proof,
            Location::new(1),
            &proof_ops,
            &root
        ));
        // Historical proof at the full size matches the regular proof.
        let (hp, hp_ops) = db
            .historical_proof(full_size, Location::new(1), NZU64!(1))
            .await
            .unwrap();
        assert_eq!(hp.digests, proof.digests);
        assert_eq!(hp_ops, proof_ops);
        // max_ops (20) beyond the historical size (10): ops clamp to 5
        // (locations 5..10).
        let (_proof, limited_ops) = db
            .historical_proof(Location::new(10), Location::new(5), NZU64!(20))
            .await
            .unwrap();
        assert_eq!(limited_ops.len(), 5);
        // Minimal historical size barely covering the requested range.
        let (min_proof, min_ops) = db
            .historical_proof(Location::new(4), Location::new(1), NZU64!(3))
            .await
            .unwrap();
        assert_eq!(min_proof.leaves, Location::new(4));
        assert_eq!(min_ops.len(), 3);
        db.destroy().await.unwrap();
    });
}
#[test]
fn test_ordered_any_fixed_db_historical_proof_different_historical_sizes() {
    // A proof anchored at an earlier size must stay byte-identical and
    // verifiable no matter how much the db grows afterwards.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = create_test_db(context.clone()).await;
        let ops = create_test_ops(100);
        apply_ops(&mut db, ops.clone()).await;
        let hasher = Standard::<Sha256>::new();
        let root = db.root();
        let start_loc = Location::new(20);
        let max_ops = NZU64!(10);
        // Reference proof and the size it was taken at.
        let (proof, ops) = db.proof(start_loc, max_ops).await.unwrap();
        let historical_size = db.bounds().await.end;
        // Grow the db nine times; the historical proof must never change.
        for i in 1..10 {
            let more_ops = create_test_ops_seeded(100, i);
            apply_ops(&mut db, more_ops).await;
            let (historical_proof, historical_ops) = db
                .historical_proof(historical_size, start_loc, max_ops)
                .await
                .unwrap();
            assert_eq!(proof.leaves, historical_proof.leaves);
            assert_eq!(ops, historical_ops);
            assert_eq!(proof.digests, historical_proof.digests);
            assert!(verify_proof(
                &hasher,
                &historical_proof,
                start_loc,
                &historical_ops,
                &root
            ));
        }
        db.destroy().await.unwrap();
    });
}
#[test]
fn test_ordered_any_fixed_db_span_maintenance_under_collisions() {
    // Insert and delete many random keys under two translators (OneCap,
    // presumably heavier collisions than TwoCap — confirm in `translator`),
    // checking after each phase that key ordering stays consistent.
    let executor = deterministic::Runner::default();
    executor.start(|mut context| async move {
        // Insert 1000 random keys, delete them in two random halves of 500,
        // and validate get_all/get_span linkage after each phase.
        async fn insert_random<T: Translator>(
            mut db: Db<mmr::Family, Context, Digest, i32, Sha256, T>,
            rng: &mut StdRng,
        ) -> Db<mmr::Family, Context, Digest, i32, Sha256, T> {
            // Shadow map: sorted key -> expected value.
            let mut keys = BTreeMap::new();
            {
                let mut batch = db.new_batch();
                for i in 0..1000 {
                    let key = Digest::random(&mut *rng);
                    keys.insert(key, i);
                    batch = batch.write(key, Some(i));
                }
                let merkleized = batch.merkleize(&db, None).await.unwrap();
                db.apply_batch(merkleized).await.unwrap();
            }
            // Walk keys in sorted order: each key's stored next-pointer must
            // agree with the iteration order and with get_span.
            let mut iter = keys.iter();
            let first_key = iter.next().unwrap().0;
            let mut next_key = db.get_all(first_key).await.unwrap().unwrap().1;
            for (key, value) in iter {
                let (v, next) = db.get_all(key).await.unwrap().unwrap();
                assert_eq!(*value, v);
                assert_eq!(*key, next_key);
                assert_eq!(db.get_span(key).await.unwrap().unwrap().1.next_key, next);
                next_key = next;
            }
            // Delete a random half and re-verify the ordering invariant.
            {
                let mut batch = db.new_batch();
                for _ in 0..500 {
                    let key = keys.keys().choose(rng).cloned().unwrap();
                    keys.remove(&key);
                    batch = batch.write(key, None);
                }
                let merkleized = batch.merkleize(&db, None).await.unwrap();
                db.apply_batch(merkleized).await.unwrap();
            }
            let mut iter = keys.iter();
            let first_key = iter.next().unwrap().0;
            let mut next_key = db.get_all(first_key).await.unwrap().unwrap().1;
            for (key, value) in iter {
                let (v, next) = db.get_all(key).await.unwrap().unwrap();
                assert_eq!(*value, v);
                assert_eq!(*key, next_key);
                assert_eq!(db.get_span(key).await.unwrap().unwrap().1.next_key, next);
                next_key = next;
            }
            // Delete the remaining half; the db must end up empty.
            {
                let mut batch = db.new_batch();
                for _ in 0..500 {
                    let key = keys.keys().choose(rng).cloned().unwrap();
                    keys.remove(&key);
                    batch = batch.write(key, None);
                }
                let merkleized = batch.merkleize(&db, None).await.unwrap();
                db.apply_batch(merkleized).await.unwrap();
            }
            assert_eq!(keys.len(), 0);
            assert!(db.is_empty());
            assert_eq!(db.get_span(&Digest::random(&mut *rng)).await.unwrap(), None);
            db
        }
        let mut rng = StdRng::seed_from_u64(context.next_u64());
        let seed = context.next_u64();
        // Run once under OneCap...
        let config = fixed_db_config::<OneCap>(&seed.to_string(), &context);
        let db = Db::<mmr::Family, Context, Digest, i32, Sha256, OneCap>::init(
            context.with_label("first"),
            config,
        )
        .await
        .unwrap();
        let db = insert_random(db, &mut rng).await;
        db.destroy().await.unwrap();
        // ...and again under TwoCap with the same partition seed.
        let config = fixed_db_config::<TwoCap>(&seed.to_string(), &context);
        let db = Db::<mmr::Family, Context, Digest, i32, Sha256, TwoCap>::init(
            context.with_label("second"),
            config,
        )
        .await
        .unwrap();
        let db = insert_random(db, &mut rng).await;
        db.destroy().await.unwrap();
    });
}
// Test db keyed by 4-byte fixed keys (exercises non-digest key types).
type FixedDb = Db<mmr::Family, Context, FixedBytes<4>, Digest, Sha256, TwoCap>;
/// Open (or reopen) the [`FixedDb`] on its dedicated partition.
async fn open_fixed_db(context: Context) -> FixedDb {
    let cfg = fixed_db_config("fixed-bytes-partition", &context);
    FixedDb::init(context, cfg).await.unwrap()
}
/// Run the shared empty-db suite against a [`FixedDb`], reopening via
/// `open_fixed_db`.
#[test_traced("WARN")]
fn test_ordered_any_fixed_db_empty() {
    deterministic::Runner::default().start(|context| async move {
        let db = open_fixed_db(context.with_label("initial")).await;
        let reopen = |ctx| Box::pin(open_fixed_db(ctx));
        test_ordered_any_db_empty(context, db, reopen).await;
    });
}
/// Run the shared basic-operations suite against a [`FixedDb`], reopening via
/// `open_fixed_db`.
#[test_traced("WARN")]
fn test_ordered_any_fixed_db_basic() {
    deterministic::Runner::default().start(|context| async move {
        let db = open_fixed_db(context.with_label("initial")).await;
        let reopen = |ctx| Box::pin(open_fixed_db(ctx));
        test_ordered_any_db_basic(context, db, reopen).await;
    });
}
/// Run the shared update-collision edge-case suite against a [`FixedDb`].
#[test_traced("WARN")]
fn test_ordered_any_update_collision_edge_case_fixed() {
    deterministic::Runner::default().start(|context| async move {
        let db = open_fixed_db(context.clone()).await;
        test_ordered_any_update_collision_edge_case(db).await;
    });
}
/// Inserting a key ordered before an existing one must rewire both spans so
/// the two keys' next-pointers cycle through each other.
#[test_traced("WARN")]
fn test_ordered_any_batch_create_with_cycling_next_key() {
    deterministic::Runner::default().start(|context| async move {
        let mut db = open_fixed_db(context.clone()).await;
        let value = Sha256::fill(1u8);

        // First key: alone in the db, its span cycles back to itself.
        let mid_key = FixedBytes::from([0xAAu8; 4]);
        let staged = db.new_batch().write(mid_key.clone(), Some(value));
        let merkleized = staged.merkleize(&db, None).await.unwrap();
        db.apply_batch(merkleized).await.unwrap();

        // Second key sorts before the first one.
        let preceding_key = FixedBytes::from([0x55u8; 4]);
        let staged = db.new_batch().write(preceding_key.clone(), Some(value));
        let merkleized = staged.merkleize(&db, None).await.unwrap();
        db.apply_batch(merkleized).await.unwrap();

        // Both values readable; spans form a two-element cycle.
        assert_eq!(db.get(&preceding_key).await.unwrap().unwrap(), value);
        assert_eq!(db.get(&mid_key).await.unwrap().unwrap(), value);
        let (_, forward_span) = db.get_span(&preceding_key).await.unwrap().unwrap();
        assert_eq!(forward_span.next_key, mid_key);
        let (_, wrap_span) = db.get_span(&mid_key).await.unwrap().unwrap();
        assert_eq!(wrap_span.next_key, preceding_key);
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_batch_delete_middle_key() {
    // Deleting the middle key of three must splice it out of the span cycle:
    // a -> c -> a instead of a -> b -> c -> a.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = open_fixed_db(context.clone()).await;
        let key_a = FixedBytes::from([0x11u8; 4]);
        let key_b = FixedBytes::from([0x22u8; 4]);
        let key_c = FixedBytes::from([0x33u8; 4]);
        let val = Sha256::fill(1u8);
        // Insert a < b < c in one batch.
        let merkleized = db
            .new_batch()
            .write(key_a.clone(), Some(val))
            .write(key_b.clone(), Some(val))
            .write(key_c.clone(), Some(val))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        // Spans cycle a -> b -> c -> a.
        let span_a = db.get_span(&key_a).await.unwrap().unwrap();
        assert_eq!(span_a.1.next_key, key_b);
        let span_b = db.get_span(&key_b).await.unwrap().unwrap();
        assert_eq!(span_b.1.next_key, key_c);
        let span_c = db.get_span(&key_c).await.unwrap().unwrap();
        assert_eq!(span_c.1.next_key, key_a);
        // Delete the middle key.
        let merkleized = db
            .new_batch()
            .write(key_b.clone(), None)
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        assert!(db.get(&key_b).await.unwrap().is_none());
        // Cycle is now a -> c -> a.
        let span_a = db.get_span(&key_a).await.unwrap().unwrap();
        assert_eq!(span_a.1.next_key, key_c);
        let span_c = db.get_span(&key_c).await.unwrap().unwrap();
        assert_eq!(span_c.1.next_key, key_a);
        db.destroy().await.unwrap();
    });
}
#[test_traced("WARN")]
fn test_ordered_any_stream_range() {
    // stream_range must yield active keys >= the start key in order, whether
    // the start lands exactly on a key, between keys, before all, or after all.
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = open_fixed_db(context.clone()).await;
        let key1 = FixedBytes::from([0x10u8, 0x00, 0x00, 0x05]);
        let val = Sha256::fill(1u8);
        let merkleized = db
            .new_batch()
            .write(key1.clone(), Some(val))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        // Single-key db: start exactly on the key.
        {
            let mut stream = db.stream_range(key1.clone()).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key1);
            assert!(stream.next().await.is_none());
        }
        // Start just below the key.
        {
            let start = FixedBytes::from([0x10u8, 0x00, 0x00, 0x01]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key1);
            assert!(stream.next().await.is_none());
        }
        // Start just above the key: nothing.
        {
            let start = FixedBytes::from([0x10u8, 0x00, 0x00, 0xFF]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert!(stream.next().await.is_none());
        }
        // Start far below: still finds the key.
        {
            let start = FixedBytes::from([0x00u8, 0x00, 0x00, 0x01]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key1);
            assert!(stream.next().await.is_none());
        }
        // Start above everything: nothing.
        {
            let start = FixedBytes::from([0xFFu8, 0x00, 0x00, 0x11]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert!(stream.next().await.is_none());
        }
        // Add three more keys so the db holds key1 < key2_1 < key2_2 < key3.
        let key2_1 = FixedBytes::from([0x20u8, 0x00, 0x00, 0x05]);
        let key2_2 = FixedBytes::from([0x20u8, 0x00, 0x00, 0x11]);
        let key3 = FixedBytes::from([0x30u8, 0x00, 0x00, 0x05]);
        let merkleized = db
            .new_batch()
            .write(key2_1.clone(), Some(val))
            .write(key2_2.clone(), Some(val))
            .write(key3.clone(), Some(val))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
        // From key1: all four, in order.
        {
            let mut stream = db.stream_range(key1.clone()).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key1);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_1);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_2);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key3);
            assert!(stream.next().await.is_none());
        }
        // From between key1 and key2_1: last three.
        {
            let start = FixedBytes::from([0x10u8, 0x00, 0x00, 0xFF]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_1);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_2);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key3);
            assert!(stream.next().await.is_none());
        }
        // From just below key1: all four.
        {
            let start = FixedBytes::from([0x10u8, 0x00, 0x00, 0x00]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key1);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_1);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_2);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key3);
            assert!(stream.next().await.is_none());
        }
        // From between key2_1 and key2_2: last two.
        {
            let start = FixedBytes::from([0x20u8, 0x00, 0x00, 0x06]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_2);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key3);
            assert!(stream.next().await.is_none());
        }
        // Start exactly on key2_2: last two.
        {
            let mut stream = db.stream_range(key2_2.clone()).await.unwrap().boxed_local();
            assert_eq!(stream.next().await.unwrap().unwrap().0, key2_2);
            assert_eq!(stream.next().await.unwrap().unwrap().0, key3);
            assert!(stream.next().await.is_none());
        }
        // Start above key3: nothing.
        {
            let start = FixedBytes::from([0x40u8, 0x00, 0x00, 0x00]);
            let mut stream = db.stream_range(start).await.unwrap().boxed_local();
            assert!(stream.next().await.is_none());
        }
        db.destroy().await.unwrap();
    });
}
/// Deterministic test key for index `i`.
fn key(i: u64) -> Digest {
    let bytes = i.to_be_bytes();
    Sha256::hash(&bytes)
}
/// Deterministic test value for index `i`; offset by 10000 so values never
/// collide with keys produced by [`key`] for small indices.
fn val(i: u64) -> Digest {
    let offset = i + 10000;
    Sha256::hash(&offset.to_be_bytes())
}
/// Apply `writes` (value `None` == delete) as a single batch with optional
/// commit `metadata`, commit, and return the location range the batch
/// occupied in the log.
async fn commit_writes_generic<F: crate::merkle::Family>(
    db: &mut AnyTestGeneric<F>,
    writes: impl IntoIterator<Item = (Digest, Option<Digest>)>,
    metadata: Option<Digest>,
) -> std::ops::Range<GenericLocation<F>> {
    let mut batch = db.new_batch();
    for (k, v) in writes {
        batch = batch.write(k, v);
    }
    let merkleized = batch.merkleize(db, metadata).await.unwrap();
    let range = db.apply_batch(merkleized).await.unwrap();
    db.commit().await.unwrap();
    range
}
/// Applying an empty batch still changes the root, and the db remains fully
/// usable for subsequent writes.
async fn batch_empty_inner<F: crate::merkle::Family>(context: deterministic::Context) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;
    let root_before = db.root();
    let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
    db.apply_batch(merkleized).await.unwrap();
    // Even with zero writes, applying the batch must move the root.
    assert_ne!(db.root(), root_before);
    commit_writes_generic(&mut db, [(key(0), Some(val(0)))], None).await;
    assert_eq!(db.get(&key(0)).await.unwrap(), Some(val(0)));
    db.destroy().await.unwrap();
}
/// Metadata set by a commit is readable afterwards and resets to None once a
/// later batch merkleized without metadata is applied.
async fn batch_metadata_inner<F: crate::merkle::Family>(context: deterministic::Context) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;
    let metadata = val(42);
    commit_writes_generic(&mut db, [(key(0), Some(val(0)))], Some(metadata)).await;
    assert_eq!(db.get_metadata().await.unwrap(), Some(metadata));
    // A metadata-less batch clears the stored metadata.
    let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
    db.apply_batch(merkleized).await.unwrap();
    assert_eq!(db.get_metadata().await.unwrap(), None);
    db.destroy().await.unwrap();
}
/// Batch reads see through to the db for untouched keys and reflect the
/// batch's own staged writes/overwrites/deletes for touched ones.
async fn batch_get_read_through_inner<F: crate::merkle::Family>(
    context: deterministic::Context,
) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;
    let ka = key(0);
    let va = val(0);
    commit_writes_generic(&mut db, [(ka, Some(va))], None).await;
    let kb = key(1);
    let vb = val(1);
    let kc = key(2);
    let mut batch = db.new_batch();
    // Untouched key: read-through to committed state.
    assert_eq!(batch.get(&ka, &db).await.unwrap(), Some(va));
    batch = batch.write(kb, Some(vb));
    // Staged write is visible; a never-written key is None.
    assert_eq!(batch.get(&kb, &db).await.unwrap(), Some(vb));
    assert_eq!(batch.get(&kc, &db).await.unwrap(), None);
    let va2 = val(100);
    batch = batch.write(ka, Some(va2));
    // Staged overwrite shadows the committed value.
    assert_eq!(batch.get(&ka, &db).await.unwrap(), Some(va2));
    batch = batch.write(ka, None);
    // Staged delete shadows both the overwrite and the committed value.
    assert_eq!(batch.get(&ka, &db).await.unwrap(), None);
    db.destroy().await.unwrap();
}
/// `get` on a merkleized (but not yet applied) batch reflects staged writes,
/// staged deletes, and reads through for keys the batch never touched.
async fn batch_get_on_merkleized_inner<F: crate::merkle::Family>(
    context: deterministic::Context,
) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;
    let updated = key(0);
    let deleted = key(1);
    let inserted = key(2);
    let untouched = key(3);
    commit_writes_generic(
        &mut db,
        [(updated, Some(val(0))), (deleted, Some(val(1)))],
        None,
    )
    .await;

    // Stage an update, a delete, and an insert in one chained batch.
    let new_val = val(100);
    let inserted_val = val(2);
    let merkleized = db
        .new_batch()
        .write(updated, Some(new_val))
        .write(deleted, None)
        .write(inserted, Some(inserted_val))
        .merkleize(&db, None)
        .await
        .unwrap();

    assert_eq!(merkleized.get(&updated, &db).await.unwrap(), Some(new_val));
    assert_eq!(merkleized.get(&deleted, &db).await.unwrap(), None);
    assert_eq!(
        merkleized.get(&inserted, &db).await.unwrap(),
        Some(inserted_val)
    );
    // A key absent from both batch and db resolves to `None`.
    assert_eq!(merkleized.get(&untouched, &db).await.unwrap(), None);
    db.destroy().await.unwrap();
}
/// A child batch stacked on a merkleized parent sees the parent's staged
/// state via read-through and layers its own writes/deletes on top.
async fn batch_stacked_get_inner<F: crate::merkle::Family>(context: deterministic::Context) {
    let db = open_db_generic::<F>(context.with_label("db")).await;
    let ka = key(0);
    let kb = key(1);
    // Stage one write in the parent and merkleize it (never applied to db).
    let mut batch = db.new_batch();
    batch = batch.write(ka, Some(val(0)));
    let merkleized = batch.merkleize(&db, None).await.unwrap();
    // Stack a child batch on the merkleized parent.
    let mut child = merkleized.new_batch::<Sha256>();
    // Read-through: the child sees the parent's staged value for `ka`.
    assert_eq!(child.get(&ka, &db).await.unwrap(), Some(val(0)));
    // A child write shadows the parent's value...
    child = child.write(ka, Some(val(100)));
    assert_eq!(child.get(&ka, &db).await.unwrap(), Some(val(100)));
    // ...a key written only in the child is visible...
    child = child.write(kb, Some(val(1)));
    assert_eq!(child.get(&kb, &db).await.unwrap(), Some(val(1)));
    // ...and a child delete hides the parent's value entirely.
    child = child.write(ka, None);
    assert_eq!(child.get(&ka, &db).await.unwrap(), None);
    // Release in stack order (child first, then parent) before destroying
    // the db — NOTE(review): order appears intentional; confirm the child
    // borrows the merkleized parent.
    drop(child);
    drop(merkleized);
    db.destroy().await.unwrap();
}
/// Deleting a committed key in a parent batch and recreating it in a stacked
/// child resolves to the child's value once the child batch is applied.
async fn batch_stacked_delete_recreate_inner<F: crate::merkle::Family>(
    context: deterministic::Context,
) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;
    let ka = key(0);
    commit_writes_generic(&mut db, [(ka, Some(val(0)))], None).await;

    // Parent stages a delete of the committed key.
    let parent = db.new_batch().write(ka, None);
    let parent_m = parent.merkleize(&db, None).await.unwrap();
    assert_eq!(parent_m.get(&ka, &db).await.unwrap(), None);

    // Child (stacked on the parent) recreates the key with a new value.
    let child = parent_m.new_batch::<Sha256>().write(ka, Some(val(200)));
    let child_m = child.merkleize(&db, None).await.unwrap();
    assert_eq!(child_m.get(&ka, &db).await.unwrap(), Some(val(200)));

    // Applying the child persists the recreated value.
    db.apply_batch(child_m).await.unwrap();
    assert_eq!(db.get(&ka).await.unwrap(), Some(val(200)));
    db.destroy().await.unwrap();
}
/// `apply_batch` reports the location range the batch occupied: the first
/// batch starts at location 1, and consecutive batches produce adjacent
/// (end == next start) ranges.
async fn batch_apply_returns_range_inner<F: crate::merkle::Family>(
    context: deterministic::Context,
) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;

    // First batch of five writes; feed the iterator straight in (no Vec).
    let range1 =
        commit_writes_generic(&mut db, (0..5).map(|i| (key(i), Some(val(i)))), None).await;
    assert_eq!(range1.start, GenericLocation::<F>::new(1));
    // Five writes span at least six locations (per this assertion; the
    // extra operations presumably come from the commit — confirm upstream).
    assert!(range1.end.saturating_sub(*range1.start) >= 6);

    // The next batch begins exactly where the previous one ended.
    let range2 =
        commit_writes_generic(&mut db, (5..10).map(|i| (key(i), Some(val(i)))), None).await;
    assert_eq!(range2.start, range1.end);
    db.destroy().await.unwrap();
}
/// The root computed on a merkleized batch before it is applied matches the
/// db's actual root after application.
async fn batch_speculative_root_inner<F: crate::merkle::Family>(
    context: deterministic::Context,
) {
    let mut db = open_db_generic::<F>(context.with_label("db")).await;

    // Stage ten writes in a single batch via a fold.
    let staged = (0..10).fold(db.new_batch(), |b, i| b.write(key(i), Some(val(i))));
    let merkleized = staged.merkleize(&db, None).await.unwrap();

    // Capture the speculative root before the batch touches the db...
    let speculative = merkleized.root();
    db.apply_batch(merkleized).await.unwrap();
    // ...and verify it agrees with the committed structure afterwards.
    assert_eq!(db.root(), speculative);
    db.destroy().await.unwrap();
}
/// Repeatedly overwriting one key, committing, and reopening must replay the
/// log into a single snapshot entry for that key and reproduce the root.
async fn log_replay_inner<F: crate::merkle::Family>(context: deterministic::Context) {
    let db_context = context.with_label("db");
    let mut db = open_db_generic::<F>(db_context.clone()).await;

    // Apply UPDATES single-write batches against one key; nothing is
    // committed until after the loop.
    const UPDATES: u64 = 100;
    let k = Sha256::hash(&UPDATES.to_be_bytes());
    for i in 0u64..UPDATES {
        let v = Sha256::hash(&(i * 1000).to_be_bytes());
        let merkleized = db
            .new_batch()
            .write(k, Some(v))
            .merkleize(&db, None)
            .await
            .unwrap();
        db.apply_batch(merkleized).await.unwrap();
    }
    db.commit().await.unwrap();
    let root = db.root();
    drop(db);

    // Reopen: replay must collapse the overwrites into exactly one snapshot
    // entry for `k` and reproduce the pre-drop root.
    let db: AnyTestGeneric<F> = open_db_generic::<F>(db_context.with_label("reopened")).await;
    // Count the iterator directly instead of cloning + collecting into a
    // Vec just to take its length (clippy::needless_collect).
    assert_eq!(db.snapshot.get(&k).count(), 1);
    assert_eq!(db.root(), root);
    db.destroy().await.unwrap();
}
/// Regression test for the ordered next_key linkage: deleting key K in a
/// child batch — after the parent batch updated the same K — must not leave
/// B's next_key pointing at the now-deleted K.
#[test_traced("INFO")]
fn test_ordered_child_delete_colliding_key_corrupts_next_key() {
    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        let mut db = open_db(context.with_label("db")).await;
        // B and K share the leading 0xAA 0xBB bytes, so they are adjacent in
        // key order (and presumably collide under the index translator —
        // TODO confirm the translator's prefix length).
        let key_b = Digest::from({
            let mut b = [0u8; 32];
            b[0] = 0xAA;
            b[1] = 0xBB;
            b[2] = 0x01;
            b
        });
        let key_k = Digest::from({
            let mut k = [0u8; 32];
            k[0] = 0xAA;
            k[1] = 0xBB;
            k[2] = 0x02;
            k
        });
        // A is an unrelated key that sorts well before B and K.
        let key_a = Digest::from({
            let mut a = [0u8; 32];
            a[0] = 0x11;
            a[1] = 0x22;
            a
        });
        commit_writes_generic(
            &mut db,
            [
                (key_a, Some(val(1))),
                (key_b, Some(val(2))),
                (key_k, Some(val(3))),
            ],
            None,
        )
        .await;
        // Padding keys (0xCC prefix) sort after K; once K is deleted, B's
        // next_key should advance to the first of them (asserted below).
        let mut padding_keys = Vec::new();
        for i in 0..20u64 {
            let pk = Digest::from({
                let mut p = [0u8; 32];
                p[0] = 0xCC;
                p[1] = i as u8;
                p
            });
            padding_keys.push(pk);
            commit_writes_generic(&mut db, [(pk, Some(val(100 + i)))], None).await;
        }
        // Sanity check: before the delete, B links directly to K.
        let (_, next_b) = db.get_all(&key_b).await.unwrap().unwrap();
        assert_eq!(next_b, key_k, "B.next_key should be K before delete");
        // Parent batch updates K; the stacked child batch deletes the same K.
        let mut parent = db.new_batch();
        parent = parent.write(key_k, Some(val(4)));
        let parent_m = parent.merkleize(&db, None).await.unwrap();
        let mut child = parent_m.new_batch::<Sha256>();
        child = child.write(key_k, None);
        let child_m = child.merkleize(&db, None).await.unwrap();
        db.apply_batch(child_m).await.unwrap();
        db.commit().await.unwrap();
        assert!(db.get(&key_k).await.unwrap().is_none());
        // The delete must have repaired B's linkage: next_key may no longer
        // reference the deleted K and should now be the first padding key.
        let (_, b_next) = db.get_all(&key_b).await.unwrap().unwrap();
        assert_ne!(
            b_next, key_k,
            "B.next_key still points to deleted K (corrupted next_key ring)"
        );
        assert_eq!(b_next, padding_keys[0]);
        db.destroy().await.unwrap();
    });
}
// Test entrypoints running each shared scenario against the MMR merkle
// family on the deterministic runtime.

#[test_traced("INFO")]
fn test_ordered_fixed_batch_empty() {
    deterministic::Runner::default().start(batch_empty_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_metadata() {
    deterministic::Runner::default().start(batch_metadata_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_get_read_through() {
    deterministic::Runner::default().start(batch_get_read_through_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_get_on_merkleized() {
    deterministic::Runner::default().start(batch_get_on_merkleized_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_stacked_get() {
    deterministic::Runner::default().start(batch_stacked_get_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_stacked_delete_recreate() {
    deterministic::Runner::default().start(batch_stacked_delete_recreate_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_apply_returns_range() {
    deterministic::Runner::default().start(batch_apply_returns_range_inner::<mmr::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_speculative_root() {
    deterministic::Runner::default().start(batch_speculative_root_inner::<mmr::Family>);
}

#[test_traced("WARN")]
fn test_ordered_any_fixed_db_log_replay() {
    deterministic::Runner::default().start(log_replay_inner::<mmr::Family>);
}
// Test entrypoints running the same shared scenarios against the MMB merkle
// family on the deterministic runtime.

#[test_traced("INFO")]
fn test_ordered_fixed_batch_empty_mmb() {
    deterministic::Runner::default().start(batch_empty_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_metadata_mmb() {
    deterministic::Runner::default().start(batch_metadata_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_get_read_through_mmb() {
    deterministic::Runner::default()
        .start(batch_get_read_through_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_get_on_merkleized_mmb() {
    deterministic::Runner::default()
        .start(batch_get_on_merkleized_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_stacked_get_mmb() {
    deterministic::Runner::default().start(batch_stacked_get_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_stacked_delete_recreate_mmb() {
    deterministic::Runner::default()
        .start(batch_stacked_delete_recreate_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_apply_returns_range_mmb() {
    deterministic::Runner::default()
        .start(batch_apply_returns_range_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("INFO")]
fn test_ordered_fixed_batch_speculative_root_mmb() {
    deterministic::Runner::default()
        .start(batch_speculative_root_inner::<crate::merkle::mmb::Family>);
}

#[test_traced("WARN")]
fn test_ordered_any_fixed_db_log_replay_mmb() {
    deterministic::Runner::default().start(log_replay_inner::<crate::merkle::mmb::Family>);
}
// Accepts any `Send` value; used purely as a compile-time probe.
fn is_send<T: Send>(_: T) {}
// Compile-time assertion that the futures returned by these inherent
// (non-trait) methods are `Send`; never invoked at runtime.
#[allow(dead_code)]
fn assert_non_trait_futures_are_send(db: &mut AnyTest, key: Digest) {
    is_send(db.get_all(&key));
    is_send(db.get_with_loc(&key));
    is_send(db.get_span(&key));
}
/// Wires `AnyTest` into the shared sync test suite by exposing its log
/// components and pinned-node access.
mod from_sync_testable {
    use super::*;
    use crate::{
        merkle::{
            mmr::{self, journaled::Mmr},
            Family as _,
        },
        qmdb::any::sync::tests::FromSyncTestable,
    };
    use futures::future::join_all;
    // Journaled MMR over the deterministic runtime with `Digest` elements.
    type TestMmr = Mmr<deterministic::Context, Digest>;
    impl FromSyncTestable for AnyTest {
        type Mmr = TestMmr;
        // Decompose the db's log into its MMR and journal halves.
        fn into_log_components(self) -> (Self::Mmr, Self::Journal) {
            (self.log.merkle, self.log.journal)
        }
        // Concurrently fetch the nodes that `nodes_to_pin` reports for `loc`;
        // panics (unwrap) if any node is missing from the MMR.
        async fn pinned_nodes_at(&self, loc: Location) -> Vec<Digest> {
            join_all(mmr::Family::nodes_to_pin(loc).map(|p| self.log.merkle.get_node(p)))
                .await
                .into_iter()
                .map(|n| n.unwrap().unwrap())
                .collect()
        }
    }
}
}