// commonware_storage/qmdb/any/unordered/fixed.rs

//! An Any database implementation with an unordered key space and fixed-size values.
3use crate::{
4    index::unordered::Index,
5    journal::contiguous::fixed::Journal,
6    merkle::{self, Location},
7    qmdb::{
8        any::{unordered, value::FixedEncoding, FixedConfig as Config, FixedValue},
9        Error,
10    },
11    translator::Translator,
12    Context,
13};
14use commonware_cryptography::Hasher;
15use commonware_utils::Array;
16
17pub type Update<K, V> = unordered::Update<K, FixedEncoding<V>>;
18pub type Operation<F, K, V> = unordered::Operation<F, K, FixedEncoding<V>>;
19
20/// A key-value QMDB based on an authenticated log of operations, supporting authentication of any
21/// value ever associated with a key.
22pub type Db<F, E, K, V, H, T> =
23    super::Db<F, E, Journal<E, Operation<F, K, V>>, Index<T, Location<F>>, H, Update<K, V>>;
24
25impl<F: merkle::Family, E: Context, K: Array, V: FixedValue, H: Hasher, T: Translator>
26    Db<F, E, K, V, H, T>
27{
28    /// Returns a [Db] QMDB initialized from `cfg`. Uncommitted log operations will be
29    /// discarded and the state of the db will be as of the last committed operation.
30    pub async fn init(context: E, cfg: Config<T>) -> Result<Self, Error<F>> {
31        Self::init_with_callback(context, cfg, None, |_, _| {}).await
32    }
33
34    /// Initialize the DB, invoking `callback` for each operation processed during recovery.
35    ///
36    /// If `known_inactivity_floor` is provided and is less than the log's actual inactivity floor,
37    /// `callback` is invoked with `(false, None)` for each location in the gap. Then, as the snapshot
38    /// is built from the log, `callback` is invoked for each operation with its activity status and
39    /// previous location (if any).
40    pub(crate) async fn init_with_callback(
41        context: E,
42        cfg: Config<T>,
43        known_inactivity_floor: Option<Location<F>>,
44        callback: impl FnMut(bool, Option<Location<F>>),
45    ) -> Result<Self, Error<F>> {
46        crate::qmdb::any::init(context, cfg, known_inactivity_floor, callback).await
47    }
48}
49
/// Partitioned index variants that divide the key space into `2^(P*8)` partitions.
///
/// See [partitioned::Db] for the generic type, or use the convenience aliases:
/// - [partitioned::p256::Db] for 256 partitions (P=1)
/// - [partitioned::p64k::Db] for 65,536 partitions (P=2)
55pub mod partitioned {
56    pub use super::{Operation, Update};
57    use crate::{
58        index::partitioned::unordered::Index,
59        journal::contiguous::fixed::Journal,
60        merkle::{self, Location},
61        qmdb::{
62            any::{FixedConfig as Config, FixedValue},
63            Error,
64        },
65        translator::Translator,
66        Context,
67    };
68    use commonware_cryptography::Hasher;
69    use commonware_utils::Array;
70
71    /// A key-value QMDB with a partitioned snapshot index.
72    ///
73    /// This is the partitioned variant of [super::Db]. The const generic `P` specifies
74    /// the number of prefix bytes used for partitioning:
75    /// - `P = 1`: 256 partitions
76    /// - `P = 2`: 65,536 partitions
77    ///
78    /// Use partitioned indices when you have a large number of keys (>> 2^(P*8)) and memory
79    /// efficiency is important. Keys should be uniformly distributed across the prefix space.
80    pub type Db<F, E, K, V, H, T, const P: usize> = crate::qmdb::any::unordered::Db<
81        F,
82        E,
83        Journal<E, Operation<F, K, V>>,
84        Index<T, Location<F>, P>,
85        H,
86        Update<K, V>,
87    >;
88
89    impl<
90            F: merkle::Family,
91            E: Context,
92            K: Array,
93            V: FixedValue,
94            H: Hasher,
95            T: Translator,
96            const P: usize,
97        > Db<F, E, K, V, H, T, P>
98    {
99        /// Returns a [Db] QMDB initialized from `cfg`. Uncommitted log operations will be
100        /// discarded and the state of the db will be as of the last committed operation.
101        pub async fn init(context: E, cfg: Config<T>) -> Result<Self, Error<F>> {
102            Self::init_with_callback(context, cfg, None, |_, _| {}).await
103        }
104
105        /// Initialize the DB, invoking `callback` for each operation processed during recovery.
106        ///
107        /// If `known_inactivity_floor` is provided and is less than the log's actual inactivity floor,
108        /// `callback` is invoked with `(false, None)` for each location in the gap. Then, as the snapshot
109        /// is built from the log, `callback` is invoked for each operation with its activity status and
110        /// previous location (if any).
111        pub(crate) async fn init_with_callback(
112            context: E,
113            cfg: Config<T>,
114            known_inactivity_floor: Option<Location<F>>,
115            callback: impl FnMut(bool, Option<Location<F>>),
116        ) -> Result<Self, Error<F>> {
117            crate::qmdb::any::init(context, cfg, known_inactivity_floor, callback).await
118        }
119    }
120
121    /// Convenience type aliases for 256 partitions (P=1).
122    pub mod p256 {
123        /// Fixed-value DB with 256 partitions.
124        pub type Db<F, E, K, V, H, T> = super::Db<F, E, K, V, H, T, 1>;
125    }
126
127    /// Convenience type aliases for 65,536 partitions (P=2).
128    pub mod p64k {
129        /// Fixed-value DB with 65,536 partitions.
130        pub type Db<F, E, K, V, H, T> = super::Db<F, E, K, V, H, T, 2>;
131    }
132}
133
// pub(crate) so helpers can be used by the sync module.
135#[cfg(test)]
136pub(crate) mod test {
137    use super::*;
138    use crate::{
139        index::Unordered as _,
140        merkle::{
141            mmr::{self, Location, StandardHasher},
142            Location as GenericLocation,
143        },
144        qmdb::{
145            any::{
146                test::fixed_db_config,
147                unordered::{fixed::Operation, Update},
148            },
149            verify_proof,
150        },
151        translator::TwoCap,
152    };
153    use commonware_cryptography::{sha256::Digest, Hasher, Sha256};
154    use commonware_macros::test_traced;
155    use commonware_math::algebra::Random;
156    use commonware_runtime::{
157        deterministic::{self, Context},
158        Metrics, Runner as _,
159    };
160    use commonware_utils::{test_rng_seeded, NZU64};
161    use rand::RngCore;
162
163    /// A generic type alias for an Any database parameterized by merkle family.
164    type AnyTestGeneric<F> = crate::qmdb::any::db::Db<
165        F,
166        deterministic::Context,
167        Journal<
168            deterministic::Context,
169            crate::qmdb::any::operation::Unordered<F, Digest, FixedEncoding<Digest>>,
170        >,
171        Index<TwoCap, GenericLocation<F>>,
172        Sha256,
173        crate::qmdb::any::operation::update::Unordered<Digest, FixedEncoding<Digest>>,
174    >;
175
176    /// A type alias for the concrete [Db] type used in these unit tests.
177    pub(crate) type AnyTest =
178        Db<mmr::Family, deterministic::Context, Digest, Digest, Sha256, TwoCap>;
179
180    /// Return an `Any` database initialized with a fixed config, generic over merkle family.
181    async fn open_db_generic<F: crate::merkle::Family>(
182        context: deterministic::Context,
183    ) -> AnyTestGeneric<F> {
184        let cfg = fixed_db_config::<TwoCap>("partition", &context);
185        crate::qmdb::any::init(context, cfg, None, |_, _| {})
186            .await
187            .unwrap()
188    }
189
190    /// Create a test database with unique partition names
191    pub(crate) async fn create_test_db(mut context: Context) -> AnyTest {
192        let seed = context.next_u64();
193        let cfg = fixed_db_config::<TwoCap>(&seed.to_string(), &context);
194        AnyTest::init(context, cfg).await.unwrap()
195    }
196
197    /// Create n random operations using the default seed (0). Some portion of
198    /// the updates are deletes. create_test_ops(n) is a prefix of
199    /// create_test_ops(n') for n < n'.
200    pub(crate) fn create_test_ops(n: usize) -> Vec<Operation<mmr::Family, Digest, Digest>> {
201        create_test_ops_seeded(n, 0)
202    }
203
204    /// Create n random operations using a specific seed.
205    /// Use different seeds when you need non-overlapping keys in the same test.
206    pub(crate) fn create_test_ops_seeded(
207        n: usize,
208        seed: u64,
209    ) -> Vec<Operation<mmr::Family, Digest, Digest>> {
210        let mut rng = test_rng_seeded(seed);
211        let mut prev_key = Digest::random(&mut rng);
212        let mut ops = Vec::new();
213        for i in 0..n {
214            let key = Digest::random(&mut rng);
215            if i % 10 == 0 && i > 0 {
216                ops.push(Operation::Delete(prev_key));
217            } else {
218                let value = Digest::random(&mut rng);
219                ops.push(Operation::Update(Update(key, value)));
220                prev_key = key;
221            }
222        }
223        ops
224    }
225
226    /// Applies the given operations to the database.
227    pub(crate) async fn apply_ops(
228        db: &mut AnyTest,
229        ops: Vec<Operation<mmr::Family, Digest, Digest>>,
230    ) {
231        let mut batch = db.new_batch();
232        for op in ops {
233            match op {
234                Operation::Update(Update(key, value)) => {
235                    batch = batch.write(key, Some(value));
236                }
237                Operation::Delete(key) => {
238                    batch = batch.write(key, None);
239                }
240                Operation::CommitFloor(_, _) => {
241                    panic!("CommitFloor not supported in apply_ops");
242                }
243            }
244        }
245        let merkleized = batch.merkleize(db, None).await.unwrap();
246        db.apply_batch(merkleized).await.unwrap();
247    }
248
249    /// Helper: commit a batch of key-value writes and return the applied range (generic).
250    async fn commit_writes_generic<F: crate::merkle::Family>(
251        db: &mut AnyTestGeneric<F>,
252        writes: impl IntoIterator<Item = (Digest, Option<Digest>)>,
253        metadata: Option<Digest>,
254    ) -> std::ops::Range<GenericLocation<F>> {
255        let mut batch = db.new_batch();
256        for (k, v) in writes {
257            batch = batch.write(k, v);
258        }
259        let merkleized = batch.merkleize(db, metadata).await.unwrap();
260        let range = db.apply_batch(merkleized).await.unwrap();
261        db.commit().await.unwrap();
262        range
263    }
264
265    fn key(i: u64) -> Digest {
266        Sha256::hash(&i.to_be_bytes())
267    }
268
269    fn val(i: u64) -> Digest {
270        Sha256::hash(&(i + 10000).to_be_bytes())
271    }
272
    // -- Generic inner functions for parameterized batch tests --
274
275    async fn batch_empty_inner<F: crate::merkle::Family>(context: deterministic::Context) {
276        let mut db = open_db_generic::<F>(context.with_label("db")).await;
277        let root_before = db.root();
278
279        let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
280        db.apply_batch(merkleized).await.unwrap();
281        assert_ne!(db.root(), root_before);
282
283        // DB should still be functional.
284        commit_writes_generic(&mut db, [(key(0), Some(val(0)))], None).await;
285        assert_eq!(db.get(&key(0)).await.unwrap(), Some(val(0)));
286
287        db.destroy().await.unwrap();
288    }
289
290    async fn batch_metadata_inner<F: crate::merkle::Family>(context: deterministic::Context) {
291        let mut db = open_db_generic::<F>(context.with_label("db")).await;
292        let metadata = val(42);
293
294        commit_writes_generic(&mut db, [(key(0), Some(val(0)))], Some(metadata)).await;
295        assert_eq!(db.get_metadata().await.unwrap(), Some(metadata));
296
297        let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
298        db.apply_batch(merkleized).await.unwrap();
299        assert_eq!(db.get_metadata().await.unwrap(), None);
300
301        db.destroy().await.unwrap();
302    }
303
304    async fn batch_get_read_through_inner<F: crate::merkle::Family>(
305        context: deterministic::Context,
306    ) {
307        let mut db = open_db_generic::<F>(context.with_label("db")).await;
308
309        let ka = key(0);
310        let va = val(0);
311        commit_writes_generic(&mut db, [(ka, Some(va))], None).await;
312
313        let kb = key(1);
314        let vb = val(1);
315        let kc = key(2);
316
317        let mut batch = db.new_batch();
318        assert_eq!(batch.get(&ka, &db).await.unwrap(), Some(va));
319
320        batch = batch.write(kb, Some(vb));
321        assert_eq!(batch.get(&kb, &db).await.unwrap(), Some(vb));
322        assert_eq!(batch.get(&kc, &db).await.unwrap(), None);
323
324        let va2 = val(100);
325        batch = batch.write(ka, Some(va2));
326        assert_eq!(batch.get(&ka, &db).await.unwrap(), Some(va2));
327
328        batch = batch.write(ka, None);
329        assert_eq!(batch.get(&ka, &db).await.unwrap(), None);
330
331        db.destroy().await.unwrap();
332    }
333
334    async fn batch_get_on_merkleized_inner<F: crate::merkle::Family>(
335        context: deterministic::Context,
336    ) {
337        let mut db = open_db_generic::<F>(context.with_label("db")).await;
338
339        let ka = key(0);
340        let kb = key(1);
341        let kc = key(2);
342        let kd = key(3);
343
344        commit_writes_generic(&mut db, [(ka, Some(val(0))), (kb, Some(val(1)))], None).await;
345
346        let va2 = val(100);
347        let vc = val(2);
348        let merkleized = db
349            .new_batch()
350            .write(ka, Some(va2))
351            .write(kb, None)
352            .write(kc, Some(vc))
353            .merkleize(&db, None)
354            .await
355            .unwrap();
356
357        assert_eq!(merkleized.get(&ka, &db).await.unwrap(), Some(va2));
358        assert_eq!(merkleized.get(&kb, &db).await.unwrap(), None);
359        assert_eq!(merkleized.get(&kc, &db).await.unwrap(), Some(vc));
360        assert_eq!(merkleized.get(&kd, &db).await.unwrap(), None);
361
362        db.destroy().await.unwrap();
363    }
364
365    async fn batch_stacked_get_inner<F: crate::merkle::Family>(context: deterministic::Context) {
366        let db = open_db_generic::<F>(context.with_label("db")).await;
367
368        let ka = key(0);
369        let kb = key(1);
370
371        let merkleized = db
372            .new_batch()
373            .write(ka, Some(val(0)))
374            .merkleize(&db, None)
375            .await
376            .unwrap();
377
378        let mut child = merkleized.new_batch();
379        assert_eq!(child.get(&ka, &db).await.unwrap(), Some(val(0)));
380
381        child = child.write(ka, Some(val(100)));
382        assert_eq!(child.get(&ka, &db).await.unwrap(), Some(val(100)));
383
384        child = child.write(kb, Some(val(1)));
385        assert_eq!(child.get(&kb, &db).await.unwrap(), Some(val(1)));
386
387        child = child.write(ka, None);
388        assert_eq!(child.get(&ka, &db).await.unwrap(), None);
389
390        drop(child);
391        drop(merkleized);
392        db.destroy().await.unwrap();
393    }
394
395    async fn batch_stacked_delete_recreate_inner<F: crate::merkle::Family>(
396        context: deterministic::Context,
397    ) {
398        let mut db = open_db_generic::<F>(context.with_label("db")).await;
399        let ka = key(0);
400
401        commit_writes_generic(&mut db, [(ka, Some(val(0)))], None).await;
402
403        let parent_m = db
404            .new_batch()
405            .write(ka, None)
406            .merkleize(&db, None)
407            .await
408            .unwrap();
409        assert_eq!(parent_m.get(&ka, &db).await.unwrap(), None);
410
411        let child_m = parent_m
412            .new_batch()
413            .write(ka, Some(val(200)))
414            .merkleize(&db, None)
415            .await
416            .unwrap();
417        assert_eq!(child_m.get(&ka, &db).await.unwrap(), Some(val(200)));
418
419        db.apply_batch(child_m).await.unwrap();
420        assert_eq!(db.get(&ka).await.unwrap(), Some(val(200)));
421
422        db.destroy().await.unwrap();
423    }
424
425    async fn batch_apply_returns_range_inner<F: crate::merkle::Family>(
426        context: deterministic::Context,
427    ) {
428        let mut db = open_db_generic::<F>(context.with_label("db")).await;
429
430        let writes: Vec<_> = (0..5).map(|i| (key(i), Some(val(i)))).collect();
431        let range1 = commit_writes_generic(&mut db, writes, None).await;
432
433        assert_eq!(range1.start, GenericLocation::<F>::new(1));
434        assert!(range1.end.saturating_sub(*range1.start) >= 6);
435
436        let writes: Vec<_> = (5..10).map(|i| (key(i), Some(val(i)))).collect();
437        let range2 = commit_writes_generic(&mut db, writes, None).await;
438        assert_eq!(range2.start, range1.end);
439
440        db.destroy().await.unwrap();
441    }
442
443    async fn batch_speculative_root_inner<F: crate::merkle::Family>(
444        context: deterministic::Context,
445    ) {
446        let mut db = open_db_generic::<F>(context.with_label("db")).await;
447
448        let mut batch = db.new_batch();
449        for i in 0..10 {
450            batch = batch.write(key(i), Some(val(i)));
451        }
452        let merkleized = batch.merkleize(&db, None).await.unwrap();
453        let speculative_root = merkleized.root();
454
455        db.apply_batch(merkleized).await.unwrap();
456        assert_eq!(db.root(), speculative_root);
457
458        db.destroy().await.unwrap();
459    }
460
461    async fn log_replay_inner<F: crate::merkle::Family>(context: deterministic::Context) {
462        let db_context = context.with_label("db");
463        let mut db = open_db_generic::<F>(db_context.clone()).await;
464
465        // Update the same key many times within a single batch.
466        const UPDATES: u64 = 100;
467        let k = Sha256::hash(&UPDATES.to_be_bytes());
468        let mut batch = db.new_batch();
469        for i in 0u64..UPDATES {
470            let v = Sha256::hash(&(i * 1000).to_be_bytes());
471            batch = batch.write(k, Some(v));
472        }
473        let merkleized = batch.merkleize(&db, None).await.unwrap();
474        db.apply_batch(merkleized).await.unwrap();
475        db.commit().await.unwrap();
476        let root = db.root();
477
478        // Simulate a failed commit and test that the log replay doesn't leave behind old data.
479        drop(db);
480        let db: AnyTestGeneric<F> = open_db_generic::<F>(db_context.with_label("reopened")).await;
481        let iter = db.snapshot.get(&k);
482        assert_eq!(iter.cloned().collect::<Vec<_>>().len(), 1);
483        assert_eq!(db.root(), root);
484
485        db.destroy().await.unwrap();
486    }
487
    // -- MMR test wrappers --
489
490    #[test_traced("INFO")]
491    fn test_unordered_fixed_batch_empty() {
492        let executor = deterministic::Runner::default();
493        executor.start(batch_empty_inner::<mmr::Family>);
494    }
495
496    #[test_traced("INFO")]
497    fn test_unordered_fixed_batch_metadata() {
498        let executor = deterministic::Runner::default();
499        executor.start(batch_metadata_inner::<mmr::Family>);
500    }
501
502    #[test_traced("INFO")]
503    fn test_unordered_fixed_batch_get_read_through() {
504        let executor = deterministic::Runner::default();
505        executor.start(batch_get_read_through_inner::<mmr::Family>);
506    }
507
508    #[test_traced("INFO")]
509    fn test_unordered_fixed_batch_get_on_merkleized() {
510        let executor = deterministic::Runner::default();
511        executor.start(batch_get_on_merkleized_inner::<mmr::Family>);
512    }
513
514    #[test_traced("INFO")]
515    fn test_unordered_fixed_batch_stacked_get() {
516        let executor = deterministic::Runner::default();
517        executor.start(batch_stacked_get_inner::<mmr::Family>);
518    }
519
520    #[test_traced("INFO")]
521    fn test_unordered_fixed_batch_stacked_delete_recreate() {
522        let executor = deterministic::Runner::default();
523        executor.start(batch_stacked_delete_recreate_inner::<mmr::Family>);
524    }
525
526    #[test_traced("INFO")]
527    fn test_unordered_fixed_batch_apply_returns_range() {
528        let executor = deterministic::Runner::default();
529        executor.start(batch_apply_returns_range_inner::<mmr::Family>);
530    }
531
532    #[test_traced("INFO")]
533    fn test_unordered_fixed_batch_speculative_root() {
534        let executor = deterministic::Runner::default();
535        executor.start(batch_speculative_root_inner::<mmr::Family>);
536    }
537
538    #[test_traced("WARN")]
539    fn test_any_fixed_db_log_replay() {
540        let executor = deterministic::Runner::default();
541        executor.start(log_replay_inner::<mmr::Family>);
542    }
543
    // -- MMB test wrappers --
545
546    #[test_traced("INFO")]
547    fn test_unordered_fixed_batch_empty_mmb() {
548        let executor = deterministic::Runner::default();
549        executor.start(batch_empty_inner::<crate::merkle::mmb::Family>);
550    }
551
552    #[test_traced("INFO")]
553    fn test_unordered_fixed_batch_metadata_mmb() {
554        let executor = deterministic::Runner::default();
555        executor.start(batch_metadata_inner::<crate::merkle::mmb::Family>);
556    }
557
558    #[test_traced("INFO")]
559    fn test_unordered_fixed_batch_get_read_through_mmb() {
560        let executor = deterministic::Runner::default();
561        executor.start(batch_get_read_through_inner::<crate::merkle::mmb::Family>);
562    }
563
564    #[test_traced("INFO")]
565    fn test_unordered_fixed_batch_get_on_merkleized_mmb() {
566        let executor = deterministic::Runner::default();
567        executor.start(batch_get_on_merkleized_inner::<crate::merkle::mmb::Family>);
568    }
569
570    #[test_traced("INFO")]
571    fn test_unordered_fixed_batch_stacked_get_mmb() {
572        let executor = deterministic::Runner::default();
573        executor.start(batch_stacked_get_inner::<crate::merkle::mmb::Family>);
574    }
575
576    #[test_traced("INFO")]
577    fn test_unordered_fixed_batch_stacked_delete_recreate_mmb() {
578        let executor = deterministic::Runner::default();
579        executor.start(batch_stacked_delete_recreate_inner::<crate::merkle::mmb::Family>);
580    }
581
582    #[test_traced("INFO")]
583    fn test_unordered_fixed_batch_apply_returns_range_mmb() {
584        let executor = deterministic::Runner::default();
585        executor.start(batch_apply_returns_range_inner::<crate::merkle::mmb::Family>);
586    }
587
588    #[test_traced("INFO")]
589    fn test_unordered_fixed_batch_speculative_root_mmb() {
590        let executor = deterministic::Runner::default();
591        executor.start(batch_speculative_root_inner::<crate::merkle::mmb::Family>);
592    }
593
594    #[test_traced("WARN")]
595    fn test_any_fixed_db_log_replay_mmb() {
596        let executor = deterministic::Runner::default();
597        executor.start(log_replay_inner::<crate::merkle::mmb::Family>);
598    }
599
    // -- MMR-only tests (use verify_proof / Position which are MMR-specific) --
601
602    #[test]
603    fn test_any_fixed_db_historical_proof_basic() {
604        let executor = deterministic::Runner::default();
605        executor.start(|context| async move {
606            let mut db = create_test_db(context.clone()).await;
607            let ops = create_test_ops(20);
608            apply_ops(&mut db, ops.clone()).await;
609            let root_hash = db.root();
610            let original_op_count = db.bounds().await.end;
611
612            // Historical proof should match "regular" proof when historical size == current database size
613            let max_ops = NZU64!(10);
614            let (historical_proof, historical_ops) = db
615                .historical_proof(original_op_count, Location::new(6), max_ops)
616                .await
617                .unwrap();
618            let (regular_proof, regular_ops) = db.proof(Location::new(6), max_ops).await.unwrap();
619
620            assert_eq!(historical_proof.leaves, regular_proof.leaves);
621            assert_eq!(historical_proof.digests, regular_proof.digests);
622            assert_eq!(historical_ops, regular_ops);
623            let hasher = StandardHasher::<Sha256>::new();
624            assert!(verify_proof(
625                &hasher,
626                &historical_proof,
627                Location::new(6),
628                &historical_ops,
629                &root_hash
630            ));
631
632            // Add more operations to the database
633            // (use different seed to avoid key collisions)
634            let more_ops = create_test_ops_seeded(5, 1);
635            apply_ops(&mut db, more_ops.clone()).await;
636
637            // Historical proof should remain the same even though database has grown
638            let (historical_proof, historical_ops) = db
639                .historical_proof(original_op_count, Location::new(6), NZU64!(10))
640                .await
641                .unwrap();
642            assert_eq!(historical_proof.leaves, original_op_count);
643            assert_eq!(historical_proof.leaves, regular_proof.leaves);
644            assert_eq!(historical_ops.len(), 10);
645            assert_eq!(historical_proof.digests, regular_proof.digests);
646            assert_eq!(historical_ops, regular_ops);
647            assert!(verify_proof(
648                &hasher,
649                &historical_proof,
650                Location::new(6),
651                &historical_ops,
652                &root_hash
653            ));
654
655            // Try to get historical proof with op_count > number of operations and confirm it
656            // returns RangeOutOfBounds error.
657            assert!(matches!(
658                db.historical_proof(db.bounds().await.end + 1, Location::new(6), NZU64!(10))
659                    .await,
660                Err(Error::Merkle(crate::mmr::Error::RangeOutOfBounds(_)))
661            ));
662
663            db.destroy().await.unwrap();
664        });
665    }
666
667    #[test]
668    fn test_any_fixed_db_historical_proof_edge_cases() {
669        let executor = deterministic::Runner::default();
670        executor.start(|context| async move {
671            let hasher = StandardHasher::<Sha256>::new();
672            let ops = create_test_ops(50);
673
674            let mut db = create_test_db(context.with_label("first")).await;
675            apply_ops(&mut db, ops.clone()).await;
676
677            let root = db.root();
678            let full_size = db.bounds().await.end;
679
680            // Verify a single-op proof at the full commit size.
681            let (proof, proof_ops) = db.proof(Location::new(1), NZU64!(1)).await.unwrap();
682            assert_eq!(proof_ops.len(), 1);
683            assert!(verify_proof(
684                &hasher,
685                &proof,
686                Location::new(1),
687                &proof_ops,
688                &root
689            ));
690
691            // historical_proof at full size should match proof.
692            let (hp, hp_ops) = db
693                .historical_proof(full_size, Location::new(1), NZU64!(1))
694                .await
695                .unwrap();
696            assert_eq!(hp.digests, proof.digests);
697            assert_eq!(hp_ops, proof_ops);
698
699            // Test requesting more operations than available in historical position.
700            let (_proof, limited_ops) = db
701                .historical_proof(Location::new(11), Location::new(6), NZU64!(20))
702                .await
703                .unwrap();
704            assert_eq!(limited_ops.len(), 5); // limited by historical size
705
706            // Test proof at minimum historical position.
707            let (min_proof, min_ops) = db
708                .historical_proof(Location::new(4), Location::new(1), NZU64!(3))
709                .await
710                .unwrap();
711            assert_eq!(min_proof.leaves, Location::new(4));
712            assert_eq!(min_ops.len(), 3);
713
714            db.destroy().await.unwrap();
715        });
716    }
717
718    #[test]
719    fn test_any_fixed_db_historical_proof_different_historical_sizes() {
720        let executor = deterministic::Runner::default();
721        executor.start(|context| async move {
722            let ops = create_test_ops(100);
723            let hasher = StandardHasher::<Sha256>::new();
724            let start_loc = Location::new(2);
725            let max_ops = NZU64!(10);
726
727            // Build checkpoints only at commit points and record reference proofs/roots there.
728            let mut db = create_test_db(context.with_label("main")).await;
729            let mut offset = 0usize;
730            let mut checkpoints = Vec::new();
731            for chunk in [20usize, 15, 25, 30, 10] {
732                apply_ops(&mut db, ops[offset..offset + chunk].to_vec()).await;
733                offset += chunk;
734
735                let end_loc = db.bounds().await.end;
736                let root = db.root();
737                let (proof, proof_ops) = db.proof(start_loc, max_ops).await.unwrap();
738                checkpoints.push((end_loc, root, proof, proof_ops));
739            }
740
741            // Grow state past the checkpoints with an empty batch and verify all
742            // historical proofs from that later state.
743            let merkleized = db.new_batch().merkleize(&db, None).await.unwrap();
744            db.apply_batch(merkleized).await.unwrap();
745            for (historical_size, root, reference_proof, reference_ops) in checkpoints {
746                let (historical_proof, historical_ops) = db
747                    .historical_proof(historical_size, start_loc, max_ops)
748                    .await
749                    .unwrap();
750                assert_eq!(historical_proof.leaves, reference_proof.leaves);
751                assert_eq!(historical_proof.digests, reference_proof.digests);
752                assert_eq!(historical_ops, reference_ops);
753                assert!(verify_proof(
754                    &hasher,
755                    &historical_proof,
756                    start_loc,
757                    &historical_ops,
758                    &root
759                ));
760            }
761
762            // Verify the current full-size proof against the current root as a final sanity check.
763            let full_root = db.root();
764            let (full_proof, full_ops) = db.proof(start_loc, max_ops).await.unwrap();
765            assert!(verify_proof(
766                &hasher,
767                &full_proof,
768                start_loc,
769                &full_ops,
770                &full_root
771            ));
772
773            db.destroy().await.unwrap();
774        });
775    }
776
777    fn is_send<T: Send>(_: T) {}
778
779    #[allow(dead_code)]
780    fn assert_non_trait_futures_are_send(db: &AnyTest, key: Digest, value: Digest) {
781        let batch = db.new_batch().write(key, Some(value));
782        is_send(batch.merkleize(db, None));
783        is_send(db.get_with_loc(&key));
784    }
785
786    // FromSyncTestable implementation for from_sync_result tests
787    mod from_sync_testable {
788        use super::*;
789        use crate::{
790            merkle::{
791                mmr::{self, journaled::Mmr},
792                Family as _,
793            },
794            qmdb::any::sync::tests::FromSyncTestable,
795        };
796        use futures::future::join_all;
797
798        type TestMmr = Mmr<deterministic::Context, Digest>;
799
800        impl FromSyncTestable for AnyTest {
801            type Mmr = TestMmr;
802
803            fn into_log_components(self) -> (Self::Mmr, Self::Journal) {
804                (self.log.merkle, self.log.journal)
805            }
806
807            async fn pinned_nodes_at(&self, loc: Location) -> Vec<Digest> {
808                join_all(mmr::Family::nodes_to_pin(loc).map(|p| self.log.merkle.get_node(p)))
809                    .await
810                    .into_iter()
811                    .map(|n| n.unwrap().unwrap())
812                    .collect()
813            }
814        }
815    }
816}