commonware_storage/qmdb/current/ordered/
fixed.rs

1//! An _ordered_ variant of a [crate::qmdb::current] authenticated database optimized for fixed-size
2//! values
3//!
4//! This variant maintains the lexicographic-next active key for each active key, enabling exclusion
5//! proofs (proving a key is currently inactive). Use [super::super::unordered::fixed] if exclusion
6//! proofs are not needed.
7//!
8//! See [Db] for the main database type and [ExclusionProof] for proving key inactivity.
9
10#[cfg(any(test, feature = "test-traits"))]
11use crate::qmdb::any::states::{
12    CleanAny, MerkleizedNonDurableAny, MutableAny, UnmerkleizedDurableAny,
13};
14use crate::{
15    bitmap::CleanBitMap,
16    kv::{self, Batchable},
17    mmr::{grafting::Storage as GraftingStorage, Location, Proof, StandardHasher},
18    qmdb::{
19        any::{
20            ordered::fixed::{Db as AnyDb, Operation, Update},
21            FixedValue,
22        },
23        current::{
24            merkleize_grafted_bitmap,
25            ordered::ExclusionProof,
26            proof::{OperationProof, RangeProof},
27            root, FixedConfig as Config,
28        },
29        store,
30        store::{LogStore, MerkleizedStore, PrunableStore},
31        DurabilityState, Durable, Error, MerkleizationState, Merkleized, NonDurable, Unmerkleized,
32    },
33    translator::Translator,
34    AuthenticatedBitMap as BitMap, Persistable,
35};
36use commonware_codec::FixedSize;
37use commonware_cryptography::{Digest, DigestOf, Hasher};
38use commonware_runtime::{Clock, Metrics, Storage as RStorage};
39use commonware_utils::Array;
40use core::ops::Range;
41use futures::stream::Stream;
42use std::num::NonZeroU64;
43
/// A key-value QMDB based on an MMR over its log of operations, supporting key exclusion proofs and
/// authentication of whether a key currently has a specific value.
///
/// Note: The generic parameter N is not really generic, and must be manually set to a power-of-two
/// multiple of the size of the hash digest being produced by the hasher (conventionally double).
/// Compile-time assertions (see [Self::init]) prevent any other setting.
pub struct Db<
    E: RStorage + Clock + Metrics,
    K: Array,
    V: FixedValue,
    H: Hasher,
    T: Translator,
    const N: usize,
    M: MerkleizationState<DigestOf<H>> = Merkleized<H>,
    D: DurabilityState = Durable,
> {
    /// An authenticated database that provides the ability to prove whether a key ever had a
    /// specific value.
    any: AnyDb<E, K, V, H, T, M, D>,

    /// The bitmap over the activity status of each operation. Supports augmenting [Db] proofs in
    /// order to further prove whether a key _currently_ has a specific value.
    status: BitMap<H::Digest, N, M>,

    /// Runtime context, used to derive labeled sub-contexts (e.g. "bitmap") for storage access.
    context: E,

    /// Name of the storage partition holding the persisted metadata for the pruned portion of the
    /// bitmap, allowing restart without replaying pruned operations.
    bitmap_metadata_partition: String,

    /// Cached root digest. Invariant: valid when in Clean state.
    cached_root: Option<H::Digest>,
}
75
/// Proof information for verifying a key has a particular value in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct KeyValueProof<K: Array, D: Digest, const N: usize> {
    /// Proof that the update operation assigning the value is active in the db.
    pub proof: OperationProof<D, N>,
    /// The lexicographic-next active key after the proven key, needed to reconstruct the proven
    /// update operation during verification.
    pub next_key: K,
}
82
// Functionality shared across all DB states, such as most non-mutating operations.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        M: MerkleizationState<DigestOf<H>>,
        D: DurabilityState,
    > Db<E, K, V, H, T, N, M, D>
{
    /// The number of operations that have been applied to this db, including those that have been
    /// pruned and those that are not yet committed.
    pub fn op_count(&self) -> Location {
        self.any.op_count()
    }

    /// Return the inactivity floor location. This is the location before which all operations are
    /// known to be inactive. Operations before this point can be safely pruned.
    pub const fn inactivity_floor_loc(&self) -> Location {
        self.any.inactivity_floor_loc()
    }

    /// Get the value of `key` in the db, or None if it has no value.
    pub async fn get(&self, key: &K) -> Result<Option<V>, Error> {
        self.any.get(key).await
    }

    /// Get the metadata associated with the last commit.
    pub async fn get_metadata(&self) -> Result<Option<V>, Error> {
        self.any.get_metadata().await
    }

    /// Whether the db currently has no active keys.
    pub const fn is_empty(&self) -> bool {
        self.any.is_empty()
    }

    /// Get the level of the base MMR into which we are grafting.
    ///
    /// This value is log2 of the chunk size in bits. Since we assume the chunk size is a power of
    /// 2, we compute this from trailing_zeros.
    const fn grafting_height() -> u32 {
        CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS.trailing_zeros()
    }

    /// Return true if the proof authenticates that `key` currently has value `value` in the db with
    /// the provided `root`.
    pub fn verify_key_value_proof(
        hasher: &mut H,
        key: K,
        value: V,
        proof: &KeyValueProof<K, H::Digest, N>,
        root: &H::Digest,
    ) -> bool {
        // Reconstruct the update operation the proof claims is active. Verification only succeeds
        // if this exact operation (including `next_key`) is at the proven location.
        let op = Operation::Update(Update {
            key,
            value,
            next_key: proof.next_key.clone(),
        });

        proof
            .proof
            .verify(hasher, Self::grafting_height(), op, root)
    }

    /// Get the operation that currently defines the span whose range contains `key`, or None if the
    /// DB is empty.
    pub async fn get_span(&self, key: &K) -> Result<Option<(Location, Update<K, V>)>, Error> {
        self.any.get_span(key).await
    }

    /// Streams all active (key, value) pairs in the database in key order, starting from the first
    /// active key greater than or equal to `start`.
    pub async fn stream_range<'a>(
        &'a self,
        start: K,
    ) -> Result<impl Stream<Item = Result<(K, V), Error>> + 'a, Error> {
        self.any.stream_range(start).await
    }

    /// Return true if the proof authenticates that `key` does _not_ exist in the db with the
    /// provided `root`.
    pub fn verify_exclusion_proof(
        hasher: &mut H,
        key: &K,
        proof: &ExclusionProof<K, V, H::Digest, N>,
        root: &H::Digest,
    ) -> bool {
        // Reconstruct the operation that (if proven active) implies the key's exclusion.
        let (op_proof, op) = match proof {
            ExclusionProof::KeyValue(op_proof, data) => {
                if data.key == *key {
                    // The provided `key` is in the DB if it matches the start of the span.
                    return false;
                }
                if !AnyDb::<E, K, V, H, T, Merkleized<H>, Durable>::span_contains(
                    &data.key,
                    &data.next_key,
                    key,
                ) {
                    // If the key is not within the span, then this proof cannot prove its
                    // exclusion.
                    return false;
                }

                (op_proof, Operation::Update(data.clone()))
            }
            ExclusionProof::Commit(op_proof, metadata) => {
                // Handle the case where the proof shows the db is empty, hence any key is proven
                // excluded. For the db to be empty, the floor must equal the commit operation's
                // location.
                let floor_loc = op_proof.loc;
                (
                    op_proof,
                    Operation::CommitFloor(metadata.clone(), floor_loc),
                )
            }
        };

        op_proof.verify(hasher, Self::grafting_height(), op, root)
    }

    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
    /// the log with the provided root.
    ///
    /// `chunks` are the bitmap chunks covering the proven range, required because the root commits
    /// to both the operation log and the activity bitmap.
    pub fn verify_range_proof(
        hasher: &mut H,
        proof: &RangeProof<H::Digest>,
        start_loc: Location,
        ops: &[Operation<K, V>],
        chunks: &[[u8; N]],
        root: &H::Digest,
    ) -> bool {
        let height = Self::grafting_height();

        proof.verify(hasher, height, start_loc, ops, chunks, root)
    }
}
221
// Functionality for the Clean state.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Db<E, K, V, H, T, N, Merkleized<H>, Durable>
{
    /// Initializes a [Db] from the given `config`. Leverages parallel Merkleization to initialize
    /// the bitmap MMR if a thread pool is provided.
    pub async fn init(context: E, config: Config<T>) -> Result<Self, Error> {
        // TODO: Re-evaluate assertion placement after `generic_const_exprs` is stable.
        const {
            // A compile-time assertion that the chunk size is some multiple of digest size. A multiple of 1 is optimal
            // with respect to proof size, but a higher multiple allows for a smaller (RAM resident) merkle tree over
            // the structure.
            assert!(
                N.is_multiple_of(H::Digest::SIZE),
                "chunk size must be some multiple of the digest size",
            );
            // A compile-time assertion that chunk size is a power of 2, which is necessary to allow the status bitmap
            // tree to be aligned with the underlying operations MMR.
            assert!(N.is_power_of_two(), "chunk size must be a power of 2");
        }

        let thread_pool = config.thread_pool.clone();
        let bitmap_metadata_partition = config.bitmap_metadata_partition.clone();

        // Restore the pruned prefix of the bitmap from its persisted metadata, then open it for
        // mutation so replay can append the bits of any unpruned operations.
        let mut hasher = StandardHasher::<H>::new();
        let mut status = CleanBitMap::restore_pruned(
            context.with_label("bitmap"),
            &bitmap_metadata_partition,
            thread_pool,
            &mut hasher,
        )
        .await?
        .into_dirty();

        // Initialize the anydb with a callback that initializes the status bitmap. For each
        // replayed operation the callback appends its activity bit (`append`), and clears the bit
        // of any prior operation (`loc`) that the replayed operation inactivates.
        let last_known_inactivity_floor = Location::new_unchecked(status.len());
        let any = AnyDb::init_with_callback(
            context.with_label("any"),
            config.to_any_config(),
            Some(last_known_inactivity_floor),
            |append: bool, loc: Option<Location>| {
                status.push(append);
                if let Some(loc) = loc {
                    status.set_bit(*loc, false);
                }
            },
        )
        .await?;

        // Merkleize the bitmap by grafting it onto the operations MMR at the computed height.
        let height = Self::grafting_height();
        let status = merkleize_grafted_bitmap(&mut hasher, status, &any.log.mmr, height).await?;

        // Compute and cache the root
        let cached_root = Some(root(&mut hasher, height, &status, &any.log.mmr).await?);

        Ok(Self {
            any,
            status,
            context,
            bitmap_metadata_partition,
            cached_root,
        })
    }

    /// Return the cached root of the db.
    ///
    /// # Panics
    ///
    /// Panics if the cached root is missing, which would violate the Clean-state invariant.
    pub const fn root(&self) -> H::Digest {
        self.cached_root.expect("Clean state must have cached root")
    }

    /// Sync all database state to disk.
    pub async fn sync(&mut self) -> Result<(), Error> {
        self.any.sync().await?;

        // Write the bitmap pruning boundary to disk so that next startup doesn't have to
        // re-Merkleize the inactive portion up to the inactivity floor.
        self.status
            .write_pruned(
                self.context.with_label("bitmap"),
                &self.bitmap_metadata_partition,
            )
            .await
            .map_err(Into::into)
    }

    /// Destroy the db, removing all data from disk.
    pub async fn destroy(self) -> Result<(), Error> {
        // Clean up bitmap metadata partition.
        CleanBitMap::<H::Digest, N>::destroy(self.context, &self.bitmap_metadata_partition).await?;

        // Clean up Any components (MMR and log).
        self.any.destroy().await
    }

    /// Transition into the mutable state.
    pub fn into_mutable(self) -> Db<E, K, V, H, T, N, Unmerkleized, NonDurable> {
        Db {
            any: self.any.into_mutable(),
            status: self.status.into_dirty(),
            context: self.context,
            bitmap_metadata_partition: self.bitmap_metadata_partition,
            // The mutable state has no valid root until re-Merkleized.
            cached_root: None,
        }
    }
}
332
// Functionality for any Merkleized state (both Durable and NonDurable).
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        D: store::State,
    > Db<E, K, V, H, T, N, Merkleized<H>, D>
{
    /// Returns a proof that the specified range of operations are part of the database, along with
    /// the operations from the range. A truncated range (from hitting the max) can be detected by
    /// looking at the length of the returned operations vector. Also returns the bitmap chunks
    /// required to verify the proof.
    ///
    /// # Errors
    ///
    /// Returns [crate::mmr::Error::LocationOverflow] if `start_loc` > [crate::mmr::MAX_LOCATION].
    /// Returns [crate::mmr::Error::RangeOutOfBounds] if `start_loc` >= number of leaves in the MMR.
    pub async fn range_proof(
        &self,
        hasher: &mut H,
        start_loc: Location,
        max_ops: NonZeroU64,
    ) -> Result<(RangeProof<H::Digest>, Vec<Operation<K, V>>, Vec<[u8; N]>), Error> {
        RangeProof::<H::Digest>::new_with_ops(
            hasher,
            &self.status,
            Self::grafting_height(),
            &self.any.log.mmr,
            &self.any.log,
            start_loc,
            max_ops,
        )
        .await
    }

    /// Generate and return a proof of the current value of `key`, along with the other
    /// [KeyValueProof] required to verify the proof. Returns KeyNotFound error if the key is not
    /// currently assigned any value.
    ///
    /// # Errors
    ///
    /// Returns [Error::KeyNotFound] if the key is not currently assigned any value.
    pub async fn key_value_proof(
        &self,
        hasher: &mut H,
        key: K,
    ) -> Result<KeyValueProof<K, H::Digest, N>, Error> {
        // Locate the active update operation for this key (if any).
        let op_loc = self.any.get_with_loc(&key).await?;
        let Some((data, loc)) = op_loc else {
            return Err(Error::KeyNotFound);
        };
        let height = Self::grafting_height();
        let mmr = &self.any.log.mmr;
        let proof =
            OperationProof::<H::Digest, N>::new(hasher, &self.status, height, mmr, loc).await?;

        Ok(KeyValueProof {
            proof,
            // The verifier needs `next_key` to reconstruct the proven update operation.
            next_key: data.next_key,
        })
    }

    /// Generate and return a proof that the specified `key` does not exist in the db.
    ///
    /// # Errors
    ///
    /// Returns [Error::KeyExists] if the key exists in the db.
    pub async fn exclusion_proof(
        &self,
        hasher: &mut H,
        key: &K,
    ) -> Result<ExclusionProof<K, V, H::Digest, N>, Error> {
        let height = Self::grafting_height();
        let grafted_mmr =
            GraftingStorage::<'_, H, _, _>::new(&self.status, &self.any.log.mmr, height);

        // Prove exclusion either via the span containing the key, or (if the db is empty) via the
        // last commit operation.
        let span = self.any.get_span(key).await?;
        let loc = match &span {
            Some((loc, key_data)) => {
                if key_data.key == *key {
                    // Cannot prove exclusion of a key that exists in the db.
                    return Err(Error::KeyExists);
                }
                *loc
            }
            // No span means the db is empty; prove against the last operation (the commit).
            None => self
                .op_count()
                .checked_sub(1)
                .expect("db shouldn't be empty"),
        };

        let op_proof =
            OperationProof::<H::Digest, N>::new(hasher, &self.status, height, &grafted_mmr, loc)
                .await?;

        Ok(match span {
            Some((_, key_data)) => ExclusionProof::KeyValue(op_proof, key_data),
            None => {
                let value = match self.any.log.read(loc).await? {
                    Operation::CommitFloor(value, _) => value,
                    _ => unreachable!("last commit is not a CommitFloor operation"),
                };
                ExclusionProof::Commit(op_proof, value)
            }
        })
    }

    /// Prune historical operations prior to `prune_loc`. This does not affect the db's root
    /// or current snapshot.
    pub async fn prune(&mut self, prune_loc: Location) -> Result<(), Error> {
        // Write the pruned portion of the bitmap to disk *first* to ensure recovery in case of
        // failure during pruning. If we don't do this, we may not be able to recover the bitmap
        // because it may require replaying of pruned operations.
        self.status
            .write_pruned(
                self.context.with_label("bitmap"),
                &self.bitmap_metadata_partition,
            )
            .await?;

        self.any.prune(prune_loc).await
    }
}
459
// Functionality for the Mutable state.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Db<E, K, V, H, T, N, Unmerkleized, NonDurable>
{
    /// Updates `key` to have value `value`. The operation is reflected in the snapshot, but will be
    /// subject to rollback until the next successful `commit`.
    pub async fn update(&mut self, key: K, value: V) -> Result<(), Error> {
        // Mirror the log change in the bitmap: the new operation is active, and any prior
        // operation for this key (reported via `loc`) becomes inactive.
        self.any
            .update_with_callback(key, value, |loc| {
                self.status.push(true);
                if let Some(loc) = loc {
                    self.status.set_bit(*loc, false);
                }
            })
            .await
    }

    /// Creates a new key-value pair in the db. The operation is reflected in the snapshot, but will
    /// be subject to rollback until the next successful `commit`. Returns true if the key was
    /// created, false if it already existed.
    pub async fn create(&mut self, key: K, value: V) -> Result<bool, Error> {
        self.any
            .create_with_callback(key, value, |loc| {
                self.status.push(true);
                if let Some(loc) = loc {
                    self.status.set_bit(*loc, false);
                }
            })
            .await
    }

    /// Delete `key` and its value from the db. Deleting a key that already has no value is a no-op.
    /// The operation is reflected in the snapshot, but will be subject to rollback until the next
    /// successful `commit`. Returns true if the key was deleted, false if it was already inactive.
    pub async fn delete(&mut self, key: K) -> Result<bool, Error> {
        // `r` remains false when the callback never runs, i.e. when the key was already inactive
        // and no delete operation was appended.
        let mut r = false;
        self.any
            .delete_with_callback(key, |append, loc| {
                if let Some(loc) = loc {
                    self.status.set_bit(*loc, false);
                }
                self.status.push(append);
                r = true;
            })
            .await?;

        Ok(r)
    }

    /// Append a commit operation (with optional `metadata`) to the log, raising the inactivity
    /// floor according to the schedule and keeping the activity bitmap in sync. Returns the
    /// `[start_loc, end_loc)` location range of committed operations.
    async fn apply_commit_op(&mut self, metadata: Option<V>) -> Result<Range<Location>, Error> {
        let start_loc = self.any.last_commit_loc + 1;

        // Inactivate the current commit operation.
        self.status.set_bit(*self.any.last_commit_loc, false);

        // Raise the inactivity floor by taking `self.steps` steps, plus 1 to account for the
        // previous commit becoming inactive.
        let inactivity_floor_loc = self.any.raise_floor_with_bitmap(&mut self.status).await?;

        // Append the commit operation with the new floor and tag it as active in the bitmap.
        self.status.push(true);
        let commit_op = Operation::CommitFloor(metadata, inactivity_floor_loc);

        self.any.apply_commit_op(commit_op).await?;

        Ok(start_loc..self.op_count())
    }

    /// Commit any pending operations to the database, ensuring their durability upon return.
    /// This transitions to the Durable state without merkleizing. Returns the committed database
    /// and the `[start_loc, end_loc)` range of committed operations. Note that even if no
    /// operations were added since the last commit, this is a root-state changing operation.
    pub async fn commit(
        mut self,
        metadata: Option<V>,
    ) -> Result<(Db<E, K, V, H, T, N, Unmerkleized, Durable>, Range<Location>), Error> {
        let range = self.apply_commit_op(metadata).await?;

        // Transition to Durable state without merkleizing
        let any = AnyDb {
            log: self.any.log,
            inactivity_floor_loc: self.any.inactivity_floor_loc,
            last_commit_loc: self.any.last_commit_loc,
            snapshot: self.any.snapshot,
            durable_state: store::Durable,
            active_keys: self.any.active_keys,
            _update: core::marker::PhantomData,
        };

        Ok((
            Db {
                any,
                status: self.status,
                context: self.context,
                bitmap_metadata_partition: self.bitmap_metadata_partition,
                cached_root: None, // Not merkleized yet
            },
            range,
        ))
    }

    /// Merkleize the database and transition to the provable state without committing.
    /// This enables proof generation while keeping the database in the non-durable state.
    pub async fn into_merkleized(
        self,
    ) -> Result<Db<E, K, V, H, T, N, Merkleized<H>, NonDurable>, Error> {
        // Merkleize the any db's log
        let any = AnyDb {
            log: self.any.log.merkleize(),
            inactivity_floor_loc: self.any.inactivity_floor_loc,
            last_commit_loc: self.any.last_commit_loc,
            snapshot: self.any.snapshot,
            durable_state: self.any.durable_state,
            active_keys: self.any.active_keys,
            _update: core::marker::PhantomData,
        };

        // Merkleize the bitmap using the clean MMR
        let mut hasher = StandardHasher::<H>::new();
        let height = Db::<E, K, V, H, T, N, Merkleized<H>, NonDurable>::grafting_height();
        let mut status =
            merkleize_grafted_bitmap(&mut hasher, self.status, &any.log.mmr, height).await?;

        // Prune the bitmap of no-longer-necessary bits.
        status.prune_to_bit(*any.inactivity_floor_loc)?;

        // Compute and cache the root
        let cached_root = Some(root(&mut hasher, height, &status, &any.log.mmr).await?);

        Ok(Db {
            any,
            status,
            context: self.context,
            bitmap_metadata_partition: self.bitmap_metadata_partition,
            cached_root,
        })
    }
}
607
608// Functionality for (Merkleized, NonDurable) state.
609impl<
610        E: RStorage + Clock + Metrics,
611        K: Array,
612        V: FixedValue,
613        H: Hasher,
614        T: Translator,
615        const N: usize,
616    > Db<E, K, V, H, T, N, Merkleized<H>, NonDurable>
617{
618    /// Transition into the mutable state.
619    pub fn into_mutable(self) -> Db<E, K, V, H, T, N, Unmerkleized, NonDurable> {
620        Db {
621            any: self.any.into_mutable(),
622            status: self.status.into_dirty(),
623            context: self.context,
624            bitmap_metadata_partition: self.bitmap_metadata_partition,
625            cached_root: None,
626        }
627    }
628}
629
// Functionality for (Unmerkleized, Durable) state.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Db<E, K, V, H, T, N, Unmerkleized, Durable>
{
    /// Merkleize the database, transitioning to the provable state.
    pub async fn into_merkleized(
        self,
    ) -> Result<Db<E, K, V, H, T, N, Merkleized<H>, Durable>, Error> {
        // Merkleize the any db's log
        let any = AnyDb {
            log: self.any.log.merkleize(),
            inactivity_floor_loc: self.any.inactivity_floor_loc,
            last_commit_loc: self.any.last_commit_loc,
            snapshot: self.any.snapshot,
            durable_state: self.any.durable_state,
            active_keys: self.any.active_keys,
            _update: core::marker::PhantomData,
        };

        // Merkleize the bitmap using the clean MMR
        let mut hasher = StandardHasher::<H>::new();
        let height = Db::<E, K, V, H, T, N, Merkleized<H>, Durable>::grafting_height();
        let mut status =
            merkleize_grafted_bitmap(&mut hasher, self.status, &any.log.mmr, height).await?;

        // Prune the bitmap of no-longer-necessary bits.
        status.prune_to_bit(*any.inactivity_floor_loc)?;

        // Compute and cache the root
        let cached_root = Some(root(&mut hasher, height, &status, &any.log.mmr).await?);

        Ok(Db {
            any,
            status,
            context: self.context,
            bitmap_metadata_partition: self.bitmap_metadata_partition,
            cached_root,
        })
    }

    /// Transition into the mutable state.
    pub fn into_mutable(self) -> Db<E, K, V, H, T, N, Unmerkleized, NonDurable> {
        Db {
            any: self.any.into_mutable(),
            // The bitmap is already in the dirty (unmerkleized) state here, so it moves as-is.
            status: self.status,
            context: self.context,
            bitmap_metadata_partition: self.bitmap_metadata_partition,
            cached_root: None,
        }
    }
}
687
// LogStore implementation for all states.
//
// Each method delegates to the inherent method of the same name; method-call syntax resolves to
// the inherent impl first, so these do not recurse.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        M: MerkleizationState<DigestOf<H>>,
        D: DurabilityState,
    > LogStore for Db<E, K, V, H, T, N, M, D>
{
    type Value = V;

    fn op_count(&self) -> Location {
        self.op_count()
    }

    fn inactivity_floor_loc(&self) -> Location {
        self.inactivity_floor_loc()
    }

    async fn get_metadata(&self) -> Result<Option<V>, Error> {
        self.get_metadata().await
    }

    fn is_empty(&self) -> bool {
        self.is_empty()
    }
}
718
// Store implementation for all states
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        M: MerkleizationState<DigestOf<H>>,
        D: DurabilityState,
    > kv::Gettable for Db<E, K, V, H, T, N, M, D>
{
    type Key = K;
    type Value = V;
    type Error = Error;

    /// Delegates to the inherent `get` (inherent methods win resolution, so no recursion).
    async fn get(&self, key: &Self::Key) -> Result<Option<Self::Value>, Self::Error> {
        self.get(key).await
    }
}
739
// StoreMut for (Unmerkleized, NonDurable) (aka mutable) state
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > kv::Updatable for Db<E, K, V, H, T, N, Unmerkleized, NonDurable>
{
    /// Delegates to the inherent `update` (inherent methods win resolution, so no recursion).
    async fn update(&mut self, key: Self::Key, value: Self::Value) -> Result<(), Self::Error> {
        self.update(key, value).await
    }
}
754
755// StoreDeletable for (Unmerkleized, NonDurable) (aka mutable) state
756impl<
757        E: RStorage + Clock + Metrics,
758        K: Array,
759        V: FixedValue,
760        H: Hasher,
761        T: Translator,
762        const N: usize,
763    > kv::Deletable for Db<E, K, V, H, T, N, Unmerkleized, NonDurable>
764{
765    async fn delete(&mut self, key: Self::Key) -> Result<bool, Self::Error> {
766        self.delete(key).await
767    }
768}
769
// Batchable for (Unmerkleized, NonDurable) (aka mutable) state
impl<E, K, V, T, H, const N: usize> Batchable for Db<E, K, V, H, T, N, Unmerkleized, NonDurable>
where
    E: RStorage + Clock + Metrics,
    K: Array,
    V: FixedValue,
    T: Translator,
    H: Hasher,
{
    /// Apply a batch of writes, where `Some(value)` updates the key and `None` deletes it. The
    /// bitmap is kept in sync via the same append/inactivate callback protocol as `update`.
    async fn write_batch<'a, Iter>(&'a mut self, iter: Iter) -> Result<(), Error>
    where
        Iter: Iterator<Item = (K, Option<V>)> + Send + 'a,
    {
        // Borrow `status` separately so the closure can mutate it while `self.any` is also
        // mutably borrowed (split-borrow of disjoint fields).
        let status = &mut self.status;
        self.any
            .write_batch_with_callback(iter, move |append: bool, loc: Option<Location>| {
                status.push(append);
                if let Some(loc) = loc {
                    status.set_bit(*loc, false);
                }
            })
            .await
    }
}
794
// MerkleizedStore for Merkleized states (both Durable and NonDurable)
// TODO(https://github.com/commonwarexyz/monorepo/issues/2560): This is broken -- it's computing
// proofs only over the any db mmr not the grafted mmr, so they won't validate against the grafted
// root.
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        D: store::State,
    > MerkleizedStore for Db<E, K, V, H, T, N, Merkleized<H>, D>
{
    type Digest = H::Digest;
    type Operation = Operation<K, V>;

    /// Return the cached root.
    ///
    /// # Panics
    ///
    /// Panics if no root is cached, which would violate the Merkleized-state invariant.
    fn root(&self) -> Self::Digest {
        self.cached_root
            .expect("Merkleized state must have cached root")
    }

    /// Delegates to the inner any-db (see TODO above: this proves against the any db MMR only,
    /// not the grafted MMR committed to by `root`).
    async fn historical_proof(
        &self,
        historical_size: Location,
        start_loc: Location,
        max_ops: NonZeroU64,
    ) -> Result<(Proof<Self::Digest>, Vec<Self::Operation>), Error> {
        self.any
            .historical_proof(historical_size, start_loc, max_ops)
            .await
    }
}
828
// PrunableStore for Merkleized states (both Durable and NonDurable)
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
        D: DurabilityState,
    > PrunableStore for Db<E, K, V, H, T, N, Merkleized<H>, D>
{
    /// Delegates to the inherent `prune`, which persists the pruned bitmap before pruning the log.
    async fn prune(&mut self, prune_loc: Location) -> Result<(), Error> {
        self.prune(prune_loc).await
    }
}
844
// Persistable for Clean state
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > Persistable for Db<E, K, V, H, T, N, Merkleized<H>, Durable>
{
    type Error = Error;

    async fn commit(&mut self) -> Result<(), Self::Error> {
        // No-op, DB already recoverable.
        Ok(())
    }

    /// Delegates to the inherent `sync` (syncs the any db and persists the bitmap boundary).
    async fn sync(&mut self) -> Result<(), Self::Error> {
        self.sync().await
    }

    /// Delegates to the inherent `destroy`, removing all on-disk state.
    async fn destroy(self) -> Result<(), Self::Error> {
        self.destroy().await
    }
}
870
// CleanAny trait implementation
#[cfg(any(test, feature = "test-traits"))]
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > CleanAny for Db<E, K, V, H, T, N, Merkleized<H>, Durable>
{
    // The state reached when a clean db is reopened for mutation.
    type Mutable = Db<E, K, V, H, T, N, Unmerkleized, NonDurable>;

    /// Transition from (Merkleized, Durable) into the mutable (Unmerkleized, NonDurable) state.
    /// Delegates to the inherent method of the same name.
    fn into_mutable(self) -> Self::Mutable {
        self.into_mutable()
    }
}
888
// UnmerkleizedDurableAny trait implementation
#[cfg(any(test, feature = "test-traits"))]
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > UnmerkleizedDurableAny for Db<E, K, V, H, T, N, Unmerkleized, Durable>
{
    type Digest = H::Digest;
    type Operation = Operation<K, V>;
    type Mutable = Db<E, K, V, H, T, N, Unmerkleized, NonDurable>;
    type Merkleized = Db<E, K, V, H, T, N, Merkleized<H>, Durable>;

    /// Transition back into the mutable (Unmerkleized, NonDurable) state.
    fn into_mutable(self) -> Self::Mutable {
        self.into_mutable()
    }

    /// Merkleize the durable state, producing a (Merkleized, Durable) db. Delegates to the
    /// inherent method of the same name.
    async fn into_merkleized(self) -> Result<Self::Merkleized, Error> {
        self.into_merkleized().await
    }
}
913
// MerkleizedNonDurableAny trait implementation
#[cfg(any(test, feature = "test-traits"))]
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > MerkleizedNonDurableAny for Db<E, K, V, H, T, N, Merkleized<H>, NonDurable>
{
    // The state reached when mutation resumes on a merkleized-but-not-durable db.
    type Mutable = Db<E, K, V, H, T, N, Unmerkleized, NonDurable>;

    /// Transition into the mutable (Unmerkleized, NonDurable) state. Delegates to the inherent
    /// method of the same name.
    fn into_mutable(self) -> Self::Mutable {
        self.into_mutable()
    }
}
931
// MutableAny trait implementation
#[cfg(any(test, feature = "test-traits"))]
impl<
        E: RStorage + Clock + Metrics,
        K: Array,
        V: FixedValue,
        H: Hasher,
        T: Translator,
        const N: usize,
    > MutableAny for Db<E, K, V, H, T, N, Unmerkleized, NonDurable>
{
    type Digest = H::Digest;
    type Operation = Operation<K, V>;
    type Merkleized = Db<E, K, V, H, T, N, Merkleized<H>, NonDurable>;
    type Durable = Db<E, K, V, H, T, N, Unmerkleized, Durable>;

    /// Commit pending operations (with optional `metadata`), yielding the durable db and the
    /// range of locations appended by the commit. Delegates to the inherent `commit`.
    async fn commit(self, metadata: Option<V>) -> Result<(Self::Durable, Range<Location>), Error> {
        self.commit(metadata).await
    }

    /// Merkleize the (non-durable) state. Delegates to the inherent method of the same name.
    async fn into_merkleized(self) -> Result<Self::Merkleized, Error> {
        self.into_merkleized().await
    }

    /// Expose the `steps` counter tracked by the inner any-db's durable state.
    fn steps(&self) -> u64 {
        self.any.durable_state.steps
    }
}
960
961#[cfg(test)]
962pub mod test {
963    use super::*;
964    use crate::{
965        index::Unordered as _,
966        kv::tests::{assert_batchable, assert_deletable, assert_gettable, assert_send},
967        mmr::{hasher::Hasher as _, Location},
968        qmdb::store::{
969            batch_tests,
970            tests::{assert_log_store, assert_merkleized_store, assert_prunable_store},
971        },
972        translator::OneCap,
973    };
974    use commonware_cryptography::{sha256::Digest, Sha256};
975    use commonware_macros::test_traced;
976    use commonware_runtime::{buffer::PoolRef, deterministic, Runner as _};
977    use commonware_utils::{NZUsize, NZU16, NZU64};
978    use rand::{rngs::StdRng, RngCore, SeedableRng};
979    use std::{
980        collections::HashMap,
981        num::{NonZeroU16, NonZeroUsize},
982    };
983    use tracing::warn;
984
    // Page size used when constructing the test buffer pool (see `current_db_config`).
    const PAGE_SIZE: NonZeroU16 = NZU16!(88);
    // Number of pages the test buffer pool may cache.
    const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(8);
987
988    fn current_db_config(partition_prefix: &str) -> Config<OneCap> {
989        Config {
990            mmr_journal_partition: format!("{partition_prefix}_journal_partition"),
991            mmr_metadata_partition: format!("{partition_prefix}_metadata_partition"),
992            mmr_items_per_blob: NZU64!(11),
993            mmr_write_buffer: NZUsize!(1024),
994            log_journal_partition: format!("{partition_prefix}_partition_prefix"),
995            log_items_per_blob: NZU64!(7),
996            log_write_buffer: NZUsize!(1024),
997            bitmap_metadata_partition: format!("{partition_prefix}_bitmap_metadata_partition"),
998            translator: OneCap,
999            thread_pool: None,
1000            buffer_pool: PoolRef::new(PAGE_SIZE, PAGE_CACHE_SIZE),
1001        }
1002    }
1003
    /// A type alias for the concrete [Db] type used in these unit tests (Merkleized, Durable state).
    /// Keys and values are both Sha256 digests.
    type CleanCurrentTest =
        Db<deterministic::Context, Digest, Digest, Sha256, OneCap, 32, Merkleized<Sha256>, Durable>;

    /// A type alias for the Mutable variant of CurrentTest (Unmerkleized, NonDurable state).
    type MutableCurrentTest =
        Db<deterministic::Context, Digest, Digest, Sha256, OneCap, 32, Unmerkleized, NonDurable>;
1011
    /// Return an [Db] database initialized with a fixed config derived from `partition_prefix`.
    /// Panics if initialization fails (tests treat that as a hard error).
    async fn open_db(context: deterministic::Context, partition_prefix: &str) -> CleanCurrentTest {
        CleanCurrentTest::init(context, current_db_config(partition_prefix))
            .await
            .unwrap()
    }
1018
    /// Build a small database, then close and reopen it and ensure state is preserved.
    #[test_traced("DEBUG")]
    pub fn test_current_db_build_small_close_reopen() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let partition = "build_small";
            // A freshly initialized db holds a single (initial commit) operation.
            let db = open_db(context.clone(), partition).await;
            assert_eq!(db.op_count(), 1);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(0));
            let root0 = db.root();
            // Drop without further commits: the initial state is already durable.
            drop(db);
            let db = open_db(context.clone(), partition).await;
            assert_eq!(db.op_count(), 1);
            assert!(db.get_metadata().await.unwrap().is_none());
            assert_eq!(db.root(), root0);

            // Add one key.
            let k1 = Sha256::hash(&0u64.to_be_bytes());
            let v1 = Sha256::hash(&10u64.to_be_bytes());
            let mut db = db.into_mutable();
            assert!(db.create(k1, v1).await.unwrap());
            assert_eq!(db.get(&k1).await.unwrap().unwrap(), v1);
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            assert_eq!(db.op_count(), 4); // 1 update, 1 commit, 1 move + 1 initial commit.
            assert!(db.get_metadata().await.unwrap().is_none());
            let root1 = db.root();
            assert!(root1 != root0);

            // Reopen and confirm op count and root survived the restart.
            drop(db);
            let db = open_db(context.clone(), partition).await;
            assert_eq!(db.op_count(), 4);
            assert_eq!(db.root(), root1);

            // Create of same key should fail.
            let mut db = db.into_mutable();
            assert!(!db.create(k1, v1).await.unwrap());

            // Delete that one key.
            assert!(db.delete(k1).await.unwrap());

            // Commit with metadata this time, and confirm it round-trips through reopen below.
            let metadata = Sha256::hash(&1u64.to_be_bytes());
            let (db, _) = db.commit(Some(metadata)).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            assert_eq!(db.op_count(), 6); // 1 update, 2 commits, 1 move, 1 delete.
            assert_eq!(db.get_metadata().await.unwrap().unwrap(), metadata);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(5));
            let root2 = db.root();

            drop(db);
            let db = open_db(context.clone(), partition).await;
            assert_eq!(db.op_count(), 6);
            assert_eq!(db.get_metadata().await.unwrap().unwrap(), metadata);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(5));
            assert_eq!(db.root(), root2);

            // Repeated delete of same key should fail.
            let mut db = db.into_mutable();
            assert!(!db.delete(k1).await.unwrap());
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();

            // Confirm all activity bits except the last are false.
            for i in 0..*db.op_count() - 1 {
                assert!(!db.status.get_bit(i));
            }
            // Only the final commit operation remains active.
            assert!(db.status.get_bit(*db.op_count() - 1));

            db.destroy().await.unwrap();
        });
    }
1090
    #[test_traced("WARN")]
    fn test_current_db_build_big() {
        let executor = deterministic::Runner::default();
        // Build a db with 1000 keys, some of which we update and some of which we delete, and
        // confirm that the end state of the db matches that of an identically updated hashmap.
        const ELEMENTS: u64 = 1000;
        executor.start(|context| async move {
            let mut db = open_db(context.clone(), "build_big").await.into_mutable();

            // Insert ELEMENTS keys, mirroring every write into a plain HashMap as the oracle.
            let mut map = HashMap::<Digest, Digest>::default();
            for i in 0u64..ELEMENTS {
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&(i * 1000).to_be_bytes());
                db.update(k, v).await.unwrap();
                map.insert(k, v);
            }

            // Update every 3rd key
            for i in 0u64..ELEMENTS {
                if i % 3 != 0 {
                    continue;
                }
                let k = Sha256::hash(&i.to_be_bytes());
                let v = Sha256::hash(&((i + 1) * 10000).to_be_bytes());
                db.update(k, v).await.unwrap();
                map.insert(k, v);
            }

            // Delete every 7th key
            for i in 0u64..ELEMENTS {
                if i % 7 != 1 {
                    continue;
                }
                let k = Sha256::hash(&i.to_be_bytes());
                db.delete(k).await.unwrap();
                map.remove(&k);
            }

            // Expected counts follow deterministically from the fixed operation sequence above.
            assert_eq!(db.op_count(), 2620);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(0));
            assert_eq!(db.any.snapshot.items(), 857);

            // Test that commit + sync w/ pruning will raise the activity floor.
            let (db, _) = db.commit(None).await.unwrap();
            let mut db = db.into_merkleized().await.unwrap();
            db.sync().await.unwrap();
            db.prune(db.inactivity_floor_loc()).await.unwrap();
            assert_eq!(db.op_count(), 4241);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(3383));
            assert_eq!(db.any.snapshot.items(), 857);

            // Reopen the db, making sure it has exactly the same state.
            let root = db.root();
            drop(db);
            let db = open_db(context.clone(), "build_big").await;
            assert_eq!(root, db.root());
            assert_eq!(db.op_count(), 4241);
            assert_eq!(db.inactivity_floor_loc(), Location::new_unchecked(3383));
            assert_eq!(db.any.snapshot.items(), 857);

            // Confirm the db's state matches that of the separate map we computed independently.
            for i in 0u64..1000 {
                let k = Sha256::hash(&i.to_be_bytes());
                if let Some(map_value) = map.get(&k) {
                    let Some(db_value) = db.get(&k).await.unwrap() else {
                        panic!("key not found in db: {k}");
                    };
                    assert_eq!(*map_value, db_value);
                } else {
                    assert!(db.get(&k).await.unwrap().is_none());
                }
            }
        });
    }
1165
    // Test that merkleization state changes don't reset `steps`.
    #[test_traced("DEBUG")]
    fn test_current_ordered_fixed_db_steps_not_reset() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            // Delegates to the shared any-db test harness for the actual assertions.
            let db = open_db(context, "steps_test").await;
            crate::qmdb::any::test::test_any_db_steps_not_reset(db).await;
        });
    }
1175
    /// Build a tiny database and make sure we can't convince the verifier that some old value of a
    /// key is active. We specifically test over the partial chunk case, since these bits are yet to
    /// be committed to the underlying MMR.
    #[test_traced("DEBUG")]
    pub fn test_current_db_verify_proof_over_bits_in_uncommitted_chunk() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "build_small";
            let mut db = open_db(context.clone(), partition).await.into_mutable();

            // Add one key.
            let k = Sha256::fill(0x01);
            let v1 = Sha256::fill(0xA1);
            db.update(k, v1).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();

            // Remember the location of the active update op; it becomes inactive after the
            // second update below.
            let (_, op_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();

            // Proof should be verifiable against current root.
            let root = db.root();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root,
            ));

            let v2 = Sha256::fill(0xA2);
            // Proof should not verify against a different value.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));
            // Proof should not verify against a mangled next_key.
            let mut mangled_proof = proof.clone();
            mangled_proof.next_key = Sha256::fill(0xFF);
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &mangled_proof,
                &root,
            ));

            // Update the key to a new value (v2), which inactivates the previous operation.
            let mut db = db.into_mutable();
            db.update(k, v2).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // New value should not be verifiable against the old proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));

            // But the new value should verify against a new proof.
            let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
            assert!(CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v2,
                &proof,
                &root,
            ));
            // Old value will not verify against new proof.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof,
                &root,
            ));

            // Create a proof of the now-inactive update operation assigning v1 to k against the
            // current root.
            let (p, _, chunks) = db
                .range_proof(hasher.inner(), op_loc, NZU64!(1))
                .await
                .unwrap();
            let proof_inactive = KeyValueProof {
                proof: OperationProof {
                    loc: op_loc,
                    chunk: chunks[0],
                    range_proof: p,
                },
                next_key: k,
            };
            // This proof should verify using verify_range_proof which does not check activity
            // status.
            let op = Operation::Update(Update {
                key: k,
                value: v1,
                next_key: k,
            });
            assert!(CleanCurrentTest::verify_range_proof(
                hasher.inner(),
                &proof_inactive.proof.range_proof,
                proof_inactive.proof.loc,
                &[op],
                &[proof_inactive.proof.chunk],
                &root,
            ));
            // But this proof should *not* verify as a key value proof, since verification will see
            // that the operation is inactive.
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &proof_inactive,
                &root,
            ));

            // Attempt #1 to "fool" the verifier:  change the location to that of an active
            // operation. This should not fool the verifier if we're properly validating the
            // inclusion of the operation itself, and not just the chunk.
            let (_, active_loc) = db.any.get_with_loc(&k).await.unwrap().unwrap();
            // The new location should differ but still be in the same chunk.
            assert_ne!(active_loc, proof_inactive.proof.loc);
            assert_eq!(
                CleanBitMap::<Digest, 32>::leaf_pos(*active_loc),
                CleanBitMap::<Digest, 32>::leaf_pos(*proof_inactive.proof.loc)
            );
            let mut fake_proof = proof_inactive.clone();
            fake_proof.proof.loc = active_loc;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            // Attempt #2 to "fool" the verifier: Modify the chunk in the proof info to make it look
            // like the operation is active by flipping its corresponding bit to 1. This should not
            // fool the verifier if we are correctly incorporating the partial chunk information
            // into the root computation.
            // The bit index within the status bitmap equals the operation's location.
            let mut modified_chunk = proof_inactive.proof.chunk;
            let bit_pos = *proof_inactive.proof.loc;
            let byte_idx = bit_pos / 8;
            let bit_idx = bit_pos % 8;
            modified_chunk[byte_idx as usize] |= 1 << bit_idx;

            let mut fake_proof = proof_inactive.clone();
            fake_proof.proof.chunk = modified_chunk;
            assert!(!CleanCurrentTest::verify_key_value_proof(
                hasher.inner(),
                k,
                v1,
                &fake_proof,
                &root,
            ));

            db.destroy().await.unwrap();
        });
    }
1343
    /// Apply random operations to the given db, committing them (randomly & at the end) only if
    /// `commit_changes` is true. Returns a mutable db; callers should commit if needed.
    ///
    /// NOTE: the exact sequence of `rng` calls determines the operations applied, so any change
    /// to the call order here changes the op counts and roots observed by callers.
    async fn apply_random_ops(
        num_elements: u64,
        commit_changes: bool,
        rng_seed: u64,
        mut db: MutableCurrentTest,
    ) -> Result<MutableCurrentTest, Error> {
        // Log the seed with high visibility to make failures reproducible.
        warn!("rng_seed={}", rng_seed);
        let mut rng = StdRng::seed_from_u64(rng_seed);

        // Seed the db with `num_elements` distinct keys with random values.
        for i in 0u64..num_elements {
            let k = Sha256::hash(&i.to_be_bytes());
            let v = Sha256::hash(&rng.next_u32().to_be_bytes());
            db.update(k, v).await.unwrap();
        }

        // Randomly update / delete them. We use a delete frequency that is 1/7th of the update
        // frequency.
        for _ in 0u64..num_elements * 10 {
            let rand_key = Sha256::hash(&(rng.next_u64() % num_elements).to_be_bytes());
            if rng.next_u32() % 7 == 0 {
                db.delete(rand_key).await.unwrap();
                continue;
            }
            let v = Sha256::hash(&rng.next_u32().to_be_bytes());
            db.update(rand_key, v).await.unwrap();
            if commit_changes && rng.next_u32() % 20 == 0 {
                // Commit every ~20 updates.
                // Cycle through durable -> merkleized -> mutable to exercise the state machine.
                let (durable_db, _) = db.commit(None).await?;
                let clean_db = durable_db.into_merkleized().await?;
                db = clean_db.into_mutable();
            }
        }
        if commit_changes {
            // Final commit so callers observe a fully committed state.
            let (durable_db, _) = db.commit(None).await?;
            let clean_db = durable_db.into_merkleized().await?;
            db = clean_db.into_mutable();
        }
        Ok(db)
    }
1386
1387    #[test_traced("DEBUG")]
1388    pub fn test_current_db_range_proofs() {
1389        let executor = deterministic::Runner::default();
1390        executor.start(|mut context| async move {
1391            let partition = "range_proofs";
1392            let mut hasher = StandardHasher::<Sha256>::new();
1393            let db = open_db(context.clone(), partition).await;
1394            let root = db.root();
1395
1396            // Empty range proof should not crash or verify, since even an empty db has a single
1397            // commit op.
1398            let proof = RangeProof {
1399                proof: Proof::default(),
1400                partial_chunk_digest: None,
1401            };
1402            assert!(!CleanCurrentTest::verify_range_proof(
1403                hasher.inner(),
1404                &proof,
1405                Location::new_unchecked(0),
1406                &[],
1407                &[],
1408                &root,
1409            ));
1410
1411            let db = apply_random_ops(200, true, context.next_u64(), db.into_mutable())
1412                .await
1413                .unwrap();
1414            let (db, _) = db.commit(None).await.unwrap();
1415            let db = db.into_merkleized().await.unwrap();
1416            let root = db.root();
1417
1418            // Make sure size-constrained batches of operations are provable from the oldest
1419            // retained op to tip.
1420            let max_ops = 4;
1421            let end_loc = db.op_count();
1422            let start_loc = db.any.inactivity_floor_loc();
1423
1424            for loc in *start_loc..*end_loc {
1425                let loc = Location::new_unchecked(loc);
1426                let (proof, ops, chunks) = db
1427                    .range_proof(hasher.inner(), loc, NZU64!(max_ops))
1428                    .await
1429                    .unwrap();
1430                assert!(
1431                    CleanCurrentTest::verify_range_proof(
1432                        hasher.inner(),
1433                        &proof,
1434                        loc,
1435                        &ops,
1436                        &chunks,
1437                        &root
1438                    ),
1439                    "failed to verify range at start_loc {start_loc}",
1440                );
1441                // Proof should not verify if we include extra chunks.
1442                let mut chunks_with_extra = chunks.clone();
1443                chunks_with_extra.push(chunks[chunks.len() - 1]);
1444                assert!(!CleanCurrentTest::verify_range_proof(
1445                    hasher.inner(),
1446                    &proof,
1447                    loc,
1448                    &ops,
1449                    &chunks_with_extra,
1450                    &root,
1451                ));
1452            }
1453
1454            db.destroy().await.unwrap();
1455        });
1456    }
1457
    /// For every active key-updating operation above the inactivity floor, create a key-value
    /// proof and confirm it verifies only with the correct key, value, root, and next-key.
    #[test_traced("DEBUG")]
    pub fn test_current_db_key_value_proof() {
        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            // NOTE(review): partition name appears copied from the range-proofs test; presumably
            // harmless since each test runs in its own deterministic context -- confirm.
            let partition = "range_proofs";
            let mut hasher = StandardHasher::<Sha256>::new();
            let db = open_db(context.clone(), partition).await.into_mutable();
            let db = apply_random_ops(500, true, context.next_u64(), db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Confirm bad keys produce the expected error.
            let bad_key = Sha256::fill(0xAA);
            let res = db.key_value_proof(hasher.inner(), bad_key).await;
            assert!(matches!(res, Err(Error::KeyNotFound)));

            // Walk the activity bitmap from the inactivity floor to the tip.
            let start = *db.inactivity_floor_loc();
            for i in start..db.status.len() {
                if !db.status.get_bit(i) {
                    continue;
                }
                // Found an active operation! Create a proof for its active current key/value if
                // it's a key-updating operation.
                let op = db.any.log.read(Location::new_unchecked(i)).await.unwrap();
                let (key, value) = match op {
                    Operation::Update(key_data) => (key_data.key, key_data.value),
                    Operation::CommitFloor(_, _) => continue,
                    // Deletes are never active, so no other variant can carry an active bit.
                    _ => unreachable!("expected update or commit floor operation"),
                };
                let proof = db.key_value_proof(hasher.inner(), key).await.unwrap();

                // Proof should validate against the current value and correct root.
                assert!(CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong value.
                let wrong_val = Sha256::fill(0xFF);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    wrong_val,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong key.
                let wrong_key = Sha256::fill(0xEE);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    wrong_key,
                    value,
                    &proof,
                    &root
                ));
                // Proof should fail against the wrong root.
                let wrong_root = Sha256::fill(0xDD);
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &proof,
                    &wrong_root,
                ));
                // Proof should fail with the wrong next-key.
                let mut bad_proof = proof.clone();
                bad_proof.next_key = wrong_key;
                assert!(!CleanCurrentTest::verify_key_value_proof(
                    hasher.inner(),
                    key,
                    value,
                    &bad_proof,
                    &root,
                ));
            }

            db.destroy().await.unwrap();
        });
    }
1542
    /// This test builds a random database, and makes sure that its state is correctly restored
    /// after closing and re-opening.
    #[test_traced("WARN")]
    pub fn test_current_db_build_random_close_reopen() {
        // Number of elements to initially insert into the db.
        const ELEMENTS: u64 = 1000;

        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "build_random";
            // Seed comes from the deterministic context, so failures are reproducible
            // (apply_random_ops also logs it).
            let rng_seed = context.next_u64();
            let db = open_db(context.clone(), partition).await.into_mutable();
            let db = apply_random_ops(ELEMENTS, true, rng_seed, db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let mut db = db.into_merkleized().await.unwrap();
            db.sync().await.unwrap();

            // Drop and reopen the db
            let root = db.root();
            drop(db);
            let db = open_db(context, partition).await;

            // Ensure the root matches
            assert_eq!(db.root(), root);

            db.destroy().await.unwrap();
        });
    }
1573
    /// Repeatedly update the same key to a new value and ensure we can prove its current value
    /// after each update.
    #[test_traced("WARN")]
    pub fn test_current_db_proving_repeated_updates() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            // NOTE(review): partition name shared with the build-small test; presumably harmless
            // since each test runs in its own deterministic context -- confirm.
            let partition = "build_small";
            let mut db = open_db(context.clone(), partition).await;

            // Add one key.
            let k = Sha256::fill(0x00);
            let mut old_val = Sha256::fill(0x00);
            // Update the key 255 times, cycling the db through
            // mutable -> durable -> merkleized each iteration.
            for i in 1u8..=255 {
                let v = Sha256::fill(i);
                let mut dirty_db = db.into_mutable();
                dirty_db.update(k, v).await.unwrap();
                assert_eq!(dirty_db.get(&k).await.unwrap().unwrap(), v);
                let (dirty_db, _) = dirty_db.commit(None).await.unwrap();
                let clean_db = dirty_db.into_merkleized().await.unwrap();
                db = clean_db;
                let root = db.root();

                // Create a proof for the current value of k.
                let proof = db.key_value_proof(hasher.inner(), k).await.unwrap();
                assert!(
                    CleanCurrentTest::verify_key_value_proof(hasher.inner(), k, v, &proof, &root),
                    "proof of update {i} failed to verify"
                );
                // Ensure the proof does NOT verify if we use the previous value.
                assert!(
                    !CleanCurrentTest::verify_key_value_proof(
                        hasher.inner(),
                        k,
                        old_val,
                        &proof,
                        &root
                    ),
                    "proof of update {i} verified when it should not have"
                );
                old_val = v;
            }

            db.destroy().await.unwrap();
        });
    }
1620
    /// This test builds a random database and simulates we can recover from different types of
    /// failure scenarios.
    #[test_traced("WARN")]
    pub fn test_current_db_simulate_write_failures() {
        // Number of elements to initially insert into the db.
        const ELEMENTS: u64 = 1000;

        let executor = deterministic::Runner::default();
        executor.start(|mut context| async move {
            let partition = "build_random_fail_commit";
            // Save the seed so the exact same "random" operation sequence can be replayed
            // after each simulated crash.
            let rng_seed = context.next_u64();
            let db = open_db(context.clone(), partition).await.into_mutable();
            let db = apply_random_ops(ELEMENTS, true, rng_seed, db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let mut db = db.into_merkleized().await.unwrap();
            // Capture the committed state so post-crash recovery can be checked against it.
            let committed_root = db.root();
            let committed_op_count = db.op_count();
            let committed_inactivity_floor = db.any.inactivity_floor_loc();
            db.prune(committed_inactivity_floor).await.unwrap();

            // Perform more random operations without committing any of them.
            let db = apply_random_ops(ELEMENTS, false, rng_seed + 1, db.into_mutable())
                .await
                .unwrap();

            // SCENARIO #1: Simulate a crash that happens before any writes. Upon reopening, the
            // state of the DB should be as of the last commit.
            drop(db);
            let db = open_db(context.clone(), partition).await;
            assert_eq!(db.root(), committed_root);
            assert_eq!(db.op_count(), committed_op_count);

            // Re-apply the exact same uncommitted operations.
            let db = apply_random_ops(ELEMENTS, false, rng_seed + 1, db.into_mutable())
                .await
                .unwrap();

            // SCENARIO #2: Simulate a crash that happens after the any db has been committed, but
            // before the state of the pruned bitmap can be written to disk (i.e., before
            // into_merkleized is called). We do this by committing and then dropping the durable
            // db without calling close or into_merkleized.
            let (durable_db, _) = db.commit(None).await.unwrap();
            let committed_op_count = durable_db.op_count();
            drop(durable_db);

            // We should be able to recover, so the root should differ from the previous commit, and
            // the op count should be greater than before.
            let db = open_db(context.clone(), partition).await;
            let scenario_2_root = db.root();

            // To confirm the second committed hash is correct we'll re-build the DB in a new
            // partition, but without any failures. They should have the exact same state.
            let fresh_partition = "build_random_fail_commit_fresh";
            let db = open_db(context.clone(), fresh_partition)
                .await
                .into_mutable();
            let db = apply_random_ops(ELEMENTS, true, rng_seed, db)
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = apply_random_ops(ELEMENTS, false, rng_seed + 1, db.into_mutable())
                .await
                .unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let mut db = db.into_merkleized().await.unwrap();
            db.prune(db.any.inactivity_floor_loc()).await.unwrap();
            // State from scenario #2 should match that of a successful commit.
            assert_eq!(db.op_count(), committed_op_count);
            assert_eq!(db.root(), scenario_2_root);

            db.destroy().await.unwrap();
        });
    }
1696
1697    #[test_traced("WARN")]
1698    pub fn test_current_db_different_pruning_delays_same_root() {
1699        let executor = deterministic::Runner::default();
1700        executor.start(|context| async move {
1701            // Create two databases that are identical other than how they are pruned.
1702            let db_config_no_pruning = current_db_config("no_pruning_test");
1703
1704            let db_config_pruning = current_db_config("pruning_test");
1705
1706            let mut db_no_pruning =
1707                CleanCurrentTest::init(context.clone(), db_config_no_pruning.clone())
1708                    .await
1709                    .unwrap()
1710                    .into_mutable();
1711            let mut db_pruning = CleanCurrentTest::init(context.clone(), db_config_pruning.clone())
1712                .await
1713                .unwrap()
1714                .into_mutable();
1715
1716            // Apply identical operations to both databases, but only prune one.
1717            const NUM_OPERATIONS: u64 = 1000;
1718            for i in 0..NUM_OPERATIONS {
1719                let key = Sha256::hash(&i.to_be_bytes());
1720                let value = Sha256::hash(&(i * 1000).to_be_bytes());
1721
1722                db_no_pruning.update(key, value).await.unwrap();
1723                db_pruning.update(key, value).await.unwrap();
1724
1725                // Commit periodically
1726                if i % 50 == 49 {
1727                    let (durable_no_pruning, _) = db_no_pruning.commit(None).await.unwrap();
1728                    let clean_no_pruning = durable_no_pruning.into_merkleized().await.unwrap();
1729                    let (durable_pruning, _) = db_pruning.commit(None).await.unwrap();
1730                    let mut clean_pruning = durable_pruning.into_merkleized().await.unwrap();
1731                    clean_pruning
1732                        .prune(clean_no_pruning.any.inactivity_floor_loc())
1733                        .await
1734                        .unwrap();
1735                    db_no_pruning = clean_no_pruning.into_mutable();
1736                    db_pruning = clean_pruning.into_mutable();
1737                }
1738            }
1739
1740            // Final commit
1741            let (db_no_pruning, _) = db_no_pruning.commit(None).await.unwrap();
1742            let db_no_pruning = db_no_pruning.into_merkleized().await.unwrap();
1743            let (db_pruning, _) = db_pruning.commit(None).await.unwrap();
1744            let db_pruning = db_pruning.into_merkleized().await.unwrap();
1745
1746            // Get roots from both databases
1747            let root_no_pruning = db_no_pruning.root();
1748            let root_pruning = db_pruning.root();
1749
1750            // Verify they generate the same roots
1751            assert_eq!(root_no_pruning, root_pruning);
1752
1753            drop(db_no_pruning);
1754            drop(db_pruning);
1755
1756            // Restart both databases
1757            let db_no_pruning = CleanCurrentTest::init(context.clone(), db_config_no_pruning)
1758                .await
1759                .unwrap();
1760            let db_pruning = CleanCurrentTest::init(context.clone(), db_config_pruning)
1761                .await
1762                .unwrap();
1763            assert_eq!(
1764                db_no_pruning.inactivity_floor_loc(),
1765                db_pruning.inactivity_floor_loc()
1766            );
1767
1768            // Get roots after restart
1769            let root_no_pruning_restart = db_no_pruning.root();
1770            let root_pruning_restart = db_pruning.root();
1771
1772            // Ensure roots still match after restart
1773            assert_eq!(root_no_pruning, root_no_pruning_restart);
1774            assert_eq!(root_pruning, root_pruning_restart);
1775
1776            db_no_pruning.destroy().await.unwrap();
1777            db_pruning.destroy().await.unwrap();
1778        });
1779    }
1780
    /// Build a tiny database and confirm exclusion proofs work as expected.
    #[test_traced("DEBUG")]
    pub fn test_current_db_exclusion_proofs() {
        let executor = deterministic::Runner::default();
        executor.start(|context| async move {
            let mut hasher = StandardHasher::<Sha256>::new();
            let partition = "exclusion_proofs";
            let db = open_db(context.clone(), partition).await;

            let key_exists_1 = Sha256::fill(0x10);

            // We should be able to prove exclusion for any key against an empty db.
            let empty_root = db.root();
            let empty_proof = db
                .exclusion_proof(hasher.inner(), &key_exists_1)
                .await
                .unwrap();
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &empty_proof,
                &empty_root,
            ));

            // Add `key_exists_1` and test exclusion proving over the single-key database case.
            let v1 = Sha256::fill(0xA1);
            let mut db = db.into_mutable();
            db.update(key_exists_1, v1).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // We shouldn't be able to generate an exclusion proof for a key already in the db.
            let result = db.exclusion_proof(hasher.inner(), &key_exists_1).await;
            assert!(matches!(result, Err(Error::KeyExists)));

            // Generate some valid exclusion proofs for keys on either side.
            let greater_key = Sha256::fill(0xFF);
            let lesser_key = Sha256::fill(0x00);
            let proof = db
                .exclusion_proof(hasher.inner(), &greater_key)
                .await
                .unwrap();
            let proof2 = db
                .exclusion_proof(hasher.inner(), &lesser_key)
                .await
                .unwrap();

            // Since there's only one span in the DB, the two exclusion proofs should be identical,
            // and the proof should verify any key but the one that exists in the db.
            assert_eq!(proof, proof2);
            // Any key except the one that exists should verify against this proof.
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &greater_key,
                &proof,
                &root,
            ));
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &lesser_key,
                &proof,
                &root,
            ));
            // Exclusion should fail if we test it on a key that exists.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &proof,
                &root,
            ));

            // Add a second key and test exclusion proving over the two-key database case.
            let key_exists_2 = Sha256::fill(0x30);
            let v2 = Sha256::fill(0xB2);

            let mut db = db.into_mutable();
            db.update(key_exists_2, v2).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let db = db.into_merkleized().await.unwrap();
            let root = db.root();

            // Use a lesser/greater key that has a translated-key conflict based
            // on our use of OneCap translator.
            let lesser_key = Sha256::fill(0x0F); // < k1=0x10
            let greater_key = Sha256::fill(0x31); // > k2=0x30
            let middle_key = Sha256::fill(0x20); // between k1=0x10 and k2=0x30
            let proof = db
                .exclusion_proof(hasher.inner(), &greater_key)
                .await
                .unwrap();
            // Test the "cycle around" span. This should prove exclusion of greater_key & lesser
            // key, but fail on middle_key.
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &greater_key,
                &proof,
                &root,
            ));
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &lesser_key,
                &proof,
                &root,
            ));
            // middle_key falls in the inner span, not the cycle-around span, so it must fail here.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &middle_key,
                &proof,
                &root,
            ));

            // Due to the cycle, lesser & greater keys should produce the same proof.
            let new_proof = db
                .exclusion_proof(hasher.inner(), &lesser_key)
                .await
                .unwrap();
            assert_eq!(proof, new_proof);

            // Test the inner span [k, k2).
            let proof = db
                .exclusion_proof(hasher.inner(), &middle_key)
                .await
                .unwrap();
            // `k` should fail since it's in the db.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &proof,
                &root,
            ));
            // `middle_key` should succeed since it's in range.
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &middle_key,
                &proof,
                &root,
            ));
            // The span's upper endpoint is in the db, so it must fail as well.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_2,
                &proof,
                &root,
            ));

            // A different in-range key (which also conflicts under the translator) should verify.
            let conflicting_middle_key = Sha256::fill(0x11); // between k1=0x10 and k2=0x30
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &conflicting_middle_key,
                &proof,
                &root,
            ));

            // Using lesser/greater keys for the middle-proof should fail.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &greater_key,
                &proof,
                &root,
            ));
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &lesser_key,
                &proof,
                &root,
            ));

            // Make the DB empty again by deleting the keys and check the empty case
            // again.
            let mut db = db.into_mutable();
            db.delete(key_exists_1).await.unwrap();
            db.delete(key_exists_2).await.unwrap();
            let (db, _) = db.commit(None).await.unwrap();
            let mut db = db.into_merkleized().await.unwrap();
            db.sync().await.unwrap();
            let root = db.root();
            // This root should be different than the empty root from earlier since the DB now has a
            // non-zero number of operations.
            assert!(db.is_empty());
            assert_ne!(db.op_count(), 0);
            assert_ne!(root, empty_root);

            // With no active keys, a single proof should cover the entire keyspace again.
            let proof = db
                .exclusion_proof(hasher.inner(), &key_exists_1)
                .await
                .unwrap();
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &proof,
                &root,
            ));
            assert!(CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_2,
                &proof,
                &root,
            ));

            // Try fooling the verifier with improper values.
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &empty_proof, // wrong proof
                &root,
            ));
            assert!(!CleanCurrentTest::verify_exclusion_proof(
                hasher.inner(),
                &key_exists_1,
                &proof,
                &empty_root, // wrong root
            ));
        });
    }
1995
1996    #[test_traced("DEBUG")]
1997    fn test_batch() {
1998        batch_tests::test_batch(|mut ctx| async move {
1999            let seed = ctx.next_u64();
2000            let partition = format!("current_ordered_batch_{seed}");
2001            open_db(ctx, &partition).await.into_mutable()
2002        });
2003    }
2004
2005    #[allow(dead_code)]
2006    fn assert_merkleized_db_futures_are_send(
2007        db: &mut CleanCurrentTest,
2008        key: Digest,
2009        loc: Location,
2010    ) {
2011        assert_gettable(db, &key);
2012        assert_log_store(db);
2013        assert_prunable_store(db, loc);
2014        assert_merkleized_store(db, loc);
2015        assert_send(db.sync());
2016    }
2017
2018    #[allow(dead_code)]
2019    fn assert_mutable_db_futures_are_send(db: &mut MutableCurrentTest, key: Digest, value: Digest) {
2020        assert_gettable(db, &key);
2021        assert_log_store(db);
2022        assert_send(db.update(key, value));
2023        assert_send(db.create(key, value));
2024        assert_deletable(db, key);
2025        assert_batchable(db, key, value);
2026    }
2027
    /// Compile-time check that a mutable db's `commit` future is `Send`.
    /// Never executed; it exists only so the compiler verifies the bound.
    #[allow(dead_code)]
    fn assert_mutable_db_commit_is_send(db: MutableCurrentTest) {
        assert_send(db.commit(None));
    }
2032}