commonware_storage/qmdb/current/
mod.rs

//! A _Current_ authenticated database provides succinct proofs of _any_ value ever associated with
//! a key, and also whether that value is the _current_ value associated with it. The
//! implementations are based on a [crate::qmdb::any] authenticated database combined with an
//! authenticated [CleanBitMap] over the activity status of each operation.
//! The two structures are "grafted" together to minimize proof sizes.
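//!
//! Concrete database implementations are provided by the [ordered] and [unordered] submodules.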

use crate::{
    bitmap::{CleanBitMap, DirtyBitMap},
    mmr::{
        grafting::{Hasher as GraftingHasher, Storage as GraftingStorage},
        hasher::Hasher,
        journaled::Mmr,
        mem::Clean,
        StandardHasher,
    },
    qmdb::{any::FixedConfig as AConfig, Error},
    translator::Translator,
};
use commonware_cryptography::{DigestOf, Hasher as CHasher};
use commonware_parallel::ThreadPool;
use commonware_runtime::{buffer::PoolRef, Clock, Metrics, Storage as RStorage};
use std::num::{NonZeroU64, NonZeroUsize};

pub mod ordered;
pub mod proof;
pub mod unordered;

/// Configuration for a `Current` authenticated db with fixed-size values.
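///
/// # Example
///
/// A minimal sketch of filling in this config. `MyTranslator`, `buffer_pool`, and the partition
/// names are placeholders supplied by the caller, not values defined by this crate.
///
/// ```ignore
/// use std::num::{NonZeroU64, NonZeroUsize};
///
/// let cfg = FixedConfig {
///     mmr_journal_partition: "current_mmr_journal".into(),
///     mmr_items_per_blob: NonZeroU64::new(4096).unwrap(),
///     mmr_write_buffer: NonZeroUsize::new(1024).unwrap(),
///     mmr_metadata_partition: "current_mmr_metadata".into(),
///     log_journal_partition: "current_log_journal".into(),
///     log_items_per_blob: NonZeroU64::new(4096).unwrap(),
///     log_write_buffer: NonZeroUsize::new(1024).unwrap(),
///     bitmap_metadata_partition: "current_bitmap_metadata".into(),
///     translator: MyTranslator::default(),
///     thread_pool: None,
///     buffer_pool,
/// };
/// ```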
#[derive(Clone)]
pub struct FixedConfig<T: Translator> {
    /// The name of the storage partition used for the MMR's backing journal.
    pub mmr_journal_partition: String,

    /// The items per blob configuration value used by the MMR journal.
    pub mmr_items_per_blob: NonZeroU64,

    /// The size of the write buffer to use for each blob in the MMR journal.
    pub mmr_write_buffer: NonZeroUsize,

    /// The name of the storage partition used for the MMR's metadata.
    pub mmr_metadata_partition: String,

    /// The name of the storage partition used to persist the (pruned) log of operations.
    pub log_journal_partition: String,

    /// The items per blob configuration value used by the log journal.
    pub log_items_per_blob: NonZeroU64,

    /// The size of the write buffer to use for each blob in the log journal.
    pub log_write_buffer: NonZeroUsize,

    /// The name of the storage partition used for the bitmap metadata.
    pub bitmap_metadata_partition: String,

    /// The translator used by the compressed index.
    pub translator: T,

    /// An optional thread pool to use for parallelizing batch operations.
    pub thread_pool: Option<ThreadPool>,

    /// The buffer pool to use for caching data.
    pub buffer_pool: PoolRef,
}

impl<T: Translator> FixedConfig<T> {
    /// Convert this config to an [AConfig] used to initialize the authenticated log.
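    ///
    /// The bitmap-specific `bitmap_metadata_partition` field has no counterpart in [AConfig] and
    /// is dropped by this conversion.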
    pub fn to_any_config(self) -> AConfig<T> {
        AConfig {
            mmr_journal_partition: self.mmr_journal_partition,
            mmr_metadata_partition: self.mmr_metadata_partition,
            mmr_items_per_blob: self.mmr_items_per_blob,
            mmr_write_buffer: self.mmr_write_buffer,
            log_journal_partition: self.log_journal_partition,
            log_items_per_blob: self.log_items_per_blob,
            log_write_buffer: self.log_write_buffer,
            translator: self.translator,
            thread_pool: self.thread_pool,
            buffer_pool: self.buffer_pool,
        }
    }
}

/// Returns the root of the Current QMDB represented by the provided MMR and bitmap.
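///
/// If the bitmap's last chunk is complete, the grafted MMR root alone is returned. Otherwise the
/// returned digest also commits to the partial last chunk, conceptually
/// `H(mmr_root, next_bit, H(last_chunk))`, so uncommitted bits are still captured by the root.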
async fn root<E: RStorage + Clock + Metrics, H: CHasher, const N: usize>(
    hasher: &mut StandardHasher<H>,
    height: u32,
    status: &CleanBitMap<H::Digest, N>,
    mmr: &Mmr<E, H::Digest, Clean<DigestOf<H>>>,
) -> Result<H::Digest, Error> {
    let grafted_mmr = GraftingStorage::<'_, H, _, _>::new(status, mmr, height);
    let mmr_root = grafted_mmr.root(hasher).await?;

    // If we are on a chunk boundary, then the mmr_root fully captures the state of the DB.
    let (last_chunk, next_bit) = status.last_chunk();
    if next_bit == CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS {
        // Last chunk is complete, no partial chunk to add
        return Ok(mmr_root);
    }

    // There are bits in an uncommitted (partial) chunk, so we need to incorporate that information
    // into the root digest to fully capture the database state. We do so by hashing the mmr root
    // along with the number of bits within the last chunk and the digest of the last chunk.
    hasher.inner().update(last_chunk);
    let last_chunk_digest = hasher.inner().finalize();

    Ok(CleanBitMap::<H::Digest, N>::partial_chunk_root(
        hasher.inner(),
        &mmr_root,
        next_bit,
        &last_chunk_digest,
    ))
}

/// Consumes a `DirtyBitMap`, performs merkleization using the provided hasher and MMR storage,
/// and returns a `CleanBitMap` containing the merkleized result.
///
/// # Arguments
/// * `hasher` - The hasher used for merkleization.
/// * `status` - The `DirtyBitMap` to be merkleized. Ownership is taken.
/// * `mmr` - The MMR storage used for grafting.
/// * `grafting_height` - The height at which grafting occurs.
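///
/// The returned [CleanBitMap] is the form expected by [root] when computing the database's root
/// digest.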
async fn merkleize_grafted_bitmap<H, const N: usize>(
    hasher: &mut StandardHasher<H>,
    status: DirtyBitMap<H::Digest, N>,
    mmr: &impl crate::mmr::storage::Storage<H::Digest>,
    grafting_height: u32,
) -> Result<CleanBitMap<H::Digest, N>, Error>
where
    H: CHasher,
{
    let mut grafter = GraftingHasher::new(hasher, grafting_height);
    grafter
        .load_grafted_digests(&status.dirty_chunks(), mmr)
        .await?;
    status.merkleize(&mut grafter).await.map_err(Into::into)
}