// commonware_storage/qmdb/current/proof.rs
//! Proof types for [crate::qmdb::current] authenticated databases.
//!
//! This module provides:
//! - [RangeProof]: Proves a range of operations exist in the database.
//! - [OperationProof]: Proves a specific operation is active in the database.

use crate::{
    bitmap::{partial_chunk_root, MerkleizedBitMap},
    journal::contiguous::Contiguous,
    mmr::{
        grafting::{Storage as GraftingStorage, Verifier},
        hasher::Hasher,
        journaled::Mmr,
        mem::Clean,
        storage::Storage,
        verification, Location, Proof,
    },
    qmdb::Error,
};
use commonware_codec::Codec;
use commonware_cryptography::{Digest, Hasher as CHasher};
use commonware_runtime::{Clock, Metrics, Storage as RStorage};
use commonware_utils::bitmap::BitMap;
use core::ops::Range;
use futures::future::try_join_all;
use std::num::NonZeroU64;
use tracing::debug;

29/// A proof that a range of operations exist in the database.
30#[derive(Clone, Eq, PartialEq, Debug)]
31pub struct RangeProof<D: Digest> {
32    /// The MMR digest material required to verify the proof.
33    pub proof: Proof<D>,
34
35    /// The partial chunk digest from the status bitmap at the time of proof generation, if any.
36    pub partial_chunk_digest: Option<D>,
37}
38
39impl<D: Digest> RangeProof<D> {
40    /// Create a new range proof for the provided `range` of operations.
41    pub async fn new<
42        E: RStorage + Clock + Metrics,
43        H: CHasher<Digest = D>,
44        S: Storage<D>,
45        const N: usize,
46    >(
47        hasher: &mut H,
48        status: &MerkleizedBitMap<E, D, N>,
49        grafting_height: u32,
50        mmr: &S,
51        range: Range<Location>,
52    ) -> Result<Self, Error> {
53        let grafted_mmr = GraftingStorage::<'_, H, _, _>::new(status, mmr, grafting_height);
54        let proof = verification::range_proof(&grafted_mmr, range).await?;
55
56        let (last_chunk, next_bit) = status.last_chunk();
57        let partial_chunk_digest = if next_bit != MerkleizedBitMap::<E, D, N>::CHUNK_SIZE_BITS {
58            // Last chunk is incomplete, meaning it's not yet in the MMR and needs to be included
59            // in the proof.
60            hasher.update(last_chunk);
61            Some(hasher.finalize())
62        } else {
63            None
64        };
65
66        Ok(Self {
67            proof,
68            partial_chunk_digest,
69        })
70    }
71
72    /// Returns a proof that the specified range of operations are part of the database, along with
73    /// the operations from the range and their activity status chunks. A truncated range (from
74    /// hitting the max) can be detected by looking at the length of the returned operations vector.
75    ///
76    /// # Errors
77    ///
78    /// Returns [crate::mmr::Error::LocationOverflow] if `start_loc` > [crate::mmr::MAX_LOCATION].
79    /// Returns [crate::mmr::Error::RangeOutOfBounds] if `start_loc` >= number of leaves in the MMR.
80    pub async fn new_with_ops<
81        E: RStorage + Clock + Metrics,
82        H: CHasher<Digest = D>,
83        C: Contiguous,
84        const N: usize,
85    >(
86        hasher: &mut H,
87        status: &MerkleizedBitMap<E, D, N>,
88        height: u32,
89        mmr: &Mmr<E, D, Clean<D>>,
90        log: &C,
91        start_loc: Location,
92        max_ops: NonZeroU64,
93    ) -> Result<(Self, Vec<C::Item>, Vec<[u8; N]>), Error> {
94        // Compute the start and end locations & positions of the range.
95        let leaves = mmr.leaves();
96        if start_loc >= leaves {
97            return Err(crate::mmr::Error::RangeOutOfBounds(start_loc).into());
98        }
99        let max_loc = start_loc.saturating_add(max_ops.get());
100        let end_loc = core::cmp::min(max_loc, leaves);
101
102        // Generate the proof from the grafted MMR.
103        let proof = Self::new(hasher, status, height, mmr, start_loc..end_loc).await?;
104
105        // Collect the operations necessary to verify the proof.
106        let mut ops = Vec::with_capacity((*end_loc - *start_loc) as usize);
107        let futures = (*start_loc..*end_loc)
108            .map(|i| log.read(i))
109            .collect::<Vec<_>>();
110        try_join_all(futures)
111            .await?
112            .into_iter()
113            .for_each(|op| ops.push(op));
114
115        // Gather the chunks necessary to verify the proof.
116        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
117        let start = *start_loc / chunk_bits; // chunk that contains the first bit
118        let end = (*end_loc - 1) / chunk_bits; // chunk that contains the last bit
119        let mut chunks = Vec::with_capacity((end - start + 1) as usize);
120        for i in start..=end {
121            let bit_offset = i * chunk_bits;
122            let chunk = *status.get_chunk_containing(bit_offset);
123            chunks.push(chunk);
124        }
125
126        Ok((proof, ops, chunks))
127    }
128
129    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
130    /// the db with the provided root, and having the activity status described by `chunks`.
131    pub fn verify<H: CHasher<Digest = D>, O: Codec, const N: usize>(
132        &self,
133        hasher: &mut H,
134        grafting_height: u32,
135        start_loc: Location,
136        ops: &[O],
137        chunks: &[[u8; N]],
138        root: &H::Digest,
139    ) -> bool {
140        if ops.is_empty() || chunks.is_empty() {
141            debug!("verification failed, empty input");
142            return false;
143        }
144
145        // Compute the (non-inclusive) end location of the range.
146        let Some(end_loc) = start_loc.checked_add(ops.len() as u64) else {
147            debug!("verification failed, end_loc overflow");
148            return false;
149        };
150
151        let leaves = self.proof.leaves;
152        if end_loc > leaves {
153            debug!(
154                loc = ?end_loc,
155                ?leaves, "verification failed, invalid range"
156            );
157            return false;
158        }
159
160        // Validate the number of input chunks.
161        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
162        let start = *start_loc / chunk_bits; // chunk that contains first bit
163        let end = (*end_loc.saturating_sub(1)) / chunk_bits; // chunk that contains the last bit
164        let expected = end - start + 1;
165        let actual = chunks.len() as u64;
166        if expected != actual {
167            debug!(expected, actual, "verification failed, chunk mismatch");
168            return false;
169        }
170
171        let elements = ops.iter().map(|op| op.encode()).collect::<Vec<_>>();
172
173        let chunk_vec = chunks.iter().map(|c| c.as_ref()).collect::<Vec<_>>();
174        let start_chunk_loc = *start_loc / BitMap::<N>::CHUNK_SIZE_BITS;
175        let mut verifier = Verifier::<H>::new(
176            grafting_height,
177            Location::new_unchecked(start_chunk_loc),
178            chunk_vec,
179        );
180
181        let next_bit = *leaves % BitMap::<N>::CHUNK_SIZE_BITS;
182        if next_bit == 0 {
183            return self
184                .proof
185                .verify_range_inclusion(&mut verifier, &elements, start_loc, root);
186        }
187
188        // The proof must contain the partial chunk digest.
189        let Some(last_chunk_digest) = self.partial_chunk_digest else {
190            debug!("proof has no partial chunk digest");
191            return false;
192        };
193
194        // If the proof is over an operation in the partial chunk, we need to verify the last chunk
195        // digest from the proof matches the digest of chunk, since these bits are not part of the
196        // mmr.
197        if *(end_loc - 1) / BitMap::<N>::CHUNK_SIZE_BITS == *leaves / BitMap::<N>::CHUNK_SIZE_BITS {
198            let Some(last_chunk) = chunks.last() else {
199                debug!("chunks is empty");
200                return false;
201            };
202            let expected_last_chunk_digest = verifier.digest(last_chunk);
203            if last_chunk_digest != expected_last_chunk_digest {
204                debug!("last chunk digest does not match expected value");
205                return false;
206            }
207        }
208
209        // Reconstruct the MMR root.
210        let mmr_root = match self
211            .proof
212            .reconstruct_root(&mut verifier, &elements, start_loc)
213        {
214            Ok(root) => root,
215            Err(error) => {
216                debug!(error = ?error, "invalid proof input");
217                return false;
218            }
219        };
220
221        let reconstructed_root =
222            partial_chunk_root::<H, N>(hasher, &mmr_root, next_bit, &last_chunk_digest);
223
224        reconstructed_root == *root
225    }
226}
227
228/// A proof that a specific operation is currently active in the database.
229#[derive(Clone, Eq, PartialEq, Debug)]
230pub struct OperationProof<D: Digest, const N: usize> {
231    /// The location of the operation in the db.
232    pub loc: Location,
233
234    /// The status bitmap chunk that contains the bit corresponding the operation's location.
235    pub chunk: [u8; N],
236
237    /// The range proof that incorporates activity status for the operation designated by `loc`.
238    pub range_proof: RangeProof<D>,
239}
240
241impl<D: Digest, const N: usize> OperationProof<D, N> {
242    /// Return an inclusion proof that incorporates activity status for the operation designated by
243    /// `loc`.
244    ///
245    /// # Panics
246    ///
247    /// - Panics if `loc` is out of bounds.
248    pub async fn new<E: RStorage + Clock + Metrics, H: CHasher<Digest = D>, S: Storage<D>>(
249        hasher: &mut H,
250        status: &MerkleizedBitMap<E, D, N>,
251        grafting_height: u32,
252        mmr: &S,
253        loc: Location,
254    ) -> Result<Self, Error> {
255        // Since `loc` is assumed to be in-bounds, `loc + 1` won't overflow.
256        let range_proof =
257            RangeProof::<D>::new(hasher, status, grafting_height, mmr, loc..loc + 1).await?;
258        let chunk = *status.get_chunk_containing(*loc);
259
260        Ok(Self {
261            loc,
262            chunk,
263            range_proof,
264        })
265    }
266
267    /// Verify that the proof proves that `operation` is active in the database with the given
268    /// `root`.
269    pub fn verify<H: CHasher<Digest = D>, O: Codec>(
270        &self,
271        hasher: &mut H,
272        grafting_height: u32,
273        operation: O,
274        root: &D,
275    ) -> bool {
276        // Make sure that the bit for the operation in the bitmap chunk is actually a 1 (indicating
277        // the operation is indeed active).
278        if !BitMap::<N>::get_bit_from_chunk(&self.chunk, *self.loc) {
279            debug!(
280                ?self.loc,
281                "proof verification failed, operation is inactive"
282            );
283            return false;
284        }
285
286        self.range_proof.verify(
287            hasher,
288            grafting_height,
289            self.loc,
290            &[operation],
291            &[self.chunk],
292            root,
293        )
294    }
295}