// commonware_storage/qmdb/current/proof.rs

//! Proof types for [crate::qmdb::current] authenticated databases.
//!
//! This module provides:
//! - [RangeProof]: Proves a range of operations exist in the database.
//! - [OperationProof]: Proves a specific operation is active in the database.

7use crate::{
8    bitmap::CleanBitMap,
9    journal::contiguous::Contiguous,
10    mmr::{
11        grafting::{Storage as GraftingStorage, Verifier},
12        hasher::Hasher,
13        journaled::Mmr,
14        mem::Clean,
15        storage::Storage,
16        verification, Location, Proof,
17    },
18    qmdb::Error,
19};
20use commonware_codec::Codec;
21use commonware_cryptography::{Digest, Hasher as CHasher};
22use commonware_runtime::{Clock, Metrics, Storage as RStorage};
23use core::ops::Range;
24use futures::future::try_join_all;
25use std::num::NonZeroU64;
26use tracing::debug;
27
/// A proof that a range of operations exist in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct RangeProof<D: Digest> {
    /// The MMR digest material required to verify the proof.
    pub proof: Proof<D>,

    /// The partial chunk digest from the status bitmap at the time of proof generation, if any.
    /// `None` when the bitmap's last chunk was complete (i.e. already committed to the MMR).
    pub partial_chunk_digest: Option<D>,
}
37
38impl<D: Digest> RangeProof<D> {
39    /// Create a new range proof for the provided `range` of operations.
40    pub async fn new<H: CHasher<Digest = D>, S: Storage<D>, const N: usize>(
41        hasher: &mut H,
42        status: &CleanBitMap<D, N>,
43        grafting_height: u32,
44        mmr: &S,
45        range: Range<Location>,
46    ) -> Result<Self, Error> {
47        let grafted_mmr = GraftingStorage::<'_, H, _, _>::new(status, mmr, grafting_height);
48        let proof = verification::range_proof(&grafted_mmr, range).await?;
49
50        let (last_chunk, next_bit) = status.last_chunk();
51        let partial_chunk_digest = if next_bit != CleanBitMap::<D, N>::CHUNK_SIZE_BITS {
52            // Last chunk is incomplete, meaning it's not yet in the MMR and needs to be included
53            // in the proof.
54            hasher.update(last_chunk);
55            Some(hasher.finalize())
56        } else {
57            None
58        };
59
60        Ok(Self {
61            proof,
62            partial_chunk_digest,
63        })
64    }
65
66    /// Returns a proof that the specified range of operations are part of the database, along with
67    /// the operations from the range and their activity status chunks. A truncated range (from
68    /// hitting the max) can be detected by looking at the length of the returned operations vector.
69    ///
70    /// # Errors
71    ///
72    /// Returns [crate::mmr::Error::LocationOverflow] if `start_loc` > [crate::mmr::MAX_LOCATION].
73    /// Returns [crate::mmr::Error::RangeOutOfBounds] if `start_loc` >= number of leaves in the MMR.
74    pub async fn new_with_ops<
75        E: RStorage + Clock + Metrics,
76        H: CHasher<Digest = D>,
77        C: Contiguous,
78        const N: usize,
79    >(
80        hasher: &mut H,
81        status: &CleanBitMap<D, N>,
82        height: u32,
83        mmr: &Mmr<E, D, Clean<D>>,
84        log: &C,
85        start_loc: Location,
86        max_ops: NonZeroU64,
87    ) -> Result<(Self, Vec<C::Item>, Vec<[u8; N]>), Error> {
88        // Compute the start and end locations & positions of the range.
89        let leaves = mmr.leaves();
90        if start_loc >= leaves {
91            return Err(crate::mmr::Error::RangeOutOfBounds(start_loc).into());
92        }
93        let max_loc = start_loc.saturating_add(max_ops.get());
94        let end_loc = core::cmp::min(max_loc, leaves);
95
96        // Generate the proof from the grafted MMR.
97        let proof = Self::new(hasher, status, height, mmr, start_loc..end_loc).await?;
98
99        // Collect the operations necessary to verify the proof.
100        let mut ops = Vec::with_capacity((*end_loc - *start_loc) as usize);
101        let futures = (*start_loc..*end_loc)
102            .map(|i| log.read(i))
103            .collect::<Vec<_>>();
104        try_join_all(futures)
105            .await?
106            .into_iter()
107            .for_each(|op| ops.push(op));
108
109        // Gather the chunks necessary to verify the proof.
110        let chunk_bits = CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS;
111        let start = *start_loc / chunk_bits; // chunk that contains the very first bit.
112        let end = (*end_loc - 1) / chunk_bits; // chunk that contains the very last bit.
113        let mut chunks = Vec::with_capacity((end - start + 1) as usize);
114        for i in start..=end {
115            let bit_offset = i * chunk_bits;
116            let chunk = *status.get_chunk_containing(bit_offset);
117            chunks.push(chunk);
118        }
119
120        Ok((proof, ops, chunks))
121    }
122
123    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
124    /// the db with the provided root, and having the activity status described by `chunks`.
125    pub fn verify<H: CHasher<Digest = D>, O: Codec, const N: usize>(
126        &self,
127        hasher: &mut H,
128        grafting_height: u32,
129        start_loc: Location,
130        ops: &[O],
131        chunks: &[[u8; N]],
132        root: &H::Digest,
133    ) -> bool {
134        let Ok(op_count) = Location::try_from(self.proof.size) else {
135            debug!("verification failed, invalid proof size");
136            return false;
137        };
138
139        // Compute the (non-inclusive) end location of the range.
140        let Some(end_loc) = start_loc.checked_add(ops.len() as u64) else {
141            debug!("verification failed, end_loc overflow");
142            return false;
143        };
144        if end_loc > op_count {
145            debug!(
146                loc = ?end_loc,
147                ?op_count, "proof verification failed, invalid range"
148            );
149            return false;
150        }
151
152        let elements = ops.iter().map(|op| op.encode()).collect::<Vec<_>>();
153
154        let chunk_vec = chunks.iter().map(|c| c.as_ref()).collect::<Vec<_>>();
155        let start_chunk_loc = *start_loc / CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS;
156        let mut verifier = Verifier::<H>::new(
157            grafting_height,
158            Location::new_unchecked(start_chunk_loc),
159            chunk_vec,
160        );
161
162        let next_bit = *op_count % CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS;
163        if next_bit == 0 {
164            return self
165                .proof
166                .verify_range_inclusion(&mut verifier, &elements, start_loc, root);
167        }
168
169        // The proof must contain the partial chunk digest.
170        let Some(last_chunk_digest) = self.partial_chunk_digest else {
171            debug!("proof has no partial chunk digest");
172            return false;
173        };
174
175        // If the proof is over an operation in the partial chunk, we need to verify the last chunk
176        // digest from the proof matches the digest of chunk, since these bits are not part of the
177        // mmr.
178        if *(end_loc - 1) / CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS
179            == *op_count / CleanBitMap::<H::Digest, N>::CHUNK_SIZE_BITS
180        {
181            let Some(last_chunk) = chunks.last() else {
182                debug!("chunks is empty");
183                return false;
184            };
185            let expected_last_chunk_digest = verifier.digest(last_chunk);
186            if last_chunk_digest != expected_last_chunk_digest {
187                debug!("last chunk digest does not match expected value");
188                return false;
189            }
190        }
191
192        // Reconstruct the MMR root.
193        let mmr_root = match self
194            .proof
195            .reconstruct_root(&mut verifier, &elements, start_loc)
196        {
197            Ok(root) => root,
198            Err(error) => {
199                debug!(error = ?error, "invalid proof input");
200                return false;
201            }
202        };
203
204        let reconstructed_root = CleanBitMap::<H::Digest, N>::partial_chunk_root(
205            hasher,
206            &mmr_root,
207            next_bit,
208            &last_chunk_digest,
209        );
210
211        reconstructed_root == *root
212    }
213}
214
/// A proof that a specific operation is currently active in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct OperationProof<D: Digest, const N: usize> {
    /// The location of the operation in the db.
    pub loc: Location,

    /// The status bitmap chunk that contains the bit corresponding to the operation's location.
    pub chunk: [u8; N],

    /// The range proof that incorporates activity status for the operation designated by `loc`.
    pub range_proof: RangeProof<D>,
}
227
228impl<D: Digest, const N: usize> OperationProof<D, N> {
229    /// Return an inclusion proof that incorporates activity status for the operation designated by
230    /// `loc`.
231    ///
232    /// # Panics
233    ///
234    /// - Panics if `loc` is out of bounds.
235    pub async fn new<H: CHasher<Digest = D>, S: Storage<D>>(
236        hasher: &mut H,
237        status: &CleanBitMap<D, N>,
238        grafting_height: u32,
239        mmr: &S,
240        loc: Location,
241    ) -> Result<Self, Error> {
242        // Since `loc` is assumed to be in-bounds, `loc + 1` won't overflow.
243        let range_proof =
244            RangeProof::<D>::new(hasher, status, grafting_height, mmr, loc..loc + 1).await?;
245        let chunk = *status.get_chunk_containing(*loc);
246
247        Ok(Self {
248            loc,
249            chunk,
250            range_proof,
251        })
252    }
253
254    /// Verify that the proof proves that `operation` is active in the database with the given
255    /// `root`.
256    pub fn verify<H: CHasher<Digest = D>, O: Codec>(
257        &self,
258        hasher: &mut H,
259        grafting_height: u32,
260        operation: O,
261        root: &D,
262    ) -> bool {
263        // Make sure that the bit for the operation in the bitmap chunk is actually a 1 (indicating
264        // the operation is indeed active).
265        if !CleanBitMap::<H::Digest, N>::get_bit_from_chunk(&self.chunk, *self.loc) {
266            debug!(
267                ?self.loc,
268                "proof verification failed, operation is inactive"
269            );
270            return false;
271        }
272
273        self.range_proof.verify(
274            hasher,
275            grafting_height,
276            self.loc,
277            &[operation],
278            &[self.chunk],
279            root,
280        )
281    }
282}