Skip to main content

commonware_storage/qmdb/current/
proof.rs

//! Proof types for [crate::qmdb::current] authenticated databases.
//!
//! This module provides:
//! - [RangeProof]: Proves a range of operations exist in the database.
//! - [OperationProof]: Proves a specific operation is active in the database.

7use crate::{
8    journal::contiguous::{Contiguous, Reader as _},
9    mmr::{hasher::Hasher as _, storage::Storage, verification, Location, Proof},
10    qmdb::{current::grafting, Error},
11};
12use commonware_codec::Codec;
13use commonware_cryptography::{Digest, Hasher as CHasher};
14use commonware_utils::bitmap::Prunable as BitMap;
15use core::ops::Range;
16use futures::future::try_join_all;
17use std::num::NonZeroU64;
18use tracing::debug;
19
/// A proof that a range of operations exist in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct RangeProof<D: Digest> {
    /// The MMR digest material required to verify the proof.
    pub proof: Proof<D>,

    /// The partial chunk digest from the status bitmap at the time of proof generation, if any.
    /// `None` when the bitmap's last chunk was complete (and therefore already in the MMR).
    pub partial_chunk_digest: Option<D>,

    /// The ops MMR root at the time of proof generation.
    /// Needed by the verifier to reconstruct the canonical root.
    pub ops_root: D,
}
33
impl<D: Digest> RangeProof<D> {
    /// Create a new range proof for the provided `range` of operations.
    ///
    /// `ops_root` is the ops MMR root at proof-generation time; it is embedded in the proof so
    /// the verifier can reconstruct the canonical root. If the status bitmap's last chunk is
    /// incomplete, a digest of that chunk is also captured in the proof.
    pub async fn new<H: CHasher<Digest = D>, S: Storage<D>, const N: usize>(
        hasher: &mut H,
        status: &BitMap<N>,
        storage: &S,
        range: Range<Location>,
        ops_root: D,
    ) -> Result<Self, Error> {
        // MMR inclusion proof over the requested range of leaves.
        let proof = verification::range_proof(storage, range).await?;

        let (last_chunk, next_bit) = status.last_chunk();
        let partial_chunk_digest = if next_bit != BitMap::<N>::CHUNK_SIZE_BITS {
            // Last chunk is incomplete, meaning it's not yet in the MMR and needs to be included
            // in the proof.
            hasher.update(last_chunk);
            Some(hasher.finalize())
        } else {
            None
        };

        Ok(Self {
            proof,
            partial_chunk_digest,
            ops_root,
        })
    }

    /// Returns a proof that the specified range of operations are part of the database, along with
    /// the operations from the range and their activity status chunks. A truncated range (from
    /// hitting the max) can be detected by looking at the length of the returned operations vector.
    ///
    /// # Errors
    ///
    /// Returns [Error::OperationPruned] if `start_loc` falls in a pruned bitmap chunk.
    /// Returns [crate::mmr::Error::LocationOverflow] if `start_loc` > [crate::mmr::MAX_LOCATION].
    /// Returns [crate::mmr::Error::RangeOutOfBounds] if `start_loc` >= number of leaves in the MMR.
    pub async fn new_with_ops<
        H: CHasher<Digest = D>,
        C: Contiguous,
        S: Storage<D>,
        const N: usize,
    >(
        hasher: &mut H,
        status: &BitMap<N>,
        storage: &S,
        log: &C,
        start_loc: Location,
        max_ops: NonZeroU64,
        ops_root: D,
    ) -> Result<(Self, Vec<C::Item>, Vec<[u8; N]>), Error> {
        // Compute the start and end locations & positions of the range.
        let leaves = Location::new(status.len());
        if start_loc >= leaves {
            return Err(crate::mmr::Error::RangeOutOfBounds(start_loc).into());
        }

        // Reject ranges that start in pruned bitmap chunks.
        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
        let start = *start_loc / chunk_bits;
        if (start as usize) < status.pruned_chunks() {
            return Err(Error::OperationPruned(start_loc));
        }

        // Cap the end of the range at `max_ops` operations and at the number of leaves.
        let max_loc = start_loc.saturating_add(max_ops.get());
        let end_loc = core::cmp::min(max_loc, leaves);

        // Generate the proof from the grafted storage.
        let proof = Self::new(hasher, status, storage, start_loc..end_loc, ops_root).await?;

        // Collect the operations necessary to verify the proof (reads issued concurrently,
        // results preserved in location order by try_join_all).
        let mut ops = Vec::with_capacity((*end_loc - *start_loc) as usize);
        let reader = log.reader().await;
        let futures = (*start_loc..*end_loc)
            .map(|i| reader.read(i))
            .collect::<Vec<_>>();
        try_join_all(futures)
            .await?
            .into_iter()
            .for_each(|op| ops.push(op));

        // Gather the chunks necessary to verify the proof.
        let end = (*end_loc - 1) / chunk_bits; // chunk that contains the last bit
        let mut chunks = Vec::with_capacity((end - start + 1) as usize);
        for i in start..=end {
            chunks.push(*status.get_chunk(i as usize));
        }

        Ok((proof, ops, chunks))
    }

    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
    /// the db with the provided root, and having the activity status described by `chunks`.
    pub fn verify<H: CHasher<Digest = D>, O: Codec, const N: usize>(
        &self,
        hasher: &mut H,
        start_loc: Location,
        ops: &[O],
        chunks: &[[u8; N]],
        root: &H::Digest,
    ) -> bool {
        // A valid proof always covers at least one operation and one status chunk.
        if ops.is_empty() || chunks.is_empty() {
            debug!("verification failed, empty input");
            return false;
        }

        // Compute the (non-inclusive) end location of the range.
        let Some(end_loc) = start_loc.checked_add(ops.len() as u64) else {
            debug!("verification failed, end_loc overflow");
            return false;
        };

        // The range must fit within the number of leaves the proof claims to cover.
        let leaves = self.proof.leaves;
        if end_loc > leaves {
            debug!(
                loc = ?end_loc,
                ?leaves, "verification failed, invalid range"
            );
            return false;
        }

        // Validate the number of input chunks.
        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
        let start = *start_loc / chunk_bits; // chunk that contains first bit
        let end = (*end_loc.saturating_sub(1)) / chunk_bits; // chunk that contains the last bit
        let expected = end - start + 1;
        let actual = chunks.len() as u64;
        if expected != actual {
            debug!(expected, actual, "verification failed, chunk mismatch");
            return false;
        }

        // Encode each operation so it can be checked against the proof's leaf material.
        let elements = ops.iter().map(|op| op.encode()).collect::<Vec<_>>();

        // Build a grafting verifier seeded with the caller-provided status chunks.
        let chunk_vec = chunks.iter().map(|c| c.as_ref()).collect::<Vec<_>>();
        let start_chunk_idx = *start_loc / BitMap::<N>::CHUNK_SIZE_BITS; // same value as `start`
        let mut verifier =
            grafting::Verifier::<H>::new(grafting::height::<N>(), start_chunk_idx, chunk_vec);

        // A non-zero `next_bit` means the final bitmap chunk was only partially filled when the
        // proof was generated, and hence not committed to the MMR.
        let next_bit = *leaves % BitMap::<N>::CHUNK_SIZE_BITS;
        let has_partial_chunk = next_bit != 0;

        // For partial chunks, validate the last chunk digest from the proof.
        if has_partial_chunk {
            let Some(last_chunk_digest) = self.partial_chunk_digest else {
                debug!("proof has no partial chunk digest");
                return false;
            };

            // If the proof covers an operation in the partial chunk, verify that the
            // chunk provided by the caller matches the digest embedded in the proof.
            if *(end_loc - 1) / BitMap::<N>::CHUNK_SIZE_BITS
                == *leaves / BitMap::<N>::CHUNK_SIZE_BITS
            {
                let Some(last_chunk) = chunks.last() else {
                    // Unreachable in practice: the empty-input check above already rejected
                    // an empty `chunks` slice.
                    debug!("chunks is empty");
                    return false;
                };
                let expected_last_chunk_digest = verifier.digest(last_chunk);
                if last_chunk_digest != expected_last_chunk_digest {
                    debug!("last chunk digest does not match expected value");
                    return false;
                }
            }
        }

        // Reconstruct the grafted MMR root from the proof.
        let mmr_root = match self
            .proof
            .reconstruct_root(&mut verifier, &elements, start_loc)
        {
            Ok(root) => root,
            Err(error) => {
                debug!(error = ?error, "invalid proof input");
                return false;
            }
        };

        // Compute the canonical root and compare:
        // H(ops_root || mmr_root [|| next_bit || partial_chunk_digest]).
        hasher.update(&self.ops_root);
        hasher.update(&mmr_root);
        if has_partial_chunk {
            // partial_chunk_digest is guaranteed Some by the check above.
            hasher.update(&next_bit.to_be_bytes());
            hasher.update(self.partial_chunk_digest.as_ref().unwrap());
        }
        let reconstructed_root = hasher.finalize();
        reconstructed_root == *root
    }
}
224
/// A proof that a specific operation is currently active in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct OperationProof<D: Digest, const N: usize> {
    /// The location of the operation in the db.
    pub loc: Location,

    /// The status bitmap chunk that contains the bit corresponding to the operation's location.
    pub chunk: [u8; N],

    /// The range proof that incorporates activity status for the operation designated by `loc`.
    pub range_proof: RangeProof<D>,
}
237
238impl<D: Digest, const N: usize> OperationProof<D, N> {
239    /// Return an inclusion proof that incorporates activity status for the operation designated by
240    /// `loc`.
241    ///
242    /// # Errors
243    ///
244    /// Returns [Error::OperationPruned] if `loc` falls in a pruned bitmap chunk.
245    pub async fn new<H: CHasher<Digest = D>, S: Storage<D>>(
246        hasher: &mut H,
247        status: &BitMap<N>,
248        storage: &S,
249        loc: Location,
250        ops_root: D,
251    ) -> Result<Self, Error> {
252        // Reject locations in pruned bitmap chunks.
253        if BitMap::<N>::to_chunk_index(*loc) < status.pruned_chunks() {
254            return Err(Error::OperationPruned(loc));
255        }
256        let range_proof = RangeProof::new(hasher, status, storage, loc..loc + 1, ops_root).await?;
257        let chunk = *status.get_chunk_containing(*loc);
258        Ok(Self {
259            loc,
260            chunk,
261            range_proof,
262        })
263    }
264
265    /// Verify that the proof proves that `operation` is active in the database with the given
266    /// `root`.
267    pub fn verify<H: CHasher<Digest = D>, O: Codec>(
268        &self,
269        hasher: &mut H,
270        operation: O,
271        root: &D,
272    ) -> bool {
273        // Make sure that the bit for the operation in the bitmap chunk is actually a 1 (indicating
274        // the operation is indeed active).
275        if !BitMap::<N>::get_bit_from_chunk(&self.chunk, *self.loc) {
276            debug!(
277                ?self.loc,
278                "proof verification failed, operation is inactive"
279            );
280            return false;
281        }
282
283        self.range_proof
284            .verify(hasher, self.loc, &[operation], &[self.chunk], root)
285    }
286}