// commonware_storage/qmdb/current/proof.rs
1//! Proof types for [crate::qmdb::current] authenticated databases.
2//!
3//! This module provides:
4//! - [RangeProof]: Proves a range of operations exist in the database.
5//! - [OperationProof]: Proves a specific operation is active in the database.
6
7use crate::{
8    journal::contiguous::{Contiguous, Reader as _},
9    merkle::{
10        self, hasher::Hasher, storage::Storage, Family, Graftable, Location, Position, Proof,
11    },
12    qmdb::{current::grafting, Error},
13};
14use commonware_codec::Codec;
15use commonware_cryptography::{Digest, Hasher as CHasher};
16use commonware_utils::bitmap::{Prunable as BitMap, Readable as BitmapReadable};
17use core::ops::Range;
18use futures::future::try_join_all;
19use std::{collections::BTreeMap, num::NonZeroU64};
20use tracing::debug;
21
/// An inventory of all structural peaks for a Merkle-family tree, mapped linearly top-to-bottom
/// relative to the bounds of a verified range proof.
///
/// Because the database operations log acts dynamically like an append-only structure (MMR or MMB),
/// the elements verified in a range proof intersect with zero or more contiguous root peaks.
/// This struct mechanically buckets every peak into exactly one of three sequential layout regions:
/// those appearing structurally before the range, those structurally overlapping the range, and
/// those physically placed after the range bounds.
///
/// Each entry is a `(position, height)` pair; a peak of height `h` covers `2^h` leaves.
struct PeakLayout<F: Family> {
    /// Peaks whose leaves are entirely preceding the operation range's starting location.
    prefix: Vec<(Position<F>, u32)>,
    /// Peaks that physically intersect with the operations proven within the range.
    range: Vec<(Position<F>, u32)>,
    /// Peaks whose leaves entirely succeed the operation range's ending location.
    after: Vec<(Position<F>, u32)>,
}
38
39/// Helper to bucket a tree's current peaks into prefix, range, and after sub-vectors.
40///
41/// Traverses all structural peaks left-to-right (from largest/leftmost to smallest/rightmost)
42/// and divides them into the three regions described by [`PeakLayout`].
43fn peak_layout<F: Family>(
44    leaves: Location<F>,
45    range: Range<Location<F>>,
46) -> Result<PeakLayout<F>, merkle::Error<F>> {
47    if range.is_empty() {
48        return Err(merkle::Error::Empty);
49    }
50    let end_minus_one = range.end.checked_sub(1).expect("range is non-empty");
51    if end_minus_one >= leaves {
52        return Err(merkle::Error::RangeOutOfBounds(range.end));
53    }
54
55    let size = Position::<F>::try_from(leaves)?;
56    let mut prefix = Vec::new();
57    let mut range_peaks = Vec::new();
58    let mut after = Vec::new();
59    let mut leaf_cursor = 0u64;
60
61    for (peak_pos, height) in F::peaks(size) {
62        let leaf_end = leaf_cursor + (1u64 << height);
63        if leaf_end <= *range.start {
64            prefix.push((peak_pos, height));
65        } else if leaf_cursor >= *range.end {
66            after.push((peak_pos, height));
67        } else {
68            range_peaks.push((peak_pos, height));
69        }
70        leaf_cursor = leaf_end;
71    }
72
73    Ok(PeakLayout {
74        prefix,
75        range: range_peaks,
76        after,
77    })
78}
79
80/// Determines if a specific chunk index spans multiple peaks and has been fully sealed.
81///
82/// If `chunk_idx` is less than `complete_chunks` and structurally covers more than
83/// one peak, it implies those discrete ops peaks must be explicitly folded together (via the
84/// "grafted fold" interception mechanics) during proof verification.
85fn chunk_needs_grafted_fold<F: Graftable>(
86    size: Position<F>,
87    chunk_idx: u64,
88    grafting_height: u32,
89    complete_chunks: u64,
90) -> bool {
91    chunk_idx < complete_chunks && F::chunk_peaks(size, chunk_idx, grafting_height).count() > 1
92}
93
94/// Finds the index in `prefix_peaks` where the standard prefix accumulation must stop.
95///
96/// During proof generation, prefix peaks are typically accumulated into a single digest
97/// (`pre_prefix_acc`). However, if a complete chunk spans some of these prefix peaks AND
98/// extends into the proven range, we must NOT fold those overlapping prefix peaks into the generic
99/// accumulator. Instead, we must expose them individually (`unfolded_prefix_peaks`) so the
100/// Verifier can correctly regroup them with the in-range peaks for grafted root reconstruction.
101/// Returns the slice index where unfolding must begin, or `None` if zero unfolding is needed.
102fn unfolding_start_idx<F: Graftable>(
103    prefix_peaks: &[(Position<F>, u32)],
104    grafting_height: u32,
105    start_chunk: u64,
106    complete_chunks: u64,
107) -> Option<usize> {
108    let mut leaf_cursor = 0;
109    prefix_peaks.iter().position(|&(_pos, height)| {
110        let chunk_idx = leaf_cursor / (1u64 << grafting_height);
111        leaf_cursor += 1u64 << height;
112        chunk_idx == start_chunk && chunk_idx < complete_chunks
113    })
114}
115
116/// Checks if the provided proof interacts with ANY multi-peak complete chunks.
117///
118/// It scans all peaks dynamically bucketed by `layout` (prefix, active range, and suffix after).
119/// If any peak's height is sub-grafting-height and falls within a completely sealed chunk
120/// that spans multiple peaks, the standard contiguous root verification MUST be intercepted and
121/// rebuilt completely using the `reconstruct_grafted_root` algorithm.
122fn proof_needs_grafted_peak_fold<F: Graftable>(
123    layout: &PeakLayout<F>,
124    size: Position<F>,
125    grafting_height: u32,
126    complete_chunks: u64,
127) -> bool {
128    layout
129        .prefix
130        .iter()
131        .chain(layout.range.iter())
132        .chain(layout.after.iter())
133        .any(|(pos, height)| {
134            if *height < grafting_height {
135                let chunk_idx = *F::leftmost_leaf(*pos, *height) >> grafting_height;
136                chunk_needs_grafted_fold(size, chunk_idx, grafting_height, complete_chunks)
137            } else {
138                false
139            }
140        })
141}
142
143/// Accumulates the absolute physical leaf count spanned by the slice of given prefix peaks.
144fn prefix_leaf_end<F: Family>(prefix_peaks: &[(Position<F>, u32)]) -> u64 {
145    prefix_peaks
146        .iter()
147        .fold(0u64, |acc, (_, height)| acc + (1u64 << *height))
148}
149
150/// Safely extracts the computed digests for all active-range and after-range peaks.
151///
152/// Since standard verification sequentially computes and caches these intermediate node digests
153/// into the `collected` map, this function plucks them out linearly to pass into the grafted root
154/// reconstruction engine. Returns `None` if any structurally required peak is missing.
155fn collect_peak_digests<F: Family, D: Digest>(
156    layout: &PeakLayout<F>,
157    collected: &BTreeMap<Position<F>, D>,
158) -> Option<Vec<D>> {
159    let mut peak_digests = Vec::with_capacity(layout.range.len() + layout.after.len());
160    for (pos, _) in layout.range.iter().chain(layout.after.iter()) {
161        peak_digests.push(*collected.get(pos)?);
162    }
163    Some(peak_digests)
164}
165
/// Reconstructs the canonical grafted root from the combination of generic proof boundaries,
/// the operation elements, and the prefix hashes provided by the prover.
///
/// Returns `None` if any required peak digest is missing from `collected`, or if the proof
/// claims more `unfolded_prefix_peaks` than the layout's prefix actually contains.
fn reconstruct_grafted_root<F: Graftable, H: CHasher, C: AsRef<[u8]>>(
    verifier: &grafting::Verifier<'_, F, H>,
    proof: &RangeProof<F, H::Digest>,
    layout: &PeakLayout<F>,
    leaves: Location<F>,
    collected: &BTreeMap<Position<F>, H::Digest>,
    grafting_height: u32,
    get_chunk: impl Fn(u64) -> Option<C>,
) -> Option<H::Digest> {
    // Digests for the in-range and after-range peaks, cached by standard verification.
    let suffix_peaks = collect_peak_digests(layout, collected)?;

    // Split the prefix into the portion already folded by the prover (`pre_prefix_acc`) and
    // the trailing peaks the prover exposed individually (`unfolded_prefix_peaks`).
    let (initial_acc, start_leaf, prefix_peaks) =
        if proof.pre_prefix_acc.is_some() || !proof.unfolded_prefix_peaks.is_empty() {
            // `checked_sub` fails (returning None) when the proof claims more unfolded
            // peaks than the layout's prefix holds — a malformed proof.
            let split_idx = layout
                .prefix
                .len()
                .checked_sub(proof.unfolded_prefix_peaks.len())?;
            (
                proof.pre_prefix_acc,
                prefix_leaf_end(&layout.prefix[..split_idx]),
                // Pair each exposed prefix digest with its structural height from the layout.
                layout.prefix[split_idx..]
                    .iter()
                    .zip(proof.unfolded_prefix_peaks.iter())
                    .map(|((_, height), &digest)| (*height, digest))
                    .collect::<Vec<_>>(),
            )
        } else {
            // No prefix material provided: folding starts fresh after the entire prefix span.
            (None, prefix_leaf_end(&layout.prefix), vec![])
        };

    // All peaks participating in the grafted fold, in left-to-right structural order.
    let peaks = prefix_peaks.into_iter().chain(
        layout
            .range
            .iter()
            .chain(layout.after.iter())
            .zip(suffix_peaks)
            .map(|((_, height), digest)| (*height, digest)),
    );

    let acc = grafting::fold_grafted_peaks::<F, H::Digest, _, _>(
        verifier,
        initial_acc,
        start_leaf,
        peaks,
        grafting_height,
        get_chunk,
    );
    // Bind the leaf count into the final digest; when the fold produced nothing, the root is
    // derived from the leaf count alone.
    Some(acc.map_or_else(
        || verifier.digest(&(*leaves).to_be_bytes()),
        |acc| verifier.hash([(*leaves).to_be_bytes().as_slice(), acc.as_ref()]),
    ))
}
220
/// A proof that a range of operations exist in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct RangeProof<F: Family, D: Digest> {
    /// The Merkle digest material required to verify the proof.
    pub proof: Proof<F, D>,

    /// The single folded accumulator of all aligned prefix peaks that do not require unfolding.
    /// `None` when there are no such peaks.
    pub pre_prefix_acc: Option<D>,

    /// Individual fold-prefix peak digests, in peak order, when the generic proof's single
    /// folded prefix accumulator would otherwise hide multi-peak chunk structure needed for
    /// grafted-root reconstruction. Empty when no unfolding is required.
    pub unfolded_prefix_peaks: Vec<D>,

    /// The partial chunk digest from the status bitmap at the time of proof generation, if any.
    /// `None` when the bitmap's last chunk was complete.
    pub partial_chunk_digest: Option<D>,

    /// The ops-tree root at the time of proof generation.
    /// Needed by the verifier to reconstruct the canonical root.
    pub ops_root: D,
}
242
243impl<F: Graftable, D: Digest> RangeProof<F, D> {
244    /// Create a new range proof for the provided `range` of operations.
245    pub async fn new<H: CHasher<Digest = D>, S: Storage<F, Digest = D>, const N: usize>(
246        hasher: &mut H,
247        status: &impl BitmapReadable<N>,
248        storage: &S,
249        range: Range<Location<F>>,
250        ops_root: D,
251    ) -> Result<Self, Error<F>> {
252        let std_hasher = merkle::hasher::Standard::<H>::new();
253        let range_for_layout = range.clone();
254        let start_chunk = *range.start / BitMap::<N>::CHUNK_SIZE_BITS;
255        let complete_chunks = status.complete_chunks() as u64;
256        let pruned_chunks = status.pruned_chunks() as u64;
257        let proof = merkle::verification::range_proof(&std_hasher, storage, range).await?;
258        let layout = peak_layout(proof.leaves, range_for_layout)?;
259        let grafting_height = grafting::height::<N>();
260
261        let split_idx_opt = unfolding_start_idx(
262            &layout.prefix,
263            grafting_height,
264            start_chunk,
265            complete_chunks,
266        );
267        let split_idx = split_idx_opt.unwrap_or(layout.prefix.len());
268        let mut pre_prefix_acc: Option<D> = None;
269        let mut unfolded_prefix_peaks = Vec::new();
270        if split_idx > 0 {
271            let mut prefix_peaks = Vec::with_capacity(split_idx);
272            for (pos, height) in &layout.prefix[..split_idx] {
273                let digest = storage
274                    .get_node(*pos)
275                    .await?
276                    .ok_or(merkle::Error::<F>::MissingNode(*pos))?;
277                prefix_peaks.push((*height, digest));
278            }
279            pre_prefix_acc = grafting::fold_grafted_peaks::<F, D, _, _>(
280                &std_hasher,
281                None,
282                0,
283                prefix_peaks,
284                grafting_height,
285                |idx| {
286                    if idx < complete_chunks {
287                        // Pruned chunks are guaranteed all-zero (only chunks with no active
288                        // operations are prunable), so a synthetic zero chunk produces the correct
289                        // grafted digest via the zero-chunk identity shortcut.
290                        if idx < pruned_chunks {
291                            Some([0u8; N])
292                        } else {
293                            Some(status.get_chunk(idx as usize))
294                        }
295                    } else {
296                        None
297                    }
298                },
299            );
300        }
301        if split_idx < layout.prefix.len() {
302            unfolded_prefix_peaks.reserve(layout.prefix.len() - split_idx);
303            for (pos, _) in &layout.prefix[split_idx..] {
304                let digest = storage
305                    .get_node(*pos)
306                    .await?
307                    .ok_or(merkle::Error::<F>::MissingNode(*pos))?;
308                unfolded_prefix_peaks.push(digest);
309            }
310        }
311
312        let (last_chunk, next_bit) = status.last_chunk();
313        let partial_chunk_digest = if next_bit != BitMap::<N>::CHUNK_SIZE_BITS {
314            // Last chunk is incomplete, meaning it's not yet in the MMR and needs to be included
315            // in the proof.
316            hasher.update(&last_chunk);
317            Some(hasher.finalize())
318        } else {
319            None
320        };
321
322        Ok(Self {
323            proof,
324            pre_prefix_acc,
325            unfolded_prefix_peaks,
326            partial_chunk_digest,
327            ops_root,
328        })
329    }
330
331    /// Returns a proof that the specified range of operations are part of the database, along with
332    /// the operations from the range and their activity status chunks. A truncated range (from
333    /// hitting the max) can be detected by looking at the length of the returned operations vector.
334    ///
335    /// # Errors
336    ///
337    /// Returns [Error::OperationPruned] if `start_loc` falls in a pruned bitmap chunk.
338    /// Returns [`merkle::Error::LocationOverflow`] if `start_loc` > [merkle::Family::MAX_LEAVES].
339    /// Returns [`merkle::Error::RangeOutOfBounds`] if `start_loc` >= number of leaves in the MMR.
340    pub async fn new_with_ops<
341        H: CHasher<Digest = D>,
342        C: Contiguous,
343        S: Storage<F, Digest = D>,
344        const N: usize,
345    >(
346        hasher: &mut H,
347        status: &impl BitmapReadable<N>,
348        storage: &S,
349        log: &C,
350        start_loc: Location<F>,
351        max_ops: NonZeroU64,
352        ops_root: D,
353    ) -> Result<(Self, Vec<C::Item>, Vec<[u8; N]>), Error<F>> {
354        // Compute the start and end locations & positions of the range.
355        let leaves = Location::new(status.len());
356        if start_loc >= leaves {
357            return Err(merkle::Error::RangeOutOfBounds(start_loc).into());
358        }
359
360        // Reject ranges that start in pruned bitmap chunks.
361        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
362        let start = *start_loc / chunk_bits;
363        if (start as usize) < status.pruned_chunks() {
364            return Err(Error::OperationPruned(start_loc));
365        }
366
367        let max_loc = start_loc.saturating_add(max_ops.get());
368        let end_loc = core::cmp::min(max_loc, leaves);
369
370        // Generate the proof from the grafted storage.
371        let proof = Self::new(hasher, status, storage, start_loc..end_loc, ops_root).await?;
372
373        // Collect the operations necessary to verify the proof.
374        let mut ops = Vec::with_capacity((*end_loc - *start_loc) as usize);
375        let reader = log.reader().await;
376        let futures = (*start_loc..*end_loc)
377            .map(|i| reader.read(i))
378            .collect::<Vec<_>>();
379        try_join_all(futures)
380            .await?
381            .into_iter()
382            .for_each(|op| ops.push(op));
383
384        // Gather the chunks necessary to verify the proof.
385        let end = (*end_loc - 1) / chunk_bits; // chunk that contains the last bit
386        let mut chunks = Vec::with_capacity((end - start + 1) as usize);
387        for i in start..=end {
388            chunks.push(status.get_chunk(i as usize));
389        }
390
391        Ok((proof, ops, chunks))
392    }
393}
394
impl<F: Graftable, D: Digest> RangeProof<F, D> {
    /// Return true if the given sequence of `ops` were applied starting at location `start_loc` in
    /// the db with the provided root, and having the activity status described by `chunks`.
    ///
    /// `chunks` must contain exactly the bitmap chunks covering `start_loc..start_loc + ops.len()`.
    pub fn verify<H: CHasher<Digest = D>, O: Codec, const N: usize>(
        &self,
        hasher: &mut H,
        start_loc: Location<F>,
        ops: &[O],
        chunks: &[[u8; N]],
        root: &H::Digest,
    ) -> bool {
        if ops.is_empty() || chunks.is_empty() {
            debug!("verification failed, empty input");
            return false;
        }

        // Compute the (non-inclusive) end location of the range.
        let Some(end_loc) = start_loc.checked_add(ops.len() as u64) else {
            debug!("verification failed, end_loc overflow");
            return false;
        };

        // The claimed range must fit within the proof's leaf count.
        let leaves = self.proof.leaves;
        if end_loc > leaves {
            debug!(
                loc = ?end_loc,
                ?leaves, "verification failed, invalid range"
            );
            return false;
        }

        // Validate the number of input chunks.
        let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
        let start_chunk = *start_loc / chunk_bits;
        let end_chunk = (*end_loc - 1) / chunk_bits;
        let complete_chunks = *leaves / chunk_bits;

        if (end_chunk - start_chunk + 1) != chunks.len() as u64 {
            debug!("verification failed, chunk metadata length mismatch");
            return false;
        }

        // A nonzero remainder means the last bitmap chunk is only partially filled.
        let next_bit = *leaves % chunk_bits;
        let has_partial_chunk = next_bit != 0;

        let elements = ops.iter().map(|op| op.encode()).collect::<Vec<_>>();
        let chunk_vec = chunks.iter().map(|c| c.as_ref()).collect::<Vec<_>>();
        let grafting_height = grafting::height::<N>();
        let verifier = grafting::Verifier::<F, H>::new(grafting_height, start_chunk, chunk_vec);

        // For partial chunks, validate the last chunk digest from the proof.
        if has_partial_chunk {
            let Some(last_chunk_digest) = self.partial_chunk_digest else {
                debug!("proof has no partial chunk digest");
                return false;
            };

            // If the proof covers an operation in the partial chunk, verify that the
            // chunk provided by the caller matches the digest embedded in the proof.
            if end_chunk == complete_chunks {
                let last_chunk = chunks.last().expect("chunks non-empty");
                if last_chunk_digest != verifier.digest(last_chunk) {
                    debug!("last chunk digest does not match expected value");
                    return false;
                }
            }
        } else if self.partial_chunk_digest.is_some() {
            // A digest for a nonexistent partial chunk marks a malformed proof.
            debug!("proof has unexpected partial chunk digest");
            return false;
        }

        // Bucket the tree's peaks relative to the proven range.
        let layout = match peak_layout(leaves, start_loc..end_loc) {
            Ok(layout) => layout,
            Err(error) => {
                debug!(?error, "verification failed, invalid peak layout");
                return false;
            }
        };
        let size = match Position::<F>::try_from(leaves) {
            Ok(size) => size,
            Err(error) => {
                debug!(?error, "verification failed, invalid size");
                return false;
            }
        };
        // Decide between the fast path (standard root reconstruction) and the slow path
        // (grafted-root reconstruction across multi-peak complete chunks).
        let needs_grafted_peak_fold =
            proof_needs_grafted_peak_fold(&layout, size, grafting_height, complete_chunks);
        let merkle_root = if !needs_grafted_peak_fold {
            match self.proof.reconstruct_root(&verifier, &elements, start_loc) {
                Ok(root) => root,
                Err(error) => {
                    debug!(?error, "invalid proof input");
                    return false;
                }
            }
        } else {
            // Collect the intermediate node digests computed during standard verification;
            // they are re-grouped below during grafted-root reconstruction.
            let mut collected = Vec::new();
            if let Err(error) = self.proof.reconstruct_root_collecting(
                &verifier,
                &elements,
                start_loc,
                Some(&mut collected),
            ) {
                debug!(?error, "invalid proof input");
                return false;
            }

            let collected: BTreeMap<Position<F>, D> = collected.into_iter().collect();
            // Serve only caller-provided chunks for complete chunk indices within range.
            let get_chunk = |chunk_idx: u64| -> Option<&[u8]> {
                if chunk_idx >= complete_chunks {
                    return None;
                }
                chunk_idx
                    .checked_sub(start_chunk)
                    .filter(|&idx| idx < chunks.len() as u64)
                    .map(|idx| chunks[idx as usize].as_ref())
            };
            let Some(root) = reconstruct_grafted_root(
                &verifier,
                self,
                &layout,
                leaves,
                &collected,
                grafting_height,
                get_chunk,
            ) else {
                debug!("verification failed, could not reconstruct grafted root");
                return false;
            };
            root
        };

        // Compute the canonical root and compare.
        hasher.update(&self.ops_root);
        hasher.update(&merkle_root);
        if has_partial_chunk {
            // partial_chunk_digest is guaranteed Some by the check above.
            hasher.update(&next_bit.to_be_bytes());
            hasher.update(self.partial_chunk_digest.as_ref().unwrap());
        }
        let reconstructed_root = hasher.finalize();
        reconstructed_root == *root
    }
}
539
/// A proof that a specific operation is currently active in the database.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct OperationProof<F: Family, D: Digest, const N: usize> {
    /// The location of the operation in the db.
    pub loc: Location<F>,

    /// The status bitmap chunk that contains the bit corresponding to the operation's location.
    pub chunk: [u8; N],

    /// The range proof that incorporates activity status for the operation designated by `loc`.
    pub range_proof: RangeProof<F, D>,
}
552
553impl<F: Graftable, D: Digest, const N: usize> OperationProof<F, D, N> {
554    /// Return an inclusion proof that incorporates activity status for the operation designated by
555    /// `loc`.
556    ///
557    /// # Errors
558    ///
559    /// Returns [Error::OperationPruned] if `loc` falls in a pruned bitmap chunk.
560    pub async fn new<H: CHasher<Digest = D>, S: Storage<F, Digest = D>>(
561        hasher: &mut H,
562        status: &impl BitmapReadable<N>,
563        storage: &S,
564        loc: Location<F>,
565        ops_root: D,
566    ) -> Result<Self, Error<F>> {
567        // Reject locations in pruned bitmap chunks.
568        if BitMap::<N>::to_chunk_index(*loc) < status.pruned_chunks() {
569            return Err(Error::OperationPruned(loc));
570        }
571        let range_proof = RangeProof::new(hasher, status, storage, loc..loc + 1, ops_root).await?;
572        let chunk = status.get_chunk(BitMap::<N>::to_chunk_index(*loc));
573        Ok(Self {
574            loc,
575            chunk,
576            range_proof,
577        })
578    }
579}
580
581impl<F: Graftable, D: Digest, const N: usize> OperationProof<F, D, N> {
582    /// Verify that the proof proves that `operation` is active in the database with the given
583    /// `root`.
584    pub fn verify<H: CHasher<Digest = D>, O: Codec>(
585        &self,
586        hasher: &mut H,
587        operation: O,
588        root: &D,
589    ) -> bool {
590        // Make sure that the bit for the operation in the bitmap chunk is actually a 1 (indicating
591        // the operation is indeed active).
592        if !BitMap::<N>::get_bit_from_chunk(&self.chunk, *self.loc) {
593            debug!(
594                ?self.loc,
595                "proof verification failed, operation is inactive"
596            );
597            return false;
598        }
599
600        self.range_proof
601            .verify(hasher, self.loc, &[operation], &[self.chunk], root)
602    }
603}
604
605#[cfg(test)]
606mod tests {
607    use super::*;
608    use crate::{
609        merkle::conformance::build_test_mem,
610        mmb,
611        mmr::StandardHasher,
612        qmdb::current::{db, grafting},
613    };
614    use commonware_cryptography::{sha256, Sha256};
615    use commonware_macros::test_traced;
616    use commonware_runtime::{deterministic, Runner};
617    use commonware_utils::bitmap::{Prunable as BitMap, Readable as BitmapReadable};
618
    #[test_traced]
    fn test_range_proof_verifies_for_mmb_multi_peak_chunk() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            type F = mmb::Family;
            const N: usize = 1;

            let hasher: StandardHasher<Sha256> = StandardHasher::new();
            let grafting_height = grafting::height::<N>();

            // Find the smallest MMB size (in 16..=64) whose second chunk spans more than one
            // peak, which forces the grafted-fold verification path.
            let leaf_count = (16..=64u64)
                .find(|&leaves| {
                    let size = F::location_to_position(mmb::Location::new(leaves));
                    F::chunk_peaks(size, 1, grafting_height).count() > 1
                })
                .expect("expected an MMB size whose second chunk spans multiple peaks");

            // Mark every operation active in the status bitmap.
            let mut status = BitMap::<N>::new();
            for _ in 0..leaf_count {
                status.push(true);
            }
            let ops = build_test_mem(&hasher, mmb::mem::Mmb::new(&hasher), leaf_count);
            let ops_root = *ops.root();

            // Compute grafted leaves from the complete bitmap chunks, sorted by chunk index.
            let chunk_inputs: Vec<_> =
                (0..<BitMap<N> as BitmapReadable<N>>::complete_chunks(&status))
                    .map(|chunk_idx| {
                        (
                            chunk_idx,
                            <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx),
                        )
                    })
                    .collect();
            let mut leaf_digests =
                db::compute_grafted_leaves::<F, Sha256, N>(&hasher, &ops, chunk_inputs, None)
                    .await
                    .unwrap();
            leaf_digests.sort_by_key(|(chunk_idx, _)| *chunk_idx);

            // Build the in-memory grafted tree from the sorted leaf digests.
            let grafted_hasher =
                grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
            let mut grafted = merkle::mem::Mem::<F, sha256::Digest>::new(&grafted_hasher);
            let merkleized = {
                let mut batch = grafted.new_batch();
                for (_, digest) in leaf_digests {
                    batch = batch.add_leaf_digest(digest);
                }
                batch.merkleize(&grafted, &grafted_hasher)
            };
            grafted.apply_batch(&merkleized).unwrap();

            // Derive the canonical database root the proof must verify against.
            let storage = grafting::Storage::new(&grafted, grafting_height, &ops);
            let root = db::compute_db_root::<F, Sha256, _, _, _, N>(
                &hasher, &status, &storage, None, &ops_root,
            )
            .await
            .unwrap();

            // Prove a single operation inside the multi-peak second chunk.
            let loc = mmb::Location::new(BitMap::<N>::CHUNK_SIZE_BITS + 4);
            let mut proof_hasher = Sha256::new();
            let proof =
                RangeProof::new(&mut proof_hasher, &status, &storage, loc..loc + 1, ops_root)
                    .await
                    .unwrap();

            // Verify against the canonical root using the chunk that covers `loc` (index 1).
            let element = hasher.digest(&(*loc).to_be_bytes());
            let mut verify_hasher = Sha256::new();
            assert!(proof.verify(
                &mut verify_hasher,
                loc,
                &[element],
                &[<BitMap<N> as BitmapReadable<N>>::get_chunk(&status, 1)],
                &root,
            ));
        });
    }
695
696    #[test_traced]
697    fn test_range_proof_verifies_with_partial_suffix_mmb() {
698        let executor = deterministic::Runner::default();
699        executor.start(|_| async move {
700            type F = mmb::Family;
701            const N: usize = 1;
702
703            let hasher: StandardHasher<Sha256> = StandardHasher::new();
704            let grafting_height = grafting::height::<N>();
705
706            let (leaf_count, loc) = (17..=64u64)
707                .find_map(|leaves| {
708                    let complete_chunks = leaves / BitMap::<N>::CHUNK_SIZE_BITS;
709                    if complete_chunks < 2 || leaves % BitMap::<N>::CHUNK_SIZE_BITS == 0 {
710                        return None;
711                    }
712
713                    let size = F::location_to_position(mmb::Location::new(leaves));
714                    if F::chunk_peaks(size, 1, grafting_height).count() <= 1 {
715                        return None;
716                    }
717
718                    for offset in 0..BitMap::<N>::CHUNK_SIZE_BITS {
719                        let loc = mmb::Location::new(BitMap::<N>::CHUNK_SIZE_BITS + offset);
720                        if *loc >= leaves {
721                            break;
722                        }
723                        let after_peaks = peak_layout(mmb::Location::new(leaves), loc..loc + 1)
724                            .ok()?
725                            .after;
726                        let has_partial_suffix_peak = after_peaks.iter().any(|(pos, height)| {
727                            *height < grafting_height
728                                && (*F::leftmost_leaf(*pos, *height) >> grafting_height)
729                                    == complete_chunks
730                        });
731                        if has_partial_suffix_peak {
732                            return Some((leaves, loc));
733                        }
734                    }
735                    None
736                })
737                .expect("expected an MMB proof with a partial trailing suffix chunk");
738
739            let mut status = BitMap::<N>::new();
740            for _ in 0..leaf_count {
741                status.push(true);
742            }
743            let ops = build_test_mem(&hasher, mmb::mem::Mmb::new(&hasher), leaf_count);
744            let ops_root = *ops.root();
745
746            let chunk_inputs: Vec<_> =
747                (0..<BitMap<N> as BitmapReadable<N>>::complete_chunks(&status))
748                    .map(|chunk_idx| {
749                        (
750                            chunk_idx,
751                            <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx),
752                        )
753                    })
754                    .collect();
755            let mut leaf_digests =
756                db::compute_grafted_leaves::<F, Sha256, N>(&hasher, &ops, chunk_inputs, None)
757                    .await
758                    .unwrap();
759            leaf_digests.sort_by_key(|(chunk_idx, _)| *chunk_idx);
760
761            let grafted_hasher =
762                grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
763            let mut grafted = merkle::mem::Mem::<F, sha256::Digest>::new(&grafted_hasher);
764            let merkleized = {
765                let mut batch = grafted.new_batch();
766                for (_, digest) in leaf_digests {
767                    batch = batch.add_leaf_digest(digest);
768                }
769                batch.merkleize(&grafted, &grafted_hasher)
770            };
771            grafted.apply_batch(&merkleized).unwrap();
772
773            let storage = grafting::Storage::new(&grafted, grafting_height, &ops);
774            let partial = {
775                let (chunk, next_bit) = status.last_chunk();
776                Some((*chunk, next_bit))
777            };
778            let root = db::compute_db_root::<F, Sha256, _, _, _, N>(
779                &hasher, &status, &storage, partial, &ops_root,
780            )
781            .await
782            .unwrap();
783
784            let mut proof_hasher = Sha256::new();
785            let proof =
786                RangeProof::new(&mut proof_hasher, &status, &storage, loc..loc + 1, ops_root)
787                    .await
788                    .unwrap();
789
790            let element = hasher.digest(&(*loc).to_be_bytes());
791            let chunk_idx = (*loc / BitMap::<N>::CHUNK_SIZE_BITS) as usize;
792            let mut verify_hasher = Sha256::new();
793            assert!(proof.verify(
794                &mut verify_hasher,
795                loc,
796                &[element],
797                &[<BitMap<N> as BitmapReadable<N>>::get_chunk(
798                    &status, chunk_idx
799                )],
800                &root,
801            ));
802        });
803    }
804
805    #[test_traced]
806    fn test_range_proof_verifies_when_range_reaches_partial_chunk_mmb() {
807        let executor = deterministic::Runner::default();
808        executor.start(|_| async move {
809            type F = mmb::Family;
810            const N: usize = 1;
811
812            let hasher: StandardHasher<Sha256> = StandardHasher::new();
813            let grafting_height = grafting::height::<N>();
814            let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
815
816            let (leaf_count, start_loc, complete_chunks) = (17..=128u64)
817                .find_map(|leaves| {
818                    let complete_chunks = leaves / chunk_bits;
819                    if complete_chunks < 2 || leaves % chunk_bits == 0 {
820                        return None;
821                    }
822
823                    let leaves_loc = mmb::Location::new(leaves);
824                    let size = F::location_to_position(leaves_loc);
825                    if F::chunk_peaks(size, 1, grafting_height).count() <= 1 {
826                        return None;
827                    }
828
829                    (0..chunk_bits).find_map(|offset| {
830                        let start_loc = mmb::Location::new(chunk_bits + offset);
831                        if *start_loc >= complete_chunks * chunk_bits {
832                            return None;
833                        }
834                        let layout = peak_layout(leaves_loc, start_loc..leaves_loc).ok()?;
835                        proof_needs_grafted_peak_fold(
836                            &layout,
837                            size,
838                            grafting_height,
839                            complete_chunks,
840                        )
841                        .then_some((leaves, start_loc, complete_chunks))
842                    })
843                })
844                .expect("expected an MMB proof into the trailing partial chunk");
845
846            let mut status = BitMap::<N>::new();
847            for _ in 0..leaf_count {
848                status.push(true);
849            }
850            let ops = build_test_mem(&hasher, mmb::mem::Mmb::new(&hasher), leaf_count);
851            let ops_root = *ops.root();
852
853            let chunk_inputs: Vec<_> =
854                (0..<BitMap<N> as BitmapReadable<N>>::complete_chunks(&status))
855                    .map(|chunk_idx| {
856                        (
857                            chunk_idx,
858                            <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx),
859                        )
860                    })
861                    .collect();
862            let mut leaf_digests =
863                db::compute_grafted_leaves::<F, Sha256, N>(&hasher, &ops, chunk_inputs, None)
864                    .await
865                    .unwrap();
866            leaf_digests.sort_by_key(|(chunk_idx, _)| *chunk_idx);
867
868            let grafted_hasher =
869                grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
870            let mut grafted = merkle::mem::Mem::<F, sha256::Digest>::new(&grafted_hasher);
871            let merkleized = {
872                let mut batch = grafted.new_batch();
873                for (_, digest) in leaf_digests {
874                    batch = batch.add_leaf_digest(digest);
875                }
876                batch.merkleize(&grafted, &grafted_hasher)
877            };
878            grafted.apply_batch(&merkleized).unwrap();
879
880            let storage = grafting::Storage::new(&grafted, grafting_height, &ops);
881            let partial = {
882                let (chunk, next_bit) = status.last_chunk();
883                Some((*chunk, next_bit))
884            };
885            let root = db::compute_db_root::<F, Sha256, _, _, _, N>(
886                &hasher, &status, &storage, partial, &ops_root,
887            )
888            .await
889            .unwrap();
890
891            let leaves_loc = mmb::Location::new(leaf_count);
892            let mut proof_hasher = Sha256::new();
893            let proof = RangeProof::new(
894                &mut proof_hasher,
895                &status,
896                &storage,
897                start_loc..leaves_loc,
898                ops_root,
899            )
900            .await
901            .unwrap();
902
903            let elements = (*start_loc..leaf_count)
904                .map(|idx| hasher.digest(&idx.to_be_bytes()))
905                .collect::<Vec<_>>();
906            let start_chunk_idx = (*start_loc / chunk_bits) as usize;
907            let end_chunk_idx = complete_chunks as usize;
908            let chunks = (start_chunk_idx..=end_chunk_idx)
909                .map(|chunk_idx| <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx))
910                .collect::<Vec<_>>();
911
912            let mut verify_hasher = Sha256::new();
913            assert!(proof.verify(&mut verify_hasher, start_loc, &elements, &chunks, &root,));
914        });
915    }
916    #[test_traced]
917    fn test_range_proof_rejects_unexpected_partial_chunk_digest() {
918        let executor = deterministic::Runner::default();
919        executor.start(|_| async move {
920            type F = mmb::Family;
921            const N: usize = 1;
922
923            let hasher: StandardHasher<Sha256> = StandardHasher::new();
924            let grafting_height = grafting::height::<N>();
925            let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;
926
927            let leaf_count = chunk_bits * 2; // Perfect chunks, NO partial trailing bits
928            let mut status = BitMap::<N>::new();
929            for _ in 0..leaf_count {
930                status.push(true);
931            }
932            let ops = build_test_mem(&hasher, mmb::mem::Mmb::new(&hasher), leaf_count);
933            let ops_root = *ops.root();
934
935            let chunk_inputs: Vec<_> =
936                (0..<BitMap<N> as BitmapReadable<N>>::complete_chunks(&status))
937                    .map(|chunk_idx| {
938                        (
939                            chunk_idx,
940                            <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx),
941                        )
942                    })
943                    .collect();
944            let mut leaf_digests =
945                db::compute_grafted_leaves::<F, Sha256, N>(&hasher, &ops, chunk_inputs, None)
946                    .await
947                    .unwrap();
948            leaf_digests.sort_by_key(|(chunk_idx, _)| *chunk_idx);
949
950            let grafted_hasher =
951                grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
952            let mut grafted = merkle::mem::Mem::<F, sha256::Digest>::new(&grafted_hasher);
953            let merkleized = {
954                let mut batch = grafted.new_batch();
955                for (_, digest) in leaf_digests {
956                    batch = batch.add_leaf_digest(digest);
957                }
958                batch.merkleize(&grafted, &grafted_hasher)
959            };
960            grafted.apply_batch(&merkleized).unwrap();
961
962            let storage = grafting::Storage::new(&grafted, grafting_height, &ops);
963            let root = db::compute_db_root::<F, Sha256, _, _, _, N>(
964                &hasher, &status, &storage, None, &ops_root,
965            )
966            .await
967            .unwrap();
968
969            let loc = mmb::Location::new(0);
970            let mut proof_hasher = Sha256::new();
971            let mut proof =
972                RangeProof::new(&mut proof_hasher, &status, &storage, loc..loc + 1, ops_root)
973                    .await
974                    .unwrap();
975
976            // Tamper with the proof by injecting a fake partial chunk digest
977            proof.partial_chunk_digest = Some(hasher.digest(b"fake partial chunk"));
978
979            let element = hasher.digest(&(*loc).to_be_bytes());
980            let mut verify_hasher = Sha256::new();
981            assert!(!proof.verify(
982                &mut verify_hasher,
983                loc,
984                &[element],
985                &[<BitMap<N> as BitmapReadable<N>>::get_chunk(&status, 0)],
986                &root,
987            ));
988        });
989    }
990
    #[test_traced]
    fn test_range_proof_uses_compact_mmb_prefix_unfolding() {
        let executor = deterministic::Runner::default();
        executor.start(|_| async move {
            type F = mmb::Family;
            const N: usize = 1;

            let hasher: StandardHasher<Sha256> = StandardHasher::new();
            let grafting_height = grafting::height::<N>();
            let chunk_bits = BitMap::<N>::CHUNK_SIZE_BITS;

            // Search for a tree size (a whole number of chunks) and a single-operation
            // range whose prefix peaks split at `split_idx`: peaks before the split are
            // pre-folded into one accumulator digest while the rest travel in the proof
            // unfolded. Requiring 0 < split_idx < prefix.len() guarantees the proof
            // exercises BOTH the folded and the unfolded path at once.
            let (leaf_count, loc, layout, split_idx, complete_chunks) = (chunk_bits * 3..=128u64)
                .filter(|leaves| leaves % chunk_bits == 0)
                .find_map(|leaves| {
                    let leaves_loc = mmb::Location::new(leaves);
                    let complete_chunks = leaves / chunk_bits;
                    (0..leaves).find_map(|idx| {
                        let loc = mmb::Location::new(idx);
                        let start_chunk = *loc / chunk_bits;
                        let layout = peak_layout(leaves_loc, loc..loc + 1).ok()?;
                        let split_idx = unfolding_start_idx(
                            &layout.prefix,
                            grafting_height,
                            start_chunk,
                            complete_chunks,
                        )?;
                        (split_idx > 0 && split_idx < layout.prefix.len()).then_some((
                            leaves,
                            loc,
                            layout,
                            split_idx,
                            complete_chunks,
                        ))
                    })
                })
                .expect("expected an MMB proof with a partially unfolded prefix");

            // Build an all-active status bitmap and a matching operations log.
            let mut status = BitMap::<N>::new();
            for _ in 0..leaf_count {
                status.push(true);
            }
            let ops = build_test_mem(&hasher, mmb::mem::Mmb::new(&hasher), leaf_count);
            let ops_root = *ops.root();

            // Compute the grafted leaf digest for every complete bitmap chunk.
            let chunk_inputs: Vec<_> =
                (0..<BitMap<N> as BitmapReadable<N>>::complete_chunks(&status))
                    .map(|chunk_idx| {
                        (
                            chunk_idx,
                            <BitMap<N> as BitmapReadable<N>>::get_chunk(&status, chunk_idx),
                        )
                    })
                    .collect();
            let mut leaf_digests =
                db::compute_grafted_leaves::<F, Sha256, N>(&hasher, &ops, chunk_inputs, None)
                    .await
                    .unwrap();
            // Sort by chunk index so leaves enter the grafted tree in position order.
            leaf_digests.sort_by_key(|(chunk_idx, _)| *chunk_idx);

            // Merkleize the grafted leaves into an in-memory tree.
            let grafted_hasher =
                grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
            let mut grafted = merkle::mem::Mem::<F, sha256::Digest>::new(&grafted_hasher);
            let merkleized = {
                let mut batch = grafted.new_batch();
                for (_, digest) in leaf_digests {
                    batch = batch.add_leaf_digest(digest);
                }
                batch.merkleize(&grafted, &grafted_hasher)
            };
            grafted.apply_batch(&merkleized).unwrap();

            // Derive the database root. `None` for the partial chunk: leaf_count is a
            // multiple of chunk_bits, so there is no trailing partial chunk.
            let storage = grafting::Storage::new(&grafted, grafting_height, &ops);
            let root = db::compute_db_root::<F, Sha256, _, _, _, N>(
                &hasher, &status, &storage, None, &ops_root,
            )
            .await
            .unwrap();

            let mut proof_hasher = Sha256::new();
            let proof =
                RangeProof::new(&mut proof_hasher, &status, &storage, loc..loc + 1, ops_root)
                    .await
                    .unwrap();

            // The proof should carry only the peaks at indices >= split_idx unfolded...
            assert_eq!(
                proof.unfolded_prefix_peaks.len(),
                layout.prefix.len() - split_idx
            );
            assert!(!proof.unfolded_prefix_peaks.is_empty());
            assert!(proof.unfolded_prefix_peaks.len() < layout.prefix.len());
            // ...with everything before the split compacted into one accumulator.
            assert!(proof.pre_prefix_acc.is_some());

            // Recompute the expected accumulator by folding the pre-split prefix peaks
            // fetched directly from storage, then compare with what the proof embedded.
            let mut prefix_peaks = Vec::with_capacity(split_idx);
            for (pos, height) in &layout.prefix[..split_idx] {
                let digest = storage
                    .get_node(*pos)
                    .await
                    .unwrap()
                    .expect("prefix peak must exist");
                prefix_peaks.push((*height, digest));
            }
            let expected_pre_prefix_acc = grafting::fold_grafted_peaks::<F, sha256::Digest, _, _>(
                &hasher,
                None,
                0,
                prefix_peaks,
                grafting_height,
                |idx| {
                    // Chunk lookup for the fold: only complete chunks exist here.
                    if idx < complete_chunks {
                        Some(<BitMap<N> as BitmapReadable<N>>::get_chunk(
                            &status,
                            idx as usize,
                        ))
                    } else {
                        None
                    }
                },
            );
            assert_eq!(proof.pre_prefix_acc, expected_pre_prefix_acc);

            // Finally, the compact proof must still verify end-to-end.
            let element = hasher.digest(&(*loc).to_be_bytes());
            let chunk_idx = (*loc / chunk_bits) as usize;
            let mut verify_hasher = Sha256::new();
            assert!(proof.verify(
                &mut verify_hasher,
                loc,
                &[element],
                &[<BitMap<N> as BitmapReadable<N>>::get_chunk(
                    &status, chunk_idx
                )],
                &root,
            ));
        });
    }
1125}