// blake3_tree/lib.rs

1use std::{borrow::Borrow, cmp::Ordering, fmt::Debug, ptr};
2
3use arrayref::array_ref;
4use arrayvec::ArrayVec;
5pub use fleek_blake3 as blake3;
6use thiserror::Error;
7
// Debug only code for testing against memory leaks.
#[cfg(debug_assertions)]
thread_local! {
    /// Number of pointers that `IncrementalVerifierTreeNode` has allocated.
    ///
    /// Incremented by `IncrementalVerifierTreeNode::new` and decremented by
    /// `IncrementalVerifierTreeNode::free`; a non-zero value at the end of a
    /// test indicates a leak, and a decrement below zero indicates a double
    /// free. Only compiled in debug builds.
    static POINTERS: std::cell::RefCell<usize> = std::cell::RefCell::new(0);
}
14
/// An incremental verifier that can consume a stream of proofs and content
/// and verify the integrity of the content using a blake3 root hash.
///
/// Internally this maintains a partial blake3 tree built from raw owning
/// pointers; the tree is expanded downward by [`Self::feed_proof`] and
/// consumed left-to-right by the `verify*` methods.
pub struct IncrementalVerifier {
    /// The configuration the Blake3.
    iv: blake3::tree::IV,
    /// The pointer to the current tree node we want to verify.
    ///
    /// # Guarantees
    ///
    /// 1. The cursor is never null in any valid execution path of the
    ///    public methods.
    ///
    /// 2. The children of the cursor are always null. i.e we're always
    ///    pointing to a leaf/non-internal node.
    cursor: *mut IncrementalVerifierTreeNode,
    /// The index of the block we're verifying now, starting from zero.
    block_counter: usize,
    /// A stack used for when we're expanding the current node.
    ///
    /// Holds at most two pending subtrees; `merge_stack` collapses a full
    /// stack into a single parent node.
    stack: ArrayVec<*mut IncrementalVerifierTreeNode, 2>,
    /// Contains the first node that gets created as result of an expansion
    /// so that we can change the cursor to that node once the stack is merged
    /// with the current cursor during the final phase of the feed_proof procedure.
    ///
    /// Null outside of an in-progress `feed_proof` call.
    next_head: *mut IncrementalVerifierTreeNode,
}
39
/// Errors produced by the [`IncrementalVerifier`] public methods.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum IncrementalVerifierError {
    /// The byte length of the proof is not one of the lengths a valid
    /// encoded proof can have.
    #[error("The proof provided to the verifier does not have a valid length.")]
    InvalidProofSize,
    /// The hash recomputed from the proof/content did not match the hash
    /// currently expected by the verifier.
    #[error("The proof provided did not belong to the tree.")]
    HashMismatch,
    /// The verifier already consumed the entire stream; no further proofs
    /// or blocks are accepted.
    #[error("Verifier has already finished its job.")]
    VerifierTerminated,
}
49
/// A node of the partially-known blake3 tree, linked via raw owning pointers.
///
/// A node owns its `left` and `right` children (freed recursively on drop),
/// while `parent` is a non-owning back-link.
struct IncrementalVerifierTreeNode {
    /// A non-owning pointer to the parent node in the tree, can be null if
    /// the node is in pending state (part of stack), or is the root node.
    parent: *mut IncrementalVerifierTreeNode,
    /// The left child of the node.
    left: *mut IncrementalVerifierTreeNode,
    /// The right child of the node.
    right: *mut IncrementalVerifierTreeNode,
    /// The Blake3 chaining value for nodes or the finalized root hash if
    /// this is a root node.
    hash: [u8; 32],
}
62
63unsafe impl Send for IncrementalVerifier {}
64
impl IncrementalVerifier {
    /// Create a new incremental verifier that verifies a stream of proofs and
    /// content against the provided root hash.
    ///
    /// The `starting_block` determines where the content stream will start from.
    pub fn new(root_hash: [u8; 32], starting_block: usize) -> Self {
        Self {
            iv: blake3::tree::IV::new(),
            // The tree starts as a single leaf holding the root hash; calls
            // to `feed_proof` expand it downwards.
            cursor: IncrementalVerifierTreeNode::leaf(root_hash),
            block_counter: starting_block,
            stack: ArrayVec::new(),
            next_head: ptr::null_mut(),
        }
    }

    /// Returns true if the stream is complete.
    pub fn is_done(&self) -> bool {
        // A null cursor is the terminal state; it is only set by `finish`.
        self.cursor.is_null()
    }

    /// Moves the verifier to the finished state.
    ///
    /// Frees the entire remaining tree and sets the cursor to null, which is
    /// the state reported by [`Self::is_done`].
    #[inline(always)]
    fn finish(&mut self) {
        debug_assert!(!self.cursor.is_null());

        // SAFETY: Find the root of the tree from the current cursor by traversing
        // the tree up as much as we can, and free the leaf. The Drop implementation
        // of `IncrementalVerifierTreeNode` will be called recursively and will free the entire
        // data owned by this tree.
        //
        // NOTE(review): the recursive drop descends the whole remaining tree;
        // presumably tree height is bounded small enough to not risk stack
        // overflow — TODO confirm for very large files.
        unsafe {
            let mut current = self.cursor;
            while !(*current).parent.is_null() {
                current = (*current).parent;
            }
            debug_assert!(!current.is_null());
            debug_assert!((*current).parent.is_null());
            IncrementalVerifierTreeNode::free(current);
            self.cursor = ptr::null_mut();
        }
    }

    /// Verify the new block of data only by providing its hash, you should be aware of
    /// what mode you have finalized the block at.
    ///
    /// # Errors
    ///
    /// Returns [`IncrementalVerifierError::VerifierTerminated`] if the stream is
    /// already complete, and [`IncrementalVerifierError::HashMismatch`] if `hash`
    /// does not match the hash currently stored under the cursor.
    pub fn verify_hash(&mut self, hash: &[u8; 32]) -> Result<(), IncrementalVerifierError> {
        if self.is_done() {
            return Err(IncrementalVerifierError::VerifierTerminated);
        }

        // 1. Hash the content using a block hasher with the current index.
        // 2. Compare to the hash we have under the cursor.
        // 3. Move to the next node.

        if hash != self.current_hash() {
            return Err(IncrementalVerifierError::HashMismatch);
        }

        self.move_to_next();
        self.block_counter += 1;

        // If after advancing we are back at a parentless node, every leaf has
        // been consumed and the stream is complete.
        if self.is_root() {
            self.finish();
        }

        Ok(())
    }

    /// Verify the new block.
    ///
    /// Finalizes the provided block hasher (in root mode when this is the
    /// only remaining node) and delegates to [`Self::verify_hash`].
    pub fn verify(
        &mut self,
        block: blake3::tree::BlockHasher,
    ) -> Result<(), IncrementalVerifierError> {
        if self.is_done() {
            return Err(IncrementalVerifierError::VerifierTerminated);
        }

        let hash = block.finalize(self.is_root());
        self.verify_hash(&hash)
    }

    /// Go to the next element in the tree.
    fn move_to_next(&mut self) {
        debug_assert!(!self.cursor.is_null());

        // Move to the next leaf node, to do this we:
        // 1. Move up as long as we are the right child of our parent.
        // 2. Move one more node up.
        // 3. Drop the left child (we don't need it anymore)
        // 4. Move right.
        // 5. Move to the leftmost child.
        // At any step if going to parent results in a null cursor, avoid it.

        // Step 1: Moving up the tree as long as we're the right child.
        unsafe {
            loop {
                if (*self.cursor).parent.is_null() {
                    // We reached the root: there is no next leaf. The caller
                    // (`verify_hash`) detects this via `is_root`.
                    return;
                }

                if (*(*self.cursor).parent).right != self.cursor {
                    break;
                }

                self.cursor = (*self.cursor).parent;
            }
        }

        // Step 2: Moving one more node up, the next node is gonna be the
        // leftmost child of right child of this node.
        unsafe {
            if (*self.cursor).parent.is_null() {
                return;
            }

            self.cursor = (*self.cursor).parent;
        }

        // Step 3: Drop the left child.
        // SAFETY: Since the incremental verifier only moves to the right, this means
        // we are never going to access the node on the left side of a node which we
        // have already visited, so we can free the memory.
        unsafe {
            IncrementalVerifierTreeNode::free((*self.cursor).left);
            (*self.cursor).left = ptr::null_mut();
        }

        // Step 4: Move right
        // SAFETY: Since this function (`next`) is never called when we're in the root
        // this means both of the left and right children are set during the initialization
        // of the `IncrementalVerifierTreeNode`.
        //
        // And we only ever set the `left` children to null, so we can always assume that for
        // a non-root/non-leaf node, the `right` child is *ALWAYS* set and is not null.
        //
        // And since we got to this current cursor by moving up the tree (on the left side)
        // this simply means that the right side is also set. See the `for` loop a few lines
        // ago where we always go through the branch at least 1 time before we reach this point
        // in the code.
        self.cursor = unsafe { (*self.cursor).right };
        debug_assert!(!self.cursor.is_null());

        // Step 5:
        self.move_to_leftmost();
    }

    /// Feed some new proof to the verifier which it can use to expand its internal
    /// blake3 tree.
    ///
    /// A proof is a sequence of segments, each made of one sign byte followed
    /// by up to 8 hashes of 32 bytes; bit `i` (MSB first) of the sign byte
    /// tells whether the `i`-th hash must be flipped with its sibling on the
    /// pending stack.
    ///
    /// # Errors
    ///
    /// [`IncrementalVerifierError::VerifierTerminated`] if the stream is done,
    /// [`IncrementalVerifierError::InvalidProofSize`] for a malformed length,
    /// and [`IncrementalVerifierError::HashMismatch`] if the proof does not
    /// merge to the hash currently under the cursor.
    pub fn feed_proof(&mut self, mut proof: &[u8]) -> Result<(), IncrementalVerifierError> {
        // One sign byte plus 8 hashes of 32 bytes each.
        const SEGMENT_SIZE: usize = 32 * 8 + 1;

        if self.is_done() {
            return Err(IncrementalVerifierError::VerifierTerminated);
        }

        if !is_valid_proof_len(proof.len()) {
            return Err(IncrementalVerifierError::InvalidProofSize);
        }

        if proof.is_empty() {
            return Ok(());
        }

        // Number of bytes to read per iteration. For the first iteration read
        // the partial first segment and we will then start reading full segments.
        let mut read = proof.len() % SEGMENT_SIZE;
        // Handle the case where we have complete segments.
        if read == 0 {
            read = SEGMENT_SIZE;
        }

        while !proof.is_empty() {
            // The `is_valid_proof_len` should not allow this to happen.
            debug_assert!((read - 1) % 32 == 0);
            debug_assert!(proof.len() >= read);

            let sign = proof[0];

            for (i, hash) in proof[1..read].chunks_exact(32).enumerate() {
                // Sign bits are consumed MSB-first: bit 7 for the first hash
                // of the segment, bit 6 for the second, and so on.
                let should_flip = (1 << (8 - i - 1)) & sign != 0;
                self.push(should_flip, *array_ref![hash, 0, 32]);
            }

            // Move to the next part of the proof.
            proof = &proof[read..];

            // After the first partial segment revert to reading full segments (8 hash at a time).
            read = SEGMENT_SIZE;
        }

        let result = self.finalize_expansion();
        debug_assert!(self.next_head.is_null());
        debug_assert!(self.stack.is_empty());
        result
    }

    /// Push a new proof chunk to the stack of pending subtree and merge the
    /// two previous pushed values if they are present.
    ///
    /// # Guarantees
    ///
    /// This function guarantees that after getting called the value of `self.next_head`
    /// is no longer null.
    fn push(&mut self, flip: bool, hash: [u8; 32]) {
        debug_assert!(!self.cursor.is_null());

        // Merge the two previous subtree as non-root hashes.
        if self.stack.is_full() {
            self.merge_stack(false);
        }

        let node = IncrementalVerifierTreeNode::leaf(hash);
        self.stack.push(node);

        // The flip bit encodes whether this hash is the *left* sibling of the
        // previously pushed subtree rather than the right one.
        if flip && self.stack.is_full() {
            self.stack.swap(0, 1);
        }

        // Always remember the first node generated using a new proof, since this
        // is where we want to go next if we're currently at root.
        if self.next_head.is_null() {
            self.next_head = node;
        }
    }

    /// Finalizes the stack operations of the current ongoing proof.
    ///
    /// Merges the pending stack into a single subtree, checks it against the
    /// cursor's hash, and on success grafts its children under the cursor and
    /// moves the cursor to the next leaf to verify.
    ///
    /// # Panics
    ///
    /// If the stack does not have two elements.
    fn finalize_expansion(&mut self) -> Result<(), IncrementalVerifierError> {
        debug_assert!(!self.cursor.is_null());
        assert!(self.stack.is_full());

        // SAFETY: This function is only called after validating the proof len and since the
        // smallest valid proof (that goes through without early return) has two hashes, it
        // is guaranteed that the stack has at least two elements so this call to `merge_stack`
        // will not panic.
        self.merge_stack(self.is_root());
        debug_assert_eq!(self.stack.len(), 1);

        // SAFETY: `merge_stack` guarantees the stack has exactly one item.
        let node = self.stack.pop().unwrap();
        debug_assert!(!node.is_null());

        // SAFETY: This block only contains debug assertions.
        unsafe {
            // the cursor *must* not have children.
            debug_assert!((*self.cursor).left.is_null());
            debug_assert!((*self.cursor).right.is_null());
            // the new parent node *must* have children.
            // This will always be true because a valid proof has at least two
            // hashes, which means it will be merged into one node that has both
            // a left and right child set.
            debug_assert!(!(*node).left.is_null());
            debug_assert!(!(*node).right.is_null());
        }

        // SAFETY:
        // 1. Dereferencing the `self.cursor` is safe since we don't set it to null.
        // 2. `self.merge_stack` guarantees that the new node in the stack which we
        //     popped has both children set.
        unsafe {
            if &(*node).hash != self.current_hash() {
                // This is an error and we need to return the error, we should also
                // remember that `node` is not referenced by anyone anymore so we should
                // remove it here before we return. Freeing `node` also frees the
                // children built from this proof via the recursive drop.
                debug_assert!(!(*node).left.is_null());
                debug_assert!(!(*node).right.is_null());
                IncrementalVerifierTreeNode::free(node);
                // Also set the next_head to null since we no longer need it anymore.
                self.next_head = ptr::null_mut();
                return Err(IncrementalVerifierError::HashMismatch);
            }

            // Set the left and right children of the current cursor.
            (*self.cursor).left = (*node).left;
            (*self.cursor).right = (*node).right;
        }

        // SAFETY: node.left and node.right are always set at this point and self.cursor is
        // also never null.
        unsafe {
            // Update the parent of left and right to link to the cursor
            // and not the new parent node.
            (*(*node).left).parent = self.cursor;
            (*(*node).right).parent = self.cursor;
        }

        // SAFETY: At this point of the code `self.cursor.left` and `self.cursor.right` are
        // set to the `node.left` and `node.right` so by setting them to null here, we are
        // not losing track of those pointers.
        unsafe {
            // Remove the left and right node of the new node so we can
            // drop it without dropping the children.
            (*node).left = ptr::null_mut();
            (*node).right = ptr::null_mut();
        }

        // SAFETY: We no longer need this node, and since it was popped from the stack it
        // is not referenced anywhere else, so we can free that memory, furthermore the
        // left and right children on the node are set to null at this point so dropping
        // the node will not drop those nodes.
        unsafe {
            debug_assert!((*node).left.is_null());
            debug_assert!((*node).right.is_null());
            IncrementalVerifierTreeNode::free(node);
        }

        // If we're at the root right now instead of traversing all the way to
        // the deepest left node, we need to respect the value of `self.index`
        // (in case it is not zero) and instead try to get to that node.
        if self.is_root() && self.block_counter != 0 {
            debug_assert!(!self.next_head.is_null());
            // SAFETY: For any non-empty proof the `push` function is at least called once,
            // and it is guaranteed that self.next_head is set to non-null pointer after calling
            // the `push` and since we are here in the code it means the stack was full which
            // translates into `push` function being at least called once.
            self.cursor = self.next_head;
        } else {
            // Traverse the current cursor into the deepest newly added left node so that
            // our guarantee about the cursor not having children is preserved.
            self.move_to_leftmost();
            // The leftmost node is the same as the cursor, but move_to_leftmost is cheap
            // enough and easier to see its correctness, that is why it is preferred in
            // this branch.
            debug_assert_eq!(self.next_head, self.cursor);
        }

        self.next_head = ptr::null_mut();
        Ok(())
    }

    /// Returns true if the current cursor is pointing to the root of the tree.
    #[inline(always)]
    fn is_root(&self) -> bool {
        debug_assert!(!self.cursor.is_null(), "cursor is null");
        // SAFETY: Dereferencing cursor is safe since we never set it a null value.
        unsafe { (*self.cursor).parent.is_null() }
    }

    /// Returns the hash of the current node in the tree.
    #[inline(always)]
    fn current_hash(&self) -> &[u8; 32] {
        debug_assert!(!self.cursor.is_null(), "cursor is null");
        // SAFETY: Dereferencing cursor is safe since we never set it a null value.
        unsafe { &(*self.cursor).hash }
    }

    /// Moves the cursor to the leftmost node under the cursor.
    #[inline(always)]
    fn move_to_leftmost(&mut self) {
        debug_assert!(!self.cursor.is_null(), "cursor is null");
        // SAFETY: We can always assume dereferencing the cursor is safe since
        // we guarantee never setting it to null.
        //
        // And even here we change the cursor to a new value, after we're checking
        // it's not null.
        unsafe {
            while !(*self.cursor).left.is_null() {
                self.cursor = (*self.cursor).left;
            }
        }
    }

    /// Merge the current stack items into a new one.
    ///
    /// # Panics
    ///
    /// This function panics if the stack is not full. (i.e does not have 2 elements).
    ///
    /// # Guarantees
    ///
    /// After calling this function it is guaranteed that:
    ///
    /// 1- The stack has exactly one item.
    /// 2- The new node in the stack has both its left and right children set.
    fn merge_stack(&mut self, is_root: bool) {
        debug_assert!(!self.cursor.is_null(), "cursor is null");
        assert!(self.stack.is_full());

        let right = self.stack.pop().unwrap();
        let left = self.stack.pop().unwrap();
        debug_assert!(!right.is_null(), "stack item is not supposed to be null.");
        debug_assert!(!left.is_null(), "stack item is not supposed to be null.");

        // SAFETY: The only function pushing to the stack is this same function
        // and we can guarantee that these are not null;
        let (left_cv, right_cv) = unsafe { (&(*left).hash, &(*right).hash) };

        // Combine the two chaining values; `is_root` selects root finalization.
        let parent_hash = self.iv.merge(left_cv, right_cv, is_root);
        let parent = IncrementalVerifierTreeNode::new(left, right, parent_hash);

        // Push the new parent node into the stack.
        self.stack.push(parent);

        // SAFETY: The left and right are guaranteed to not be null and they need to link to
        // the parent, and the parent will be pushed to the stack right after this line which
        // will result into its safe drop when the `IncrementalTree` is dropped.
        unsafe {
            debug_assert!(
                (*left).parent.is_null(),
                "parent node is supposed to be null."
            );
            debug_assert!(
                (*right).parent.is_null(),
                "parent node is supposed to be null."
            );
            (*left).parent = parent;
            (*right).parent = parent;
        }
    }
}
476
impl IncrementalVerifierTreeNode {
    /// Allocate a new node with the given children and hash, returning an
    /// owning raw pointer. The parent link starts out null.
    #[inline(always)]
    pub fn new(left: *mut Self, right: *mut Self, hash: [u8; 32]) -> *mut Self {
        // Debug-build leak accounting: every allocation here must be paired
        // with exactly one call to `free`.
        #[cfg(debug_assertions)]
        POINTERS.with(|c| {
            *c.borrow_mut() += 1;
        });

        Box::into_raw(Box::new(Self {
            parent: ptr::null_mut(),
            left,
            right,
            hash,
        }))
    }

    /// Allocate a childless node holding only a hash.
    #[inline(always)]
    pub fn leaf(hash: [u8; 32]) -> *mut Self {
        Self::new(ptr::null_mut(), ptr::null_mut(), hash)
    }

    /// Reclaim a node previously created by [`Self::new`]/[`Self::leaf`].
    ///
    /// Dropping the box runs the node's `Drop` impl, which recursively frees
    /// any non-null children.
    ///
    /// # Safety
    ///
    /// `ptr` must be a non-null pointer obtained from `Self::new` that has
    /// not already been freed, and no other reference to it may be used
    /// afterwards.
    #[inline(always)]
    pub unsafe fn free(ptr: *mut Self) {
        debug_assert!(!ptr.is_null(), "Attempted to free null pointer.");

        // Debug-build leak accounting; an underflow means a double free.
        #[cfg(debug_assertions)]
        POINTERS.with(|c| {
            let mut pointers_mut = c.borrow_mut();
            if *pointers_mut == 0 {
                panic!("Double free detected.");
            }
            *pointers_mut -= 1;
        });

        drop(Box::from_raw(ptr))
    }
}
514
impl Drop for IncrementalVerifier {
    fn drop(&mut self) {
        // Free the tree rooted above the cursor unless the verifier already
        // terminated (in which case `finish` has freed it and nulled cursor).
        if !self.cursor.is_null() {
            self.finish();
        }

        // If there are any items left in the stack also free those.
        // (The stack is only non-empty if we are dropped mid-`feed_proof`,
        // e.g. after a panic between `push` and `finalize_expansion`.)
        for pointer in self.stack.drain(..) {
            // SAFETY: The stack owns its pending items.
            unsafe {
                IncrementalVerifierTreeNode::free(pointer);
            }
        }
    }
}
530
impl Drop for IncrementalVerifierTreeNode {
    fn drop(&mut self) {
        // SAFETY: Each node owns its children and is responsible for
        // dropping them when it is being dropped. Children are nulled after
        // freeing so a partially-dropped node can never double free.
        // NOTE(review): this recursion is as deep as the subtree being
        // dropped — presumably bounded by tree height; TODO confirm.
        unsafe {
            if !self.left.is_null() {
                IncrementalVerifierTreeNode::free(self.left);
                self.left = ptr::null_mut();
            }

            if !self.right.is_null() {
                IncrementalVerifierTreeNode::free(self.right);
                self.right = ptr::null_mut();
            }
        }
    }
}
548
/// A buffer containing a proof for a block of data.
pub struct ProofBuf {
    // The index at which the slice starts at in the boxed buffer.
    index: usize,
    // Backing allocation; only `buffer[index..]` is valid proof data, the
    // prefix is unused slack left over by the reverse-writing encoder.
    buffer: Box<[u8]>,
}
555
556impl ProofBuf {
557    fn new_internal(tree: &[[u8; 32]], walker: TreeWalker) -> Self {
558        let size = walker.size_hint().0;
559        let mut encoder = ProofEncoder::new(size);
560        for (direction, index) in walker {
561            debug_assert!(index < tree.len(), "Index overflow.");
562            encoder.insert(direction, &tree[index]);
563        }
564        encoder.finalize()
565    }
566
567    /// Construct a new proof for the given block index from the provided
568    /// tree.
569    pub fn new(tree: &[[u8; 32]], block: usize) -> Self {
570        Self::new_internal(tree, TreeWalker::new(block, tree.len()))
571    }
572
573    /// Construct proof for the given block number assuming that previous
574    /// blocks have already been sent.
575    pub fn resume(tree: &[[u8; 32]], block: usize) -> Self {
576        Self::new_internal(tree, TreeWalker::resume(block, tree.len()))
577    }
578
579    /// Returns the proof as a slice.
580    #[inline(always)]
581    pub fn as_slice(&self) -> &[u8] {
582        &self.buffer[self.index..]
583    }
584
585    /// Returns the length of the proof.
586    #[inline]
587    pub fn len(&self) -> usize {
588        self.buffer.len() - self.index
589    }
590
591    /// Returns if the buffer is empty.
592    #[inline]
593    pub fn is_empty(&self) -> bool {
594        self.len() == 0
595    }
596}
597
598impl AsRef<[u8]> for ProofBuf {
599    #[inline(always)]
600    fn as_ref(&self) -> &[u8] {
601        self.as_slice()
602    }
603}
604
605impl Borrow<[u8]> for ProofBuf {
606    #[inline(always)]
607    fn borrow(&self) -> &[u8] {
608        self.as_slice()
609    }
610}
611
612impl Debug for ProofBuf {
613    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
614        std::fmt::Debug::fmt(self.as_slice(), f)
615    }
616}
617
618impl PartialEq<&[u8]> for ProofBuf {
619    fn eq(&self, other: &&[u8]) -> bool {
620        self.as_slice().eq(*other)
621    }
622}
623
624pub struct ProofSizeEstimator(pub usize);
625
626impl ProofSizeEstimator {
627    pub fn new(block: usize, num_blocks: usize) -> Self {
628        assert!(num_blocks > 0 && block < num_blocks);
629        let tree_len = 2 * (num_blocks - 1) + 1;
630        let walker = TreeWalker::new(block, tree_len);
631        let count = walker.count();
632        Self(count * 32 + ((count + 7) / 8))
633    }
634    pub fn resume(block: usize, num_blocks: usize) -> Self {
635        assert!(num_blocks > 0 && block < num_blocks);
636        let tree_len = 2 * (num_blocks - 1) + 1;
637        let walker = TreeWalker::resume(block, tree_len);
638        let count = walker.count();
639        Self(count * 32 + ((count + 7) / 8))
640    }
641}
642
/// An encoder that manages a reverse buffer which can be used to convert the
/// root-to-leaf ordering of the [`TreeWalker`] to the proper stack ordering.
pub struct ProofEncoder {
    /// Write position; hashes are written right-to-left, so this starts at
    /// the end of the buffer and moves toward zero.
    cursor: usize,
    /// Number of hashes inserted so far.
    size: usize,
    /// Backing storage for the encoded proof.
    buffer: Box<[u8]>,
}
650
impl ProofEncoder {
    /// Create a new proof encoder for encoding a tree with the provided max number of
    /// items. An instance of ProofEncoder can not be used to encode more than the `n`
    /// items specified here. Providing an `n` smaller than the actual number of nodes
    /// can result in panics.
    pub fn new(n: usize) -> Self {
        // Compute the byte capacity for this encoder, which is 32-byte per hash and 1
        // byte per 8 of them (a sign byte, rounded up).
        let capacity = n * 32 + (n + 8 - 1) / 8;
        // Create a `Vec<u8>` with the given size and set its len to the byte capacity
        // it is not important for us to take care of initializing the items since the
        // type is a u8 and has no drop logic except the deallocation of the slice
        // itself.
        let mut vec = Vec::<u8>::with_capacity(capacity);
        if capacity > 0 {
            // SAFETY: The note above explains the use case. The justification of this
            // customization over just using a regular vector is that we need to write
            // from the end of the vector to the beginning (rev push), of course we can
            // use a regular vector and just flip everything at the end, but that will
            // be more complicated.
            unsafe {
                vec.set_len(capacity);
                // Make sure the last item in the vec which is supposed to be holding the
                // non-finalized sign byte is not dirty by setting it to zero.
                *vec.get_unchecked_mut(capacity - 1) = 0;
            }
        }

        let buffer = vec.into_boxed_slice();
        debug_assert_eq!(
            buffer.len(),
            capacity,
            "The buffer is smaller than expected."
        );

        Self {
            buffer,
            cursor: capacity,
            size: 0,
        }
    }

    /// Insert a new node into the tree, the direction determines whether or not we should
    /// be flipping the stack order when we're trying to rebuild the tree later on (on the
    /// client side).
    ///
    /// # Panics
    ///
    /// If called more than the number of times specified when it got constructed.
    pub fn insert(&mut self, direction: Direction, hash: &[u8; 32]) {
        assert!(self.cursor > 0);

        // Get the current non-finalized sign byte.
        let mut sign = self.buffer[self.cursor - 1];

        // Write the hash in the 32 bytes immediately before the cursor.
        self.cursor -= 32;
        self.buffer[self.cursor..self.cursor + 32].copy_from_slice(hash);

        // update the sign byte: a `Left` direction sets the bit matching this
        // hash's position within the current group of 8.
        if direction == Direction::Left {
            sign |= 1 << (self.size & 7);
        }

        self.size += 1;

        // Always put the sign as the leading byte of the data without moving the
        // cursor, this way the finalize can return a valid proof for when it's
        // called when the number of items does not divide 8.
        self.buffer[self.cursor - 1] = sign;

        // If we have consumed a multiple of 8 hashes so far, consume the sign byte
        // by moving the cursor.
        if self.size & 7 == 0 {
            debug_assert!(self.cursor > 0);
            self.cursor -= 1;
            // If we have more data coming in, make sure the dirty byte which will
            // be used for the next sign byte is set to zero.
            if self.cursor > 0 {
                self.buffer[self.cursor - 1] = 0;
            }
        }
    }

    /// Finalize the result of the encoder and return the proof buffer.
    pub fn finalize(mut self) -> ProofBuf {
        // Here we don't want to consume or get a mutable reference to the internal buffer
        // we have, but also we might be called when the number of passed hashes does not
        // divide 8. In this case we already have the current sign byte as the leading byte,
        // so we need to return data start one byte before the cursor.
        //
        // Furthermore we could have been returning a Vec here, but that would imply that the
        // current allocated memory would needed to be copied first into the Vec (in case the
        // cursor != 0) and then freed as well, which is not really suitable for this use case
        // we want to provide the caller with the buffer in the valid range (starting from cursor)
        // and at the same time avoid any memory copy and extra allocation and deallocation which
        // might come with dropping the box and acquiring a vec.
        //
        // This way the caller will have access to the data, and can use it the way they want,
        // for example sending it over the wire, and then once they are done with reading the
        // data they can free the used memory.
        //
        // Another idea here is to also leverage a slab allocator on the Context object which we
        // are gonna have down the line which may improve the performance (not sure how much).
        if self.size & 7 > 0 {
            debug_assert!(self.cursor > 0);
            // Shift the final sign byte so the used bits end up in the most
            // significant positions (the decoder reads them MSB-first).
            self.buffer[self.cursor - 1] <<= 8 - (self.size & 7);
            ProofBuf {
                buffer: self.buffer,
                index: self.cursor - 1,
            }
        } else {
            ProofBuf {
                buffer: self.buffer,
                index: self.cursor,
            }
        }
    }
}
770
/// The logic responsible for walking a full blake3 tree from top to bottom searching
/// for a path.
pub struct TreeWalker {
    /// Index of the node we're looking for.
    target: usize,
    /// Where we're at right now.
    ///
    /// Indices refer to the flat array representation of the tree.
    current: usize,
    /// Size of the current sub tree, which is the total number of
    /// leafs under the current node.
    subtree_size: usize,
}
782
impl TreeWalker {
    /// Construct a new [`TreeWalker`] to walk a tree of `tree_len` items (in the array
    /// representation), looking for the provided `target`-th leaf.
    ///
    /// Returns an already-exhausted walker if the tree is trivial (`tree_len <= 1`)
    /// or the target leaf does not exist in the tree.
    pub fn new(target: usize, tree_len: usize) -> Self {
        if tree_len <= 1 {
            return Self::empty();
        }

        let walker = Self {
            // Compute the index of the n-th leaf in the array representation of the
            // tree.
            // see: https://oeis.org/A005187
            target: target * 2 - target.count_ones() as usize,
            // Start the walk from the root of the full tree, which is the last item
            // in the array representation of the tree.
            current: tree_len - 1,
            // for `k` number of leaf nodes, the total nodes of the binary tree will
            // be `n = 2k - 1`, therefore for computing the number of leaf nodes given
            // the total number of all nodes, we can use the formula `k = ceil((n + 1) / 2)`
            // and we have `ceil(a / b) = floor((a + b - 1) / b)`.
            subtree_size: (tree_len + 2) / 2,
        };

        // The target leaf maps to an index past the root, so it cannot be part of
        // this tree: yield nothing.
        if walker.target > walker.current {
            return Self::empty();
        }

        walker
    }

    /// Construct a new [`TreeWalker`] to walk the tree assuming that a previous walk
    /// to the previous block has been made, and does not visit the nodes that the previous
    /// walker has visited.
    ///
    /// # Panics
    ///
    /// If target is zero. It doesn't make sense to call this function with target=zero since
    /// we don't have a -1 block that is already visited.
    pub fn resume(target: usize, tree_len: usize) -> Self {
        assert_ne!(target, 0, "Block zero has no previous blocks.");

        // Compute the index of the target in the tree representation.
        let target_index = target * 2 - target.count_ones() as usize;
        // If the target is not in this tree (out of bound) or the tree size is not
        // large enough for a resume walk return the empty iterator.
        if target_index >= tree_len || tree_len < 3 {
            return Self::empty();
        }

        // A leaf whose counter has `t` trailing zeros starts a fresh subtree of up
        // to `2^t` leaves; a resumed walk only needs to descend from the root of
        // that subtree, since the previous walk covered everything to its left.
        let distance_to_ancestor = target.trailing_zeros();
        // Total number of leaves in the whole tree (`k = ceil((n + 1) / 2)`).
        let subtree_size = (tree_len + 2) / 2;
        // Leaves under the resume ancestor: `2^distance_to_ancestor`, capped by the
        // number of leaves remaining from `target` to the end of the tree.
        let subtree_size = (1 << distance_to_ancestor).min(subtree_size - target);
        // With `2k - 1` nodes for `k` leaves, the subtree starting at `target_index`
        // is rooted at its last node, which sits at this index.
        let ancestor = target_index + (subtree_size << 1) - 2;

        // A one-leaf "subtree" means the previous walk already produced everything
        // needed to verify this target.
        if subtree_size <= 1 {
            return Self::empty();
        }

        debug_assert!(distance_to_ancestor >= 1);

        Self {
            target: target_index,
            current: ancestor,
            subtree_size,
        }
    }

    /// Create a walker that is already finished and yields no items.
    #[inline(always)]
    const fn empty() -> Self {
        Self {
            target: 0,
            current: 0,
            subtree_size: 0,
        }
    }

    /// Return the index of the target element in the array representation of the
    /// complete tree.
    pub fn tree_index(&self) -> usize {
        self.target
    }
}
865
/// The position of an element relative to the walk target in a binary tree.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    /// The element is the current root of the tree, it's neither on the
    /// left or right side.
    Target,
    /// The element is on the left side of the tree.
    Left,
    /// The element is on the right side of the tree.
    Right,
}
877
impl Iterator for TreeWalker {
    // Each step yields the array index of the pruned sibling subtree's root,
    // tagged with the side it lies on; the final item is the target itself.
    type Item = (Direction, usize);

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        // If we are at a leaf node, we've already finished the traversal, and if the
        // target is greater than the current (which can only happen in the first iteration),
        // the target is already not in this tree anywhere.
        if self.subtree_size == 0 || self.target > self.current {
            return None;
        }

        if self.current == self.target {
            self.subtree_size = 0;
            return Some((Direction::Target, self.current));
        }

        // The left subtree in a blake3 tree is always guaranteed to contain a power of two
        // number of leaf (chunks), therefore the number of items on the left subtree can
        // be easily computed as the previous power of two (less than but not equal to)
        // the current items that we know our current subtree has, anything else goes
        // to the right subtree.
        let left_subtree_size = previous_pow_of_two(self.subtree_size);
        let right_subtree_size = self.subtree_size - left_subtree_size;
        // Use the formula `n = 2k - 1` to compute the total number of items on the
        // right side of this node, the index of the left node will be `n`-th item
        // before where we currently are at.
        let right_subtree_total_nodes = right_subtree_size * 2 - 1;
        let left = self.current - right_subtree_total_nodes - 1;
        let right = self.current - 1;

        match left.cmp(&self.target) {
            // The target is on the left side, so we need to prune the right subtree.
            Ordering::Equal | Ordering::Greater => {
                self.subtree_size = left_subtree_size;
                self.current = left;
                Some((Direction::Right, right))
            },
            // The target is on the right side, prune the left subtree.
            Ordering::Less => {
                self.subtree_size = right_subtree_size;
                self.current = right;
                Some((Direction::Left, left))
            },
        }
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // If we're done iterating return 0.
        if self.subtree_size == 0 {
            return (0, Some(0));
        }

        // Return the upper bound as the result of the size estimation, the actual lower bound
        // can be computed more accurately but we don't really care about the accuracy of the
        // size estimate and the upper bound should be small enough for most use cases we have.
        //
        // This line is basically `ceil(log2(self.subtree_size)) + 1` which is the max depth of
        // the current subtree and one additional element + 1.
        //
        // NOTE(review): returning `upper` as the lower bound technically violates the
        // `Iterator::size_hint` contract (the true item count can be smaller when the
        // target sits in a shallow right subtree). Kept as-is because callers appear
        // to use `size_hint().0` as a capacity estimate — confirm before tightening.
        let upper =
            usize::BITS as usize - self.subtree_size.saturating_sub(1).leading_zeros() as usize + 1;
        (upper, Some(upper))
    }
}
943
/// Returns the largest power of two that is strictly less than `n`.
///
/// For `n <= 1` there is no such power of two, and zero is returned.
#[inline(always)]
fn previous_pow_of_two(n: usize) -> usize {
    // `next_power_of_two` rounds up to the smallest power of two >= n, so
    // halving it lands on the greatest power of two strictly below n.
    n.next_power_of_two() >> 1
}
950
/// Validates that the provided number of bytes is a valid number of bytes for a proof
/// buffer. A valid proof is either empty, or carries at least two 32-byte hashes plus
/// one sign byte per segment of up to eight hashes, up to a maximum of 47 hashes
/// (`32 * 47` hash bytes + 6 sign bytes).
#[inline(always)]
fn is_valid_proof_len(n: usize) -> bool {
    // A full segment is one sign byte followed by eight 32-byte hashes.
    const SEG_SIZE: usize = 32 * 8 + 1;
    // One sign byte per (possibly partial) segment.
    let sign_bytes = (n + SEG_SIZE - 1) / SEG_SIZE;
    let hash_bytes = n - sign_bytes;
    // The hash payload must be whole hashes, the total length must be bounded,
    // and a non-empty proof must expand into at least two child hashes.
    hash_bytes % 32 == 0 && n <= 32 * 47 + 6 && (hash_bytes / 32 >= 2 || n == 0)
}
960
961#[cfg(test)]
962mod tests {
963    use super::*;
964
965    fn assert_no_leak() {
966        #[cfg(debug_assertions)]
967        POINTERS.with(|c| {
968            let n = *c.borrow();
969            assert_eq!(n, 0, "Memory leak detected.");
970        })
971    }
972
973    /// Create a mock tree that has n leaf nodes, each leaf node `i` starting
974    /// from 1 has their `i`-th bit set to 1, and merging two nodes is done
975    /// via `|` operation.
976    fn make_mock_tree(n: u8) -> Vec<u128> {
977        let n = n as usize;
978        assert!(n > 0 && n <= 128);
979        let mut tree = Vec::with_capacity(n * 2 - 1);
980        let mut stack = Vec::with_capacity(8);
981        for counter in 0..n {
982            let mut node = 1u128 << counter;
983            let mut counter = counter;
984            while counter & 1 == 1 {
985                let prev = stack.pop().unwrap();
986                tree.push(node);
987                node |= prev;
988                counter >>= 1;
989            }
990            stack.push(node);
991            tree.push(node);
992        }
993
994        while stack.len() >= 2 {
995            let a = stack.pop().unwrap();
996            let b = stack.pop().unwrap();
997            tree.push(a | b);
998            stack.push(a | b);
999        }
1000
1001        tree
1002    }
1003
    #[test]
    fn tree_walker() {
        // For every tree size, walk to every leaf and replay the walk bottom-up:
        // starting from the target leaf, each yielded sibling must merge (via `|`)
        // into the running value until the root of the mock tree is reproduced.
        for size in 2..100 {
            let tree = make_mock_tree(size);

            for start in 0..size {
                let mut walk = TreeWalker::new(start as usize, tree.len()).collect::<Vec<_>>();
                walk.reverse();

                // The walk ends at the target leaf, whose mock value is the
                // `start`-th bit.
                assert_eq!(walk[0].0, Direction::Target);
                assert_eq!(tree[walk[0].1], (1 << start));

                let mut current = tree[walk[0].1];

                for (direction, i) in &walk[1..] {
                    let node = tree[*i];

                    // Mock nodes are bitsets of the leaves below them, so a true
                    // sibling shares no bits with what has been merged so far.
                    assert_eq!(
                        node & current,
                        0,
                        "the node should not have common bits with the current node."
                    );

                    match direction {
                        Direction::Target => panic!("Target should only appear at the start."),
                        Direction::Left => {
                            // A left sibling's bit range sits directly below `current`.
                            assert_eq!(((current >> 1) & node).count_ones(), 1);
                            current |= node;
                        },
                        Direction::Right => {
                            // A right sibling's bit range sits directly above `current`.
                            assert_eq!(((current << 1) & node).count_ones(), 1);
                            current |= node;
                        },
                    }
                }

                // After merging every sibling we must arrive at the root's value.
                assert_eq!(tree[tree.len() - 1], current);
            }
        }
    }
1044
1045    #[test]
1046    fn tree_walker_one_block() {
1047        let walk = TreeWalker::new(0, 1).collect::<Vec<_>>();
1048        assert_eq!(walk.len(), 0);
1049    }
1050
1051    #[test]
1052    fn tree_walker_out_of_bound() {
1053        let walk = TreeWalker::new(2, 3).collect::<Vec<_>>();
1054        assert_eq!(walk.len(), 0);
1055    }
1056
1057    #[test]
1058    fn walker_partial_tree() {
1059        let walk = TreeWalker::resume(2, 5).collect::<Vec<_>>();
1060        assert_eq!(walk.len(), 0);
1061    }
1062
1063    #[test]
1064    fn encoder_zero_capacity() {
1065        let encoder = ProofEncoder::new(0);
1066        assert_eq!(0, encoder.buffer.len());
1067        assert_eq!(encoder.finalize().len(), 0);
1068    }
1069
1070    #[test]
1071    fn encoder_one_item() {
1072        let mut expected = Vec::<u8>::new();
1073        let mut hash = [0; 32];
1074        hash[0] = 1;
1075        hash[31] = 31;
1076
1077        // sign byte on the left
1078        let mut encoder = ProofEncoder::new(1);
1079        encoder.insert(Direction::Left, &hash);
1080        expected.push(0b10000000); // sign byte
1081        expected.extend_from_slice(&hash);
1082        assert_eq!(expected.len(), encoder.buffer.len());
1083        assert_eq!(encoder.finalize(), expected.as_slice());
1084
1085        // sign byte on the right
1086        let mut encoder = ProofEncoder::new(1);
1087        encoder.insert(Direction::Right, &hash);
1088        expected.clear();
1089        expected.push(0); // sign byte
1090        expected.extend_from_slice(&hash);
1091        assert_eq!(encoder.finalize(), expected.as_slice());
1092    }
1093
1094    #[test]
1095    fn encoder_two_item() {
1096        let mut expected = Vec::<u8>::new();
1097
1098        let mut encoder = ProofEncoder::new(2);
1099        encoder.insert(Direction::Right, &[0; 32]);
1100        encoder.insert(Direction::Right, &[1; 32]);
1101        expected.push(0); // sign byte
1102        expected.extend_from_slice(&[1; 32]);
1103        expected.extend_from_slice(&[0; 32]);
1104        assert_eq!(encoder.finalize(), expected.as_slice());
1105
1106        let mut encoder = ProofEncoder::new(2);
1107        encoder.insert(Direction::Left, &[0; 32]);
1108        encoder.insert(Direction::Right, &[1; 32]);
1109        expected.clear();
1110        expected.push(0b01000000); // sign byte
1111        expected.extend_from_slice(&[1; 32]);
1112        expected.extend_from_slice(&[0; 32]);
1113        assert_eq!(encoder.finalize(), expected.as_slice());
1114
1115        let mut encoder = ProofEncoder::new(2);
1116        encoder.insert(Direction::Left, &[0; 32]);
1117        encoder.insert(Direction::Left, &[1; 32]);
1118        expected.clear();
1119        expected.push(0b11000000); // sign byte
1120        expected.extend_from_slice(&[1; 32]);
1121        expected.extend_from_slice(&[0; 32]);
1122        assert_eq!(encoder.finalize(), expected.as_slice());
1123
1124        let mut encoder = ProofEncoder::new(2);
1125        encoder.insert(Direction::Right, &[0; 32]);
1126        encoder.insert(Direction::Left, &[1; 32]);
1127        expected.clear();
1128        expected.push(0b10000000); // sign byte
1129        expected.extend_from_slice(&[1; 32]);
1130        expected.extend_from_slice(&[0; 32]);
1131        assert_eq!(expected.len(), encoder.buffer.len());
1132        assert_eq!(encoder.finalize(), expected.as_slice());
1133    }
1134
    #[test]
    fn valid_proof_len() {
        // Hand-picked edge cases around the smallest possible proofs.
        assert!(is_valid_proof_len(0));
        assert!(!is_valid_proof_len(1));
        assert!(!is_valid_proof_len(2));
        assert!(!is_valid_proof_len(32));
        // [sign byte + 1 hash] -> not valid proof
        // since it does not expand anything.
        assert!(!is_valid_proof_len(33));
        assert!(!is_valid_proof_len(40));
        assert!(!is_valid_proof_len(64));
        assert!(is_valid_proof_len(65));

        // Sweep combinations of full segments (8 hashes + 1 sign byte each) plus
        // a partial trailing segment, and check the off-by-one neighbors too.
        for full_seg in 0..5 {
            let bytes = full_seg * 32 * 8 + full_seg;
            assert!(is_valid_proof_len(bytes), "failed for len={bytes}");

            for partial_seg in 1..8 {
                let bytes = bytes + 1 + partial_seg * 32;
                // Anything at or below [sign byte + 2 hashes] minus one is invalid.
                let is_valid = bytes > 64;
                assert_eq!(
                    is_valid_proof_len(bytes),
                    is_valid,
                    "failed for len={bytes}"
                );
                assert!(!is_valid_proof_len(bytes - 1), "failed for len={bytes}");
                assert!(!is_valid_proof_len(bytes + 1), "failed for len={bytes}");
            }
        }
    }
1165
    #[test]
    fn proof_size_estimator() {
        // ProofSizeEstimator must predict the exact byte length of every proof
        // that ProofBuf produces — for both fresh and resumed walks — across
        // tree sizes 1..100.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        for size in 1..100 {
            tree_builder.update(&[size as u8; 256 * 1024]);
            // clone the builder and finalize it at the current size,
            // leaving the builder for the next size
            let output = tree_builder.clone().finalize();

            // check the first block's proof buf estimation
            let mut estimator = ProofSizeEstimator::new(0, size);
            let mut proof = ProofBuf::new(&output.tree, 0);
            assert_eq!(proof.len(), estimator.0);

            // iterate over the remaining blocks and ensure the estimator outputs matches the
            // proof buf lengths
            for i in 1..size {
                proof = ProofBuf::resume(&output.tree, i);
                estimator = ProofSizeEstimator::resume(i, size);
                assert_eq!(proof.len(), estimator.0);
            }
        }
    }
1189
    #[test]
    fn incremental_verifier_basic() {
        // Build a 4-block tree, then for every block spin up a fresh verifier,
        // feed it the initial proof and the block content, and check termination.
        // Even-indexed blocks leave the verifier parked on the sibling leaf, so
        // the following block verifies without feeding any additional proof.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..4).for_each(|i| tree_builder.update(&[i; 256 * 1024]));
        let output = tree_builder.finalize();

        for i in 0..4 {
            let proof = ProofBuf::new(&output.tree, i);
            let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), i);
            verifier.feed_proof(proof.as_slice()).unwrap();

            let mut block = blake3::tree::BlockHasher::new();
            block.set_block(i);
            block.update(&[i as u8; 256 * 1024]);
            verifier.verify(block).unwrap();

            // Only verifying the last block (i == 3) terminates the verifier here.
            assert_eq!(verifier.is_done(), i > 2);

            // for even blocks we should be able to also verify the next block without
            // the need to feed new proof.
            if i % 2 == 0 {
                let mut block = blake3::tree::BlockHasher::new();
                block.set_block(i + 1);
                block.update(&[i as u8 + 1; 256 * 1024]);
                verifier.verify(block).unwrap();
            }

            assert_eq!(verifier.is_done(), i > 1);
            // A terminated verifier must reject any further blocks.
            if i > 1 {
                assert_eq!(
                    verifier.verify(blake3::tree::BlockHasher::new()),
                    Err(IncrementalVerifierError::VerifierTerminated)
                );
            }

            drop(verifier);
            assert_no_leak();
        }
    }
1229
    #[test]
    fn incremental_verifier_small_data() {
        // Content that fits in a single block needs no proof at all: the root
        // hash is the block hash.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        tree_builder.update(&[17; 64]);
        let output = tree_builder.finalize();

        let proof = ProofBuf::new(&output.tree, 0);
        assert_eq!(proof.len(), 0);

        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();

        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(0);
        block.update(&[17; 64]);
        verifier.verify(block).unwrap();

        // Verifying the only block terminates the verifier immediately...
        assert!(verifier.is_done());

        // ...and any further verification attempt must be rejected.
        assert_eq!(
            verifier.verify(blake3::tree::BlockHasher::new()),
            Err(IncrementalVerifierError::VerifierTerminated)
        );

        drop(verifier);
        assert_no_leak();
    }
1257
    #[test]
    fn incremental_verifier_resume_simple() {
        // Walk all four blocks of a tree with a single verifier, feeding the
        // initial proof for block 0 and resume proofs afterwards, checking the
        // cursor (current_hash) against the expected tree node at each step.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..4).for_each(|i| tree_builder.update(&[i; 256 * 1024]));
        let output = tree_builder.finalize();

        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), 0);

        let proof = ProofBuf::new(&output.tree, 0);
        verifier.feed_proof(proof.as_slice()).unwrap();
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(0);
        block.update(&[0; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 1);
        // After block 0, the cursor moves to its sibling leaf (tree index 1).
        assert_eq!(verifier.current_hash(), &output.tree[1]);

        // The sibling's hash is already known, so the resume proof is empty.
        let proof = ProofBuf::resume(&output.tree, 1);
        assert_eq!(proof.len(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(1);
        block.update(&[1; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 2);

        // now the cursor should have moved to 5.
        //         6
        //    2        5
        // 0    1   [3  4] <- pruned
        assert_eq!(verifier.current_hash(), &output.tree[5]);
        let proof = ProofBuf::resume(&output.tree, 2);
        verifier.feed_proof(proof.as_slice()).unwrap();
        // Feeding the resume proof expands node 5 down to leaf 3.
        assert_eq!(verifier.current_hash(), &output.tree[3]);
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(2);
        block.update(&[2; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 3);
        assert_eq!(verifier.current_hash(), &output.tree[4]);

        let proof = ProofBuf::resume(&output.tree, 3);
        assert_eq!(proof.len(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();

        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(3);
        block.update(&[3; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 4);
        assert!(verifier.is_done());

        drop(verifier);
        assert_no_leak();
    }
1313
    #[test]
    fn incremental_verifier_partial_tree() {
        // Same sequential walk as `incremental_verifier_resume_simple`, but over
        // an odd (3-block, unbalanced) tree where the last leaf hangs off the root.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..3).for_each(|i| tree_builder.update(&[i; 256 * 1024]));
        let output = tree_builder.finalize();
        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), 0);

        let proof = ProofBuf::new(&output.tree, 0);
        verifier.feed_proof(proof.as_slice()).unwrap();
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(0);
        block.update(&[0; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 1);
        assert_eq!(verifier.current_hash(), &output.tree[1]);

        let proof = ProofBuf::resume(&output.tree, 1);
        assert_eq!(proof.len(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(1);
        block.update(&[1; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 2);

        // The final leaf (tree index 3) is already the cursor; its resume proof
        // is empty and feeding it leaves the cursor unchanged.
        assert_eq!(verifier.current_hash(), &output.tree[3]);
        let proof = ProofBuf::resume(&output.tree, 2);
        assert_eq!(proof.len(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();
        assert_eq!(verifier.current_hash(), &output.tree[3]);
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(2);
        block.update(&[2; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 3);
        assert!(verifier.is_done());

        drop(verifier);
        assert_no_leak();
    }
1354
    #[test]
    fn incremental_verifier_range_req() {
        // Model a range request: start verification at block 1 instead of 0 and
        // continue through to the end of the 4-block tree.
        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..4).for_each(|i| tree_builder.update(&[i; 256 * 1024]));
        let output = tree_builder.finalize();

        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), 1);

        let proof = ProofBuf::new(&output.tree, 1);
        verifier.feed_proof(proof.as_slice()).unwrap();
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(1);
        block.update(&[1; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 2);

        // After finishing the left subtree the cursor sits on node 5, which a
        // resume proof then expands down to leaf 3.
        assert_eq!(verifier.current_hash(), &output.tree[5]);
        let proof = ProofBuf::resume(&output.tree, 2);
        verifier.feed_proof(proof.as_slice()).unwrap();
        assert_eq!(verifier.current_hash(), &output.tree[3]);
        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(2);
        block.update(&[2; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 3);
        assert_eq!(verifier.current_hash(), &output.tree[4]);

        let proof = ProofBuf::resume(&output.tree, 3);
        assert_eq!(proof.len(), 0);
        verifier.feed_proof(proof.as_slice()).unwrap();

        let mut block = blake3::tree::BlockHasher::new();
        block.set_block(3);
        block.update(&[3; 256 * 1024]);
        verifier.verify(block).unwrap();
        assert_eq!(verifier.block_counter, 4);
        assert!(verifier.is_done());

        drop(verifier);
        assert_no_leak();
    }
1396
    #[test]
    fn incremental_verifier_large_data_first_proof_only() {
        // Fill a block with the low and high bytes of its index repeated, so
        // every block's content is distinct.
        #[inline(always)]
        fn block_data(n: usize) -> [u8; 256 * 1024] {
            let mut data = [0; 256 * 1024];
            for i in data.chunks_exact_mut(2) {
                i[0] = n as u8;
                i[1] = (n / 256) as u8;
            }
            data
        }

        const SIZE: usize = 2702;

        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..SIZE).for_each(|i| tree_builder.update(&block_data(i)));
        let output = tree_builder.finalize();

        // Only the first proof + block of each starting offset is exercised;
        // stepping by 127 samples many different positions in the tree cheaply.
        for start in (0..SIZE).step_by(127) {
            let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), start);

            verifier
                .feed_proof(ProofBuf::new(&output.tree, start).as_slice())
                .unwrap_or_else(|_| panic!("Invalid Proof: size={SIZE} start={start}"));

            verifier
                .verify({
                    let mut block = blake3::tree::BlockHasher::new();
                    block.set_block(start);
                    block.update(&block_data(start));
                    block
                })
                .unwrap_or_else(|_| panic!("Invalid Content: size={SIZE} start={start}"));

            drop(verifier);
            assert_no_leak();
        }
    }
1435
    #[test]
    fn incremental_verifier_large_data_first_one_resume() {
        // Fill a block with the low and high bytes of its index repeated, so
        // every block's content is distinct.
        #[inline(always)]
        fn block_data(n: usize) -> [u8; 256 * 1024] {
            let mut data = [0; 256 * 1024];
            for i in data.chunks_exact_mut(2) {
                i[0] = n as u8;
                i[1] = (n / 256) as u8;
            }
            data
        }

        const SIZE: usize = 654;

        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..SIZE).for_each(|i| tree_builder.update(&block_data(i)));
        let output = tree_builder.finalize();

        // For every possible starting block, verify it with a fresh proof and
        // then verify exactly one follow-up block with a resume proof.
        for start in 0..SIZE - 1 {
            let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), start);

            verifier
                .feed_proof(ProofBuf::new(&output.tree, start).as_slice())
                .unwrap_or_else(|_| panic!("Invalid Proof: size={SIZE} start={start}"));

            verifier
                .verify({
                    let mut block = blake3::tree::BlockHasher::new();
                    block.set_block(start);
                    block.update(&block_data(start));
                    block
                })
                .unwrap_or_else(|_| panic!("Invalid Content: size={SIZE} start={start}"));

            verifier
                .feed_proof(ProofBuf::resume(&output.tree, start + 1).as_slice())
                .unwrap_or_else(|_| panic!("Invalid Resume Proof: size={SIZE} start={start}"));

            verifier
                .verify({
                    let mut block = blake3::tree::BlockHasher::new();
                    block.set_block(start + 1);
                    block.update(&block_data(start + 1));
                    block
                })
                .unwrap_or_else(|_| panic!("Invalid Resume Content: size={SIZE} start={start}"));

            drop(verifier);
            assert_no_leak();
        }
    }
1487
    // This is a very long running test so let's only do it sometimes.
    #[cfg(feature = "all-tests")]
    #[test]
    fn incremental_verifier_large_data() {
        // Fill a block with the low and high bytes of its index repeated, so
        // every block's content is distinct.
        #[inline(always)]
        fn block_data(n: usize) -> [u8; 256 * 1024] {
            let mut data = [0; 256 * 1024];
            for i in data.chunks_exact_mut(2) {
                i[0] = n as u8;
                i[1] = (n / 256) as u8;
            }
            data
        }

        const SIZE: usize = 2702;

        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..SIZE).for_each(|i| tree_builder.update(&block_data(i)));
        let output = tree_builder.finalize();

        // For sampled starting offsets, verify the first block with a fresh
        // proof and then every remaining block via resume proofs, checking that
        // the verifier terminates exactly at the end of the tree.
        for start in (0..SIZE).step_by(127) {
            let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), start);

            verifier
                .feed_proof(ProofBuf::new(&output.tree, start).as_slice())
                .unwrap_or_else(|_| panic!("Invalid Proof: size={SIZE} start={start}"));

            verifier
                .verify({
                    let mut block = blake3::tree::BlockHasher::new();
                    block.set_block(start);
                    block.update(&block_data(start));
                    block
                })
                .unwrap_or_else(|_| panic!("Invalid Content: size={SIZE} start={start}"));

            for i in start + 1..SIZE {
                verifier
                    .feed_proof(ProofBuf::resume(&output.tree, i).as_slice())
                    .unwrap_or_else(|_| {
                        panic!("Invalid Proof on Resume: size={SIZE} start={start} i={i}")
                    });

                verifier
                    .verify({
                        let mut block = blake3::tree::BlockHasher::new();
                        block.set_block(i);
                        block.update(&block_data(i));
                        block
                    })
                    .unwrap_or_else(|_| {
                        panic!("Invalid Content on Resume: size={SIZE} start={start} i={i}")
                    });
            }

            assert!(
                verifier.is_done(),
                "verifier not terminated: size={SIZE} start={start}"
            );

            drop(verifier);
            assert_no_leak();
        }
    }
1552
    #[test]
    fn incremental_verifier_with_resume() {
        // Fill a block with the low and high bytes of its index repeated, so
        // every block's content is distinct.
        #[inline(always)]
        fn block_data(n: usize) -> [u8; 256 * 1024] {
            let mut data = [0; 256 * 1024];
            for i in data.chunks_exact_mut(2) {
                i[0] = n as u8;
                i[1] = (n / 256) as u8;
            }
            data
        }

        const SIZE: usize = 30;

        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
        (0..SIZE).for_each(|i| tree_builder.update(&block_data(i)));
        let output = tree_builder.finalize();
        let start = 0;

        // Sequentially verify all blocks with one verifier: a fresh proof for
        // the first block, resume proofs for the rest — the full pipeline in
        // miniature.
        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), start);

        verifier
            .feed_proof(ProofBuf::new(&output.tree, start).as_slice())
            .unwrap_or_else(|_| panic!("Invalid Proof: size={SIZE} start={start}"));

        verifier
            .verify({
                let mut block = blake3::tree::BlockHasher::new();
                block.set_block(start);
                block.update(&block_data(start));
                block
            })
            .unwrap_or_else(|_| panic!("Invalid Content: size={SIZE} start={start}"));

        for i in start + 1..SIZE {
            verifier
                .feed_proof(ProofBuf::resume(&output.tree, i).as_slice())
                .unwrap_or_else(|_| {
                    panic!("Invalid Proof on Resume: size={SIZE} start={start} i={i}")
                });

            verifier
                .verify({
                    let mut block = blake3::tree::BlockHasher::new();
                    block.set_block(i);
                    block.update(&block_data(i));
                    block
                })
                .unwrap_or_else(|_| {
                    panic!("Invalid Content on Resume: size={SIZE} start={start} i={i}")
                });
        }

        assert!(
            verifier.is_done(),
            "verifier not terminated: size={SIZE} start={start}"
        );

        drop(verifier);
        assert_no_leak();
    }
1614
1615    #[test]
1616    fn incremental_verifier_resume_654() {
1617        #[inline(always)]
1618        fn block_data(n: usize) -> [u8; 256 * 1024] {
1619            let mut data = [0; 256 * 1024];
1620            for i in data.chunks_exact_mut(2) {
1621                i[0] = n as u8;
1622                i[1] = (n / 256) as u8;
1623            }
1624            data
1625        }
1626
1627        const SIZE: usize = 654;
1628
1629        let mut tree_builder = blake3::tree::HashTreeBuilder::new();
1630        (0..SIZE).for_each(|i| tree_builder.update(&block_data(i)));
1631        let output = tree_builder.finalize();
1632
1633        let mut verifier = IncrementalVerifier::new(*output.hash.as_bytes(), 639);
1634
1635        verifier
1636            .feed_proof(ProofBuf::new(&output.tree, 639).as_slice())
1637            .unwrap_or_else(|_| panic!("Invalid Proof: size={SIZE}"));
1638
1639        verifier
1640            .verify({
1641                let mut block = blake3::tree::BlockHasher::new();
1642                block.set_block(639);
1643                block.update(&block_data(639));
1644                block
1645            })
1646            .unwrap_or_else(|_| panic!("Invalid Content: size={SIZE}"));
1647
1648        verifier
1649            .feed_proof(ProofBuf::resume(&output.tree, 640).as_slice())
1650            .unwrap_or_else(|_| panic!("Invalid Proof on Resume: size={SIZE}"));
1651
1652        verifier
1653            .verify({
1654                let mut block = blake3::tree::BlockHasher::new();
1655                block.set_block(640);
1656                block.update(&block_data(640));
1657                block
1658            })
1659            .unwrap_or_else(|_| panic!("Invalid Content on Resume: size={SIZE}"));
1660
1661        drop(verifier);
1662        assert_no_leak();
1663    }
1664}