// light_concurrent_merkle_tree/lib.rs

1use std::{
2    alloc::{self, handle_alloc_error, Layout},
3    iter::Skip,
4    marker::PhantomData,
5    mem,
6};
7
8use changelog::ChangelogPath;
9use light_bounded_vec::{
10    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecIterator,
11    CyclicBoundedVecMetadata,
12};
13pub use light_hasher;
14use light_hasher::Hasher;
15
16pub mod changelog;
17pub mod copy;
18pub mod errors;
19pub mod event;
20pub mod hash;
21pub mod zero_copy;
22
23use crate::{
24    changelog::ChangelogEntry,
25    errors::ConcurrentMerkleTreeError,
26    hash::{compute_parent_node, compute_root},
27};
28
/// [Concurrent Merkle tree](https://drive.google.com/file/d/1BOpa5OFmara50fTvL0VIVYjtg-qzHCVc/view)
/// which allows for multiple requests of updating leaves, without making any
/// of the requests invalid, as long as they are not modifying the same leaf.
///
/// When any of the above happens, some of the concurrent requests are going to
/// be invalid, forcing the clients to re-generate the Merkle proof. But that's
/// still better than having such a failure after any update happening in the
/// middle of requesting the update.
///
/// Due to the ability to make a decent number of concurrent update requests to
/// be valid, no lock is necessary.
#[repr(C)]
#[derive(Debug)]
// TODO(vadorovsky): The only reason why are we still keeping `HEIGHT` as a
// const generic here is that removing it would require keeping a `BoundecVec`
// inside `CyclicBoundedVec`. Casting byte slices to such nested vector is not
// a trivial task, but we might eventually do it at some point.
pub struct ConcurrentMerkleTree<H, const HEIGHT: usize>
where
    H: Hasher,
{
    pub height: usize,
    pub canopy_depth: usize,

    /// Index at which the next appended leaf will be inserted.
    /// Heap-allocated in `new`, deallocated in `Drop`.
    pub next_index: *mut usize,
    /// Counter incremented on every state-changing operation (update or
    /// per-leaf append). Heap-allocated in `new`, deallocated in `Drop`.
    pub sequence_number: *mut usize,
    /// Hash of the most recently appended (rightmost) leaf.
    /// Heap-allocated in `new`, deallocated in `Drop`.
    pub rightmost_leaf: *mut [u8; 32],

    /// Hashes of subtrees.
    pub filled_subtrees: BoundedVec<[u8; 32]>,
    /// History of Merkle proofs.
    pub changelog: CyclicBoundedVec<ChangelogEntry<HEIGHT>>,
    /// History of roots.
    pub roots: CyclicBoundedVec<[u8; 32]>,
    /// Cached upper nodes.
    pub canopy: BoundedVec<[u8; 32]>,

    pub _hasher: PhantomData<H>,
}
68
/// Convenience alias for a concurrent Merkle tree of height 26.
pub type ConcurrentMerkleTree26<H> = ConcurrentMerkleTree<H, 26>;
70
71impl<H, const HEIGHT: usize> ConcurrentMerkleTree<H, HEIGHT>
72where
73    H: Hasher,
74{
75    /// Number of nodes to include in canopy, based on `canopy_depth`.
76    #[inline(always)]
77    pub fn canopy_size(canopy_depth: usize) -> usize {
78        (1 << (canopy_depth + 1)) - 2
79    }
80
    /// Size of the struct **without** dynamically sized fields (`BoundedVec`,
    /// `CyclicBoundedVec`).
    pub fn non_dyn_fields_size() -> usize {
        // height
        mem::size_of::<usize>()
        // canopy_depth
        // NOTE(review): this comment previously said `changelog_capacity`,
        // but the second `usize` field of the struct is `canopy_depth` —
        // confirm against the serialized account layout.
        + mem::size_of::<usize>()
        // next_index
        + mem::size_of::<usize>()
        // sequence_number
        + mem::size_of::<usize>()
        // rightmost_leaf
        + mem::size_of::<[u8; 32]>()
        // filled_subtrees (metadata)
        + mem::size_of::<BoundedVecMetadata>()
        // changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // roots (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // canopy (metadata)
        + mem::size_of::<BoundedVecMetadata>()
    }
103
104    // TODO(vadorovsky): Make a macro for that.
105    pub fn size_in_account(
106        height: usize,
107        changelog_size: usize,
108        roots_size: usize,
109        canopy_depth: usize,
110    ) -> usize {
111        // non-dynamic fields
112        Self::non_dyn_fields_size()
113        // filled_subtrees
114        + mem::size_of::<[u8; 32]>() * height
115        // changelog
116        + mem::size_of::<ChangelogEntry<HEIGHT>>() * changelog_size
117        // roots
118        + mem::size_of::<[u8; 32]>() * roots_size
119        // canopy
120        + mem::size_of::<[u8; 32]>() * Self::canopy_size(canopy_depth)
121    }
122
    /// Validates tree parameters, returning an error for the first violated
    /// constraint. Used by `new` (caller-provided values) and by
    /// `check_size_constraints` (values stored in the struct).
    ///
    /// The check order is significant: callers may rely on which error
    /// variant is reported first.
    fn check_size_constraints_new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // A tree of height 0 (runtime or compile-time) cannot hold leaves.
        if height == 0 || HEIGHT == 0 {
            return Err(ConcurrentMerkleTreeError::HeightZero);
        }
        // The runtime height must agree with the const generic.
        if height != HEIGHT {
            return Err(ConcurrentMerkleTreeError::InvalidHeight(HEIGHT));
        }
        if canopy_depth > height {
            return Err(ConcurrentMerkleTreeError::CanopyGeThanHeight);
        }
        // Changelog needs to be at least 1, because it's used for storing
        // Merkle paths in `append`/`append_batch`.
        if changelog_size == 0 {
            return Err(ConcurrentMerkleTreeError::ChangelogZero);
        }
        if roots_size == 0 {
            return Err(ConcurrentMerkleTreeError::RootsZero);
        }
        Ok(())
    }
148
    /// Validates the tree's own stored parameters against the same
    /// constraints enforced at construction time.
    fn check_size_constraints(&self) -> Result<(), ConcurrentMerkleTreeError> {
        Self::check_size_constraints_new(
            self.height,
            self.changelog.capacity(),
            self.roots.capacity(),
            self.canopy_depth,
        )
    }
157
158    pub fn new(
159        height: usize,
160        changelog_size: usize,
161        roots_size: usize,
162        canopy_depth: usize,
163    ) -> Result<Self, ConcurrentMerkleTreeError> {
164        Self::check_size_constraints_new(height, changelog_size, roots_size, canopy_depth)?;
165
166        let layout = Layout::new::<usize>();
167        let next_index = unsafe { alloc::alloc(layout) as *mut usize };
168        if next_index.is_null() {
169            handle_alloc_error(layout);
170        }
171        unsafe { *next_index = 0 };
172
173        let layout = Layout::new::<usize>();
174        let sequence_number = unsafe { alloc::alloc(layout) as *mut usize };
175        if sequence_number.is_null() {
176            handle_alloc_error(layout);
177        }
178        unsafe { *sequence_number = 0 };
179
180        let layout = Layout::new::<[u8; 32]>();
181        let rightmost_leaf = unsafe { alloc::alloc(layout) as *mut [u8; 32] };
182        if rightmost_leaf.is_null() {
183            handle_alloc_error(layout);
184        }
185        unsafe { *rightmost_leaf = [0u8; 32] };
186
187        Ok(Self {
188            height,
189            canopy_depth,
190
191            next_index,
192            sequence_number,
193            rightmost_leaf,
194
195            filled_subtrees: BoundedVec::with_capacity(height),
196            changelog: CyclicBoundedVec::with_capacity(changelog_size),
197            roots: CyclicBoundedVec::with_capacity(roots_size),
198            canopy: BoundedVec::with_capacity(Self::canopy_size(canopy_depth)),
199
200            _hasher: PhantomData,
201        })
202    }
203
204    /// Initializes the Merkle tree.
205    pub fn init(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
206        self.check_size_constraints()?;
207
208        // Initialize root.
209        let root = H::zero_bytes()[self.height];
210        self.roots.push(root);
211
212        // Initialize changelog.
213        let path = ChangelogPath::from_fn(|i| Some(H::zero_bytes()[i]));
214        let changelog_entry = ChangelogEntry { path, index: 0 };
215        self.changelog.push(changelog_entry);
216
217        // Initialize filled subtrees.
218        for i in 0..self.height {
219            self.filled_subtrees.push(H::zero_bytes()[i]).unwrap();
220        }
221
222        // Initialize canopy.
223        for level_i in 0..self.canopy_depth {
224            let level_nodes = 1 << (level_i + 1);
225            for _ in 0..level_nodes {
226                let node = H::zero_bytes()[self.height - level_i - 1];
227                self.canopy.push(node)?;
228            }
229        }
230
231        Ok(())
232    }
233
    /// Returns the index of the current changelog entry, i.e. the position
    /// of the most recently pushed entry in the cyclic changelog buffer.
    pub fn changelog_index(&self) -> usize {
        self.changelog.last_index()
    }
238
    /// Returns the index of the current root in the tree's cyclic root
    /// buffer (the position of the most recently pushed root).
    pub fn root_index(&self) -> usize {
        self.roots.last_index()
    }
243
    /// Returns the current root.
    ///
    /// # Panics
    ///
    /// Panics if the root buffer is empty — which should never happen for a
    /// tree that went through `init`.
    pub fn root(&self) -> [u8; 32] {
        // PANICS: This should never happen - there is always a root in the
        // tree and `self.root_index()` should always point to an existing index.
        self.roots[self.root_index()]
    }
250
251    pub fn current_index(&self) -> usize {
252        let next_index = self.next_index();
253        if next_index > 0 {
254            next_index - 1
255        } else {
256            next_index
257        }
258    }
259
    /// Returns the index at which the next appended leaf will be inserted.
    pub fn next_index(&self) -> usize {
        // SAFETY: `self.next_index` is allocated and initialized in `new`
        // (or set up by a zero-copy loader) and stays valid until `Drop`.
        unsafe { *self.next_index }
    }
263
264    fn inc_next_index(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
265        unsafe {
266            *self.next_index = self
267                .next_index()
268                .checked_add(1)
269                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
270        }
271        Ok(())
272    }
273
    /// Returns the tree's sequence number — incremented on every
    /// state-changing operation (leaf update or per-leaf append).
    pub fn sequence_number(&self) -> usize {
        // SAFETY: `self.sequence_number` is allocated and initialized in
        // `new` (or set up by a zero-copy loader) and stays valid until
        // `Drop`.
        unsafe { *self.sequence_number }
    }
277
278    fn inc_sequence_number(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
279        unsafe {
280            *self.sequence_number = self
281                .sequence_number()
282                .checked_add(1)
283                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
284        }
285        Ok(())
286    }
287
    /// Returns the hash of the most recently appended (rightmost) leaf.
    pub fn rightmost_leaf(&self) -> [u8; 32] {
        // SAFETY: `self.rightmost_leaf` is allocated and initialized in
        // `new` (or set up by a zero-copy loader) and stays valid until
        // `Drop`.
        unsafe { *self.rightmost_leaf }
    }
291
    /// Overwrites the cached rightmost-leaf hash.
    fn set_rightmost_leaf(&mut self, leaf: &[u8; 32]) {
        // SAFETY: the pointer was set up in `new` (or by a zero-copy
        // loader) and is valid for the lifetime of the struct.
        unsafe { *self.rightmost_leaf = *leaf };
    }
295
    /// Completes a partial Merkle `proof` (covering only the levels below
    /// the canopy) with sibling nodes read from the cached canopy, so the
    /// proof reaches all the way to the root.
    pub fn update_proof_from_canopy(
        &self,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // Index (in 1-based full-binary-tree numbering, root = 1) of the
        // ancestor of `leaf_index` at the bottom edge of the canopy.
        let mut node_index = ((1 << self.height) + leaf_index) >> (self.height - self.canopy_depth);
        while node_index > 1 {
            // `node_index - 2` maps to the canopy index.
            let canopy_index = node_index - 2;
            // Toggle between even/odd to select the *sibling* of the node
            // on the path — the sibling is the next proof element.
            let canopy_index = if canopy_index % 2 == 0 {
                canopy_index + 1
            } else {
                canopy_index - 1
            };
            proof.push(self.canopy[canopy_index])?;
            // Move one level up.
            node_index >>= 1;
        }

        Ok(())
    }
316
    /// Returns an iterator with changelog entries newer than the requested
    /// `changelog_index`.
    ///
    /// # Errors
    ///
    /// Propagates the error from `CyclicBoundedVec::iter_from` (presumably
    /// for an out-of-range index — confirm in `light_bounded_vec`).
    pub fn changelog_entries(
        &self,
        changelog_index: usize,
    ) -> Result<Skip<CyclicBoundedVecIterator<'_, ChangelogEntry<HEIGHT>>>, ConcurrentMerkleTreeError>
    {
        // `CyclicBoundedVec::iter_from` returns an iterator which includes also
        // the element indicated by the provided index.
        //
        // However, we want to iterate only on changelog events **newer** than
        // the provided one.
        //
        // Calling `iter_from(changelog_index + 1)` wouldn't work. If
        // `changelog_index` points to the newest changelog entry,
        // `changelog_index + 1` would point to the **oldest** changelog entry.
        // That would result in iterating over the whole changelog - from the
        // oldest to the newest element.
        Ok(self.changelog.iter_from(changelog_index)?.skip(1))
    }
337
338    /// Updates the given Merkle proof.
339    ///
340    /// The update is performed by checking whether there are any new changelog
341    /// entries and whether they contain changes which affect the current
342    /// proof. To be precise, for each changelog entry, it's done in the
343    /// following steps:
344    ///
345    /// * Check if the changelog entry was directly updating the `leaf_index`
346    ///   we are trying to update.
347    ///   * If no (we check that condition first, since it's more likely),
348    ///     it means that there is a change affecting the proof, but not the
349    ///     leaf.
350    ///     Check which element from our proof was affected by the change
351    ///     (using the `critbit_index` method) and update it (copy the new
352    ///     element from the changelog to our updated proof).
353    ///   * If yes, it means that the same leaf we want to update was already
354    ///     updated. In such case, updating the proof is not possible.
355    pub fn update_proof_from_changelog(
356        &self,
357        changelog_index: usize,
358        leaf_index: usize,
359        proof: &mut BoundedVec<[u8; 32]>,
360    ) -> Result<(), ConcurrentMerkleTreeError> {
361        // Iterate over changelog entries starting from the requested
362        // `changelog_index`.
363        //
364        // Since we are interested only in subsequent, new changelog entries,
365        // skip the first result.
366        for changelog_entry in self.changelog_entries(changelog_index)? {
367            changelog_entry.update_proof(leaf_index, proof)?;
368        }
369
370        Ok(())
371    }
372
373    /// Checks whether the given Merkle `proof` for the given `node` (with index
374    /// `i`) is valid. The proof is valid when computing parent node hashes using
375    /// the whole path of the proof gives the same result as the given `root`.
376    pub fn validate_proof(
377        &self,
378        leaf: &[u8; 32],
379        leaf_index: usize,
380        proof: &BoundedVec<[u8; 32]>,
381    ) -> Result<(), ConcurrentMerkleTreeError> {
382        let expected_root = self.root();
383        let computed_root = compute_root::<H>(leaf, leaf_index, proof)?;
384        if computed_root == expected_root {
385            Ok(())
386        } else {
387            Err(ConcurrentMerkleTreeError::InvalidProof(
388                expected_root,
389                computed_root,
390            ))
391        }
392    }
393
    /// Updates the leaf under `leaf_index` with the `new_leaf` value.
    ///
    /// 1. Computes the new path and root from `new_leaf` and Merkle proof
    ///    (`proof`).
    /// 2. Stores the new path as the latest changelog entry and increments the
    ///    latest changelog index.
    /// 3. Stores the latest root and increments the latest root index.
    /// 4. If new leaf is at the rightmost index, stores it as the new
    ///    rightmost leaf and stores the Merkle proof as the new rightmost
    ///    proof.
    ///
    /// Returns the new changelog index and sequence number.
    ///
    /// # Validation
    ///
    /// This method doesn't validate the proof. Caller is responsible for
    /// doing that before.
    fn update_leaf_in_tree(
        &mut self,
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        // Recompute the path from the new leaf up to the root, recording
        // each intermediate node in a fresh changelog entry.
        let mut changelog_entry = ChangelogEntry::default_with_index(leaf_index);
        let mut current_node = *new_leaf;
        for (level, sibling) in proof.iter().enumerate() {
            changelog_entry.path[level] = Some(current_node);
            current_node = compute_parent_node::<H>(&current_node, sibling, leaf_index, level)?;
        }

        self.inc_sequence_number()?;

        // After the loop, `current_node` holds the new root.
        self.roots.push(current_node);

        // Check if the leaf is the last leaf in the tree.
        if self.next_index() < (1 << self.height) {
            // Keep `filled_subtrees` consistent with the updated path, so
            // that subsequent appends use the fresh nodes.
            changelog_entry.update_proof(self.next_index(), &mut self.filled_subtrees)?;
            // Check if we updated the rightmost leaf.
            if leaf_index >= self.current_index() {
                self.set_rightmost_leaf(new_leaf);
            }
        }
        self.changelog.push(changelog_entry);

        if self.canopy_depth > 0 {
            self.update_canopy(self.changelog.last_index(), 1);
        }

        Ok((self.changelog.last_index(), self.sequence_number()))
    }
442
    /// Replaces the `old_leaf` under the `leaf_index` with a `new_leaf`, using
    /// the given `proof` and `changelog_index` (pointing to the changelog entry
    /// which was the newest at the time of preparing the proof).
    ///
    /// Returns the new changelog index and sequence number.
    #[inline(never)]
    pub fn update(
        &mut self,
        changelog_index: usize,
        old_leaf: &[u8; 32],
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        // The caller provides only the part of the proof below the canopy.
        let expected_proof_len = self.height - self.canopy_depth;
        if proof.len() != expected_proof_len {
            return Err(ConcurrentMerkleTreeError::InvalidProofLength(
                expected_proof_len,
                proof.len(),
            ));
        }
        // Only leaves which were actually appended can be updated.
        if leaf_index >= self.next_index() {
            return Err(ConcurrentMerkleTreeError::CannotUpdateEmpty);
        }

        // Complete the proof with the cached canopy nodes.
        if self.canopy_depth > 0 {
            self.update_proof_from_canopy(leaf_index, proof)?;
        }
        // Patch the proof with any changes made since it was generated.
        if changelog_index != self.changelog_index() {
            self.update_proof_from_changelog(changelog_index, leaf_index, proof)?;
        }
        self.validate_proof(old_leaf, leaf_index, proof)?;
        self.update_leaf_in_tree(new_leaf, leaf_index, proof)
    }
475
    /// Appends a new leaf to the tree.
    ///
    /// Returns the changelog index and sequence number of the append.
    pub fn append(&mut self, leaf: &[u8; 32]) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch(&[leaf])
    }
480
    /// Appends a new leaf to the tree. Saves Merkle proof to the provided
    /// `proof` reference.
    ///
    /// Returns the changelog index and sequence number of the append.
    pub fn append_with_proof(
        &mut self,
        leaf: &[u8; 32],
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_with_proofs(&[leaf], &mut [proof])
    }
490
    /// Appends a batch of new leaves to the tree.
    ///
    /// Returns the changelog index and sequence number of the *first*
    /// appended leaf.
    pub fn append_batch(
        &mut self,
        leaves: &[&[u8; 32]],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<false>(leaves, None)
    }
498
    /// Appends a batch of new leaves to the tree. Saves Merkle proofs to the
    /// provided `proofs` slice.
    ///
    /// Returns the changelog index and sequence number of the *first*
    /// appended leaf.
    pub fn append_batch_with_proofs(
        &mut self,
        leaves: &[&[u8; 32]],
        proofs: &mut [&mut BoundedVec<[u8; 32]>],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<true>(leaves, Some(proofs))
    }
508
    /// Appends a batch of new leaves to the tree.
    ///
    /// This method contains the common logic and is not intended for external
    /// use. Callers should choose between [`append_batch`](ConcurrentMerkleTree::append_batch)
    /// and [`append_batch_with_proofs`](ConcurrentMerkleTree::append_batch_with_proofs).
    ///
    /// Returns the changelog index and sequence number of the *first*
    /// appended leaf.
    fn append_batch_common<
        // The only purpose of this const generic is to force compiler to
        // produce separate functions, with and without proof.
        //
        // Unfortunately, using `Option` is not enough:
        //
        // https://godbolt.org/z/fEMMfMdPc
        // https://godbolt.org/z/T3dxnjMzz
        //
        // Using the const generic helps and ends up generating two separate
        // functions:
        //
        // https://godbolt.org/z/zGnM7Ycn1
        const WITH_PROOFS: bool,
    >(
        &mut self,
        leaves: &[&[u8; 32]],
        // Slice for saving Merkle proofs.
        //
        // Currently it's used only for indexed Merkle trees.
        mut proofs: Option<&mut [&mut BoundedVec<[u8; 32]>]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        if leaves.is_empty() {
            return Err(ConcurrentMerkleTreeError::EmptyLeaves);
        }
        // The whole batch must fit into the remaining leaf slots
        // (`1 << height` leaves total).
        if (self.next_index() + leaves.len() - 1) >= 1 << self.height {
            return Err(ConcurrentMerkleTreeError::TreeFull);
        }
        // Each appended leaf consumes one changelog entry; a batch larger
        // than the changelog would overwrite its own entries.
        if leaves.len() > self.changelog.capacity() {
            return Err(ConcurrentMerkleTreeError::BatchGreaterThanChangelog(
                leaves.len(),
                self.changelog.capacity(),
            ));
        }

        // Remember where the batch starts — returned to the caller and used
        // to sync the canopy at the end.
        let first_changelog_index = (self.changelog.last_index() + 1) % self.changelog.capacity();
        let first_sequence_number = self.sequence_number() + 1;

        for (leaf_i, leaf) in leaves.iter().enumerate() {
            let mut current_index = self.next_index();

            self.changelog
                .push(ChangelogEntry::<HEIGHT>::default_with_index(current_index));
            let changelog_index = self.changelog_index();

            let mut current_node = **leaf;

            self.changelog[changelog_index].path[0] = Some(**leaf);

            // Walk from the leaf up to the root, one level per iteration.
            for i in 0..self.height {
                let is_left = current_index % 2 == 0;

                if is_left {
                    // If the current node is on the left side:
                    //
                    //     U
                    //    / \
                    //  CUR  SIB
                    //  /     \
                    // N       N
                    //
                    // * The sibling (on the right) is a "zero node".
                    // * That "zero node" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(cur, Ø)`.
                    let empty_node = H::zero_bytes()[i];

                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(empty_node)?;
                    }

                    self.filled_subtrees[i] = current_node;

                    // For all non-terminal leaves, stop computing parents as
                    // soon as we are on the left side.
                    // Computation of the parent nodes is going to happen in
                    // the next iterations.
                    if leaf_i < leaves.len() - 1 {
                        break;
                    }

                    current_node = H::hashv(&[&current_node, &empty_node])?;
                } else {
                    // If the current node is on the right side:
                    //
                    //     U
                    //    / \
                    //  SIB  CUR
                    //  /     \
                    // N       N
                    // * The sibling on the left is a "filled subtree".
                    // * That "filled subtree" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(sib, cur)`.

                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(self.filled_subtrees[i])?;
                    }

                    current_node = H::hashv(&[&self.filled_subtrees[i], &current_node])?;
                }

                if i < self.height - 1 {
                    self.changelog[changelog_index].path[i + 1] = Some(current_node);
                }

                current_index /= 2;
            }

            if leaf_i == leaves.len() - 1 {
                self.roots.push(current_node);
            } else {
                // Photon returns only the sequence number and we use it in the
                // JS client and forester to derive the root index. Therefore,
                // we need to emit a "zero root" to not break that property.
                self.roots.push([0u8; 32]);
            }

            self.inc_next_index()?;
            self.inc_sequence_number()?;

            self.set_rightmost_leaf(leaf);
        }

        if self.canopy_depth > 0 {
            self.update_canopy(first_changelog_index, leaves.len());
        }

        Ok((first_changelog_index, first_sequence_number))
    }
644
645    fn update_canopy(&mut self, first_changelog_index: usize, num_leaves: usize) {
646        for i in 0..num_leaves {
647            let changelog_index = (first_changelog_index + i) % self.changelog.capacity();
648            for (i, path_node) in self.changelog[changelog_index]
649                .path
650                .iter()
651                .rev()
652                .take(self.canopy_depth)
653                .enumerate()
654            {
655                if let Some(path_node) = path_node {
656                    let level = self.height - i - 1;
657                    let index = (1 << (self.height - level))
658                        + (self.changelog[changelog_index].index >> level);
659                    // `index - 2` maps to the canopy index.
660                    self.canopy[(index - 2) as usize] = *path_node;
661                }
662            }
663        }
664    }
665}
666
impl<H, const HEIGHT: usize> Drop for ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    // Frees the three heap-allocated scalar fields. The layouts mirror the
    // allocations in `new` exactly.
    //
    // NOTE(review): this assumes the pointers were allocated by `new`;
    // confirm that zero-copy views (see the `zero_copy` module) never reach
    // this `Drop` with borrowed pointers.
    fn drop(&mut self) {
        // SAFETY: allocated in `new` with `Layout::new::<usize>()`.
        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.next_index as *mut u8, layout) };

        // SAFETY: allocated in `new` with `Layout::new::<usize>()`.
        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.sequence_number as *mut u8, layout) };

        // SAFETY: allocated in `new` with `Layout::new::<[u8; 32]>()`.
        let layout = Layout::new::<[u8; 32]>();
        unsafe { alloc::dealloc(self.rightmost_leaf as *mut u8, layout) };
    }
}
682
683impl<H, const HEIGHT: usize> PartialEq for ConcurrentMerkleTree<H, HEIGHT>
684where
685    H: Hasher,
686{
687    fn eq(&self, other: &Self) -> bool {
688        self.height.eq(&other.height)
689            && self.canopy_depth.eq(&other.canopy_depth)
690            && self.next_index().eq(&other.next_index())
691            && self.sequence_number().eq(&other.sequence_number())
692            && self.rightmost_leaf().eq(&other.rightmost_leaf())
693            && self
694                .filled_subtrees
695                .as_slice()
696                .eq(other.filled_subtrees.as_slice())
697            && self.changelog.iter().eq(other.changelog.iter())
698            && self.roots.iter().eq(other.roots.iter())
699            && self.canopy.as_slice().eq(other.canopy.as_slice())
700    }
701}