light_concurrent_merkle_tree/zero_copy.rs

use std::{
    marker::PhantomData,
    mem,
    ops::{Deref, DerefMut},
};

use light_bounded_vec::{
    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
};
use light_hasher::Hasher;
use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
use memoffset::{offset_of, span_of};

use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};

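/// A read-only wrapper which interprets a borrowed byte buffer as a
/// `ConcurrentMerkleTree`, without copying any of the tree's data.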
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopy<'a, H, const HEIGHT: usize>
where
    H: Hasher,
{
    merkle_tree: mem::ManuallyDrop<ConcurrentMerkleTree<H, HEIGHT>>,
    // This field ensures that the wrapper does not outlive the buffer it
    // borrows from.
    _bytes: &'a [u8],
}

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
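    /// Interprets `bytes` as a `ConcurrentMerkleTree`, reading the primitive
    /// fields by value and the dynamic fields through raw pointers into the
    /// buffer. Returns the tree together with the offset of the first byte
    /// past the tree's data.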
    pub fn struct_from_bytes_zero_copy(
        bytes: &'a [u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );

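        // The remaining fields live behind raw pointers into the buffer;
        // each read below advances `offset` past the field it just read.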
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);

        let next_index = unsafe { read_ptr_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_ptr_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_ptr_at(bytes, &mut offset) };
        let filled_subtrees_metadata = unsafe { read_ptr_at(bytes, &mut offset) };
        let changelog_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let roots_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let canopy_metadata = unsafe { read_ptr_at(bytes, &mut offset) };

        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            unsafe { (*changelog_metadata).capacity() },
            unsafe { (*roots_metadata).capacity() },
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

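        // Reconstruct the dynamic vectors directly on top of the buffer;
        // their element data is borrowed, not copied.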
        let filled_subtrees = unsafe {
            BoundedVec::from_raw_parts(
                filled_subtrees_metadata,
                read_array_like_ptr_at(bytes, &mut offset, height),
            )
        };
        let changelog = unsafe {
            CyclicBoundedVec::from_raw_parts(
                changelog_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*changelog_metadata).capacity()),
            )
        };
        let roots = unsafe {
            CyclicBoundedVec::from_raw_parts(
                roots_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*roots_metadata).capacity()),
            )
        };
        let canopy = unsafe {
            BoundedVec::from_raw_parts(
                canopy_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*canopy_metadata).capacity()),
            )
        };

        let merkle_tree = ConcurrentMerkleTree {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees,
            changelog,
            roots,
            canopy,
            _hasher: PhantomData,
        };
        merkle_tree.check_size_constraints()?;

        Ok((merkle_tree, offset))
    }

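    /// Creates a read-only, zero-copy view of the tree stored in `bytes`.
    ///
    /// A minimal sketch, assuming a buffer previously initialized with
    /// [`ConcurrentMerkleTreeZeroCopyMut::from_bytes_zero_copy_init`] and the
    /// `Poseidon` hasher used in the tests below:
    ///
    /// ```ignore
    /// let mt = ConcurrentMerkleTreeZeroCopy::<Poseidon, 22>::from_bytes_zero_copy(&bytes)?;
    /// assert_eq!(mt.next_index(), 0);
    /// ```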
    pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        // `struct_from_bytes_zero_copy` already checks the size constraints.
        let (merkle_tree, _) = Self::struct_from_bytes_zero_copy(bytes)?;

        Ok(Self {
            merkle_tree: mem::ManuallyDrop::new(merkle_tree),
            _bytes: bytes,
        })
    }
}

impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}

impl<'a, H, const HEIGHT: usize> Drop for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    fn drop(&mut self) {
        // SAFETY: Don't do anything here! Why?
        //
        // * Primitive fields of `ConcurrentMerkleTree` implement `Copy`,
        //   therefore `drop()` has no effect on them - Rust drops them when
        //   they go out of scope.
        // * Don't drop the dynamic fields (`filled_subtrees`, `roots` etc.).
        //   In `ConcurrentMerkleTreeZeroCopy`, they are backed by buffers
        //   provided by the caller, which are eventually deallocated by the
        //   caller. Performing another `drop()` here would result in a
        //   double `free()`, which would abort the program (with either
        //   `SIGABRT` or `SIGSEGV`).
    }
}

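/// Mutable counterpart of [`ConcurrentMerkleTreeZeroCopy`]. In addition to
/// read access through `Deref`, it provides mutable access through `DerefMut`
/// and can initialize a fresh tree inside a byte buffer.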
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopyMut<'a, H, const HEIGHT: usize>(
    ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>,
)
where
    H: Hasher;

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
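    /// Creates a mutable, zero-copy view of an already initialized tree
    /// stored in `bytes`.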
    pub fn from_bytes_zero_copy_mut(
        bytes: &'a mut [u8],
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Ok(Self(ConcurrentMerkleTreeZeroCopy::from_bytes_zero_copy(
            bytes,
        )?))
    }

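    /// Writes the non-dynamic fields (primitives and the vectors' metadata)
    /// into `bytes` and returns the offset at which the dynamic data begins.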
    pub fn fill_non_dyn_fields_in_buffer(
        bytes: &mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<usize, ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
            .copy_from_slice(&height.to_le_bytes());
        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
            .copy_from_slice(&canopy_depth.to_le_bytes());

        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        // next_index
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // sequence_number
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // rightmost_leaf
        write_at::<[u8; 32]>(bytes, &H::zero_bytes()[0], &mut offset);
        // filled_subtrees (metadata)
        let filled_subtrees_metadata = BoundedVecMetadata::new(height);
        write_at::<BoundedVecMetadata>(bytes, &filled_subtrees_metadata.to_le_bytes(), &mut offset);
        // changelog (metadata)
        let changelog_metadata = CyclicBoundedVecMetadata::new(changelog_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &changelog_metadata.to_le_bytes(), &mut offset);
        // roots (metadata)
        let roots_metadata = CyclicBoundedVecMetadata::new(roots_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &roots_metadata.to_le_bytes(), &mut offset);
        // canopy (metadata)
        let canopy_size = ConcurrentMerkleTree::<H, HEIGHT>::canopy_size(canopy_depth);
        let canopy_metadata = BoundedVecMetadata::new(canopy_size);
        write_at::<BoundedVecMetadata>(bytes, &canopy_metadata.to_le_bytes(), &mut offset);

        Ok(offset)
    }

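    /// Initializes a brand-new tree inside `bytes` and returns a mutable
    /// wrapper around it.
    ///
    /// A minimal usage sketch, mirroring the tests below; the `Poseidon`
    /// hasher and the tree parameters are example choices:
    ///
    /// ```ignore
    /// let mut bytes = vec![
    ///     0u8;
    ///     ConcurrentMerkleTree::<Poseidon, 22>::size_in_account(22, 256, 256, 10)
    /// ];
    /// let mut mt = ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 22>::from_bytes_zero_copy_init(
    ///     bytes.as_mut_slice(),
    ///     22,  // height
    ///     10,  // canopy depth
    ///     256, // changelog capacity
    ///     256, // roots capacity
    /// )?;
    /// mt.init()?;
    /// mt.append(&[1u8; 32])?;
    /// ```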
    pub fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::fill_non_dyn_fields_in_buffer(
            bytes,
            height,
            canopy_depth,
            changelog_capacity,
            roots_capacity,
        )?;
        Self::from_bytes_zero_copy_mut(bytes)
    }
}

impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.0.merkle_tree
    }
}

impl<'a, H, const HEIGHT: usize> DerefMut for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.merkle_tree
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};

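    /// Compares a regular heap-allocated tree (`mt_1`) with a zero-copy tree
    /// living in a byte buffer, asserting that both stay identical across a
    /// series of random appends.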
    fn load_from_bytes<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();

        // Fill the buffer with random bytes - the `*_init` method should
        // initialize the buffer gracefully and the randomness shouldn't
        // cause undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());

        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();

            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);

            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);

            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);

            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);

            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );

            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }

        let mut rng = thread_rng();

        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();

            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();

            assert_eq!(mt_1, *mt_2);
        }
    }

    #[test]
    fn test_load_from_bytes_22_256_256_0_1024() {
        load_from_bytes::<22, 256, 256, 0, 1024>()
    }

    #[test]
    fn test_load_from_bytes_22_256_256_10_1024() {
        load_from_bytes::<22, 256, 256, 10, 1024>()
    }
}