light_concurrent_merkle_tree/zero_copy.rs

use std::{
    marker::PhantomData,
    mem,
    ops::{Deref, DerefMut},
};

use light_bounded_vec::{
    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
};
use light_hasher::Hasher;
use memoffset::{offset_of, span_of};

use crate::{
    errors::ConcurrentMerkleTreeError,
    offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at},
    ConcurrentMerkleTree,
};

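/// A read-only, zero-copy view over a `ConcurrentMerkleTree` stored in a
/// caller-provided byte buffer. The tree is not copied; its dynamic fields
/// point directly into the buffer.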
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopy<'a, H, const HEIGHT: usize>
where
    H: Hasher,
{
    merkle_tree: mem::ManuallyDrop<ConcurrentMerkleTree<H, HEIGHT>>,
    // The purpose of this field is to ensure that the wrapper does not
    // outlive the buffer.
    _bytes: &'a [u8],
}

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
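    /// Reconstructs a `ConcurrentMerkleTree` from the raw `bytes` buffer
    /// without copying: primitive fields are read in place and the dynamic
    /// vectors are rebuilt from raw pointers into the buffer. Returns the
    /// tree together with the offset at which the dynamic data ends.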
    pub fn struct_from_bytes_zero_copy(
        bytes: &'a [u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

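        // `height` and `canopy_depth` are plain `usize` fields, so their
        // values can be read directly from their spans in the struct layout.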
        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );

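        // The remaining fixed-size fields are accessed through raw pointers
        // into the buffer; `read_ptr_at` advances `offset` past each field.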
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);

        let next_index = unsafe { read_ptr_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_ptr_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_ptr_at(bytes, &mut offset) };
        let filled_subtrees_metadata = unsafe { read_ptr_at(bytes, &mut offset) };
        let changelog_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let roots_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let canopy_metadata = unsafe { read_ptr_at(bytes, &mut offset) };

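        // With the changelog and roots capacities known, the full expected
        // account size (including the dynamic data) can now be validated.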
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            unsafe { (*changelog_metadata).capacity() },
            unsafe { (*roots_metadata).capacity() },
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

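        // Rebuild each vector from its metadata pointer and a pointer to its
        // data region inside the buffer; no data is copied.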
        let filled_subtrees = unsafe {
            BoundedVec::from_raw_parts(
                filled_subtrees_metadata,
                read_array_like_ptr_at(bytes, &mut offset, height),
            )
        };
        let changelog = unsafe {
            CyclicBoundedVec::from_raw_parts(
                changelog_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*changelog_metadata).capacity()),
            )
        };
        let roots = unsafe {
            CyclicBoundedVec::from_raw_parts(
                roots_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*roots_metadata).capacity()),
            )
        };
        let canopy = unsafe {
            BoundedVec::from_raw_parts(
                canopy_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*canopy_metadata).capacity()),
            )
        };

        let merkle_tree = ConcurrentMerkleTree {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees,
            changelog,
            roots,
            canopy,
            _hasher: PhantomData,
        };
        merkle_tree.check_size_constraints()?;

        Ok((merkle_tree, offset))
    }

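    /// Creates a read-only zero-copy wrapper around the tree stored in
    /// `bytes`.
    ///
    /// A minimal usage sketch (not compiled as a doctest), assuming a
    /// Poseidon hasher and an already initialized account buffer
    /// `account_data`:
    ///
    /// ```ignore
    /// use light_hasher::Poseidon;
    ///
    /// let mt = ConcurrentMerkleTreeZeroCopy::<Poseidon, 22>::from_bytes_zero_copy(
    ///     account_data.as_slice(),
    /// )?;
    /// let root = mt.root();
    /// ```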
    pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        let (merkle_tree, _) = Self::struct_from_bytes_zero_copy(bytes)?;
        merkle_tree.check_size_constraints()?;

        Ok(Self {
            merkle_tree: mem::ManuallyDrop::new(merkle_tree),
            _bytes: bytes,
        })
    }
}

impl<H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopy<'_, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}

impl<H, const HEIGHT: usize> Drop for ConcurrentMerkleTreeZeroCopy<'_, H, HEIGHT>
where
    H: Hasher,
{
    fn drop(&mut self) {
        // SAFETY: Intentionally a no-op. Why?
        //
        // * Primitive fields of `ConcurrentMerkleTree` implement `Copy`,
        //   therefore `drop()` has no effect on them - Rust drops them when
        //   they go out of scope.
        // * The dynamic fields (`filled_subtrees`, `roots` etc.) must not be
        //   dropped. In `ConcurrentMerkleTreeZeroCopy`, they are backed by
        //   buffers provided by the caller, which eventually deallocates
        //   them. Performing another `drop()` here would result in a double
        //   `free()`, which would abort the program (either with `SIGABRT`
        //   or `SIGSEGV`).
    }
}

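/// A mutable zero-copy view over a `ConcurrentMerkleTree`. Wraps the
/// read-only view and additionally allows initializing and mutating the tree
/// in place.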
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopyMut<'a, H, const HEIGHT: usize>(
    ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>,
)
where
    H: Hasher;

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
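    /// Creates a mutable zero-copy wrapper around the tree already stored in
    /// `bytes`.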
    pub fn from_bytes_zero_copy_mut(
        bytes: &'a mut [u8],
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Ok(Self(ConcurrentMerkleTreeZeroCopy::from_bytes_zero_copy(
            bytes,
        )?))
    }

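    /// Writes the non-dynamic (fixed-size) fields (`height`, `canopy_depth`,
    /// the zeroed counters and the vector metadata) into `bytes` and returns
    /// the offset at which the dynamic vector data begins.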
    pub fn fill_non_dyn_fields_in_buffer(
        bytes: &mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<usize, ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
            .copy_from_slice(&height.to_le_bytes());
        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
            .copy_from_slice(&canopy_depth.to_le_bytes());

        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        // next_index
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // sequence_number
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // rightmost_leaf
        write_at::<[u8; 32]>(bytes, &H::zero_bytes()[0], &mut offset);
        // filled_subtrees (metadata)
        let filled_subtrees_metadata = BoundedVecMetadata::new(height);
        write_at::<BoundedVecMetadata>(bytes, &filled_subtrees_metadata.to_le_bytes(), &mut offset);
        // changelog (metadata)
        let changelog_metadata = CyclicBoundedVecMetadata::new(changelog_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &changelog_metadata.to_le_bytes(), &mut offset);
        // roots (metadata)
        let roots_metadata = CyclicBoundedVecMetadata::new(roots_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &roots_metadata.to_le_bytes(), &mut offset);
        // canopy (metadata)
        let canopy_size = ConcurrentMerkleTree::<H, HEIGHT>::canopy_size(canopy_depth);
        let canopy_metadata = BoundedVecMetadata::new(canopy_size);
        write_at::<BoundedVecMetadata>(bytes, &canopy_metadata.to_le_bytes(), &mut offset);

        Ok(offset)
    }

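    /// Initializes the non-dynamic fields in `bytes`, then creates a mutable
    /// zero-copy wrapper on top of the buffer.
    ///
    /// A minimal usage sketch (not compiled as a doctest), assuming a
    /// Poseidon hasher and a buffer `bytes` of at least `size_in_account`
    /// bytes:
    ///
    /// ```ignore
    /// use light_hasher::Poseidon;
    ///
    /// let mut mt = ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 22>::from_bytes_zero_copy_init(
    ///     bytes.as_mut_slice(),
    ///     22,  // height
    ///     10,  // canopy_depth
    ///     256, // changelog_capacity
    ///     256, // roots_capacity
    /// )?;
    /// mt.init()?;
    /// ```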
    pub fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::fill_non_dyn_fields_in_buffer(
            bytes,
            height,
            canopy_depth,
            changelog_capacity,
            roots_capacity,
        )?;
        Self::from_bytes_zero_copy_mut(bytes)
    }
}

impl<H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopyMut<'_, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.0.merkle_tree
    }
}

impl<H, const HEIGHT: usize> DerefMut for ConcurrentMerkleTreeZeroCopyMut<'_, H, HEIGHT>
where
    H: Hasher,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.merkle_tree
    }
}

#[cfg(test)]
mod test {
    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};

    use super::*;

    fn load_from_bytes<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();

        // Create a buffer and fill it with random bytes - the `*_init` method
        // should initialize the buffer gracefully, and the randomness must
        // not cause undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());

        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();

            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);

            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);

            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);

            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);

            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );

            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }

        let mut rng = thread_rng();

        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();

            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();

            assert_eq!(mt_1, *mt_2);
        }
    }

    #[test]
    fn test_load_from_bytes_22_256_256_0_1024() {
        load_from_bytes::<22, 256, 256, 0, 1024>()
    }

    #[test]
    fn test_load_from_bytes_22_256_256_10_1024() {
        load_from_bytes::<22, 256, 256, 10, 1024>()
    }
}