1use light_bloom_filter::BloomFilter;
2use light_hasher::{Hasher, Poseidon};
3use light_zero_copy::vec::ZeroCopyVecU64;
4use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
5
6use crate::{errors::BatchedMerkleTreeError, BorshDeserialize, BorshSerialize};
7
/// Lifecycle state of a [`Batch`], stored on-chain as a `u64` discriminant
/// (see the `From` conversions below).
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
#[repr(u64)]
pub enum BatchState {
    /// Batch is accepting new values (discriminant 0).
    Fill,
    /// All zkp batches have been inserted into the Merkle tree (discriminant 1).
    Inserted,
    /// Batch is full; no more values can be added (discriminant 2).
    Full,
}
18
19impl From<u64> for BatchState {
20 fn from(value: u64) -> Self {
21 match value {
22 0 => BatchState::Fill,
23 1 => BatchState::Inserted,
24 2 => BatchState::Full,
25 _ => panic!("Invalid BatchState value"),
26 }
27 }
28}
29
30impl From<BatchState> for u64 {
31 fn from(val: BatchState) -> Self {
32 val as u64
33 }
34}
35
/// Bookkeeping for one batch of a batched Merkle tree / queue.
///
/// `#[repr(C)]` + zerocopy derives: this struct is read/written directly
/// from account bytes, so field order, sizes, and padding are part of the
/// on-chain layout and must not change.
#[repr(C)]
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    KnownLayout,
    Immutable,
    IntoBytes,
    FromBytes,
    Default,
    BorshSerialize,
    BorshDeserialize,
)]
pub struct Batch {
    /// Number of values inserted into the currently filling zkp batch
    /// (reset to 0 each time a zkp batch completes).
    num_inserted: u64,
    /// Current [`BatchState`] stored as its `u64` discriminant.
    state: u64,
    /// Number of completely filled zkp batches.
    pub(crate) num_full_zkp_batches: u64,
    /// Number of zkp batches already inserted into the Merkle tree.
    num_inserted_zkp_batches: u64,
    /// Number of hash iterations used by the bloom filter.
    pub num_iters: u64,
    /// Bloom filter capacity passed to `BloomFilter::new`
    /// (presumably in bits — tests pair 160_000 with 20_000-byte stores; confirm).
    pub bloom_filter_capacity: u64,
    /// Total number of values the batch can hold.
    pub batch_size: u64,
    /// Number of values per zkp proof batch.
    pub zkp_batch_size: u64,
    /// Set to `sequence_number + root_history_length` when the batch becomes
    /// fully inserted; cleared when the batch is reset to `Fill`.
    pub sequence_number: u64,
    /// Leaf index of the first element covered by this batch.
    pub start_index: u64,
    /// Slot at which the first value was inserted into this batch.
    pub start_slot: u64,
    /// Root history index recorded when the batch was fully inserted.
    pub root_index: u32,
    /// 1 once `start_slot` has been recorded, else 0 (bool as u8 for layout).
    start_slot_is_set: u8,
    /// 1 if the batch's bloom filter has been zeroed out, else 0.
    bloom_filter_is_zeroed: u8,
    /// Explicit padding to keep the struct's size/alignment stable.
    _padding: [u8; 2],
}
93
impl Batch {
    /// Creates a new batch in [`BatchState::Fill`] with all counters zeroed
    /// and the given sizing parameters.
    pub fn new(
        num_iters: u64,
        bloom_filter_capacity: u64,
        batch_size: u64,
        zkp_batch_size: u64,
        start_index: u64,
    ) -> Self {
        Batch {
            num_iters,
            bloom_filter_capacity,
            batch_size,
            num_inserted: 0,
            state: BatchState::Fill.into(),
            zkp_batch_size,
            num_full_zkp_batches: 0,
            num_inserted_zkp_batches: 0,
            sequence_number: 0,
            root_index: 0,
            start_index,
            start_slot: 0,
            start_slot_is_set: 0,
            bloom_filter_is_zeroed: 0,
            _padding: [0u8; 2],
        }
    }

    /// Returns the current [`BatchState`] decoded from its `u64` field.
    pub fn get_state(&self) -> BatchState {
        self.state.into()
    }

    /// Returns true if the batch's bloom filter has been zeroed out.
    pub fn bloom_filter_is_zeroed(&self) -> bool {
        self.bloom_filter_is_zeroed == 1
    }

    /// Marks the bloom filter as zeroed.
    pub fn set_bloom_filter_to_zeroed(&mut self) {
        self.bloom_filter_is_zeroed = 1;
    }

    /// Marks the bloom filter as not zeroed.
    pub fn set_bloom_filter_to_not_zeroed(&mut self) {
        self.bloom_filter_is_zeroed = 0;
    }

    /// Returns true once `start_slot` has been recorded for this batch.
    pub fn start_slot_is_set(&self) -> bool {
        self.start_slot_is_set == 1
    }

    /// Records the slot of the first insertion. Subsequent calls are no-ops,
    /// so `start_slot` always reflects the first value added to the batch.
    pub fn set_start_slot(&mut self, start_slot: &u64) {
        if !self.start_slot_is_set() {
            self.start_slot = *start_slot;
            self.start_slot_is_set = 1;
        }
    }

    /// Transitions `Inserted` -> `Fill`, resetting all per-round bookkeeping
    /// so the batch can be reused. Optionally assigns a new `start_index`.
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::BatchNotReady`] if the batch is not
    /// in [`BatchState::Inserted`].
    pub fn advance_state_to_fill(
        &mut self,
        start_index: Option<u64>,
    ) -> Result<(), BatchedMerkleTreeError> {
        if self.get_state() == BatchState::Inserted {
            self.state = BatchState::Fill.into();
            self.set_bloom_filter_to_not_zeroed();
            self.sequence_number = 0;
            self.root_index = 0;
            self.num_inserted_zkp_batches = 0;
            self.start_slot_is_set = 0;
            self.start_slot = 0;
            if let Some(start_index) = start_index {
                self.start_index = start_index;
            }
            self.num_full_zkp_batches = 0;
        } else {
            #[cfg(feature = "solana")]
            solana_msg::msg!(
                "Batch is in incorrect state {} expected BatchState::Inserted 1",
                self.state
            );
            return Err(BatchedMerkleTreeError::BatchNotReady);
        }
        Ok(())
    }

    /// Transitions `Full` -> `Inserted`.
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::BatchNotReady`] if the batch is not
    /// in [`BatchState::Full`].
    pub fn advance_state_to_inserted(&mut self) -> Result<(), BatchedMerkleTreeError> {
        if self.get_state() == BatchState::Full {
            self.state = BatchState::Inserted.into();
        } else {
            #[cfg(feature = "solana")]
            solana_msg::msg!(
                "Batch is in incorrect state {} expected BatchState::Full 2",
                self.state
            );
            return Err(BatchedMerkleTreeError::BatchNotReady);
        }
        Ok(())
    }

    /// Transitions `Fill` -> `Full`.
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::BatchNotReady`] if the batch is not
    /// in [`BatchState::Fill`].
    pub fn advance_state_to_full(&mut self) -> Result<(), BatchedMerkleTreeError> {
        if self.get_state() == BatchState::Fill {
            self.state = BatchState::Full.into();
        } else {
            #[cfg(feature = "solana")]
            solana_msg::msg!(
                "Batch is in incorrect state {} expected BatchState::Fill 0",
                self.state
            );
            return Err(BatchedMerkleTreeError::BatchNotReady);
        }
        Ok(())
    }

    /// Returns the index of the next zkp batch ready to be inserted into the
    /// tree (i.e. the number of zkp batches already inserted).
    ///
    /// # Errors
    /// - [`BatchedMerkleTreeError::BatchAlreadyInserted`] if the batch is
    ///   fully inserted.
    /// - [`BatchedMerkleTreeError::BatchNotReady`] if no filled zkp batch is
    ///   pending insertion.
    pub fn get_first_ready_zkp_batch(&self) -> Result<u64, BatchedMerkleTreeError> {
        if self.get_state() == BatchState::Inserted {
            Err(BatchedMerkleTreeError::BatchAlreadyInserted)
        } else if self.batch_is_ready_to_insert() {
            Ok(self.num_inserted_zkp_batches)
        } else {
            Err(BatchedMerkleTreeError::BatchNotReady)
        }
    }

    /// True if at least one filled zkp batch has not yet been inserted.
    pub fn batch_is_ready_to_insert(&self) -> bool {
        self.num_full_zkp_batches > self.num_inserted_zkp_batches
    }

    /// Number of filled zkp batches still awaiting insertion into the tree.
    pub fn get_num_ready_zkp_updates(&self) -> u64 {
        self.num_full_zkp_batches
            .saturating_sub(self.num_inserted_zkp_batches)
    }

    /// Number of values inserted into the currently filling zkp batch
    /// (0 whenever a zkp batch has just completed).
    pub fn get_num_inserted_zkp_batch(&self) -> u64 {
        self.num_inserted
    }

    /// Index of the zkp batch currently being filled
    /// (equals the number of completely filled zkp batches).
    pub fn get_current_zkp_batch_index(&self) -> u64 {
        self.num_full_zkp_batches
    }

    /// Number of zkp batches already inserted into the Merkle tree.
    pub fn get_num_inserted_zkps(&self) -> u64 {
        self.num_inserted_zkp_batches
    }

    /// Total number of elements already inserted into the tree
    /// (inserted zkp batches * zkp batch size).
    pub fn get_num_elements_inserted_into_tree(&self) -> u64 {
        self.num_inserted_zkp_batches * self.zkp_batch_size
    }

    /// Total number of elements inserted into this batch: all filled zkp
    /// batches plus the partially filled current one.
    pub fn get_num_inserted_elements(&self) -> u64 {
        self.num_full_zkp_batches * self.zkp_batch_size + self.num_inserted
    }

    /// Number of zkp batches this batch is divided into.
    /// NOTE(review): assumes `batch_size` is a multiple of `zkp_batch_size`.
    pub fn get_num_zkp_batches(&self) -> u64 {
        self.batch_size / self.zkp_batch_size
    }

    /// Required capacity of the hash chain store: one entry per zkp batch.
    pub fn get_num_hash_chain_store(&self) -> usize {
        self.get_num_zkp_batches() as usize
    }

    /// Converts a tree-wide leaf index into this batch's value-store index.
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::LeafIndexNotInBatch`] if the leaf
    /// index does not fall inside this batch's inserted range.
    pub fn get_value_index_in_batch(&self, leaf_index: u64) -> Result<u64, BatchedMerkleTreeError> {
        self.check_leaf_index_exists(leaf_index)?;
        let index = leaf_index
            .checked_sub(self.start_index)
            .ok_or(BatchedMerkleTreeError::LeafIndexNotInBatch)?;
        Ok(index)
    }

    /// Inserts a value into the batch (output-queue path): records the start
    /// slot, folds the value into the current hash chain, and appends it to
    /// the value store.
    pub fn store_and_hash_value(
        &mut self,
        value: &[u8; 32],
        value_store: &mut ZeroCopyVecU64<[u8; 32]>,
        hash_chain_store: &mut ZeroCopyVecU64<[u8; 32]>,
        start_slot: &u64,
    ) -> Result<(), BatchedMerkleTreeError> {
        self.set_start_slot(start_slot);
        self.add_to_hash_chain(value, hash_chain_store)?;
        value_store.push(*value)?;
        Ok(())
    }

    /// Inserts a value into the batch (input/nullifier-queue path):
    /// folds `hash_chain_value` into the current hash chain, inserts
    /// `bloom_filter_value` into the bloom filter selected by
    /// `bloom_filter_index`, and verifies the value is NOT already present in
    /// the other bloom filter (prevents double insertion across batches).
    pub fn insert(
        &mut self,
        bloom_filter_value: &[u8; 32],
        hash_chain_value: &[u8; 32],
        bloom_filter_stores: &mut [&mut [u8]],
        hash_chain_store: &mut ZeroCopyVecU64<[u8; 32]>,
        bloom_filter_index: usize,
        start_slot: &u64,
    ) -> Result<(), BatchedMerkleTreeError> {
        self.set_start_slot(start_slot);
        self.add_to_hash_chain(hash_chain_value, hash_chain_store)?;
        {
            // Exactly two bloom filter stores are expected; pick the one not
            // being inserted into for the non-inclusion check.
            let other_bloom_filter_index = if bloom_filter_index == 0 { 1 } else { 0 };

            // Insert into the active bloom filter; `BloomFilter::insert`
            // itself errors if the value is already contained there.
            BloomFilter::new(
                self.num_iters as usize,
                self.bloom_filter_capacity,
                bloom_filter_stores[bloom_filter_index],
            )?
            .insert(bloom_filter_value)?;
            Self::check_non_inclusion(
                self.num_iters as usize,
                self.bloom_filter_capacity,
                bloom_filter_value,
                bloom_filter_stores[other_bloom_filter_index],
            )?;
        }
        Ok(())
    }

    /// Folds `value` into the hash chain of the current zkp batch.
    ///
    /// The first value of a zkp batch starts a new chain entry; subsequent
    /// values extend it via `Poseidon::hashv(prev, value)`. When the zkp
    /// batch fills up, the full-batch counter advances, and when all zkp
    /// batches are full the batch transitions to [`BatchState::Full`].
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::BatchNotReady`] if the batch is not
    /// in [`BatchState::Fill`].
    pub fn add_to_hash_chain(
        &mut self,
        value: &[u8; 32],
        hash_chain_store: &mut ZeroCopyVecU64<[u8; 32]>,
    ) -> Result<(), BatchedMerkleTreeError> {
        if self.get_state() != BatchState::Fill {
            return Err(BatchedMerkleTreeError::BatchNotReady);
        }
        let start_new_hash_chain = self.num_inserted == 0;
        if start_new_hash_chain {
            hash_chain_store.push(*value)?;
        } else if let Some(last_hash_chain) = hash_chain_store.last_mut() {
            let hash_chain = Poseidon::hashv(&[last_hash_chain, value.as_slice()])?;
            *last_hash_chain = hash_chain;
        } else {
            // num_inserted > 0 implies at least one chain entry exists.
            unreachable!();
        }
        self.num_inserted += 1;

        let zkp_batch_is_full = self.num_inserted == self.zkp_batch_size;
        if zkp_batch_is_full {
            self.num_full_zkp_batches += 1;
            // Reset so the next value starts a fresh hash chain entry.
            self.num_inserted = 0;

            let batch_is_full = self.num_full_zkp_batches == self.get_num_zkp_batches();
            if batch_is_full {
                self.advance_state_to_full()?;
            }
        }

        Ok(())
    }

    /// Checks that `value` is not contained in the given bloom filter store.
    ///
    /// # Errors
    /// Returns [`BatchedMerkleTreeError::NonInclusionCheckFailed`] if the
    /// bloom filter (possibly falsely, per bloom filter semantics) reports
    /// the value as contained.
    pub fn check_non_inclusion(
        num_iters: usize,
        bloom_filter_capacity: u64,
        value: &[u8; 32],
        store: &mut [u8],
    ) -> Result<(), BatchedMerkleTreeError> {
        let mut bloom_filter = BloomFilter::new(num_iters, bloom_filter_capacity, store)?;
        if bloom_filter.contains(value) {
            return Err(BatchedMerkleTreeError::NonInclusionCheckFailed);
        }
        Ok(())
    }

    /// Records that one zkp batch has been inserted into the Merkle tree.
    /// When the last zkp batch is inserted, the batch advances to
    /// [`BatchState::Inserted`] and records `root_index` plus the sequence
    /// number (insertion sequence + root history length) after which reuse is
    /// allowed. Returns the resulting state.
    ///
    /// # Errors
    /// Propagates the readiness errors of [`Self::get_first_ready_zkp_batch`].
    pub fn mark_as_inserted_in_merkle_tree(
        &mut self,
        sequence_number: u64,
        root_index: u32,
        root_history_length: u32,
    ) -> Result<BatchState, BatchedMerkleTreeError> {
        // Fails if there is no filled, uninserted zkp batch.
        self.get_first_ready_zkp_batch()?;

        let num_zkp_batches = self.get_num_zkp_batches();

        self.num_inserted_zkp_batches += 1;
        let batch_is_completely_inserted = self.num_inserted_zkp_batches == num_zkp_batches;
        if batch_is_completely_inserted {
            self.advance_state_to_inserted()?;
            self.sequence_number = sequence_number + root_history_length as u64;
            self.root_index = root_index;
        }

        Ok(self.get_state())
    }

    /// Errors with [`BatchedMerkleTreeError::LeafIndexNotInBatch`] unless
    /// `leaf_index` falls inside this batch's inserted range.
    pub fn check_leaf_index_exists(&self, leaf_index: u64) -> Result<(), BatchedMerkleTreeError> {
        if !self.leaf_index_exists(leaf_index) {
            return Err(BatchedMerkleTreeError::LeafIndexNotInBatch);
        }
        Ok(())
    }

    /// True if `leaf_index` lies in `[start_index, start_index + inserted)`,
    /// i.e. the value at that index has been inserted into this batch.
    pub fn leaf_index_exists(&self, leaf_index: u64) -> bool {
        let next_batch_leaf_index = self.get_num_inserted_elements() + self.start_index;
        let min_batch_leaf_index = self.start_index;
        leaf_index < next_batch_leaf_index && leaf_index >= min_batch_leaf_index
    }
}
448
#[cfg(test)]
mod tests {

    use light_compressed_account::{pubkey::Pubkey, QueueType};
    use light_merkle_tree_metadata::queue::QueueMetadata;

    use super::*;
    use crate::queue::BatchedQueueAccount;

    // Standard test batch: 3 bloom filter iterations, capacity 160_000,
    // batch size 500, zkp batch size 100 (=> 5 zkp batches), start index 0.
    fn get_test_batch() -> Batch {
        Batch::new(3, 160_000, 500, 100, 0)
    }

    // Shared helper: drives a full batch through
    // `mark_as_inserted_in_merkle_tree` for every zkp batch, checking state
    // transitions, then verifies the reset performed by
    // `advance_state_to_fill`. Expects a batch filled with start_slot == 1.
    fn test_mark_as_inserted(mut batch: Batch) {
        let mut sequence_number = 10;
        let mut root_index = 20;
        let root_history_length = 23;
        let current_slot = 1;
        for i in 0..batch.get_num_zkp_batches() {
            sequence_number += i;
            root_index += i as u32;
            batch
                .mark_as_inserted_in_merkle_tree(sequence_number, root_index, root_history_length)
                .unwrap();
            if i != batch.get_num_zkp_batches() - 1 {
                // Intermediate zkp batches: batch stays Full.
                assert_eq!(batch.get_state(), BatchState::Full);
                assert_eq!(batch.get_num_inserted_zkp_batch(), 0);
                assert_eq!(batch.get_current_zkp_batch_index(), 5);
                assert_eq!(batch.get_num_inserted_zkps(), i + 1);
            } else {
                // Last zkp batch: batch advances to Inserted.
                assert_eq!(batch.get_state(), BatchState::Inserted);
                assert_eq!(batch.get_num_inserted_zkp_batch(), 0);
                assert_eq!(batch.get_current_zkp_batch_index(), 5);
                assert_eq!(batch.get_num_inserted_zkps(), i + 1);
            }
        }
        assert_eq!(batch.get_state(), BatchState::Inserted);
        assert_eq!(batch.get_num_inserted_zkp_batch(), 0);
        // Compare against a reference batch mirroring the expected end state.
        let mut ref_batch = get_test_batch();
        ref_batch.state = BatchState::Inserted.into();
        ref_batch.root_index = root_index;
        ref_batch.sequence_number = sequence_number + root_history_length as u64;
        ref_batch.num_inserted_zkp_batches = 5;
        ref_batch.start_slot = current_slot;
        ref_batch.start_slot_is_set = 1;
        ref_batch.num_full_zkp_batches = 5;
        assert_eq!(batch, ref_batch);
        // Reset to Fill with a new start index; everything else zeroed.
        batch.advance_state_to_fill(Some(1)).unwrap();
        let mut ref_batch = get_test_batch();
        ref_batch.start_index = 1;
        assert_eq!(batch, ref_batch);
    }

    // Fills a batch via `store_and_hash_value`, tracking every counter in a
    // reference batch, then verifies the Full state rejects further inserts.
    #[test]
    fn test_store_value() {
        let mut batch = get_test_batch();
        let current_slot = 1;

        let mut value_store_bytes =
            vec![0u8; ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(batch.batch_size)];
        let mut value_store =
            ZeroCopyVecU64::new(batch.batch_size, &mut value_store_bytes).unwrap();
        let mut hash_chain_store_bytes = vec![
            0u8;
            ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(
                batch.get_num_hash_chain_store() as u64
            )
        ];
        let mut hash_chain_store = ZeroCopyVecU64::new(
            batch.get_num_hash_chain_store() as u64,
            hash_chain_store_bytes.as_mut_slice(),
        )
        .unwrap();

        let mut ref_batch = get_test_batch();
        for i in 0..batch.batch_size {
            if i == 0 {
                // First insert records the start slot.
                ref_batch.start_slot = current_slot;
                ref_batch.start_slot_is_set = 1;
            }
            ref_batch.num_inserted %= ref_batch.zkp_batch_size;

            let mut value = [0u8; 32];
            value[24..].copy_from_slice(&i.to_be_bytes());
            assert!(batch
                .store_and_hash_value(
                    &value,
                    &mut value_store,
                    &mut hash_chain_store,
                    &current_slot
                )
                .is_ok());
            // Mirror the expected counter updates in the reference batch.
            ref_batch.num_inserted += 1;
            if ref_batch.num_inserted == ref_batch.zkp_batch_size {
                ref_batch.num_full_zkp_batches += 1;
                ref_batch.num_inserted = 0;
            }
            if ref_batch.num_full_zkp_batches == ref_batch.get_num_zkp_batches() {
                ref_batch.state = BatchState::Full.into();
                ref_batch.num_inserted = 0;
            }
            assert_eq!(batch, ref_batch);
            assert_eq!(*value_store.get(i as usize).unwrap(), value);
        }
        // Once full, further inserts must fail with BatchNotReady.
        let result = batch.store_and_hash_value(
            &[1u8; 32],
            &mut value_store,
            &mut hash_chain_store,
            &current_slot,
        );
        assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady);
        assert_eq!(batch.get_state(), BatchState::Full);
        assert_eq!(batch.get_num_inserted_zkp_batch(), 0);
        assert_eq!(batch.get_current_zkp_batch_index(), 5);
        assert_eq!(batch.get_num_zkp_batches(), 5);
        assert_eq!(batch.get_num_inserted_zkps(), 0);

        test_mark_as_inserted(batch);
    }

    // Fills a batch via `insert`, alternating the active bloom filter halfway
    // through, verifying hash chains, bloom filter contents, double-insert
    // rejection, and all counters against a reference batch.
    #[test]
    fn test_insert() {
        let mut batch = get_test_batch();
        let mut current_slot = 1;
        let mut stores = vec![vec![0u8; 20_000]; 2];
        let mut bloom_filter_stores = stores
            .iter_mut()
            .map(|store| &mut store[..])
            .collect::<Vec<_>>();
        let mut hash_chain_store_bytes = vec![
            0u8;
            ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(
                batch.get_num_hash_chain_store() as u64
            )
        ];
        ZeroCopyVecU64::<[u8; 32]>::new(
            batch.get_num_hash_chain_store() as u64,
            hash_chain_store_bytes.as_mut_slice(),
        )
        .unwrap();

        let mut ref_batch = get_test_batch();
        for processing_index in 0..=1 {
            for i in 0..(batch.batch_size / 2) {
                let i = i + (batch.batch_size / 2) * (processing_index as u64);
                if i == 0 && processing_index == 0 {
                    // Very first insert: start slot not yet recorded.
                    assert_eq!(batch.start_slot, 0);
                    assert_eq!(batch.start_slot_is_set, 0);
                    ref_batch.start_slot = current_slot;
                    ref_batch.start_slot_is_set = 1;
                } else {
                    // Start slot stays fixed at the first insert's slot.
                    assert_eq!(batch.start_slot, 1);
                    assert_eq!(batch.start_slot_is_set, 1);
                }

                ref_batch.num_inserted %= ref_batch.zkp_batch_size;
                let mut hash_chain_store =
                    ZeroCopyVecU64::<[u8; 32]>::from_bytes(hash_chain_store_bytes.as_mut_slice())
                        .unwrap();

                let mut value = [0u8; 32];
                value[24..].copy_from_slice(&i.to_be_bytes());
                // Expected chain: new entry at zkp batch start, else extended.
                #[allow(clippy::manual_is_multiple_of)]
                let ref_hash_chain = if i % batch.zkp_batch_size == 0 {
                    value
                } else {
                    Poseidon::hashv(&[hash_chain_store.last().unwrap(), &value]).unwrap()
                };
                let result = batch.insert(
                    &value,
                    &value,
                    bloom_filter_stores.as_mut_slice(),
                    &mut hash_chain_store,
                    processing_index,
                    &current_slot,
                );
                assert!(result.is_ok(), "Failed result: {:?}", result);
                assert_eq!(*hash_chain_store.last().unwrap(), ref_hash_chain);

                {
                    // Re-inserting the same value must fail (bloom filter
                    // inclusion); use copies so the real state is untouched.
                    let mut cloned_hash_chain_store = hash_chain_store_bytes.clone();
                    let mut hash_chain_store = ZeroCopyVecU64::<[u8; 32]>::from_bytes(
                        cloned_hash_chain_store.as_mut_slice(),
                    )
                    .unwrap();
                    let mut batch = batch;
                    assert!(batch
                        .insert(
                            &value,
                            &value,
                            bloom_filter_stores.as_mut_slice(),
                            &mut hash_chain_store,
                            processing_index,
                            &current_slot
                        )
                        .is_err());
                }
                // The active filter contains the value; the other must not.
                let mut bloom_filter = BloomFilter {
                    num_iters: batch.num_iters as usize,
                    capacity: batch.bloom_filter_capacity,
                    store: bloom_filter_stores[processing_index],
                };
                assert!(bloom_filter.contains(&value));
                let other_index = if processing_index == 0 { 1 } else { 0 };
                Batch::check_non_inclusion(
                    batch.num_iters as usize,
                    batch.bloom_filter_capacity,
                    &value,
                    bloom_filter_stores[other_index],
                )
                .unwrap();
                Batch::check_non_inclusion(
                    batch.num_iters as usize,
                    batch.bloom_filter_capacity,
                    &value,
                    bloom_filter_stores[processing_index],
                )
                .unwrap_err();

                // Mirror counter updates in the reference batch.
                ref_batch.num_inserted += 1;
                if ref_batch.num_inserted == ref_batch.zkp_batch_size {
                    ref_batch.num_full_zkp_batches += 1;
                    ref_batch.num_inserted = 0;
                }
                if i == batch.batch_size - 1 {
                    ref_batch.state = BatchState::Full.into();
                    ref_batch.num_inserted = 0;
                }
                assert_eq!(batch, ref_batch);
                current_slot += 1;
            }
        }
        test_mark_as_inserted(batch);
    }

    // Verifies hash chain semantics directly: first value seeds the chain,
    // the second is folded in via Poseidon.
    #[test]
    fn test_add_to_hash_chain() {
        let mut batch = get_test_batch();
        let mut hash_chain_store_bytes = vec![
            0u8;
            ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(
                batch.get_num_hash_chain_store() as u64
            )
        ];
        let mut hash_chain_store = ZeroCopyVecU64::<[u8; 32]>::new(
            batch.get_num_hash_chain_store() as u64,
            hash_chain_store_bytes.as_mut_slice(),
        )
        .unwrap();
        let value = [1u8; 32];

        assert!(batch
            .add_to_hash_chain(&value, &mut hash_chain_store)
            .is_ok());
        let mut ref_batch = get_test_batch();
        let user_hash_chain = value;
        ref_batch.num_inserted = 1;
        assert_eq!(batch, ref_batch);
        assert_eq!(hash_chain_store[0], user_hash_chain);
        let value = [2u8; 32];
        let ref_hash_chain = Poseidon::hashv(&[&user_hash_chain, &value]).unwrap();
        assert!(batch
            .add_to_hash_chain(&value, &mut hash_chain_store)
            .is_ok());

        ref_batch.num_inserted = 2;
        assert_eq!(batch, ref_batch);
        assert_eq!(hash_chain_store[0], ref_hash_chain);
    }

    // Non-inclusion must pass before insertion, fail on the filter that was
    // written, and still pass on the other filter — for both filter indices.
    #[test]
    fn test_check_non_inclusion() {
        let mut current_slot = 1;
        for processing_index in 0..=1 {
            let mut batch = get_test_batch();

            let value = [1u8; 32];
            let mut stores = vec![vec![0u8; 20_000]; 2];
            let mut bloom_filter_stores = stores
                .iter_mut()
                .map(|store| &mut store[..])
                .collect::<Vec<_>>();
            let mut hash_chain_store_bytes = vec![
                0u8;
                ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(
                    batch.get_num_hash_chain_store() as u64
                )
            ];
            let mut hash_chain_store = ZeroCopyVecU64::<[u8; 32]>::new(
                batch.get_num_hash_chain_store() as u64,
                hash_chain_store_bytes.as_mut_slice(),
            )
            .unwrap();

            assert_eq!(
                Batch::check_non_inclusion(
                    batch.num_iters as usize,
                    batch.bloom_filter_capacity,
                    &value,
                    bloom_filter_stores[processing_index]
                ),
                Ok(())
            );
            let ref_batch = get_test_batch();
            assert_eq!(batch, ref_batch);
            batch
                .insert(
                    &value,
                    &value,
                    bloom_filter_stores.as_mut_slice(),
                    &mut hash_chain_store,
                    processing_index,
                    &current_slot,
                )
                .unwrap();
            current_slot += 1;
            assert!(Batch::check_non_inclusion(
                batch.num_iters as usize,
                batch.bloom_filter_capacity,
                &value,
                bloom_filter_stores[processing_index]
            )
            .is_err());

            let other_index = if processing_index == 0 { 1 } else { 0 };
            assert!(Batch::check_non_inclusion(
                batch.num_iters as usize,
                batch.bloom_filter_capacity,
                &value,
                bloom_filter_stores[other_index]
            )
            .is_ok());
        }
    }

    // Smoke-tests the simple getters and the happy-path state transitions.
    #[test]
    fn test_getters() {
        let mut batch = get_test_batch();
        assert_eq!(batch.get_num_zkp_batches(), 5);
        assert_eq!(batch.get_num_hash_chain_store(), 5);
        assert_eq!(batch.get_state(), BatchState::Fill);
        assert_eq!(batch.get_num_inserted_zkp_batch(), 0);
        assert_eq!(batch.get_current_zkp_batch_index(), 0);
        assert_eq!(batch.get_num_inserted_zkps(), 0);
        batch.advance_state_to_full().unwrap();
        assert_eq!(batch.get_state(), BatchState::Full);
        batch.advance_state_to_inserted().unwrap();
        assert_eq!(batch.get_state(), BatchState::Inserted);
    }

    // `leaf_index_exists` must be an inclusive lower / exclusive upper range
    // check on [start_index, start_index + inserted elements).
    #[test]
    fn test_value_is_inserted_in_batch() {
        let mut batch = get_test_batch();
        batch.advance_state_to_full().unwrap();
        batch.advance_state_to_inserted().unwrap();
        batch.start_index = 1;
        batch.num_inserted = 5;
        let lowest_eligible_value = batch.start_index;
        let highest_eligible_value = batch.start_index + batch.get_num_inserted_elements() - 1;
        assert!(!batch.leaf_index_exists(lowest_eligible_value - 1));
        assert!(batch.leaf_index_exists(lowest_eligible_value));
        assert!(batch.leaf_index_exists(highest_eligible_value));
        assert!(!batch.leaf_index_exists(highest_eligible_value + 1));
    }

    // Exercises `get_first_ready_zkp_batch` across all phases: not ready
    // while filling, ready at each zkp batch boundary, and AlreadyInserted
    // after the batch is completely inserted.
    #[test]
    fn test_can_insert_batch() {
        let mut batch = get_test_batch();
        let mut current_slot = 1;
        assert_eq!(
            batch.get_first_ready_zkp_batch(),
            Err(BatchedMerkleTreeError::BatchNotReady)
        );
        let mut value_store_bytes =
            vec![0u8; ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(batch.batch_size)];
        let mut value_store =
            ZeroCopyVecU64::<[u8; 32]>::new(batch.batch_size, &mut value_store_bytes).unwrap();
        let mut hash_chain_store_bytes = vec![
            0u8;
            ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(
                batch.get_num_hash_chain_store() as u64
            )
        ];
        let mut hash_chain_store = ZeroCopyVecU64::<[u8; 32]>::new(
            batch.get_num_hash_chain_store() as u64,
            hash_chain_store_bytes.as_mut_slice(),
        )
        .unwrap();

        for i in 0..batch.batch_size + 10 {
            let mut value = [0u8; 32];
            value[24..].copy_from_slice(&i.to_be_bytes());
            if i < batch.batch_size {
                batch
                    .store_and_hash_value(
                        &value,
                        &mut value_store,
                        &mut hash_chain_store,
                        &current_slot,
                    )
                    .unwrap();
            }
            #[allow(clippy::manual_is_multiple_of)]
            if (i + 1) % batch.zkp_batch_size == 0 && i != 0 {
                // A zkp batch just completed: it must be ready and insertable.
                assert_eq!(
                    batch.get_first_ready_zkp_batch().unwrap(),
                    i / batch.zkp_batch_size
                );
                batch.mark_as_inserted_in_merkle_tree(0, 0, 0).unwrap();
            } else if i >= batch.batch_size {
                assert_eq!(
                    batch.get_first_ready_zkp_batch(),
                    Err(BatchedMerkleTreeError::BatchAlreadyInserted)
                );
            } else {
                assert_eq!(
                    batch.get_first_ready_zkp_batch(),
                    Err(BatchedMerkleTreeError::BatchNotReady)
                );
            }
            current_slot += 1;
        }
    }

    // Invalid state transitions must be rejected with BatchNotReady while
    // valid ones (Fill -> Full -> Inserted) succeed.
    #[test]
    fn test_get_state() {
        let mut batch = get_test_batch();
        assert_eq!(batch.get_state(), BatchState::Fill);
        {
            let result = batch.advance_state_to_inserted();
            assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady));
            let result = batch.advance_state_to_fill(None);
            assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady));
        }
        batch.advance_state_to_full().unwrap();
        assert_eq!(batch.get_state(), BatchState::Full);
        {
            let result = batch.advance_state_to_full();
            assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady));
            let result = batch.advance_state_to_fill(None);
            assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady));
        }
        batch.advance_state_to_inserted().unwrap();
        assert_eq!(batch.get_state(), BatchState::Inserted);
    }

    // The zeroed flag toggles through its setters.
    #[test]
    fn test_bloom_filter_is_zeroed() {
        let mut batch = get_test_batch();
        assert!(!batch.bloom_filter_is_zeroed());
        batch.set_bloom_filter_to_zeroed();
        assert!(batch.bloom_filter_is_zeroed());
        batch.set_bloom_filter_to_not_zeroed();
        assert!(!batch.bloom_filter_is_zeroed());
    }

    // Ready updates = full zkp batches - inserted zkp batches.
    #[test]
    fn test_num_ready_zkp_updates() {
        let mut batch = get_test_batch();
        assert_eq!(batch.get_num_ready_zkp_updates(), 0);
        batch.num_full_zkp_batches = 1;
        assert_eq!(batch.get_num_ready_zkp_updates(), 1);
        batch.num_inserted_zkp_batches = 1;
        assert_eq!(batch.get_num_ready_zkp_updates(), 0);
        batch.num_full_zkp_batches = 2;
        assert_eq!(batch.get_num_ready_zkp_updates(), 1);
    }

    // `get_num_inserted_elements` must count every value added via
    // `add_to_hash_chain`, across zkp batch boundaries.
    #[test]
    fn test_get_num_inserted_elements() {
        let mut batch = get_test_batch();
        assert_eq!(batch.get_num_inserted_elements(), 0);
        let mut hash_chain_bytes = vec![0u8; 32 * batch.batch_size as usize];
        let mut hash_chain_store = ZeroCopyVecU64::<[u8; 32]>::new(
            batch.get_num_zkp_batches(),
            hash_chain_bytes.as_mut_slice(),
        )
        .unwrap();

        for i in 0..batch.batch_size {
            let mut value = [0u8; 32];
            value[24..].copy_from_slice(&i.to_be_bytes());
            batch
                .add_to_hash_chain(&value, &mut hash_chain_store)
                .unwrap();
            assert_eq!(batch.get_num_inserted_elements(), i + 1);
        }
    }

    // Tree-inserted element count grows by zkp_batch_size per inserted batch.
    #[test]
    fn test_get_num_elements_inserted_into_tree() {
        let mut batch = get_test_batch();
        assert_eq!(batch.get_num_elements_inserted_into_tree(), 0);
        for i in 0..batch.get_num_zkp_batches() {
            #[allow(clippy::manual_is_multiple_of)]
            if i % batch.zkp_batch_size == 0 {
                batch.num_full_zkp_batches += 1;
                batch
                    .mark_as_inserted_in_merkle_tree(i, i as u32, 0)
                    .unwrap();
                assert_eq!(
                    batch.get_num_elements_inserted_into_tree(),
                    (i + 1) * batch.zkp_batch_size
                );
            }
        }
    }

    // End-to-end through BatchedQueueAccount: fills both batches, verifies
    // per-batch counters/start slots, reuses batch 0 after insertion, and
    // finally fills the tree to capacity.
    #[test]
    fn test_get_num_inserted() {
        let mut account_data = vec![0u8; 1000];
        let mut queue_metadata = QueueMetadata::default();
        let associated_merkle_tree = Pubkey::new_unique();
        queue_metadata.associated_merkle_tree = associated_merkle_tree;
        queue_metadata.queue_type = QueueType::OutputStateV2 as u64;
        let batch_size = 4;
        let zkp_batch_size = 2;
        let bloom_filter_capacity = 0;
        let num_iters = 0;
        let mut current_slot = 1;
        let mut account = BatchedQueueAccount::init(
            &mut account_data,
            queue_metadata,
            batch_size,
            zkp_batch_size,
            num_iters,
            bloom_filter_capacity,
            Pubkey::new_unique(),
            16,
        )
        .unwrap();
        assert_eq!(account.get_num_inserted_in_current_batch(), 0);
        // Fill batch 0 completely.
        for i in 1..=batch_size {
            account
                .insert_into_current_batch(&[1u8; 32], &current_slot)
                .unwrap();
            if i == batch_size {
                // Batch 0 full: current batch switched, counter reset.
                assert_eq!(account.get_num_inserted_in_current_batch(), 0);
                assert_eq!(
                    account.batch_metadata.batches[0].get_num_inserted_elements(),
                    i
                );
            } else {
                assert_eq!(account.get_num_inserted_in_current_batch(), i);
            }
            current_slot += 1;
        }
        println!("full batch 0 {:?}", account.batch_metadata.batches[0]);

        // Fill batch 1 completely.
        for i in 1..=batch_size {
            account
                .insert_into_current_batch(&[2u8; 32], &current_slot)
                .unwrap();
            if i == batch_size {
                assert_eq!(account.get_num_inserted_in_current_batch(), 4);
                assert_eq!(
                    account.batch_metadata.batches[1].get_num_inserted_elements(),
                    i
                );
            } else {
                assert_eq!(account.get_num_inserted_in_current_batch(), i);
            }
            current_slot += 1;
        }
        println!("account {:?}", account.batch_metadata);
        println!("account {:?}", account.batch_metadata.batches[0]);
        println!("account {:?}", account.batch_metadata.batches[1]);
        assert_eq!(account.get_num_inserted_in_current_batch(), batch_size);
        // Both batches full: further inserts must fail.
        assert_eq!(
            account.insert_into_current_batch(&[1u8; 32], &current_slot),
            Err(BatchedMerkleTreeError::BatchNotReady)
        );
        let ref_value_array = vec![[1u8; 32]; 4];
        assert_eq!(account.value_vecs[0].as_slice(), ref_value_array.as_slice());
        let ref_value_array = vec![[2u8; 32]; 4];
        assert_eq!(account.value_vecs[1].as_slice(), ref_value_array.as_slice());
        assert_eq!(account.batch_metadata.get_current_batch().start_index, 0);
        {
            // Batch 0 should match a freshly constructed full batch.
            let batch_0 = account.batch_metadata.batches[0];
            let mut expected_batch = Batch::new(
                num_iters,
                bloom_filter_capacity,
                batch_size,
                zkp_batch_size,
                0,
            );
            expected_batch.num_full_zkp_batches = 2;
            expected_batch.start_slot = 1;
            expected_batch.start_slot_is_set = 1;
            expected_batch.advance_state_to_full().unwrap();
            assert_eq!(batch_0, expected_batch);
        }
        {
            // Batch 1 mirrors batch 0 shifted by batch_size.
            let batch_1 = account.batch_metadata.batches[1];
            let mut expected_batch = Batch::new(
                num_iters,
                bloom_filter_capacity,
                batch_size,
                zkp_batch_size,
                batch_size,
            );
            expected_batch.num_full_zkp_batches = 2;
            expected_batch.start_slot = 1 + batch_size;
            expected_batch.start_slot_is_set = 1;
            expected_batch.advance_state_to_full().unwrap();
            assert_eq!(batch_1, expected_batch);
        }
        {
            // Simulate batch 0 being inserted into the tree.
            account.batch_metadata.batches[0]
                .advance_state_to_inserted()
                .unwrap();
        }
        {
            // Inserting again should reset batch 0 to Fill and reuse it.
            assert_eq!(
                account.batch_metadata.get_current_batch().get_state(),
                BatchState::Inserted
            );
            account
                .insert_into_current_batch(&[1u8; 32], &current_slot)
                .unwrap();
            assert_eq!(account.value_vecs[0].as_slice(), [[1u8; 32]].as_slice());
            assert_eq!(account.value_vecs[1].as_slice(), ref_value_array.as_slice());
            assert_eq!(
                account.hash_chain_stores[0].as_slice(),
                [[1u8; 32]].as_slice()
            );
            assert_eq!(
                account.batch_metadata.get_current_batch().get_state(),
                BatchState::Fill
            );
            let mut expected_batch = Batch::new(
                num_iters,
                bloom_filter_capacity,
                batch_size,
                zkp_batch_size,
                batch_size * 2,
            );

            assert_ne!(*account.batch_metadata.get_current_batch(), expected_batch);
            expected_batch.num_inserted = 1;
            expected_batch.start_slot_is_set = 1;
            expected_batch.start_slot = current_slot;
            assert_eq!(*account.batch_metadata.get_current_batch(), expected_batch);

            assert_eq!(account.batch_metadata.get_current_batch().start_index, 8);
        }
        {
            // Fill the reused batch 0 to full again.
            let expected_start_slot = current_slot;
            for i in 1..batch_size {
                assert_eq!(account.get_num_inserted_in_current_batch(), i);
                account
                    .insert_into_current_batch(&[1u8; 32], &current_slot)
                    .unwrap();
                current_slot += 1;
            }
            assert_eq!(account.get_num_inserted_in_current_batch(), batch_size);
            let mut expected_batch = Batch::new(
                num_iters,
                bloom_filter_capacity,
                batch_size,
                zkp_batch_size,
                batch_size * 2,
            );

            expected_batch.num_full_zkp_batches = 2;
            expected_batch.advance_state_to_full().unwrap();
            expected_batch.start_slot = expected_start_slot;
            expected_batch.start_slot_is_set = 1;
            assert_eq!(account.batch_metadata.batches[0], expected_batch);
            assert_ne!(*account.batch_metadata.get_current_batch(), expected_batch);
            assert_eq!(
                *account.batch_metadata.get_current_batch(),
                account.batch_metadata.batches[1]
            );
        }
        assert_eq!(account.batch_metadata.next_index, 12);
        // Mark batch 1 as inserted so it can be reused for the final round.
        account
            .batch_metadata
            .get_current_batch_mut()
            .advance_state_to_inserted()
            .unwrap();

        {
            // Fill the reused batch 1, bringing the tree to capacity (16).
            let expected_start_slot = current_slot;
            for _ in 0..batch_size {
                assert!(!account.tree_is_full());
                assert!(account.check_tree_is_full().is_ok());
                account
                    .insert_into_current_batch(&[1u8; 32], &current_slot)
                    .unwrap();
                current_slot += 1;
            }
            assert_eq!(account.get_num_inserted_in_current_batch(), batch_size);
            let mut expected_batch = Batch::new(
                num_iters,
                bloom_filter_capacity,
                batch_size,
                zkp_batch_size,
                batch_size * 3,
            );
            expected_batch.num_full_zkp_batches = 2;
            expected_batch.start_slot = expected_start_slot;
            expected_batch.start_slot_is_set = 1;
            expected_batch.advance_state_to_full().unwrap();
            assert_eq!(account.batch_metadata.batches[1], expected_batch);
        }
        assert_eq!(account.batch_metadata.next_index, 16);
        assert!(account.tree_is_full());
        assert_eq!(
            account.check_tree_is_full(),
            Err(BatchedMerkleTreeError::TreeIsFull)
        );
    }
}