1use std::{fmt::Debug, time::Duration};
2
3use account_compression::{
4 AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig,
5};
6use async_trait::async_trait;
7use borsh::BorshDeserialize;
8use light_batched_merkle_tree::{
9 constants::{
10 DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN,
11 DEFAULT_BATCH_STATE_TREE_HEIGHT,
12 },
13 merkle_tree::BatchedMerkleTreeAccount,
14};
15use light_client::{
16 fee::FeeConfig,
17 indexer::{
18 AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs,
19 AddressWithTree, BatchAddressUpdateIndexerResponse, CompressedAccount, Context,
20 GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions,
21 Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof,
22 MerkleProofWithContext, NewAddressProofWithContext, OwnerBalance, PaginatedOptions,
23 Response, RetryConfig, RootIndex, SignatureWithMetadata, StateMerkleTreeAccounts,
24 TokenAccount, TokenBalance, ValidityProofWithContext,
25 },
26 rpc::{Rpc, RpcError},
27};
28use light_compressed_account::{
29 compressed_account::{CompressedAccountWithMerkleContext, MerkleContext},
30 hash_chain::create_hash_chain_from_slice,
31 indexer_event::event::PublicTransactionEvent,
32 instruction_data::compressed_proof::CompressedProof,
33 tx_hash::create_tx_hash,
34 TreeType,
35};
36use light_hasher::{bigint::bigint_to_be_bytes_array, Poseidon};
37use light_merkle_tree_metadata::QueueType;
38use light_merkle_tree_reference::MerkleTree;
39use light_prover_client::{
40 constants::{PROVE_PATH, SERVER_ADDRESS},
41 helpers::{big_int_to_string, bigint_to_u8_32, string_to_big_int},
42 proof::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
43 proof_type::ProofType,
44 proof_types::{
45 combined::{v1::CombinedJsonStruct as CombinedJsonStructLegacy, v2::CombinedJsonStruct},
46 inclusion::{
47 v1::{
48 BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy,
49 InclusionProofInputs as InclusionProofInputsLegacy,
50 },
51 v2::{BatchInclusionJsonStruct, InclusionMerkleProofInputs, InclusionProofInputs},
52 },
53 non_inclusion::{
54 v1::{
55 BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy,
56 NonInclusionProofInputs as NonInclusionProofInputsLegacy,
57 },
58 v2::{BatchNonInclusionJsonStruct, NonInclusionProofInputs},
59 },
60 },
61};
62use light_sdk::{
63 light_hasher::Hash,
64 token::{TokenData, TokenDataWithMerkleContext},
65};
66use log::info;
67use num_bigint::{BigInt, BigUint};
68use num_traits::FromBytes;
69use reqwest::Client;
70use solana_sdk::{
71 bs58,
72 pubkey::Pubkey,
73 signature::{Keypair, Signer},
74};
75
76use super::{
77 address_tree::{AddressMerkleTreeBundle, IndexedMerkleTreeVersion},
78 state_tree::{LeafIndexInfo, StateMerkleTreeBundle},
79};
80#[cfg(feature = "devenv")]
81use crate::accounts::{
82 address_tree_v2::create_batch_address_merkle_tree,
83 state_tree_v2::create_batched_state_merkle_tree,
84};
85use crate::{
86 accounts::{
87 address_tree::create_address_merkle_tree_and_queue_account,
88 state_tree::create_state_merkle_tree_and_queue_account, test_accounts::TestAccounts,
89 test_keypairs::BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR,
90 },
91 indexer::TestIndexerExtensions,
92};
93
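/// In-memory test indexer: mirrors state and address Merkle trees locally and
/// indexes compressed (token) accounts parsed from `PublicTransactionEvent`s.
///
/// Construction sketch (hypothetical values; assumes a funded `payer` and a
/// populated `TestAccounts`, with `output_queue_batch_size` matching the
/// batched state tree configuration):
///
/// ```ignore
/// let indexer = TestIndexer::init_from_acounts(&payer, &test_accounts, output_queue_batch_size).await;
/// ```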
94#[derive(Debug)]
95pub struct TestIndexer {
96 pub state_merkle_trees: Vec<StateMerkleTreeBundle>,
97 pub address_merkle_trees: Vec<AddressMerkleTreeBundle>,
98 pub payer: Keypair,
99 pub group_pda: Pubkey,
100 pub compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
101 pub nullified_compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
102 pub token_compressed_accounts: Vec<TokenDataWithMerkleContext>,
103 pub token_nullified_compressed_accounts: Vec<TokenDataWithMerkleContext>,
104 pub events: Vec<PublicTransactionEvent>,
105}
106
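// Manual `Clone`: `Keypair` does not implement `Clone`, so the payer is duplicated
// with `insecure_clone()` (acceptable here because this indexer is test-only).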
107impl Clone for TestIndexer {
108 fn clone(&self) -> Self {
109 Self {
110 state_merkle_trees: self.state_merkle_trees.clone(),
111 address_merkle_trees: self.address_merkle_trees.clone(),
112 payer: self.payer.insecure_clone(),
113 group_pda: self.group_pda,
114 compressed_accounts: self.compressed_accounts.clone(),
115 nullified_compressed_accounts: self.nullified_compressed_accounts.clone(),
116 token_compressed_accounts: self.token_compressed_accounts.clone(),
117 token_nullified_compressed_accounts: self.token_nullified_compressed_accounts.clone(),
118 events: self.events.clone(),
119 }
120 }
121}
122
123#[async_trait]
124impl Indexer for TestIndexer {
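    // The test indexer is always in sync with its in-memory trees, so the reported
    // slot is simply `u64::MAX` (see `get_current_slot`).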
125 async fn get_indexer_slot(&self, _config: Option<RetryConfig>) -> Result<u64, IndexerError> {
127 Ok(u64::MAX)
129 }
130
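    // For each requested hash, scan every state tree bundle and build the proof from
    // the local reference Merkle tree; hashes that are not found are skipped silently.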
131 async fn get_multiple_compressed_account_proofs(
132 &self,
133 hashes: Vec<[u8; 32]>,
134 _config: Option<IndexerRpcConfig>,
135 ) -> Result<Response<Items<MerkleProof>>, IndexerError> {
136 info!("Getting proofs for {:?}", hashes);
137 let mut proofs: Vec<MerkleProof> = Vec::new();
138 hashes.iter().for_each(|hash| {
139 self.state_merkle_trees.iter().for_each(|tree| {
140 if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(hash) {
141 let proof = tree
142 .merkle_tree
143 .get_proof_of_leaf(leaf_index, false)
144 .unwrap();
145 proofs.push(MerkleProof {
146 hash: *hash,
147 leaf_index: leaf_index as u64,
148 merkle_tree: tree.accounts.merkle_tree,
149 proof: proof.to_vec(),
150 root_seq: tree.merkle_tree.sequence_number as u64,
151 root: *tree.merkle_tree.roots.last().unwrap(),
152 });
153 }
154 })
155 });
156 Ok(Response {
157 context: Context {
158 slot: self.get_current_slot(),
159 },
160 value: Items { items: proofs },
161 })
162 }
163
164 async fn get_compressed_accounts_by_owner(
165 &self,
166 owner: &Pubkey,
167 _options: Option<GetCompressedAccountsByOwnerConfig>,
168 _config: Option<IndexerRpcConfig>,
169 ) -> Result<Response<ItemsWithCursor<CompressedAccount>>, IndexerError> {
170 let accounts_with_context = <TestIndexer as TestIndexerExtensions>::get_compressed_accounts_with_merkle_context_by_owner(self, owner);
171 let accounts: Result<Vec<CompressedAccount>, IndexerError> = accounts_with_context
172 .into_iter()
173 .map(|acc| acc.try_into())
174 .collect();
175
176 Ok(Response {
177 context: Context {
178 slot: self.get_current_slot(),
179 },
180 value: ItemsWithCursor {
181 items: accounts?,
182 cursor: None,
183 },
184 })
185 }
186
187 async fn get_compressed_account(
188 &self,
189 address: Address,
190 _config: Option<IndexerRpcConfig>,
191 ) -> Result<Response<CompressedAccount>, IndexerError> {
192 let account = self
193 .compressed_accounts
194 .iter()
195 .find(|acc| acc.compressed_account.address == Some(address));
196
197 let account_data = account
198 .ok_or(IndexerError::AccountNotFound)?
199 .clone()
200 .try_into()?;
201
202 Ok(Response {
203 context: Context {
204 slot: self.get_current_slot(),
205 },
206 value: account_data,
207 })
208 }
209
210 async fn get_compressed_account_by_hash(
211 &self,
212 hash: Hash,
213 _config: Option<IndexerRpcConfig>,
214 ) -> Result<Response<CompressedAccount>, IndexerError> {
215 let res = self
216 .compressed_accounts
217 .iter()
218 .find(|acc| acc.hash() == Ok(hash));
219
        let account = res.or_else(|| {
            self.token_compressed_accounts
                .iter()
                .find(|acc| acc.compressed_account.hash() == Ok(hash))
                .map(|x| &x.compressed_account)
        });
230
231 let account_data = account
232 .ok_or(IndexerError::AccountNotFound)?
233 .clone()
234 .try_into()?;
235
236 Ok(Response {
237 context: Context {
238 slot: self.get_current_slot(),
239 },
240 value: account_data,
241 })
242 }
243
244 async fn get_compressed_token_accounts_by_owner(
245 &self,
246 owner: &Pubkey,
247 options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
248 _config: Option<IndexerRpcConfig>,
249 ) -> Result<Response<ItemsWithCursor<TokenAccount>>, IndexerError> {
250 let mint = options.as_ref().and_then(|opts| opts.mint);
251 let token_accounts: Result<Vec<TokenAccount>, IndexerError> = self
252 .token_compressed_accounts
253 .iter()
254 .filter(|acc| {
255 acc.token_data.owner == *owner && mint.is_none_or(|m| acc.token_data.mint == m)
256 })
257 .map(|acc| TokenAccount::try_from(acc.clone()))
258 .collect();
259 let token_accounts = token_accounts?;
260 let token_accounts = if let Some(options) = options {
261 if let Some(limit) = options.limit {
262 token_accounts.into_iter().take(limit as usize).collect()
263 } else {
264 token_accounts
265 }
266 } else {
267 token_accounts
268 };
269
270 Ok(Response {
271 context: Context {
272 slot: self.get_current_slot(),
273 },
274 value: ItemsWithCursor {
275 items: token_accounts,
276 cursor: None,
277 },
278 })
279 }
280
281 async fn get_compressed_balance(
282 &self,
283 address: Option<Address>,
284 hash: Option<Hash>,
285 _config: Option<IndexerRpcConfig>,
286 ) -> Result<Response<u64>, IndexerError> {
287 let account_response = match (address, hash) {
288 (Some(addr), _) => self.get_compressed_account(addr, None).await?,
289 (_, Some(h)) => self.get_compressed_account_by_hash(h, None).await?,
290 _ => {
291 return Err(IndexerError::InvalidParameters(
292 "Either address or hash must be provided".to_string(),
293 ))
294 }
295 };
296 Ok(Response {
297 context: Context {
298 slot: self.get_current_slot(),
299 },
300 value: account_response.value.lamports,
301 })
302 }
303
304 async fn get_compressed_token_account_balance(
305 &self,
306 address: Option<Address>,
307 hash: Option<Hash>,
308 _config: Option<IndexerRpcConfig>,
309 ) -> Result<Response<u64>, IndexerError> {
310 let account = match (address, hash) {
311 (Some(address), _) => self
312 .token_compressed_accounts
313 .iter()
314 .find(|acc| acc.compressed_account.compressed_account.address == Some(address)),
315 (_, Some(hash)) => self
316 .token_compressed_accounts
317 .iter()
318 .find(|acc| acc.compressed_account.hash() == Ok(hash)),
319 (None, None) => {
320 return Err(IndexerError::InvalidParameters(
321 "Either address or hash must be provided".to_string(),
322 ))
323 }
324 };
325
326 let amount = account
327 .map(|acc| acc.token_data.amount)
328 .ok_or(IndexerError::AccountNotFound)?;
329
330 Ok(Response {
331 context: Context {
332 slot: self.get_current_slot(),
333 },
334 value: amount,
335 })
336 }
337
338 async fn get_multiple_compressed_accounts(
339 &self,
340 addresses: Option<Vec<Address>>,
341 hashes: Option<Vec<Hash>>,
342 _config: Option<IndexerRpcConfig>,
343 ) -> Result<Response<Items<CompressedAccount>>, IndexerError> {
344 match (addresses, hashes) {
345 (Some(addresses), _) => {
346 let accounts = self
347 .compressed_accounts
348 .iter()
349 .filter(|acc| {
350 acc.compressed_account
351 .address
352 .is_some_and(|addr| addresses.contains(&addr))
353 })
354 .map(|acc| acc.clone().try_into())
355 .collect::<Result<Vec<CompressedAccount>, IndexerError>>()?;
356 Ok(Response {
357 context: Context {
358 slot: self.get_current_slot(),
359 },
360 value: Items { items: accounts },
361 })
362 }
363 (_, Some(hashes)) => {
364 let accounts = self
365 .compressed_accounts
366 .iter()
367 .filter(|acc| acc.hash().is_ok_and(|hash| hashes.contains(&hash)))
368 .map(|acc| acc.clone().try_into())
369 .collect::<Result<Vec<CompressedAccount>, IndexerError>>()?;
370 Ok(Response {
371 context: Context {
372 slot: self.get_current_slot(),
373 },
374 value: Items { items: accounts },
375 })
376 }
377 (None, None) => Err(IndexerError::InvalidParameters(
378 "Either addresses or hashes must be provided".to_string(),
379 )),
380 }
381 }
382
383 async fn get_compressed_token_balances_by_owner_v2(
384 &self,
385 owner: &Pubkey,
        options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
387 _config: Option<IndexerRpcConfig>,
388 ) -> Result<Response<ItemsWithCursor<TokenBalance>>, IndexerError> {
        let mint = options.as_ref().and_then(|opts| opts.mint);
390 let balances: Vec<TokenBalance> = self
391 .token_compressed_accounts
392 .iter()
393 .filter(|acc| {
394 acc.token_data.owner == *owner && mint.is_none_or(|m| acc.token_data.mint == m)
395 })
396 .fold(std::collections::HashMap::new(), |mut map, acc| {
397 *map.entry(acc.token_data.mint).or_insert(0) += acc.token_data.amount;
398 map
399 })
400 .into_iter()
401 .map(|(mint, balance)| TokenBalance { balance, mint })
402 .collect();
403
404 Ok(Response {
405 context: Context {
406 slot: self.get_current_slot(),
407 },
408 value: ItemsWithCursor {
409 items: balances,
410 cursor: None,
411 },
412 })
413 }
414
415 async fn get_compression_signatures_for_account(
416 &self,
417 _hash: Hash,
418 _config: Option<IndexerRpcConfig>,
419 ) -> Result<Response<Items<SignatureWithMetadata>>, IndexerError> {
        todo!("get_compression_signatures_for_account not implemented")
421 }
422
423 async fn get_multiple_new_address_proofs(
424 &self,
425 merkle_tree_pubkey: [u8; 32],
426 addresses: Vec<[u8; 32]>,
427 _config: Option<IndexerRpcConfig>,
428 ) -> Result<Response<Items<NewAddressProofWithContext>>, IndexerError> {
429 let proofs = self
430 ._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false)
431 .await?;
432 Ok(Response {
433 context: Context {
434 slot: self.get_current_slot(),
435 },
436 value: Items { items: proofs },
437 })
438 }
439
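    // With the `v2` feature, accounts that are still in a batched output queue are
    // proven by index (zeroed root, `RootIndex::new_none()`) and excluded from the zk
    // proof; the remaining hashes plus all new addresses go through the v1 proof path,
    // and the queue-based entries are spliced back in at their recorded positions.
    // Without `v2`, the request is forwarded to the v1 implementation unchanged.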
440 async fn get_validity_proof(
441 &self,
442 hashes: Vec<[u8; 32]>,
443 new_addresses_with_trees: Vec<AddressWithTree>,
444 _config: Option<IndexerRpcConfig>,
445 ) -> Result<Response<ValidityProofWithContext>, IndexerError> {
446 #[cfg(feature = "v2")]
447 {
448 let mut state_merkle_tree_pubkeys = Vec::new();
450
451 for hash in hashes.iter() {
452 let account = self.get_compressed_account_by_hash(*hash, None).await?;
453 state_merkle_tree_pubkeys.push(account.value.tree_info.tree);
454 }
455 let mut proof_inputs = vec![];
456
457 let mut indices_to_remove = Vec::new();
458 let compressed_accounts = if !hashes.is_empty() && !state_merkle_tree_pubkeys.is_empty()
460 {
461 let zipped_accounts = hashes.iter().zip(state_merkle_tree_pubkeys.iter());
462
463 for (i, (compressed_account, state_merkle_tree_pubkey)) in
464 zipped_accounts.enumerate()
465 {
466 let accounts = self.state_merkle_trees.iter().find(|x| {
467 x.accounts.merkle_tree == *state_merkle_tree_pubkey
468 && x.tree_type == TreeType::StateV2
469 });
470
471 if let Some(accounts) = accounts {
472 let queue_element = accounts
473 .output_queue_elements
474 .iter()
475 .find(|(hash, _)| hash == compressed_account);
476 if let Some((_, index)) = queue_element {
477 if accounts.output_queue_batch_size.is_some()
478 && accounts.leaf_index_in_queue_range(*index as usize)?
479 {
482 indices_to_remove.push(i);
483 proof_inputs.push(AccountProofInputs {
484 hash: *compressed_account,
485 root: [0u8; 32],
486 root_index: RootIndex::new_none(),
487 leaf_index: accounts
488 .output_queue_elements
489 .iter()
490 .position(|(x, _)| x == compressed_account)
491 .unwrap()
492 as u64,
493 tree_info: light_client::indexer::TreeInfo {
494 cpi_context: Some(accounts.accounts.cpi_context),
495 tree: accounts.accounts.merkle_tree,
496 queue: accounts.accounts.nullifier_queue,
497 next_tree_info: None,
498 tree_type: accounts.tree_type,
499 },
500 })
501 }
502 }
503 }
504 }
505
506 let compress_accounts = hashes
507 .iter()
508 .enumerate()
509 .filter(|(i, _)| !indices_to_remove.contains(i))
510 .map(|(_, x)| *x)
511 .collect::<Vec<[u8; 32]>>();
512
513 if compress_accounts.is_empty() {
514 None
515 } else {
516 Some(compress_accounts)
517 }
518 } else {
519 None
520 };
521
522 let rpc_result: Option<ValidityProofWithContext> = if (compressed_accounts.is_some()
524 && !compressed_accounts.as_ref().unwrap().is_empty())
525 || !new_addresses_with_trees.is_empty()
526 {
527 Some(
528 self._get_validity_proof_v1_implementation(
529 compressed_accounts.unwrap_or_default(),
530 new_addresses_with_trees,
531 )
532 .await?,
533 )
534 } else {
535 None
536 };
537
538 let addresses = if let Some(rpc_result) = rpc_result.as_ref() {
540 rpc_result.addresses.to_vec()
541 } else {
542 Vec::new()
543 };
544 let accounts = {
545 let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() {
546 rpc_result.accounts.to_vec()
547 } else {
548 Vec::new()
549 };
550 #[cfg(debug_assertions)]
551 {
552 if std::env::var("RUST_BACKTRACE").is_ok() {
                        println!("get_validity_proof: rpc_result {:?}", rpc_result);
554 }
555 }
556
557 proof_inputs.reverse();
559 for index in indices_to_remove.iter().rev() {
561 if root_indices.len() <= *index {
562 root_indices.push(proof_inputs.pop().unwrap());
563 } else {
564 root_indices.insert(*index, proof_inputs.pop().unwrap());
565 }
566 }
567 root_indices
568 };
569
570 Ok(Response {
571 context: Context {
572 slot: self.get_current_slot(),
573 },
574 value: ValidityProofWithContext {
575 accounts,
576 addresses,
577 proof: rpc_result
578 .map(|rpc_result| rpc_result.proof.0.unwrap())
579 .into(),
580 },
581 })
582 }
583
584 #[cfg(not(feature = "v2"))]
585 {
586 let result = self
588 ._get_validity_proof_v1_implementation(hashes, new_addresses_with_trees)
589 .await?;
590 Ok(Response {
591 context: Context {
592 slot: self.get_current_slot(),
593 },
594 value: result,
595 })
596 }
597 }
598
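    // v2 only. Address trees return up to `num_elements` queued addresses with empty
    // proofs; state trees return inclusion proofs for input (nullifier) or output queue
    // elements, padding the local tree with zero leaves when a requested leaf index is
    // beyond the current next index.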
599 async fn get_queue_elements(
600 &mut self,
601 _merkle_tree_pubkey: [u8; 32],
602 _queue_type: QueueType,
603 _num_elements: u16,
604 _start_offset: Option<u64>,
605 _config: Option<IndexerRpcConfig>,
606 ) -> Result<Response<Items<MerkleProofWithContext>>, IndexerError> {
607 #[cfg(not(feature = "v2"))]
608 unimplemented!("get_queue_elements");
609 #[cfg(feature = "v2")]
610 {
611 let merkle_tree_pubkey = _merkle_tree_pubkey;
612 let queue_type = _queue_type;
613 let num_elements = _num_elements;
614 let pubkey = Pubkey::new_from_array(merkle_tree_pubkey);
615 let address_tree_bundle = self
616 .address_merkle_trees
617 .iter()
618 .find(|x| x.accounts.merkle_tree == pubkey);
619 if let Some(address_tree_bundle) = address_tree_bundle {
620 let end_offset = std::cmp::min(
621 num_elements as usize,
622 address_tree_bundle.queue_elements.len(),
623 );
624 let queue_elements = address_tree_bundle.queue_elements[0..end_offset].to_vec();
625
626 let merkle_proofs_with_context = queue_elements
627 .iter()
628 .map(|element| MerkleProofWithContext {
629 proof: Vec::new(),
630 leaf: [0u8; 32],
631 leaf_index: 0,
632 merkle_tree: address_tree_bundle.accounts.merkle_tree.to_bytes(),
633 root: address_tree_bundle.root(),
634 tx_hash: None,
635 root_seq: 0,
636 account_hash: *element,
637 })
638 .collect();
639 return Ok(Response {
640 context: Context {
641 slot: self.get_current_slot(),
642 },
643 value: Items {
644 items: merkle_proofs_with_context,
645 },
646 });
647 }
648
649 let state_tree_bundle = self
650 .state_merkle_trees
651 .iter_mut()
652 .find(|x| x.accounts.merkle_tree == pubkey);
653 if queue_type == QueueType::InputStateV2 {
654 if let Some(state_tree_bundle) = state_tree_bundle {
655 let end_offset = std::cmp::min(
656 num_elements as usize,
657 state_tree_bundle.input_leaf_indices.len(),
658 );
659 let queue_elements =
660 state_tree_bundle.input_leaf_indices[0..end_offset].to_vec();
661 let merkle_proofs = queue_elements
662 .iter()
663 .map(|leaf_info| {
664 match state_tree_bundle
665 .merkle_tree
666 .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
667 {
668 Ok(proof) => proof.to_vec(),
                                    Err(_) => {
                                        // Leaf not yet in the local tree: pad with zero leaves up
                                        // to the requested index, then return the proof.
                                        let mut next_index =
                                            state_tree_bundle.merkle_tree.get_next_index() as u64;
                                        while next_index < leaf_info.leaf_index as u64 {
                                            state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
                                            next_index =
                                                state_tree_bundle.merkle_tree.get_next_index() as u64;
                                        }
                                        state_tree_bundle
                                            .merkle_tree
                                            .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
                                            .unwrap()
                                            .to_vec()
                                    }
684 }
685 })
686 .collect::<Vec<_>>();
687 let leaves = queue_elements
688 .iter()
689 .map(|leaf_info| {
690 state_tree_bundle
691 .merkle_tree
692 .get_leaf(leaf_info.leaf_index as usize)
693 .unwrap_or_default()
694 })
695 .collect::<Vec<_>>();
696 let merkle_proofs_with_context = merkle_proofs
697 .iter()
698 .zip(queue_elements.iter())
699 .zip(leaves.iter())
700 .map(|((proof, element), leaf)| MerkleProofWithContext {
701 proof: proof.clone(),
702 leaf: *leaf,
703 leaf_index: element.leaf_index as u64,
704 merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
705 root: state_tree_bundle.merkle_tree.root(),
706 tx_hash: Some(element.tx_hash),
707 root_seq: 0,
708 account_hash: element.leaf,
709 })
710 .collect();
711
712 return Ok(Response {
713 context: Context {
714 slot: self.get_current_slot(),
715 },
716 value: Items {
717 items: merkle_proofs_with_context,
718 },
719 });
720 }
721 }
722
723 if queue_type == QueueType::OutputStateV2 {
724 if let Some(state_tree_bundle) = state_tree_bundle {
725 let end_offset = std::cmp::min(
726 num_elements as usize,
727 state_tree_bundle.output_queue_elements.len(),
728 );
729 let queue_elements =
730 state_tree_bundle.output_queue_elements[0..end_offset].to_vec();
731 let indices = queue_elements
732 .iter()
733 .map(|(_, index)| index)
734 .collect::<Vec<_>>();
735 let merkle_proofs = indices
736 .iter()
737 .map(|index| {
738 match state_tree_bundle
739 .merkle_tree
740 .get_proof_of_leaf(**index as usize, true)
741 {
742 Ok(proof) => proof.to_vec(),
                                    Err(_) => {
                                        // Leaf not yet in the local tree: pad with zero leaves up
                                        // to the requested index, then return the proof.
                                        let mut next_index =
                                            state_tree_bundle.merkle_tree.get_next_index() as u64;
                                        while next_index < **index {
                                            state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
                                            next_index =
                                                state_tree_bundle.merkle_tree.get_next_index() as u64;
                                        }
                                        state_tree_bundle
                                            .merkle_tree
                                            .get_proof_of_leaf(**index as usize, true)
                                            .unwrap()
                                            .to_vec()
                                    }
758 }
759 })
760 .collect::<Vec<_>>();
761 let leaves = indices
762 .iter()
763 .map(|index| {
764 state_tree_bundle
765 .merkle_tree
766 .get_leaf(**index as usize)
767 .unwrap_or_default()
768 })
769 .collect::<Vec<_>>();
770 let merkle_proofs_with_context = merkle_proofs
771 .iter()
772 .zip(queue_elements.iter())
773 .zip(leaves.iter())
774 .map(|((proof, (element, index)), leaf)| MerkleProofWithContext {
775 proof: proof.clone(),
776 leaf: *leaf,
777 leaf_index: *index,
778 merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
779 root: state_tree_bundle.merkle_tree.root(),
780 tx_hash: None,
781 root_seq: 0,
782 account_hash: *element,
783 })
784 .collect();
785 return Ok(Response {
786 context: Context {
787 slot: self.get_current_slot(),
788 },
789 value: Items {
790 items: merkle_proofs_with_context,
791 },
792 });
793 }
794 }
795
796 Err(IndexerError::InvalidParameters(
797 "Merkle tree not found".to_string(),
798 ))
799 }
800 }
801
802 async fn get_subtrees(
803 &self,
804 _merkle_tree_pubkey: [u8; 32],
805 _config: Option<IndexerRpcConfig>,
806 ) -> Result<Response<Items<[u8; 32]>>, IndexerError> {
807 #[cfg(not(feature = "v2"))]
808 unimplemented!("get_subtrees");
809 #[cfg(feature = "v2")]
810 {
811 let merkle_tree_pubkey = Pubkey::new_from_array(_merkle_tree_pubkey);
812 let address_tree_bundle = self
813 .address_merkle_trees
814 .iter()
815 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey);
816 if let Some(address_tree_bundle) = address_tree_bundle {
817 Ok(Response {
818 context: Context {
819 slot: self.get_current_slot(),
820 },
821 value: Items {
822 items: address_tree_bundle.get_subtrees(),
823 },
824 })
825 } else {
826 let state_tree_bundle = self
827 .state_merkle_trees
828 .iter()
829 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey);
830 if let Some(state_tree_bundle) = state_tree_bundle {
831 Ok(Response {
832 context: Context {
833 slot: self.get_current_slot(),
834 },
835 value: Items {
836 items: state_tree_bundle.merkle_tree.get_subtrees(),
837 },
838 })
839 } else {
840 Err(IndexerError::InvalidParameters(
841 "Merkle tree not found".to_string(),
842 ))
843 }
844 }
845 }
846 }
847
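    // v2 only. Collects the inputs for a batched address tree update: queued addresses
    // with their queue indices, non-inclusion proofs, and the tree's subtrees.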
848 async fn get_address_queue_with_proofs(
849 &mut self,
850 _merkle_tree_pubkey: &Pubkey,
851 _zkp_batch_size: u16,
852 _start_offset: Option<u64>,
853 _config: Option<IndexerRpcConfig>,
854 ) -> Result<Response<BatchAddressUpdateIndexerResponse>, IndexerError> {
855 #[cfg(not(feature = "v2"))]
856 unimplemented!("get_address_queue_with_proofs");
857 #[cfg(feature = "v2")]
858 {
859 use light_client::indexer::AddressQueueIndex;
860 let merkle_tree_pubkey = _merkle_tree_pubkey;
861 let zkp_batch_size = _zkp_batch_size;
862
863 let batch_start_index = self
864 .get_address_merkle_trees()
865 .iter()
866 .find(|x| x.accounts.merkle_tree == *merkle_tree_pubkey)
867 .unwrap()
868 .get_v2_indexed_merkle_tree()
869 .ok_or(IndexerError::Unknown(
870 "Failed to get v2 indexed merkle tree".into(),
871 ))?
872 .merkle_tree
873 .rightmost_index;
874
875 let address_proofs = self
876 .get_queue_elements(
877 merkle_tree_pubkey.to_bytes(),
878 QueueType::AddressV2,
879 zkp_batch_size,
880 None,
881 None,
882 )
883 .await
884 .map_err(|_| IndexerError::Unknown("Failed to get queue elements".into()))?
885 .value;
886
887 let addresses: Vec<AddressQueueIndex> = address_proofs
888 .items
889 .iter()
890 .enumerate()
891 .map(|(i, proof)| AddressQueueIndex {
892 address: proof.account_hash,
893 queue_index: proof.root_seq + i as u64,
894 })
895 .collect();
896 let non_inclusion_proofs = self
897 .get_multiple_new_address_proofs(
898 merkle_tree_pubkey.to_bytes(),
899 address_proofs
900 .items
901 .iter()
902 .map(|x| x.account_hash)
903 .collect(),
904 None,
905 )
906 .await
907 .map_err(|_| {
908 IndexerError::Unknown(
909 "Failed to get get_multiple_new_address_proofs_full".into(),
910 )
911 })?
912 .value;
913
914 let subtrees = self
915 .get_subtrees(merkle_tree_pubkey.to_bytes(), None)
916 .await
917 .map_err(|_| IndexerError::Unknown("Failed to get subtrees".into()))?
918 .value;
919
920 Ok(Response {
921 context: Context {
922 slot: self.get_current_slot(),
923 },
924 value: BatchAddressUpdateIndexerResponse {
925 batch_start_index: batch_start_index as u64,
926 addresses,
927 non_inclusion_proofs: non_inclusion_proofs.items,
928 subtrees: subtrees.items,
929 },
930 })
931 }
932 }
933
934 async fn get_compressed_balance_by_owner(
936 &self,
937 _owner: &Pubkey,
938 _config: Option<IndexerRpcConfig>,
939 ) -> Result<Response<u64>, IndexerError> {
940 todo!("get_compressed_balance_by_owner not implemented")
941 }
942
943 async fn get_compressed_mint_token_holders(
944 &self,
945 _mint: &Pubkey,
946 _options: Option<PaginatedOptions>,
947 _config: Option<IndexerRpcConfig>,
948 ) -> Result<Response<ItemsWithCursor<OwnerBalance>>, IndexerError> {
949 todo!("get_compressed_mint_token_holders not implemented")
950 }
951
952 async fn get_compressed_token_accounts_by_delegate(
953 &self,
954 _delegate: &Pubkey,
955 _options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
956 _config: Option<IndexerRpcConfig>,
957 ) -> Result<Response<ItemsWithCursor<TokenAccount>>, IndexerError> {
958 todo!("get_compressed_token_accounts_by_delegate not implemented")
959 }
960
961 async fn get_compression_signatures_for_address(
962 &self,
963 _address: &[u8; 32],
964 _options: Option<PaginatedOptions>,
965 _config: Option<IndexerRpcConfig>,
966 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
967 todo!("get_compression_signatures_for_address not implemented")
968 }
969
970 async fn get_compression_signatures_for_owner(
971 &self,
972 _owner: &Pubkey,
973 _options: Option<PaginatedOptions>,
974 _config: Option<IndexerRpcConfig>,
975 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
976 todo!("get_compression_signatures_for_owner not implemented")
977 }
978
979 async fn get_compression_signatures_for_token_owner(
980 &self,
981 _owner: &Pubkey,
982 _options: Option<PaginatedOptions>,
983 _config: Option<IndexerRpcConfig>,
984 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
985 todo!("get_compression_signatures_for_token_owner not implemented")
986 }
987
988 async fn get_indexer_health(&self, _config: Option<RetryConfig>) -> Result<bool, IndexerError> {
989 todo!("get_indexer_health not implemented")
990 }
991}
992
993#[async_trait]
994impl TestIndexerExtensions for TestIndexer {
995 fn get_address_merkle_tree(
996 &self,
997 merkle_tree_pubkey: Pubkey,
998 ) -> Option<&AddressMerkleTreeBundle> {
999 self.address_merkle_trees
1000 .iter()
1001 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1002 }
1003
1004 fn add_compressed_accounts_with_token_data(
1011 &mut self,
1012 slot: u64,
1013 event: &PublicTransactionEvent,
1014 ) {
1015 TestIndexer::add_event_and_compressed_accounts(self, slot, event);
1016 }
1017
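    // Decodes the bs58-encoded account hash and zeroes the corresponding leaf in the
    // matching local state tree, mirroring an on-chain nullification.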
1018 fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) {
1019 let decoded_hash: [u8; 32] = bs58::decode(account_hash)
1020 .into_vec()
1021 .unwrap()
1022 .as_slice()
1023 .try_into()
1024 .unwrap();
1025
1026 if let Some(state_tree_bundle) = self
1027 .state_merkle_trees
1028 .iter_mut()
1029 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1030 {
1031 if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) {
1032 state_tree_bundle
1033 .merkle_tree
1034 .update(&[0u8; 32], leaf_index)
1035 .unwrap();
1036 }
1037 }
1038 }
1039
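    // Applies an address tree update to the local v1 indexed Merkle tree: updates the
    // low element and appends the new element.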
1040 fn address_tree_updated(
1041 &mut self,
1042 merkle_tree_pubkey: Pubkey,
1043 context: &NewAddressProofWithContext,
1044 ) {
1045 info!("Updating address tree...");
1046 let pos = self
1047 .address_merkle_trees
1048 .iter()
1049 .position(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1050 .unwrap();
1051 let new_low_element = context.new_low_element.clone().unwrap();
1052 let new_element = context.new_element.clone().unwrap();
1053 let new_element_next_value = context.new_element_next_value.clone().unwrap();
1054 self.address_merkle_trees[pos]
1056 .get_v1_indexed_merkle_tree_mut()
1057 .expect("Failed to get v1 indexed merkle tree.")
1058 .update(&new_low_element, &new_element, &new_element_next_value)
1059 .unwrap();
1060 self.address_merkle_trees[pos]
1061 .append_with_low_element_index(new_low_element.index, &new_element.value)
1062 .unwrap();
1063 info!("Address tree updated");
1064 }
1065
1066 fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec<StateMerkleTreeAccounts> {
1067 pubkeys
1068 .iter()
1069 .map(|x| {
1070 self.state_merkle_trees
1071 .iter()
1072 .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x)
1073 .unwrap()
1074 .accounts
1075 })
1076 .collect::<Vec<_>>()
1077 }
1078
1079 fn get_state_merkle_trees(&self) -> &Vec<StateMerkleTreeBundle> {
1080 &self.state_merkle_trees
1081 }
1082
1083 fn get_state_merkle_trees_mut(&mut self) -> &mut Vec<StateMerkleTreeBundle> {
1084 &mut self.state_merkle_trees
1085 }
1086
1087 fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> {
1088 &self.address_merkle_trees
1089 }
1090
1091 fn get_address_merkle_trees_mut(&mut self) -> &mut Vec<AddressMerkleTreeBundle> {
1092 &mut self.address_merkle_trees
1093 }
1094
1095 fn get_token_compressed_accounts(&self) -> &Vec<TokenDataWithMerkleContext> {
1096 &self.token_compressed_accounts
1097 }
1098
1099 fn get_group_pda(&self) -> &Pubkey {
1100 &self.group_pda
1101 }
1102
1103 fn add_address_merkle_tree_accounts(
1104 &mut self,
1105 merkle_tree_keypair: &Keypair,
1106 queue_keypair: &Keypair,
1107 _owning_program_id: Option<Pubkey>,
1108 ) -> AddressMerkleTreeAccounts {
1109 info!("Adding address merkle tree accounts...");
1110 let address_merkle_tree_accounts = AddressMerkleTreeAccounts {
1111 merkle_tree: merkle_tree_keypair.pubkey(),
1112 queue: queue_keypair.pubkey(),
1113 };
1114 self.address_merkle_trees
1115 .push(Self::add_address_merkle_tree_bundle(address_merkle_tree_accounts).unwrap());
1116 info!(
1117 "Address merkle tree accounts added. Total: {}",
1118 self.address_merkle_trees.len()
1119 );
1120 address_merkle_tree_accounts
1121 }
1122
1123 fn get_compressed_accounts_with_merkle_context_by_owner(
1124 &self,
1125 owner: &Pubkey,
1126 ) -> Vec<CompressedAccountWithMerkleContext> {
1127 self.compressed_accounts
1128 .iter()
1129 .filter(|x| x.compressed_account.owner.to_bytes() == owner.to_bytes())
1130 .cloned()
1131 .collect()
1132 }
1133
1134 fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) {
1135 Self::get_state_merkle_trees_mut(self).push(state_bundle);
1136 }
1137
1138 fn add_event_and_compressed_accounts(
1139 &mut self,
1140 slot: u64,
1141 event: &PublicTransactionEvent,
1142 ) -> (
1143 Vec<CompressedAccountWithMerkleContext>,
1144 Vec<TokenDataWithMerkleContext>,
1145 ) {
1146 let mut compressed_accounts = Vec::new();
1147 let mut token_compressed_accounts = Vec::new();
1148 let event_inputs_len = event.input_compressed_account_hashes.len();
1149 let event_outputs_len = event.output_compressed_account_hashes.len();
1150 for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) {
1151 self.process_v1_compressed_account(
1152 slot,
1153 event,
1154 i,
1155 &mut token_compressed_accounts,
1156 &mut compressed_accounts,
1157 );
1158 }
1159
1160 self.events.push(event.clone());
1161 (compressed_accounts, token_compressed_accounts)
1162 }
1163
1164 fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> MerkleProof {
1165 let bundle = self
1166 .state_merkle_trees
1167 .iter_mut()
1168 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1169 .unwrap();
1170
1171 while bundle.merkle_tree.leaves().len() <= index as usize {
1172 bundle.merkle_tree.append(&[0u8; 32]).unwrap();
1173 }
1174
1175 let leaf = match bundle.merkle_tree.get_leaf(index as usize) {
1176 Ok(leaf) => leaf,
1177 Err(_) => {
1178 bundle.merkle_tree.append(&[0u8; 32]).unwrap();
1179 bundle.merkle_tree.get_leaf(index as usize).unwrap()
1180 }
1181 };
1182
1183 let proof = bundle
1184 .merkle_tree
1185 .get_proof_of_leaf(index as usize, true)
1186 .unwrap()
1187 .to_vec();
1188
1189 MerkleProof {
1190 hash: leaf,
1191 leaf_index: index,
1192 merkle_tree: merkle_tree_pubkey,
1193 proof,
1194 root_seq: bundle.merkle_tree.sequence_number as u64,
1195 root: bundle.merkle_tree.root(),
1196 }
1197 }
1198
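    // Replays queued addresses into the local v2 indexed tree until it catches up with
    // the on-chain `next_index`, then asserts that the local root matches the on-chain root.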
1199 async fn finalize_batched_address_tree_update(
1200 &mut self,
1201 merkle_tree_pubkey: Pubkey,
1202 account_data: &mut [u8],
1203 ) {
1204 let onchain_account =
1205 BatchedMerkleTreeAccount::address_from_bytes(account_data, &merkle_tree_pubkey.into())
1206 .unwrap();
1207 let address_tree = self
1208 .address_merkle_trees
1209 .iter_mut()
1210 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1211 .unwrap();
1212 let address_tree_index = address_tree.right_most_index();
1213 let onchain_next_index = onchain_account.next_index;
1214 let diff_onchain_indexer = onchain_next_index - address_tree_index as u64;
1215 let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec();
1216 for _ in 0..diff_onchain_indexer {
1217 address_tree.queue_elements.remove(0);
1218 }
1219 for new_element_value in &addresses {
1220 address_tree
1221 .append(&BigUint::from_bytes_be(new_element_value))
1222 .unwrap();
1223 }
1224 match &mut address_tree.merkle_tree {
1225 IndexedMerkleTreeVersion::V2(tree) => tree.merkle_tree.num_root_updates += 1,
1226 IndexedMerkleTreeVersion::V1(_) => {
1227 unimplemented!("finalize_batched_address_tree_update not implemented for v1 trees.")
1228 }
1229 }
1230 let onchain_root = onchain_account.root_history.last().unwrap();
1231 let new_root = address_tree.root();
1232 assert_eq!(*onchain_root, new_root);
1233 }
1234}
1235
1236impl TestIndexer {
1237 fn get_current_slot(&self) -> u64 {
1238 u64::MAX
1240 }
1241
1242 pub async fn init_from_acounts(
1243 payer: &Keypair,
1244 env: &TestAccounts,
1245 output_queue_batch_size: usize,
1246 ) -> Self {
1247 let mut state_merkle_tree_accounts = env.v1_state_trees.clone();
1249
1250 for v2_state_tree in &env.v2_state_trees {
1252 state_merkle_tree_accounts.push(StateMerkleTreeAccounts {
1253 merkle_tree: v2_state_tree.merkle_tree,
1254 nullifier_queue: v2_state_tree.output_queue,
1255 cpi_context: v2_state_tree.cpi_context,
1256 });
1257 }
1258
1259 let mut address_merkle_tree_accounts = env.v1_address_trees.clone();
1261
1262 for &v2_address_tree in &env.v2_address_trees {
1264 address_merkle_tree_accounts.push(AddressMerkleTreeAccounts {
1265 merkle_tree: v2_address_tree,
1266 queue: v2_address_tree,
1267 });
1268 }
1269
1270 Self::new(
1271 state_merkle_tree_accounts,
1272 address_merkle_tree_accounts,
1273 payer.insecure_clone(),
1274 env.protocol.group_pda,
1275 output_queue_batch_size,
1276 )
1277 .await
1278 }
1279
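    // Builds the in-memory reference trees. A state tree whose nullifier queue matches
    // `BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR` is treated as a batched (StateV2) tree; all
    // other state trees are concurrent (StateV1) trees.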
1280 pub async fn new(
1281 state_merkle_tree_accounts: Vec<StateMerkleTreeAccounts>,
1282 address_merkle_tree_accounts: Vec<AddressMerkleTreeAccounts>,
1283 payer: Keypair,
1284 group_pda: Pubkey,
1285 output_queue_batch_size: usize,
1286 ) -> Self {
1287 let mut state_merkle_trees = Vec::new();
1288 for state_merkle_tree_account in state_merkle_tree_accounts.iter() {
1289 let test_batched_output_queue =
1290 Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap();
1291 let (tree_type, merkle_tree, output_queue_batch_size) = if state_merkle_tree_account
1292 .nullifier_queue
1293 == test_batched_output_queue.pubkey()
1294 {
1295 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1296 DEFAULT_BATCH_STATE_TREE_HEIGHT as usize,
1297 0,
1298 0,
1299 DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN as usize,
1300 ));
1301 (
1302 TreeType::StateV2,
1303 merkle_tree,
1304 Some(output_queue_batch_size),
1305 )
1306 } else {
1307 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1308 account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize,
1309 account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
1310 0,
1311 account_compression::utils::constants::STATE_MERKLE_TREE_ROOTS as usize,
1312 ));
1313 (TreeType::StateV1, merkle_tree, None)
1314 };
1315
1316 state_merkle_trees.push(StateMerkleTreeBundle {
1317 accounts: *state_merkle_tree_account,
1318 merkle_tree,
1319 rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64,
1320 tree_type,
1321 output_queue_elements: vec![],
1322 input_leaf_indices: vec![],
1323 output_queue_batch_size,
1324 num_inserted_batches: 0,
1325 });
1326 }
1327
1328 let mut address_merkle_trees = Vec::new();
1329 for address_merkle_tree_account in address_merkle_tree_accounts {
1330 address_merkle_trees
1331 .push(Self::add_address_merkle_tree_bundle(address_merkle_tree_account).unwrap());
1332 }
1333
1334 Self {
1335 state_merkle_trees,
1336 address_merkle_trees,
1337 payer,
1338 compressed_accounts: vec![],
1339 nullified_compressed_accounts: vec![],
1340 events: vec![],
1341 token_compressed_accounts: vec![],
1342 token_nullified_compressed_accounts: vec![],
1343 group_pda,
1344 }
1345 }
1346
1347 pub fn add_address_merkle_tree_bundle(
1348 address_merkle_tree_accounts: AddressMerkleTreeAccounts,
1349 ) -> Result<AddressMerkleTreeBundle, IndexerError> {
1351 if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue {
1352 AddressMerkleTreeBundle::new_v2(address_merkle_tree_accounts)
1353 } else {
1354 AddressMerkleTreeBundle::new_v1(address_merkle_tree_accounts)
1355 }
1356 }
1357
1358 async fn add_address_merkle_tree_v1<R: Rpc>(
1359 &mut self,
1360 rpc: &mut R,
1361 merkle_tree_keypair: &Keypair,
1362 queue_keypair: &Keypair,
1363 owning_program_id: Option<Pubkey>,
1364 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1365 create_address_merkle_tree_and_queue_account(
1366 &self.payer,
1367 true,
1368 rpc,
1369 merkle_tree_keypair,
1370 queue_keypair,
1371 owning_program_id,
1372 None,
1373 &AddressMerkleTreeConfig::default(),
1374 &AddressQueueConfig::default(),
1375 0,
1376 )
1377 .await?;
1378
1379 let accounts = <TestIndexer as TestIndexerExtensions>::add_address_merkle_tree_accounts(
1380 self,
1381 merkle_tree_keypair,
1382 queue_keypair,
1383 owning_program_id,
1384 );
1385 Ok(accounts)
1386 }
1387
1388 #[cfg(feature = "devenv")]
1389 async fn add_address_merkle_tree_v2<R: Rpc>(
1390 &mut self,
1391 rpc: &mut R,
1392 merkle_tree_keypair: &Keypair,
1393 queue_keypair: &Keypair,
1394 _owning_program_id: Option<Pubkey>,
1395 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1396 info!(
1397 "Adding address merkle tree accounts v2 {:?}",
1398 merkle_tree_keypair.pubkey()
1399 );
1400
1401 let params = light_batched_merkle_tree::initialize_address_tree::InitAddressTreeAccountsInstructionData::test_default();
1402
1403 info!(
1404 "Creating batched address merkle tree {:?}",
1405 merkle_tree_keypair.pubkey()
1406 );
1407 create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params).await?;
1408 info!(
1409 "Batched address merkle tree created {:?}",
1410 merkle_tree_keypair.pubkey()
1411 );
1412
1413 let accounts = self.add_address_merkle_tree_accounts(
1414 merkle_tree_keypair,
1415 queue_keypair,
1416 _owning_program_id,
1417 );
1418 Ok(accounts)
1419 }
1420
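    // Dispatches to the v1 or v2 (devenv-only) address tree creation path based on `tree_type`.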
1421 pub async fn add_address_merkle_tree<R: Rpc>(
1422 &mut self,
1423 rpc: &mut R,
1424 merkle_tree_keypair: &Keypair,
1425 queue_keypair: &Keypair,
1426 owning_program_id: Option<Pubkey>,
1427 tree_type: TreeType,
1428 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1429 if tree_type == TreeType::AddressV1 {
1430 self.add_address_merkle_tree_v1(
1431 rpc,
1432 merkle_tree_keypair,
1433 queue_keypair,
1434 owning_program_id,
1435 )
1436 .await
1437 } else if tree_type == TreeType::AddressV2 {
1438 #[cfg(not(feature = "devenv"))]
1439 panic!("Batched address merkle trees require the 'devenv' feature to be enabled");
1440 #[cfg(feature = "devenv")]
1441 self.add_address_merkle_tree_v2(
1442 rpc,
1443 merkle_tree_keypair,
1444 queue_keypair,
1445 owning_program_id,
1446 )
1447 .await
1448 } else {
1449 Err(RpcError::CustomError(format!(
1450 "add_address_merkle_tree: Version not supported, {}. Versions: AddressV1, AddressV2",
1451 tree_type
1452 )))
1453 }
1454 }
1455
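    // Creates the on-chain state tree accounts (v1 concurrent, or v2 batched behind the
    // `devenv` feature) and registers a matching local bundle.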
1456 #[allow(clippy::too_many_arguments)]
1457 pub async fn add_state_merkle_tree<R: Rpc>(
1458 &mut self,
1459 rpc: &mut R,
1460 merkle_tree_keypair: &Keypair,
1461 queue_keypair: &Keypair,
1462 cpi_context_keypair: &Keypair,
1463 owning_program_id: Option<Pubkey>,
1464 forester: Option<Pubkey>,
1465 tree_type: TreeType,
1466 ) {
1467 let (rollover_fee, merkle_tree, output_queue_batch_size) = match tree_type {
1468 TreeType::StateV1 => {
1469 create_state_merkle_tree_and_queue_account(
1470 &self.payer,
1471 true,
1472 rpc,
1473 merkle_tree_keypair,
1474 queue_keypair,
1475 Some(cpi_context_keypair),
1476 owning_program_id,
1477 forester,
1478 self.state_merkle_trees.len() as u64,
1479 &StateMerkleTreeConfig::default(),
1480 &NullifierQueueConfig::default(),
1481 )
1482 .await
1483 .unwrap();
                let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
                    account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize,
                    account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
                    0,
                    account_compression::utils::constants::STATE_MERKLE_TREE_ROOTS as usize,
                ));
                (
                    FeeConfig::default().state_merkle_tree_rollover as i64,
                    merkle_tree,
                    None,
                )
1492 }
1493 TreeType::StateV2 => {
1494 #[cfg(feature = "devenv")]
1495 {
1496 let params = light_batched_merkle_tree::initialize_state_tree::InitStateTreeAccountsInstructionData::test_default();
1497
1498 create_batched_state_merkle_tree(
1499 &self.payer,
1500 true,
1501 rpc,
1502 merkle_tree_keypair,
1503 queue_keypair,
1504 cpi_context_keypair,
1505 params,
1506 ).await.unwrap();
                    let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
                        DEFAULT_BATCH_STATE_TREE_HEIGHT as usize,
                        0,
                        0,
                        DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN as usize,
                    ));
                    (
                        FeeConfig::test_batched().state_merkle_tree_rollover as i64,
                        merkle_tree,
                        Some(params.output_queue_batch_size as usize),
                    )
1515 }
1516
1517 #[cfg(not(feature = "devenv"))]
1518 panic!("Batched state merkle trees require the 'devenv' feature to be enabled")
1519 }
            _ => panic!(
                "add_state_merkle_tree: tree_type not supported: {}. Supported: StateV1 (concurrent), StateV2 (batched)",
                tree_type
            ),
1524 };
1525 let state_merkle_tree_account = StateMerkleTreeAccounts {
1526 merkle_tree: merkle_tree_keypair.pubkey(),
1527 nullifier_queue: queue_keypair.pubkey(),
1528 cpi_context: cpi_context_keypair.pubkey(),
1529 };
1530
1531 self.state_merkle_trees.push(StateMerkleTreeBundle {
1532 merkle_tree,
1533 accounts: state_merkle_tree_account,
1534 rollover_fee,
1535 tree_type,
1536 output_queue_elements: vec![],
1537 input_leaf_indices: vec![],
1538 num_inserted_batches: 0,
1539 output_queue_batch_size,
1540 });
1541 println!(
1542 "creating Merkle tree bundle {:?}",
1543 self.state_merkle_trees
1544 .iter()
1545 .map(|x| x.accounts.merkle_tree)
1546 .collect::<Vec<_>>()
1547 );
1548 }
1549
1550 pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
1556 let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
1557 <TestIndexer as TestIndexerExtensions>::add_event_and_compressed_accounts(
1559 self, slot, &event,
1560 );
1561 }
1562
1563 pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
1565 self.compressed_accounts
1566 .iter()
1567 .filter(|x| x.compressed_account.owner.to_bytes() == owner.to_bytes())
1568 .map(|x| x.compressed_account.lamports)
1569 .sum()
1570 }
1571
1572 pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 {
1574 self.token_compressed_accounts
1575 .iter()
1576 .filter(|x| {
1577 x.compressed_account.compressed_account.owner.to_bytes() == owner.to_bytes()
1578 && x.token_data.mint == *mint
1579 })
1580 .map(|x| x.token_data.amount)
1581 .sum()
1582 }
1583
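    // Processes the i-th input/output of an event: nullifies a matching input account
    // (recording `input_leaf_indices` for StateV2 trees), indexes the i-th output account,
    // appends its leaf to v1 trees or pushes it to the output queue for v2 trees, and
    // queues newly created addresses on the matching address tree.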
1584 fn process_v1_compressed_account(
1585 &mut self,
1586 slot: u64,
1587 event: &PublicTransactionEvent,
1588 i: usize,
1589 token_compressed_accounts: &mut Vec<TokenDataWithMerkleContext>,
1590 compressed_accounts: &mut Vec<CompressedAccountWithMerkleContext>,
1591 ) {
1592 let mut input_addresses = vec![];
1593 if event.input_compressed_account_hashes.len() > i {
1594 let tx_hash: [u8; 32] = create_tx_hash(
1595 &event.input_compressed_account_hashes,
1596 &event.output_compressed_account_hashes,
1597 slot,
1598 )
1599 .unwrap();
1600 let hash = event.input_compressed_account_hashes[i];
1601 let index = self
1602 .compressed_accounts
1603 .iter()
1604 .position(|x| x.hash().unwrap() == hash);
1605 let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index {
1606 self.nullified_compressed_accounts
1607 .push(self.compressed_accounts[index].clone());
1608 let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index;
1609 let merkle_tree_pubkey = self.compressed_accounts[index]
1610 .merkle_context
1611 .merkle_tree_pubkey;
1612 if let Some(address) = self.compressed_accounts[index].compressed_account.address {
1613 input_addresses.push(address);
1614 }
1615 self.compressed_accounts.remove(index);
1616 (leaf_index, merkle_tree_pubkey)
1617 } else {
1618 let index = self
1619 .token_compressed_accounts
1620 .iter()
1621 .position(|x| x.compressed_account.hash().unwrap() == hash)
1622 .expect("input compressed account not found");
1623 self.token_nullified_compressed_accounts
1624 .push(self.token_compressed_accounts[index].clone());
1625 let leaf_index = self.token_compressed_accounts[index]
1626 .compressed_account
1627 .merkle_context
1628 .leaf_index;
1629 let merkle_tree_pubkey = self.token_compressed_accounts[index]
1630 .compressed_account
1631 .merkle_context
1632 .merkle_tree_pubkey;
1633 self.token_compressed_accounts.remove(index);
1634 (leaf_index, merkle_tree_pubkey)
1635 };
1636 let bundle =
1637 &mut <TestIndexer as TestIndexerExtensions>::get_state_merkle_trees_mut(self)
1638 .iter_mut()
1639 .find(|x| {
1640 x.accounts.merkle_tree
1641 == solana_pubkey::Pubkey::from(merkle_tree_pubkey.to_bytes())
1642 })
1643 .unwrap();
1644 if bundle.tree_type == TreeType::StateV2 {
1646 let leaf_hash = event.input_compressed_account_hashes[i];
1647 bundle.input_leaf_indices.push(LeafIndexInfo {
1648 leaf_index,
1649 leaf: leaf_hash,
1650 tx_hash,
1651 });
1652 }
1653 }
1654 let mut new_addresses = vec![];
1655 if event.output_compressed_accounts.len() > i {
1656 let compressed_account = &event.output_compressed_accounts[i];
1657 if let Some(address) = compressed_account.compressed_account.address {
1658 if !input_addresses.iter().any(|x| x == &address) {
1659 new_addresses.push(address);
1660 }
1661 }
1662 let merkle_tree = self.state_merkle_trees.iter().find(|x| {
1663 x.accounts.merkle_tree
1664 == solana_pubkey::Pubkey::from(
1665 event.pubkey_array
1666 [event.output_compressed_accounts[i].merkle_tree_index as usize]
1667 .to_bytes(),
1668 )
1669 });
1670 let merkle_tree = if let Some(merkle_tree) = merkle_tree {
1672 merkle_tree
1673 } else {
1674 self.state_merkle_trees
1675 .iter()
1676 .find(|x| {
1677 x.accounts.nullifier_queue
1678 == solana_pubkey::Pubkey::from(
1679 event.pubkey_array[event.output_compressed_accounts[i]
1680 .merkle_tree_index
1681 as usize]
1682 .to_bytes(),
1683 )
1684 })
1685 .unwrap()
1686 };
1687 let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue;
1688 let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree;
1689 match compressed_account.compressed_account.data.as_ref() {
1693 Some(data) => {
1694 if compressed_account.compressed_account.owner == light_compressed_token::ID.to_bytes()
1695 && data.discriminator == light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR
1696 {
1697 if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) {
1698 let token_account = TokenDataWithMerkleContext {
1699 token_data,
1700 compressed_account: CompressedAccountWithMerkleContext {
1701 compressed_account: compressed_account
1702 .compressed_account
1703 .clone(),
1704 merkle_context: MerkleContext {
1705 leaf_index: event.output_leaf_indices[i],
1706 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1707 queue_pubkey: nullifier_queue_pubkey.into(),
1708 prove_by_index: false,
                                    tree_type: merkle_tree.tree_type,
1710 },
1711 },
1712 };
1713 token_compressed_accounts.push(token_account.clone());
1714 self.token_compressed_accounts.insert(0, token_account);
1715 }
1716 } else {
1717 let compressed_account = CompressedAccountWithMerkleContext {
1718 compressed_account: compressed_account.compressed_account.clone(),
1719 merkle_context: MerkleContext {
1720 leaf_index: event.output_leaf_indices[i],
1721 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1722 queue_pubkey: nullifier_queue_pubkey.into(),
1723 prove_by_index: false,
1724 tree_type: merkle_tree.tree_type
1725 },
1726 };
1727 compressed_accounts.push(compressed_account.clone());
1728 self.compressed_accounts.insert(0, compressed_account);
1729 }
1730 }
1731 None => {
1732 let compressed_account = CompressedAccountWithMerkleContext {
1733 compressed_account: compressed_account.compressed_account.clone(),
1734 merkle_context: MerkleContext {
1735 leaf_index: event.output_leaf_indices[i],
1736 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1737 queue_pubkey: nullifier_queue_pubkey.into(),
1738 prove_by_index: false,
1739 tree_type: merkle_tree.tree_type,
1740 },
1741 };
1742 compressed_accounts.push(compressed_account.clone());
1743 self.compressed_accounts.insert(0, compressed_account);
1744 }
1745 };
1746 let merkle_tree = &mut self.state_merkle_trees.iter_mut().find(|x| {
1747 x.accounts.merkle_tree
1748 == solana_pubkey::Pubkey::from(
1749 event.pubkey_array
1750 [event.output_compressed_accounts[i].merkle_tree_index as usize]
1751 .to_bytes(),
1752 )
1753 });
1754 if merkle_tree.is_some() {
1755 let merkle_tree = merkle_tree.as_mut().unwrap();
1756 let leaf_hash = compressed_account
1757 .compressed_account
1758 .hash(
1759 &event.pubkey_array
1760 [event.output_compressed_accounts[i].merkle_tree_index as usize],
1761 &event.output_leaf_indices[i],
1762 false,
1763 )
1764 .unwrap();
1765 merkle_tree
1766 .merkle_tree
1767 .append(&leaf_hash)
1768 .expect("insert failed");
1769 } else {
1770 let merkle_tree = &mut self
1771 .state_merkle_trees
1772 .iter_mut()
1773 .find(|x| {
1774 x.accounts.nullifier_queue
1775 == solana_pubkey::Pubkey::from(
1776 event.pubkey_array[event.output_compressed_accounts[i]
1777 .merkle_tree_index
1778 as usize]
1779 .to_bytes(),
1780 )
1781 })
1782 .unwrap();
1783
1784 merkle_tree.output_queue_elements.push((
1785 event.output_compressed_account_hashes[i],
1786 event.output_leaf_indices[i].into(),
1787 ));
1788 }
1789 }
1790 if !new_addresses.is_empty() {
1797 for pubkey in event.pubkey_array.iter() {
1798 if let Some((_, address_merkle_tree)) = self
1799 .address_merkle_trees
1800 .iter_mut()
1801 .enumerate()
1802 .find(|(_, x)| {
1803 x.accounts.merkle_tree == solana_pubkey::Pubkey::from(pubkey.to_bytes())
1804 })
1805 {
1806 address_merkle_tree
1807 .queue_elements
1808 .append(&mut new_addresses);
1809 }
1810 }
1811 }
1812 }
1813
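    // Builds non-inclusion (new address) proofs against the local indexed address tree,
    // returning the low element, its Merkle proof, and the prospective new element for
    // each requested address.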
1814 async fn _get_multiple_new_address_proofs(
1815 &self,
1816 merkle_tree_pubkey: [u8; 32],
1817 addresses: Vec<[u8; 32]>,
1818 full: bool,
1819 ) -> Result<Vec<NewAddressProofWithContext>, IndexerError> {
1820 let mut proofs: Vec<NewAddressProofWithContext> = Vec::new();
1821
1822 for address in addresses.iter() {
1823 info!("Getting new address proof for {:?}", address);
1824 let pubkey = Pubkey::from(merkle_tree_pubkey);
1825 let address_tree_bundle = self
1826 .address_merkle_trees
1827 .iter()
1828 .find(|x| x.accounts.merkle_tree == pubkey)
1829 .unwrap();
1830
1831 let address_biguint = BigUint::from_bytes_be(address.as_slice());
1832 let (old_low_address, _old_low_address_next_value) =
1833 address_tree_bundle.find_low_element_for_nonexistent(&address_biguint)?;
1834 let address_bundle = address_tree_bundle
1835 .new_element_with_low_element_index(old_low_address.index, &address_biguint)?;
1836
1837 let (old_low_address, old_low_address_next_value) =
1838 address_tree_bundle.find_low_element_for_nonexistent(&address_biguint)?;
1839
1840 let low_address_proof =
1842 address_tree_bundle.get_proof_of_leaf(old_low_address.index, full)?;
1843
1844 let low_address_index: u64 = old_low_address.index as u64;
1845 let low_address_value: [u8; 32] =
1846 bigint_to_be_bytes_array(&old_low_address.value).unwrap();
1847 let low_address_next_index: u64 = old_low_address.next_index as u64;
1848 let low_address_next_value: [u8; 32] =
1849 bigint_to_be_bytes_array(&old_low_address_next_value).unwrap();
1850 let proof = NewAddressProofWithContext {
1851 merkle_tree: Pubkey::new_from_array(merkle_tree_pubkey),
1852 low_address_index,
1853 low_address_value,
1854 low_address_next_index,
1855 low_address_next_value,
1856 low_address_proof,
1857 root: address_tree_bundle.root(),
1858 root_seq: address_tree_bundle.sequence_number(),
1859 new_low_element: Some(address_bundle.new_low_element),
1860 new_element: Some(address_bundle.new_element),
1861 new_element_next_value: Some(address_bundle.new_element_next_value),
1862 };
1863 proofs.push(proof);
1864 }
1865 Ok(proofs)
1866 }
1867}
1868
1869impl TestIndexer {
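    // Prepares inclusion proof inputs for the prover: a gnark JSON struct for batched
    // (v2) tree heights or the legacy struct for v1 heights, plus per-account proof
    // inputs for the caller. All accounts in one call must come from trees of equal height.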
1870 async fn process_inclusion_proofs(
1871 &self,
1872 merkle_tree_pubkeys: &[Pubkey],
1873 accounts: &[[u8; 32]],
1874 ) -> Result<
1875 (
1876 Option<BatchInclusionJsonStruct>,
1877 Option<BatchInclusionJsonStructLegacy>,
1878 Vec<AccountProofInputs>,
1879 ),
1880 IndexerError,
1881 > {
1882 let mut inclusion_proofs = Vec::new();
1883 let mut account_proof_inputs = Vec::new();
1884 let mut height = 0;
1885 let mut queues = vec![];
        let mut cpi_contexts = vec![];
1887 let mut tree_types = vec![];
1888 let proof_data: Vec<_> = accounts
1890 .iter()
1891 .zip(merkle_tree_pubkeys.iter())
1892 .map(|(account, &pubkey)| {
1893 let bundle = self
1894 .state_merkle_trees
1895 .iter()
1896 .find(|x| {
1897 x.accounts.merkle_tree == pubkey || x.accounts.nullifier_queue == pubkey
1898 })
1899 .unwrap();
1900 let merkle_tree = &bundle.merkle_tree;
1901 queues.push(bundle.accounts.nullifier_queue);
                cpi_contexts.push(bundle.accounts.cpi_context);
1903 tree_types.push(bundle.tree_type);
1904 let leaf_index = merkle_tree.get_leaf_index(account).unwrap();
1905 let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap();
1906
1907 let proof: Vec<BigInt> = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect();
1909
1910 if height == 0 {
1911 height = merkle_tree.height;
1912 } else {
1913 assert_eq!(height, merkle_tree.height);
1914 }
1915 let root_index = if bundle.tree_type == TreeType::StateV1 {
1916 merkle_tree.get_history_root_index().unwrap()
1917 } else {
1918 merkle_tree.get_history_root_index_v2().unwrap()
1919 };
1920
1921 Ok((leaf_index, proof, merkle_tree.root(), root_index))
1922 })
1923 .collect::<Result<_, IndexerError>>()?;
1924
1925 for (i, (leaf_index, proof, merkle_root, root_index)) in proof_data.into_iter().enumerate()
1927 {
1928 inclusion_proofs.push(InclusionMerkleProofInputs {
1929 root: BigInt::from_be_bytes(merkle_root.as_slice()),
1930 leaf: BigInt::from_be_bytes(&accounts[i]),
1931 path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()),
1932 path_elements: proof,
1933 });
1934
1935 account_proof_inputs.push(AccountProofInputs {
1936 root_index: RootIndex::new_some(root_index),
1937 root: merkle_root,
1938 leaf_index: leaf_index as u64,
1939 hash: accounts[i],
1940 tree_info: light_client::indexer::TreeInfo {
1941 cpi_context: Some(cpi_contextes[i]),
1942 next_tree_info: None,
1943 queue: queues[i],
1944 tree: merkle_tree_pubkeys[i],
1945 tree_type: tree_types[i],
1946 },
1947 });
1948 }
1949
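        // Select the batched (v2) or legacy (v1) inclusion circuit from the shared tree height.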
        let (batch_inclusion_proof_inputs, legacy) =
            if height == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize {
                let inclusion_proof_inputs =
                    InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap();
                (
                    Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs(
                        &inclusion_proof_inputs,
                    )),
                    None,
                )
            } else if height
                == account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize
            {
                let inclusion_proof_inputs =
                    InclusionProofInputsLegacy(inclusion_proofs.as_slice());
                (
                    None,
                    Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs(
                        &inclusion_proof_inputs,
                    )),
                )
            } else {
                return Err(IndexerError::CustomError(format!(
                    "Unsupported state tree height: {}",
                    height
                )));
            };

        Ok((batch_inclusion_proof_inputs, legacy, account_proof_inputs))
    }
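    /// Builds non-inclusion proof inputs for the given addresses in their
    /// address Merkle trees. Returns the prover JSON payload for either the
    /// batched (v2) or the legacy (v1) non-inclusion circuit, together with the
    /// `AddressProofInputs` used to assemble a `ValidityProofWithContext`.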
    async fn process_non_inclusion_proofs(
        &self,
        address_merkle_tree_pubkeys: &[Pubkey],
        addresses: Vec<[u8; 32]>,
    ) -> Result<
        (
            Option<BatchNonInclusionJsonStruct>,
            Option<BatchNonInclusionJsonStructLegacy>,
            Vec<AddressProofInputs>,
        ),
        IndexerError,
    > {
        let mut non_inclusion_proofs = Vec::new();
        let mut address_root_indices = Vec::new();
        let mut tree_heights = Vec::new();
        for (i, address) in addresses.iter().enumerate() {
            let address_tree = self
                .address_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i])
                .unwrap();
            tree_heights.push(address_tree.height());

            let proof_inputs = address_tree.get_non_inclusion_proof_inputs(address)?;
            non_inclusion_proofs.push(proof_inputs);

            let (root_index, root, tree_type) = match &address_tree.merkle_tree {
                IndexedMerkleTreeVersion::V1(tree) => (
                    tree.merkle_tree.get_history_root_index().unwrap() + 1,
                    tree.merkle_tree.root(),
                    TreeType::AddressV1,
                ),
                IndexedMerkleTreeVersion::V2(tree) => (
                    tree.merkle_tree.get_history_root_index_v2().unwrap(),
                    tree.merkle_tree.root(),
                    TreeType::AddressV2,
                ),
            };
            address_root_indices.push(AddressProofInputs {
                root_index,
                root,
                address: *address,
                tree_info: light_client::indexer::TreeInfo {
                    cpi_context: None,
                    next_tree_info: None,
                    queue: address_tree.accounts.queue,
                    tree: address_tree.accounts.merkle_tree,
                    tree_type,
                },
            });
        }
        if tree_heights.iter().any(|&x| x != tree_heights[0]) {
            return Err(IndexerError::CustomError(format!(
                "All address Merkle trees in one batch must have the same height, got {:?}",
                tree_heights
            )));
        }
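        // Height 26 selects the legacy (v1) address tree circuit, height 40 the batched (v2) one.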
        let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) =
            if tree_heights[0] == 26 {
                let non_inclusion_proof_inputs =
                    NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice());
                (
                    None,
                    Some(
                        BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs(
                            &non_inclusion_proof_inputs,
                        ),
                    ),
                )
            } else if tree_heights[0] == 40 {
                let non_inclusion_proof_inputs =
                    NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap();
                (
                    Some(
                        BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
                            &non_inclusion_proof_inputs,
                        ),
                    ),
                    None,
                )
            } else {
                return Err(IndexerError::CustomError(format!(
                    "Unsupported address tree height: {}",
                    tree_heights[0]
                )));
            };
        Ok((
            batch_non_inclusion_proof_inputs,
            batch_non_inclusion_proof_inputs_legacy,
            address_root_indices,
        ))
    }
}

impl TestIndexer {
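    /// Local validity proof generation: builds inclusion and/or non-inclusion
    /// circuit inputs from the in-memory trees, requests a proof from the gnark
    /// prover server at `SERVER_ADDRESS`, and returns it as a `CompressedProof`
    /// with the matching account and address proof inputs.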
    async fn _get_validity_proof_v1_implementation(
        &self,
        hashes: Vec<[u8; 32]>,
        new_addresses_with_trees: Vec<AddressWithTree>,
    ) -> Result<ValidityProofWithContext, IndexerError> {
        let mut state_merkle_tree_pubkeys = Vec::new();

        for hash in hashes.iter() {
            state_merkle_tree_pubkeys.push(
                self.get_compressed_account_by_hash(*hash, None)
                    .await?
                    .value
                    .tree_info
                    .tree,
            );
        }

        let state_merkle_tree_pubkeys = if state_merkle_tree_pubkeys.is_empty() {
            None
        } else {
            Some(state_merkle_tree_pubkeys)
        };
        let hashes = if hashes.is_empty() { None } else { Some(hashes) };
        let new_addresses = if new_addresses_with_trees.is_empty() {
            None
        } else {
            Some(
                new_addresses_with_trees
                    .iter()
                    .map(|x| x.address)
                    .collect::<Vec<[u8; 32]>>(),
            )
        };
        let address_merkle_tree_pubkeys = if new_addresses_with_trees.is_empty() {
            None
        } else {
            Some(
                new_addresses_with_trees
                    .iter()
                    .map(|x| x.tree)
                    .collect::<Vec<Pubkey>>(),
            )
        };

        {
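            // Only batch sizes of 1, 2, 3, 4, or 8 hashes/addresses are supported;
            // reject anything else before building the prover payload.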
            let compressed_accounts = hashes;
            if compressed_accounts.is_some()
                && ![1usize, 2usize, 3usize, 4usize, 8usize]
                    .contains(&compressed_accounts.as_ref().unwrap().len())
            {
                return Err(IndexerError::CustomError(format!(
                    "compressed_accounts must have length 1, 2, 3, 4, or 8, got {}",
                    compressed_accounts.unwrap().len()
                )));
            }
            if new_addresses.is_some()
                && ![1usize, 2usize, 3usize, 4usize, 8usize]
                    .contains(&new_addresses.as_ref().unwrap().len())
            {
                return Err(IndexerError::CustomError(format!(
                    "new_addresses must have length 1, 2, 3, 4, or 8, got {}",
                    new_addresses.unwrap().len()
                )));
            }
            let client = Client::new();
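            // Build the prover JSON payload: inclusion only, non-inclusion only, or the
            // combined circuit when both account hashes and new addresses are present.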
            let (account_proof_inputs, address_proof_inputs, json_payload) =
                match (compressed_accounts, new_addresses) {
                    (Some(accounts), None) => {
                        let (payload, payload_legacy, indices) = self
                            .process_inclusion_proofs(
                                &state_merkle_tree_pubkeys.unwrap(),
                                &accounts,
                            )
                            .await?;
                        if let Some(payload) = payload {
                            (indices, Vec::new(), payload.to_string())
                        } else {
                            (indices, Vec::new(), payload_legacy.unwrap().to_string())
                        }
                    }
                    (None, Some(addresses)) => {
                        let (payload, payload_legacy, indices) = self
                            .process_non_inclusion_proofs(
                                address_merkle_tree_pubkeys.unwrap().as_slice(),
                                addresses,
                            )
                            .await?;
                        let payload_string = if let Some(payload) = payload {
                            payload.to_string()
                        } else {
                            payload_legacy.unwrap().to_string()
                        };
                        (Vec::new(), indices, payload_string)
                    }
                    (Some(accounts), Some(addresses)) => {
                        let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self
                            .process_inclusion_proofs(
                                &state_merkle_tree_pubkeys.unwrap(),
                                &accounts,
                            )
                            .await?;

                        let (
                            non_inclusion_payload,
                            non_inclusion_payload_legacy,
                            non_inclusion_indices,
                        ) = self
                            .process_non_inclusion_proofs(
                                address_merkle_tree_pubkeys.unwrap().as_slice(),
                                addresses,
                            )
                            .await?;
                        let json_payload = if let Some(non_inclusion_payload) =
                            non_inclusion_payload
                        {
                            // For the combined circuit, the public input hash is the hash
                            // chain of the inclusion and non-inclusion public input hashes.
                            let public_input_hash = BigInt::from_bytes_be(
                                num_bigint::Sign::Plus,
                                &create_hash_chain_from_slice(&[
                                    bigint_to_u8_32(
                                        &string_to_big_int(
                                            &inclusion_payload.as_ref().unwrap().public_input_hash,
                                        )
                                        .unwrap(),
                                    )
                                    .unwrap(),
                                    bigint_to_u8_32(
                                        &string_to_big_int(
                                            &non_inclusion_payload.public_input_hash,
                                        )
                                        .unwrap(),
                                    )
                                    .unwrap(),
                                ])
                                .unwrap(),
                            );

                            CombinedJsonStruct {
                                circuit_type: ProofType::Combined.to_string(),
                                state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT,
                                address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT,
                                public_input_hash: big_int_to_string(&public_input_hash),
                                inclusion: inclusion_payload.unwrap().inputs,
                                non_inclusion: non_inclusion_payload.inputs,
                            }
                            .to_string()
                        } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy {
                            CombinedJsonStructLegacy {
                                circuit_type: ProofType::Combined.to_string(),
                                state_tree_height: 26,
                                address_tree_height: 26,
                                inclusion: inclusion_payload_legacy.unwrap().inputs,
                                non_inclusion: non_inclusion_payload.inputs,
                            }
                            .to_string()
                        } else {
                            panic!("Unsupported tree height")
                        };
                        (inclusion_indices, non_inclusion_indices, json_payload)
                    }
                    _ => {
                        panic!(
                            "At least one of compressed_accounts or new_addresses must be provided"
                        )
                    }
                };

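            // POST the payload to the local prover and retry a few times on failure.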
            let mut retries = 3;
            while retries > 0 {
                let response_result = client
                    .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
                    .header("Content-Type", "text/plain; charset=utf-8")
                    .body(json_payload.clone())
                    .send()
                    .await;
                if let Ok(response_result) = response_result {
                    if response_result.status().is_success() {
                        let body = response_result.text().await.unwrap();
                        let proof_json = deserialize_gnark_proof_json(&body).unwrap();
                        let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
                        let (proof_a, proof_b, proof_c) =
                            compress_proof(&proof_a, &proof_b, &proof_c);
                        return Ok(ValidityProofWithContext {
                            accounts: account_proof_inputs,
                            addresses: address_proof_inputs,
                            proof: CompressedProof {
                                a: proof_a,
                                b: proof_b,
                                c: proof_c,
                            }
                            .into(),
                        });
                    } else {
                        // A non-success status must also consume a retry, otherwise
                        // this loop would never terminate on a failing prover.
                        println!("Error: prover returned status {}", response_result.status());
                        tokio::time::sleep(Duration::from_secs(5)).await;
                        retries -= 1;
                    }
                } else {
                    println!("Error: {:#?}", response_result);
                    tokio::time::sleep(Duration::from_secs(5)).await;
                    retries -= 1;
                }
            }
            Err(IndexerError::CustomError(
                "Failed to get proof from server".to_string(),
            ))
        }
    }
}