1use std::{fmt::Debug, time::Duration};
2
3use account_compression::{
4 AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig,
5};
6use async_trait::async_trait;
7use borsh::BorshDeserialize;
8use light_batched_merkle_tree::{
9 constants::{
10 DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN,
11 DEFAULT_BATCH_STATE_TREE_HEIGHT,
12 },
13 merkle_tree::BatchedMerkleTreeAccount,
14};
15use light_client::{
16 fee::FeeConfig,
17 indexer::{
18 AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs,
19 AddressWithTree, BatchAddressUpdateIndexerResponse, CompressedAccount, Context,
20 GetCompressedAccountsByOwnerConfig, GetCompressedTokenAccountsByOwnerOrDelegateOptions,
21 Indexer, IndexerError, IndexerRpcConfig, Items, ItemsWithCursor, MerkleProof,
22 MerkleProofWithContext, NewAddressProofWithContext, OwnerBalance, PaginatedOptions,
23 Response, RetryConfig, RootIndex, SignatureWithMetadata, StateMerkleTreeAccounts,
24 TokenAccount, TokenBalance, ValidityProofWithContext,
25 },
26 rpc::{Rpc, RpcError},
27};
28use light_compressed_account::{
29 compressed_account::{CompressedAccountWithMerkleContext, MerkleContext},
30 hash_chain::create_hash_chain_from_slice,
31 indexer_event::event::PublicTransactionEvent,
32 instruction_data::compressed_proof::CompressedProof,
33 tx_hash::create_tx_hash,
34 TreeType,
35};
36use light_hasher::{bigint::bigint_to_be_bytes_array, Poseidon};
37use light_merkle_tree_metadata::QueueType;
38use light_merkle_tree_reference::MerkleTree;
39use light_prover_client::{
40 constants::{PROVE_PATH, SERVER_ADDRESS},
41 helpers::{big_int_to_string, bigint_to_u8_32, string_to_big_int},
42 proof::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
43 proof_type::ProofType,
44 proof_types::{
45 combined::{v1::CombinedJsonStruct as CombinedJsonStructLegacy, v2::CombinedJsonStruct},
46 inclusion::{
47 v1::{
48 BatchInclusionJsonStruct as BatchInclusionJsonStructLegacy,
49 InclusionProofInputs as InclusionProofInputsLegacy,
50 },
51 v2::{BatchInclusionJsonStruct, InclusionMerkleProofInputs, InclusionProofInputs},
52 },
53 non_inclusion::{
54 v1::{
55 BatchNonInclusionJsonStruct as BatchNonInclusionJsonStructLegacy,
56 NonInclusionProofInputs as NonInclusionProofInputsLegacy,
57 },
58 v2::{BatchNonInclusionJsonStruct, NonInclusionProofInputs},
59 },
60 },
61};
62use light_sdk::{
63 light_hasher::Hash,
64 token::{TokenData, TokenDataWithMerkleContext},
65};
66use log::info;
67use num_bigint::{BigInt, BigUint};
68use num_traits::FromBytes;
69use reqwest::Client;
70use solana_sdk::{
71 bs58,
72 pubkey::Pubkey,
73 signature::{Keypair, Signer},
74};
75
76use super::{
77 address_tree::{AddressMerkleTreeBundle, IndexedMerkleTreeVersion},
78 state_tree::{LeafIndexInfo, StateMerkleTreeBundle},
79};
80#[cfg(feature = "devenv")]
81use crate::accounts::{
82 address_tree_v2::create_batch_address_merkle_tree,
83 state_tree_v2::create_batched_state_merkle_tree,
84};
85use crate::{
86 accounts::{
87 address_tree::create_address_merkle_tree_and_queue_account,
88 state_tree::create_state_merkle_tree_and_queue_account, test_accounts::TestAccounts,
89 test_keypairs::BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR,
90 },
91 indexer::TestIndexerExtensions,
92};
93
94#[derive(Debug)]
95pub struct TestIndexer {
96 pub state_merkle_trees: Vec<StateMerkleTreeBundle>,
97 pub address_merkle_trees: Vec<AddressMerkleTreeBundle>,
98 pub payer: Keypair,
99 pub group_pda: Pubkey,
100 pub compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
101 pub nullified_compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
102 pub token_compressed_accounts: Vec<TokenDataWithMerkleContext>,
103 pub token_nullified_compressed_accounts: Vec<TokenDataWithMerkleContext>,
104 pub events: Vec<PublicTransactionEvent>,
105}
106
107impl Clone for TestIndexer {
108 fn clone(&self) -> Self {
109 Self {
110 state_merkle_trees: self.state_merkle_trees.clone(),
111 address_merkle_trees: self.address_merkle_trees.clone(),
112 payer: self.payer.insecure_clone(),
113 group_pda: self.group_pda,
114 compressed_accounts: self.compressed_accounts.clone(),
115 nullified_compressed_accounts: self.nullified_compressed_accounts.clone(),
116 token_compressed_accounts: self.token_compressed_accounts.clone(),
117 token_nullified_compressed_accounts: self.token_nullified_compressed_accounts.clone(),
118 events: self.events.clone(),
119 }
120 }
121}
122
123#[async_trait]
124impl Indexer for TestIndexer {
125 async fn get_indexer_slot(&self, _config: Option<RetryConfig>) -> Result<u64, IndexerError> {
127 Ok(u64::MAX)
129 }
130
131 async fn get_multiple_compressed_account_proofs(
132 &self,
133 hashes: Vec<[u8; 32]>,
134 _config: Option<IndexerRpcConfig>,
135 ) -> Result<Response<Items<MerkleProof>>, IndexerError> {
136 info!("Getting proofs for {:?}", hashes);
137 let mut proofs: Vec<MerkleProof> = Vec::new();
138 hashes.iter().for_each(|hash| {
139 self.state_merkle_trees.iter().for_each(|tree| {
140 if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(hash) {
141 let proof = tree
142 .merkle_tree
143 .get_proof_of_leaf(leaf_index, false)
144 .unwrap();
145 proofs.push(MerkleProof {
146 hash: *hash,
147 leaf_index: leaf_index as u64,
148 merkle_tree: tree.accounts.merkle_tree,
149 proof: proof.to_vec(),
150 root_seq: tree.merkle_tree.sequence_number as u64,
151 root: *tree.merkle_tree.roots.last().unwrap(),
152 });
153 }
154 })
155 });
156 Ok(Response {
157 context: Context {
158 slot: self.get_current_slot(),
159 },
160 value: Items { items: proofs },
161 })
162 }
163
164 async fn get_compressed_accounts_by_owner(
165 &self,
166 owner: &Pubkey,
167 _options: Option<GetCompressedAccountsByOwnerConfig>,
168 _config: Option<IndexerRpcConfig>,
169 ) -> Result<Response<ItemsWithCursor<CompressedAccount>>, IndexerError> {
170 let accounts_with_context = <TestIndexer as TestIndexerExtensions>::get_compressed_accounts_with_merkle_context_by_owner(self, owner);
171 let accounts: Result<Vec<CompressedAccount>, IndexerError> = accounts_with_context
172 .into_iter()
173 .map(|acc| acc.try_into())
174 .collect();
175
176 Ok(Response {
177 context: Context {
178 slot: self.get_current_slot(),
179 },
180 value: ItemsWithCursor {
181 items: accounts?,
182 cursor: None,
183 },
184 })
185 }
186
187 async fn get_compressed_account(
188 &self,
189 address: Address,
190 _config: Option<IndexerRpcConfig>,
191 ) -> Result<Response<CompressedAccount>, IndexerError> {
192 let account = self
193 .compressed_accounts
194 .iter()
195 .find(|acc| acc.compressed_account.address == Some(address));
196
197 let account_data = account
198 .ok_or(IndexerError::AccountNotFound)?
199 .clone()
200 .try_into()?;
201
202 Ok(Response {
203 context: Context {
204 slot: self.get_current_slot(),
205 },
206 value: account_data,
207 })
208 }
209
210 async fn get_compressed_account_by_hash(
211 &self,
212 hash: Hash,
213 _config: Option<IndexerRpcConfig>,
214 ) -> Result<Response<CompressedAccount>, IndexerError> {
215 let res = self
216 .compressed_accounts
217 .iter()
218 .find(|acc| acc.hash() == Ok(hash));
219
220 let account = if res.is_none() {
222 let res = self
223 .token_compressed_accounts
224 .iter()
225 .find(|acc| acc.compressed_account.hash() == Ok(hash));
226 res.map(|x| &x.compressed_account)
227 } else {
228 res
229 };
230
231 let account_data = account
232 .ok_or(IndexerError::AccountNotFound)?
233 .clone()
234 .try_into()?;
235
236 Ok(Response {
237 context: Context {
238 slot: self.get_current_slot(),
239 },
240 value: account_data,
241 })
242 }
243
244 async fn get_compressed_token_accounts_by_owner(
245 &self,
246 owner: &Pubkey,
247 options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
248 _config: Option<IndexerRpcConfig>,
249 ) -> Result<Response<ItemsWithCursor<TokenAccount>>, IndexerError> {
250 let mint = options.as_ref().and_then(|opts| opts.mint);
251 let token_accounts: Result<Vec<TokenAccount>, IndexerError> = self
252 .token_compressed_accounts
253 .iter()
254 .filter(|acc| {
255 acc.token_data.owner == *owner && mint.is_none_or(|m| acc.token_data.mint == m)
256 })
257 .map(|acc| TokenAccount::try_from(acc.clone()))
258 .collect();
259 let token_accounts = token_accounts?;
260 let token_accounts = if let Some(options) = options {
261 if let Some(limit) = options.limit {
262 token_accounts.into_iter().take(limit as usize).collect()
263 } else {
264 token_accounts
265 }
266 } else {
267 token_accounts
268 };
269
270 Ok(Response {
271 context: Context {
272 slot: self.get_current_slot(),
273 },
274 value: ItemsWithCursor {
275 items: token_accounts,
276 cursor: None,
277 },
278 })
279 }
280
281 async fn get_compressed_balance(
282 &self,
283 address: Option<Address>,
284 hash: Option<Hash>,
285 _config: Option<IndexerRpcConfig>,
286 ) -> Result<Response<u64>, IndexerError> {
287 let account_response = match (address, hash) {
288 (Some(addr), _) => self.get_compressed_account(addr, None).await?,
289 (_, Some(h)) => self.get_compressed_account_by_hash(h, None).await?,
290 _ => {
291 return Err(IndexerError::InvalidParameters(
292 "Either address or hash must be provided".to_string(),
293 ))
294 }
295 };
296 Ok(Response {
297 context: Context {
298 slot: self.get_current_slot(),
299 },
300 value: account_response.value.lamports,
301 })
302 }
303
304 async fn get_compressed_token_account_balance(
305 &self,
306 address: Option<Address>,
307 hash: Option<Hash>,
308 _config: Option<IndexerRpcConfig>,
309 ) -> Result<Response<u64>, IndexerError> {
310 let account = match (address, hash) {
311 (Some(address), _) => self
312 .token_compressed_accounts
313 .iter()
314 .find(|acc| acc.compressed_account.compressed_account.address == Some(address)),
315 (_, Some(hash)) => self
316 .token_compressed_accounts
317 .iter()
318 .find(|acc| acc.compressed_account.hash() == Ok(hash)),
319 (None, None) => {
320 return Err(IndexerError::InvalidParameters(
321 "Either address or hash must be provided".to_string(),
322 ))
323 }
324 };
325
326 let amount = account
327 .map(|acc| acc.token_data.amount)
328 .ok_or(IndexerError::AccountNotFound)?;
329
330 Ok(Response {
331 context: Context {
332 slot: self.get_current_slot(),
333 },
334 value: amount,
335 })
336 }
337
338 async fn get_multiple_compressed_accounts(
339 &self,
340 addresses: Option<Vec<Address>>,
341 hashes: Option<Vec<Hash>>,
342 _config: Option<IndexerRpcConfig>,
343 ) -> Result<Response<Items<CompressedAccount>>, IndexerError> {
344 match (addresses, hashes) {
345 (Some(addresses), _) => {
346 let accounts = self
347 .compressed_accounts
348 .iter()
349 .filter(|acc| {
350 acc.compressed_account
351 .address
352 .is_some_and(|addr| addresses.contains(&addr))
353 })
354 .map(|acc| acc.clone().try_into())
355 .collect::<Result<Vec<CompressedAccount>, IndexerError>>()?;
356 Ok(Response {
357 context: Context {
358 slot: self.get_current_slot(),
359 },
360 value: Items { items: accounts },
361 })
362 }
363 (_, Some(hashes)) => {
364 let accounts = self
365 .compressed_accounts
366 .iter()
367 .filter(|acc| acc.hash().is_ok_and(|hash| hashes.contains(&hash)))
368 .map(|acc| acc.clone().try_into())
369 .collect::<Result<Vec<CompressedAccount>, IndexerError>>()?;
370 Ok(Response {
371 context: Context {
372 slot: self.get_current_slot(),
373 },
374 value: Items { items: accounts },
375 })
376 }
377 (None, None) => Err(IndexerError::InvalidParameters(
378 "Either addresses or hashes must be provided".to_string(),
379 )),
380 }
381 }
382
383 async fn get_compressed_token_balances_by_owner_v2(
384 &self,
385 owner: &Pubkey,
386 _options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
387 _config: Option<IndexerRpcConfig>,
388 ) -> Result<Response<ItemsWithCursor<TokenBalance>>, IndexerError> {
389 let mint = _options.as_ref().and_then(|opts| opts.mint);
390 let balances: Vec<TokenBalance> = self
391 .token_compressed_accounts
392 .iter()
393 .filter(|acc| {
394 acc.token_data.owner == *owner && mint.is_none_or(|m| acc.token_data.mint == m)
395 })
396 .fold(std::collections::HashMap::new(), |mut map, acc| {
397 *map.entry(acc.token_data.mint).or_insert(0) += acc.token_data.amount;
398 map
399 })
400 .into_iter()
401 .map(|(mint, balance)| TokenBalance { balance, mint })
402 .collect();
403
404 Ok(Response {
405 context: Context {
406 slot: self.get_current_slot(),
407 },
408 value: ItemsWithCursor {
409 items: balances,
410 cursor: None,
411 },
412 })
413 }
414
415 async fn get_compression_signatures_for_account(
416 &self,
417 _hash: Hash,
418 _config: Option<IndexerRpcConfig>,
419 ) -> Result<Response<Items<SignatureWithMetadata>>, IndexerError> {
420 todo!()
421 }
422
423 async fn get_multiple_new_address_proofs(
424 &self,
425 merkle_tree_pubkey: [u8; 32],
426 addresses: Vec<[u8; 32]>,
427 _config: Option<IndexerRpcConfig>,
428 ) -> Result<Response<Items<NewAddressProofWithContext>>, IndexerError> {
429 let proofs = self
430 ._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false)
431 .await?;
432 Ok(Response {
433 context: Context {
434 slot: self.get_current_slot(),
435 },
436 value: Items { items: proofs },
437 })
438 }
439
440 async fn get_validity_proof(
441 &self,
442 hashes: Vec<[u8; 32]>,
443 new_addresses_with_trees: Vec<AddressWithTree>,
444 _config: Option<IndexerRpcConfig>,
445 ) -> Result<Response<ValidityProofWithContext>, IndexerError> {
446 #[cfg(feature = "v2")]
447 {
448 let mut state_merkle_tree_pubkeys = Vec::new();
450
451 for hash in hashes.iter() {
452 let account = self.get_compressed_account_by_hash(*hash, None).await?;
453 state_merkle_tree_pubkeys.push(account.value.tree_info.tree);
454 }
455 let mut proof_inputs = vec![];
456
457 let mut indices_to_remove = Vec::new();
458 let compressed_accounts = if !hashes.is_empty() && !state_merkle_tree_pubkeys.is_empty()
460 {
461 let zipped_accounts = hashes.iter().zip(state_merkle_tree_pubkeys.iter());
462
463 for (i, (compressed_account, state_merkle_tree_pubkey)) in
464 zipped_accounts.enumerate()
465 {
466 let accounts = self.state_merkle_trees.iter().find(|x| {
467 x.accounts.merkle_tree == *state_merkle_tree_pubkey
468 && x.tree_type == TreeType::StateV2
469 });
470
471 if let Some(accounts) = accounts {
472 let queue_element = accounts
473 .output_queue_elements
474 .iter()
475 .find(|(hash, _)| hash == compressed_account);
476 if let Some((_, index)) = queue_element {
477 if accounts.output_queue_batch_size.is_some()
478 && accounts.leaf_index_in_queue_range(*index as usize)?
479 {
480 use light_client::indexer::RootIndex;
481
482 indices_to_remove.push(i);
483 proof_inputs.push(AccountProofInputs {
484 hash: *compressed_account,
485 root: [0u8; 32],
486 root_index: RootIndex::new_none(),
487 leaf_index: accounts
488 .output_queue_elements
489 .iter()
490 .position(|(x, _)| x == compressed_account)
491 .unwrap()
492 as u64,
493 tree_info: light_client::indexer::TreeInfo {
494 cpi_context: Some(accounts.accounts.cpi_context),
495 tree: accounts.accounts.merkle_tree,
496 queue: accounts.accounts.nullifier_queue,
497 next_tree_info: None,
498 tree_type: accounts.tree_type,
499 },
500 })
501 }
502 }
503 }
504 }
505
506 let compress_accounts = hashes
507 .iter()
508 .enumerate()
509 .filter(|(i, _)| !indices_to_remove.contains(i))
510 .map(|(_, x)| *x)
511 .collect::<Vec<[u8; 32]>>();
512
513 if compress_accounts.is_empty() {
514 None
515 } else {
516 Some(compress_accounts)
517 }
518 } else {
519 None
520 };
521
522 let rpc_result: Option<ValidityProofWithContext> = if (compressed_accounts.is_some()
524 && !compressed_accounts.as_ref().unwrap().is_empty())
525 || !new_addresses_with_trees.is_empty()
526 {
527 Some(
528 self._get_validity_proof_v1_implementation(
529 compressed_accounts.unwrap_or_default(),
530 new_addresses_with_trees,
531 )
532 .await?,
533 )
534 } else {
535 None
536 };
537
538 let addresses = if let Some(rpc_result) = rpc_result.as_ref() {
540 rpc_result.addresses.to_vec()
541 } else {
542 Vec::new()
543 };
544 let accounts = {
545 let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() {
546 rpc_result.accounts.to_vec()
547 } else {
548 Vec::new()
549 };
550 #[cfg(debug_assertions)]
551 {
552 if std::env::var("RUST_BACKTRACE").is_ok() {
553 println!("get_validit_proof: rpc_result {:?}", rpc_result);
554 }
555 }
556
557 for (proof_input, &index) in proof_inputs.iter().zip(indices_to_remove.iter()) {
559 if root_indices.len() <= index {
560 root_indices.push(proof_input.clone());
561 } else {
562 root_indices.insert(index, proof_input.clone());
563 }
564 }
565 root_indices
566 };
567
568 Ok(Response {
569 context: Context {
570 slot: self.get_current_slot(),
571 },
572 value: ValidityProofWithContext {
573 accounts,
574 addresses,
575 proof: rpc_result
576 .map(|rpc_result| rpc_result.proof.0.unwrap())
577 .into(),
578 },
579 })
580 }
581
582 #[cfg(not(feature = "v2"))]
583 {
584 let result = self
586 ._get_validity_proof_v1_implementation(hashes, new_addresses_with_trees)
587 .await?;
588 Ok(Response {
589 context: Context {
590 slot: self.get_current_slot(),
591 },
592 value: result,
593 })
594 }
595 }
596
597 async fn get_queue_elements(
598 &mut self,
599 _merkle_tree_pubkey: [u8; 32],
600 _queue_type: QueueType,
601 _num_elements: u16,
602 _start_offset: Option<u64>,
603 _config: Option<IndexerRpcConfig>,
604 ) -> Result<Response<Items<MerkleProofWithContext>>, IndexerError> {
605 #[cfg(not(feature = "v2"))]
606 unimplemented!("get_queue_elements");
607 #[cfg(feature = "v2")]
608 {
609 let merkle_tree_pubkey = _merkle_tree_pubkey;
610 let queue_type = _queue_type;
611 let num_elements = _num_elements;
612 let pubkey = Pubkey::new_from_array(merkle_tree_pubkey);
613 let address_tree_bundle = self
614 .address_merkle_trees
615 .iter()
616 .find(|x| x.accounts.merkle_tree == pubkey);
617 if let Some(address_tree_bundle) = address_tree_bundle {
618 let end_offset = std::cmp::min(
619 num_elements as usize,
620 address_tree_bundle.queue_elements.len(),
621 );
622 let queue_elements = address_tree_bundle.queue_elements[0..end_offset].to_vec();
623
624 let merkle_proofs_with_context = queue_elements
625 .iter()
626 .map(|element| MerkleProofWithContext {
627 proof: Vec::new(),
628 leaf: [0u8; 32],
629 leaf_index: 0,
630 merkle_tree: address_tree_bundle.accounts.merkle_tree.to_bytes(),
631 root: address_tree_bundle.root(),
632 tx_hash: None,
633 root_seq: 0,
634 account_hash: *element,
635 })
636 .collect();
637 return Ok(Response {
638 context: Context {
639 slot: self.get_current_slot(),
640 },
641 value: Items {
642 items: merkle_proofs_with_context,
643 },
644 });
645 }
646
647 let state_tree_bundle = self
648 .state_merkle_trees
649 .iter_mut()
650 .find(|x| x.accounts.merkle_tree == pubkey);
651 if queue_type == QueueType::InputStateV2 {
652 if let Some(state_tree_bundle) = state_tree_bundle {
653 let end_offset = std::cmp::min(
654 num_elements as usize,
655 state_tree_bundle.input_leaf_indices.len(),
656 );
657 let queue_elements =
658 state_tree_bundle.input_leaf_indices[0..end_offset].to_vec();
659 let merkle_proofs = queue_elements
660 .iter()
661 .map(|leaf_info| {
662 match state_tree_bundle
663 .merkle_tree
664 .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
665 {
666 Ok(proof) => proof.to_vec(),
667 Err(_) => {
668 let mut next_index =
669 state_tree_bundle.merkle_tree.get_next_index() as u64;
670 while next_index < leaf_info.leaf_index as u64 {
671 state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
672 next_index =
673 state_tree_bundle.merkle_tree.get_next_index() as u64;
674 }
675 state_tree_bundle
676 .merkle_tree
677 .get_proof_of_leaf(leaf_info.leaf_index as usize, true)
678 .unwrap()
679 .to_vec();
680 Vec::new()
681 }
682 }
683 })
684 .collect::<Vec<_>>();
685 let leaves = queue_elements
686 .iter()
687 .map(|leaf_info| {
688 state_tree_bundle
689 .merkle_tree
690 .get_leaf(leaf_info.leaf_index as usize)
691 .unwrap_or_default()
692 })
693 .collect::<Vec<_>>();
694 let merkle_proofs_with_context = merkle_proofs
695 .iter()
696 .zip(queue_elements.iter())
697 .zip(leaves.iter())
698 .map(|((proof, element), leaf)| MerkleProofWithContext {
699 proof: proof.clone(),
700 leaf: *leaf,
701 leaf_index: element.leaf_index as u64,
702 merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
703 root: state_tree_bundle.merkle_tree.root(),
704 tx_hash: Some(element.tx_hash),
705 root_seq: 0,
706 account_hash: element.leaf,
707 })
708 .collect();
709
710 return Ok(Response {
711 context: Context {
712 slot: self.get_current_slot(),
713 },
714 value: Items {
715 items: merkle_proofs_with_context,
716 },
717 });
718 }
719 }
720
721 if queue_type == QueueType::OutputStateV2 {
722 if let Some(state_tree_bundle) = state_tree_bundle {
723 let end_offset = std::cmp::min(
724 num_elements as usize,
725 state_tree_bundle.output_queue_elements.len(),
726 );
727 let queue_elements =
728 state_tree_bundle.output_queue_elements[0..end_offset].to_vec();
729 let indices = queue_elements
730 .iter()
731 .map(|(_, index)| index)
732 .collect::<Vec<_>>();
733 let merkle_proofs = indices
734 .iter()
735 .map(|index| {
736 match state_tree_bundle
737 .merkle_tree
738 .get_proof_of_leaf(**index as usize, true)
739 {
740 Ok(proof) => proof.to_vec(),
741 Err(_) => {
742 let mut next_index =
743 state_tree_bundle.merkle_tree.get_next_index() as u64;
744 while next_index < **index {
745 state_tree_bundle.merkle_tree.append(&[0u8; 32]).unwrap();
746 next_index =
747 state_tree_bundle.merkle_tree.get_next_index() as u64;
748 }
749 state_tree_bundle
750 .merkle_tree
751 .get_proof_of_leaf(**index as usize, true)
752 .unwrap()
753 .to_vec();
754 Vec::new()
755 }
756 }
757 })
758 .collect::<Vec<_>>();
759 let leaves = indices
760 .iter()
761 .map(|index| {
762 state_tree_bundle
763 .merkle_tree
764 .get_leaf(**index as usize)
765 .unwrap_or_default()
766 })
767 .collect::<Vec<_>>();
768 let merkle_proofs_with_context = merkle_proofs
769 .iter()
770 .zip(queue_elements.iter())
771 .zip(leaves.iter())
772 .map(|((proof, (element, index)), leaf)| MerkleProofWithContext {
773 proof: proof.clone(),
774 leaf: *leaf,
775 leaf_index: *index,
776 merkle_tree: state_tree_bundle.accounts.merkle_tree.to_bytes(),
777 root: state_tree_bundle.merkle_tree.root(),
778 tx_hash: None,
779 root_seq: 0,
780 account_hash: *element,
781 })
782 .collect();
783 return Ok(Response {
784 context: Context {
785 slot: self.get_current_slot(),
786 },
787 value: Items {
788 items: merkle_proofs_with_context,
789 },
790 });
791 }
792 }
793
794 Err(IndexerError::InvalidParameters(
795 "Merkle tree not found".to_string(),
796 ))
797 }
798 }
799
800 async fn get_subtrees(
801 &self,
802 _merkle_tree_pubkey: [u8; 32],
803 _config: Option<IndexerRpcConfig>,
804 ) -> Result<Response<Items<[u8; 32]>>, IndexerError> {
805 #[cfg(not(feature = "v2"))]
806 unimplemented!("get_subtrees");
807 #[cfg(feature = "v2")]
808 {
809 let merkle_tree_pubkey = Pubkey::new_from_array(_merkle_tree_pubkey);
810 let address_tree_bundle = self
811 .address_merkle_trees
812 .iter()
813 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey);
814 if let Some(address_tree_bundle) = address_tree_bundle {
815 Ok(Response {
816 context: Context {
817 slot: self.get_current_slot(),
818 },
819 value: Items {
820 items: address_tree_bundle.get_subtrees(),
821 },
822 })
823 } else {
824 let state_tree_bundle = self
825 .state_merkle_trees
826 .iter()
827 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey);
828 if let Some(state_tree_bundle) = state_tree_bundle {
829 Ok(Response {
830 context: Context {
831 slot: self.get_current_slot(),
832 },
833 value: Items {
834 items: state_tree_bundle.merkle_tree.get_subtrees(),
835 },
836 })
837 } else {
838 Err(IndexerError::InvalidParameters(
839 "Merkle tree not found".to_string(),
840 ))
841 }
842 }
843 }
844 }
845
846 async fn get_address_queue_with_proofs(
847 &mut self,
848 _merkle_tree_pubkey: &Pubkey,
849 _zkp_batch_size: u16,
850 _start_offset: Option<u64>,
851 _config: Option<IndexerRpcConfig>,
852 ) -> Result<Response<BatchAddressUpdateIndexerResponse>, IndexerError> {
853 #[cfg(not(feature = "v2"))]
854 unimplemented!("get_address_queue_with_proofs");
855 #[cfg(feature = "v2")]
856 {
857 use light_client::indexer::AddressQueueIndex;
858 let merkle_tree_pubkey = _merkle_tree_pubkey;
859 let zkp_batch_size = _zkp_batch_size;
860
861 let batch_start_index = self
862 .get_address_merkle_trees()
863 .iter()
864 .find(|x| x.accounts.merkle_tree == *merkle_tree_pubkey)
865 .unwrap()
866 .get_v2_indexed_merkle_tree()
867 .ok_or(IndexerError::Unknown(
868 "Failed to get v2 indexed merkle tree".into(),
869 ))?
870 .merkle_tree
871 .rightmost_index;
872
873 let address_proofs = self
874 .get_queue_elements(
875 merkle_tree_pubkey.to_bytes(),
876 QueueType::AddressV2,
877 zkp_batch_size,
878 None,
879 None,
880 )
881 .await
882 .map_err(|_| IndexerError::Unknown("Failed to get queue elements".into()))?
883 .value;
884
885 let addresses: Vec<AddressQueueIndex> = address_proofs
886 .items
887 .iter()
888 .enumerate()
889 .map(|(i, proof)| AddressQueueIndex {
890 address: proof.account_hash,
891 queue_index: proof.root_seq + i as u64,
892 })
893 .collect();
894 let non_inclusion_proofs = self
895 .get_multiple_new_address_proofs(
896 merkle_tree_pubkey.to_bytes(),
897 address_proofs
898 .items
899 .iter()
900 .map(|x| x.account_hash)
901 .collect(),
902 None,
903 )
904 .await
905 .map_err(|_| {
906 IndexerError::Unknown(
907 "Failed to get get_multiple_new_address_proofs_full".into(),
908 )
909 })?
910 .value;
911
912 let subtrees = self
913 .get_subtrees(merkle_tree_pubkey.to_bytes(), None)
914 .await
915 .map_err(|_| IndexerError::Unknown("Failed to get subtrees".into()))?
916 .value;
917
918 Ok(Response {
919 context: Context {
920 slot: self.get_current_slot(),
921 },
922 value: BatchAddressUpdateIndexerResponse {
923 batch_start_index: batch_start_index as u64,
924 addresses,
925 non_inclusion_proofs: non_inclusion_proofs.items,
926 subtrees: subtrees.items,
927 },
928 })
929 }
930 }
931
932 async fn get_compressed_balance_by_owner(
934 &self,
935 _owner: &Pubkey,
936 _config: Option<IndexerRpcConfig>,
937 ) -> Result<Response<u64>, IndexerError> {
938 todo!("get_compressed_balance_by_owner not implemented")
939 }
940
941 async fn get_compressed_mint_token_holders(
942 &self,
943 _mint: &Pubkey,
944 _options: Option<PaginatedOptions>,
945 _config: Option<IndexerRpcConfig>,
946 ) -> Result<Response<ItemsWithCursor<OwnerBalance>>, IndexerError> {
947 todo!("get_compressed_mint_token_holders not implemented")
948 }
949
950 async fn get_compressed_token_accounts_by_delegate(
951 &self,
952 _delegate: &Pubkey,
953 _options: Option<GetCompressedTokenAccountsByOwnerOrDelegateOptions>,
954 _config: Option<IndexerRpcConfig>,
955 ) -> Result<Response<ItemsWithCursor<TokenAccount>>, IndexerError> {
956 todo!("get_compressed_token_accounts_by_delegate not implemented")
957 }
958
959 async fn get_compression_signatures_for_address(
960 &self,
961 _address: &[u8; 32],
962 _options: Option<PaginatedOptions>,
963 _config: Option<IndexerRpcConfig>,
964 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
965 todo!("get_compression_signatures_for_address not implemented")
966 }
967
968 async fn get_compression_signatures_for_owner(
969 &self,
970 _owner: &Pubkey,
971 _options: Option<PaginatedOptions>,
972 _config: Option<IndexerRpcConfig>,
973 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
974 todo!("get_compression_signatures_for_owner not implemented")
975 }
976
977 async fn get_compression_signatures_for_token_owner(
978 &self,
979 _owner: &Pubkey,
980 _options: Option<PaginatedOptions>,
981 _config: Option<IndexerRpcConfig>,
982 ) -> Result<Response<ItemsWithCursor<SignatureWithMetadata>>, IndexerError> {
983 todo!("get_compression_signatures_for_token_owner not implemented")
984 }
985
986 async fn get_indexer_health(&self, _config: Option<RetryConfig>) -> Result<bool, IndexerError> {
987 todo!("get_indexer_health not implemented")
988 }
989}
990
991#[async_trait]
992impl TestIndexerExtensions for TestIndexer {
993 fn get_address_merkle_tree(
994 &self,
995 merkle_tree_pubkey: Pubkey,
996 ) -> Option<&AddressMerkleTreeBundle> {
997 self.address_merkle_trees
998 .iter()
999 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1000 }
1001
1002 fn add_compressed_accounts_with_token_data(
1009 &mut self,
1010 slot: u64,
1011 event: &PublicTransactionEvent,
1012 ) {
1013 TestIndexer::add_event_and_compressed_accounts(self, slot, event);
1014 }
1015
1016 fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) {
1017 let decoded_hash: [u8; 32] = bs58::decode(account_hash)
1018 .into_vec()
1019 .unwrap()
1020 .as_slice()
1021 .try_into()
1022 .unwrap();
1023
1024 if let Some(state_tree_bundle) = self
1025 .state_merkle_trees
1026 .iter_mut()
1027 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1028 {
1029 if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) {
1030 state_tree_bundle
1031 .merkle_tree
1032 .update(&[0u8; 32], leaf_index)
1033 .unwrap();
1034 }
1035 }
1036 }
1037
1038 fn address_tree_updated(
1039 &mut self,
1040 merkle_tree_pubkey: Pubkey,
1041 context: &NewAddressProofWithContext,
1042 ) {
1043 info!("Updating address tree...");
1044 let pos = self
1045 .address_merkle_trees
1046 .iter()
1047 .position(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1048 .unwrap();
1049 let new_low_element = context.new_low_element.clone().unwrap();
1050 let new_element = context.new_element.clone().unwrap();
1051 let new_element_next_value = context.new_element_next_value.clone().unwrap();
1052 self.address_merkle_trees[pos]
1054 .get_v1_indexed_merkle_tree_mut()
1055 .expect("Failed to get v1 indexed merkle tree.")
1056 .update(&new_low_element, &new_element, &new_element_next_value)
1057 .unwrap();
1058 self.address_merkle_trees[pos]
1059 .append_with_low_element_index(new_low_element.index, &new_element.value)
1060 .unwrap();
1061 info!("Address tree updated");
1062 }
1063
1064 fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec<StateMerkleTreeAccounts> {
1065 pubkeys
1066 .iter()
1067 .map(|x| {
1068 self.state_merkle_trees
1069 .iter()
1070 .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x)
1071 .unwrap()
1072 .accounts
1073 })
1074 .collect::<Vec<_>>()
1075 }
1076
1077 fn get_state_merkle_trees(&self) -> &Vec<StateMerkleTreeBundle> {
1078 &self.state_merkle_trees
1079 }
1080
1081 fn get_state_merkle_trees_mut(&mut self) -> &mut Vec<StateMerkleTreeBundle> {
1082 &mut self.state_merkle_trees
1083 }
1084
1085 fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> {
1086 &self.address_merkle_trees
1087 }
1088
1089 fn get_address_merkle_trees_mut(&mut self) -> &mut Vec<AddressMerkleTreeBundle> {
1090 &mut self.address_merkle_trees
1091 }
1092
1093 fn get_token_compressed_accounts(&self) -> &Vec<TokenDataWithMerkleContext> {
1094 &self.token_compressed_accounts
1095 }
1096
1097 fn get_group_pda(&self) -> &Pubkey {
1098 &self.group_pda
1099 }
1100
1101 fn add_address_merkle_tree_accounts(
1102 &mut self,
1103 merkle_tree_keypair: &Keypair,
1104 queue_keypair: &Keypair,
1105 _owning_program_id: Option<Pubkey>,
1106 ) -> AddressMerkleTreeAccounts {
1107 info!("Adding address merkle tree accounts...");
1108 let address_merkle_tree_accounts = AddressMerkleTreeAccounts {
1109 merkle_tree: merkle_tree_keypair.pubkey(),
1110 queue: queue_keypair.pubkey(),
1111 };
1112 self.address_merkle_trees
1113 .push(Self::add_address_merkle_tree_bundle(address_merkle_tree_accounts).unwrap());
1114 info!(
1115 "Address merkle tree accounts added. Total: {}",
1116 self.address_merkle_trees.len()
1117 );
1118 address_merkle_tree_accounts
1119 }
1120
1121 fn get_compressed_accounts_with_merkle_context_by_owner(
1122 &self,
1123 owner: &Pubkey,
1124 ) -> Vec<CompressedAccountWithMerkleContext> {
1125 self.compressed_accounts
1126 .iter()
1127 .filter(|x| x.compressed_account.owner.to_bytes() == owner.to_bytes())
1128 .cloned()
1129 .collect()
1130 }
1131
1132 fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) {
1133 Self::get_state_merkle_trees_mut(self).push(state_bundle);
1134 }
1135
1136 fn add_event_and_compressed_accounts(
1137 &mut self,
1138 slot: u64,
1139 event: &PublicTransactionEvent,
1140 ) -> (
1141 Vec<CompressedAccountWithMerkleContext>,
1142 Vec<TokenDataWithMerkleContext>,
1143 ) {
1144 let mut compressed_accounts = Vec::new();
1145 let mut token_compressed_accounts = Vec::new();
1146 let event_inputs_len = event.input_compressed_account_hashes.len();
1147 let event_outputs_len = event.output_compressed_account_hashes.len();
1148 for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) {
1149 self.process_v1_compressed_account(
1150 slot,
1151 event,
1152 i,
1153 &mut token_compressed_accounts,
1154 &mut compressed_accounts,
1155 );
1156 }
1157
1158 self.events.push(event.clone());
1159 (compressed_accounts, token_compressed_accounts)
1160 }
1161
1162 fn get_proof_by_index(&mut self, merkle_tree_pubkey: Pubkey, index: u64) -> MerkleProof {
1163 let bundle = self
1164 .state_merkle_trees
1165 .iter_mut()
1166 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1167 .unwrap();
1168
1169 while bundle.merkle_tree.leaves().len() <= index as usize {
1170 bundle.merkle_tree.append(&[0u8; 32]).unwrap();
1171 }
1172
1173 let leaf = match bundle.merkle_tree.get_leaf(index as usize) {
1174 Ok(leaf) => leaf,
1175 Err(_) => {
1176 bundle.merkle_tree.append(&[0u8; 32]).unwrap();
1177 bundle.merkle_tree.get_leaf(index as usize).unwrap()
1178 }
1179 };
1180
1181 let proof = bundle
1182 .merkle_tree
1183 .get_proof_of_leaf(index as usize, true)
1184 .unwrap()
1185 .to_vec();
1186
1187 MerkleProof {
1188 hash: leaf,
1189 leaf_index: index,
1190 merkle_tree: merkle_tree_pubkey,
1191 proof,
1192 root_seq: bundle.merkle_tree.sequence_number as u64,
1193 root: bundle.merkle_tree.root(),
1194 }
1195 }
1196
1197 async fn finalize_batched_address_tree_update(
1198 &mut self,
1199 merkle_tree_pubkey: Pubkey,
1200 account_data: &mut [u8],
1201 ) {
1202 let onchain_account =
1203 BatchedMerkleTreeAccount::address_from_bytes(account_data, &merkle_tree_pubkey.into())
1204 .unwrap();
1205 let address_tree = self
1206 .address_merkle_trees
1207 .iter_mut()
1208 .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
1209 .unwrap();
1210 let address_tree_index = address_tree.right_most_index();
1211 let onchain_next_index = onchain_account.next_index;
1212 let diff_onchain_indexer = onchain_next_index - address_tree_index as u64;
1213 let addresses = address_tree.queue_elements[0..diff_onchain_indexer as usize].to_vec();
1214 for _ in 0..diff_onchain_indexer {
1215 address_tree.queue_elements.remove(0);
1216 }
1217 for new_element_value in &addresses {
1218 address_tree
1219 .append(&BigUint::from_bytes_be(new_element_value))
1220 .unwrap();
1221 }
1222 match &mut address_tree.merkle_tree {
1223 IndexedMerkleTreeVersion::V2(tree) => tree.merkle_tree.num_root_updates += 1,
1224 IndexedMerkleTreeVersion::V1(_) => {
1225 unimplemented!("finalize_batched_address_tree_update not implemented for v1 trees.")
1226 }
1227 }
1228 let onchain_root = onchain_account.root_history.last().unwrap();
1229 let new_root = address_tree.root();
1230 assert_eq!(*onchain_root, new_root);
1231 }
1232}
1233
1234impl TestIndexer {
1235 fn get_current_slot(&self) -> u64 {
1236 u64::MAX
1238 }
1239
1240 pub async fn init_from_acounts(
1241 payer: &Keypair,
1242 env: &TestAccounts,
1243 output_queue_batch_size: usize,
1244 ) -> Self {
1245 let mut state_merkle_tree_accounts = env.v1_state_trees.clone();
1247
1248 for v2_state_tree in &env.v2_state_trees {
1250 state_merkle_tree_accounts.push(StateMerkleTreeAccounts {
1251 merkle_tree: v2_state_tree.merkle_tree,
1252 nullifier_queue: v2_state_tree.output_queue,
1253 cpi_context: v2_state_tree.cpi_context,
1254 });
1255 }
1256
1257 let mut address_merkle_tree_accounts = env.v1_address_trees.clone();
1259
1260 for &v2_address_tree in &env.v2_address_trees {
1262 address_merkle_tree_accounts.push(AddressMerkleTreeAccounts {
1263 merkle_tree: v2_address_tree,
1264 queue: v2_address_tree,
1265 });
1266 }
1267
1268 Self::new(
1269 state_merkle_tree_accounts,
1270 address_merkle_tree_accounts,
1271 payer.insecure_clone(),
1272 env.protocol.group_pda,
1273 output_queue_batch_size,
1274 )
1275 .await
1276 }
1277
1278 pub async fn new(
1279 state_merkle_tree_accounts: Vec<StateMerkleTreeAccounts>,
1280 address_merkle_tree_accounts: Vec<AddressMerkleTreeAccounts>,
1281 payer: Keypair,
1282 group_pda: Pubkey,
1283 output_queue_batch_size: usize,
1284 ) -> Self {
1285 let mut state_merkle_trees = Vec::new();
1286 for state_merkle_tree_account in state_merkle_tree_accounts.iter() {
1287 let test_batched_output_queue =
1288 Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap();
1289 let (tree_type, merkle_tree, output_queue_batch_size) = if state_merkle_tree_account
1290 .nullifier_queue
1291 == test_batched_output_queue.pubkey()
1292 {
1293 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1294 DEFAULT_BATCH_STATE_TREE_HEIGHT as usize,
1295 0,
1296 0,
1297 DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN as usize,
1298 ));
1299 (
1300 TreeType::StateV2,
1301 merkle_tree,
1302 Some(output_queue_batch_size),
1303 )
1304 } else {
1305 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1306 account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize,
1307 account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
1308 0,
1309 account_compression::utils::constants::STATE_MERKLE_TREE_ROOTS as usize,
1310 ));
1311 (TreeType::StateV1, merkle_tree, None)
1312 };
1313
1314 state_merkle_trees.push(StateMerkleTreeBundle {
1315 accounts: *state_merkle_tree_account,
1316 merkle_tree,
1317 rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64,
1318 tree_type,
1319 output_queue_elements: vec![],
1320 input_leaf_indices: vec![],
1321 output_queue_batch_size,
1322 num_inserted_batches: 0,
1323 });
1324 }
1325
1326 let mut address_merkle_trees = Vec::new();
1327 for address_merkle_tree_account in address_merkle_tree_accounts {
1328 address_merkle_trees
1329 .push(Self::add_address_merkle_tree_bundle(address_merkle_tree_account).unwrap());
1330 }
1331
1332 Self {
1333 state_merkle_trees,
1334 address_merkle_trees,
1335 payer,
1336 compressed_accounts: vec![],
1337 nullified_compressed_accounts: vec![],
1338 events: vec![],
1339 token_compressed_accounts: vec![],
1340 token_nullified_compressed_accounts: vec![],
1341 group_pda,
1342 }
1343 }
1344
1345 pub fn add_address_merkle_tree_bundle(
1346 address_merkle_tree_accounts: AddressMerkleTreeAccounts,
1347 ) -> Result<AddressMerkleTreeBundle, IndexerError> {
1349 if address_merkle_tree_accounts.merkle_tree == address_merkle_tree_accounts.queue {
1350 AddressMerkleTreeBundle::new_v2(address_merkle_tree_accounts)
1351 } else {
1352 AddressMerkleTreeBundle::new_v1(address_merkle_tree_accounts)
1353 }
1354 }
1355
1356 async fn add_address_merkle_tree_v1<R: Rpc>(
1357 &mut self,
1358 rpc: &mut R,
1359 merkle_tree_keypair: &Keypair,
1360 queue_keypair: &Keypair,
1361 owning_program_id: Option<Pubkey>,
1362 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1363 create_address_merkle_tree_and_queue_account(
1364 &self.payer,
1365 true,
1366 rpc,
1367 merkle_tree_keypair,
1368 queue_keypair,
1369 owning_program_id,
1370 None,
1371 &AddressMerkleTreeConfig::default(),
1372 &AddressQueueConfig::default(),
1373 0,
1374 )
1375 .await?;
1376
1377 let accounts = <TestIndexer as TestIndexerExtensions>::add_address_merkle_tree_accounts(
1378 self,
1379 merkle_tree_keypair,
1380 queue_keypair,
1381 owning_program_id,
1382 );
1383 Ok(accounts)
1384 }
1385
1386 #[cfg(feature = "devenv")]
1387 async fn add_address_merkle_tree_v2<R: Rpc>(
1388 &mut self,
1389 rpc: &mut R,
1390 merkle_tree_keypair: &Keypair,
1391 queue_keypair: &Keypair,
1392 _owning_program_id: Option<Pubkey>,
1393 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1394 info!(
1395 "Adding address merkle tree accounts v2 {:?}",
1396 merkle_tree_keypair.pubkey()
1397 );
1398
1399 let params = light_batched_merkle_tree::initialize_address_tree::InitAddressTreeAccountsInstructionData::test_default();
1400
1401 info!(
1402 "Creating batched address merkle tree {:?}",
1403 merkle_tree_keypair.pubkey()
1404 );
1405 create_batch_address_merkle_tree(rpc, &self.payer, merkle_tree_keypair, params).await?;
1406 info!(
1407 "Batched address merkle tree created {:?}",
1408 merkle_tree_keypair.pubkey()
1409 );
1410
1411 let accounts = self.add_address_merkle_tree_accounts(
1412 merkle_tree_keypair,
1413 queue_keypair,
1414 _owning_program_id,
1415 );
1416 Ok(accounts)
1417 }
1418
1419 pub async fn add_address_merkle_tree<R: Rpc>(
1420 &mut self,
1421 rpc: &mut R,
1422 merkle_tree_keypair: &Keypair,
1423 queue_keypair: &Keypair,
1424 owning_program_id: Option<Pubkey>,
1425 tree_type: TreeType,
1426 ) -> Result<AddressMerkleTreeAccounts, RpcError> {
1427 if tree_type == TreeType::AddressV1 {
1428 self.add_address_merkle_tree_v1(
1429 rpc,
1430 merkle_tree_keypair,
1431 queue_keypair,
1432 owning_program_id,
1433 )
1434 .await
1435 } else if tree_type == TreeType::AddressV2 {
1436 #[cfg(not(feature = "devenv"))]
1437 panic!("Batched address merkle trees require the 'devenv' feature to be enabled");
1438 #[cfg(feature = "devenv")]
1439 self.add_address_merkle_tree_v2(
1440 rpc,
1441 merkle_tree_keypair,
1442 queue_keypair,
1443 owning_program_id,
1444 )
1445 .await
1446 } else {
1447 Err(RpcError::CustomError(format!(
1448 "add_address_merkle_tree: Version not supported, {}. Versions: AddressV1, AddressV2",
1449 tree_type
1450 )))
1451 }
1452 }
1453
1454 #[allow(clippy::too_many_arguments)]
1455 pub async fn add_state_merkle_tree<R: Rpc>(
1456 &mut self,
1457 rpc: &mut R,
1458 merkle_tree_keypair: &Keypair,
1459 queue_keypair: &Keypair,
1460 cpi_context_keypair: &Keypair,
1461 owning_program_id: Option<Pubkey>,
1462 forester: Option<Pubkey>,
1463 tree_type: TreeType,
1464 ) {
1465 let (rollover_fee, merkle_tree, output_queue_batch_size) = match tree_type {
1466 TreeType::StateV1 => {
1467 create_state_merkle_tree_and_queue_account(
1468 &self.payer,
1469 true,
1470 rpc,
1471 merkle_tree_keypair,
1472 queue_keypair,
1473 Some(cpi_context_keypair),
1474 owning_program_id,
1475 forester,
1476 self.state_merkle_trees.len() as u64,
1477 &StateMerkleTreeConfig::default(),
1478 &NullifierQueueConfig::default(),
1479 )
1480 .await
1481 .unwrap();
1482 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1483 account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize,
1484 account_compression::utils::constants::STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
1485 0,
1486 account_compression::utils::constants::STATE_MERKLE_TREE_ROOTS as usize,
1487
1488 ));
1489 (FeeConfig::default().state_merkle_tree_rollover as i64,merkle_tree, None)
1490 }
1491 TreeType::StateV2 => {
1492 #[cfg(feature = "devenv")]
1493 {
1494 let params = light_batched_merkle_tree::initialize_state_tree::InitStateTreeAccountsInstructionData::test_default();
1495
1496 create_batched_state_merkle_tree(
1497 &self.payer,
1498 true,
1499 rpc,
1500 merkle_tree_keypair,
1501 queue_keypair,
1502 cpi_context_keypair,
1503 params,
1504 ).await.unwrap();
1505 let merkle_tree = Box::new(MerkleTree::<Poseidon>::new_with_history(
1506 DEFAULT_BATCH_STATE_TREE_HEIGHT as usize,
1507 0,
1508 0,
1509 DEFAULT_BATCH_STATE_ROOT_HISTORY_LEN as usize,
1510
1511 ));
1512 (FeeConfig::test_batched().state_merkle_tree_rollover as i64,merkle_tree, Some(params.output_queue_batch_size as usize))
1513 }
1514
1515 #[cfg(not(feature = "devenv"))]
1516 panic!("Batched state merkle trees require the 'devenv' feature to be enabled")
1517 }
1518 _ => panic!(
1519 "add_state_merkle_tree: tree_type not supported, {}. tree_type: 1 concurrent, 2 batched",
1520 tree_type
1521 ),
1522 };
1523 let state_merkle_tree_account = StateMerkleTreeAccounts {
1524 merkle_tree: merkle_tree_keypair.pubkey(),
1525 nullifier_queue: queue_keypair.pubkey(),
1526 cpi_context: cpi_context_keypair.pubkey(),
1527 };
1528
1529 self.state_merkle_trees.push(StateMerkleTreeBundle {
1530 merkle_tree,
1531 accounts: state_merkle_tree_account,
1532 rollover_fee,
1533 tree_type,
1534 output_queue_elements: vec![],
1535 input_leaf_indices: vec![],
1536 num_inserted_batches: 0,
1537 output_queue_batch_size,
1538 });
1539 println!(
1540 "creating Merkle tree bundle {:?}",
1541 self.state_merkle_trees
1542 .iter()
1543 .map(|x| x.accounts.merkle_tree)
1544 .collect::<Vec<_>>()
1545 );
1546 }
1547
1548 pub fn add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
1553 let event_bytes = event_bytes.clone();
1554 let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
1555 <TestIndexer as TestIndexerExtensions>::add_event_and_compressed_accounts(
1557 self, slot, &event,
1558 );
1559 }
1560
1561 pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
1563 self.compressed_accounts
1564 .iter()
1565 .filter(|x| x.compressed_account.owner.to_bytes() == owner.to_bytes())
1566 .map(|x| x.compressed_account.lamports)
1567 .sum()
1568 }
1569
1570 pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 {
1572 self.token_compressed_accounts
1573 .iter()
1574 .filter(|x| {
1575 x.compressed_account.compressed_account.owner.to_bytes() == owner.to_bytes()
1576 && x.token_data.mint == *mint
1577 })
1578 .map(|x| x.token_data.amount)
1579 .sum()
1580 }
1581
1582 fn process_v1_compressed_account(
1583 &mut self,
1584 slot: u64,
1585 event: &PublicTransactionEvent,
1586 i: usize,
1587 token_compressed_accounts: &mut Vec<TokenDataWithMerkleContext>,
1588 compressed_accounts: &mut Vec<CompressedAccountWithMerkleContext>,
1589 ) {
1590 let mut input_addresses = vec![];
1591 if event.input_compressed_account_hashes.len() > i {
1592 let tx_hash: [u8; 32] = create_tx_hash(
1593 &event.input_compressed_account_hashes,
1594 &event.output_compressed_account_hashes,
1595 slot,
1596 )
1597 .unwrap();
1598 let hash = event.input_compressed_account_hashes[i];
1599 let index = self
1600 .compressed_accounts
1601 .iter()
1602 .position(|x| x.hash().unwrap() == hash);
1603 let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index {
1604 self.nullified_compressed_accounts
1605 .push(self.compressed_accounts[index].clone());
1606 let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index;
1607 let merkle_tree_pubkey = self.compressed_accounts[index]
1608 .merkle_context
1609 .merkle_tree_pubkey;
1610 if let Some(address) = self.compressed_accounts[index].compressed_account.address {
1611 input_addresses.push(address);
1612 }
1613 self.compressed_accounts.remove(index);
1614 (leaf_index, merkle_tree_pubkey)
1615 } else {
1616 let index = self
1617 .token_compressed_accounts
1618 .iter()
1619 .position(|x| x.compressed_account.hash().unwrap() == hash)
1620 .expect("input compressed account not found");
1621 self.token_nullified_compressed_accounts
1622 .push(self.token_compressed_accounts[index].clone());
1623 let leaf_index = self.token_compressed_accounts[index]
1624 .compressed_account
1625 .merkle_context
1626 .leaf_index;
1627 let merkle_tree_pubkey = self.token_compressed_accounts[index]
1628 .compressed_account
1629 .merkle_context
1630 .merkle_tree_pubkey;
1631 self.token_compressed_accounts.remove(index);
1632 (leaf_index, merkle_tree_pubkey)
1633 };
1634 let bundle =
1635 &mut <TestIndexer as TestIndexerExtensions>::get_state_merkle_trees_mut(self)
1636 .iter_mut()
1637 .find(|x| {
1638 x.accounts.merkle_tree
1639 == solana_pubkey::Pubkey::from(merkle_tree_pubkey.to_bytes())
1640 })
1641 .unwrap();
1642 if bundle.tree_type == TreeType::StateV2 {
1644 let leaf_hash = event.input_compressed_account_hashes[i];
1645 bundle.input_leaf_indices.push(LeafIndexInfo {
1646 leaf_index,
1647 leaf: leaf_hash,
1648 tx_hash,
1649 });
1650 }
1651 }
1652 let mut new_addresses = vec![];
1653 if event.output_compressed_accounts.len() > i {
1654 let compressed_account = &event.output_compressed_accounts[i];
1655 if let Some(address) = compressed_account.compressed_account.address {
1656 if !input_addresses.iter().any(|x| x == &address) {
1657 new_addresses.push(address);
1658 }
1659 }
1660 let merkle_tree = self.state_merkle_trees.iter().find(|x| {
1661 x.accounts.merkle_tree
1662 == solana_pubkey::Pubkey::from(
1663 event.pubkey_array
1664 [event.output_compressed_accounts[i].merkle_tree_index as usize]
1665 .to_bytes(),
1666 )
1667 });
1668 let merkle_tree = if let Some(merkle_tree) = merkle_tree {
1670 merkle_tree
1671 } else {
1672 self.state_merkle_trees
1673 .iter()
1674 .find(|x| {
1675 x.accounts.nullifier_queue
1676 == solana_pubkey::Pubkey::from(
1677 event.pubkey_array[event.output_compressed_accounts[i]
1678 .merkle_tree_index
1679 as usize]
1680 .to_bytes(),
1681 )
1682 })
1683 .unwrap()
1684 };
1685 let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue;
1686 let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree;
1687 match compressed_account.compressed_account.data.as_ref() {
1691 Some(data) => {
1692 if compressed_account.compressed_account.owner == light_compressed_token::ID.to_bytes()
1693 && data.discriminator == light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR
1694 {
1695 if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) {
1696 let token_account = TokenDataWithMerkleContext {
1697 token_data,
1698 compressed_account: CompressedAccountWithMerkleContext {
1699 compressed_account: compressed_account
1700 .compressed_account
1701 .clone(),
1702 merkle_context: MerkleContext {
1703 leaf_index: event.output_leaf_indices[i],
1704 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1705 queue_pubkey: nullifier_queue_pubkey.into(),
1706 prove_by_index: false,
1707 tree_type:merkle_tree.tree_type,
1708 },
1709 },
1710 };
1711 token_compressed_accounts.push(token_account.clone());
1712 self.token_compressed_accounts.insert(0, token_account);
1713 }
1714 } else {
1715 let compressed_account = CompressedAccountWithMerkleContext {
1716 compressed_account: compressed_account.compressed_account.clone(),
1717 merkle_context: MerkleContext {
1718 leaf_index: event.output_leaf_indices[i],
1719 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1720 queue_pubkey: nullifier_queue_pubkey.into(),
1721 prove_by_index: false,
1722 tree_type: merkle_tree.tree_type
1723 },
1724 };
1725 compressed_accounts.push(compressed_account.clone());
1726 self.compressed_accounts.insert(0, compressed_account);
1727 }
1728 }
1729 None => {
1730 let compressed_account = CompressedAccountWithMerkleContext {
1731 compressed_account: compressed_account.compressed_account.clone(),
1732 merkle_context: MerkleContext {
1733 leaf_index: event.output_leaf_indices[i],
1734 merkle_tree_pubkey: merkle_tree_pubkey.into(),
1735 queue_pubkey: nullifier_queue_pubkey.into(),
1736 prove_by_index: false,
1737 tree_type: merkle_tree.tree_type,
1738 },
1739 };
1740 compressed_accounts.push(compressed_account.clone());
1741 self.compressed_accounts.insert(0, compressed_account);
1742 }
1743 };
1744 let merkle_tree = &mut self.state_merkle_trees.iter_mut().find(|x| {
1745 x.accounts.merkle_tree
1746 == solana_pubkey::Pubkey::from(
1747 event.pubkey_array
1748 [event.output_compressed_accounts[i].merkle_tree_index as usize]
1749 .to_bytes(),
1750 )
1751 });
1752 if merkle_tree.is_some() {
1753 let merkle_tree = merkle_tree.as_mut().unwrap();
1754 let leaf_hash = compressed_account
1755 .compressed_account
1756 .hash(
1757 &event.pubkey_array
1758 [event.output_compressed_accounts[i].merkle_tree_index as usize],
1759 &event.output_leaf_indices[i],
1760 false,
1761 )
1762 .unwrap();
1763 merkle_tree
1764 .merkle_tree
1765 .append(&leaf_hash)
1766 .expect("insert failed");
1767 } else {
1768 let merkle_tree = &mut self
1769 .state_merkle_trees
1770 .iter_mut()
1771 .find(|x| {
1772 x.accounts.nullifier_queue
1773 == solana_pubkey::Pubkey::from(
1774 event.pubkey_array[event.output_compressed_accounts[i]
1775 .merkle_tree_index
1776 as usize]
1777 .to_bytes(),
1778 )
1779 })
1780 .unwrap();
1781
1782 merkle_tree.output_queue_elements.push((
1783 event.output_compressed_account_hashes[i],
1784 event.output_leaf_indices[i].into(),
1785 ));
1786 }
1787 }
1788 if !new_addresses.is_empty() {
1795 for pubkey in event.pubkey_array.iter() {
1796 if let Some((_, address_merkle_tree)) = self
1797 .address_merkle_trees
1798 .iter_mut()
1799 .enumerate()
1800 .find(|(_, x)| {
1801 x.accounts.merkle_tree == solana_pubkey::Pubkey::from(pubkey.to_bytes())
1802 })
1803 {
1804 address_merkle_tree
1805 .queue_elements
1806 .append(&mut new_addresses);
1807 }
1808 }
1809 }
1810 }
1811
1812 async fn _get_multiple_new_address_proofs(
1813 &self,
1814 merkle_tree_pubkey: [u8; 32],
1815 addresses: Vec<[u8; 32]>,
1816 full: bool,
1817 ) -> Result<Vec<NewAddressProofWithContext>, IndexerError> {
1818 let mut proofs: Vec<NewAddressProofWithContext> = Vec::new();
1819
1820 for address in addresses.iter() {
1821 info!("Getting new address proof for {:?}", address);
1822 let pubkey = Pubkey::from(merkle_tree_pubkey);
1823 let address_tree_bundle = self
1824 .address_merkle_trees
1825 .iter()
1826 .find(|x| x.accounts.merkle_tree == pubkey)
1827 .unwrap();
1828
1829 let address_biguint = BigUint::from_bytes_be(address.as_slice());
1830 let (old_low_address, _old_low_address_next_value) =
1831 address_tree_bundle.find_low_element_for_nonexistent(&address_biguint)?;
1832 let address_bundle = address_tree_bundle
1833 .new_element_with_low_element_index(old_low_address.index, &address_biguint)?;
1834
1835 let (old_low_address, old_low_address_next_value) =
1836 address_tree_bundle.find_low_element_for_nonexistent(&address_biguint)?;
1837
1838 let low_address_proof =
1840 address_tree_bundle.get_proof_of_leaf(old_low_address.index, full)?;
1841
1842 let low_address_index: u64 = old_low_address.index as u64;
1843 let low_address_value: [u8; 32] =
1844 bigint_to_be_bytes_array(&old_low_address.value).unwrap();
1845 let low_address_next_index: u64 = old_low_address.next_index as u64;
1846 let low_address_next_value: [u8; 32] =
1847 bigint_to_be_bytes_array(&old_low_address_next_value).unwrap();
1848 let proof = NewAddressProofWithContext {
1849 merkle_tree: Pubkey::new_from_array(merkle_tree_pubkey),
1850 low_address_index,
1851 low_address_value,
1852 low_address_next_index,
1853 low_address_next_value,
1854 low_address_proof,
1855 root: address_tree_bundle.root(),
1856 root_seq: address_tree_bundle.sequence_number(),
1857 new_low_element: Some(address_bundle.new_low_element),
1858 new_element: Some(address_bundle.new_element),
1859 new_element_next_value: Some(address_bundle.new_element_next_value),
1860 };
1861 proofs.push(proof);
1862 }
1863 Ok(proofs)
1864 }
1865}
1866
1867impl TestIndexer {
1868 async fn process_inclusion_proofs(
1869 &self,
1870 merkle_tree_pubkeys: &[Pubkey],
1871 accounts: &[[u8; 32]],
1872 ) -> Result<
1873 (
1874 Option<BatchInclusionJsonStruct>,
1875 Option<BatchInclusionJsonStructLegacy>,
1876 Vec<AccountProofInputs>,
1877 ),
1878 IndexerError,
1879 > {
1880 let mut inclusion_proofs = Vec::new();
1881 let mut account_proof_inputs = Vec::new();
1882 let mut height = 0;
1883 let mut queues = vec![];
1884 let mut cpi_contextes = vec![];
1885 let mut tree_types = vec![];
        let proof_data: Vec<_> = accounts
            .iter()
            .zip(merkle_tree_pubkeys.iter())
            .map(|(account, &pubkey)| {
                let bundle = self
                    .state_merkle_trees
                    .iter()
                    .find(|x| {
                        x.accounts.merkle_tree == pubkey || x.accounts.nullifier_queue == pubkey
                    })
                    .unwrap();
                let merkle_tree = &bundle.merkle_tree;
                queues.push(bundle.accounts.nullifier_queue);
                cpi_contexts.push(bundle.accounts.cpi_context);
                tree_types.push(bundle.tree_type);
                let leaf_index = merkle_tree.get_leaf_index(account).unwrap();
                let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap();

                let proof: Vec<BigInt> = proof.iter().map(|x| BigInt::from_be_bytes(x)).collect();

                if height == 0 {
                    height = merkle_tree.height;
                } else {
                    assert_eq!(height, merkle_tree.height);
                }
                let root_index = if bundle.tree_type == TreeType::StateV1 {
                    merkle_tree.get_history_root_index().unwrap()
                } else {
                    merkle_tree.get_history_root_index_v2().unwrap()
                };

                Ok((leaf_index, proof, merkle_tree.root(), root_index))
            })
            .collect::<Result<_, IndexerError>>()?;

        for (i, (leaf_index, proof, merkle_root, root_index)) in proof_data.into_iter().enumerate()
        {
            inclusion_proofs.push(InclusionMerkleProofInputs {
                root: BigInt::from_be_bytes(merkle_root.as_slice()),
                leaf: BigInt::from_be_bytes(&accounts[i]),
                path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()),
                path_elements: proof,
            });

            account_proof_inputs.push(AccountProofInputs {
                root_index: RootIndex::new_some(root_index),
                root: merkle_root,
                leaf_index: leaf_index as u64,
                hash: accounts[i],
                tree_info: light_client::indexer::TreeInfo {
                    cpi_context: Some(cpi_contexts[i]),
                    next_tree_info: None,
                    queue: queues[i],
                    tree: merkle_tree_pubkeys[i],
                    tree_type: tree_types[i],
                },
            });
        }

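        // Batched (v2) state trees use the new circuit inputs; legacy-height trees
        // fall back to the v1 inputs.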
        let (batch_inclusion_proof_inputs, legacy) = if height
            == DEFAULT_BATCH_STATE_TREE_HEIGHT as usize
        {
            let inclusion_proof_inputs =
                InclusionProofInputs::new(inclusion_proofs.as_slice()).unwrap();
            (
                Some(BatchInclusionJsonStruct::from_inclusion_proof_inputs(
                    &inclusion_proof_inputs,
                )),
                None,
            )
        } else if height == account_compression::utils::constants::STATE_MERKLE_TREE_HEIGHT as usize
        {
            let inclusion_proof_inputs = InclusionProofInputsLegacy(inclusion_proofs.as_slice());
            (
                None,
                Some(BatchInclusionJsonStructLegacy::from_inclusion_proof_inputs(
                    &inclusion_proof_inputs,
                )),
            )
        } else {
            return Err(IndexerError::CustomError(
                "Unsupported tree height".to_string(),
            ));
        };

        Ok((batch_inclusion_proof_inputs, legacy, account_proof_inputs))
    }

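    /// Builds the prover-facing non-inclusion proof inputs for the given addresses.
    /// All referenced address trees must share the same height; that height selects
    /// between the batched (v2) and legacy (v1) payloads.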
    async fn process_non_inclusion_proofs(
        &self,
        address_merkle_tree_pubkeys: &[Pubkey],
        addresses: Vec<[u8; 32]>,
    ) -> Result<
        (
            Option<BatchNonInclusionJsonStruct>,
            Option<BatchNonInclusionJsonStructLegacy>,
            Vec<AddressProofInputs>,
        ),
        IndexerError,
    > {
        let mut non_inclusion_proofs = Vec::new();
        let mut address_root_indices = Vec::new();
        let mut tree_heights = Vec::new();
        for (i, address) in addresses.iter().enumerate() {
            let address_tree = self
                .address_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i])
                .unwrap();
            tree_heights.push(address_tree.height());

            let proof_inputs = address_tree.get_non_inclusion_proof_inputs(address)?;
            non_inclusion_proofs.push(proof_inputs);

            let (root_index, root, tree_type) = match &address_tree.merkle_tree {
                IndexedMerkleTreeVersion::V1(tree) => (
                    tree.merkle_tree.get_history_root_index().unwrap() + 1,
                    tree.merkle_tree.root(),
                    TreeType::AddressV1,
                ),
                IndexedMerkleTreeVersion::V2(tree) => (
                    tree.merkle_tree.get_history_root_index_v2().unwrap(),
                    tree.merkle_tree.root(),
                    TreeType::AddressV2,
                ),
            };
            address_root_indices.push(AddressProofInputs {
                root_index,
                root,
                address: *address,
                tree_info: light_client::indexer::TreeInfo {
                    cpi_context: None,
                    next_tree_info: None,
                    queue: address_tree.accounts.queue,
                    tree: address_tree.accounts.merkle_tree,
                    tree_type,
                },
            });
        }
        if tree_heights.iter().any(|&x| x != tree_heights[0]) {
            return Err(IndexerError::CustomError(format!(
                "All address merkle trees must have the same height, got {:?}",
                tree_heights
            )));
        }
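        // Height 26 corresponds to legacy v1 address trees, height 40 to batched v2 trees.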
        let (batch_non_inclusion_proof_inputs, batch_non_inclusion_proof_inputs_legacy) =
            if tree_heights[0] == 26 {
                let non_inclusion_proof_inputs =
                    NonInclusionProofInputsLegacy::new(non_inclusion_proofs.as_slice());
                (
                    None,
                    Some(
                        BatchNonInclusionJsonStructLegacy::from_non_inclusion_proof_inputs(
                            &non_inclusion_proof_inputs,
                        ),
                    ),
                )
            } else if tree_heights[0] == 40 {
                let non_inclusion_proof_inputs =
                    NonInclusionProofInputs::new(non_inclusion_proofs.as_slice()).unwrap();
                (
                    Some(
                        BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
                            &non_inclusion_proof_inputs,
                        ),
                    ),
                    None,
                )
            } else {
                return Err(IndexerError::CustomError(
                    "Unsupported tree height".to_string(),
                ));
            };
        Ok((
            batch_non_inclusion_proof_inputs,
            batch_non_inclusion_proof_inputs_legacy,
            address_root_indices,
        ))
    }
}

impl TestIndexer {
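    /// Builds inclusion and/or non-inclusion proof inputs locally and requests a
    /// compressed validity proof from the local prover server.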
    async fn _get_validity_proof_v1_implementation(
        &self,
        hashes: Vec<[u8; 32]>,
        new_addresses_with_trees: Vec<AddressWithTree>,
    ) -> Result<ValidityProofWithContext, IndexerError> {
        let mut state_merkle_tree_pubkeys = Vec::new();

        for hash in hashes.iter() {
            state_merkle_tree_pubkeys.push(
                self.get_compressed_account_by_hash(*hash, None)
                    .await?
                    .value
                    .tree_info
                    .tree,
            );
        }

        let state_merkle_tree_pubkeys = if state_merkle_tree_pubkeys.is_empty() {
            None
        } else {
            Some(state_merkle_tree_pubkeys)
        };
        let hashes = if hashes.is_empty() { None } else { Some(hashes) };
        let new_addresses = if new_addresses_with_trees.is_empty() {
            None
        } else {
            Some(
                new_addresses_with_trees
                    .iter()
                    .map(|x| x.address)
                    .collect::<Vec<[u8; 32]>>(),
            )
        };
        let address_merkle_tree_pubkeys = if new_addresses_with_trees.is_empty() {
            None
        } else {
            Some(
                new_addresses_with_trees
                    .iter()
                    .map(|x| x.tree)
                    .collect::<Vec<Pubkey>>(),
            )
        };

        {
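            // Only batch sizes of 1, 2, 3, 4, or 8 are supported; reject anything else up front.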
            let compressed_accounts = hashes;
            if compressed_accounts.is_some()
                && ![1usize, 2usize, 3usize, 4usize, 8usize]
                    .contains(&compressed_accounts.as_ref().unwrap().len())
            {
                return Err(IndexerError::CustomError(format!(
                    "compressed_accounts must have length 1, 2, 3, 4, or 8, got {}",
                    compressed_accounts.unwrap().len()
                )));
            }
            if new_addresses.is_some()
                && ![1usize, 2usize, 3usize, 4usize, 8usize]
                    .contains(&new_addresses.as_ref().unwrap().len())
            {
                return Err(IndexerError::CustomError(format!(
                    "new_addresses must have length 1, 2, 3, 4, or 8, got {}",
                    new_addresses.unwrap().len()
                )));
            }
            let client = Client::new();
            let (account_proof_inputs, address_proof_inputs, json_payload) =
                match (compressed_accounts, new_addresses) {
                    (Some(accounts), None) => {
                        let (payload, payload_legacy, indices) = self
                            .process_inclusion_proofs(
                                &state_merkle_tree_pubkeys.unwrap(),
                                &accounts,
                            )
                            .await?;
                        if let Some(payload) = payload {
                            (indices, Vec::new(), payload.to_string())
                        } else {
                            (indices, Vec::new(), payload_legacy.unwrap().to_string())
                        }
                    }
                    (None, Some(addresses)) => {
                        let (payload, payload_legacy, indices) = self
                            .process_non_inclusion_proofs(
                                address_merkle_tree_pubkeys.unwrap().as_slice(),
                                addresses,
                            )
                            .await?;
                        let payload_string = if let Some(payload) = payload {
                            payload.to_string()
                        } else {
                            payload_legacy.unwrap().to_string()
                        };
                        (Vec::new(), indices, payload_string)
                    }
                    (Some(accounts), Some(addresses)) => {
                        let (inclusion_payload, inclusion_payload_legacy, inclusion_indices) = self
                            .process_inclusion_proofs(
                                &state_merkle_tree_pubkeys.unwrap(),
                                &accounts,
                            )
                            .await?;

                        let (
                            non_inclusion_payload,
                            non_inclusion_payload_legacy,
                            non_inclusion_indices,
                        ) = self
                            .process_non_inclusion_proofs(
                                address_merkle_tree_pubkeys.unwrap().as_slice(),
                                addresses,
                            )
                            .await?;
                        let json_payload = if let Some(non_inclusion_payload) =
                            non_inclusion_payload
                        {
                            let public_input_hash = BigInt::from_bytes_be(
                                num_bigint::Sign::Plus,
                                &create_hash_chain_from_slice(&[
                                    bigint_to_u8_32(
                                        &string_to_big_int(
                                            &inclusion_payload.as_ref().unwrap().public_input_hash,
                                        )
                                        .unwrap(),
                                    )
                                    .unwrap(),
                                    bigint_to_u8_32(
                                        &string_to_big_int(
                                            &non_inclusion_payload.public_input_hash,
                                        )
                                        .unwrap(),
                                    )
                                    .unwrap(),
                                ])
                                .unwrap(),
                            );

                            CombinedJsonStruct {
                                circuit_type: ProofType::Combined.to_string(),
                                state_tree_height: DEFAULT_BATCH_STATE_TREE_HEIGHT,
                                address_tree_height: DEFAULT_BATCH_ADDRESS_TREE_HEIGHT,
                                public_input_hash: big_int_to_string(&public_input_hash),
                                inclusion: inclusion_payload.unwrap().inputs,
                                non_inclusion: non_inclusion_payload.inputs,
                            }
                            .to_string()
                        } else if let Some(non_inclusion_payload) = non_inclusion_payload_legacy {
                            CombinedJsonStructLegacy {
                                circuit_type: ProofType::Combined.to_string(),
                                state_tree_height: 26,
                                address_tree_height: 26,
                                inclusion: inclusion_payload_legacy.unwrap().inputs,
                                non_inclusion: non_inclusion_payload.inputs,
                            }
                            .to_string()
                        } else {
                            panic!("Unsupported tree height")
                        };
                        (inclusion_indices, non_inclusion_indices, json_payload)
                    }
                    _ => {
                        panic!(
                            "At least one of compressed_accounts or new_addresses must be provided"
                        )
                    }
                };

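            // Post the proof request to the local prover server, retrying a few times
            // before giving up.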
            let mut retries = 3;
            while retries > 0 {
                let response_result = client
                    .post(format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
                    .header("Content-Type", "text/plain; charset=utf-8")
                    .body(json_payload.clone())
                    .send()
                    .await;
                match response_result {
                    Ok(response) if response.status().is_success() => {
                        let body = response.text().await.unwrap();
                        let proof_json = deserialize_gnark_proof_json(&body).unwrap();
                        let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
                        let (proof_a, proof_b, proof_c) =
                            compress_proof(&proof_a, &proof_b, &proof_c);
                        return Ok(ValidityProofWithContext {
                            accounts: account_proof_inputs,
                            addresses: address_proof_inputs,
                            proof: CompressedProof {
                                a: proof_a,
                                b: proof_b,
                                c: proof_c,
                            }
                            .into(),
                        });
                    }
                    other => {
                        // Transport error or non-success status: wait and retry.
                        println!("Error: {:#?}", other);
                        tokio::time::sleep(Duration::from_secs(5)).await;
                        retries -= 1;
                    }
                }
            }
            Err(IndexerError::CustomError(
                "Failed to get proof from server".to_string(),
            ))
        }
    }
}