//! Client state-sync logic (`miden_client/sync/state_sync.rs`).
//!
//! Fetches chain updates from the node and transforms them into a [`StateSyncUpdate`]
//! that can be applied to the client's store.

1use alloc::boxed::Box;
2use alloc::collections::{BTreeMap, BTreeSet};
3use alloc::sync::Arc;
4use alloc::vec::Vec;
5
6use async_trait::async_trait;
7use miden_protocol::account::{
8    Account,
9    AccountCode,
10    AccountDelta,
11    AccountHeader,
12    AccountId,
13    AccountStorage,
14    AccountStorageDelta,
15    AccountVaultDelta,
16    StorageMapKey,
17    StorageSlot,
18    StorageSlotContent,
19    StorageSlotName,
20    StorageSlotType,
21};
22use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey};
23use miden_protocol::block::{BlockHeader, BlockNumber};
24use miden_protocol::crypto::merkle::mmr::{MmrDelta, PartialMmr};
25use miden_protocol::note::{Note, NoteId, NoteTag, NoteType, Nullifier};
26use miden_protocol::transaction::InputNoteCommitment;
27use miden_protocol::{EMPTY_WORD, Felt, Word};
28use tracing::info;
29
30use super::state_sync_update::TransactionUpdateTracker;
31use super::{AccountUpdates, PublicAccountUpdate, StateSyncUpdate};
32use crate::ClientError;
33use crate::note::NoteUpdateTracker;
34use crate::rpc::domain::account::{
35    AccountDetails,
36    AccountStorageMapDetails,
37    AccountStorageRequirements,
38    FetchedAccount,
39};
40use crate::rpc::domain::note::{CommittedNote, NoteSyncBlock};
41use crate::rpc::domain::storage_map::StorageMapUpdate;
42use crate::rpc::domain::transaction::{
43    TransactionInclusion,
44    TransactionRecord as RpcTransactionRecord,
45};
46use crate::rpc::{AccountStateAt, NodeRpcClient, RpcError};
47use crate::store::{AccountStorageFilter, InputNoteRecord, OutputNoteRecord, Store, StoreError};
48use crate::transaction::TransactionRecord;
49
50// STATE UPDATE DATA
51// ================================================================================================
52
/// Raw data fetched from the node needed to sync the client to the chain tip.
///
/// Aggregates the responses of `sync_chain_mmr`, `sync_notes`, `get_notes_by_id`, and
/// `sync_transactions`. This may contain more data than a particular client needs to store — it is
/// filtered and transformed into a [`StateSyncUpdate`] before being applied.
struct RawStateSyncData {
    /// MMR delta covering the full range from `current_block` to `chain_tip`.
    mmr_delta: MmrDelta,
    /// Chain tip block header.
    chain_tip_header: BlockHeader,
    /// Blocks with matching notes that the client is interested in.
    note_blocks: Vec<NoteSyncBlock>,
    /// Full note bodies for public notes, keyed by note ID.
    ///
    /// Drained (via `mem::take`) during `sync_state` to build input note records, pairing
    /// each body with its inclusion proof from `note_blocks`.
    public_notes: BTreeMap<NoteId, Note>,
    /// Account commitment updates for the synced range.
    account_commitment_updates: Vec<(AccountId, Word)>,
    /// Transaction inclusions for the synced range.
    transactions: Vec<TransactionInclusion>,
    /// Nullifiers for the synced range.
    nullifiers: Vec<Nullifier>,
}
74
75// SYNC REQUEST
76// ================================================================================================
77
/// Bundles the client state needed to perform a sync operation.
///
/// The sync process uses these inputs to:
/// - Request account commitment updates from the node for the provided accounts.
/// - Filter which note inclusions the node returns based on the provided note tags.
/// - Follow the lifecycle of every tracked note (input and output), transitioning them from pending
///   to committed to consumed as the network state advances.
/// - Track uncommitted transactions so they can be marked as committed when the node confirms them,
///   or discarded when they become stale.
///
/// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build a default input
/// from the client state, or construct this struct manually for custom sync scenarios.
pub struct StateSyncInput {
    /// Account headers to request commitment updates for.
    ///
    /// Both public/network and private accounts may appear here; they take different sync
    /// paths (see `StateSync::account_state_sync`).
    pub accounts: Vec<AccountHeader>,
    /// Note tags that the node uses to filter which note inclusions to return.
    pub note_tags: BTreeSet<NoteTag>,
    /// Input notes whose lifecycle should be followed during sync.
    pub input_notes: Vec<InputNoteRecord>,
    /// Output notes whose lifecycle should be followed during sync.
    pub output_notes: Vec<OutputNoteRecord>,
    /// Transactions to track for commitment or discard during sync.
    pub uncommitted_transactions: Vec<TransactionRecord>,
}
102
103// SYNC CALLBACKS
104// ================================================================================================
105
/// The action to be taken when a note update is received as part of the sync response.
// NOTE: `Commit`/`Insert` carry full note payloads of differing sizes; the variant-size
// lint is allowed rather than boxing, presumably to avoid an extra allocation per note —
// confirm if the enum is ever stored in bulk.
#[allow(clippy::large_enum_variant)]
pub enum NoteUpdateAction {
    /// The note commit update is relevant and the specified note should be marked as committed in
    /// the store, storing its inclusion proof.
    Commit(CommittedNote),
    /// The public note is relevant and should be inserted into the store.
    Insert(InputNoteRecord),
    /// The note update is not relevant and should be discarded.
    Discard,
}
117
#[async_trait(?Send)]
pub trait OnNoteReceived {
    /// Callback that gets executed when a new note is received as part of the sync response.
    ///
    /// It receives:
    ///
    /// - The committed note received from the network.
    /// - An optional note record that corresponds to the state of the note in the network (only if
    ///   the note is public).
    ///
    /// It returns a [`NoteUpdateAction`] indicating how to handle the received note update:
    /// whether the note update should be committed, a new public note inserted, or the update
    /// discarded as irrelevant.
    async fn on_note_received(
        &self,
        committed_note: CommittedNote,
        public_note: Option<InputNoteRecord>,
    ) -> Result<NoteUpdateAction, ClientError>;
}
136// STATE SYNC
137// ================================================================================================
138
/// The state sync component encompasses the client's sync logic. It is then used to request
/// updates from the node and apply them to the relevant elements. The updates are then returned and
/// can be applied to the store to persist the changes.
#[derive(Clone)]
pub struct StateSync {
    /// The RPC client used to communicate with the node.
    rpc_api: Arc<dyn NodeRpcClient>,
    /// The client's store, used to fetch account storage and vault data on demand during
    /// delta-based sync of public accounts. When `None`, oversized public accounts fall back
    /// to `get_account_details` (full sync from block 0).
    store: Option<Arc<dyn Store>>,
    /// Responsible for checking the relevance of notes and executing the
    /// [`OnNoteReceived`] callback when a new note inclusion is received.
    note_screener: Arc<dyn OnNoteReceived>,
    /// Number of blocks after which pending transactions are considered stale and discarded.
    /// If `None`, there is no limit and transactions will be kept indefinitely.
    tx_discard_delta: Option<u32>,
    /// Whether to check for nullifiers during state sync. When enabled, the component will query
    /// the nullifiers for unspent notes at each sync step. This allows to detect when tracked
    /// notes have been consumed externally and discard local transactions that depend on them.
    ///
    /// Defaults to `true` in [`StateSync::new`]; toggled via
    /// [`Self::disable_nullifier_sync`] / [`Self::enable_nullifier_sync`].
    sync_nullifiers: bool,
}
161
162impl StateSync {
163    /// Creates a new instance of the state sync component.
164    ///
165    /// The nullifiers sync is enabled by default. To disable it, see
166    /// [`Self::disable_nullifier_sync`].
167    ///
168    /// # Arguments
169    ///
170    /// * `rpc_api` - The RPC client used to communicate with the node.
171    /// * `store` - Optional store for on-demand account data access during delta sync.
172    /// * `note_screener` - The note screener used to check the relevance of notes.
173    /// * `tx_discard_delta` - Number of blocks after which pending transactions are discarded.
174    pub fn new(
175        rpc_api: Arc<dyn NodeRpcClient>,
176        store: Option<Arc<dyn Store>>,
177        note_screener: Arc<dyn OnNoteReceived>,
178        tx_discard_delta: Option<u32>,
179    ) -> Self {
180        Self {
181            rpc_api,
182            store,
183            note_screener,
184            tx_discard_delta,
185            sync_nullifiers: true,
186        }
187    }
188
    /// Disables the nullifier sync.
    ///
    /// When disabled, the component will not query the node for new nullifiers after each sync
    /// step. This is useful for clients that don't need to track note consumption, such as
    /// faucets.
    pub fn disable_nullifier_sync(&mut self) {
        self.sync_nullifiers = false;
    }
197
    /// Enables the nullifier sync.
    ///
    /// This is the default behavior; see [`Self::disable_nullifier_sync`] for the inverse.
    pub fn enable_nullifier_sync(&mut self) {
        self.sync_nullifiers = true;
    }
202
    /// Syncs the state of the client with the chain tip of the node, returning the updates that
    /// should be applied to the store.
    ///
    /// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build the default
    /// input, or assemble it manually for custom sync. The `current_partial_mmr` is taken by
    /// mutable reference so callers can keep it in memory across syncs.
    ///
    /// During the sync process, the following steps are performed:
    /// 1. A request is sent to the node to get the state updates. This request includes tracked
    ///    account IDs and the tags of notes that might have changed or that might be of interest to
    ///    the client.
    /// 2. A response is received with the current state of the network. The response includes
    ///    information about new and committed notes, updated accounts, and committed transactions.
    /// 3. Tracked public accounts are updated and private accounts are validated against the node
    ///    state.
    /// 4. Tracked notes are updated with their new states. Notes might be committed or nullified
    ///    during the sync processing.
    /// 5. New notes are checked, and only relevant ones are stored. Relevance is determined by the
    ///    [`OnNoteReceived`] callback.
    /// 6. Transactions are updated with their new states. Transactions might be committed or
    ///    discarded.
    /// 7. The MMR is updated with the new peaks and authentication nodes.
    pub async fn sync_state(
        &self,
        current_partial_mmr: &mut PartialMmr,
        input: StateSyncInput,
    ) -> Result<StateSyncUpdate, ClientError> {
        let StateSyncInput {
            accounts,
            note_tags,
            input_notes,
            output_notes,
            uncommitted_transactions,
        } = input;
        // The partial MMR holds one leaf per synced block header, so the latest locally
        // synced block number is `num_leaves - 1` (saturating at 0 for an empty forest).
        // The conversion only fails if the forest size doesn't fit in a u32 block number.
        let block_num = u32::try_from(current_partial_mmr.forest().num_leaves().saturating_sub(1))
            .map_err(|_| ClientError::InvalidPartialMmrForest)?
            .into();

        let mut state_sync_update = StateSyncUpdate {
            block_num,
            note_updates: NoteUpdateTracker::new(input_notes, output_notes),
            transaction_updates: TransactionUpdateTracker::new(uncommitted_transactions),
            ..Default::default()
        };

        let note_tags = Arc::new(note_tags);
        let account_ids: Vec<AccountId> = accounts.iter().map(AccountHeader::id).collect();
        let Some(mut sync_data) = self
            .fetch_sync_data(state_sync_update.block_num, &account_ids, &note_tags)
            .await?
        else {
            // No progress — already at the tip.
            return Ok(state_sync_update);
        };

        state_sync_update.block_num = sync_data.chain_tip_header.block_num();

        // Build input note records for public notes from the fetched note bodies and the
        // inclusion proofs already present in the note blocks.
        let mut public_note_records: BTreeMap<NoteId, InputNoteRecord> = BTreeMap::new();
        for (note_id, note) in core::mem::take(&mut sync_data.public_notes) {
            // Locate the note's inclusion proof among the fetched note blocks.
            let inclusion_proof = sync_data
                .note_blocks
                .iter()
                .find_map(|b| b.notes.get(&note_id))
                .map(|committed| committed.inclusion_proof().clone());

            // A public note without a matching inclusion proof is skipped silently; it will
            // simply not be available for the screening step below.
            if let Some(inclusion_proof) = inclusion_proof {
                let state = crate::store::input_note_states::UnverifiedNoteState {
                    metadata: note.metadata().clone(),
                    inclusion_proof,
                }
                .into();
                let record = InputNoteRecord::new(note.into(), None, state);
                public_note_records.insert(record.id(), record);
            }
        }

        // Step 3: compare tracked accounts against the node's commitment updates.
        self.account_state_sync(
            &mut state_sync_update.account_updates,
            &accounts,
            &sync_data.account_commitment_updates,
            block_num,
        )
        .await?;

        // Apply local changes: update the MMR, screen notes, and apply state transitions.
        self.apply_sync_result(
            sync_data,
            &public_note_records,
            &mut state_sync_update,
            current_partial_mmr,
        )
        .await?;

        // Optionally detect notes consumed externally by querying nullifiers from the
        // previously synced block (`block_num` captured before the update above).
        if self.sync_nullifiers {
            self.nullifiers_state_sync(&mut state_sync_update, block_num).await?;
        }

        Ok(state_sync_update)
    }
304
305    /// Fetches the sync data from the node by calling the following endpoints:
306    /// 1. `sync_chain_mmr` — discovers the chain tip, gets the MMR delta and chain tip header.
307    /// 2. `sync_notes` — loops until the full range to the chain tip is covered (handles paginated
308    ///    responses).
309    /// 3. `get_notes_by_id` — fetches full metadata for notes with attachments.
310    /// 4. `sync_transactions` — gets transaction data for the full range.
311    ///
312    /// Returns `None` when the client is already at the chain tip (no progress).
313    async fn fetch_sync_data(
314        &self,
315        current_block_num: BlockNumber,
316        account_ids: &[AccountId],
317        note_tags: &Arc<BTreeSet<NoteTag>>,
318    ) -> Result<Option<RawStateSyncData>, ClientError> {
319        // Step 1: Fetch the MMR delta and chain tip header.
320        let chain_mmr_info = self.rpc_api.sync_chain_mmr(current_block_num, None).await?;
321        let chain_tip = chain_mmr_info.block_to;
322
323        // No progress — already at the tip.
324        if chain_tip == current_block_num {
325            info!(block_num = %current_block_num, "Already at chain tip, nothing to sync.");
326            return Ok(None);
327        }
328
329        info!(
330            block_from = %current_block_num,
331            block_to = %chain_tip,
332            "Syncing state.",
333        );
334
335        // Step 2: Paginate sync_notes using the same chain tip so MMR paths are opened at
336        // a consistent forest.
337        let sync_notes_result = self
338            .rpc_api
339            .sync_notes_with_details(current_block_num, Some(chain_tip), note_tags.as_ref())
340            .await?;
341
342        let note_count: usize = sync_notes_result.blocks.iter().map(|b| b.notes.len()).sum();
343        info!(
344            blocks_with_notes = sync_notes_result.blocks.len(),
345            notes = note_count,
346            public_notes = sync_notes_result.public_notes.len(),
347            "Fetched note sync data.",
348        );
349
350        // Step 3: Gather transactions for tracked accounts over the full range.
351        let (account_commitment_updates, transactions, nullifiers) =
352            self.fetch_transaction_data(current_block_num, chain_tip, account_ids).await?;
353
354        Ok(Some(RawStateSyncData {
355            mmr_delta: chain_mmr_info.mmr_delta,
356            chain_tip_header: chain_mmr_info.block_header,
357            note_blocks: sync_notes_result.blocks,
358            public_notes: sync_notes_result.public_notes,
359            account_commitment_updates,
360            transactions,
361            nullifiers,
362        }))
363    }
364
365    /// Fetches transaction data for the given range and account IDs.
366    async fn fetch_transaction_data(
367        &self,
368        block_from: BlockNumber,
369        block_to: BlockNumber,
370        account_ids: &[AccountId],
371    ) -> Result<(Vec<(AccountId, Word)>, Vec<TransactionInclusion>, Vec<Nullifier>), ClientError>
372    {
373        if account_ids.is_empty() {
374            return Ok((vec![], vec![], vec![]));
375        }
376
377        let tx_info = self
378            .rpc_api
379            .sync_transactions(block_from, Some(block_to), account_ids.to_vec())
380            .await?;
381
382        let transaction_records = tx_info.transaction_records;
383
384        let account_updates = derive_account_commitment_updates(&transaction_records);
385        let nullifiers = compute_ordered_nullifiers(&transaction_records);
386
387        let tx_inclusions = transaction_records
388            .into_iter()
389            .map(|r| {
390                let nullifiers = r
391                    .transaction_header
392                    .input_notes()
393                    .iter()
394                    .map(InputNoteCommitment::nullifier)
395                    .collect();
396                TransactionInclusion {
397                    transaction_id: r.transaction_header.id(),
398                    block_num: r.block_num,
399                    account_id: r.transaction_header.account_id(),
400                    initial_state_commitment: r.transaction_header.initial_state_commitment(),
401                    nullifiers,
402                    output_notes: r.output_notes,
403                }
404            })
405            .collect();
406
407        Ok((account_updates, tx_inclusions, nullifiers))
408    }
409
410    // HELPERS
411    // --------------------------------------------------------------------------------------------
412
    /// Applies sync results to the local state update.
    ///
    /// Applies fetched sync data to the local state:
    /// 1. Advances the partial MMR (delta + chain tip leaf).
    /// 2. Screens note blocks and tracks relevant ones in the MMR.
    /// 3. Applies transaction and nullifier updates.
    async fn apply_sync_result(
        &self,
        sync_data: RawStateSyncData,
        public_note_records: &BTreeMap<NoteId, InputNoteRecord>,
        state_sync_update: &mut StateSyncUpdate,
        current_partial_mmr: &mut PartialMmr,
    ) -> Result<(), ClientError> {
        // `mmr_delta` and `public_notes` have already been consumed by the caller;
        // the remaining fields are destructured here.
        let RawStateSyncData {
            mmr_delta,
            chain_tip_header,
            note_blocks,
            nullifiers,
            transactions,
            ..
        } = sync_data;

        // Advance the partial MMR: apply delta (up to chain_tip - 1), capture peaks for
        // storage, then add the chain tip leaf (which the delta excludes due to the
        // one-block lag in block header MMR commitments).
        let mut new_authentication_nodes =
            current_partial_mmr.apply(mmr_delta).map_err(StoreError::MmrError)?;
        // Peaks are captured BEFORE adding the tip leaf — order matters here.
        let new_peaks = current_partial_mmr.peaks();
        new_authentication_nodes
            .append(&mut current_partial_mmr.add(chain_tip_header.commitment(), false));

        // `false`: the chain tip block itself is not tracked as containing relevant notes.
        state_sync_update.block_updates.insert(
            chain_tip_header.clone(),
            false,
            new_peaks,
            new_authentication_nodes,
        );

        // Screen each note block and track relevant ones in the partial MMR using the
        // authentication path from the sync_notes response.
        for block in note_blocks {
            let found_relevant_note = self
                .note_state_sync(
                    &mut state_sync_update.note_updates,
                    block.notes,
                    &block.block_header,
                    public_note_records,
                )
                .await?;

            if found_relevant_note {
                let block_pos = block.block_header.block_num().as_usize();

                // Snapshot the MMR nodes before tracking so that only nodes newly added by
                // `track` (below) are persisted for this block.
                let nodes_before: BTreeMap<_, _> =
                    current_partial_mmr.nodes().map(|(k, v)| (*k, *v)).collect();

                if !current_partial_mmr.is_tracked(block_pos) {
                    current_partial_mmr
                        .track(block_pos, block.block_header.commitment(), &block.mmr_path)
                        .map_err(StoreError::MmrError)?;
                }

                // Always collect new authentication nodes — even when the block was
                // already tracked from the MMR delta, the delta's nodes may not include
                // the full authentication path needed to reconstruct the PartialMmr
                // from storage later.
                let track_auth_nodes: Vec<_> = current_partial_mmr
                    .nodes()
                    .filter(|(k, _)| !nodes_before.contains_key(k))
                    .map(|(k, v)| (*k, *v))
                    .collect();

                // `true`: this block contains at least one note relevant to the client.
                state_sync_update.block_updates.insert(
                    block.block_header,
                    true,
                    current_partial_mmr.peaks(),
                    track_auth_nodes,
                );
            }
        }

        // Apply transaction and nullifier data.
        state_sync_update.note_updates.extend_nullifiers(nullifiers);
        self.transaction_state_sync(
            &mut state_sync_update.transaction_updates,
            &chain_tip_header,
            &transactions,
        );

        // Transition tracked output notes to Committed using inclusion proofs from the
        // transaction sync response. This covers output notes regardless of whether their
        // tags were tracked in the note sync.
        for transaction in &transactions {
            state_sync_update
                .note_updates
                .apply_output_note_inclusion_proofs(&transaction.output_notes)?;
        }

        Ok(())
    }
513
514    /// Compares the state of tracked accounts with the updates received from the node. The method
515    /// Updates the `account_updates` with the details of the accounts that need to be updated.
516    ///
517    /// The account updates might include:
518    /// * Public accounts that have been updated in the node (full or delta-based).
519    /// * Network accounts that have been updated in the node and are being tracked by the client.
520    /// * Private accounts that have been marked as mismatched because the current commitment
521    ///   doesn't match the one received from the node. The client will need to handle these cases
522    ///   as they could be a stale account state or a reason to lock the account.
523    async fn account_state_sync(
524        &self,
525        account_updates: &mut AccountUpdates,
526        accounts: &[AccountHeader],
527        account_commitment_updates: &[(AccountId, Word)],
528        block_num: BlockNumber,
529    ) -> Result<(), ClientError> {
530        // "Public" here includes both Public and Network accounts, since both have
531        // their state stored on-chain and follow the same sync path.
532        let (public_accounts, private_accounts): (Vec<_>, Vec<_>) =
533            accounts.iter().partition(|a| !a.id().is_private());
534
535        self.sync_public_accounts(
536            account_updates,
537            account_commitment_updates,
538            &public_accounts,
539            block_num,
540        )
541        .await?;
542
543        let mismatched_private_accounts = account_commitment_updates
544            .iter()
545            .filter(|(account_id, digest)| {
546                private_accounts
547                    .iter()
548                    .any(|a| a.id() == *account_id && &a.to_commitment() != digest)
549            })
550            .copied()
551            .collect::<Vec<_>>();
552
553        account_updates.extend(AccountUpdates::new(Vec::new(), mismatched_private_accounts));
554
555        Ok(())
556    }
557
    /// Queries the node for updated public accounts and populates `account_updates`.
    ///
    /// When a store is available, storage and vault data are fetched on demand to build
    /// deltas for oversized accounts. Without a store, oversized accounts fall back to
    /// `get_account_details` (full sync from block 0).
    async fn sync_public_accounts(
        &self,
        account_updates: &mut AccountUpdates,
        commitment_updates: &[(AccountId, Word)],
        current_public_accounts: &[&AccountHeader],
        block_num: BlockNumber,
    ) -> Result<(), ClientError> {
        for (id, commitment) in commitment_updates {
            // Only process accounts we track whose local commitment differs from the
            // node-reported one; everything else is up to date or untracked.
            let Some(local_header) = current_public_accounts
                .iter()
                .find(|acc| *id == acc.id() && *commitment != acc.to_commitment())
            else {
                continue;
            };

            let account_id = local_header.id();

            // Build storage requirements and known code from store (if available) to
            // request all entries for every map slot and avoid re-downloading code.
            let (storage_requirements, known_code) =
                self.fetch_local_account_hints(account_id).await;

            let (proof_block_num, proof) = self
                .rpc_api
                .get_account_proof(
                    account_id,
                    storage_requirements,
                    AccountStateAt::ChainTip,
                    known_code,
                    Some(EMPTY_WORD),
                )
                .await
                .map_err(ClientError::RpcError)?;

            let Some(details) = proof.into_parts().1 else {
                // Private account returned — should not happen for public accounts.
                continue;
            };

            // Skip if the remote nonce is not newer than what we already have.
            if details.header.nonce().as_canonical_u64() <= local_header.nonce().as_canonical_u64()
            {
                continue;
            }

            // "Oversized" means the response could not include the full vault or some
            // full storage map; such accounts need either a delta sync or a full refetch.
            let has_oversized_data = details.vault_details.too_many_assets
                || details.storage_details.map_details.iter().any(|m| m.too_many_entries);

            if has_oversized_data {
                if self.store.is_some() {
                    // Delta path: build an AccountDelta from incremental updates,
                    // fetching storage slots and vault from the store on demand.
                    let delta = self
                        .build_account_delta(&details, local_header, block_num, proof_block_num)
                        .await?;
                    account_updates.extend(AccountUpdates::new(
                        vec![PublicAccountUpdate::Delta {
                            new_header: details.header.clone(),
                            delta,
                        }],
                        Vec::new(),
                    ));
                } else {
                    // No store available — fall back to get_account_details which
                    // handles oversized data internally (syncing from block 0).
                    let response = self
                        .rpc_api
                        .get_account_details(account_id)
                        .await
                        .map_err(ClientError::RpcError)?;

                    match response {
                        FetchedAccount::Public(account, _) => {
                            account_updates.extend(AccountUpdates::new(
                                vec![PublicAccountUpdate::Full(*account)],
                                Vec::new(),
                            ));
                        },
                        // This should not happen since we only fetch public accounts here.
                        FetchedAccount::Private(..) => {},
                    }
                }
            } else {
                // Small account: build directly from the response details.
                let account = Account::try_from(&details).map_err(ClientError::RpcError)?;
                account_updates.extend(AccountUpdates::new(
                    vec![PublicAccountUpdate::Full(account)],
                    Vec::new(),
                ));
            }
        }

        Ok(())
    }
657
658    /// Fetches storage requirements and known code from the store for a given account.
659    ///
660    /// Returns defaults when no store is available.
661    async fn fetch_local_account_hints(
662        &self,
663        account_id: AccountId,
664    ) -> (AccountStorageRequirements, Option<AccountCode>) {
665        let Some(store) = &self.store else {
666            return (AccountStorageRequirements::default(), None);
667        };
668
669        let storage_requirements = store
670            .get_account_storage(account_id, AccountStorageFilter::All)
671            .await
672            .map(|storage| Self::build_storage_requirements(&storage))
673            .unwrap_or_default();
674
675        let known_code = store.get_account_code(account_id).await.ok().flatten();
676
677        (storage_requirements, known_code)
678    }
679
680    /// Builds [`AccountStorageRequirements`] from [`AccountStorage`], requesting all entries for
681    /// every map slot.
682    fn build_storage_requirements(storage: &AccountStorage) -> AccountStorageRequirements {
683        let map_slots = storage.slots().iter().filter_map(|slot: &StorageSlot| {
684            if slot.slot_type() == StorageSlotType::Map {
685                // Passing an empty key list requests all entries for this map slot.
686                Some((slot.name().clone(), core::iter::empty::<&StorageMapKey>()))
687            } else {
688                None
689            }
690        });
691        AccountStorageRequirements::new(map_slots)
692    }
693
694    /// Builds an [`AccountDelta`] from incremental RPC sync data, fetching local account
695    /// data from the store on demand.
696    ///
697    /// For oversized storage maps: fetches delta entries via `sync_storage_maps`.
698    /// For oversized vaults: fetches delta entries via `sync_account_vault`.
699    /// Non-oversized parts are diffed against local data fetched from the store.
700    ///
701    /// # Panics
702    ///
703    /// Panics if `self.store` is `None`. Callers must check before invoking.
704    #[allow(clippy::too_many_lines)]
705    async fn build_account_delta(
706        &self,
707        details: &AccountDetails,
708        local_header: &AccountHeader,
709        block_from: BlockNumber,
710        block_to: BlockNumber,
711    ) -> Result<AccountDelta, ClientError> {
712        let store = self.store.as_ref().expect("store required for delta sync");
713        let account_id = details.header.id();
714
715        let storage_delta = self
716            .build_storage_delta(details, account_id, block_from, block_to, store.as_ref())
717            .await?;
718
719        let vault_delta = self
720            .build_vault_delta(details, account_id, block_from, block_to, store.as_ref())
721            .await?;
722
723        // --- Nonce delta ---
724        let old_nonce = local_header.nonce().as_canonical_u64();
725        let new_nonce = details.header.nonce().as_canonical_u64();
726        let nonce_delta = Felt::new(new_nonce - old_nonce);
727
728        AccountDelta::new(account_id, storage_delta, vault_delta, nonce_delta).map_err(|err| {
729            ClientError::RpcError(RpcError::InvalidResponse(format!(
730                "failed to construct account delta: {err}"
731            )))
732        })
733    }
734
    /// Computes the full storage delta (value slots + map slots) for the account.
    ///
    /// For value slots, compares the response values against the local store. For map slots,
    /// oversized maps (`too_many_entries`) fetch incremental delta entries from the sync endpoint
    /// and deduplicate by key keeping the latest value; non-oversized maps diff the full response
    /// entries against the local store.
    async fn build_storage_delta(
        &self,
        details: &AccountDetails,
        account_id: AccountId,
        block_from: BlockNumber,
        block_to: BlockNumber,
        store: &dyn Store,
    ) -> Result<AccountStorageDelta, ClientError> {
        let mut storage_delta = AccountStorageDelta::new();

        // --- Value slots: record any slot whose remote value differs from the local one. ---
        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() == StorageSlotType::Value {
                // A store read failure degrades to "no local value", which forces the slot
                // into the delta. NOTE(review): this also swallows genuine store errors.
                let local_value = store
                    .get_account_storage_item(account_id, slot_header.name().clone())
                    .await
                    .ok();

                if local_value.as_ref() != Some(&slot_header.value()) {
                    storage_delta
                        .set_item(slot_header.name().clone(), slot_header.value())
                        .map_err(|err| {
                            ClientError::RpcError(RpcError::InvalidResponse(format!(
                                "failed to set storage delta item: {err}"
                            )))
                        })?;
                }
            }
        }

        // --- Map slots ---
        // `sync_storage_maps` returns updates for the whole account at once, so the response
        // is fetched lazily on the first oversized slot and cached for the remaining slots.
        let mut map_delta_cache: Option<Vec<StorageMapUpdate>> = None;

        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() != StorageSlotType::Map {
                continue;
            }

            // Every map-typed slot in the header must have matching map details in the
            // response; their absence indicates a malformed node reply.
            let map_details =
                details.storage_details.find_map_details(slot_header.name()).ok_or_else(|| {
                    ClientError::RpcError(RpcError::ExpectedDataMissing(format!(
                        "slot '{}' is a map but has no map_details in response",
                        slot_header.name()
                    )))
                })?;

            if map_details.too_many_entries {
                // Oversized map: fetch delta entries from the sync endpoint.
                if map_delta_cache.is_none() {
                    let map_info = self
                        .rpc_api
                        .sync_storage_maps(block_from, Some(block_to), account_id)
                        .await
                        .map_err(ClientError::RpcError)?;
                    map_delta_cache = Some(map_info.updates);
                }

                Self::apply_oversized_map_delta(
                    map_delta_cache.as_deref().unwrap_or_default(),
                    slot_header.name(),
                    &mut storage_delta,
                )?;
            } else {
                // Non-oversized map: the response carries the full entry list; diff it
                // against what the local store currently holds for this slot.
                Self::apply_full_map_delta(
                    map_details,
                    slot_header.name(),
                    account_id,
                    store,
                    &mut storage_delta,
                )
                .await?;
            }
        }

        Ok(storage_delta)
    }
815
816    /// Applies delta updates from the sync endpoint for an oversized storage map slot.
817    ///
818    /// Filters the cached delta updates to the target slot, sorts by block number, and
819    /// deduplicates by key (keeping the latest value).
820    fn apply_oversized_map_delta(
821        delta_updates: &[StorageMapUpdate],
822        slot_name: &StorageSlotName,
823        storage_delta: &mut AccountStorageDelta,
824    ) -> Result<(), ClientError> {
825        let mut relevant: Vec<_> =
826            delta_updates.iter().filter(|u| u.slot_name == *slot_name).collect();
827        relevant.sort_by_key(|u| u.block_num);
828
829        // Deduplicate: keep latest value per key.
830        let mut seen = BTreeMap::new();
831        for update in relevant {
832            seen.insert(update.key, update.value);
833        }
834
835        for (key, value) in seen {
836            storage_delta.set_map_item(slot_name.clone(), key, value).map_err(|err| {
837                ClientError::RpcError(RpcError::InvalidResponse(format!(
838                    "failed to set storage map delta: {err}"
839                )))
840            })?;
841        }
842
843        Ok(())
844    }
845
    /// Diffs the full response map entries against the local store for a non-oversized map slot.
    ///
    /// Entries present in the response but missing or different locally are added to the delta.
    /// Entries present locally but absent in the response are set to `Word::default()` (removal).
    async fn apply_full_map_delta(
        map_details: &AccountStorageMapDetails,
        slot_name: &StorageSlotName,
        account_id: AccountId,
        store: &dyn Store,
        storage_delta: &mut AccountStorageDelta,
    ) -> Result<(), ClientError> {
        // The response must carry the complete entry list (`AllEntries`); the proof-based
        // variant is only expected for oversized maps, which take a different code path.
        let response_map = map_details
            .entries
            .clone()
            .into_storage_map()
            .ok_or_else(|| {
                ClientError::RpcError(RpcError::ExpectedDataMissing(
                    "expected AllEntries for map, got EntriesWithProofs".into(),
                ))
            })?
            .map_err(|err| {
                ClientError::RpcError(RpcError::InvalidResponse(format!(
                    "the rpc api returned a non-valid map entry: {err}"
                )))
            })?;

        // Load the locally stored entries for this slot. Any failure (store error, missing
        // slot, or a value-typed slot under this name) degrades to an empty map, in which
        // case every response entry is treated as new.
        let local_entries: BTreeMap<StorageMapKey, Word> = store
            .get_account_storage(account_id, AccountStorageFilter::SlotName(slot_name.clone()))
            .await
            .ok()
            .and_then(|storage| storage.get(slot_name).cloned())
            .map(|slot| match slot.content() {
                StorageSlotContent::Map(map) => map.entries().map(|(k, v)| (*k, *v)).collect(),
                StorageSlotContent::Value(_) => BTreeMap::new(),
            })
            .unwrap_or_default();

        let response_entries: BTreeMap<StorageMapKey, Word> =
            response_map.entries().map(|(k, v)| (*k, *v)).collect();

        // Entries in response but not in local, or with different values.
        for (key, value) in &response_entries {
            if local_entries.get(key) != Some(value) {
                storage_delta.set_map_item(slot_name.clone(), *key, *value).map_err(|err| {
                    ClientError::RpcError(RpcError::InvalidResponse(format!(
                        "failed to set storage map delta: {err}"
                    )))
                })?;
            }
        }

        // Entries in local but removed in response (set to empty word).
        for key in local_entries.keys() {
            if !response_entries.contains_key(key) {
                storage_delta.set_map_item(slot_name.clone(), *key, Word::default()).map_err(
                    |err| {
                        ClientError::RpcError(RpcError::InvalidResponse(format!(
                            "failed to set storage map delta for removal: {err}"
                        )))
                    },
                )?;
            }
        }

        Ok(())
    }
912
    /// Computes the vault delta between local and remote account state.
    ///
    /// For oversized vaults (`too_many_assets`), fetches incremental updates from the sync
    /// endpoint and replays them on top of the local vault. For non-oversized vaults, diffs
    /// the full response assets against the local vault.
    async fn build_vault_delta(
        &self,
        details: &AccountDetails,
        account_id: AccountId,
        block_from: BlockNumber,
        block_to: BlockNumber,
        store: &dyn Store,
    ) -> Result<AccountVaultDelta, ClientError> {
        let mut vault_delta = AccountVaultDelta::default();
        let local_vault =
            store.get_account_vault(account_id).await.map_err(ClientError::StoreError)?;

        if details.vault_details.too_many_assets {
            // Oversized vault: fetch delta from sync endpoint.
            let vault_info = self
                .rpc_api
                .sync_account_vault(block_from, Some(block_to), account_id)
                .await
                .map_err(ClientError::RpcError)?;

            // Build the final vault state by applying updates to local vault.
            let mut vault_map: BTreeMap<AssetVaultKey, Asset> =
                local_vault.assets().map(|asset| (asset.vault_key(), asset)).collect();

            // Replay updates oldest-first so a key touched in several blocks ends up in its
            // latest state. The sort is stable, preserving response order within a block.
            let mut vault_updates = vault_info.updates;
            vault_updates.sort_by_key(|u| u.block_num);

            for update in vault_updates {
                // `Some(asset)`: the key now holds this asset; `None`: the key was emptied.
                match update.asset {
                    Some(asset) => {
                        vault_map.insert(update.vault_key, asset);
                    },
                    None => {
                        vault_map.remove(&update.vault_key);
                    },
                }
            }

            Self::compute_vault_delta_from_diff(&local_vault, &vault_map, &mut vault_delta)?;
        } else {
            // Non-oversized vault: diff response assets against local.
            let final_assets: BTreeMap<AssetVaultKey, Asset> = details
                .vault_details
                .assets
                .iter()
                .map(|asset| (asset.vault_key(), *asset))
                .collect();

            Self::compute_vault_delta_from_diff(&local_vault, &final_assets, &mut vault_delta)?;
        }

        Ok(vault_delta)
    }
971
972    /// Computes a vault delta from the difference between a local vault and a final asset map.
973    fn compute_vault_delta_from_diff(
974        local_vault: &AssetVault,
975        final_assets: &BTreeMap<AssetVaultKey, Asset>,
976        vault_delta: &mut AccountVaultDelta,
977    ) -> Result<(), ClientError> {
978        let local_assets: BTreeMap<AssetVaultKey, Asset> =
979            local_vault.assets().map(|a| (a.vault_key(), a)).collect();
980
981        // Assets in final but not in local -> add. Changed amounts -> remove old, add new.
982        for (key, final_asset) in final_assets {
983            match local_assets.get(key) {
984                None => {
985                    vault_delta.add_asset(*final_asset).map_err(|err| {
986                        ClientError::RpcError(RpcError::InvalidResponse(format!(
987                            "failed to add asset to vault delta: {err}"
988                        )))
989                    })?;
990                },
991                Some(local_asset) if local_asset != final_asset => {
992                    vault_delta.remove_asset(*local_asset).map_err(|err| {
993                        ClientError::RpcError(RpcError::InvalidResponse(format!(
994                            "failed to remove old asset from vault delta: {err}"
995                        )))
996                    })?;
997                    vault_delta.add_asset(*final_asset).map_err(|err| {
998                        ClientError::RpcError(RpcError::InvalidResponse(format!(
999                            "failed to add new asset to vault delta: {err}"
1000                        )))
1001                    })?;
1002                },
1003                _ => {}, // No change
1004            }
1005        }
1006
1007        // Assets in local but not in final -> remove.
1008        for (key, local_asset) in &local_assets {
1009            if !final_assets.contains_key(key) {
1010                vault_delta.remove_asset(*local_asset).map_err(|err| {
1011                    ClientError::RpcError(RpcError::InvalidResponse(format!(
1012                        "failed to remove asset from vault delta: {err}"
1013                    )))
1014                })?;
1015            }
1016        }
1017
1018        Ok(())
1019    }
1020
1021    /// Applies the changes received from the sync response to the notes and transactions tracked
1022    /// by the client and updates the `note_updates` accordingly.
1023    ///
1024    /// This method uses the callbacks provided to the [`StateSync`] component to check if the
1025    /// updates received are relevant to the client.
1026    ///
1027    /// The note updates might include:
1028    /// * New notes that we received from the node and might be relevant to the client.
1029    /// * Tracked expected notes that were committed in the block.
1030    /// * Tracked notes that were being processed by a transaction that got committed.
1031    /// * Tracked notes that were nullified by an external transaction.
1032    ///
1033    /// The `public_notes` parameter provides cached public note details for the current sync
1034    /// iteration so the node is only queried once per batch.
1035    async fn note_state_sync(
1036        &self,
1037        note_updates: &mut NoteUpdateTracker,
1038        note_inclusions: BTreeMap<NoteId, CommittedNote>,
1039        block_header: &BlockHeader,
1040        public_notes: &BTreeMap<NoteId, InputNoteRecord>,
1041    ) -> Result<bool, ClientError> {
1042        // `found_relevant_note` tracks whether we want to persist the block header in the end
1043        let mut found_relevant_note = false;
1044
1045        for (_, committed_note) in note_inclusions {
1046            let public_note = (committed_note.note_type() != NoteType::Private)
1047                .then(|| public_notes.get(committed_note.note_id()))
1048                .flatten()
1049                .cloned();
1050
1051            match self.note_screener.on_note_received(committed_note, public_note).await? {
1052                NoteUpdateAction::Commit(committed_note) => {
1053                    // Only mark the downloaded block header as relevant if we are talking about
1054                    // an input note (output notes get marked as committed but we don't need the
1055                    // block for anything there)
1056                    found_relevant_note |= note_updates
1057                        .apply_committed_note_state_transitions(&committed_note, block_header)?;
1058                },
1059                NoteUpdateAction::Insert(public_note) => {
1060                    found_relevant_note = true;
1061
1062                    note_updates.apply_new_public_note(public_note, block_header)?;
1063                },
1064                NoteUpdateAction::Discard => {},
1065            }
1066        }
1067
1068        Ok(found_relevant_note)
1069    }
1070
    /// Collects the nullifier tags for the notes that were updated in the sync response and uses
    /// the `sync_nullifiers` endpoint to check if there are new nullifiers for these
    /// notes. It then processes the nullifiers to apply the state transitions on the note updates.
    ///
    /// The `state_sync_update` parameter will be updated to track the new discarded transactions.
    async fn nullifiers_state_sync(
        &self,
        state_sync_update: &mut StateSyncUpdate,
        current_block_num: BlockNumber,
    ) -> Result<(), ClientError> {
        // To receive information about added nullifiers, we reduce them to the higher 16 bits
        // (prefixes), so the node cannot link full nullifiers to this client's notes.
        // Note that besides filtering by nullifier prefixes, the node also filters by block number
        // (it only returns nullifiers from current_block_num until
        // response.block_header.block_num()).

        // Check for new nullifiers for input notes that were updated.
        let nullifiers_tags: Vec<u16> = state_sync_update
            .note_updates
            .unspent_nullifiers()
            .map(|nullifier| nullifier.prefix())
            .collect();

        let mut new_nullifiers = self
            .rpc_api
            .sync_nullifiers(&nullifiers_tags, current_block_num, Some(state_sync_update.block_num))
            .await?;

        // Discard nullifiers that are newer than the current block (this might happen if the block
        // changes between the sync_state and the check_nullifier calls).
        new_nullifiers.retain(|update| update.block_num <= state_sync_update.block_num);

        for nullifier_update in new_nullifiers {
            // Account (if any) that the tracked transaction updates associate with this
            // nullifier's external consumption, forwarded to the note state transition.
            let external_consumer_account = state_sync_update
                .transaction_updates
                .external_nullifier_account(&nullifier_update.nullifier);

            state_sync_update.note_updates.apply_nullifiers_state_transitions(
                &nullifier_update,
                state_sync_update.transaction_updates.committed_transactions(),
                external_consumer_account,
            )?;

            // Process nullifiers and track the updates of local tracked transactions that were
            // discarded because the notes that they were processing were nullified by an
            // another transaction.
            state_sync_update
                .transaction_updates
                .apply_input_note_nullified(nullifier_update.nullifier);
        }

        Ok(())
    }
1123
1124    /// Applies the changes received from the sync response to the transactions tracked by the
1125    /// client and updates the `transaction_updates` accordingly.
1126    ///
1127    /// The transaction updates might include:
1128    /// * New transactions that were committed in the block.
1129    /// * Transactions that were discarded because they were stale or expired.
1130    fn transaction_state_sync(
1131        &self,
1132        transaction_updates: &mut TransactionUpdateTracker,
1133        new_block_header: &BlockHeader,
1134        transaction_inclusions: &[TransactionInclusion],
1135    ) {
1136        for transaction_inclusion in transaction_inclusions {
1137            transaction_updates.apply_transaction_inclusion(
1138                transaction_inclusion,
1139                u64::from(new_block_header.timestamp()),
1140            ); //TODO: Change timestamps from u64 to u32
1141        }
1142
1143        transaction_updates
1144            .apply_sync_height_update(new_block_header.block_num(), self.tx_discard_delta);
1145    }
1146}
1147
1148// HELPERS
1149// ================================================================================================
1150
1151/// Derives account commitment updates from transaction records.
1152///
1153/// For each unique account, takes the `final_state_commitment` from the transaction with the
1154/// highest `block_num`. This replicates the old `SyncState` behavior where the node returned
1155/// the latest account commitment per account in the synced range.
1156fn derive_account_commitment_updates(
1157    transaction_records: &[RpcTransactionRecord],
1158) -> Vec<(AccountId, Word)> {
1159    let mut latest_by_account: BTreeMap<AccountId, &RpcTransactionRecord> = BTreeMap::new();
1160
1161    for record in transaction_records {
1162        let account_id = record.transaction_header.account_id();
1163        latest_by_account
1164            .entry(account_id)
1165            .and_modify(|existing| {
1166                if record.block_num > existing.block_num {
1167                    *existing = record;
1168                }
1169            })
1170            .or_insert(record);
1171    }
1172
1173    latest_by_account
1174        .into_iter()
1175        .map(|(account_id, record)| {
1176            (account_id, record.transaction_header.final_state_commitment())
1177        })
1178        .collect()
1179}
1180
1181/// Returns nullifiers ordered by consuming transaction position, per account.
1182///
1183/// Groups RPC transaction records by (`account_id`, `block_num`), chains them using
1184/// `initial_state_commitment` / `final_state_commitment`, and collects each transaction's
1185/// input note nullifiers in execution order. Nullifiers from the same account are in execution
1186/// order; ordering across different accounts is arbitrary.
1187fn compute_ordered_nullifiers(transaction_records: &[RpcTransactionRecord]) -> Vec<Nullifier> {
1188    // Group transactions by (account_id, block_num).
1189    let mut groups: BTreeMap<(AccountId, BlockNumber), Vec<&RpcTransactionRecord>> =
1190        BTreeMap::new();
1191
1192    for record in transaction_records {
1193        let account_id = record.transaction_header.account_id();
1194        groups.entry((account_id, record.block_num)).or_default().push(record);
1195    }
1196
1197    let mut result = Vec::new();
1198
1199    for txs in groups.values() {
1200        // Build a lookup from initial_state_commitment -> transaction record.
1201        let mut init_to_tx: BTreeMap<Word, &RpcTransactionRecord> = txs
1202            .iter()
1203            .map(|tx| (tx.transaction_header.initial_state_commitment(), *tx))
1204            .collect();
1205
1206        // Build a set of all final states to find the chain start.
1207        let final_states: BTreeSet<Word> =
1208            txs.iter().map(|tx| tx.transaction_header.final_state_commitment()).collect();
1209
1210        // Find the chain start: the tx whose initial_state_commitment is not any other tx's
1211        // final_state_commitment.
1212        let chain_start = txs
1213            .iter()
1214            .find(|tx| !final_states.contains(&tx.transaction_header.initial_state_commitment()));
1215
1216        let Some(start_tx) = chain_start else {
1217            continue;
1218        };
1219
1220        // Walk the chain from start, removing each step from the map.
1221        let mut current =
1222            init_to_tx.remove(&start_tx.transaction_header.initial_state_commitment());
1223
1224        while let Some(tx) = current {
1225            for commitment in tx.transaction_header.input_notes().iter() {
1226                result.push(commitment.nullifier());
1227            }
1228            current = init_to_tx.remove(&tx.transaction_header.final_state_commitment());
1229        }
1230    }
1231
1232    result
1233}
1234
1235#[cfg(test)]
1236mod tests {
1237    use alloc::collections::BTreeSet;
1238    use alloc::sync::Arc;
1239
1240    use async_trait::async_trait;
1241    use miden_protocol::assembly::DefaultSourceManager;
1242    use miden_protocol::crypto::merkle::mmr::{Forest, InOrderIndex, PartialMmr};
1243    use miden_protocol::note::{NoteTag, NoteType};
1244    use miden_protocol::{Felt, Word};
1245    use miden_standards::code_builder::CodeBuilder;
1246    use miden_testing::MockChainBuilder;
1247
1248    use super::*;
1249    use crate::testing::mock::MockRpcApi;
1250
    /// Mock note screener that discards all notes, for minimal test setup.
    struct MockScreener;

    #[async_trait(?Send)]
    impl OnNoteReceived for MockScreener {
        /// Always returns [`NoteUpdateAction::Discard`], regardless of the note received.
        async fn on_note_received(
            &self,
            _committed_note: CommittedNote,
            _public_note: Option<InputNoteRecord>,
        ) -> Result<NoteUpdateAction, ClientError> {
            Ok(NoteUpdateAction::Discard)
        }
    }
1264
    /// Returns a [`StateSyncInput`] with no accounts, note tags, notes, or uncommitted
    /// transactions — the minimal input for sync tests.
    fn empty() -> StateSyncInput {
        StateSyncInput {
            accounts: vec![],
            note_tags: BTreeSet::new(),
            input_notes: vec![],
            output_notes: vec![],
            uncommitted_transactions: vec![],
        }
    }
1274
1275    // COMPUTE NULLIFIER TX ORDER TESTS
1276    // --------------------------------------------------------------------------------------------
1277
    mod compute_nullifiers_tests {
        use alloc::vec;

        use miden_protocol::asset::FungibleAsset;
        use miden_protocol::block::BlockNumber;
        use miden_protocol::note::Nullifier;
        use miden_protocol::transaction::{InputNoteCommitment, InputNotes, TransactionHeader};
        use miden_protocol::{Felt, ZERO};

        use crate::rpc::domain::transaction::{
            ACCOUNT_ID_NATIVE_ASSET_FAUCET,
            TransactionRecord as RpcTransactionRecord,
        };

        /// Builds a `Word` whose first element is `n` and the rest zero, for readable test ids.
        fn word(n: u64) -> miden_protocol::Word {
            [Felt::new(n), ZERO, ZERO, ZERO].into()
        }

        /// Builds an [`RpcTransactionRecord`] with the given state transition
        /// (`init_state` -> `final_state`), input note nullifiers, and block number.
        /// All records built here share the same private account id and a zero fee.
        fn make_rpc_tx(
            init_state: u64,
            final_state: u64,
            nullifier_vals: &[u64],
            block_number: u32,
        ) -> RpcTransactionRecord {
            let account_id = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE,
            )
            .unwrap();

            let input_notes = InputNotes::new_unchecked(
                nullifier_vals
                    .iter()
                    .map(|v| InputNoteCommitment::from(Nullifier::from_raw(word(*v))))
                    .collect(),
            );

            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            RpcTransactionRecord {
                block_num: BlockNumber::from(block_number),
                transaction_header: TransactionHeader::new(
                    account_id,
                    word(init_state),
                    word(final_state),
                    input_notes,
                    vec![],
                    fee,
                ),
                output_notes: vec![],
            }
        }

        #[test]
        fn chains_rpc_transactions_by_state_commitment() {
            // Chain: tx_a (state 1->2) -> tx_b (state 2->3) -> tx_c (state 3->4)
            // Passed in reverse order to verify chaining uses state, not insertion order.
            let tx_a = make_rpc_tx(1, 2, &[10], 5);
            let tx_b = make_rpc_tx(2, 3, &[20], 5);
            let tx_c = make_rpc_tx(3, 4, &[30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx_c, tx_a, tx_b]);

            // Nullifiers must come out in chain (execution) order: a, b, c.
            assert_eq!(result[0], Nullifier::from_raw(word(10)));
            assert_eq!(result[1], Nullifier::from_raw(word(20)));
            assert_eq!(result[2], Nullifier::from_raw(word(30)));
        }

        #[test]
        fn groups_independently_by_account_and_block() {
            // Account A, block 5: two chained txs.
            let tx_a1 = make_rpc_tx(1, 2, &[10], 5);
            let tx_a2 = make_rpc_tx(2, 3, &[20], 5);

            // Account A, block 6: independent chain.
            let tx_a3 = make_rpc_tx(3, 4, &[30], 6);

            // Account B, block 5: independent chain.
            let account_b = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET,
            )
            .unwrap();

            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            // Built by hand (rather than via `make_rpc_tx`) because it uses account B's id.
            let tx_b1 = RpcTransactionRecord {
                block_num: BlockNumber::from(5u32),
                transaction_header: TransactionHeader::new(
                    account_b,
                    word(100),
                    word(200),
                    InputNotes::new_unchecked(vec![InputNoteCommitment::from(
                        Nullifier::from_raw(word(40)),
                    )]),
                    vec![],
                    fee,
                ),
                output_notes: vec![],
            };

            let result = super::super::compute_ordered_nullifiers(&[tx_a2, tx_b1, tx_a3, tx_a1]);

            // Nullifiers are ordered by chain position within each (account, block) group.
            // The exact global indices depend on BTreeMap iteration order of the groups.
            let pos = |val: u64| -> usize {
                result.iter().position(|n| *n == Nullifier::from_raw(word(val))).unwrap()
            };

            // Within the same group, chain order is preserved.
            assert!(pos(10) < pos(20)); // A, block 5: pos 0 < pos 1
            // Nullifiers from different groups are all present.
            assert!(result.contains(&Nullifier::from_raw(word(30)))); // A, block 6
            assert!(result.contains(&Nullifier::from_raw(word(40)))); // B, block 5
        }

        #[test]
        fn multiple_nullifiers_per_transaction_are_consecutive() {
            // Single tx consuming 3 notes — all should appear consecutively.
            let tx = make_rpc_tx(1, 2, &[10, 20, 30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx]);

            assert_eq!(result.len(), 3);
            assert!(result.contains(&Nullifier::from_raw(word(10))));
            assert!(result.contains(&Nullifier::from_raw(word(20))));
            assert!(result.contains(&Nullifier::from_raw(word(30))));
        }

        #[test]
        fn empty_input_returns_empty_vec() {
            // No records -> no nullifiers.
            let result = super::super::compute_ordered_nullifiers(&[]);
            assert!(result.is_empty());
        }
    }
1415
1416    // CONSUMED NOTE ORDERING INTEGRATION TESTS
1417    // --------------------------------------------------------------------------------------------
1418
    /// Mock note screener that commits all notes matching tracked input notes.
    /// This ensures committed notes get their inclusion proofs set during sync.
    struct CommitAllScreener;

    #[async_trait(?Send)]
    impl OnNoteReceived for CommitAllScreener {
        /// Always commits the received note, ignoring any cached public note details.
        async fn on_note_received(
            &self,
            committed_note: CommittedNote,
            _public_note: Option<InputNoteRecord>,
        ) -> Result<NoteUpdateAction, ClientError> {
            Ok(NoteUpdateAction::Commit(committed_note))
        }
    }
1433
1434    use miden_protocol::account::Account;
1435    use miden_protocol::note::Note;
1436
    /// Builds a `MockChain` where 3 notes are consumed by chained transactions in the same block.
    ///
    /// The chain layout is: genesis (notes created) → block 1 (notes consumable) → block 2
    /// (three chained consume transactions, all for the same account).
    ///
    /// Returns the chain, the account, and the 3 notes (in consumption order).
    async fn build_chain_with_chained_consume_txs() -> (miden_testing::MockChain, Account, [Note; 3])
    {
        use miden_protocol::asset::{Asset, FungibleAsset};
        use miden_protocol::note::NoteType;
        use miden_protocol::testing::account_id::{
            ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET,
            ACCOUNT_ID_SENDER,
        };
        use miden_testing::{MockChainBuilder, TxContextInput};

        // Well-known testing IDs: the note sender and the faucet issuing the fungible asset.
        let sender_id: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
        let faucet_id: AccountId = ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET.try_into().unwrap();

        let mut builder = MockChainBuilder::new();
        // The consuming account; `IncrNonce` auth avoids any signing setup in the test.
        let account = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let account_id = account.id();

        // Three identical public P2ID notes targeting the account, all created at genesis.
        let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 100u64).unwrap());
        let note1 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();
        let note2 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();
        let note3 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();

        let mut chain = builder.build().unwrap();
        chain.prove_next_block().unwrap(); // block 1: makes genesis notes consumable

        // Execute 3 chained consume transactions (state S0→S1→S2→S3).
        // Each transaction starts from the account state produced by the previous one, so
        // all three can be pending at once and land in a single proven block.
        let mut current_account = account.clone();
        for note in [&note1, &note2, &note3] {
            // Box::pin — presumably because the execution future is large; confirm if needed.
            let tx = Box::pin(
                chain
                    .build_tx_context(
                        TxContextInput::Account(current_account.clone()),
                        &[],
                        core::slice::from_ref(note),
                    )
                    .unwrap()
                    .build()
                    .unwrap()
                    .execute(),
            )
            .await
            .unwrap();
            // Advance the local account state so the next transaction chains off this one.
            current_account.apply_delta(tx.account_delta()).unwrap();
            chain.add_pending_executed_transaction(&tx).unwrap();
        }

        chain.prove_next_block().unwrap(); // block 2: all 3 txs in one block
        (chain, account, [note1, note2, note3])
    }
1495
1496    /// Verifies that `consumed_tx_order` is correctly set when multiple chained transactions
1497    /// for the same account consume notes in the same block.
1498    #[tokio::test]
1499    async fn sync_state_sets_consumed_tx_order_for_chained_transactions() {
1500        use miden_protocol::note::NoteMetadata;
1501
1502        let (chain, account, [note1, note2, note3]) = build_chain_with_chained_consume_txs().await;
1503
1504        let mock_rpc = MockRpcApi::new(chain);
1505        let state_sync =
1506            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(CommitAllScreener), None);
1507
1508        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1509        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1510
1511        let input_notes: Vec<InputNoteRecord> = [&note1, &note2, &note3]
1512            .into_iter()
1513            .map(|n| InputNoteRecord::from(n.clone()))
1514            .collect();
1515
1516        let note_tags: BTreeSet<NoteTag> =
1517            input_notes.iter().filter_map(|n| n.metadata().map(NoteMetadata::tag)).collect();
1518
1519        let account_id = account.id();
1520        let sync_input = StateSyncInput {
1521            accounts: vec![AccountHeader::from(account)],
1522            note_tags,
1523            input_notes,
1524            output_notes: vec![],
1525            uncommitted_transactions: vec![],
1526        };
1527
1528        let update = state_sync.sync_state(&mut partial_mmr, sync_input).await.unwrap();
1529
1530        let updated_notes: Vec<_> = update.note_updates.updated_input_notes().collect();
1531
1532        let find_order = |note_id: NoteId| -> Option<u32> {
1533            updated_notes
1534                .iter()
1535                .find(|n| n.id() == note_id)
1536                .and_then(|n| n.consumed_tx_order())
1537        };
1538
1539        assert_eq!(find_order(note1.id()), Some(0), "note1 should have tx_order 0");
1540        assert_eq!(find_order(note2.id()), Some(1), "note2 should have tx_order 1");
1541        assert_eq!(find_order(note3.id()), Some(2), "note3 should have tx_order 2");
1542
1543        // Since there are no uncommitted_transactions, these notes were consumed by a tracked
1544        // account via external transactions. Verify that consumer_account is populated.
1545        for note in &updated_notes {
1546            let record = note.inner();
1547            assert!(record.is_consumed(), "note should be in a consumed state");
1548            assert_eq!(
1549                record.consumer_account(),
1550                Some(account_id),
1551                "externally-consumed notes by a tracked account should have consumer_account set",
1552            );
1553        }
1554    }
1555
1556    #[tokio::test]
1557    async fn sync_state_across_multiple_iterations_with_same_mmr() {
1558        // Setup: create a mock chain and advance it so there are blocks to sync.
1559        let mock_rpc = MockRpcApi::default();
1560        mock_rpc.advance_blocks(3);
1561        let chain_tip_1 = mock_rpc.get_chain_tip_block_num();
1562
1563        let state_sync =
1564            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1565
1566        // Build the initial PartialMmr from genesis (only 1 leaf).
1567        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1568        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1569        assert_eq!(partial_mmr.forest().num_leaves(), 1);
1570
1571        // First sync
1572        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1573
1574        assert_eq!(update.block_num, chain_tip_1);
1575        let forest_1 = partial_mmr.forest();
1576        // The MMR should contain one leaf per block (genesis + the new blocks).
1577        assert_eq!(forest_1.num_leaves(), chain_tip_1.as_u32() as usize + 1);
1578
1579        // Second sync
1580        mock_rpc.advance_blocks(2);
1581        let chain_tip_2 = mock_rpc.get_chain_tip_block_num();
1582
1583        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1584
1585        assert_eq!(update.block_num, chain_tip_2);
1586        let forest_2 = partial_mmr.forest();
1587        assert!(forest_2 > forest_1);
1588        assert_eq!(forest_2.num_leaves(), chain_tip_2.as_u32() as usize + 1);
1589
1590        // Third sync (no new blocks)
1591        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1592
1593        assert_eq!(update.block_num, chain_tip_2);
1594        assert_eq!(partial_mmr.forest(), forest_2);
1595    }
1596
    /// Builds a mock chain with a faucet that mints `num_blocks` notes, one per block.
    /// Returns the chain and the set of note tags for filtering.
    ///
    /// Each loop iteration executes one mint transaction and proves a block, so every
    /// minted note lands in its own (consecutive) block.
    async fn build_chain_with_mint_notes(
        num_blocks: u64,
    ) -> (miden_testing::MockChain, BTreeSet<NoteTag>) {
        let mut builder = MockChainBuilder::new();
        // Faucet configured with Falcon512Poseidon2 basic auth, "TST" symbol, 10_000 max supply.
        let faucet = builder
            .add_existing_basic_faucet(
                miden_testing::Auth::BasicAuth {
                    auth_scheme: miden_protocol::account::auth::AuthScheme::Falcon512Poseidon2,
                },
                "TST",
                10_000,
                None,
            )
            .unwrap();
        // Extra mock account — presumably so the chain has a non-faucet account; not used below.
        let _target = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let mut chain = builder.build().unwrap();

        // Fixed recipient digest and the default tag, shared by every minted note.
        let recipient: Word = [0u32, 1, 2, 3].into();
        let tag = NoteTag::default();
        let mut faucet_account = faucet.clone();
        let mut note_tags = BTreeSet::new();

        for i in 0..num_blocks {
            // Vary the amount per iteration so each minted note differs.
            let amount = Felt::new(100 + i);
            let source_manager = Arc::new(DefaultSourceManager::default());
            // MASM transaction script: push recipient word, note type, tag, and amount,
            // then call the basic fungible faucet's mint-and-send entrypoint.
            let tx_script_code = format!(
                "
                begin
                    padw padw push.0
                    push.{r0}.{r1}.{r2}.{r3}
                    push.{note_type}
                    push.{tag}
                    push.{amount}
                    call.::miden::standards::faucets::basic_fungible::mint_and_send
                    dropw dropw dropw dropw
                end
                ",
                r0 = recipient[0],
                r1 = recipient[1],
                r2 = recipient[2],
                r3 = recipient[3],
                note_type = NoteType::Private as u8,
                tag = u32::from(tag),
                amount = amount,
            );
            let tx_script = CodeBuilder::with_source_manager(source_manager.clone())
                .compile_tx_script(tx_script_code)
                .unwrap();
            let tx = Box::pin(
                chain
                    .build_tx_context(
                        miden_testing::TxContextInput::Account(faucet_account.clone()),
                        &[],
                        &[],
                    )
                    .unwrap()
                    .tx_script(tx_script)
                    .with_source_manager(source_manager)
                    .build()
                    .unwrap()
                    .execute(),
            )
            .await
            .unwrap();

            // Record the tags of the notes actually emitted by the transaction.
            for output_note in tx.output_notes().iter() {
                note_tags.insert(output_note.metadata().tag());
            }

            // Advance the local faucet state so the next mint chains off this transaction,
            // then commit the transaction into its own proven block.
            faucet_account.apply_delta(tx.account_delta()).unwrap();
            chain.add_pending_executed_transaction(&tx).unwrap();
            chain.prove_next_block().unwrap();
        }

        (chain, note_tags)
    }
1675
1676    /// Verifies that the sync correctly processes notes committed in multiple blocks
1677    /// (batched `SyncNotes` response) and tracks their blocks in the partial MMR.
1678    ///
1679    /// This test creates a faucet and mints notes in separate blocks (blocks 1, 2, 3),
1680    /// so `sync_notes` returns multiple `NoteSyncBlock`s. It then verifies:
1681    /// - The MMR is advanced to the chain tip
1682    /// - Blocks containing relevant notes are tracked in the partial MMR via `track()`
1683    /// - Note inclusion proofs are set correctly
1684    /// - Block headers for note blocks are stored
1685    #[tokio::test]
1686    async fn sync_state_tracks_note_blocks_in_mmr() {
1687        let (chain, note_tags) = build_chain_with_mint_notes(3).await;
1688        let mock_rpc = MockRpcApi::new(chain);
1689        let chain_tip = mock_rpc.get_chain_tip_block_num();
1690
1691        // Verify the mock returns notes across multiple blocks.
1692        let note_sync =
1693            mock_rpc.sync_notes(BlockNumber::from(0u32), None, &note_tags).await.unwrap();
1694        assert!(
1695            note_sync.blocks.len() >= 2,
1696            "expected notes in multiple blocks, got {}",
1697            note_sync.blocks.len()
1698        );
1699
1700        // Collect the block numbers that have notes.
1701        let note_block_nums: BTreeSet<BlockNumber> =
1702            note_sync.blocks.iter().map(|b| b.block_header.block_num()).collect();
1703
1704        // Test that fetch_sync_data returns note blocks with valid MMR paths that
1705        // can be used to track blocks in the partial MMR.
1706        let state_sync =
1707            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1708
1709        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1710        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1711
1712        let sync_data = state_sync
1713            .fetch_sync_data(BlockNumber::GENESIS, &[], &Arc::new(note_tags.clone()))
1714            .await
1715            .unwrap()
1716            .expect("should have progressed past genesis");
1717
1718        // Should have advanced to the chain tip.
1719        assert_eq!(sync_data.chain_tip_header.block_num(), chain_tip);
1720        assert!(!sync_data.note_blocks.is_empty(), "should have note blocks");
1721
1722        // Apply the MMR delta and add the chain tip block.
1723        let _auth_nodes: Vec<(InOrderIndex, Word)> =
1724            partial_mmr.apply(sync_data.mmr_delta).map_err(StoreError::MmrError).unwrap();
1725        partial_mmr.add(sync_data.chain_tip_header.commitment(), false);
1726
1727        assert_eq!(partial_mmr.forest().num_leaves(), chain_tip.as_u32() as usize + 1);
1728
1729        // Track each note block using the MMR path from the sync_notes response.
1730        for block in &sync_data.note_blocks {
1731            let bn = block.block_header.block_num();
1732            partial_mmr
1733                .track(bn.as_usize(), block.block_header.commitment(), &block.mmr_path)
1734                .map_err(StoreError::MmrError)
1735                .unwrap();
1736
1737            assert!(
1738                partial_mmr.is_tracked(bn.as_usize()),
1739                "block {bn} should be tracked after calling track()"
1740            );
1741        }
1742
1743        // Verify the tracked blocks match the note blocks.
1744        for &bn in &note_block_nums {
1745            assert!(
1746                partial_mmr.is_tracked(bn.as_usize()),
1747                "block {bn} with notes should be tracked in partial MMR"
1748            );
1749        }
1750    }
1751}