//! State sync logic for the Miden client.
//!
//! Source: `miden_client/sync/state_sync.rs`

1use alloc::boxed::Box;
2use alloc::collections::{BTreeMap, BTreeSet};
3use alloc::sync::Arc;
4use alloc::vec::Vec;
5
6use async_trait::async_trait;
7use miden_protocol::account::{
8    Account,
9    AccountCode,
10    AccountDelta,
11    AccountHeader,
12    AccountId,
13    AccountStorage,
14    AccountStorageDelta,
15    AccountVaultDelta,
16    StorageMapKey,
17    StorageSlot,
18    StorageSlotContent,
19    StorageSlotName,
20    StorageSlotType,
21};
22use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey};
23use miden_protocol::block::{BlockHeader, BlockNumber};
24use miden_protocol::crypto::merkle::mmr::{MmrDelta, PartialMmr};
25use miden_protocol::note::{Note, NoteId, NoteTag, NoteType, Nullifier};
26use miden_protocol::transaction::InputNoteCommitment;
27use miden_protocol::{EMPTY_WORD, Felt, Word};
28use tracing::info;
29
30use super::state_sync_update::TransactionUpdateTracker;
31use super::{AccountUpdates, PublicAccountUpdate, StateSyncUpdate};
32use crate::ClientError;
33use crate::note::NoteUpdateTracker;
34use crate::rpc::domain::account::{
35    AccountDetails,
36    AccountStorageMapDetails,
37    AccountStorageRequirements,
38    FetchedAccount,
39};
40use crate::rpc::domain::note::{CommittedNote, NoteSyncBlock};
41use crate::rpc::domain::storage_map::StorageMapUpdate;
42use crate::rpc::domain::transaction::{
43    TransactionInclusion,
44    TransactionRecord as RpcTransactionRecord,
45};
46use crate::rpc::{AccountStateAt, NodeRpcClient, RpcError};
47use crate::store::{AccountStorageFilter, InputNoteRecord, OutputNoteRecord, Store, StoreError};
48use crate::transaction::TransactionRecord;
49
// STATE UPDATE DATA
// ================================================================================================

/// Raw data fetched from the node needed to sync the client to the chain tip.
///
/// Aggregates the responses of `sync_chain_mmr`, `sync_notes`, `get_notes_by_id`, and
/// `sync_transactions`. This may contain more data than a particular client needs to store — it is
/// filtered and transformed into a [`StateSyncUpdate`] before being applied.
struct RawStateSyncData {
    /// MMR delta covering the full range from `current_block` to `chain_tip`.
    mmr_delta: MmrDelta,
    /// Header of the chain tip block discovered during the sync request.
    chain_tip_header: BlockHeader,
    /// Blocks containing notes that matched the client's requested note tags.
    note_blocks: Vec<NoteSyncBlock>,
    /// Full note bodies for public notes, keyed by note ID.
    public_notes: BTreeMap<NoteId, Note>,
    /// Account commitment updates for the synced range.
    account_commitment_updates: Vec<(AccountId, Word)>,
    /// Transaction inclusions for the synced range.
    transactions: Vec<TransactionInclusion>,
    /// Nullifiers observed over the synced range.
    nullifiers: Vec<Nullifier>,
}
74
// SYNC REQUEST
// ================================================================================================

/// Bundles the client state needed to perform a sync operation.
///
/// The sync process uses these inputs to:
/// - Request account commitment updates from the node for the provided accounts.
/// - Filter which note inclusions the node returns based on the provided note tags.
/// - Follow the lifecycle of every tracked note (input and output), transitioning them from pending
///   to committed to consumed as the network state advances.
/// - Track uncommitted transactions so they can be marked as committed when the node confirms them,
///   or discarded when they become stale.
///
/// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build a default input
/// from the client state, or construct this struct manually for custom sync scenarios.
pub struct StateSyncInput {
    /// Account headers to request commitment updates for.
    pub accounts: Vec<AccountHeader>,
    /// Note tags that the node uses to filter which note inclusions to return.
    pub note_tags: BTreeSet<NoteTag>,
    /// Input notes whose lifecycle should be followed during sync.
    pub input_notes: Vec<InputNoteRecord>,
    /// Output notes whose lifecycle should be followed during sync.
    pub output_notes: Vec<OutputNoteRecord>,
    /// Transactions to track for commitment or discard during sync.
    pub uncommitted_transactions: Vec<TransactionRecord>,
}
102
// SYNC CALLBACKS
// ================================================================================================

/// The action to be taken when a note update is received as part of the sync response.
// NOTE: `Commit(CommittedNote)` is the large variant; boxing it would change the public API,
// so the lint is allowed instead.
#[allow(clippy::large_enum_variant)]
pub enum NoteUpdateAction {
    /// The note commit update is relevant and the specified note should be marked as committed in
    /// the store, storing its inclusion proof.
    Commit(CommittedNote),
    /// The public note is relevant and should be inserted into the store.
    Insert(InputNoteRecord),
    /// The note update is not relevant and should be discarded.
    Discard,
}
117
#[async_trait(?Send)]
pub trait OnNoteReceived {
    /// Callback that gets executed when a new note is received as part of the sync response.
    ///
    /// It receives:
    ///
    /// - The committed note received from the network.
    /// - An optional note record that corresponds to the state of the note in the network (only if
    ///   the note is public).
    ///
    /// It returns a [`NoteUpdateAction`] indicating what to do with the received note update:
    /// commit the note, insert it as a new public note, or discard it as irrelevant.
    async fn on_note_received(
        &self,
        committed_note: CommittedNote,
        public_note: Option<InputNoteRecord>,
    ) -> Result<NoteUpdateAction, ClientError>;
}
// STATE SYNC
// ================================================================================================

/// The state sync component encompasses the client's sync logic. It is used to request updates
/// from the node and apply them to the relevant elements. The updates are then returned and can
/// be applied to the store to persist the changes.
#[derive(Clone)]
pub struct StateSync {
    /// The RPC client used to communicate with the node.
    rpc_api: Arc<dyn NodeRpcClient>,
    /// The client's store, used to fetch account storage and vault data on demand during
    /// delta-based sync of public accounts. When `None`, oversized public accounts fall back
    /// to `get_account_details` (full sync from block 0).
    store: Option<Arc<dyn Store>>,
    /// Responsible for checking the relevance of notes and executing the
    /// [`OnNoteReceived`] callback when a new note inclusion is received.
    note_screener: Arc<dyn OnNoteReceived>,
    /// Number of blocks after which pending transactions are considered stale and discarded.
    /// If `None`, there is no limit and transactions will be kept indefinitely.
    tx_discard_delta: Option<u32>,
    /// Whether to check for nullifiers during state sync. When enabled, the component will query
    /// the nullifiers for unspent notes at each sync step. This allows to detect when tracked
    /// notes have been consumed externally and discard local transactions that depend on them.
    sync_nullifiers: bool,
}
161
162impl StateSync {
163    /// Creates a new instance of the state sync component.
164    ///
165    /// The nullifiers sync is enabled by default. To disable it, see
166    /// [`Self::disable_nullifier_sync`].
167    ///
168    /// # Arguments
169    ///
170    /// * `rpc_api` - The RPC client used to communicate with the node.
171    /// * `store` - Optional store for on-demand account data access during delta sync.
172    /// * `note_screener` - The note screener used to check the relevance of notes.
173    /// * `tx_discard_delta` - Number of blocks after which pending transactions are discarded.
174    pub fn new(
175        rpc_api: Arc<dyn NodeRpcClient>,
176        store: Option<Arc<dyn Store>>,
177        note_screener: Arc<dyn OnNoteReceived>,
178        tx_discard_delta: Option<u32>,
179    ) -> Self {
180        Self {
181            rpc_api,
182            store,
183            note_screener,
184            tx_discard_delta,
185            sync_nullifiers: true,
186        }
187    }
188
    /// Disables the nullifier sync.
    ///
    /// When disabled, the component will not query the node for new nullifiers after each sync
    /// step. This is useful for clients that don't need to track note consumption, such as
    /// faucets.
    pub fn disable_nullifier_sync(&mut self) {
        self.sync_nullifiers = false;
    }
197
    /// Enables the nullifier sync.
    ///
    /// This is the default behavior; it only needs to be called after a prior
    /// [`Self::disable_nullifier_sync`].
    pub fn enable_nullifier_sync(&mut self) {
        self.sync_nullifiers = true;
    }
202
    /// Syncs the state of the client with the chain tip of the node, returning the updates that
    /// should be applied to the store.
    ///
    /// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build the default
    /// input, or assemble it manually for custom sync. The `current_partial_mmr` is taken by
    /// mutable reference so callers can keep it in memory across syncs.
    ///
    /// During the sync process, the following steps are performed:
    /// 1. A request is sent to the node to get the state updates. This request includes tracked
    ///    account IDs and the tags of notes that might have changed or that might be of interest to
    ///    the client.
    /// 2. A response is received with the current state of the network. The response includes
    ///    information about new and committed notes, updated accounts, and committed transactions.
    /// 3. Tracked public accounts are updated and private accounts are validated against the node
    ///    state.
    /// 4. Tracked notes are updated with their new states. Notes might be committed or nullified
    ///    during the sync processing.
    /// 5. New notes are checked, and only relevant ones are stored. Relevance is determined by the
    ///    [`OnNoteReceived`] callback.
    /// 6. Transactions are updated with their new states. Transactions might be committed or
    ///    discarded.
    /// 7. The MMR is updated with the new peaks and authentication nodes.
    ///
    /// # Errors
    ///
    /// Returns an error if the partial MMR forest cannot be converted to a block number, or if
    /// any node request or local state update fails.
    pub async fn sync_state(
        &self,
        current_partial_mmr: &mut PartialMmr,
        input: StateSyncInput,
    ) -> Result<StateSyncUpdate, ClientError> {
        let StateSyncInput {
            accounts,
            note_tags,
            input_notes,
            output_notes,
            uncommitted_transactions,
        } = input;
        // Derive the current block number from the partial MMR: the latest tracked block is
        // `num_leaves - 1` (saturating so an empty forest maps to block 0).
        let block_num = u32::try_from(current_partial_mmr.forest().num_leaves().saturating_sub(1))
            .map_err(|_| ClientError::InvalidPartialMmrForest)?
            .into();

        let mut state_sync_update = StateSyncUpdate {
            block_num,
            note_updates: NoteUpdateTracker::new(input_notes, output_notes),
            transaction_updates: TransactionUpdateTracker::new(uncommitted_transactions),
            ..Default::default()
        };

        let note_tags = Arc::new(note_tags);
        let account_ids: Vec<AccountId> = accounts.iter().map(AccountHeader::id).collect();
        let Some(mut sync_data) = self
            .fetch_sync_data(state_sync_update.block_num, &account_ids, &note_tags)
            .await?
        else {
            // No progress — already at the tip.
            return Ok(state_sync_update);
        };

        state_sync_update.block_num = sync_data.chain_tip_header.block_num();

        // Build input note records for public notes from the fetched note bodies and the
        // inclusion proofs already present in the note blocks.
        let mut public_note_records: BTreeMap<NoteId, InputNoteRecord> = BTreeMap::new();
        for (note_id, note) in core::mem::take(&mut sync_data.public_notes) {
            let inclusion_proof = sync_data
                .note_blocks
                .iter()
                .find_map(|b| b.notes.get(&note_id))
                .map(|committed| committed.inclusion_proof().clone());

            // Public notes without a matching inclusion proof are skipped — without a proof
            // no unverified note record can be built.
            if let Some(inclusion_proof) = inclusion_proof {
                let state = crate::store::input_note_states::UnverifiedNoteState {
                    metadata: note.metadata().clone(),
                    inclusion_proof,
                }
                .into();
                let record = InputNoteRecord::new(note.into(), None, state);
                public_note_records.insert(record.id(), record);
            }
        }

        self.account_state_sync(
            &mut state_sync_update.account_updates,
            &accounts,
            &sync_data.account_commitment_updates,
            block_num,
        )
        .await?;

        // Apply local changes: update the MMR, screen notes, and apply state transitions.
        self.apply_sync_result(
            sync_data,
            &public_note_records,
            &mut state_sync_update,
            current_partial_mmr,
        )
        .await?;

        // Optional nullifier pass: detects tracked notes consumed externally.
        if self.sync_nullifiers {
            self.nullifiers_state_sync(&mut state_sync_update, block_num).await?;
        }

        Ok(state_sync_update)
    }
304
    /// Fetches the sync data from the node by calling the following endpoints:
    /// 1. `sync_chain_mmr` — discovers the chain tip, gets the MMR delta and chain tip header.
    /// 2. `sync_notes` — loops until the full range to the chain tip is covered (handles paginated
    ///    responses).
    /// 3. `get_notes_by_id` — fetches full metadata for notes with attachments.
    /// 4. `sync_transactions` — gets transaction data for the full range.
    ///
    /// Returns `None` when the client is already at the chain tip (no progress).
    async fn fetch_sync_data(
        &self,
        current_block_num: BlockNumber,
        account_ids: &[AccountId],
        note_tags: &Arc<BTreeSet<NoteTag>>,
    ) -> Result<Option<RawStateSyncData>, ClientError> {
        // Step 1: Fetch the MMR delta and chain tip header.
        let chain_mmr_info = self.rpc_api.sync_chain_mmr(current_block_num, None).await?;
        let chain_tip = chain_mmr_info.block_to;

        // No progress — already at the tip.
        if chain_tip == current_block_num {
            info!(block_num = %current_block_num, "Already at chain tip, nothing to sync.");
            return Ok(None);
        }

        info!(
            block_from = %current_block_num,
            block_to = %chain_tip,
            "Syncing state.",
        );

        // Step 2: Paginate sync_notes using the same chain tip so MMR paths are opened at
        // a consistent forest.
        let sync_notes_result = self
            .rpc_api
            .sync_notes_with_details(current_block_num, Some(chain_tip), note_tags.as_ref())
            .await?;

        let note_count: usize = sync_notes_result.blocks.iter().map(|b| b.notes.len()).sum();
        info!(
            blocks_with_notes = sync_notes_result.blocks.len(),
            notes = note_count,
            public_notes = sync_notes_result.public_notes.len(),
            "Fetched note sync data.",
        );

        // Step 3: Gather transactions for tracked accounts over the full range.
        let (account_commitment_updates, transactions, nullifiers) =
            self.fetch_transaction_data(current_block_num, chain_tip, account_ids).await?;

        // Bundle everything into the raw data container for later filtering/transformation.
        Ok(Some(RawStateSyncData {
            mmr_delta: chain_mmr_info.mmr_delta,
            chain_tip_header: chain_mmr_info.block_header,
            note_blocks: sync_notes_result.blocks,
            public_notes: sync_notes_result.public_notes,
            account_commitment_updates,
            transactions,
            nullifiers,
        }))
    }
364
365    /// Fetches transaction data for the given range and account IDs.
366    async fn fetch_transaction_data(
367        &self,
368        block_from: BlockNumber,
369        block_to: BlockNumber,
370        account_ids: &[AccountId],
371    ) -> Result<(Vec<(AccountId, Word)>, Vec<TransactionInclusion>, Vec<Nullifier>), ClientError>
372    {
373        if account_ids.is_empty() {
374            return Ok((vec![], vec![], vec![]));
375        }
376
377        let tx_info = self
378            .rpc_api
379            .sync_transactions(block_from, Some(block_to), account_ids.to_vec())
380            .await?;
381
382        let transaction_records = tx_info.transaction_records;
383
384        let account_updates = derive_account_commitment_updates(&transaction_records);
385        let nullifiers = compute_ordered_nullifiers(&transaction_records);
386
387        let tx_inclusions = transaction_records
388            .into_iter()
389            .map(|r| {
390                let nullifiers = r
391                    .transaction_header
392                    .input_notes()
393                    .iter()
394                    .map(InputNoteCommitment::nullifier)
395                    .collect();
396                TransactionInclusion {
397                    transaction_id: r.transaction_header.id(),
398                    block_num: r.block_num,
399                    account_id: r.transaction_header.account_id(),
400                    initial_state_commitment: r.transaction_header.initial_state_commitment(),
401                    nullifiers,
402                    output_notes: r.output_notes,
403                    erased_output_note_ids: r.erased_output_note_ids,
404                }
405            })
406            .collect();
407
408        Ok((account_updates, tx_inclusions, nullifiers))
409    }
410
    // HELPERS
    // --------------------------------------------------------------------------------------------

    /// Applies sync results to the local state update.
    ///
    /// Applies fetched sync data to the local state:
    /// 1. Advances the partial MMR (delta + chain tip leaf).
    /// 2. Screens note blocks and tracks relevant ones in the MMR.
    /// 3. Applies transaction and nullifier updates.
    async fn apply_sync_result(
        &self,
        sync_data: RawStateSyncData,
        public_note_records: &BTreeMap<NoteId, InputNoteRecord>,
        state_sync_update: &mut StateSyncUpdate,
        current_partial_mmr: &mut PartialMmr,
    ) -> Result<(), ClientError> {
        let RawStateSyncData {
            mmr_delta,
            chain_tip_header,
            note_blocks,
            nullifiers,
            transactions,
            ..
        } = sync_data;

        // Advance the partial MMR: apply delta (up to chain_tip - 1), capture peaks for
        // storage, then add the chain tip leaf (which the delta excludes due to the
        // one-block lag in block header MMR commitments).
        let mut new_authentication_nodes =
            current_partial_mmr.apply(mmr_delta).map_err(StoreError::MmrError)?;
        let new_peaks = current_partial_mmr.peaks();
        new_authentication_nodes
            .append(&mut current_partial_mmr.add(chain_tip_header.commitment(), false));

        // Record the chain tip block (untracked — the `false` flag) with its peaks and
        // authentication nodes.
        state_sync_update.block_updates.insert(
            chain_tip_header.clone(),
            false,
            new_peaks,
            new_authentication_nodes,
        );

        // Screen each note block and track relevant ones in the partial MMR using the
        // authentication path from the sync_notes response.
        for block in note_blocks {
            let found_relevant_note = self
                .note_state_sync(
                    &mut state_sync_update.note_updates,
                    block.notes,
                    &block.block_header,
                    public_note_records,
                )
                .await?;

            if found_relevant_note {
                let block_pos = block.block_header.block_num().as_usize();

                // Snapshot the currently known nodes so only the newly added ones are
                // collected below.
                let nodes_before: BTreeMap<_, _> =
                    current_partial_mmr.nodes().map(|(k, v)| (*k, *v)).collect();

                if !current_partial_mmr.is_tracked(block_pos) {
                    current_partial_mmr
                        .track(block_pos, block.block_header.commitment(), &block.mmr_path)
                        .map_err(StoreError::MmrError)?;
                }

                // Always collect new authentication nodes — even when the block was
                // already tracked from the MMR delta, the delta's nodes may not include
                // the full authentication path needed to reconstruct the PartialMmr
                // from storage later.
                let track_auth_nodes: Vec<_> = current_partial_mmr
                    .nodes()
                    .filter(|(k, _)| !nodes_before.contains_key(k))
                    .map(|(k, v)| (*k, *v))
                    .collect();

                state_sync_update.block_updates.insert(
                    block.block_header,
                    true,
                    current_partial_mmr.peaks(),
                    track_auth_nodes,
                );
            }
        }

        // Apply transaction and nullifier data.
        state_sync_update.note_updates.extend_nullifiers(nullifiers);
        self.transaction_state_sync(
            &mut state_sync_update.transaction_updates,
            &chain_tip_header,
            &transactions,
        );

        // Process each transaction
        for transaction in &transactions {
            // Transition tracked output notes to Committed using inclusion proofs from the
            // transaction sync response. This covers output notes regardless of whether their
            // tags were tracked in the note sync.
            state_sync_update
                .note_updates
                .apply_output_note_inclusion_proofs(&transaction.output_notes)?;

            // Detect output notes erased by same-batch note erasure.
            Self::mark_erased_notes_as_consumed(state_sync_update, transaction);
        }

        Ok(())
    }
518
519    /// Marks output notes that were erased by same-batch note erasure as consumed.
520    ///
521    /// When a note is created and consumed in the same batch, note erasure removes it from
522    /// the block body. The node reports these as erased output notes in the transaction
523    /// record (note ID only, no inclusion proof). We mark them as consumed.
524    fn mark_erased_notes_as_consumed(
525        state_sync_update: &mut StateSyncUpdate,
526        transaction: &TransactionInclusion,
527    ) {
528        for note_id in &transaction.erased_output_note_ids {
529            // Best-effort: ignore errors for notes not tracked by this client.
530            let _ = state_sync_update
531                .note_updates
532                .mark_erased_note_as_consumed(*note_id, transaction.block_num);
533        }
534    }
535
536    /// Compares the state of tracked accounts with the updates received from the node. The method
537    /// Updates the `account_updates` with the details of the accounts that need to be updated.
538    ///
539    /// The account updates might include:
540    /// * Public accounts that have been updated in the node (full or delta-based).
541    /// * Network accounts that have been updated in the node and are being tracked by the client.
542    /// * Private accounts that have been marked as mismatched because the current commitment
543    ///   doesn't match the one received from the node. The client will need to handle these cases
544    ///   as they could be a stale account state or a reason to lock the account.
545    async fn account_state_sync(
546        &self,
547        account_updates: &mut AccountUpdates,
548        accounts: &[AccountHeader],
549        account_commitment_updates: &[(AccountId, Word)],
550        block_num: BlockNumber,
551    ) -> Result<(), ClientError> {
552        // "Public" here includes both Public and Network accounts, since both have
553        // their state stored on-chain and follow the same sync path.
554        let (public_accounts, private_accounts): (Vec<_>, Vec<_>) =
555            accounts.iter().partition(|a| !a.id().is_private());
556
557        self.sync_public_accounts(
558            account_updates,
559            account_commitment_updates,
560            &public_accounts,
561            block_num,
562        )
563        .await?;
564
565        let mismatched_private_accounts = account_commitment_updates
566            .iter()
567            .filter(|(account_id, digest)| {
568                private_accounts
569                    .iter()
570                    .any(|a| a.id() == *account_id && &a.to_commitment() != digest)
571            })
572            .copied()
573            .collect::<Vec<_>>();
574
575        account_updates.extend(AccountUpdates::new(Vec::new(), mismatched_private_accounts));
576
577        Ok(())
578    }
579
    /// Queries the node for updated public accounts and populates `account_updates`.
    ///
    /// When a store is available, storage and vault data are fetched on demand to build
    /// deltas for oversized accounts. Without a store, oversized accounts fall back to
    /// `get_account_details` (full sync from block 0).
    async fn sync_public_accounts(
        &self,
        account_updates: &mut AccountUpdates,
        commitment_updates: &[(AccountId, Word)],
        current_public_accounts: &[&AccountHeader],
        block_num: BlockNumber,
    ) -> Result<(), ClientError> {
        for (id, commitment) in commitment_updates {
            // Only process accounts we track whose local commitment differs from the node's.
            let Some(local_header) = current_public_accounts
                .iter()
                .find(|acc| *id == acc.id() && *commitment != acc.to_commitment())
            else {
                continue;
            };

            let account_id = local_header.id();

            // Build storage requirements and known code from store (if available) to
            // request all entries for every map slot and avoid re-downloading code.
            let (storage_requirements, known_code) =
                self.fetch_local_account_hints(account_id).await;

            let (proof_block_num, proof) = self
                .rpc_api
                .get_account_proof(
                    account_id,
                    storage_requirements,
                    AccountStateAt::ChainTip,
                    known_code,
                    Some(EMPTY_WORD),
                )
                .await
                .map_err(ClientError::RpcError)?;

            let Some(details) = proof.into_parts().1 else {
                // Private account returned — should not happen for public accounts.
                continue;
            };

            // Skip if the remote nonce is not newer than what we already have.
            if details.header.nonce().as_canonical_u64() <= local_header.nonce().as_canonical_u64()
            {
                continue;
            }

            // An account is "oversized" when the node truncated its vault or any storage
            // map in the proof response.
            let has_oversized_data = details.vault_details.too_many_assets
                || details.storage_details.map_details.iter().any(|m| m.too_many_entries);

            if has_oversized_data {
                if self.store.is_some() {
                    // Delta path: build an AccountDelta from incremental updates,
                    // fetching storage slots and vault from the store on demand.
                    let delta = self
                        .build_account_delta(&details, local_header, block_num, proof_block_num)
                        .await?;
                    account_updates.extend(AccountUpdates::new(
                        vec![PublicAccountUpdate::Delta {
                            new_header: details.header.clone(),
                            delta,
                        }],
                        Vec::new(),
                    ));
                } else {
                    // No store available — fall back to get_account_details which
                    // handles oversized data internally (syncing from block 0).
                    let response = self
                        .rpc_api
                        .get_account_details(account_id)
                        .await
                        .map_err(ClientError::RpcError)?;

                    match response {
                        FetchedAccount::Public(account, _) => {
                            account_updates.extend(AccountUpdates::new(
                                vec![PublicAccountUpdate::Full(*account)],
                                Vec::new(),
                            ));
                        },
                        // This should not happen since we only fetch public accounts here.
                        FetchedAccount::Private(..) => {},
                    }
                }
            } else {
                // Small account: build directly from the response details.
                let account = Account::try_from(&details).map_err(ClientError::RpcError)?;
                account_updates.extend(AccountUpdates::new(
                    vec![PublicAccountUpdate::Full(account)],
                    Vec::new(),
                ));
            }
        }

        Ok(())
    }
679
680    /// Fetches storage requirements and known code from the store for a given account.
681    ///
682    /// Returns defaults when no store is available.
683    async fn fetch_local_account_hints(
684        &self,
685        account_id: AccountId,
686    ) -> (AccountStorageRequirements, Option<AccountCode>) {
687        let Some(store) = &self.store else {
688            return (AccountStorageRequirements::default(), None);
689        };
690
691        let storage_requirements = store
692            .get_account_storage(account_id, AccountStorageFilter::All)
693            .await
694            .map(|storage| Self::build_storage_requirements(&storage))
695            .unwrap_or_default();
696
697        let known_code = store.get_account_code(account_id).await.ok().flatten();
698
699        (storage_requirements, known_code)
700    }
701
702    /// Builds [`AccountStorageRequirements`] from [`AccountStorage`], requesting all entries for
703    /// every map slot.
704    fn build_storage_requirements(storage: &AccountStorage) -> AccountStorageRequirements {
705        let map_slots = storage.slots().iter().filter_map(|slot: &StorageSlot| {
706            if slot.slot_type() == StorageSlotType::Map {
707                // Passing an empty key list requests all entries for this map slot.
708                Some((slot.name().clone(), core::iter::empty::<&StorageMapKey>()))
709            } else {
710                None
711            }
712        });
713        AccountStorageRequirements::new(map_slots)
714    }
715
716    /// Builds an [`AccountDelta`] from incremental RPC sync data, fetching local account
717    /// data from the store on demand.
718    ///
719    /// For oversized storage maps: fetches delta entries via `sync_storage_maps`.
720    /// For oversized vaults: fetches delta entries via `sync_account_vault`.
721    /// Non-oversized parts are diffed against local data fetched from the store.
722    ///
723    /// # Panics
724    ///
725    /// Panics if `self.store` is `None`. Callers must check before invoking.
726    #[allow(clippy::too_many_lines)]
727    async fn build_account_delta(
728        &self,
729        details: &AccountDetails,
730        local_header: &AccountHeader,
731        block_from: BlockNumber,
732        block_to: BlockNumber,
733    ) -> Result<AccountDelta, ClientError> {
734        let store = self.store.as_ref().expect("store required for delta sync");
735        let account_id = details.header.id();
736
737        let storage_delta = self
738            .build_storage_delta(details, account_id, block_from, block_to, store.as_ref())
739            .await?;
740
741        let vault_delta = self
742            .build_vault_delta(details, account_id, block_from, block_to, store.as_ref())
743            .await?;
744
745        // --- Nonce delta ---
746        let old_nonce = local_header.nonce().as_canonical_u64();
747        let new_nonce = details.header.nonce().as_canonical_u64();
748        let nonce_delta = Felt::new(new_nonce - old_nonce);
749
750        AccountDelta::new(account_id, storage_delta, vault_delta, nonce_delta).map_err(|err| {
751            ClientError::RpcError(RpcError::InvalidResponse(format!(
752                "failed to construct account delta: {err}"
753            )))
754        })
755    }
756
    /// Computes the full storage delta (value slots + map slots) for the account.
    ///
    /// For value slots, compares the response values against the local store. For map slots,
    /// oversized maps (`too_many_entries`) fetch incremental delta entries from the sync endpoint
    /// and deduplicate by key keeping the latest value; non-oversized maps diff the full response
    /// entries against the local store.
    ///
    /// # Errors
    ///
    /// Returns an error when a map slot lacks its `map_details` in the response, when the sync
    /// RPC call fails, or when a delta entry cannot be recorded.
    async fn build_storage_delta(
        &self,
        details: &AccountDetails,
        account_id: AccountId,
        block_from: BlockNumber,
        block_to: BlockNumber,
        store: &dyn Store,
    ) -> Result<AccountStorageDelta, ClientError> {
        let mut storage_delta = AccountStorageDelta::new();

        // --- Value slots: diff each response value against the locally stored one ---
        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() == StorageSlotType::Value {
                // A failed local lookup yields `None`, which is treated like a differing value:
                // the response value gets recorded in the delta.
                let local_value = store
                    .get_account_storage_item(account_id, slot_header.name().clone())
                    .await
                    .ok();

                if local_value.as_ref() != Some(&slot_header.value()) {
                    storage_delta
                        .set_item(slot_header.name().clone(), slot_header.value())
                        .map_err(|err| {
                            ClientError::RpcError(RpcError::InvalidResponse(format!(
                                "failed to set storage delta item: {err}"
                            )))
                        })?;
                }
            }
        }

        // Delta updates for oversized maps are fetched lazily — at most one `sync_storage_maps`
        // call per account — and shared across all oversized map slots below.
        let mut map_delta_cache: Option<Vec<StorageMapUpdate>> = None;

        // --- Map slots ---
        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() != StorageSlotType::Map {
                continue;
            }

            // Every map-typed slot must carry its corresponding map details in the response.
            let map_details =
                details.storage_details.find_map_details(slot_header.name()).ok_or_else(|| {
                    ClientError::RpcError(RpcError::ExpectedDataMissing(format!(
                        "slot '{}' is a map but has no map_details in response",
                        slot_header.name()
                    )))
                })?;

            if map_details.too_many_entries {
                // Oversized map: fetch delta entries from the sync endpoint.
                if map_delta_cache.is_none() {
                    let map_info = self
                        .rpc_api
                        .sync_storage_maps(block_from, Some(block_to), account_id)
                        .await
                        .map_err(ClientError::RpcError)?;
                    map_delta_cache = Some(map_info.updates);
                }

                // `unwrap_or_default` is defensive: the cache is always `Some` at this point.
                Self::apply_oversized_map_delta(
                    map_delta_cache.as_deref().unwrap_or_default(),
                    slot_header.name(),
                    &mut storage_delta,
                )?;
            } else {
                // Non-oversized map: the response holds all entries, so diff locally.
                Self::apply_full_map_delta(
                    map_details,
                    slot_header.name(),
                    account_id,
                    store,
                    &mut storage_delta,
                )
                .await?;
            }
        }

        Ok(storage_delta)
    }
837
838    /// Applies delta updates from the sync endpoint for an oversized storage map slot.
839    ///
840    /// Filters the cached delta updates to the target slot, sorts by block number, and
841    /// deduplicates by key (keeping the latest value).
842    fn apply_oversized_map_delta(
843        delta_updates: &[StorageMapUpdate],
844        slot_name: &StorageSlotName,
845        storage_delta: &mut AccountStorageDelta,
846    ) -> Result<(), ClientError> {
847        let mut relevant: Vec<_> =
848            delta_updates.iter().filter(|u| u.slot_name == *slot_name).collect();
849        relevant.sort_by_key(|u| u.block_num);
850
851        // Deduplicate: keep latest value per key.
852        let mut seen = BTreeMap::new();
853        for update in relevant {
854            seen.insert(update.key, update.value);
855        }
856
857        for (key, value) in seen {
858            storage_delta.set_map_item(slot_name.clone(), key, value).map_err(|err| {
859                ClientError::RpcError(RpcError::InvalidResponse(format!(
860                    "failed to set storage map delta: {err}"
861                )))
862            })?;
863        }
864
865        Ok(())
866    }
867
868    /// Diffs the full response map entries against the local store for a non-oversized map slot.
869    ///
870    /// Entries present in the response but missing or different locally are added to the delta.
871    /// Entries present locally but absent in the response are set to `Word::default()` (removal).
872    async fn apply_full_map_delta(
873        map_details: &AccountStorageMapDetails,
874        slot_name: &StorageSlotName,
875        account_id: AccountId,
876        store: &dyn Store,
877        storage_delta: &mut AccountStorageDelta,
878    ) -> Result<(), ClientError> {
879        let response_map = map_details
880            .entries
881            .clone()
882            .into_storage_map()
883            .ok_or_else(|| {
884                ClientError::RpcError(RpcError::ExpectedDataMissing(
885                    "expected AllEntries for map, got EntriesWithProofs".into(),
886                ))
887            })?
888            .map_err(|err| {
889                ClientError::RpcError(RpcError::InvalidResponse(format!(
890                    "the rpc api returned a non-valid map entry: {err}"
891                )))
892            })?;
893
894        let local_entries: BTreeMap<StorageMapKey, Word> = store
895            .get_account_storage(account_id, AccountStorageFilter::SlotName(slot_name.clone()))
896            .await
897            .ok()
898            .and_then(|storage| storage.get(slot_name).cloned())
899            .map(|slot| match slot.content() {
900                StorageSlotContent::Map(map) => map.entries().map(|(k, v)| (*k, *v)).collect(),
901                StorageSlotContent::Value(_) => BTreeMap::new(),
902            })
903            .unwrap_or_default();
904
905        let response_entries: BTreeMap<StorageMapKey, Word> =
906            response_map.entries().map(|(k, v)| (*k, *v)).collect();
907
908        // Entries in response but not in local, or with different values.
909        for (key, value) in &response_entries {
910            if local_entries.get(key) != Some(value) {
911                storage_delta.set_map_item(slot_name.clone(), *key, *value).map_err(|err| {
912                    ClientError::RpcError(RpcError::InvalidResponse(format!(
913                        "failed to set storage map delta: {err}"
914                    )))
915                })?;
916            }
917        }
918
919        // Entries in local but removed in response (set to empty word).
920        for key in local_entries.keys() {
921            if !response_entries.contains_key(key) {
922                storage_delta.set_map_item(slot_name.clone(), *key, Word::default()).map_err(
923                    |err| {
924                        ClientError::RpcError(RpcError::InvalidResponse(format!(
925                            "failed to set storage map delta for removal: {err}"
926                        )))
927                    },
928                )?;
929            }
930        }
931
932        Ok(())
933    }
934
935    /// Computes the vault delta between local and remote account state.
936    ///
937    /// For oversized vaults (`too_many_assets`), fetches incremental updates from the sync
938    /// endpoint and replays them on top of the local vault. For non-oversized vaults, diffs
939    /// the full response assets against the local vault.
940    async fn build_vault_delta(
941        &self,
942        details: &AccountDetails,
943        account_id: AccountId,
944        block_from: BlockNumber,
945        block_to: BlockNumber,
946        store: &dyn Store,
947    ) -> Result<AccountVaultDelta, ClientError> {
948        let mut vault_delta = AccountVaultDelta::default();
949        let local_vault =
950            store.get_account_vault(account_id).await.map_err(ClientError::StoreError)?;
951
952        if details.vault_details.too_many_assets {
953            // Oversized vault: fetch delta from sync endpoint.
954            let vault_info = self
955                .rpc_api
956                .sync_account_vault(block_from, Some(block_to), account_id)
957                .await
958                .map_err(ClientError::RpcError)?;
959
960            // Build the final vault state by applying updates to local vault.
961            let mut vault_map: BTreeMap<AssetVaultKey, Asset> =
962                local_vault.assets().map(|asset| (asset.vault_key(), asset)).collect();
963
964            let mut vault_updates = vault_info.updates;
965            vault_updates.sort_by_key(|u| u.block_num);
966
967            for update in vault_updates {
968                match update.asset {
969                    Some(asset) => {
970                        vault_map.insert(update.vault_key, asset);
971                    },
972                    None => {
973                        vault_map.remove(&update.vault_key);
974                    },
975                }
976            }
977
978            Self::compute_vault_delta_from_diff(&local_vault, &vault_map, &mut vault_delta)?;
979        } else {
980            // Non-oversized vault: diff response assets against local.
981            let final_assets: BTreeMap<AssetVaultKey, Asset> = details
982                .vault_details
983                .assets
984                .iter()
985                .map(|asset| (asset.vault_key(), *asset))
986                .collect();
987
988            Self::compute_vault_delta_from_diff(&local_vault, &final_assets, &mut vault_delta)?;
989        }
990
991        Ok(vault_delta)
992    }
993
994    /// Computes a vault delta from the difference between a local vault and a final asset map.
995    fn compute_vault_delta_from_diff(
996        local_vault: &AssetVault,
997        final_assets: &BTreeMap<AssetVaultKey, Asset>,
998        vault_delta: &mut AccountVaultDelta,
999    ) -> Result<(), ClientError> {
1000        let local_assets: BTreeMap<AssetVaultKey, Asset> =
1001            local_vault.assets().map(|a| (a.vault_key(), a)).collect();
1002
1003        // Assets in final but not in local -> add. Changed amounts -> remove old, add new.
1004        for (key, final_asset) in final_assets {
1005            match local_assets.get(key) {
1006                None => {
1007                    vault_delta.add_asset(*final_asset).map_err(|err| {
1008                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1009                            "failed to add asset to vault delta: {err}"
1010                        )))
1011                    })?;
1012                },
1013                Some(local_asset) if local_asset != final_asset => {
1014                    vault_delta.remove_asset(*local_asset).map_err(|err| {
1015                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1016                            "failed to remove old asset from vault delta: {err}"
1017                        )))
1018                    })?;
1019                    vault_delta.add_asset(*final_asset).map_err(|err| {
1020                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1021                            "failed to add new asset to vault delta: {err}"
1022                        )))
1023                    })?;
1024                },
1025                _ => {}, // No change
1026            }
1027        }
1028
1029        // Assets in local but not in final -> remove.
1030        for (key, local_asset) in &local_assets {
1031            if !final_assets.contains_key(key) {
1032                vault_delta.remove_asset(*local_asset).map_err(|err| {
1033                    ClientError::RpcError(RpcError::InvalidResponse(format!(
1034                        "failed to remove asset from vault delta: {err}"
1035                    )))
1036                })?;
1037            }
1038        }
1039
1040        Ok(())
1041    }
1042
1043    /// Applies the changes received from the sync response to the notes and transactions tracked
1044    /// by the client and updates the `note_updates` accordingly.
1045    ///
1046    /// This method uses the callbacks provided to the [`StateSync`] component to check if the
1047    /// updates received are relevant to the client.
1048    ///
1049    /// The note updates might include:
1050    /// * New notes that we received from the node and might be relevant to the client.
1051    /// * Tracked expected notes that were committed in the block.
1052    /// * Tracked notes that were being processed by a transaction that got committed.
1053    /// * Tracked notes that were nullified by an external transaction.
1054    ///
1055    /// The `public_notes` parameter provides cached public note details for the current sync
1056    /// iteration so the node is only queried once per batch.
1057    async fn note_state_sync(
1058        &self,
1059        note_updates: &mut NoteUpdateTracker,
1060        note_inclusions: BTreeMap<NoteId, CommittedNote>,
1061        block_header: &BlockHeader,
1062        public_notes: &BTreeMap<NoteId, InputNoteRecord>,
1063    ) -> Result<bool, ClientError> {
1064        // `found_relevant_note` tracks whether we want to persist the block header in the end
1065        let mut found_relevant_note = false;
1066
1067        for (_, committed_note) in note_inclusions {
1068            let public_note = (committed_note.note_type() != NoteType::Private)
1069                .then(|| public_notes.get(committed_note.note_id()))
1070                .flatten()
1071                .cloned();
1072
1073            match self.note_screener.on_note_received(committed_note, public_note).await? {
1074                NoteUpdateAction::Commit(committed_note) => {
1075                    // Only mark the downloaded block header as relevant if we are talking about
1076                    // an input note (output notes get marked as committed but we don't need the
1077                    // block for anything there)
1078                    found_relevant_note |= note_updates
1079                        .apply_committed_note_state_transitions(&committed_note, block_header)?;
1080                },
1081                NoteUpdateAction::Insert(public_note) => {
1082                    found_relevant_note = true;
1083
1084                    note_updates.apply_new_public_note(public_note, block_header)?;
1085                },
1086                NoteUpdateAction::Discard => {},
1087            }
1088        }
1089
1090        Ok(found_relevant_note)
1091    }
1092
1093    /// Collects the nullifier tags for the notes that were updated in the sync response and uses
1094    /// the `sync_nullifiers` endpoint to check if there are new nullifiers for these
1095    /// notes. It then processes the nullifiers to apply the state transitions on the note updates.
1096    ///
1097    /// The `state_sync_update` parameter will be updated to track the new discarded transactions.
1098    async fn nullifiers_state_sync(
1099        &self,
1100        state_sync_update: &mut StateSyncUpdate,
1101        current_block_num: BlockNumber,
1102    ) -> Result<(), ClientError> {
1103        // To receive information about added nullifiers, we reduce them to the higher 16 bits
1104        // Note that besides filtering by nullifier prefixes, the node also filters by block number
1105        // (it only returns nullifiers from current_block_num until
1106        // response.block_header.block_num())
1107
1108        // Check for new nullifiers for input notes that were updated
1109        let nullifiers_tags: Vec<u16> = state_sync_update
1110            .note_updates
1111            .unspent_nullifiers()
1112            .map(|nullifier| nullifier.prefix())
1113            .collect();
1114
1115        let mut new_nullifiers = self
1116            .rpc_api
1117            .sync_nullifiers(&nullifiers_tags, current_block_num, Some(state_sync_update.block_num))
1118            .await?;
1119
1120        // Discard nullifiers that are newer than the current block (this might happen if the block
1121        // changes between the sync_state and the check_nullifier calls)
1122        new_nullifiers.retain(|update| update.block_num <= state_sync_update.block_num);
1123
1124        for nullifier_update in new_nullifiers {
1125            let external_consumer_account = state_sync_update
1126                .transaction_updates
1127                .external_nullifier_account(&nullifier_update.nullifier);
1128
1129            state_sync_update.note_updates.apply_nullifiers_state_transitions(
1130                &nullifier_update,
1131                state_sync_update.transaction_updates.committed_transactions(),
1132                external_consumer_account,
1133            )?;
1134
1135            // Process nullifiers and track the updates of local tracked transactions that were
1136            // discarded because the notes that they were processing were nullified by an
1137            // another transaction.
1138            state_sync_update
1139                .transaction_updates
1140                .apply_input_note_nullified(nullifier_update.nullifier);
1141        }
1142
1143        Ok(())
1144    }
1145
1146    /// Applies the changes received from the sync response to the transactions tracked by the
1147    /// client and updates the `transaction_updates` accordingly.
1148    ///
1149    /// The transaction updates might include:
1150    /// * New transactions that were committed in the block.
1151    /// * Transactions that were discarded because they were stale or expired.
1152    fn transaction_state_sync(
1153        &self,
1154        transaction_updates: &mut TransactionUpdateTracker,
1155        new_block_header: &BlockHeader,
1156        transaction_inclusions: &[TransactionInclusion],
1157    ) {
1158        for transaction_inclusion in transaction_inclusions {
1159            transaction_updates.apply_transaction_inclusion(
1160                transaction_inclusion,
1161                u64::from(new_block_header.timestamp()),
1162            ); //TODO: Change timestamps from u64 to u32
1163        }
1164
1165        transaction_updates
1166            .apply_sync_height_update(new_block_header.block_num(), self.tx_discard_delta);
1167    }
1168}
1169
1170// HELPERS
1171// ================================================================================================
1172
1173/// Derives account commitment updates from transaction records.
1174///
1175/// For each unique account, takes the `final_state_commitment` from the transaction with the
1176/// highest `block_num`. This replicates the old `SyncState` behavior where the node returned
1177/// the latest account commitment per account in the synced range.
1178fn derive_account_commitment_updates(
1179    transaction_records: &[RpcTransactionRecord],
1180) -> Vec<(AccountId, Word)> {
1181    let mut latest_by_account: BTreeMap<AccountId, &RpcTransactionRecord> = BTreeMap::new();
1182
1183    for record in transaction_records {
1184        let account_id = record.transaction_header.account_id();
1185        latest_by_account
1186            .entry(account_id)
1187            .and_modify(|existing| {
1188                if record.block_num > existing.block_num {
1189                    *existing = record;
1190                }
1191            })
1192            .or_insert(record);
1193    }
1194
1195    latest_by_account
1196        .into_iter()
1197        .map(|(account_id, record)| {
1198            (account_id, record.transaction_header.final_state_commitment())
1199        })
1200        .collect()
1201}
1202
1203/// Returns nullifiers ordered by consuming transaction position, per account.
1204///
1205/// Groups RPC transaction records by (`account_id`, `block_num`), chains them using
1206/// `initial_state_commitment` / `final_state_commitment`, and collects each transaction's
1207/// input note nullifiers in execution order. Nullifiers from the same account are in execution
1208/// order; ordering across different accounts is arbitrary.
1209fn compute_ordered_nullifiers(transaction_records: &[RpcTransactionRecord]) -> Vec<Nullifier> {
1210    // Group transactions by (account_id, block_num).
1211    let mut groups: BTreeMap<(AccountId, BlockNumber), Vec<&RpcTransactionRecord>> =
1212        BTreeMap::new();
1213
1214    for record in transaction_records {
1215        let account_id = record.transaction_header.account_id();
1216        groups.entry((account_id, record.block_num)).or_default().push(record);
1217    }
1218
1219    let mut result = Vec::new();
1220
1221    for txs in groups.values() {
1222        // Build a lookup from initial_state_commitment -> transaction record.
1223        let mut init_to_tx: BTreeMap<Word, &RpcTransactionRecord> = txs
1224            .iter()
1225            .map(|tx| (tx.transaction_header.initial_state_commitment(), *tx))
1226            .collect();
1227
1228        // Build a set of all final states to find the chain start.
1229        let final_states: BTreeSet<Word> =
1230            txs.iter().map(|tx| tx.transaction_header.final_state_commitment()).collect();
1231
1232        // Find the chain start: the tx whose initial_state_commitment is not any other tx's
1233        // final_state_commitment.
1234        let chain_start = txs
1235            .iter()
1236            .find(|tx| !final_states.contains(&tx.transaction_header.initial_state_commitment()));
1237
1238        let Some(start_tx) = chain_start else {
1239            continue;
1240        };
1241
1242        // Walk the chain from start, removing each step from the map.
1243        let mut current =
1244            init_to_tx.remove(&start_tx.transaction_header.initial_state_commitment());
1245
1246        while let Some(tx) = current {
1247            for commitment in tx.transaction_header.input_notes().iter() {
1248                result.push(commitment.nullifier());
1249            }
1250            current = init_to_tx.remove(&tx.transaction_header.final_state_commitment());
1251        }
1252    }
1253
1254    result
1255}
1256
1257#[cfg(test)]
1258mod tests {
1259    use alloc::collections::BTreeSet;
1260    use alloc::sync::Arc;
1261
1262    use async_trait::async_trait;
1263    use miden_protocol::assembly::DefaultSourceManager;
1264    use miden_protocol::crypto::merkle::mmr::{Forest, InOrderIndex, PartialMmr};
1265    use miden_protocol::note::{NoteTag, NoteType};
1266    use miden_protocol::{Felt, Word};
1267    use miden_standards::code_builder::CodeBuilder;
1268    use miden_testing::MockChainBuilder;
1269
1270    use super::*;
1271    use crate::testing::mock::MockRpcApi;
1272
    /// Mock note screener that discards all notes, for minimal test setup.
    struct MockScreener;

    #[async_trait(?Send)]
    impl OnNoteReceived for MockScreener {
        // Always returns `Discard`, so no note updates are ever recorded during sync.
        async fn on_note_received(
            &self,
            _committed_note: CommittedNote,
            _public_note: Option<InputNoteRecord>,
        ) -> Result<NoteUpdateAction, ClientError> {
            Ok(NoteUpdateAction::Discard)
        }
    }
1286
    /// Returns a `StateSyncInput` with no tracked accounts, tags, notes, or transactions.
    fn empty() -> StateSyncInput {
        StateSyncInput {
            accounts: vec![],
            note_tags: BTreeSet::new(),
            input_notes: vec![],
            output_notes: vec![],
            uncommitted_transactions: vec![],
        }
    }
1296
1297    // COMPUTE NULLIFIER TX ORDER TESTS
1298    // --------------------------------------------------------------------------------------------
1299
    /// Unit tests for `compute_ordered_nullifiers`.
    mod compute_nullifiers_tests {
        use alloc::vec;

        use miden_protocol::asset::FungibleAsset;
        use miden_protocol::block::BlockNumber;
        use miden_protocol::note::Nullifier;
        use miden_protocol::transaction::{InputNoteCommitment, InputNotes, TransactionHeader};
        use miden_protocol::{Felt, ZERO};

        use crate::rpc::domain::transaction::{
            ACCOUNT_ID_NATIVE_ASSET_FAUCET,
            TransactionRecord as RpcTransactionRecord,
        };

        /// Builds a `Word` whose first element is `n` and the rest are zero.
        fn word(n: u64) -> miden_protocol::Word {
            [Felt::new(n), ZERO, ZERO, ZERO].into()
        }

        /// Builds an RPC transaction record that links state `init_state` -> `final_state`,
        /// consumes one input note per value in `nullifier_vals`, and lands in `block_number`.
        fn make_rpc_tx(
            init_state: u64,
            final_state: u64,
            nullifier_vals: &[u64],
            block_number: u32,
        ) -> RpcTransactionRecord {
            // All records produced by this helper share the same (test) account id.
            let account_id = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE,
            )
            .unwrap();

            let input_notes = InputNotes::new_unchecked(
                nullifier_vals
                    .iter()
                    .map(|v| InputNoteCommitment::from(Nullifier::from_raw(word(*v))))
                    .collect(),
            );

            // A zero-amount fee asset; the fee value is irrelevant to nullifier ordering.
            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            RpcTransactionRecord {
                block_num: BlockNumber::from(block_number),
                transaction_header: TransactionHeader::new(
                    account_id,
                    word(init_state),
                    word(final_state),
                    input_notes,
                    vec![],
                    fee,
                ),
                output_notes: vec![],
                erased_output_note_ids: vec![],
            }
        }

        #[test]
        fn chains_rpc_transactions_by_state_commitment() {
            // Chain: tx_a (state 1->2) -> tx_b (state 2->3) -> tx_c (state 3->4)
            // Passed in reverse order to verify chaining uses state, not insertion order.
            let tx_a = make_rpc_tx(1, 2, &[10], 5);
            let tx_b = make_rpc_tx(2, 3, &[20], 5);
            let tx_c = make_rpc_tx(3, 4, &[30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx_c, tx_a, tx_b]);

            assert_eq!(result[0], Nullifier::from_raw(word(10)));
            assert_eq!(result[1], Nullifier::from_raw(word(20)));
            assert_eq!(result[2], Nullifier::from_raw(word(30)));
        }

        #[test]
        fn groups_independently_by_account_and_block() {
            // Account A, block 5: two chained txs.
            let tx_a1 = make_rpc_tx(1, 2, &[10], 5);
            let tx_a2 = make_rpc_tx(2, 3, &[20], 5);

            // Account A, block 6: independent chain.
            let tx_a3 = make_rpc_tx(3, 4, &[30], 6);

            // Account B, block 5: independent chain.
            let account_b = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET,
            )
            .unwrap();

            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            // Built manually since `make_rpc_tx` always uses account A.
            let tx_b1 = RpcTransactionRecord {
                block_num: BlockNumber::from(5u32),
                transaction_header: TransactionHeader::new(
                    account_b,
                    word(100),
                    word(200),
                    InputNotes::new_unchecked(vec![InputNoteCommitment::from(
                        Nullifier::from_raw(word(40)),
                    )]),
                    vec![],
                    fee,
                ),
                output_notes: vec![],
                erased_output_note_ids: vec![],
            };

            let result = super::super::compute_ordered_nullifiers(&[tx_a2, tx_b1, tx_a3, tx_a1]);

            // Nullifiers are ordered by chain position within each (account, block) group.
            // The exact global indices depend on BTreeMap iteration order of the groups.
            let pos = |val: u64| -> usize {
                result.iter().position(|n| *n == Nullifier::from_raw(word(val))).unwrap()
            };

            // Within the same group, chain order is preserved.
            assert!(pos(10) < pos(20)); // A, block 5: pos 0 < pos 1
            // Nullifiers from different groups are all present.
            assert!(result.contains(&Nullifier::from_raw(word(30)))); // A, block 6
            assert!(result.contains(&Nullifier::from_raw(word(40)))); // B, block 5
        }

        #[test]
        fn multiple_nullifiers_per_transaction_are_consecutive() {
            // Single tx consuming 3 notes — all should appear consecutively.
            // NOTE(review): the asserts check presence only; with a single transaction the
            // consecutive ordering is implied, but a positional check would be stronger.
            let tx = make_rpc_tx(1, 2, &[10, 20, 30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx]);

            assert_eq!(result.len(), 3);
            assert!(result.contains(&Nullifier::from_raw(word(10))));
            assert!(result.contains(&Nullifier::from_raw(word(20))));
            assert!(result.contains(&Nullifier::from_raw(word(30))));
        }

        #[test]
        fn empty_input_returns_empty_vec() {
            let result = super::super::compute_ordered_nullifiers(&[]);
            assert!(result.is_empty());
        }
    }
1439
1440    // CONSUMED NOTE ORDERING INTEGRATION TESTS
1441    // --------------------------------------------------------------------------------------------
1442
    /// Mock note screener that commits all notes matching tracked input notes.
    /// This ensures committed notes get their inclusion proofs set during sync.
    ///
    /// Stateless unit struct; the `OnNoteReceived` impl below unconditionally returns
    /// `NoteUpdateAction::Commit` for every note it is handed.
    struct CommitAllScreener;
1446
1447    #[async_trait(?Send)]
1448    impl OnNoteReceived for CommitAllScreener {
1449        async fn on_note_received(
1450            &self,
1451            committed_note: CommittedNote,
1452            _public_note: Option<InputNoteRecord>,
1453        ) -> Result<NoteUpdateAction, ClientError> {
1454            Ok(NoteUpdateAction::Commit(committed_note))
1455        }
1456    }
1457
1458    use miden_protocol::account::Account;
1459    use miden_protocol::note::Note;
1460
    /// Builds a `MockChain` where 3 notes are consumed by chained transactions in the same block.
    ///
    /// Returns the chain, the account, and the 3 notes (in consumption order).
    ///
    /// The three consume transactions are "chained": each one executes against the
    /// account state produced by the previous one (S0→S1→S2→S3), and all three are
    /// added as pending before a single block is proven, so they land in one block.
    async fn build_chain_with_chained_consume_txs() -> (miden_testing::MockChain, Account, [Note; 3])
    {
        use miden_protocol::asset::{Asset, FungibleAsset};
        use miden_protocol::note::NoteType;
        use miden_protocol::testing::account_id::{
            ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET,
            ACCOUNT_ID_SENDER,
        };
        use miden_testing::{MockChainBuilder, TxContextInput};

        // Well-known testing IDs for the note sender and the asset's faucet.
        let sender_id: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
        let faucet_id: AccountId = ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET.try_into().unwrap();

        let mut builder = MockChainBuilder::new();
        let account = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let account_id = account.id();

        // Three identical P2ID notes targeting the account, each carrying 100 units.
        let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 100u64).unwrap());
        let note1 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();
        let note2 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();
        let note3 = builder
            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
            .unwrap();

        let mut chain = builder.build().unwrap();
        chain.prove_next_block().unwrap(); // block 1: makes genesis notes consumable

        // Execute 3 chained consume transactions (state S0→S1→S2→S3).
        let mut current_account = account.clone();
        for note in [&note1, &note2, &note3] {
            // Build and execute a tx that consumes exactly this one note against the
            // latest local account state.
            let tx = Box::pin(
                chain
                    .build_tx_context(
                        TxContextInput::Account(current_account.clone()),
                        &[],
                        core::slice::from_ref(note),
                    )
                    .unwrap()
                    .build()
                    .unwrap()
                    .execute(),
            )
            .await
            .unwrap();
            // Advance the local account state so the next tx chains off this one.
            current_account.apply_delta(tx.account_delta()).unwrap();
            chain.add_pending_executed_transaction(&tx).unwrap();
        }

        chain.prove_next_block().unwrap(); // block 2: all 3 txs in one block
        // Note: returns the ORIGINAL (pre-consume) account, not `current_account`.
        (chain, account, [note1, note2, note3])
    }
1519
1520    /// Verifies that `consumed_tx_order` is correctly set when multiple chained transactions
1521    /// for the same account consume notes in the same block.
1522    #[tokio::test]
1523    async fn sync_state_sets_consumed_tx_order_for_chained_transactions() {
1524        use miden_protocol::note::NoteMetadata;
1525
1526        let (chain, account, [note1, note2, note3]) = build_chain_with_chained_consume_txs().await;
1527
1528        let mock_rpc = MockRpcApi::new(chain);
1529        let state_sync =
1530            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(CommitAllScreener), None);
1531
1532        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1533        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1534
1535        let input_notes: Vec<InputNoteRecord> = [&note1, &note2, &note3]
1536            .into_iter()
1537            .map(|n| InputNoteRecord::from(n.clone()))
1538            .collect();
1539
1540        let note_tags: BTreeSet<NoteTag> =
1541            input_notes.iter().filter_map(|n| n.metadata().map(NoteMetadata::tag)).collect();
1542
1543        let account_id = account.id();
1544        let sync_input = StateSyncInput {
1545            accounts: vec![AccountHeader::from(account)],
1546            note_tags,
1547            input_notes,
1548            output_notes: vec![],
1549            uncommitted_transactions: vec![],
1550        };
1551
1552        let update = state_sync.sync_state(&mut partial_mmr, sync_input).await.unwrap();
1553
1554        let updated_notes: Vec<_> = update.note_updates.updated_input_notes().collect();
1555
1556        let find_order = |note_id: NoteId| -> Option<u32> {
1557            updated_notes
1558                .iter()
1559                .find(|n| n.id() == note_id)
1560                .and_then(|n| n.consumed_tx_order())
1561        };
1562
1563        assert_eq!(find_order(note1.id()), Some(0), "note1 should have tx_order 0");
1564        assert_eq!(find_order(note2.id()), Some(1), "note2 should have tx_order 1");
1565        assert_eq!(find_order(note3.id()), Some(2), "note3 should have tx_order 2");
1566
1567        // Since there are no uncommitted_transactions, these notes were consumed by a tracked
1568        // account via external transactions. Verify that consumer_account is populated.
1569        for note in &updated_notes {
1570            let record = note.inner();
1571            assert!(record.is_consumed(), "note should be in a consumed state");
1572            assert_eq!(
1573                record.consumer_account(),
1574                Some(account_id),
1575                "externally-consumed notes by a tracked account should have consumer_account set",
1576            );
1577        }
1578    }
1579
1580    #[tokio::test]
1581    async fn sync_state_across_multiple_iterations_with_same_mmr() {
1582        // Setup: create a mock chain and advance it so there are blocks to sync.
1583        let mock_rpc = MockRpcApi::default();
1584        mock_rpc.advance_blocks(3);
1585        let chain_tip_1 = mock_rpc.get_chain_tip_block_num();
1586
1587        let state_sync =
1588            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1589
1590        // Build the initial PartialMmr from genesis (only 1 leaf).
1591        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1592        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1593        assert_eq!(partial_mmr.forest().num_leaves(), 1);
1594
1595        // First sync
1596        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1597
1598        assert_eq!(update.block_num, chain_tip_1);
1599        let forest_1 = partial_mmr.forest();
1600        // The MMR should contain one leaf per block (genesis + the new blocks).
1601        assert_eq!(forest_1.num_leaves(), chain_tip_1.as_u32() as usize + 1);
1602
1603        // Second sync
1604        mock_rpc.advance_blocks(2);
1605        let chain_tip_2 = mock_rpc.get_chain_tip_block_num();
1606
1607        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1608
1609        assert_eq!(update.block_num, chain_tip_2);
1610        let forest_2 = partial_mmr.forest();
1611        assert!(forest_2 > forest_1);
1612        assert_eq!(forest_2.num_leaves(), chain_tip_2.as_u32() as usize + 1);
1613
1614        // Third sync (no new blocks)
1615        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1616
1617        assert_eq!(update.block_num, chain_tip_2);
1618        assert_eq!(partial_mmr.forest(), forest_2);
1619    }
1620
    /// Builds a mock chain with a faucet that mints `num_blocks` notes, one per block.
    /// Returns the chain and the set of note tags for filtering.
    ///
    /// Each loop iteration executes one faucet `mint_and_send` transaction (via an
    /// inline MASM tx script), records the resulting output-note tags, and proves a
    /// block — so every minted note lands in its own block.
    async fn build_chain_with_mint_notes(
        num_blocks: u64,
    ) -> (miden_testing::MockChain, BTreeSet<NoteTag>) {
        let mut builder = MockChainBuilder::new();
        // Fungible faucet ("TST", max supply 10_000) with Falcon-based basic auth.
        let faucet = builder
            .add_existing_basic_faucet(
                miden_testing::Auth::BasicAuth {
                    auth_scheme: miden_protocol::account::auth::AuthScheme::Falcon512Poseidon2,
                },
                "TST",
                10_000,
                None,
            )
            .unwrap();
        // Target account is only needed so the chain has a second account; unused below.
        let _target = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let mut chain = builder.build().unwrap();

        // Fixed recipient digest and the default tag used for every minted note.
        let recipient: Word = [0u32, 1, 2, 3].into();
        let tag = NoteTag::default();
        // Local copy of the faucet account, advanced after each executed tx.
        let mut faucet_account = faucet.clone();
        let mut note_tags = BTreeSet::new();

        for i in 0..num_blocks {
            // Vary the minted amount per block (100, 101, ...).
            let amount = Felt::new(100 + i);
            let source_manager = Arc::new(DefaultSourceManager::default());
            // MASM script pushing the mint_and_send arguments in stack order:
            // recipient word, note type, tag, amount.
            let tx_script_code = format!(
                "
                begin
                    padw padw push.0
                    push.{r0}.{r1}.{r2}.{r3}
                    push.{note_type}
                    push.{tag}
                    push.{amount}
                    call.::miden::standards::faucets::basic_fungible::mint_and_send
                    dropw dropw dropw dropw
                end
                ",
                r0 = recipient[0],
                r1 = recipient[1],
                r2 = recipient[2],
                r3 = recipient[3],
                note_type = NoteType::Private as u8,
                tag = u32::from(tag),
                amount = amount,
            );
            let tx_script = CodeBuilder::with_source_manager(source_manager.clone())
                .compile_tx_script(tx_script_code)
                .unwrap();
            // Execute the mint tx against the current local faucet state.
            let tx = Box::pin(
                chain
                    .build_tx_context(
                        miden_testing::TxContextInput::Account(faucet_account.clone()),
                        &[],
                        &[],
                    )
                    .unwrap()
                    .tx_script(tx_script)
                    .with_source_manager(source_manager)
                    .build()
                    .unwrap()
                    .execute(),
            )
            .await
            .unwrap();

            // Collect the tags of the notes this tx actually produced.
            for output_note in tx.output_notes().iter() {
                note_tags.insert(output_note.metadata().tag());
            }

            // Advance local faucet state and seal the tx into its own block.
            faucet_account.apply_delta(tx.account_delta()).unwrap();
            chain.add_pending_executed_transaction(&tx).unwrap();
            chain.prove_next_block().unwrap();
        }

        (chain, note_tags)
    }
1699
1700    /// Verifies that the sync correctly processes notes committed in multiple blocks
1701    /// (batched `SyncNotes` response) and tracks their blocks in the partial MMR.
1702    ///
1703    /// This test creates a faucet and mints notes in separate blocks (blocks 1, 2, 3),
1704    /// so `sync_notes` returns multiple `NoteSyncBlock`s. It then verifies:
1705    /// - The MMR is advanced to the chain tip
1706    /// - Blocks containing relevant notes are tracked in the partial MMR via `track()`
1707    /// - Note inclusion proofs are set correctly
1708    /// - Block headers for note blocks are stored
1709    #[tokio::test]
1710    async fn sync_state_tracks_note_blocks_in_mmr() {
1711        let (chain, note_tags) = build_chain_with_mint_notes(3).await;
1712        let mock_rpc = MockRpcApi::new(chain);
1713        let chain_tip = mock_rpc.get_chain_tip_block_num();
1714
1715        // Verify the mock returns notes across multiple blocks.
1716        let note_sync =
1717            mock_rpc.sync_notes(BlockNumber::from(0u32), None, &note_tags).await.unwrap();
1718        assert!(
1719            note_sync.blocks.len() >= 2,
1720            "expected notes in multiple blocks, got {}",
1721            note_sync.blocks.len()
1722        );
1723
1724        // Collect the block numbers that have notes.
1725        let note_block_nums: BTreeSet<BlockNumber> =
1726            note_sync.blocks.iter().map(|b| b.block_header.block_num()).collect();
1727
1728        // Test that fetch_sync_data returns note blocks with valid MMR paths that
1729        // can be used to track blocks in the partial MMR.
1730        let state_sync =
1731            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1732
1733        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1734        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1735
1736        let sync_data = state_sync
1737            .fetch_sync_data(BlockNumber::GENESIS, &[], &Arc::new(note_tags.clone()))
1738            .await
1739            .unwrap()
1740            .expect("should have progressed past genesis");
1741
1742        // Should have advanced to the chain tip.
1743        assert_eq!(sync_data.chain_tip_header.block_num(), chain_tip);
1744        assert!(!sync_data.note_blocks.is_empty(), "should have note blocks");
1745
1746        // Apply the MMR delta and add the chain tip block.
1747        let _auth_nodes: Vec<(InOrderIndex, Word)> =
1748            partial_mmr.apply(sync_data.mmr_delta).map_err(StoreError::MmrError).unwrap();
1749        partial_mmr.add(sync_data.chain_tip_header.commitment(), false);
1750
1751        assert_eq!(partial_mmr.forest().num_leaves(), chain_tip.as_u32() as usize + 1);
1752
1753        // Track each note block using the MMR path from the sync_notes response.
1754        for block in &sync_data.note_blocks {
1755            let bn = block.block_header.block_num();
1756            partial_mmr
1757                .track(bn.as_usize(), block.block_header.commitment(), &block.mmr_path)
1758                .map_err(StoreError::MmrError)
1759                .unwrap();
1760
1761            assert!(
1762                partial_mmr.is_tracked(bn.as_usize()),
1763                "block {bn} should be tracked after calling track()"
1764            );
1765        }
1766
1767        // Verify the tracked blocks match the note blocks.
1768        for &bn in &note_block_nums {
1769            assert!(
1770                partial_mmr.is_tracked(bn.as_usize()),
1771                "block {bn} with notes should be tracked in partial MMR"
1772            );
1773        }
1774    }
1775
1776    /// Tests that erased notes are marked as consumed when a committed transaction
1777    /// reports output notes that were erased by same-batch note erasure.
1778    ///
1779    /// This simulates same-batch note erasure: the transaction was committed, its header
1780    /// says it produced a note, but the note was erased and doesn't exist on the node.
1781    #[tokio::test]
1782    async fn erased_notes_are_marked_as_consumed() {
1783        use miden_protocol::block::BlockNumber;
1784        use miden_protocol::note::{
1785            NoteAssets,
1786            NoteMetadata,
1787            NoteRecipient,
1788            NoteStorage,
1789            NoteType,
1790        };
1791        use miden_protocol::testing::account_id::ACCOUNT_ID_SENDER;
1792
1793        use crate::store::{OutputNoteRecord, OutputNoteState};
1794
1795        // Create a public output note. It won't be in the mock chain (simulating erasure).
1796        let sender_id: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
1797        let metadata = NoteMetadata::new(sender_id, NoteType::Public);
1798        let script = CodeBuilder::new()
1799            .compile_note_script("@note_script\npub proc main\n    nop\nend")
1800            .unwrap();
1801        let recipient = NoteRecipient::new(
1802            Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
1803            script,
1804            NoteStorage::new(vec![]).unwrap(),
1805        );
1806        let output_note = OutputNoteRecord::new(
1807            recipient.digest(),
1808            NoteAssets::new(vec![]).unwrap(),
1809            metadata,
1810            OutputNoteState::ExpectedFull { recipient },
1811            BlockNumber::from(1u32),
1812        );
1813        let note_id = output_note.id();
1814
1815        // Build a NoteUpdateTracker with the output note.
1816        let mut note_updates = NoteUpdateTracker::new(vec![], vec![output_note]);
1817
1818        // Mark the note as erased (created and consumed in the same batch).
1819        let block_num = BlockNumber::from(3u32);
1820        note_updates
1821            .mark_erased_note_as_consumed(note_id, block_num)
1822            .expect("marking erased note should succeed");
1823
1824        let updated = note_updates
1825            .updated_output_notes()
1826            .find(|n| n.id() == note_id)
1827            .expect("output note should be in the update");
1828
1829        assert!(
1830            updated.inner().is_consumed(),
1831            "output note should be consumed after erasure detection, but state is: {}",
1832            updated.inner().state()
1833        );
1834    }
1835}