// miden_client/sync/state_sync.rs

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::sync::Arc;
use alloc::vec::Vec;

use async_trait::async_trait;
use miden_protocol::account::{
    Account,
    AccountCode,
    AccountDelta,
    AccountHeader,
    AccountId,
    AccountStorage,
    AccountStorageDelta,
    AccountVaultDelta,
    StorageMapKey,
    StorageSlot,
    StorageSlotContent,
    StorageSlotName,
    StorageSlotType,
};
use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey};
use miden_protocol::block::{BlockHeader, BlockNumber};
use miden_protocol::crypto::merkle::mmr::{MmrDelta, PartialMmr};
use miden_protocol::note::{Note, NoteId, NoteTag, NoteType, Nullifier};
use miden_protocol::transaction::InputNoteCommitment;
use miden_protocol::{EMPTY_WORD, Felt, Word};
use tracing::info;

use super::state_sync_update::TransactionUpdateTracker;
use super::{AccountUpdates, PublicAccountUpdate, StateSyncUpdate};
use crate::ClientError;
use crate::note::NoteUpdateTracker;
use crate::rpc::domain::account::{
    AccountDetails,
    AccountStorageMapDetails,
    AccountStorageRequirements,
    FetchedAccount,
};
use crate::rpc::domain::note::{CommittedNote, NoteSyncBlock};
use crate::rpc::domain::storage_map::StorageMapUpdate;
use crate::rpc::domain::transaction::{
    TransactionInclusion,
    TransactionRecord as RpcTransactionRecord,
};
use crate::rpc::{AccountStateAt, NodeRpcClient, RpcError};
use crate::store::{AccountStorageFilter, InputNoteRecord, OutputNoteRecord, Store, StoreError};
use crate::transaction::TransactionRecord;
49
50// STATE UPDATE DATA
51// ================================================================================================
52
/// Raw data fetched from the node needed to sync the client to the chain tip.
///
/// Aggregates the responses of `sync_chain_mmr`, `sync_notes`, `get_notes_by_id`, and
/// `sync_transactions`. This may contain more data than a particular client needs to store — it is
/// filtered and transformed into a [`StateSyncUpdate`] before being applied to the store.
struct RawStateSyncData {
    /// MMR delta covering the full range from `current_block` to `chain_tip`.
    mmr_delta: MmrDelta,
    /// Chain tip block header.
    chain_tip_header: BlockHeader,
    /// Blocks with matching notes that the client is interested in (as filtered by note tags).
    note_blocks: Vec<NoteSyncBlock>,
    /// Full note bodies for public notes, keyed by note ID.
    public_notes: BTreeMap<NoteId, Note>,
    /// Account commitment updates for the synced range.
    account_commitment_updates: Vec<(AccountId, Word)>,
    /// Transaction inclusions for the synced range.
    transactions: Vec<TransactionInclusion>,
    /// Nullifiers for the synced range.
    nullifiers: Vec<Nullifier>,
}
74
75// SYNC REQUEST
76// ================================================================================================
77
/// Bundles the client state needed to perform a sync operation.
///
/// The sync process uses these inputs to:
/// - Request account commitment updates from the node for the provided accounts.
/// - Filter which note inclusions the node returns based on the provided note tags.
/// - Follow the lifecycle of every tracked note (input and output), transitioning them from pending
///   to committed to consumed as the network state advances.
/// - Track uncommitted transactions so they can be marked as committed when the node confirms them,
///   or discarded when they become stale.
///
/// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build a default input
/// from the client state, or construct this struct manually for custom sync scenarios.
pub struct StateSyncInput {
    /// Account headers to request commitment updates for.
    pub accounts: Vec<AccountHeader>,
    /// Note tags that the node uses to filter which note inclusions to return.
    pub note_tags: BTreeSet<NoteTag>,
    /// Input notes whose lifecycle should be followed during sync.
    pub input_notes: Vec<InputNoteRecord>,
    /// Output notes whose lifecycle should be followed during sync.
    pub output_notes: Vec<OutputNoteRecord>,
    /// Transactions to track for commitment or discard during sync.
    pub uncommitted_transactions: Vec<TransactionRecord>,
}
102
103// SYNC CALLBACKS
104// ================================================================================================
105
/// The action to be taken when a note update is received as part of the sync response.
// The `Commit`/`Insert` variants carry full note records, which dwarf the unit `Discard`
// variant; this is accepted because the enum is short-lived and not stored in bulk.
#[allow(clippy::large_enum_variant)]
pub enum NoteUpdateAction {
    /// The note commit update is relevant and the specified note should be marked as committed in
    /// the store, storing its inclusion proof.
    Commit(CommittedNote),
    /// The public note is relevant and should be inserted into the store.
    Insert(InputNoteRecord),
    /// The note update is not relevant and should be discarded.
    Discard,
}
117
#[async_trait(?Send)]
pub trait OnNoteReceived {
    /// Callback that gets executed when a new note is received as part of the sync response.
    ///
    /// It receives:
    ///
    /// - The committed note received from the network.
    /// - An optional note record that corresponds to the state of the note in the network (only if
    ///   the note is public).
    ///
    /// It returns an enum indicating the action to take for the received note update: whether
    /// the note should be marked as committed, inserted as a new public note, or discarded.
    async fn on_note_received(
        &self,
        committed_note: CommittedNote,
        public_note: Option<InputNoteRecord>,
    ) -> Result<NoteUpdateAction, ClientError>;
}
136// STATE SYNC
137// ================================================================================================
138
/// The state sync component encompasses the client's sync logic. It is then used to request
/// updates from the node and apply them to the relevant elements. The updates are then returned and
/// can be applied to the store to persist the changes.
#[derive(Clone)]
pub struct StateSync {
    /// The RPC client used to communicate with the node.
    rpc_api: Arc<dyn NodeRpcClient>,
    /// The client's store, used to fetch account storage and vault data on demand during
    /// delta-based sync of public accounts. When `None`, oversized public accounts fall back
    /// to `get_account_details` (full sync from block 0).
    store: Option<Arc<dyn Store>>,
    /// Responsible for checking the relevance of notes and executing the
    /// [`OnNoteReceived`] callback when a new note inclusion is received.
    note_screener: Arc<dyn OnNoteReceived>,
    /// Number of blocks after which pending transactions are considered stale and discarded.
    /// If `None`, there is no limit and transactions will be kept indefinitely.
    tx_discard_delta: Option<u32>,
    /// Whether to check for nullifiers during state sync. When enabled, the component will query
    /// the nullifiers for unspent notes at each sync step. This allows to detect when tracked
    /// notes have been consumed externally and discard local transactions that depend on them.
    sync_nullifiers: bool,
}
161
162impl StateSync {
163    /// Creates a new instance of the state sync component.
164    ///
165    /// The nullifiers sync is enabled by default. To disable it, see
166    /// [`Self::disable_nullifier_sync`].
167    ///
168    /// # Arguments
169    ///
170    /// * `rpc_api` - The RPC client used to communicate with the node.
171    /// * `store` - Optional store for on-demand account data access during delta sync.
172    /// * `note_screener` - The note screener used to check the relevance of notes.
173    /// * `tx_discard_delta` - Number of blocks after which pending transactions are discarded.
174    pub fn new(
175        rpc_api: Arc<dyn NodeRpcClient>,
176        store: Option<Arc<dyn Store>>,
177        note_screener: Arc<dyn OnNoteReceived>,
178        tx_discard_delta: Option<u32>,
179    ) -> Self {
180        Self {
181            rpc_api,
182            store,
183            note_screener,
184            tx_discard_delta,
185            sync_nullifiers: true,
186        }
187    }
188
    /// Disables the nullifier sync.
    ///
    /// When disabled, the component will not query the node for new nullifiers after each sync
    /// step. This is useful for clients that don't need to track note consumption, such as
    /// faucets. Can be re-enabled at any time via [`Self::enable_nullifier_sync`].
    pub fn disable_nullifier_sync(&mut self) {
        self.sync_nullifiers = false;
    }
197
    /// Enables the nullifier sync.
    ///
    /// When enabled (the default), the component queries the node for nullifiers of unspent
    /// notes at each sync step so externally-consumed notes can be detected.
    pub fn enable_nullifier_sync(&mut self) {
        self.sync_nullifiers = true;
    }
202
    /// Syncs the state of the client with the chain tip of the node, returning the updates that
    /// should be applied to the store.
    ///
    /// Use [`Client::build_sync_input()`](`crate::Client::build_sync_input()`) to build the default
    /// input, or assemble it manually for custom sync. The `current_partial_mmr` is taken by
    /// mutable reference so callers can keep it in memory across syncs.
    ///
    /// During the sync process, the following steps are performed:
    /// 1. A request is sent to the node to get the state updates. This request includes tracked
    ///    account IDs and the tags of notes that might have changed or that might be of interest to
    ///    the client.
    /// 2. A response is received with the current state of the network. The response includes
    ///    information about new and committed notes, updated accounts, and committed transactions.
    /// 3. Tracked public accounts are updated and private accounts are validated against the node
    ///    state.
    /// 4. Tracked notes are updated with their new states. Notes might be committed or nullified
    ///    during the sync processing.
    /// 5. New notes are checked, and only relevant ones are stored. Relevance is determined by the
    ///    [`OnNoteReceived`] callback.
    /// 6. Transactions are updated with their new states. Transactions might be committed or
    ///    discarded.
    /// 7. The MMR is updated with the new peaks and authentication nodes.
    pub async fn sync_state(
        &self,
        current_partial_mmr: &mut PartialMmr,
        input: StateSyncInput,
    ) -> Result<StateSyncUpdate, ClientError> {
        let StateSyncInput {
            accounts,
            note_tags,
            input_notes,
            output_notes,
            uncommitted_transactions,
        } = input;
        // The partial MMR has one leaf per block, so the latest locally-known block number
        // is `num_leaves - 1` (saturating for the empty forest).
        let block_num = u32::try_from(current_partial_mmr.forest().num_leaves().saturating_sub(1))
            .map_err(|_| ClientError::InvalidPartialMmrForest)?
            .into();

        let note_tags = Arc::new(note_tags);
        let account_ids: Vec<AccountId> = accounts.iter().map(AccountHeader::id).collect();
        let tracked_accounts: BTreeSet<AccountId> = account_ids.iter().copied().collect();

        let mut state_sync_update = StateSyncUpdate {
            block_num,
            note_updates: NoteUpdateTracker::new(input_notes, output_notes)
                .with_tracked_accounts(tracked_accounts),
            transaction_updates: TransactionUpdateTracker::new(uncommitted_transactions),
            ..Default::default()
        };
        let Some(mut sync_data) = self
            .fetch_sync_data(state_sync_update.block_num, &account_ids, &note_tags)
            .await?
        else {
            // No progress — already at the tip.
            return Ok(state_sync_update);
        };

        state_sync_update.block_num = sync_data.chain_tip_header.block_num();

        // Build input note records for public notes from the fetched note bodies and the
        // inclusion proofs already present in the note blocks. Public notes whose inclusion
        // proof cannot be found in any note block are silently skipped.
        let mut public_note_records: BTreeMap<NoteId, InputNoteRecord> = BTreeMap::new();
        for (note_id, note) in core::mem::take(&mut sync_data.public_notes) {
            let inclusion_proof = sync_data
                .note_blocks
                .iter()
                .find_map(|b| b.notes.get(&note_id))
                .map(|committed| committed.inclusion_proof().clone());

            if let Some(inclusion_proof) = inclusion_proof {
                let state = crate::store::input_note_states::UnverifiedNoteState {
                    metadata: note.metadata().clone(),
                    inclusion_proof,
                }
                .into();
                let record = InputNoteRecord::new(note.into(), None, state);
                public_note_records.insert(record.id(), record);
            }
        }

        self.account_state_sync(
            &mut state_sync_update.account_updates,
            &accounts,
            &sync_data.account_commitment_updates,
            block_num,
        )
        .await?;

        // Apply local changes: update the MMR, screen notes, and apply state transitions.
        self.apply_sync_result(
            sync_data,
            &public_note_records,
            &mut state_sync_update,
            current_partial_mmr,
        )
        .await?;

        if self.sync_nullifiers {
            self.nullifiers_state_sync(&mut state_sync_update, block_num).await?;
        }

        Ok(state_sync_update)
    }
306
    /// Fetches the sync data from the node by calling the following endpoints:
    /// 1. `sync_chain_mmr` — discovers the chain tip, gets the MMR delta and chain tip header.
    /// 2. `sync_notes` — loops until the full range to the chain tip is covered (handles paginated
    ///    responses).
    /// 3. `get_notes_by_id` — fetches full metadata for notes with attachments.
    /// 4. `sync_transactions` — gets transaction data for the full range.
    ///
    /// Returns `None` when the client is already at the chain tip (no progress).
    async fn fetch_sync_data(
        &self,
        current_block_num: BlockNumber,
        account_ids: &[AccountId],
        note_tags: &Arc<BTreeSet<NoteTag>>,
    ) -> Result<Option<RawStateSyncData>, ClientError> {
        // Step 1: Fetch the MMR delta and chain tip header. `None` as the upper bound asks the
        // node for everything up to its current tip.
        let chain_mmr_info = self.rpc_api.sync_chain_mmr(current_block_num, None).await?;
        let chain_tip = chain_mmr_info.block_to;

        // No progress — already at the tip.
        if chain_tip == current_block_num {
            info!(block_num = %current_block_num, "Already at chain tip, nothing to sync.");
            return Ok(None);
        }

        info!(
            block_from = %current_block_num,
            block_to = %chain_tip,
            "Syncing state.",
        );

        // Step 2: Paginate sync_notes using the same chain tip so MMR paths are opened at
        // a consistent forest.
        let sync_notes_result = self
            .rpc_api
            .sync_notes_with_details(current_block_num, Some(chain_tip), note_tags.as_ref())
            .await?;

        let note_count: usize = sync_notes_result.blocks.iter().map(|b| b.notes.len()).sum();
        info!(
            blocks_with_notes = sync_notes_result.blocks.len(),
            notes = note_count,
            public_notes = sync_notes_result.public_notes.len(),
            "Fetched note sync data.",
        );

        // Step 3: Gather transactions for tracked accounts over the full range.
        let (account_commitment_updates, transactions, nullifiers) =
            self.fetch_transaction_data(current_block_num, chain_tip, account_ids).await?;

        Ok(Some(RawStateSyncData {
            mmr_delta: chain_mmr_info.mmr_delta,
            chain_tip_header: chain_mmr_info.block_header,
            note_blocks: sync_notes_result.blocks,
            public_notes: sync_notes_result.public_notes,
            account_commitment_updates,
            transactions,
            nullifiers,
        }))
    }
366
367    /// Fetches transaction data for the given range and account IDs.
368    async fn fetch_transaction_data(
369        &self,
370        block_from: BlockNumber,
371        block_to: BlockNumber,
372        account_ids: &[AccountId],
373    ) -> Result<(Vec<(AccountId, Word)>, Vec<TransactionInclusion>, Vec<Nullifier>), ClientError>
374    {
375        if account_ids.is_empty() {
376            return Ok((vec![], vec![], vec![]));
377        }
378
379        let tx_info = self
380            .rpc_api
381            .sync_transactions(block_from, Some(block_to), account_ids.to_vec())
382            .await?;
383
384        let transaction_records = tx_info.transaction_records;
385
386        let account_updates = derive_account_commitment_updates(&transaction_records);
387        let nullifiers = compute_ordered_nullifiers(&transaction_records);
388
389        let tx_inclusions = transaction_records
390            .into_iter()
391            .map(|r| {
392                let nullifiers = r
393                    .transaction_header
394                    .input_notes()
395                    .iter()
396                    .map(InputNoteCommitment::nullifier)
397                    .collect();
398                TransactionInclusion {
399                    transaction_id: r.transaction_header.id(),
400                    block_num: r.block_num,
401                    account_id: r.transaction_header.account_id(),
402                    initial_state_commitment: r.transaction_header.initial_state_commitment(),
403                    nullifiers,
404                    output_notes: r.output_notes,
405                    erased_output_notes: r.erased_output_notes,
406                }
407            })
408            .collect();
409
410        Ok((account_updates, tx_inclusions, nullifiers))
411    }
412
413    // HELPERS
414    // --------------------------------------------------------------------------------------------
415
    /// Applies sync results to the local state update.
    ///
    /// Applies fetched sync data to the local state:
    /// 1. Advances the partial MMR (delta + chain tip leaf).
    /// 2. Screens note blocks and tracks relevant ones in the MMR.
    /// 3. Applies transaction and nullifier updates.
    async fn apply_sync_result(
        &self,
        sync_data: RawStateSyncData,
        public_note_records: &BTreeMap<NoteId, InputNoteRecord>,
        state_sync_update: &mut StateSyncUpdate,
        current_partial_mmr: &mut PartialMmr,
    ) -> Result<(), ClientError> {
        // `public_notes` was already drained by the caller; `account_commitment_updates` is
        // consumed by `account_state_sync`, hence the `..` rest pattern.
        let RawStateSyncData {
            mmr_delta,
            chain_tip_header,
            note_blocks,
            nullifiers,
            transactions,
            ..
        } = sync_data;

        // Advance the partial MMR: apply delta (up to chain_tip - 1), capture peaks for
        // storage, then add the chain tip leaf (which the delta excludes due to the
        // one-block lag in block header MMR commitments).
        let mut new_authentication_nodes =
            current_partial_mmr.apply(mmr_delta).map_err(StoreError::MmrError)?;
        let new_peaks = current_partial_mmr.peaks();
        new_authentication_nodes
            .append(&mut current_partial_mmr.add(chain_tip_header.commitment(), false));

        state_sync_update.block_updates.insert(
            chain_tip_header.clone(),
            false,
            new_peaks,
            new_authentication_nodes,
        );

        // Screen each note block and track relevant ones in the partial MMR using the
        // authentication path from the sync_notes response.
        for block in note_blocks {
            let found_relevant_note = self
                .note_state_sync(
                    &mut state_sync_update.note_updates,
                    block.notes,
                    &block.block_header,
                    public_note_records,
                )
                .await?;

            if found_relevant_note {
                let block_pos = block.block_header.block_num().as_usize();

                // Snapshot the node set before tracking so we can diff afterwards and
                // persist only the nodes that tracking this block added.
                let nodes_before: BTreeMap<_, _> =
                    current_partial_mmr.nodes().map(|(k, v)| (*k, *v)).collect();

                if !current_partial_mmr.is_tracked(block_pos) {
                    current_partial_mmr
                        .track(block_pos, block.block_header.commitment(), &block.mmr_path)
                        .map_err(StoreError::MmrError)?;
                }

                // Always collect new authentication nodes — even when the block was
                // already tracked from the MMR delta, the delta's nodes may not include
                // the full authentication path needed to reconstruct the PartialMmr
                // from storage later.
                let track_auth_nodes: Vec<_> = current_partial_mmr
                    .nodes()
                    .filter(|(k, _)| !nodes_before.contains_key(k))
                    .map(|(k, v)| (*k, *v))
                    .collect();

                state_sync_update.block_updates.insert(
                    block.block_header,
                    true,
                    current_partial_mmr.peaks(),
                    track_auth_nodes,
                );
            }
        }

        // Apply transaction and nullifier data.
        state_sync_update.note_updates.extend_nullifiers(nullifiers);
        self.transaction_state_sync(
            &mut state_sync_update.transaction_updates,
            &chain_tip_header,
            &transactions,
        );

        // Process each transaction
        for transaction in &transactions {
            // Transition tracked output notes to Committed using inclusion proofs from the
            // transaction sync response. This covers output notes regardless of whether their
            // tags were tracked in the note sync.
            state_sync_update
                .note_updates
                .apply_output_note_inclusion_proofs(&transaction.output_notes)?;

            // Detect output notes erased by same-batch note erasure.
            Self::mark_erased_notes_as_consumed(state_sync_update, transaction);
        }

        Ok(())
    }
520
521    /// Marks output notes that were erased by same-batch note erasure as consumed.
522    ///
523    /// When a note is created and consumed in the same batch, note erasure removes it from
524    /// the block body. The node reports these as erased output notes in the transaction
525    /// record (note ID only, no inclusion proof). We mark them as consumed.
526    fn mark_erased_notes_as_consumed(
527        state_sync_update: &mut StateSyncUpdate,
528        transaction: &TransactionInclusion,
529    ) {
530        for note_header in &transaction.erased_output_notes {
531            // Best-effort: ignore errors for notes not tracked by this client.
532            let _ = state_sync_update
533                .note_updates
534                .mark_erased_note_as_consumed(note_header, transaction.block_num);
535        }
536    }
537
538    /// Compares the state of tracked accounts with the updates received from the node. The method
539    /// Updates the `account_updates` with the details of the accounts that need to be updated.
540    ///
541    /// The account updates might include:
542    /// * Public accounts that have been updated in the node (full or delta-based).
543    /// * Network accounts that have been updated in the node and are being tracked by the client.
544    /// * Private accounts that have been marked as mismatched because the current commitment
545    ///   doesn't match the one received from the node. The client will need to handle these cases
546    ///   as they could be a stale account state or a reason to lock the account.
547    async fn account_state_sync(
548        &self,
549        account_updates: &mut AccountUpdates,
550        accounts: &[AccountHeader],
551        account_commitment_updates: &[(AccountId, Word)],
552        block_num: BlockNumber,
553    ) -> Result<(), ClientError> {
554        // "Public" here includes both Public and Network accounts, since both have
555        // their state stored on-chain and follow the same sync path.
556        let (public_accounts, private_accounts): (Vec<_>, Vec<_>) =
557            accounts.iter().partition(|a| !a.id().is_private());
558
559        self.sync_public_accounts(
560            account_updates,
561            account_commitment_updates,
562            &public_accounts,
563            block_num,
564        )
565        .await?;
566
567        let mismatched_private_accounts = account_commitment_updates
568            .iter()
569            .filter(|(account_id, digest)| {
570                private_accounts
571                    .iter()
572                    .any(|a| a.id() == *account_id && &a.to_commitment() != digest)
573            })
574            .copied()
575            .collect::<Vec<_>>();
576
577        account_updates.extend(AccountUpdates::new(Vec::new(), mismatched_private_accounts));
578
579        Ok(())
580    }
581
    /// Queries the node for updated public accounts and populates `account_updates`.
    ///
    /// When a store is available, storage and vault data are fetched on demand to build
    /// deltas for oversized accounts. Without a store, oversized accounts fall back to
    /// `get_account_details` (full sync from block 0).
    async fn sync_public_accounts(
        &self,
        account_updates: &mut AccountUpdates,
        commitment_updates: &[(AccountId, Word)],
        current_public_accounts: &[&AccountHeader],
        block_num: BlockNumber,
    ) -> Result<(), ClientError> {
        for (id, commitment) in commitment_updates {
            // Only proceed for accounts we track whose local commitment differs from the
            // node-reported one; everything else is up to date or not ours.
            let Some(local_header) = current_public_accounts
                .iter()
                .find(|acc| *id == acc.id() && *commitment != acc.to_commitment())
            else {
                continue;
            };

            let account_id = local_header.id();

            // Build storage requirements and known code from store (if available) to
            // request all entries for every map slot and avoid re-downloading code.
            let (storage_requirements, known_code) =
                self.fetch_local_account_hints(account_id).await;

            let (proof_block_num, proof) = self
                .rpc_api
                .get_account_proof(
                    account_id,
                    storage_requirements,
                    AccountStateAt::ChainTip,
                    known_code,
                    Some(EMPTY_WORD),
                )
                .await
                .map_err(ClientError::RpcError)?;

            let Some(details) = proof.into_parts().1 else {
                // Private account returned — should not happen for public accounts.
                continue;
            };

            // Skip if the remote nonce is not newer than what we already have.
            if details.header.nonce().as_canonical_u64() <= local_header.nonce().as_canonical_u64()
            {
                continue;
            }

            // "Oversized" means the node truncated the response: either the vault has too
            // many assets or some storage map has too many entries to return inline.
            let has_oversized_data = details.vault_details.too_many_assets
                || details.storage_details.map_details.iter().any(|m| m.too_many_entries);

            if has_oversized_data {
                if self.store.is_some() {
                    // Delta path: build an AccountDelta from incremental updates,
                    // fetching storage slots and vault from the store on demand.
                    let delta = self
                        .build_account_delta(&details, local_header, block_num, proof_block_num)
                        .await?;
                    account_updates.extend(AccountUpdates::new(
                        vec![PublicAccountUpdate::Delta {
                            new_header: details.header.clone(),
                            delta,
                        }],
                        Vec::new(),
                    ));
                } else {
                    // No store available — fall back to get_account_details which
                    // handles oversized data internally (syncing from block 0).
                    let response = self
                        .rpc_api
                        .get_account_details(account_id)
                        .await
                        .map_err(ClientError::RpcError)?;

                    match response {
                        FetchedAccount::Public(account, _) => {
                            account_updates.extend(AccountUpdates::new(
                                vec![PublicAccountUpdate::Full(*account)],
                                Vec::new(),
                            ));
                        },
                        // This should not happen since we only fetch public accounts here.
                        FetchedAccount::Private(..) => {},
                    }
                }
            } else {
                // Small account: build directly from the response details.
                let account = Account::try_from(&details).map_err(ClientError::RpcError)?;
                account_updates.extend(AccountUpdates::new(
                    vec![PublicAccountUpdate::Full(account)],
                    Vec::new(),
                ));
            }
        }

        Ok(())
    }
681
682    /// Fetches storage requirements and known code from the store for a given account.
683    ///
684    /// Returns defaults when no store is available.
685    async fn fetch_local_account_hints(
686        &self,
687        account_id: AccountId,
688    ) -> (AccountStorageRequirements, Option<AccountCode>) {
689        let Some(store) = &self.store else {
690            return (AccountStorageRequirements::default(), None);
691        };
692
693        let storage_requirements = store
694            .get_account_storage(account_id, AccountStorageFilter::All)
695            .await
696            .map(|storage| Self::build_storage_requirements(&storage))
697            .unwrap_or_default();
698
699        let known_code = store.get_account_code(account_id).await.ok().flatten();
700
701        (storage_requirements, known_code)
702    }
703
704    /// Builds [`AccountStorageRequirements`] from [`AccountStorage`], requesting all entries for
705    /// every map slot.
706    fn build_storage_requirements(storage: &AccountStorage) -> AccountStorageRequirements {
707        let map_slots = storage.slots().iter().filter_map(|slot: &StorageSlot| {
708            if slot.slot_type() == StorageSlotType::Map {
709                // Passing an empty key list requests all entries for this map slot.
710                Some((slot.name().clone(), core::iter::empty::<&StorageMapKey>()))
711            } else {
712                None
713            }
714        });
715        AccountStorageRequirements::new(map_slots)
716    }
717
718    /// Builds an [`AccountDelta`] from incremental RPC sync data, fetching local account
719    /// data from the store on demand.
720    ///
721    /// For oversized storage maps: fetches delta entries via `sync_storage_maps`.
722    /// For oversized vaults: fetches delta entries via `sync_account_vault`.
723    /// Non-oversized parts are diffed against local data fetched from the store.
724    ///
725    /// # Panics
726    ///
727    /// Panics if `self.store` is `None`. Callers must check before invoking.
728    #[allow(clippy::too_many_lines)]
729    async fn build_account_delta(
730        &self,
731        details: &AccountDetails,
732        local_header: &AccountHeader,
733        block_from: BlockNumber,
734        block_to: BlockNumber,
735    ) -> Result<AccountDelta, ClientError> {
736        let store = self.store.as_ref().expect("store required for delta sync");
737        let account_id = details.header.id();
738
739        let storage_delta = self
740            .build_storage_delta(details, account_id, block_from, block_to, store.as_ref())
741            .await?;
742
743        let vault_delta = self
744            .build_vault_delta(details, account_id, block_from, block_to, store.as_ref())
745            .await?;
746
747        // --- Nonce delta ---
748        let old_nonce = local_header.nonce().as_canonical_u64();
749        let new_nonce = details.header.nonce().as_canonical_u64();
750        let nonce_delta = Felt::new(new_nonce - old_nonce);
751
752        AccountDelta::new(account_id, storage_delta, vault_delta, nonce_delta).map_err(|err| {
753            ClientError::RpcError(RpcError::InvalidResponse(format!(
754                "failed to construct account delta: {err}"
755            )))
756        })
757    }
758
    /// Computes the full storage delta (value slots + map slots) for the account.
    ///
    /// For value slots, compares the response values against the local store. For map slots,
    /// oversized maps (`too_many_entries`) fetch incremental delta entries from the sync endpoint
    /// and deduplicate by key keeping the latest value; non-oversized maps diff the full response
    /// entries against the local store.
    ///
    /// # Errors
    ///
    /// Returns an error if a map slot has no `map_details` in the response, if the
    /// `sync_storage_maps` RPC call fails, or if a delta entry cannot be applied.
    async fn build_storage_delta(
        &self,
        details: &AccountDetails,
        account_id: AccountId,
        block_from: BlockNumber,
        block_to: BlockNumber,
        store: &dyn Store,
    ) -> Result<AccountStorageDelta, ClientError> {
        let mut storage_delta = AccountStorageDelta::new();

        // First pass: value slots. A slot enters the delta only when its remote value
        // differs from the locally stored one. NOTE(review): a store read error is folded
        // into "no local value" via `.ok()`, which forces the slot into the delta —
        // presumably intentional best-effort behavior; confirm.
        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() == StorageSlotType::Value {
                let local_value = store
                    .get_account_storage_item(account_id, slot_header.name().clone())
                    .await
                    .ok();

                if local_value.as_ref() != Some(&slot_header.value()) {
                    storage_delta
                        .set_item(slot_header.name().clone(), slot_header.value())
                        .map_err(|err| {
                            ClientError::RpcError(RpcError::InvalidResponse(format!(
                                "failed to set storage delta item: {err}"
                            )))
                        })?;
                }
            }
        }

        // Lazily-populated cache of `sync_storage_maps` results: the endpoint is queried at
        // most once per account no matter how many oversized map slots it has.
        let mut map_delta_cache: Option<Vec<StorageMapUpdate>> = None;

        // Second pass: map slots.
        for slot_header in details.storage_details.header.slots() {
            if slot_header.slot_type() != StorageSlotType::Map {
                continue;
            }

            // Every map-typed slot must carry matching map details in the response.
            let map_details =
                details.storage_details.find_map_details(slot_header.name()).ok_or_else(|| {
                    ClientError::RpcError(RpcError::ExpectedDataMissing(format!(
                        "slot '{}' is a map but has no map_details in response",
                        slot_header.name()
                    )))
                })?;

            if map_details.too_many_entries {
                // Oversized map: fetch delta entries from the sync endpoint.
                if map_delta_cache.is_none() {
                    let map_info = self
                        .rpc_api
                        .sync_storage_maps(block_from, Some(block_to), account_id)
                        .await
                        .map_err(ClientError::RpcError)?;
                    map_delta_cache = Some(map_info.updates);
                }

                // The cache is always `Some` at this point; `unwrap_or_default` is a
                // defensive fallback to an empty slice.
                Self::apply_oversized_map_delta(
                    map_delta_cache.as_deref().unwrap_or_default(),
                    slot_header.name(),
                    &mut storage_delta,
                )?;
            } else {
                // Non-oversized map: the response carries all entries; diff against local.
                Self::apply_full_map_delta(
                    map_details,
                    slot_header.name(),
                    account_id,
                    store,
                    &mut storage_delta,
                )
                .await?;
            }
        }

        Ok(storage_delta)
    }
839
840    /// Applies delta updates from the sync endpoint for an oversized storage map slot.
841    ///
842    /// Filters the cached delta updates to the target slot, sorts by block number, and
843    /// deduplicates by key (keeping the latest value).
844    fn apply_oversized_map_delta(
845        delta_updates: &[StorageMapUpdate],
846        slot_name: &StorageSlotName,
847        storage_delta: &mut AccountStorageDelta,
848    ) -> Result<(), ClientError> {
849        let mut relevant: Vec<_> =
850            delta_updates.iter().filter(|u| u.slot_name == *slot_name).collect();
851        relevant.sort_by_key(|u| u.block_num);
852
853        // Deduplicate: keep latest value per key.
854        let mut seen = BTreeMap::new();
855        for update in relevant {
856            seen.insert(update.key, update.value);
857        }
858
859        for (key, value) in seen {
860            storage_delta.set_map_item(slot_name.clone(), key, value).map_err(|err| {
861                ClientError::RpcError(RpcError::InvalidResponse(format!(
862                    "failed to set storage map delta: {err}"
863                )))
864            })?;
865        }
866
867        Ok(())
868    }
869
870    /// Diffs the full response map entries against the local store for a non-oversized map slot.
871    ///
872    /// Entries present in the response but missing or different locally are added to the delta.
873    /// Entries present locally but absent in the response are set to `Word::default()` (removal).
874    async fn apply_full_map_delta(
875        map_details: &AccountStorageMapDetails,
876        slot_name: &StorageSlotName,
877        account_id: AccountId,
878        store: &dyn Store,
879        storage_delta: &mut AccountStorageDelta,
880    ) -> Result<(), ClientError> {
881        let response_map = map_details
882            .entries
883            .clone()
884            .into_storage_map()
885            .ok_or_else(|| {
886                ClientError::RpcError(RpcError::ExpectedDataMissing(
887                    "expected AllEntries for map, got EntriesWithProofs".into(),
888                ))
889            })?
890            .map_err(|err| {
891                ClientError::RpcError(RpcError::InvalidResponse(format!(
892                    "the rpc api returned a non-valid map entry: {err}"
893                )))
894            })?;
895
896        let local_entries: BTreeMap<StorageMapKey, Word> = store
897            .get_account_storage(account_id, AccountStorageFilter::SlotName(slot_name.clone()))
898            .await
899            .ok()
900            .and_then(|storage| storage.get(slot_name).cloned())
901            .map(|slot| match slot.content() {
902                StorageSlotContent::Map(map) => map.entries().map(|(k, v)| (*k, *v)).collect(),
903                StorageSlotContent::Value(_) => BTreeMap::new(),
904            })
905            .unwrap_or_default();
906
907        let response_entries: BTreeMap<StorageMapKey, Word> =
908            response_map.entries().map(|(k, v)| (*k, *v)).collect();
909
910        // Entries in response but not in local, or with different values.
911        for (key, value) in &response_entries {
912            if local_entries.get(key) != Some(value) {
913                storage_delta.set_map_item(slot_name.clone(), *key, *value).map_err(|err| {
914                    ClientError::RpcError(RpcError::InvalidResponse(format!(
915                        "failed to set storage map delta: {err}"
916                    )))
917                })?;
918            }
919        }
920
921        // Entries in local but removed in response (set to empty word).
922        for key in local_entries.keys() {
923            if !response_entries.contains_key(key) {
924                storage_delta.set_map_item(slot_name.clone(), *key, Word::default()).map_err(
925                    |err| {
926                        ClientError::RpcError(RpcError::InvalidResponse(format!(
927                            "failed to set storage map delta for removal: {err}"
928                        )))
929                    },
930                )?;
931            }
932        }
933
934        Ok(())
935    }
936
    /// Computes the vault delta between local and remote account state.
    ///
    /// For oversized vaults (`too_many_assets`), fetches incremental updates from the sync
    /// endpoint and replays them on top of the local vault. For non-oversized vaults, diffs
    /// the full response assets against the local vault.
    ///
    /// # Errors
    ///
    /// Returns an error if the local vault cannot be read from the store, if the
    /// `sync_account_vault` RPC call fails, or if the diff cannot be applied to the delta.
    async fn build_vault_delta(
        &self,
        details: &AccountDetails,
        account_id: AccountId,
        block_from: BlockNumber,
        block_to: BlockNumber,
        store: &dyn Store,
    ) -> Result<AccountVaultDelta, ClientError> {
        let mut vault_delta = AccountVaultDelta::default();
        let local_vault =
            store.get_account_vault(account_id).await.map_err(ClientError::StoreError)?;

        if details.vault_details.too_many_assets {
            // Oversized vault: fetch delta from sync endpoint.
            let vault_info = self
                .rpc_api
                .sync_account_vault(block_from, Some(block_to), account_id)
                .await
                .map_err(ClientError::RpcError)?;

            // Build the final vault state by applying updates to local vault.
            let mut vault_map: BTreeMap<AssetVaultKey, Asset> =
                local_vault.assets().map(|asset| (asset.vault_key(), asset)).collect();

            // Replay updates in chronological order so a later update to the same key
            // overwrites an earlier one.
            let mut vault_updates = vault_info.updates;
            vault_updates.sort_by_key(|u| u.block_num);

            for update in vault_updates {
                match update.asset {
                    // `Some` adds or replaces the asset under its vault key.
                    Some(asset) => {
                        vault_map.insert(update.vault_key, asset);
                    },
                    // `None` marks the asset as removed from the vault.
                    None => {
                        vault_map.remove(&update.vault_key);
                    },
                }
            }

            Self::compute_vault_delta_from_diff(&local_vault, &vault_map, &mut vault_delta)?;
        } else {
            // Non-oversized vault: diff response assets against local.
            let final_assets: BTreeMap<AssetVaultKey, Asset> = details
                .vault_details
                .assets
                .iter()
                .map(|asset| (asset.vault_key(), *asset))
                .collect();

            Self::compute_vault_delta_from_diff(&local_vault, &final_assets, &mut vault_delta)?;
        }

        Ok(vault_delta)
    }
995
996    /// Computes a vault delta from the difference between a local vault and a final asset map.
997    fn compute_vault_delta_from_diff(
998        local_vault: &AssetVault,
999        final_assets: &BTreeMap<AssetVaultKey, Asset>,
1000        vault_delta: &mut AccountVaultDelta,
1001    ) -> Result<(), ClientError> {
1002        let local_assets: BTreeMap<AssetVaultKey, Asset> =
1003            local_vault.assets().map(|a| (a.vault_key(), a)).collect();
1004
1005        // Assets in final but not in local -> add. Changed amounts -> remove old, add new.
1006        for (key, final_asset) in final_assets {
1007            match local_assets.get(key) {
1008                None => {
1009                    vault_delta.add_asset(*final_asset).map_err(|err| {
1010                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1011                            "failed to add asset to vault delta: {err}"
1012                        )))
1013                    })?;
1014                },
1015                Some(local_asset) if local_asset != final_asset => {
1016                    vault_delta.remove_asset(*local_asset).map_err(|err| {
1017                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1018                            "failed to remove old asset from vault delta: {err}"
1019                        )))
1020                    })?;
1021                    vault_delta.add_asset(*final_asset).map_err(|err| {
1022                        ClientError::RpcError(RpcError::InvalidResponse(format!(
1023                            "failed to add new asset to vault delta: {err}"
1024                        )))
1025                    })?;
1026                },
1027                _ => {}, // No change
1028            }
1029        }
1030
1031        // Assets in local but not in final -> remove.
1032        for (key, local_asset) in &local_assets {
1033            if !final_assets.contains_key(key) {
1034                vault_delta.remove_asset(*local_asset).map_err(|err| {
1035                    ClientError::RpcError(RpcError::InvalidResponse(format!(
1036                        "failed to remove asset from vault delta: {err}"
1037                    )))
1038                })?;
1039            }
1040        }
1041
1042        Ok(())
1043    }
1044
1045    /// Applies the changes received from the sync response to the notes and transactions tracked
1046    /// by the client and updates the `note_updates` accordingly.
1047    ///
1048    /// This method uses the callbacks provided to the [`StateSync`] component to check if the
1049    /// updates received are relevant to the client.
1050    ///
1051    /// The note updates might include:
1052    /// * New notes that we received from the node and might be relevant to the client.
1053    /// * Tracked expected notes that were committed in the block.
1054    /// * Tracked notes that were being processed by a transaction that got committed.
1055    /// * Tracked notes that were nullified by an external transaction.
1056    ///
1057    /// The `public_notes` parameter provides cached public note details for the current sync
1058    /// iteration so the node is only queried once per batch.
1059    async fn note_state_sync(
1060        &self,
1061        note_updates: &mut NoteUpdateTracker,
1062        note_inclusions: BTreeMap<NoteId, CommittedNote>,
1063        block_header: &BlockHeader,
1064        public_notes: &BTreeMap<NoteId, InputNoteRecord>,
1065    ) -> Result<bool, ClientError> {
1066        // `found_relevant_note` tracks whether we want to persist the block header in the end
1067        let mut found_relevant_note = false;
1068
1069        for (_, committed_note) in note_inclusions {
1070            let public_note = (committed_note.note_type() != NoteType::Private)
1071                .then(|| public_notes.get(committed_note.note_id()))
1072                .flatten()
1073                .cloned();
1074
1075            match self.note_screener.on_note_received(committed_note, public_note).await? {
1076                NoteUpdateAction::Commit(committed_note) => {
1077                    // Only mark the downloaded block header as relevant if we are talking about
1078                    // an input note (output notes get marked as committed but we don't need the
1079                    // block for anything there)
1080                    found_relevant_note |= note_updates
1081                        .apply_committed_note_state_transitions(&committed_note, block_header)?;
1082                },
1083                NoteUpdateAction::Insert(public_note) => {
1084                    found_relevant_note = true;
1085
1086                    note_updates.apply_new_public_note(public_note, block_header)?;
1087                },
1088                NoteUpdateAction::Discard => {},
1089            }
1090        }
1091
1092        Ok(found_relevant_note)
1093    }
1094
    /// Collects the nullifier tags for the notes that were updated in the sync response and uses
    /// the `sync_nullifiers` endpoint to check if there are new nullifiers for these
    /// notes. It then processes the nullifiers to apply the state transitions on the note updates.
    ///
    /// The `state_sync_update` parameter will be updated to track the new discarded transactions.
    ///
    /// # Errors
    ///
    /// Returns an error if the `sync_nullifiers` RPC call fails or if applying a nullifier
    /// state transition fails.
    async fn nullifiers_state_sync(
        &self,
        state_sync_update: &mut StateSyncUpdate,
        current_block_num: BlockNumber,
    ) -> Result<(), ClientError> {
        // To receive information about added nullifiers, we reduce them to the higher 16 bits
        // Note that besides filtering by nullifier prefixes, the node also filters by block number
        // (it only returns nullifiers from current_block_num until
        // response.block_header.block_num())

        // Check for new nullifiers for input notes that were updated
        let nullifiers_tags: Vec<u16> = state_sync_update
            .note_updates
            .unspent_nullifiers()
            .map(|nullifier| nullifier.prefix())
            .collect();

        let mut new_nullifiers = self
            .rpc_api
            .sync_nullifiers(&nullifiers_tags, current_block_num, Some(state_sync_update.block_num))
            .await?;

        // Discard nullifiers that are newer than the current block (this might happen if the block
        // changes between the sync_state and the check_nullifier calls)
        new_nullifiers.retain(|update| update.block_num <= state_sync_update.block_num);

        for nullifier_update in new_nullifiers {
            // NOTE(review): presumably identifies an external account that consumed the
            // note, so the state transition can attribute the consumption — confirm
            // against `external_nullifier_account`'s contract.
            let external_consumer_account = state_sync_update
                .transaction_updates
                .external_nullifier_account(&nullifier_update.nullifier);

            state_sync_update.note_updates.apply_nullifiers_state_transitions(
                &nullifier_update,
                state_sync_update.transaction_updates.committed_transactions(),
                external_consumer_account,
            )?;

            // Process nullifiers and track the updates of local tracked transactions that were
            // discarded because the notes that they were processing were nullified by an
            // another transaction.
            state_sync_update
                .transaction_updates
                .apply_input_note_nullified(nullifier_update.nullifier);
        }

        Ok(())
    }
1147
1148    /// Applies the changes received from the sync response to the transactions tracked by the
1149    /// client and updates the `transaction_updates` accordingly.
1150    ///
1151    /// The transaction updates might include:
1152    /// * New transactions that were committed in the block.
1153    /// * Transactions that were discarded because they were stale or expired.
1154    fn transaction_state_sync(
1155        &self,
1156        transaction_updates: &mut TransactionUpdateTracker,
1157        new_block_header: &BlockHeader,
1158        transaction_inclusions: &[TransactionInclusion],
1159    ) {
1160        for transaction_inclusion in transaction_inclusions {
1161            transaction_updates.apply_transaction_inclusion(
1162                transaction_inclusion,
1163                u64::from(new_block_header.timestamp()),
1164            ); //TODO: Change timestamps from u64 to u32
1165        }
1166
1167        transaction_updates
1168            .apply_sync_height_update(new_block_header.block_num(), self.tx_discard_delta);
1169    }
1170}
1171
1172// HELPERS
1173// ================================================================================================
1174
1175/// Derives account commitment updates from transaction records.
1176///
1177/// For each unique account, takes the `final_state_commitment` from the transaction with the
1178/// highest `block_num`. This replicates the old `SyncState` behavior where the node returned
1179/// the latest account commitment per account in the synced range.
1180fn derive_account_commitment_updates(
1181    transaction_records: &[RpcTransactionRecord],
1182) -> Vec<(AccountId, Word)> {
1183    let mut latest_by_account: BTreeMap<AccountId, &RpcTransactionRecord> = BTreeMap::new();
1184
1185    for record in transaction_records {
1186        let account_id = record.transaction_header.account_id();
1187        latest_by_account
1188            .entry(account_id)
1189            .and_modify(|existing| {
1190                if record.block_num > existing.block_num {
1191                    *existing = record;
1192                }
1193            })
1194            .or_insert(record);
1195    }
1196
1197    latest_by_account
1198        .into_iter()
1199        .map(|(account_id, record)| {
1200            (account_id, record.transaction_header.final_state_commitment())
1201        })
1202        .collect()
1203}
1204
1205/// Returns nullifiers ordered by consuming transaction position, per account.
1206///
1207/// Groups RPC transaction records by (`account_id`, `block_num`), chains them using
1208/// `initial_state_commitment` / `final_state_commitment`, and collects each transaction's
1209/// input note nullifiers in execution order. Nullifiers from the same account are in execution
1210/// order; ordering across different accounts is arbitrary.
1211fn compute_ordered_nullifiers(transaction_records: &[RpcTransactionRecord]) -> Vec<Nullifier> {
1212    // Group transactions by (account_id, block_num).
1213    let mut groups: BTreeMap<(AccountId, BlockNumber), Vec<&RpcTransactionRecord>> =
1214        BTreeMap::new();
1215
1216    for record in transaction_records {
1217        let account_id = record.transaction_header.account_id();
1218        groups.entry((account_id, record.block_num)).or_default().push(record);
1219    }
1220
1221    let mut result = Vec::new();
1222
1223    for txs in groups.values() {
1224        // Build a lookup from initial_state_commitment -> transaction record.
1225        let mut init_to_tx: BTreeMap<Word, &RpcTransactionRecord> = txs
1226            .iter()
1227            .map(|tx| (tx.transaction_header.initial_state_commitment(), *tx))
1228            .collect();
1229
1230        // Build a set of all final states to find the chain start.
1231        let final_states: BTreeSet<Word> =
1232            txs.iter().map(|tx| tx.transaction_header.final_state_commitment()).collect();
1233
1234        // Find the chain start: the tx whose initial_state_commitment is not any other tx's
1235        // final_state_commitment.
1236        let chain_start = txs
1237            .iter()
1238            .find(|tx| !final_states.contains(&tx.transaction_header.initial_state_commitment()));
1239
1240        let Some(start_tx) = chain_start else {
1241            continue;
1242        };
1243
1244        // Walk the chain from start, removing each step from the map.
1245        let mut current =
1246            init_to_tx.remove(&start_tx.transaction_header.initial_state_commitment());
1247
1248        while let Some(tx) = current {
1249            for commitment in tx.transaction_header.input_notes().iter() {
1250                result.push(commitment.nullifier());
1251            }
1252            current = init_to_tx.remove(&tx.transaction_header.final_state_commitment());
1253        }
1254    }
1255
1256    result
1257}
1258
1259#[cfg(test)]
1260mod tests {
1261    use alloc::collections::BTreeSet;
1262    use alloc::sync::Arc;
1263
1264    use async_trait::async_trait;
1265    use miden_protocol::account::Account;
1266    use miden_protocol::assembly::DefaultSourceManager;
1267    use miden_protocol::asset::{Asset, FungibleAsset};
1268    use miden_protocol::block::BlockNumber;
1269    use miden_protocol::crypto::merkle::mmr::{Forest, InOrderIndex, PartialMmr};
1270    use miden_protocol::note::{
1271        Note,
1272        NoteAssets,
1273        NoteAttachment,
1274        NoteHeader,
1275        NoteMetadata,
1276        NoteRecipient,
1277        NoteStorage,
1278        NoteTag,
1279        NoteType,
1280    };
1281    use miden_protocol::testing::account_id::{
1282        ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET,
1283        ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE,
1284        ACCOUNT_ID_SENDER,
1285    };
1286    use miden_protocol::{Felt, Word};
1287    use miden_standards::code_builder::CodeBuilder;
1288    use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint};
1289    use miden_testing::{MockChainBuilder, TxContextInput};
1290
1291    use super::*;
1292    use crate::store::{OutputNoteRecord, OutputNoteState};
1293    use crate::testing::mock::MockRpcApi;
1294
    /// Mock note screener that discards all notes, for minimal test setup.
    struct MockScreener;

    #[async_trait(?Send)]
    impl OnNoteReceived for MockScreener {
        /// Always returns [`NoteUpdateAction::Discard`], regardless of the note received.
        async fn on_note_received(
            &self,
            _committed_note: CommittedNote,
            _public_note: Option<InputNoteRecord>,
        ) -> Result<NoteUpdateAction, ClientError> {
            Ok(NoteUpdateAction::Discard)
        }
    }
1308
1309    fn empty() -> StateSyncInput {
1310        StateSyncInput {
1311            accounts: vec![],
1312            note_tags: BTreeSet::new(),
1313            input_notes: vec![],
1314            output_notes: vec![],
1315            uncommitted_transactions: vec![],
1316        }
1317    }
1318
1319    // COMPUTE NULLIFIER TX ORDER TESTS
1320    // --------------------------------------------------------------------------------------------
1321
    // Unit tests for `compute_ordered_nullifiers`: chaining by state commitment,
    // grouping by (account, block), and per-transaction nullifier ordering.
    mod compute_nullifiers_tests {
        use alloc::vec;

        use miden_protocol::asset::FungibleAsset;
        use miden_protocol::block::BlockNumber;
        use miden_protocol::note::Nullifier;
        use miden_protocol::transaction::{InputNoteCommitment, InputNotes, TransactionHeader};
        use miden_protocol::{Felt, ZERO};

        use crate::rpc::domain::transaction::{
            ACCOUNT_ID_NATIVE_ASSET_FAUCET,
            TransactionRecord as RpcTransactionRecord,
        };

        /// Builds a `Word` whose first element is `n`, giving distinct, easily comparable
        /// test commitments and nullifiers.
        fn word(n: u64) -> miden_protocol::Word {
            [Felt::new(n), ZERO, ZERO, ZERO].into()
        }

        /// Builds an RPC transaction record forming a state-chain link
        /// `init_state -> final_state` that consumes one note per value in
        /// `nullifier_vals`, committed at `block_number`.
        fn make_rpc_tx(
            init_state: u64,
            final_state: u64,
            nullifier_vals: &[u64],
            block_number: u32,
        ) -> RpcTransactionRecord {
            let account_id = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE,
            )
            .unwrap();

            let input_notes = InputNotes::new_unchecked(
                nullifier_vals
                    .iter()
                    .map(|v| InputNoteCommitment::from(Nullifier::from_raw(word(*v))))
                    .collect(),
            );

            // Zero-amount fee asset; the fee is irrelevant to nullifier ordering.
            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            RpcTransactionRecord {
                block_num: BlockNumber::from(block_number),
                transaction_header: TransactionHeader::new(
                    account_id,
                    word(init_state),
                    word(final_state),
                    input_notes,
                    vec![],
                    fee,
                ),
                output_notes: vec![],
                erased_output_notes: vec![],
            }
        }

        #[test]
        fn chains_rpc_transactions_by_state_commitment() {
            // Chain: tx_a (state 1->2) -> tx_b (state 2->3) -> tx_c (state 3->4)
            // Passed in reverse order to verify chaining uses state, not insertion order.
            let tx_a = make_rpc_tx(1, 2, &[10], 5);
            let tx_b = make_rpc_tx(2, 3, &[20], 5);
            let tx_c = make_rpc_tx(3, 4, &[30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx_c, tx_a, tx_b]);

            assert_eq!(result[0], Nullifier::from_raw(word(10)));
            assert_eq!(result[1], Nullifier::from_raw(word(20)));
            assert_eq!(result[2], Nullifier::from_raw(word(30)));
        }

        #[test]
        fn groups_independently_by_account_and_block() {
            // Account A, block 5: two chained txs.
            let tx_a1 = make_rpc_tx(1, 2, &[10], 5);
            let tx_a2 = make_rpc_tx(2, 3, &[20], 5);

            // Account A, block 6: independent chain.
            let tx_a3 = make_rpc_tx(3, 4, &[30], 6);

            // Account B, block 5: independent chain.
            let account_b = miden_protocol::account::AccountId::try_from(
                miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET,
            )
            .unwrap();

            let fee =
                FungibleAsset::new(ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().expect("valid"), 0u64)
                    .unwrap();

            // Built by hand (not via `make_rpc_tx`) because it uses a different account id.
            let tx_b1 = RpcTransactionRecord {
                block_num: BlockNumber::from(5u32),
                transaction_header: TransactionHeader::new(
                    account_b,
                    word(100),
                    word(200),
                    InputNotes::new_unchecked(vec![InputNoteCommitment::from(
                        Nullifier::from_raw(word(40)),
                    )]),
                    vec![],
                    fee,
                ),
                output_notes: vec![],
                erased_output_notes: vec![],
            };

            let result = super::super::compute_ordered_nullifiers(&[tx_a2, tx_b1, tx_a3, tx_a1]);

            // Nullifiers are ordered by chain position within each (account, block) group.
            // The exact global indices depend on BTreeMap iteration order of the groups.
            let pos = |val: u64| -> usize {
                result.iter().position(|n| *n == Nullifier::from_raw(word(val))).unwrap()
            };

            // Within the same group, chain order is preserved.
            assert!(pos(10) < pos(20)); // A, block 5: pos 0 < pos 1
            // Nullifiers from different groups are all present.
            assert!(result.contains(&Nullifier::from_raw(word(30)))); // A, block 6
            assert!(result.contains(&Nullifier::from_raw(word(40)))); // B, block 5
        }

        #[test]
        fn multiple_nullifiers_per_transaction_are_consecutive() {
            // Single tx consuming 3 notes — all should appear consecutively.
            let tx = make_rpc_tx(1, 2, &[10, 20, 30], 5);

            let result = super::super::compute_ordered_nullifiers(&[tx]);

            assert_eq!(result.len(), 3);
            assert!(result.contains(&Nullifier::from_raw(word(10))));
            assert!(result.contains(&Nullifier::from_raw(word(20))));
            assert!(result.contains(&Nullifier::from_raw(word(30))));
        }

        #[test]
        fn empty_input_returns_empty_vec() {
            let result = super::super::compute_ordered_nullifiers(&[]);
            assert!(result.is_empty());
        }
    }
1461
1462    // CONSUMED NOTE ORDERING INTEGRATION TESTS
1463    // --------------------------------------------------------------------------------------------
1464
1465    /// Mock note screener that commits all notes matching tracked input notes.
1466    /// This ensures committed notes get their inclusion proofs set during sync.
1467    struct CommitAllScreener;
1468
1469    #[async_trait(?Send)]
1470    impl OnNoteReceived for CommitAllScreener {
1471        async fn on_note_received(
1472            &self,
1473            committed_note: CommittedNote,
1474            _public_note: Option<InputNoteRecord>,
1475        ) -> Result<NoteUpdateAction, ClientError> {
1476            Ok(NoteUpdateAction::Commit(committed_note))
1477        }
1478    }
1479
1480    /// Builds a `MockChain` where 3 notes are consumed by chained transactions in the same block.
1481    ///
1482    /// Returns the chain, the account, and the 3 notes (in consumption order).
1483    async fn build_chain_with_chained_consume_txs() -> (miden_testing::MockChain, Account, [Note; 3])
1484    {
1485        let sender_id: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
1486        let faucet_id: AccountId = ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET.try_into().unwrap();
1487
1488        let mut builder = MockChainBuilder::new();
1489        let account = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
1490        let account_id = account.id();
1491
1492        let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 100u64).unwrap());
1493        let note1 = builder
1494            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
1495            .unwrap();
1496        let note2 = builder
1497            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
1498            .unwrap();
1499        let note3 = builder
1500            .add_p2id_note(sender_id, account_id, &[asset], NoteType::Public)
1501            .unwrap();
1502
1503        let mut chain = builder.build().unwrap();
1504        chain.prove_next_block().unwrap(); // block 1: makes genesis notes consumable
1505
1506        // Execute 3 chained consume transactions (state S0→S1→S2→S3).
1507        let mut current_account = account.clone();
1508        for note in [&note1, &note2, &note3] {
1509            let tx = Box::pin(
1510                chain
1511                    .build_tx_context(
1512                        TxContextInput::Account(current_account.clone()),
1513                        &[],
1514                        core::slice::from_ref(note),
1515                    )
1516                    .unwrap()
1517                    .build()
1518                    .unwrap()
1519                    .execute(),
1520            )
1521            .await
1522            .unwrap();
1523            current_account.apply_delta(tx.account_delta()).unwrap();
1524            chain.add_pending_executed_transaction(&tx).unwrap();
1525        }
1526
1527        chain.prove_next_block().unwrap(); // block 2: all 3 txs in one block
1528        (chain, account, [note1, note2, note3])
1529    }
1530
1531    /// Verifies that `consumed_tx_order` is correctly set when multiple chained transactions
1532    /// for the same account consume notes in the same block.
1533    #[tokio::test]
1534    async fn sync_state_sets_consumed_tx_order_for_chained_transactions() {
1535        use miden_protocol::note::NoteMetadata;
1536
1537        let (chain, account, [note1, note2, note3]) = build_chain_with_chained_consume_txs().await;
1538
1539        let mock_rpc = MockRpcApi::new(chain);
1540        let state_sync =
1541            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(CommitAllScreener), None);
1542
1543        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1544        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1545
1546        let input_notes: Vec<InputNoteRecord> = [&note1, &note2, &note3]
1547            .into_iter()
1548            .map(|n| InputNoteRecord::from(n.clone()))
1549            .collect();
1550
1551        let note_tags: BTreeSet<NoteTag> =
1552            input_notes.iter().filter_map(|n| n.metadata().map(NoteMetadata::tag)).collect();
1553
1554        let account_id = account.id();
1555        let sync_input = StateSyncInput {
1556            accounts: vec![AccountHeader::from(account)],
1557            note_tags,
1558            input_notes,
1559            output_notes: vec![],
1560            uncommitted_transactions: vec![],
1561        };
1562
1563        let update = state_sync.sync_state(&mut partial_mmr, sync_input).await.unwrap();
1564
1565        let updated_notes: Vec<_> = update.note_updates.updated_input_notes().collect();
1566
1567        let find_order = |note_id: NoteId| -> Option<u32> {
1568            updated_notes
1569                .iter()
1570                .find(|n| n.id() == note_id)
1571                .and_then(|n| n.consumed_tx_order())
1572        };
1573
1574        assert_eq!(find_order(note1.id()), Some(0), "note1 should have tx_order 0");
1575        assert_eq!(find_order(note2.id()), Some(1), "note2 should have tx_order 1");
1576        assert_eq!(find_order(note3.id()), Some(2), "note3 should have tx_order 2");
1577
1578        // Since there are no uncommitted_transactions, these notes were consumed by a tracked
1579        // account via external transactions. Verify that consumer_account is populated.
1580        for note in &updated_notes {
1581            let record = note.inner();
1582            assert!(record.is_consumed(), "note should be in a consumed state");
1583            assert_eq!(
1584                record.consumer_account(),
1585                Some(account_id),
1586                "externally-consumed notes by a tracked account should have consumer_account set",
1587            );
1588        }
1589    }
1590
1591    #[tokio::test]
1592    async fn sync_state_across_multiple_iterations_with_same_mmr() {
1593        // Setup: create a mock chain and advance it so there are blocks to sync.
1594        let mock_rpc = MockRpcApi::default();
1595        mock_rpc.advance_blocks(3);
1596        let chain_tip_1 = mock_rpc.get_chain_tip_block_num();
1597
1598        let state_sync =
1599            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1600
1601        // Build the initial PartialMmr from genesis (only 1 leaf).
1602        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1603        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1604        assert_eq!(partial_mmr.forest().num_leaves(), 1);
1605
1606        // First sync
1607        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1608
1609        assert_eq!(update.block_num, chain_tip_1);
1610        let forest_1 = partial_mmr.forest();
1611        // The MMR should contain one leaf per block (genesis + the new blocks).
1612        assert_eq!(forest_1.num_leaves(), chain_tip_1.as_u32() as usize + 1);
1613
1614        // Second sync
1615        mock_rpc.advance_blocks(2);
1616        let chain_tip_2 = mock_rpc.get_chain_tip_block_num();
1617
1618        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1619
1620        assert_eq!(update.block_num, chain_tip_2);
1621        let forest_2 = partial_mmr.forest();
1622        assert!(forest_2 > forest_1);
1623        assert_eq!(forest_2.num_leaves(), chain_tip_2.as_u32() as usize + 1);
1624
1625        // Third sync (no new blocks)
1626        let update = state_sync.sync_state(&mut partial_mmr, empty()).await.unwrap();
1627
1628        assert_eq!(update.block_num, chain_tip_2);
1629        assert_eq!(partial_mmr.forest(), forest_2);
1630    }
1631
    /// Builds a mock chain with a faucet that mints `num_blocks` notes, one per block.
    /// Returns the chain and the set of note tags for filtering.
    ///
    /// Each iteration compiles a one-off transaction script that calls the basic
    /// fungible faucet's `mint_and_send` procedure, executes it against the faucet's
    /// current state, and seals the resulting transaction into its own block.
    async fn build_chain_with_mint_notes(
        num_blocks: u64,
    ) -> (miden_testing::MockChain, BTreeSet<NoteTag>) {
        let mut builder = MockChainBuilder::new();
        // Faucet with Falcon512/Poseidon2 auth, symbol "TST" and a 10_000 max supply.
        let faucet = builder
            .add_existing_basic_faucet(
                miden_testing::Auth::BasicAuth {
                    auth_scheme: miden_protocol::account::auth::AuthScheme::Falcon512Poseidon2,
                },
                "TST",
                10_000,
                None,
            )
            .unwrap();
        // Extra mock account added to the chain; not used further by this helper.
        let _target = builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let mut chain = builder.build().unwrap();

        // Fixed recipient digest and default tag shared by every minted note.
        let recipient: Word = [0u32, 1, 2, 3].into();
        let tag = NoteTag::default();
        // Local copy of the faucet account state; each tx's delta is applied to it so
        // the next iteration executes against the up-to-date state.
        let mut faucet_account = faucet.clone();
        let mut note_tags = BTreeSet::new();

        for i in 0..num_blocks {
            // Vary the amount per block so each minted note is distinct.
            let amount = Felt::new(100 + i);
            let source_manager = Arc::new(DefaultSourceManager::default());
            // MASM script: push recipient word, note type, tag and amount, call the
            // faucet's mint_and_send, then clean up the stack.
            let tx_script_code = format!(
                "
                begin
                    padw padw push.0
                    push.{r0}.{r1}.{r2}.{r3}
                    push.{note_type}
                    push.{tag}
                    push.{amount}
                    call.::miden::standards::faucets::basic_fungible::mint_and_send
                    dropw dropw dropw dropw
                end
                ",
                r0 = recipient[0],
                r1 = recipient[1],
                r2 = recipient[2],
                r3 = recipient[3],
                note_type = NoteType::Private as u8,
                tag = u32::from(tag),
                amount = amount,
            );
            let tx_script = CodeBuilder::with_source_manager(source_manager.clone())
                .compile_tx_script(tx_script_code)
                .unwrap();
            // Box::pin keeps the large execution future off the enclosing future's stack.
            let tx = Box::pin(
                chain
                    .build_tx_context(
                        miden_testing::TxContextInput::Account(faucet_account.clone()),
                        &[],
                        &[],
                    )
                    .unwrap()
                    .tx_script(tx_script)
                    .with_source_manager(source_manager)
                    .build()
                    .unwrap()
                    .execute(),
            )
            .await
            .unwrap();

            // Record the tags of the notes this mint produced; callers use them to
            // filter `sync_notes`.
            for output_note in tx.output_notes().iter() {
                note_tags.insert(output_note.metadata().tag());
            }

            faucet_account.apply_delta(tx.account_delta()).unwrap();
            chain.add_pending_executed_transaction(&tx).unwrap();
            // Seal this transaction into its own block.
            chain.prove_next_block().unwrap();
        }

        (chain, note_tags)
    }
1710
1711    /// Verifies that the sync correctly processes notes committed in multiple blocks
1712    /// (batched `SyncNotes` response) and tracks their blocks in the partial MMR.
1713    ///
1714    /// This test creates a faucet and mints notes in separate blocks (blocks 1, 2, 3),
1715    /// so `sync_notes` returns multiple `NoteSyncBlock`s. It then verifies:
1716    /// - The MMR is advanced to the chain tip
1717    /// - Blocks containing relevant notes are tracked in the partial MMR via `track()`
1718    /// - Note inclusion proofs are set correctly
1719    /// - Block headers for note blocks are stored
1720    #[tokio::test]
1721    async fn sync_state_tracks_note_blocks_in_mmr() {
1722        let (chain, note_tags) = build_chain_with_mint_notes(3).await;
1723        let mock_rpc = MockRpcApi::new(chain);
1724        let chain_tip = mock_rpc.get_chain_tip_block_num();
1725
1726        // Verify the mock returns notes across multiple blocks.
1727        let note_sync =
1728            mock_rpc.sync_notes(BlockNumber::from(0u32), None, &note_tags).await.unwrap();
1729        assert!(
1730            note_sync.blocks.len() >= 2,
1731            "expected notes in multiple blocks, got {}",
1732            note_sync.blocks.len()
1733        );
1734
1735        // Collect the block numbers that have notes.
1736        let note_block_nums: BTreeSet<BlockNumber> =
1737            note_sync.blocks.iter().map(|b| b.block_header.block_num()).collect();
1738
1739        // Test that fetch_sync_data returns note blocks with valid MMR paths that
1740        // can be used to track blocks in the partial MMR.
1741        let state_sync =
1742            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);
1743
1744        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
1745        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);
1746
1747        let sync_data = state_sync
1748            .fetch_sync_data(BlockNumber::GENESIS, &[], &Arc::new(note_tags.clone()))
1749            .await
1750            .unwrap()
1751            .expect("should have progressed past genesis");
1752
1753        // Should have advanced to the chain tip.
1754        assert_eq!(sync_data.chain_tip_header.block_num(), chain_tip);
1755        assert!(!sync_data.note_blocks.is_empty(), "should have note blocks");
1756
1757        // Apply the MMR delta and add the chain tip block.
1758        let _auth_nodes: Vec<(InOrderIndex, Word)> =
1759            partial_mmr.apply(sync_data.mmr_delta).map_err(StoreError::MmrError).unwrap();
1760        partial_mmr.add(sync_data.chain_tip_header.commitment(), false);
1761
1762        assert_eq!(partial_mmr.forest().num_leaves(), chain_tip.as_u32() as usize + 1);
1763
1764        // Track each note block using the MMR path from the sync_notes response.
1765        for block in &sync_data.note_blocks {
1766            let bn = block.block_header.block_num();
1767            partial_mmr
1768                .track(bn.as_usize(), block.block_header.commitment(), &block.mmr_path)
1769                .map_err(StoreError::MmrError)
1770                .unwrap();
1771
1772            assert!(
1773                partial_mmr.is_tracked(bn.as_usize()),
1774                "block {bn} should be tracked after calling track()"
1775            );
1776        }
1777
1778        // Verify the tracked blocks match the note blocks.
1779        for &bn in &note_block_nums {
1780            assert!(
1781                partial_mmr.is_tracked(bn.as_usize()),
1782                "block {bn} with notes should be tracked in partial MMR"
1783            );
1784        }
1785    }
1786
1787    /// Tests that erased notes are marked as consumed when a committed transaction
1788    /// reports output notes that were erased by same-batch note erasure.
1789    ///
1790    /// This simulates same-batch note erasure: the transaction was committed, its header
1791    /// says it produced a note, but the note was erased and doesn't exist on the node.
1792    #[tokio::test]
1793    async fn erased_notes_are_marked_as_consumed() {
1794        // Create a public output note. It won't be in the mock chain (simulating erasure).
1795        let sender_id: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
1796        let metadata = NoteMetadata::new(sender_id, NoteType::Public);
1797        let script = CodeBuilder::new()
1798            .compile_note_script("@note_script\npub proc main\n    nop\nend")
1799            .unwrap();
1800        let recipient = NoteRecipient::new(
1801            Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
1802            script,
1803            NoteStorage::new(vec![]).unwrap(),
1804        );
1805        let output_note = OutputNoteRecord::new(
1806            recipient.digest(),
1807            NoteAssets::new(vec![]).unwrap(),
1808            metadata.clone(),
1809            OutputNoteState::ExpectedFull { recipient },
1810            BlockNumber::from(1u32),
1811        );
1812        let note_id = output_note.id();
1813        let note_header = NoteHeader::new(note_id, metadata);
1814
1815        // Build a NoteUpdateTracker with the output note.
1816        let mut note_updates = NoteUpdateTracker::new(vec![], vec![output_note]);
1817
1818        // Mark the note as erased (created and consumed in the same batch).
1819        let block_num = BlockNumber::from(3u32);
1820        note_updates
1821            .mark_erased_note_as_consumed(&note_header, block_num)
1822            .expect("marking erased note should succeed");
1823
1824        let updated = note_updates
1825            .updated_output_notes()
1826            .find(|n| n.id() == note_id)
1827            .expect("output note should be in the update");
1828
1829        assert!(
1830            updated.inner().is_consumed(),
1831            "output note should be consumed after erasure detection, but state is: {}",
1832            updated.inner().state()
1833        );
1834    }
1835
    /// Tests that erased notes targeting a tracked network account are marked as consumed
    /// by that account through the full sync flow.
    ///
    /// Same-batch erasure scenario: a sender's transaction creates an output note
    /// targeting a network account that consumes it in the same batch, so the note never
    /// appears in the block body and the mock RPC surfaces it as erased in the
    /// transaction sync response.
    ///
    /// When the client tracks the network account, the expected end state is that an
    /// input note record is created for the erased note in a consumed state with the
    /// network account as the consumer.
    #[tokio::test]
    async fn erased_notes_are_marked_as_consumed_by_network_account() {
        // Build a chain with a sender that executes one tx so `sync_transactions` returns
        // a record. The mock attaches the registered erased note header to that record.
        let mut builder = MockChainBuilder::new();
        let p2id_sender: AccountId = ACCOUNT_ID_SENDER.try_into().unwrap();
        let faucet_id: AccountId = ACCOUNT_ID_PRIVATE_FUNGIBLE_FAUCET.try_into().unwrap();
        let sender_account =
            builder.add_existing_mock_account(miden_testing::Auth::IncrNonce).unwrap();
        let sender_id = sender_account.id();

        // P2ID note funding the sender so it has something to consume in its tx.
        let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 100u64).unwrap());
        let note = builder
            .add_p2id_note(p2id_sender, sender_id, &[asset], NoteType::Public)
            .unwrap();

        let mut chain = builder.build().unwrap();
        // Block 1: makes the genesis note consumable.
        chain.prove_next_block().unwrap();

        // Execute the sender's consume transaction and seal it into block 2.
        let tx = Box::pin(
            chain
                .build_tx_context(
                    TxContextInput::Account(sender_account.clone()),
                    &[],
                    core::slice::from_ref(&note),
                )
                .unwrap()
                .build()
                .unwrap()
                .execute(),
        )
        .await
        .unwrap();
        chain.add_pending_executed_transaction(&tx).unwrap();
        chain.prove_next_block().unwrap();

        // Construct the erased note that will be marked as consumed by the network account.
        let network_account_id: AccountId =
            ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE.try_into().unwrap();
        // The attachment routes the note to the network account for execution.
        let target =
            NetworkAccountTarget::new(network_account_id, NoteExecutionHint::Always).unwrap();
        let attachment: NoteAttachment = target.into();
        let metadata = NoteMetadata::new(sender_id, NoteType::Public).with_attachment(attachment);
        let script = CodeBuilder::new()
            .compile_note_script("@note_script\npub proc main\n    nop\nend")
            .unwrap();
        let recipient = NoteRecipient::new(
            Word::from([Felt::new(7), Felt::new(8), Felt::new(9), Felt::new(10)]),
            script,
            NoteStorage::new(vec![]).unwrap(),
        );
        let recipient_digest = recipient.digest();
        let assets = NoteAssets::new(vec![]).unwrap();

        // Output note record tracked by the sender prior to sync. The flow that builds the
        // input record from the erased header relies on this output entry being present.
        let output_note = OutputNoteRecord::new(
            recipient_digest,
            assets.clone(),
            metadata.clone(),
            OutputNoteState::ExpectedFull { recipient },
            BlockNumber::from(1u32),
        );
        let erased_note_id = output_note.id();
        let erased_note_header = NoteHeader::new(erased_note_id, metadata);

        let mock_rpc = MockRpcApi::new(chain);
        // Register the header so the mock reports it as erased in the tx sync response.
        mock_rpc.mark_note_as_erased(erased_note_header);

        // Track both the sender (so its tx is returned) and the network account (so the
        // gating in `mark_erased_note_as_consumed` allows creating the input record).
        let network_header = AccountHeader::new(
            network_account_id,
            Felt::new(0),
            EMPTY_WORD,
            EMPTY_WORD,
            EMPTY_WORD,
        );

        let state_sync =
            StateSync::new(Arc::new(mock_rpc.clone()), None, Arc::new(MockScreener), None);

        // Start from a genesis-only partial MMR.
        let genesis_peaks = mock_rpc.get_mmr().peaks_at(Forest::new(1)).unwrap();
        let mut partial_mmr = PartialMmr::from_peaks(genesis_peaks);

        let sync_input = StateSyncInput {
            accounts: vec![AccountHeader::from(sender_account), network_header],
            note_tags: BTreeSet::new(),
            input_notes: vec![],
            output_notes: vec![output_note],
            uncommitted_transactions: vec![],
        };

        let update = state_sync.sync_state(&mut partial_mmr, sync_input).await.unwrap();

        // The output note record should transition to consumed.
        let updated_output = update
            .note_updates
            .updated_output_notes()
            .find(|n| n.id() == erased_note_id)
            .expect("output note should be in the update");
        assert!(
            updated_output.inner().is_consumed(),
            "output note should be consumed, got: {}",
            updated_output.inner().state()
        );

        // A new input note record should be created with the network account as consumer.
        let input_note_update = update
            .note_updates
            .updated_input_notes()
            .find(|n| n.id() == erased_note_id)
            .expect("input note should be created from the erased output note");

        let inner = input_note_update.inner();
        assert!(
            inner.is_consumed(),
            "input note should be in a consumed state, got: {}",
            inner.state()
        );
        assert_eq!(
            inner.consumer_account(),
            Some(network_account_id),
            "consumer should be the tracked network account"
        );
    }
1973}