miden_node_store/
state.rs

1//! Abstraction to synchronize state modifications.
2//!
3//! The [State] provides data access and modification methods. Its main purpose is to ensure
4//! that data is written atomically and that reads are consistent.
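//!
//! # Example
//!
//! A minimal, illustrative sketch of loading and querying the state (the construction of `db`
//! and `block_store` is assumed to happen elsewhere and is not shown here):
//!
//! ```ignore
//! let state = State::load(db, Arc::new(block_store)).await?;
//!
//! // Reads are served from a consistent view of the DB and the in-memory structures.
//! let latest = state.latest_block_num().await;
//! let (header, mmr_proof) = state.get_block_header(None, true).await?;
//! ```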
5
6use std::{
7    collections::{BTreeMap, BTreeSet},
8    ops::Not,
9    sync::Arc,
10};
11
12use miden_node_proto::{
13    convert,
14    domain::{
15        account::{AccountInfo, AccountProofRequest, StorageMapKeysProof},
16        block::BlockInclusionProof,
17        note::NoteAuthenticationInfo,
18    },
19    generated::responses::{
20        AccountProofsResponse, AccountStateHeader, GetBlockInputsResponse, StorageSlotMapProof,
21    },
22    AccountInputRecord, NullifierWitness,
23};
24use miden_node_utils::formatting::format_array;
25use miden_objects::{
26    account::{AccountDelta, AccountHeader, AccountId, StorageSlot},
27    block::{Block, BlockHeader, BlockNumber},
28    crypto::{
29        hash::rpo::RpoDigest,
30        merkle::{
31            LeafIndex, Mmr, MmrDelta, MmrError, MmrPeaks, MmrProof, SimpleSmt, SmtProof, ValuePath,
32        },
33    },
34    note::{NoteId, Nullifier},
35    transaction::OutputNote,
36    utils::Serializable,
37    AccountError, ACCOUNT_TREE_DEPTH,
38};
39use tokio::{
40    sync::{oneshot, Mutex, RwLock},
41    time::Instant,
42};
43use tracing::{info, info_span, instrument};
44
45use crate::{
46    blocks::BlockStore,
47    db::{Db, NoteRecord, NoteSyncUpdate, NullifierInfo, StateSyncUpdate},
48    errors::{
49        ApplyBlockError, DatabaseError, GetBlockHeaderError, GetBlockInputsError,
50        GetNoteInclusionProofError, InvalidBlockError, NoteSyncError, StateInitializationError,
51        StateSyncError,
52    },
53    nullifier_tree::NullifierTree,
54    COMPONENT,
55};
56// STRUCTURES
57// ================================================================================================
58
59/// Information needed from the store to validate and build a block
60#[derive(Debug)]
61pub struct BlockInputs {
62    /// Previous block header
63    pub block_header: BlockHeader,
64
65    /// MMR peaks for the current chain state
66    pub chain_peaks: MmrPeaks,
67
68    /// The hashes of the requested accounts and their authentication paths
69    pub account_states: Vec<AccountInputRecord>,
70
71    /// The requested nullifiers and their authentication paths
72    pub nullifiers: Vec<NullifierWitness>,
73
74    /// List of notes found in the store
75    pub found_unauthenticated_notes: NoteAuthenticationInfo,
76}
77
78impl From<BlockInputs> for GetBlockInputsResponse {
79    fn from(value: BlockInputs) -> Self {
80        Self {
81            block_header: Some(value.block_header.into()),
82            mmr_peaks: convert(value.chain_peaks.peaks()),
83            account_states: convert(value.account_states),
84            nullifiers: convert(value.nullifiers),
85            found_unauthenticated_notes: Some(value.found_unauthenticated_notes.into()),
86        }
87    }
88}
89
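/// Information about the current state needed by the block producer to verify a transaction.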
90#[derive(Debug)]
91pub struct TransactionInputs {
92    pub account_hash: RpoDigest,
93    pub nullifiers: Vec<NullifierInfo>,
94    pub found_unauthenticated_notes: BTreeSet<NoteId>,
95}
96
97/// Container for state that needs to be updated atomically.
98struct InnerState {
99    nullifier_tree: NullifierTree,
100    chain_mmr: Mmr,
101    account_tree: SimpleSmt<ACCOUNT_TREE_DEPTH>,
102}
103
104impl InnerState {
105    /// Returns the latest block number.
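    ///
    /// The chain MMR contains one leaf per block starting from genesis, so a forest of `n`
    /// corresponds to a latest block number of `n - 1` (e.g. a forest of 1 contains only the
    /// genesis block, block 0).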
106    fn latest_block_num(&self) -> BlockNumber {
107        let block_number: u32 = (self.chain_mmr.forest() - 1)
108            .try_into()
109            .expect("chain_mmr always has, at least, the genesis block");
110
111        block_number.into()
112    }
113}
114
115/// The rollup state
116pub struct State {
117    /// The database which stores block headers, nullifiers, notes, and the latest states of
118    /// accounts.
119    db: Arc<Db>,
120
121    /// The block store which stores full block contents for all blocks.
122    block_store: Arc<BlockStore>,
123
124    /// Read-write lock used to prevent writing to a structure while it is being used.
125    ///
126    /// The lock is writer-preferring, meaning the writer won't be starved.
127    inner: RwLock<InnerState>,
128
129    /// To allow readers to access the tree data while an update is being performed, and to
130    /// prevent TOCTOU issues, there must be no concurrent writers. This lock serializes writers.
131    writer: Mutex<()>,
132}
133
134impl State {
135    /// Loads the state from the `db`.
136    #[instrument(target = COMPONENT, skip_all)]
137    pub async fn load(
138        mut db: Db,
139        block_store: Arc<BlockStore>,
140    ) -> Result<Self, StateInitializationError> {
141        let nullifier_tree = load_nullifier_tree(&mut db).await?;
142        let chain_mmr = load_mmr(&mut db).await?;
143        let account_tree = load_accounts(&mut db).await?;
144
145        let inner = RwLock::new(InnerState { nullifier_tree, chain_mmr, account_tree });
146
147        let writer = Mutex::new(());
148        let db = Arc::new(db);
149
150        Ok(Self { db, block_store, inner, writer })
151    }
152
153    /// Applies the changes of a new block to the DB and in-memory data structures.
154    ///
155    /// ## Note on state consistency
156    ///
157    /// The server contains in-memory representations of the existing trees. These
158    /// representations must be kept consistent with the committed data so that all endpoints
159    /// return consistent results. In order to achieve consistency, the following steps are
160    /// used:
161    ///
162    /// - the request data is validated, prior to starting any modifications.
163    /// - the block is saved to the block store in parallel with updating the DB, but before
164    ///   committing. At this point the block is only a candidate and is not yet available for
165    ///   reading, because the latest block pointer has not been updated yet.
166    /// - a transaction is opened in the DB and the writes are started.
167    /// - while the transaction is not committed, concurrent reads are allowed from both the DB and
168    ///   the in-memory representations, which are consistent at this stage.
169    /// - prior to committing the changes to the DB, an exclusive lock on the in-memory data is
170    ///   acquired, preventing concurrent reads of the in-memory data, since it would otherwise be
171    ///   out-of-sync w.r.t. the DB.
172    /// - the DB transaction is committed, and requests that read only from the DB can proceed to
173    ///   use the fresh data.
174    /// - the in-memory structures are updated, including the latest block pointer, and the lock is
175    ///   released.
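    ///
    /// In short, the write path is: validate the request, save the block candidate to the block
    /// store, start the DB transaction, acquire the in-memory write lock, commit the DB
    /// transaction, and finally update the in-memory structures (including the latest block
    /// pointer) before releasing the lock.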
176    // TODO: This span is logged in a root span, we should connect it to the parent span.
177    #[instrument(target = COMPONENT, skip_all, err)]
178    pub async fn apply_block(&self, block: Block) -> Result<(), ApplyBlockError> {
179        let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?;
180
181        let header = block.header();
182
183        let tx_hash = block.compute_tx_hash();
184        if header.tx_hash() != tx_hash {
185            return Err(InvalidBlockError::InvalidBlockTxHash {
186                expected: tx_hash,
187                actual: header.tx_hash(),
188            }
189            .into());
190        }
191
192        let block_num = header.block_num();
193        let block_hash = block.hash();
194
195        // ensures the right block header is being processed
196        let prev_block = self
197            .db
198            .select_block_header_by_block_num(None)
199            .await?
200            .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?;
201
202        if block_num != prev_block.block_num() + 1 {
203            return Err(InvalidBlockError::NewBlockInvalidBlockNum.into());
204        }
205        if header.prev_hash() != prev_block.hash() {
206            return Err(InvalidBlockError::NewBlockInvalidPrevHash.into());
207        }
208
209        let block_data = block.to_bytes();
210
211        // Save the block to the block store. If the DB transaction is rolled back, the
212        // in-memory state will be unchanged, but the block might still be written into the
213        // block store. Thus, such blocks should be considered candidates rather than
214        // finalized blocks, and we should check against the latest block number when reading
215        // a block from the store.
216        let store = Arc::clone(&self.block_store);
217        let block_save_task =
218            tokio::spawn(async move { store.save_block(block_num, &block_data).await });
219
220        // scope to read in-memory data, compute mutations required for updating account
221        // and nullifier trees, and validate the request
222        let (
223            nullifier_tree_old_root,
224            nullifier_tree_update,
225            account_tree_old_root,
226            account_tree_update,
227        ) = {
228            let inner = self.inner.read().await;
229
230            let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered();
231
232            // nullifiers can be produced only once
233            let duplicate_nullifiers: Vec<_> = block
234                .nullifiers()
235                .iter()
236                .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some())
237                .copied()
238                .collect();
239            if !duplicate_nullifiers.is_empty() {
240                return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into());
241            }
242
243            // compute updates for the in-memory data structures
244
245            // new_block.chain_root must be equal to the chain MMR root prior to the update
246            let peaks = inner.chain_mmr.peaks();
247            if peaks.hash_peaks() != header.chain_root() {
248                return Err(InvalidBlockError::NewBlockInvalidChainRoot.into());
249            }
250
251            // compute update for nullifier tree
252            let nullifier_tree_update = inner.nullifier_tree.compute_mutations(
253                block.nullifiers().iter().map(|nullifier| (*nullifier, block_num)),
254            );
255
256            if nullifier_tree_update.root() != header.nullifier_root() {
257                return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into());
258            }
259
260            // compute update for account tree
261            let account_tree_update = inner.account_tree.compute_mutations(
262                block.updated_accounts().iter().map(|update| {
263                    (
264                        LeafIndex::new_max_depth(update.account_id().prefix().into()),
265                        update.new_state_hash().into(),
266                    )
267                }),
268            );
269
270            if account_tree_update.root() != header.account_root() {
271                return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into());
272            }
273
274            (
275                inner.nullifier_tree.root(),
276                nullifier_tree_update,
277                inner.account_tree.root(),
278                account_tree_update,
279            )
280        };
281
282        // build note tree
283        let note_tree = block.build_note_tree();
284        if note_tree.root() != header.note_root() {
285            return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into());
286        }
287
288        let notes = block
289            .notes()
290            .map(|(note_index, note)| {
291                let details = match note {
292                    OutputNote::Full(note) => Some(note.to_bytes()),
293                    OutputNote::Header(_) => None,
294                    note @ OutputNote::Partial(_) => {
295                        return Err(InvalidBlockError::InvalidOutputNoteType(Box::new(
296                            note.clone(),
297                        )));
298                    },
299                };
300
301                let merkle_path = note_tree.get_note_path(note_index);
302
303                Ok(NoteRecord {
304                    block_num,
305                    note_index,
306                    note_id: note.id().into(),
307                    metadata: *note.metadata(),
308                    details,
309                    merkle_path,
310                })
311            })
312            .collect::<Result<Vec<NoteRecord>, InvalidBlockError>>()?;
313
314        // Signals the transaction is ready to be committed, and the write lock can be acquired
315        let (allow_acquire, acquired_allowed) = oneshot::channel::<()>();
316        // Signals the write lock has been acquired, and the transaction can be committed
317        let (inform_acquire_done, acquire_done) = oneshot::channel::<()>();
318
319        // The DB and in-memory state updates need to be synchronized and are partially
320        // overlapping. Namely, the DB transaction only proceeds after this task acquires the
321        // in-memory write lock. This requires the DB update to run concurrently, so a new task is
322        // spawned.
323        let db = Arc::clone(&self.db);
324        let db_update_task =
325            tokio::spawn(
326                async move { db.apply_block(allow_acquire, acquire_done, block, notes).await },
327            );
328
329        // Wait for the DB update task to signal that the transaction is ready to be committed
330        acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?;
331
332        // Wait for the block saving task to complete without errors
333        block_save_task.await??;
334
335        // Scope to update the in-memory data
336        {
337            // We need to hold the write lock here to prevent inconsistency between the in-memory
338            // state and the DB state. Thus, we need to wait for the DB update task to complete
339            // successfully.
340            let mut inner = self.inner.write().await;
341
342            // We need to check that neither the nullifier tree nor the account tree has changed
343            // while we were waiting for the DB preparation task to complete. If either of them
344            // did change, we do not proceed with in-memory and database updates, since it may
345            // lead to an inconsistent state.
346            if inner.nullifier_tree.root() != nullifier_tree_old_root
347                || inner.account_tree.root() != account_tree_old_root
348            {
349                return Err(ApplyBlockError::ConcurrentWrite);
350            }
351
352            // Notify the DB update task that the write lock has been acquired, so it can commit
353            // the DB transaction
354            inform_acquire_done
355                .send(())
356                .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?;
357
358            // TODO: shutdown #91
359            // Wait for the successful commit of the DB transaction. If the commit fails, we mustn't
360            // change in-memory state, so we return a block applying error and don't proceed with
361            // in-memory updates.
362            db_update_task
363                .await?
364                .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.to_string()))?;
365
366            // Update the in-memory data structures after successful commit of the DB transaction
367            inner
368                .nullifier_tree
369                .apply_mutations(nullifier_tree_update)
370                .expect("Unreachable: old nullifier tree root must be checked before this step");
371            inner
372                .account_tree
373                .apply_mutations(account_tree_update)
374                .expect("Unreachable: old account tree root must be checked before this step");
375            inner.chain_mmr.add(block_hash);
376        }
377
378        info!(%block_hash, block_num = block_num.as_u32(), COMPONENT, "apply_block successful");
379
380        Ok(())
381    }
382
383    /// Queries a [BlockHeader] from the database, and returns it alongside its inclusion proof.
384    ///
385    /// If [None] is given as the value of `block_num`, the data for the latest [BlockHeader] is
386    /// returned.
387    #[instrument(target = COMPONENT, skip_all, ret(level = "debug"), err)]
388    pub async fn get_block_header(
389        &self,
390        block_num: Option<BlockNumber>,
391        include_mmr_proof: bool,
392    ) -> Result<(Option<BlockHeader>, Option<MmrProof>), GetBlockHeaderError> {
393        let block_header = self.db.select_block_header_by_block_num(block_num).await?;
394        if let Some(header) = block_header {
395            let mmr_proof = if include_mmr_proof {
396                let inner = self.inner.read().await;
397                let mmr_proof = inner.chain_mmr.open(header.block_num().as_usize())?;
398                Some(mmr_proof)
399            } else {
400                None
401            };
402            Ok((Some(header), mmr_proof))
403        } else {
404            Ok((None, None))
405        }
406    }
407
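    /// Queries the database for nullifiers matching the given prefixes, returning each matching
    /// nullifier together with the block number in which it was consumed.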
408    pub async fn check_nullifiers_by_prefix(
409        &self,
410        prefix_len: u32,
411        nullifier_prefixes: Vec<u32>,
412    ) -> Result<Vec<NullifierInfo>, DatabaseError> {
413        self.db.select_nullifiers_by_prefix(prefix_len, nullifier_prefixes).await
414    }
415
416    /// Generates membership proofs for each of the `nullifiers` against the latest nullifier
417    /// tree.
418    ///
419    /// Note: these proofs are invalidated once the nullifier tree is modified, i.e. on a new block.
420    #[instrument(target = COMPONENT, skip_all, ret(level = "debug"))]
421    pub async fn check_nullifiers(&self, nullifiers: &[Nullifier]) -> Vec<SmtProof> {
422        let inner = self.inner.read().await;
423        nullifiers.iter().map(|n| inner.nullifier_tree.open(n)).collect()
424    }
425
426    /// Queries a list of [`NoteRecord`] from the database.
427    ///
428    /// If the provided list of [`NoteId`] is empty or no [`NoteRecord`] matches any of the
429    /// provided [`NoteId`]s, an empty list is returned.
430    pub async fn get_notes_by_id(
431        &self,
432        note_ids: Vec<NoteId>,
433    ) -> Result<Vec<NoteRecord>, DatabaseError> {
434        self.db.select_notes_by_id(note_ids).await
435    }
436
437    /// Queries the database for inclusion proofs of the notes with the given IDs.
438    pub async fn get_note_authentication_info(
439        &self,
440        note_ids: BTreeSet<NoteId>,
441    ) -> Result<NoteAuthenticationInfo, GetNoteInclusionProofError> {
442        // First we grab block-inclusion proofs for the known notes. These proofs only
443        // prove that the note was included in a given block. We then also need to prove that
444        // each of those blocks is included in the chain.
445        let note_proofs = self.db.select_note_inclusion_proofs(note_ids).await?;
446
447        // The set of blocks that the notes are included in.
448        let blocks = note_proofs
449            .values()
450            .map(|proof| proof.location().block_num())
451            .collect::<BTreeSet<_>>()
452            .into_iter()
453            .collect::<Vec<_>>();
454
455        // Grab the block merkle paths from the inner state.
456        //
457        // NOTE: Scoped block to automatically drop the mutex guard asap.
458        //
459        // We also avoid accessing the db in the block as this would delay
460        // dropping the guard.
461        let (chain_length, merkle_paths) = {
462            let state = self.inner.read().await;
463            let chain_length = state.chain_mmr.forest();
464
465            let paths = blocks
466                .iter()
467                .map(|&block_num| {
468                    let proof = state.chain_mmr.open(block_num.as_usize())?.merkle_path;
469
470                    Ok::<_, MmrError>((block_num, proof))
471                })
472                .collect::<Result<BTreeMap<_, _>, MmrError>>()?;
473
474            let chain_length = u32::try_from(chain_length)
475                .expect("Forest is a chain length so should fit into a u32");
476
477            (chain_length.into(), paths)
478        };
479
480        let headers = self.db.select_block_headers(blocks).await?;
481        let headers = headers
482            .into_iter()
483            .map(|header| (header.block_num(), header))
484            .collect::<BTreeMap<BlockNumber, _>>();
485
486        let mut block_proofs = Vec::with_capacity(merkle_paths.len());
487        for (block_num, mmr_path) in merkle_paths {
488            let block_header =
489                *headers.get(&block_num).ok_or(DatabaseError::BlockNotFoundInDb(block_num))?;
490
491            block_proofs.push(BlockInclusionProof { block_header, mmr_path, chain_length });
492        }
493
494        Ok(NoteAuthenticationInfo { block_proofs, note_proofs })
495    }
496
497    /// Loads data to synchronize a client.
498    ///
499    /// The client's request contains a list of tag prefixes; this method will return the first
500    /// block with a matching tag, or the chain tip. All the other values are filtered based on
501    /// this block range.
502    ///
503    /// # Arguments
504    ///
505    /// - `block_num`: The last block *known* by the client; updates start from the next block.
506    /// - `account_ids`: Include an account's hash if its _last change_ was in the result's block
507    ///   range.
508    /// - `note_tags`: The tags the client is interested in; the result is restricted to the first
509    ///   block with any matching tags.
510    /// - `nullifier_prefixes`: Only the 16 high bits of the nullifiers the client is interested in;
511    ///   results will include nullifiers matching prefixes produced in the given block range.
512    #[instrument(target = COMPONENT, skip_all, ret(level = "debug"), err)]
513    pub async fn sync_state(
514        &self,
515        block_num: BlockNumber,
516        account_ids: Vec<AccountId>,
517        note_tags: Vec<u32>,
518        nullifier_prefixes: Vec<u32>,
519    ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> {
520        let inner = self.inner.read().await;
521
522        let state_sync = self
523            .db
524            .get_state_sync(block_num, account_ids, note_tags, nullifier_prefixes)
525            .await?;
526
527        let delta = if block_num == state_sync.block_header.block_num() {
528            // The client is in sync with the chain tip.
529            MmrDelta {
530                forest: block_num.as_usize(),
531                data: vec![],
532            }
533        } else {
534            // Important notes about the boundary conditions:
535            //
536            // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root
537            //   contained in the block header always lags behind by one block; this is because
538            //   the Mmr leaves are hashes of block headers, and we can't have self-referential
539            //   hashes. These two points cancel out and don't require adjusting.
540            // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined
541            //   to be exclusive, so the from_forest has to be adjusted with a +1.
542            //
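            // For example, if the client's last known block is 5 and the block returned by the
            // state sync query is block 9, then from_forest = 6 and to_forest = 9.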
543            let from_forest = (block_num + 1).as_usize();
544            let to_forest = state_sync.block_header.block_num().as_usize();
545            inner
546                .chain_mmr
547                .get_delta(from_forest, to_forest)
548                .map_err(StateSyncError::FailedToBuildMmrDelta)?
549        };
550
551        Ok((state_sync, delta))
552    }
553
554    /// Loads data to synchronize a client's notes.
555    ///
556    /// The client's request contains a list of tags; this method will return the first
557    /// block with a matching tag, or the chain tip. All the other values are filtered based on
558    /// this block range.
559    ///
560    /// # Arguments
561    ///
562    /// - `block_num`: The last block *known* by the client; updates start from the next block.
563    /// - `note_tags`: The tags the client is interested in; resulting notes are restricted to the
564    ///   first block containing a matching note.
565    #[instrument(target = COMPONENT, skip_all, ret(level = "debug"), err)]
566    pub async fn sync_notes(
567        &self,
568        block_num: BlockNumber,
569        note_tags: Vec<u32>,
570    ) -> Result<(NoteSyncUpdate, MmrProof), NoteSyncError> {
571        let inner = self.inner.read().await;
572
573        let note_sync = self.db.get_note_sync(block_num, note_tags).await?;
574
575        let mmr_proof = inner.chain_mmr.open(note_sync.block_header.block_num().as_usize())?;
576
577        Ok((note_sync, mmr_proof))
578    }
579
580    /// Returns data needed by the block producer to construct and prove the next block.
581    pub async fn get_block_inputs(
582        &self,
583        account_ids: &[AccountId],
584        nullifiers: &[Nullifier],
585        unauthenticated_notes: BTreeSet<NoteId>,
586    ) -> Result<BlockInputs, GetBlockInputsError> {
587        let inner = self.inner.read().await;
588
589        let latest = self
590            .db
591            .select_block_header_by_block_num(None)
592            .await?
593            .ok_or(GetBlockInputsError::DbBlockHeaderEmpty)?;
594
595        // sanity check
596        if inner.chain_mmr.forest() != latest.block_num().as_usize() + 1 {
597            return Err(GetBlockInputsError::IncorrectChainMmrForestNumber {
598                forest: inner.chain_mmr.forest(),
599                block_num: latest.block_num(),
600            });
601        }
602
603        // using the current block number gets us the peaks of the chain MMR as of one block ago;
604        // this is done so that latest.chain_root matches the returned peaks
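        //
        // For example, if the latest block is block 7, the chain MMR forest is 8 (per the sanity
        // check above), and `peaks_at(7)` returns the peaks over blocks 0..=6, which is exactly
        // the state committed to by block 7's `chain_root`.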
605        let chain_peaks =
606            inner.chain_mmr.peaks_at(latest.block_num().as_usize()).map_err(|error| {
607                GetBlockInputsError::FailedToGetMmrPeaksForForest {
608                    forest: latest.block_num().as_usize(),
609                    error,
610                }
611            })?;
612        let account_states = account_ids
613            .iter()
614            .copied()
615            .map(|account_id| {
616                let ValuePath { value: account_hash, path: proof } =
617                    inner.account_tree.open(&LeafIndex::new_max_depth(account_id.prefix().into()));
618                Ok(AccountInputRecord { account_id, account_hash, proof })
619            })
620            .collect::<Result<_, AccountError>>()?;
621
622        let nullifiers: Vec<NullifierWitness> = nullifiers
623            .iter()
624            .map(|nullifier| {
625                let proof = inner.nullifier_tree.open(nullifier);
626
627                NullifierWitness { nullifier: *nullifier, proof }
628            })
629            .collect();
630
631        let found_unauthenticated_notes =
632            self.get_note_authentication_info(unauthenticated_notes).await?;
633
634        Ok(BlockInputs {
635            block_header: latest,
636            chain_peaks,
637            account_states,
638            nullifiers,
639            found_unauthenticated_notes,
640        })
641    }
642
643    /// Returns data needed by the block producer to verify the validity of transactions.
644    #[instrument(target = COMPONENT, skip_all, ret)]
645    pub async fn get_transaction_inputs(
646        &self,
647        account_id: AccountId,
648        nullifiers: &[Nullifier],
649        unauthenticated_notes: Vec<NoteId>,
650    ) -> Result<TransactionInputs, DatabaseError> {
651        info!(target: COMPONENT, account_id = %account_id.to_string(), nullifiers = %format_array(nullifiers));
652
653        let inner = self.inner.read().await;
654
655        let account_hash = inner
656            .account_tree
657            .open(&LeafIndex::new_max_depth(account_id.prefix().into()))
658            .value;
659
660        let nullifiers = nullifiers
661            .iter()
662            .map(|nullifier| NullifierInfo {
663                nullifier: *nullifier,
664                block_num: inner.nullifier_tree.get_block_num(nullifier).unwrap_or_default(),
665            })
666            .collect();
667
668        let found_unauthenticated_notes =
669            self.db.select_note_ids(unauthenticated_notes.clone()).await?;
670
671        Ok(TransactionInputs {
672            account_hash,
673            nullifiers,
674            found_unauthenticated_notes,
675        })
676    }
677
678    /// Returns details for a public (on-chain) account.
679    pub async fn get_account_details(&self, id: AccountId) -> Result<AccountInfo, DatabaseError> {
680        self.db.select_account(id).await
681    }
682
683    /// Returns account proofs with optional account and storage headers.
684    pub async fn get_account_proofs(
685        &self,
686        account_requests: Vec<AccountProofRequest>,
687        known_code_commitments: BTreeSet<RpoDigest>,
688        include_headers: bool,
689    ) -> Result<(BlockNumber, Vec<AccountProofsResponse>), DatabaseError> {
690        // Lock inner state for the whole operation. We need to hold this lock to prevent the
691        // database, account tree and latest block number from changing during the operation,
692        // because changing one of them would lead to an inconsistent state.
693        let inner_state = self.inner.read().await;
694
695        let account_ids: Vec<AccountId> =
696            account_requests.iter().map(|req| req.account_id).collect();
697
698        let state_headers = if include_headers.not() {
699            BTreeMap::<AccountId, AccountStateHeader>::default()
700        } else {
701            let infos = self.db.select_accounts_by_ids(account_ids.clone()).await?;
702            if account_ids.len() > infos.len() {
703                let found_ids = infos.iter().map(|info| info.summary.account_id).collect();
704                return Err(DatabaseError::AccountsNotFoundInDb(
705                    BTreeSet::from_iter(account_ids).difference(&found_ids).copied().collect(),
706                ));
707            }
708
709            let mut headers_map = BTreeMap::new();
710
711            // Iterate and build state headers for public accounts
712            for request in account_requests {
713                let account_info = infos
714                    .iter()
715                    .find(|info| info.summary.account_id == request.account_id)
716                    .expect("retrieved accounts were validated against request");
717
718                if let Some(details) = &account_info.details {
719                    let mut storage_slot_map_keys = Vec::new();
720
721                    for StorageMapKeysProof { storage_index, storage_keys } in
722                        &request.storage_requests
723                    {
724                        if let Some(StorageSlot::Map(storage_map)) =
725                            details.storage().slots().get(*storage_index as usize)
726                        {
727                            for map_key in storage_keys {
728                                let proof = storage_map.open(map_key);
729
730                                let slot_map_key = StorageSlotMapProof {
731                                    storage_slot: u32::from(*storage_index),
732                                    smt_proof: proof.to_bytes(),
733                                };
734                                storage_slot_map_keys.push(slot_map_key);
735                            }
736                        } else {
737                            return Err(AccountError::StorageSlotNotMap(*storage_index).into());
738                        }
739                    }
740
741                    // Only include unknown account codes
742                    let account_code = known_code_commitments
743                        .contains(&details.code().commitment())
744                        .not()
745                        .then(|| details.code().to_bytes());
746
747                    let state_header = AccountStateHeader {
748                        header: Some(AccountHeader::from(details).into()),
749                        storage_header: details.storage().get_header().to_bytes(),
750                        account_code,
751                        storage_maps: storage_slot_map_keys,
752                    };
753
754                    headers_map.insert(account_info.summary.account_id, state_header);
755                }
756            }
757
758            headers_map
759        };
760
761        let responses = account_ids
762            .into_iter()
763            .map(|account_id| {
764                let acc_leaf_idx = LeafIndex::new_max_depth(account_id.prefix().into());
765                let opening = inner_state.account_tree.open(&acc_leaf_idx);
766                let state_header = state_headers.get(&account_id).cloned();
767
768                AccountProofsResponse {
769                    account_id: Some(account_id.into()),
770                    account_hash: Some(opening.value.into()),
771                    account_proof: Some(opening.path.into()),
772                    state_header,
773                }
774            })
775            .collect();
776
777        Ok((inner_state.latest_block_num(), responses))
778    }
779
780    /// Returns the state delta between `from_block` (exclusive) and `to_block` (inclusive) for the
781    /// given account.
782    pub(crate) async fn get_account_state_delta(
783        &self,
784        account_id: AccountId,
785        from_block: BlockNumber,
786        to_block: BlockNumber,
787    ) -> Result<Option<AccountDelta>, DatabaseError> {
788        self.db
789            .select_account_state_delta(account_id, from_block, to_block)
790            .await
791            .map_err(Into::into)
792    }
793
794    /// Loads a block from the block store. Returns `Ok(None)` if the block is not found.
795    pub async fn load_block(
796        &self,
797        block_num: BlockNumber,
798    ) -> Result<Option<Vec<u8>>, DatabaseError> {
799        if block_num > self.latest_block_num().await {
800            return Ok(None);
801        }
802        self.block_store.load_block(block_num).await.map_err(Into::into)
803    }
804
805    /// Returns the latest block number.
806    pub async fn latest_block_num(&self) -> BlockNumber {
807        self.inner.read().await.latest_block_num()
808    }
809}
810
811// UTILITIES
812// ================================================================================================
813
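/// Reconstructs the in-memory nullifier tree from all nullifiers stored in the DB.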
814#[instrument(target = COMPONENT, skip_all)]
815async fn load_nullifier_tree(db: &mut Db) -> Result<NullifierTree, StateInitializationError> {
816    let nullifiers = db.select_all_nullifiers().await?;
817    let len = nullifiers.len();
818
819    let now = Instant::now();
820    let nullifier_tree = NullifierTree::with_entries(nullifiers)
821        .map_err(StateInitializationError::FailedToCreateNullifierTree)?;
822    let elapsed = now.elapsed().as_secs();
823
824    info!(
825        num_of_leaves = len,
826        tree_construction = elapsed,
827        COMPONENT,
828        "Loaded nullifier tree"
829    );
830    Ok(nullifier_tree)
831}
832
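/// Reconstructs the in-memory chain MMR from the hashes of all block headers stored in the DB.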
833#[instrument(target = COMPONENT, skip_all)]
834async fn load_mmr(db: &mut Db) -> Result<Mmr, StateInitializationError> {
835    let block_hashes: Vec<RpoDigest> =
836        db.select_all_block_headers().await?.iter().map(BlockHeader::hash).collect();
837
838    Ok(block_hashes.into())
839}
840
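/// Reconstructs the in-memory account tree from the latest account state hashes stored in the DB.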
841#[instrument(target = COMPONENT, skip_all)]
842async fn load_accounts(
843    db: &mut Db,
844) -> Result<SimpleSmt<ACCOUNT_TREE_DEPTH>, StateInitializationError> {
845    let account_data: Vec<_> = db
846        .select_all_account_hashes()
847        .await?
848        .into_iter()
849        .map(|(id, account_hash)| (id.prefix().into(), account_hash.into()))
850        .collect();
851
852    SimpleSmt::with_leaves(account_data)
853        .map_err(StateInitializationError::FailedToCreateAccountsTree)
854}