miden-node-store 0.14.8

Miden node's state store component
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::mem::size_of;
use std::ops::{Deref, DerefMut, RangeInclusive};
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::Context;
use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection};
use miden_node_proto::domain::account::AccountInfo;
use miden_node_proto::{BlockProofRequest, generated as proto};
use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES;
use miden_node_utils::tracing::OpenTelemetrySpanExt;
use miden_protocol::Word;
use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader, StorageMapKey};
use miden_protocol::asset::{Asset, AssetVaultKey};
use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, SignedBlock};
use miden_protocol::crypto::merkle::SparseMerklePath;
use miden_protocol::note::{
    NoteDetails,
    NoteId,
    NoteInclusionProof,
    NoteMetadata,
    NoteScript,
    Nullifier,
};
use miden_protocol::transaction::TransactionHeader;
use miden_protocol::utils::serde::{Deserializable, Serializable};
use tokio::sync::oneshot;
use tracing::{info, instrument};

use crate::COMPONENT;
use crate::db::migrations::apply_migrations;
use crate::db::models::conv::SqlTypeConvert;
pub use crate::db::models::queries::{
    AccountCommitmentsPage,
    NullifiersPage,
    PublicAccountIdsPage,
};
use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage};
use crate::db::models::{Page, queries};
use crate::errors::{DatabaseError, NoteSyncError};
use crate::genesis::GenesisBlock;

// Estimated serialized size of a single storage-map row: two `Word`s (key and
// value), a `u32` (block number, judging by usage elsewhere in this module),
// and one `u8` — used purely for response-size budgeting below.
const STORAGE_MAP_VALUE_PER_ROW_BYTES: usize =
    2 * size_of::<Word>() + size_of::<u32>() + size_of::<u8>();

/// Default per-page limit on storage map entries, chosen so that a full page of
/// rows stays within `MAX_RESPONSE_PAYLOAD_BYTES`.
fn default_storage_map_entries_limit() -> usize {
    MAX_RESPONSE_PAYLOAD_BYTES / STORAGE_MAP_VALUE_PER_ROW_BYTES
}

// Database schema migrations and their application logic.
mod migrations;
// Hashing of the schema, presumably to detect drift — see module for details.
mod schema_hash;

#[cfg(test)]
mod tests;

// Query implementations and row <-> domain-type conversions.
pub(crate) mod models;

/// [diesel](https://diesel.rs) generated schema
///
/// ```sh
/// cargo binstall diesel_cli
/// sqlite3 -init ./src/db/migrations/001-init.sql ephemeral_setup.db ""
/// diesel setup --database-url=./ephemeral_setup.db
/// diesel print-schema > src/db/schema.rs
/// ```
///
/// which assumes an _existing_ database.
///
/// Unfortunately, there is no systematic way of modifying the schema other
/// than patching (in the diff sense) which is brittle at best.
/// So the above must be followed by a manual editing step, for now it's
/// limited to:
///
/// * `i64`/`u64` being represented as `BigInt`
///
/// The list might be extended.
pub(crate) mod schema;

/// Convenience alias: results in this module default to [`DatabaseError`].
pub type Result<T, E = DatabaseError> = std::result::Result<T, E>;

/// The Store's database.
///
/// Extends the underlying [`miden_node_db::Db`] type with functionality specific to the Store.
pub struct Db {
    // Generic database handle; exposed through the `Deref`/`DerefMut` impls below.
    db: miden_node_db::Db,
}

// Lets the Store's `Db` be used wherever the generic `miden_node_db::Db` is
// expected (read-only access to the inner handle).
impl Deref for Db {
    type Target = miden_node_db::Db;

    fn deref(&self) -> &Self::Target {
        &self.db
    }
}

// Mutable counterpart of the `Deref` impl above.
impl DerefMut for Db {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.db
    }
}

/// Describes the value of an asset for an account ID at `block_num` specifically.
///
/// If `asset` is `None`, the asset was removed.
#[derive(Debug, Clone)]
pub struct AccountVaultValue {
    /// Block at which this vault entry took effect.
    pub block_num: BlockNumber,
    /// Key identifying the asset slot within the account's vault.
    pub vault_key: AssetVaultKey,
    /// None if the asset was removed
    pub asset: Option<Asset>,
}

impl AccountVaultValue {
    /// Decodes a raw database row `(block number, serialized vault key,
    /// optional serialized asset)` into an [`AccountVaultValue`].
    ///
    /// Fails if any of the three columns cannot be deserialized or converted.
    pub fn from_raw_row(row: (i64, Vec<u8>, Option<Vec<u8>>)) -> Result<Self, DatabaseError> {
        let (raw_block_num, raw_vault_key, raw_asset) = row;

        // A `None` asset column means the asset was removed at this block.
        let asset = match raw_asset {
            Some(bytes) => Some(Asset::read_from_bytes(&bytes)?),
            None => None,
        };
        let key_word = Word::read_from_bytes(&raw_vault_key)?;

        Ok(Self {
            block_num: BlockNumber::from_raw_sql(raw_block_num)?,
            vault_key: AssetVaultKey::try_from(key_word)?,
            asset,
        })
    }
}

/// A nullifier together with the block in which it was recorded.
#[derive(Debug, PartialEq)]
pub struct NullifierInfo {
    pub nullifier: Nullifier,
    pub block_num: BlockNumber,
}

// Allows comparing a `NullifierInfo` directly against a
// `(Nullifier, BlockNumber)` pair.
impl PartialEq<(Nullifier, BlockNumber)> for NullifierInfo {
    fn eq(&self, other: &(Nullifier, BlockNumber)) -> bool {
        self.nullifier == other.0 && self.block_num == other.1
    }
}

/// A transaction's header plus the block it was committed in and the inclusion
/// proofs of its committed output notes.
#[derive(Debug, PartialEq)]
pub struct TransactionRecord {
    pub block_num: BlockNumber,
    pub header: TransactionHeader,
    /// Inclusion proofs for committed output notes. Notes in `header.output_notes()` without
    /// a corresponding proof here were erased (created and consumed within the same batch).
    pub output_note_proofs: Vec<NoteSyncRecord>,
}

impl TransactionRecord {
    /// Convert to proto `TransactionRecord`.
    ///
    /// The proto `TransactionHeader` contains all output notes as `NoteHeader`. Inclusion
    /// proofs for committed output notes are placed separately in
    /// `TransactionRecord.output_note_proofs`. Erased notes can be identified by comparing
    /// note IDs in the proofs with the header's output notes.
    pub fn into_proto(self) -> proto::rpc::TransactionRecord {
        let Self { block_num, header, output_note_proofs } = self;

        // Convert each output-note sync record into its proto inclusion proof.
        let mut proofs = Vec::with_capacity(output_note_proofs.len());
        for record in output_note_proofs {
            proofs.push(proto::note::NoteInclusionInBlockProof {
                note_id: Some(record.note_id.into()),
                block_num: record.block_num.as_u32(),
                note_index_in_block: record.note_index.leaf_index_value().into(),
                inclusion_path: Some(record.inclusion_path.into()),
            });
        }

        let input_notes = header.input_notes().iter().cloned().map(Into::into).collect();
        let output_notes = header.output_notes().iter().cloned().map(Into::into).collect();
        let proto_header = proto::transaction::TransactionHeader {
            transaction_id: Some(header.id().into()),
            account_id: Some(header.account_id().into()),
            initial_state_commitment: Some(header.initial_state_commitment().into()),
            final_state_commitment: Some(header.final_state_commitment().into()),
            input_notes,
            output_notes,
            fee: Some(Asset::from(header.fee()).into()),
        };

        proto::rpc::TransactionRecord {
            header: Some(proto_header),
            block_num: block_num.as_u32(),
            output_note_proofs: proofs,
        }
    }
}

/// A note as stored in the database: its position in a block, identifiers,
/// metadata, optional full details, and the Merkle path proving inclusion.
#[derive(Debug, Clone, PartialEq)]
pub struct NoteRecord {
    pub block_num: BlockNumber,
    pub note_index: BlockNoteIndex,
    pub note_id: Word,
    pub note_commitment: Word,
    pub metadata: NoteMetadata,
    // `None` when the full note details are not stored (e.g. private notes —
    // TODO confirm against the write path).
    pub details: Option<NoteDetails>,
    pub inclusion_path: SparseMerklePath,
}

// Converts a stored note into its proto `CommittedNote` representation:
// an inclusion proof plus metadata and (optionally) serialized details.
impl From<NoteRecord> for proto::note::CommittedNote {
    fn from(record: NoteRecord) -> Self {
        Self {
            inclusion_proof: Some(proto::note::NoteInclusionInBlockProof {
                note_id: Some(record.note_id.into()),
                block_num: record.block_num.as_u32(),
                note_index_in_block: record.note_index.leaf_index_value().into(),
                inclusion_path: Some(record.inclusion_path.into()),
            }),
            note: Some(proto::note::Note {
                metadata: Some(record.metadata.into()),
                details: record.details.map(|d| d.to_bytes()),
            }),
        }
    }
}

/// Result of a note sync query: the matching note records and the header of
/// the block the sync response refers to.
#[derive(Debug, PartialEq)]
pub struct NoteSyncUpdate {
    pub notes: Vec<NoteSyncRecord>,
    pub block_header: BlockHeader,
}

/// A slimmed-down [`NoteRecord`] used in sync responses: omits the commitment
/// and full details, keeping only what is needed to prove inclusion.
#[derive(Debug, Clone, PartialEq)]
pub struct NoteSyncRecord {
    pub block_num: BlockNumber,
    pub note_index: BlockNoteIndex,
    pub note_id: Word,
    pub metadata: NoteMetadata,
    pub inclusion_path: SparseMerklePath,
}

impl From<NoteSyncRecord> for proto::note::NoteSyncRecord {
    fn from(note: NoteSyncRecord) -> Self {
        let metadata_header = Some(note.metadata.to_header().into());
        let inclusion_proof = Some(proto::note::NoteInclusionInBlockProof {
            note_id: Some(note.note_id.into()),
            block_num: note.block_num.as_u32(),
            note_index_in_block: note.note_index.leaf_index_value().into(),
            inclusion_path: Some(note.inclusion_path.into()),
        });
        Self { metadata_header, inclusion_proof }
    }
}

impl From<NoteRecord> for NoteSyncRecord {
    fn from(note: NoteRecord) -> Self {
        Self {
            block_num: note.block_num,
            note_index: note.note_index,
            note_id: note.note_id,
            metadata: note.metadata,
            inclusion_path: note.inclusion_path,
        }
    }
}

impl Db {
    /// Creates a new database and inserts the genesis block.
    #[instrument(
        target = COMPONENT,
        name = "store.database.bootstrap",
        skip_all,
        // NOTE: a comma was missing between `fields(...)` and `err` here,
        // which the attribute grammar requires.
        fields(path=%database_filepath.display()),
        err,
    )]
    pub fn bootstrap(database_filepath: PathBuf, genesis: GenesisBlock) -> anyhow::Result<()> {
        // Create database.
        //
        // This will create the file if it does not exist, but will also happily open it if already
        // exists. In the latter case we will error out when attempting to insert the genesis
        // block so this isn't such a problem.
        let mut conn: SqliteConnection = diesel::sqlite::SqliteConnection::establish(
            database_filepath.to_str().context("database filepath is invalid")?,
        )
        .context("failed to open a database connection")?;

        miden_node_db::configure_connection_on_creation(&mut conn)?;

        // Run migrations.
        apply_migrations(&mut conn).context("failed to apply database migrations")?;

        // Insert genesis block data. Deconstruct into signed block.
        let (header, body, signature, _proof) = genesis.into_inner().into_parts();
        let genesis_block = SignedBlock::new_unchecked(header, body, signature);
        conn.transaction(move |conn| models::queries::apply_block(conn, &genesis_block, &[], None))
            .context("failed to insert genesis block")?;
        Ok(())
    }

    /// Open a connection to the DB and apply any pending migrations.
    #[instrument(target = COMPONENT, skip_all)]
    pub async fn load(database_filepath: PathBuf) -> Result<Self, DatabaseError> {
        let db = miden_node_db::Db::new(&database_filepath)?;
        info!(
            target: COMPONENT,
            sqlite= %database_filepath.display(),
            "Connected to the database"
        );

        db.query("migrations", apply_migrations).await?;
        Ok(Self { db })
    }

    /// Returns a page of nullifiers for tree rebuilding.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_nullifiers_paged(
        &self,
        page_size: std::num::NonZeroUsize,
        after_nullifier: Option<Nullifier>,
    ) -> Result<NullifiersPage> {
        self.transact("read nullifiers paged", move |conn| {
            queries::select_nullifiers_paged(conn, page_size, after_nullifier)
        })
        .await
    }

    /// Loads the nullifiers that match the prefixes from the DB.
    ///
    /// # Panics
    ///
    /// Panics if `prefix_len` is not 16; only 16-bit prefixes are supported.
    #[instrument(
        level = "debug",
        target = COMPONENT,
        skip_all,
        fields(prefix_len, prefixes = nullifier_prefixes.len()),
        ret(level = "debug"),
        err
    )]
    pub async fn select_nullifiers_by_prefix(
        &self,
        prefix_len: u32,
        nullifier_prefixes: Vec<u32>,
        block_range: RangeInclusive<BlockNumber>,
    ) -> Result<(Vec<NullifierInfo>, BlockNumber)> {
        assert_eq!(prefix_len, 16, "Only 16-bit prefixes are supported");

        // Fixed typo in the transaction label ("nullifieres" -> "nullifiers").
        self.transact("nullifiers by prefix", move |conn| {
            // The assert above guarantees the prefixes fit in 16 bits, so the
            // narrowing casts below are lossless.
            let nullifier_prefixes: Vec<u16> =
                nullifier_prefixes.into_iter().map(|prefix| prefix as u16).collect();
            queries::select_nullifiers_by_prefix(
                conn,
                prefix_len as u8,
                &nullifier_prefixes[..],
                block_range,
            )
        })
        .await
    }

    /// Search for a [`BlockHeader`] from the database by its `block_num`.
    ///
    /// When `block_number` is [None], the latest block header is returned.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_block_header_by_block_num(
        &self,
        maybe_block_number: Option<BlockNumber>,
    ) -> Result<Option<BlockHeader>> {
        self.transact("block headers by block number", move |conn| {
            let val = queries::select_block_header_by_block_num(conn, maybe_block_number)?;
            Ok(val)
        })
        .await
    }

    /// Loads multiple block headers from the DB.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_block_headers(
        &self,
        blocks: impl Iterator<Item = BlockNumber> + Send + 'static,
    ) -> Result<Vec<BlockHeader>> {
        self.transact("block headers from given block numbers", move |conn| {
            let raw = queries::select_block_headers(conn, blocks)?;
            Ok(raw)
        })
        .await
    }

    /// Loads all the block headers from the DB.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_all_block_headers(&self) -> Result<Vec<BlockHeader>> {
        self.transact("all block headers", |conn| {
            let raw = queries::select_all_block_headers(conn)?;
            Ok(raw)
        })
        .await
    }

    /// Loads the commitments of all block headers from the DB.
    // Doc comment and transaction label were copy-pasted from
    // `select_all_block_headers`; both now reflect what this method returns.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_all_block_header_commitments(&self) -> Result<Vec<BlockHeaderCommitment>> {
        self.transact("all block header commitments", |conn| {
            let raw = queries::select_all_block_header_commitments(conn)?;
            Ok(raw)
        })
        .await
    }

    /// Returns a page of account commitments for tree rebuilding.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_account_commitments_paged(
        &self,
        page_size: std::num::NonZeroUsize,
        after_account_id: Option<AccountId>,
    ) -> Result<AccountCommitmentsPage> {
        self.transact("read account commitments paged", move |conn| {
            queries::select_account_commitments_paged(conn, page_size, after_account_id)
        })
        .await
    }

    /// Returns a page of public account IDs for forest rebuilding.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_public_account_ids_paged(
        &self,
        page_size: std::num::NonZeroUsize,
        after_account_id: Option<AccountId>,
    ) -> Result<PublicAccountIdsPage> {
        self.transact("read public account IDs paged", move |conn| {
            queries::select_public_account_ids_paged(conn, page_size, after_account_id)
        })
        .await
    }

    /// Loads public account details from the DB.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_account(&self, id: AccountId) -> Result<AccountInfo> {
        self.transact("Get account details", move |conn| queries::select_account(conn, id))
            .await
    }

    /// Loads public account details for a network account by its full account ID.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_network_account_by_id(
        &self,
        account_id: AccountId,
    ) -> Result<Option<AccountInfo>> {
        self.transact("Get network account by id", move |conn| {
            queries::select_network_account_by_id(conn, account_id)
        })
        .await
    }

    /// Returns network account IDs within the specified block range (based on account creation
    /// block).
    ///
    /// The function may return fewer accounts than exist in the range if the result would exceed
    /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is
    /// truncated at a block boundary to ensure all accounts from included blocks are returned.
    ///
    /// # Returns
    ///
    /// A tuple containing:
    /// - A vector of network account IDs.
    /// - The last block number that was fully included in the result. When truncated, this will be
    ///   less than the requested range end.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_all_network_account_ids(
        &self,
        block_range: RangeInclusive<BlockNumber>,
    ) -> Result<(Vec<AccountId>, BlockNumber)> {
        self.transact("Get all network account IDs", move |conn| {
            queries::select_all_network_account_ids(conn, block_range)
        })
        .await
    }

    /// Queries vault assets at a specific block
    #[instrument(target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_account_vault_at_block(
        &self,
        account_id: AccountId,
        block_num: BlockNumber,
    ) -> Result<Vec<Asset>> {
        self.transact("Get account vault at block", move |conn| {
            queries::select_account_vault_at_block(conn, account_id, block_num)
        })
        .await
    }

    /// Queries the account code by its commitment hash.
    ///
    /// Returns `None` if no code exists with that commitment.
    #[instrument(target = COMPONENT, skip_all)]
    pub async fn select_account_code_by_commitment(
        &self,
        code_commitment: Word,
    ) -> Result<Option<Vec<u8>>> {
        self.transact("Get account code by commitment", move |conn| {
            queries::select_account_code_by_commitment(conn, code_commitment)
        })
        .await
    }

    /// Queries the account header and storage header for a specific account at a block.
    ///
    /// Returns both in a single query to avoid querying the database twice.
    /// Returns `None` if the account doesn't exist at that block.
    #[instrument(target = COMPONENT, skip_all)]
    pub async fn select_account_header_with_storage_header_at_block(
        &self,
        account_id: AccountId,
        block_num: BlockNumber,
    ) -> Result<Option<(AccountHeader, AccountStorageHeader)>> {
        self.transact("Get account header with storage header at block", move |conn| {
            queries::select_account_header_with_storage_header_at_block(conn, account_id, block_num)
        })
        .await
    }

    /// Loads the note sync update for the given note tags within `block_range`.
    ///
    /// Returns `None` when the underlying query finds nothing to sync.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn get_note_sync(
        &self,
        block_range: RangeInclusive<BlockNumber>,
        note_tags: Arc<[u32]>,
    ) -> Result<Option<NoteSyncUpdate>, NoteSyncError> {
        self.transact("notes sync task", move |conn| {
            queries::get_note_sync(conn, &note_tags, block_range)
        })
        .await
    }

    /// Loads all the [`miden_protocol::note::Note`]s matching a certain [`NoteId`] from the
    /// database.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_notes_by_id(&self, note_ids: Vec<NoteId>) -> Result<Vec<NoteRecord>> {
        self.transact("note by id", move |conn| {
            queries::select_notes_by_id(conn, note_ids.as_slice())
        })
        .await
    }

    /// Returns all note commitments from the DB that match the provided ones.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_existing_note_commitments(
        &self,
        note_commitments: Vec<Word>,
    ) -> Result<HashSet<Word>> {
        self.transact("note by commitment", move |conn| {
            queries::select_existing_note_commitments(conn, note_commitments.as_slice())
        })
        .await
    }

    /// Loads inclusion proofs for notes matching the given note commitments.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_note_inclusion_proofs(
        &self,
        note_commitments: BTreeSet<Word>,
    ) -> Result<BTreeMap<NoteId, NoteInclusionProof>> {
        self.transact("block note inclusion proofs by commitment", move |conn| {
            models::queries::select_note_inclusion_proofs(conn, &note_commitments)
        })
        .await
    }

    /// Inserts the data of a new block into the DB.
    ///
    /// `allow_acquire` and `acquire_done` are used to synchronize writes to the DB with writes to
    /// the in-memory trees. Further details available on [`super::state::State::apply_block`].
    // TODO: This span is logged in a root span, we should connect it to the parent one.
    #[instrument(target = COMPONENT, skip_all, err)]
    pub async fn apply_block(
        &self,
        allow_acquire: oneshot::Sender<()>,
        acquire_done: oneshot::Receiver<()>,
        signed_block: SignedBlock,
        notes: Vec<(NoteRecord, Option<Nullifier>)>,
        proving_inputs: Option<BlockProofRequest>,
    ) -> Result<()> {
        self.transact("apply block", move |conn| -> Result<()> {
            models::queries::apply_block(conn, &signed_block, &notes, proving_inputs)?;

            // XXX FIXME TODO free floating mutex MUST NOT exist
            // it doesn't bind it properly to the data locked!
            if allow_acquire.send(()).is_err() {
                tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock");
            }

            models::queries::prune_history(conn, signed_block.header().block_num())?;

            // Block the (dedicated) DB thread until the in-memory tree update
            // signals completion; errors if the sender side was dropped.
            acquire_done.blocking_recv()?;

            Ok(())
        })
        .await
    }

    /// Marks a previously committed block as proven and advances the proven-in-sequence tip.
    ///
    /// Atomically clears `proving_inputs` for the given block, then walks forward from the
    /// current proven-in-sequence tip through consecutive proven blocks, marking each as
    /// proven-in-sequence. Returns the block numbers that were newly marked in-sequence.
    #[instrument(target = COMPONENT, skip_all, err)]
    pub async fn mark_proven_and_advance_sequence(
        &self,
        block_num: BlockNumber,
    ) -> Result<Vec<BlockNumber>> {
        self.transact("mark block proven", move |conn| {
            mark_proven_and_advance_sequence(conn, block_num)
        })
        .await
    }

    /// Returns the proving inputs for a given block number, if stored.
    #[instrument(level = "debug", target = COMPONENT, skip_all, err)]
    pub async fn select_block_proving_inputs(
        &self,
        block_num: BlockNumber,
    ) -> Result<Option<BlockProofRequest>> {
        self.transact("select block proving inputs", move |conn| {
            models::queries::select_block_proving_inputs(conn, block_num)
        })
        .await
    }

    /// Returns unproven block numbers greater than `after`, in ascending order, up to `limit`.
    #[instrument(level = "debug", target = COMPONENT, skip_all, err)]
    pub async fn select_unproven_blocks(
        &self,
        after: BlockNumber,
        limit: usize,
    ) -> Result<Vec<BlockNumber>> {
        self.transact("select unproven blocks", move |conn| {
            models::queries::select_unproven_blocks(conn, after, limit)
        })
        .await
    }

    /// Returns the highest block number that has been proven in sequence.
    ///
    /// This includes the genesis block, which is not technically proven, but treated as such.
    #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
    pub async fn select_latest_proven_in_sequence_block_num(&self) -> Result<BlockNumber> {
        self.transact("select latest proven block num", |conn| {
            models::queries::select_latest_proven_in_sequence_block_num(conn)
        })
        .await
    }

    /// Selects storage map values for syncing storage maps for a specific account ID.
    ///
    /// The returned values are the latest known values up to `block_range.end()`, and no values
    /// earlier than `block_range.start()` are returned.
    pub(crate) async fn select_storage_map_sync_values(
        &self,
        account_id: AccountId,
        block_range: RangeInclusive<BlockNumber>,
        entries_limit: Option<usize>,
    ) -> Result<StorageMapValuesPage> {
        let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit);

        self.transact("select storage map sync values", move |conn| {
            models::queries::select_account_storage_map_values_paged(
                conn,
                account_id,
                block_range,
                entries_limit,
            )
        })
        .await
    }

    /// Reconstructs storage map details from the database for a specific slot at a block.
    ///
    /// Used as fallback when `AccountStateForest` cache misses (historical or evicted queries).
    /// Rebuilds all entries by querying the DB and filtering to the specific slot.
    ///
    /// Returns:
    ///     - `::LimitExceeded` when too many entries are present
    ///     - `::AllEntries` if the size is less than or equal given `entries_limit`, if any
    #[instrument(target = COMPONENT, skip_all)]
    pub(crate) async fn reconstruct_storage_map_from_db(
        &self,
        account_id: AccountId,
        slot_name: miden_protocol::account::StorageSlotName,
        block_num: BlockNumber,
        entries_limit: Option<usize>,
    ) -> Result<miden_node_proto::domain::account::AccountStorageMapDetails> {
        use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries};
        use miden_protocol::EMPTY_WORD;

        // TODO this remains expensive with a large history until we implement pruning for DB
        // columns
        let mut values = Vec::new();
        let mut block_range_start = BlockNumber::GENESIS;
        let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit);

        let mut page = self
            .select_storage_map_sync_values(
                account_id,
                block_range_start..=block_num,
                Some(entries_limit),
            )
            .await?;

        values.extend(page.values);
        let mut last_block_included = page.last_block_included;

        // If the first page returned no values, the block at block_range_start has more
        // entries than the limit allows (e.g. genesis accounts with large storage maps).
        if values.is_empty() && last_block_included == block_range_start {
            return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
        }

        // Keep paging forward until we either reach `block_num` or detect that
        // pagination cannot make progress (which implies the limit was exceeded).
        loop {
            if page.last_block_included == block_num || page.last_block_included < block_range_start
            {
                break;
            }

            block_range_start = page.last_block_included.child();
            page = self
                .select_storage_map_sync_values(
                    account_id,
                    block_range_start..=block_num,
                    Some(entries_limit),
                )
                .await?;

            // No forward progress since the previous page: bail out.
            if page.last_block_included <= last_block_included {
                return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
            }

            last_block_included = page.last_block_included;
            values.extend(page.values);
        }

        if page.last_block_included != block_num {
            return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
        }

        // Filter to the specific slot and collect latest values per key
        let mut latest_values = BTreeMap::<StorageMapKey, Word>::new();
        for value in values {
            if value.slot_name == slot_name {
                let raw_key = value.key;
                latest_values.insert(raw_key, value.value);
            }
        }

        // Remove EMPTY_WORD entries (deletions)
        latest_values.retain(|_, v| *v != EMPTY_WORD);

        if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES {
            return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
        }

        // Idiom fix: collect directly instead of `Vec::from_iter(x.into_iter())`.
        let entries: Vec<_> = latest_values.into_iter().collect();
        Ok(AccountStorageMapDetails {
            slot_name,
            entries: StorageMapEntries::AllEntries(entries),
        })
    }

    /// Emits size metrics for each table in the database, and the entire database.
    #[instrument(target = COMPONENT, skip_all, err)]
    pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> {
        self.transact("db analysis", |conn| {
            #[derive(QueryableByName)]
            struct TotalSize {
                #[diesel(sql_type = diesel::sql_types::BigInt)]
                size: i64,
            }

            #[derive(QueryableByName)]
            struct Table {
                #[diesel(sql_type = diesel::sql_types::Text)]
                name: String,
                #[diesel(sql_type = diesel::sql_types::BigInt)]
                size: i64,
            }

            // `dbstat` is SQLite's virtual table exposing per-table storage stats.
            let tables =
                diesel::sql_query("SELECT name, sum(payload) AS size FROM dbstat GROUP BY name")
                    .load::<Table>(conn)?;

            let span = tracing::Span::current();
            for Table { name, size } in tables {
                span.set_attribute(format!("database.table.{name}.size"), size);
            }

            let total = diesel::sql_query(
                "SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()",
            )
            .get_result::<TotalSize>(conn)?;
            span.set_attribute("database.total.size", total.size);

            Result::<_, DatabaseError>::Ok(())
        })
        .await
        .inspect_err(|err| tracing::Span::current().set_error(err))?;

        Ok(())
    }

    /// Loads the network notes for an account that are unconsumed by a specified block number.
    /// Pagination is used to limit the number of notes returned.
    pub(crate) async fn select_unconsumed_network_notes(
        &self,
        account_id: AccountId,
        block_num: BlockNumber,
        page: Page,
    ) -> Result<(Vec<NoteRecord>, Page)> {
        self.transact("unconsumed network notes for account", move |conn| {
            models::queries::select_unconsumed_network_notes_by_account_id(
                conn, account_id, block_num, page,
            )
        })
        .await
    }

    /// Returns the vault asset changes for `account_id` within `block_range`,
    /// together with the last block number covered by the result.
    pub async fn get_account_vault_sync(
        &self,
        account_id: AccountId,
        block_range: RangeInclusive<BlockNumber>,
    ) -> Result<(BlockNumber, Vec<AccountVaultValue>)> {
        self.transact("account vault sync", move |conn| {
            queries::select_account_vault_assets(conn, account_id, block_range)
        })
        .await
    }

    /// Returns the script for a note by its root.
    pub async fn select_note_script_by_root(&self, root: Word) -> Result<Option<NoteScript>> {
        self.transact("note script by root", move |conn| {
            queries::select_note_script_by_root(conn, root)
        })
        .await
    }

    /// Returns the complete transaction records for the specified accounts within the specified
    /// block range, including state commitments and note IDs.
    ///
    /// Note: This method is size-limited (~5MB) and may not return all matching transactions
    /// if the limit is exceeded. Transactions from partial blocks are excluded to maintain
    /// consistency.
    pub async fn select_transactions_records(
        &self,
        account_ids: Vec<AccountId>,
        block_range: RangeInclusive<BlockNumber>,
    ) -> Result<(BlockNumber, Vec<TransactionRecord>)> {
        self.transact("full transactions records", move |conn| {
            queries::select_transactions_records(conn, &account_ids, block_range)
        })
        .await
    }
}

/// Marks `block_num` as proven and extends the proven-in-sequence prefix of the chain.
///
/// Within the caller's transaction this:
/// 1. Clears `proving_inputs` for `block_num` (the store's representation of "proven").
/// 2. Loads every block that is proven (`proving_inputs IS NULL`) but not yet flagged
///    `proven_in_sequence`.
/// 3. Starting from the current proven-in-sequence tip, absorbs each directly consecutive
///    proven block into the prefix and persists the new flags.
///
/// Returns the block numbers that were newly marked in-sequence (possibly empty).
///
/// # Errors
///
/// Returns [`DatabaseError::DataCorrupted`] when a proven-but-unsequenced block sits at or
/// below the current tip, since such a block should already carry the in-sequence flag.
pub(crate) fn mark_proven_and_advance_sequence(
    conn: &mut SqliteConnection,
    block_num: BlockNumber,
) -> Result<Vec<BlockNumber>, DatabaseError> {
    // "Proven" is encoded as cleared proving inputs.
    models::queries::clear_block_proving_inputs(conn, block_num)?;

    // Highest block currently known to be proven in sequence.
    let mut tip = models::queries::select_latest_proven_in_sequence_block_num(conn)?;

    // Proven blocks still missing the in-sequence flag; the walk below relies on these
    // arriving in ascending block order.
    let candidates = models::queries::select_proven_not_in_sequence_blocks(conn)?;

    let mut advanced = Vec::new();
    for candidate in candidates {
        // Anything at or below the tip should already have been sequenced; finding one
        // here means the stored flags are inconsistent.
        if candidate <= tip {
            return Err(DatabaseError::DataCorrupted(format!(
                "block {candidate} is proven but not marked in-sequence while the tip is at {tip}"
            )));
        }
        // A gap means the contiguous prefix cannot grow any further.
        if candidate != tip + 1 {
            break;
        }
        tip = candidate;
        advanced.push(candidate);
    }

    // Persist the flag for the contiguous run we absorbed, if any.
    if let (Some(&from), Some(&to)) = (advanced.first(), advanced.last()) {
        models::queries::mark_blocks_as_proven_in_sequence(conn, from, to)?;
    }

    Ok(advanced)
}