blvm-node 0.1.2

Bitcoin Commons BLVM: Minimal Bitcoin node implementation using blvm-protocol and blvm-consensus
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
//! Storage layer for blvm-node
//!
//! This module provides persistent storage for blocks, UTXO set, and chain state.
//! Supports multiple database backends via feature flags (tidesdb, redb, sled, rocksdb).

pub mod assumeutxo;
pub mod bitcoin_core_blocks;
pub mod bitcoin_core_detection;
pub mod bitcoin_core_format;
#[cfg(feature = "rocksdb")]
pub mod bitcoin_core_migrate;
pub mod bitcoin_core_storage;
pub mod blockstore;
pub mod buffered_store;
pub mod chainstate;
#[cfg(feature = "utxo-commitments")]
pub mod commitment_store;
pub mod database;
pub mod disk_utxo;
pub mod hashing;
pub mod ibd_autorepair;
#[cfg(feature = "production")]
pub mod ibd_utxo_muhash;
#[cfg(feature = "production")]
pub mod ibd_utxo_store;
pub mod pruning;
pub mod serialization_cache;
pub mod txindex;
pub mod utxostore;
pub mod wal;

use crate::config::PruningConfig;
use anyhow::Result;
use database::{create_database, default_backend, fallback_backend, Database, DatabaseBackend};
use std::path::Path;
use std::sync::Arc;
use tracing::{info, warn};

#[cfg(feature = "rocksdb")]
use bitcoin_core_detection::BitcoinCoreDetection;
#[cfg(feature = "rocksdb")]
use bitcoin_core_detection::BitcoinCoreNetwork;
#[cfg(feature = "rocksdb")]
use bitcoin_core_storage::BitcoinCoreStorage;

/// Storage manager that coordinates all storage operations
///
/// Owns the shared database handle plus the per-domain stores built on top of
/// it (blocks, UTXOs, chain state, transaction index) and an optional pruning
/// manager. Sub-stores are constructed with clones of the same `Arc<dyn
/// Database>`, so they all operate on the same underlying database.
pub struct Storage {
    // Shared database handle; each sub-store holds its own Arc clone of it.
    db: Arc<dyn Database>,
    // Block and header persistence.
    blockstore: Arc<blockstore::BlockStore>,
    // UTXO set persistence.
    utxostore: Arc<utxostore::UtxoStore>,
    // Chain tip / chain metadata ("chain_info").
    chainstate: chainstate::ChainState,
    // Transaction index; address/value indexing is opt-in at construction.
    txindex: Arc<txindex::TxIndex>,
    // Present only when a pruning config was supplied at construction time.
    pruning_manager: Option<Arc<pruning::PruningManager>>,
}

impl Storage {
    /// Create a new storage instance with default backend
    ///
    /// Attempts to use the default backend (TidesDB if available, else Redb), and gracefully
    /// falls back to alternatives if the primary fails.
    ///
    /// If existing node data is detected, will use RocksDB to read it.
    ///
    /// # Errors
    ///
    /// Returns an error when the default backend fails to initialize and no
    /// fallback backend is available (or the fallback fails too).
    pub fn new<P: AsRef<Path>>(data_dir: P) -> Result<Self> {
        // Check for existing node data first (if RocksDB is available)
        #[cfg(feature = "rocksdb")]
        {
            // NOTE(review): these duplicate the cfg-gated module-level imports;
            // harmless, but they keep this block self-contained.
            use bitcoin_core_detection::BitcoinCoreNetwork;
            use bitcoin_core_storage::BitcoinCoreStorage;

            // Try to detect existing mainnet data
            if let Ok(Some(backend)) =
                BitcoinCoreStorage::detect_and_open(data_dir.as_ref(), BitcoinCoreNetwork::Mainnet)
            {
                // Only RocksDB-format data takes this fast path; any other
                // detection result falls through to normal backend selection.
                if backend == DatabaseBackend::RocksDB {
                    info!("Existing node data detected, opening with RocksDB backend");
                    // Open existing database directly and initialize storage
                    let db = Arc::from(BitcoinCoreStorage::open_bitcoin_core_database(
                        data_dir.as_ref(),
                        BitcoinCoreNetwork::Mainnet,
                    )?);

                    // Create block file reader if blocks directory exists
                    // Use cache directory for index persistence
                    let block_reader = if let Some(core_dir) =
                        BitcoinCoreDetection::detect_data_dir(BitcoinCoreNetwork::Mainnet)?
                    {
                        let blocks_dir = core_dir.join("blocks");
                        if blocks_dir.exists() {
                            // Use data_dir for index cache
                            match bitcoin_core_blocks::BitcoinCoreBlockReader::new_with_cache(
                                &blocks_dir,
                                BitcoinCoreNetwork::Mainnet,
                                Some(data_dir.as_ref()),
                            ) {
                                Ok(reader) => {
                                    info!("Block files detected, enabling block file reader with index cache");
                                    Some(Arc::new(reader))
                                }
                                // A broken reader is non-fatal: we simply lose
                                // direct block-file access and continue.
                                Err(e) => {
                                    warn!("Failed to initialize block file reader: {}", e);
                                    None
                                }
                            }
                        } else {
                            None
                        }
                    } else {
                        None
                    };

                    // Initialize storage components with the opened database and block reader
                    let blockstore =
                        Arc::new(blockstore::BlockStore::new_with_bitcoin_core_reader(
                            Arc::clone(&db),
                            block_reader,
                        )?);
                    let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
                    let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;
                    let txindex = Arc::new(txindex::TxIndex::new(Arc::clone(&db))?);
                    // Early return: pruning is never configured on this path.
                    return Ok(Self {
                        db,
                        blockstore,
                        utxostore,
                        chainstate,
                        txindex,
                        pruning_manager: None,
                    });
                }
            }
        }

        let default = default_backend();

        // Try default backend first
        match Self::with_backend(data_dir.as_ref(), default) {
            Ok(storage) => Ok(storage),
            Err(e) => {
                // If default backend fails, try fallback
                if let Some(fallback_backend) = fallback_backend(default) {
                    warn!(
                        "Failed to initialize {:?} backend: {}. Falling back to {:?}.",
                        default, e, fallback_backend
                    );
                    info!(
                        "Attempting to initialize storage with fallback backend: {:?}",
                        fallback_backend
                    );
                    Self::with_backend(data_dir, fallback_backend)
                } else {
                    Err(anyhow::anyhow!(
                        "Failed to initialize {:?} backend: {}. No fallback backend available.",
                        default,
                        e
                    ))
                }
            }
        }
    }

    /// Create a new storage instance using an explicitly chosen database backend.
    ///
    /// Convenience wrapper: identical to calling
    /// `with_backend_and_pruning` with pruning disabled.
    pub fn with_backend<P: AsRef<Path>>(data_dir: P, backend: DatabaseBackend) -> Result<Self> {
        // No pruning configuration on this path.
        Self::with_backend_and_pruning(data_dir, backend, None)
    }

    /// Create a new storage instance with an explicit backend and optional
    /// pruning configuration.
    ///
    /// Convenience wrapper: delegates to the full constructor with indexing
    /// and storage configs left unset.
    pub fn with_backend_and_pruning<P: AsRef<Path>>(
        data_dir: P,
        backend: DatabaseBackend,
        pruning_config: Option<PruningConfig>,
    ) -> Result<Self> {
        // Indexing and storage configs default to `None` here.
        Self::with_backend_pruning_and_indexing(data_dir, backend, pruning_config, None, None)
    }

    /// Create a new storage instance with backend, pruning, and indexing config
    ///
    /// With the `compression` feature enabled this delegates to the
    /// compression-aware constructor (compression settings defaulted to
    /// `None`); otherwise the components are wired up directly here.
    ///
    /// # Errors
    ///
    /// Returns an error if the database or any component store fails to open.
    pub fn with_backend_pruning_and_indexing<P: AsRef<Path>>(
        data_dir: P,
        backend: DatabaseBackend,
        pruning_config: Option<PruningConfig>,
        indexing_config: Option<crate::config::IndexingConfig>,
        storage_config: Option<&crate::config::StorageConfig>,
    ) -> Result<Self> {
        #[cfg(feature = "compression")]
        {
            Self::with_backend_pruning_indexing_and_compression(
                data_dir,
                backend,
                pruning_config,
                indexing_config,
                None,
                storage_config,
            )
        }
        #[cfg(not(feature = "compression"))]
        {
            // When compression feature is disabled, use the internal implementation
            let db = Arc::from(create_database(data_dir, backend, storage_config)?);
            let blockstore = Arc::new(blockstore::BlockStore::new(Arc::clone(&db))?);
            let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
            let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;

            // Address/value indexing is opt-in via the indexing config.
            let txindex = if let Some(indexing) = indexing_config {
                Arc::new(txindex::TxIndex::with_indexing(
                    Arc::clone(&db),
                    indexing.enable_address_index,
                    indexing.enable_value_index,
                )?)
            } else {
                Arc::new(txindex::TxIndex::new(Arc::clone(&db))?)
            };

            // A pruning manager is only built when a pruning config is supplied.
            let pruning_manager = pruning_config.map(|config| {
                #[cfg(feature = "utxo-commitments")]
                {
                    // Aggressive/Custom modes may ask for UTXO commitments to be kept.
                    let needs_commitments = matches!(config.mode, crate::config::PruningMode::Aggressive { keep_commitments: true, .. })
                        || matches!(config.mode, crate::config::PruningMode::Custom { keep_commitments: true, .. });
                    if needs_commitments {
                        let commitment_store = match commitment_store::CommitmentStore::new(Arc::clone(&db)) {
                            Ok(store) => Arc::new(store),
                            // Degrade gracefully: fall back to plain pruning
                            // rather than failing construction.
                            Err(e) => {
                                warn!("Failed to create commitment store: {}. Pruning will continue without commitments.", e);
                                return Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)));
                            }
                        };
                        Arc::new(pruning::PruningManager::with_utxo_commitments(
                            config,
                            Arc::clone(&blockstore),
                            commitment_store,
                            Arc::clone(&utxostore),
                        ))
                    } else {
                        Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
                    }
                }
                #[cfg(not(feature = "utxo-commitments"))]
                {
                    Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
                }
            });

            Ok(Self {
                db,
                blockstore,
                utxostore,
                chainstate,
                txindex,
                pruning_manager,
            })
        }
    }

    /// Create a new storage instance with backend, pruning, indexing, and compression config
    ///
    /// Wires up the database, block store (with optional compression), UTXO
    /// store, chain state, transaction index, and optional pruning manager.
    ///
    /// # Errors
    ///
    /// Returns an error if the database or any component store fails to open.
    #[cfg(feature = "compression")]
    pub fn with_backend_pruning_indexing_and_compression<P: AsRef<Path>>(
        data_dir: P,
        backend: DatabaseBackend,
        pruning_config: Option<PruningConfig>,
        indexing_config: Option<crate::config::IndexingConfig>,
        compression_config: Option<crate::config::CompressionConfig>,
        storage_config: Option<&crate::config::StorageConfig>,
    ) -> Result<Self> {
        let db = Arc::from(create_database(data_dir, backend, storage_config)?);

        // Configure block store with compression settings.
        // NOTE: this function only exists when the `compression` feature is
        // enabled, so the former inner `#[cfg(feature = "compression")]` /
        // `#[cfg(not(feature = "compression"))]` split was dead code and has
        // been removed.
        let (
            block_compression_enabled,
            block_compression_level,
            witness_compression_enabled,
            witness_compression_level,
        ) = if let Some(compression) = &compression_config {
            (
                compression.block_compression_enabled,
                compression.block_compression_level,
                compression.witness_compression_enabled,
                compression.witness_compression_level,
            )
        } else {
            (false, 3, false, 2) // Defaults: disabled
        };
        let blockstore = Arc::new(blockstore::BlockStore::new_with_compression(
            Arc::clone(&db),
            block_compression_enabled,
            block_compression_level,
            witness_compression_enabled,
            witness_compression_level,
        )?);

        let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
        let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;

        // Configure transaction indexing based on config
        let txindex = if let Some(indexing) = indexing_config {
            Arc::new(txindex::TxIndex::with_indexing(
                Arc::clone(&db),
                indexing.enable_address_index,
                indexing.enable_value_index,
            )?)
        } else {
            Arc::new(txindex::TxIndex::new(Arc::clone(&db))?)
        };

        let pruning_manager = pruning_config.map(|config| {
            #[cfg(feature = "utxo-commitments")]
            {
                // Check if aggressive mode requires UTXO commitments
                let needs_commitments = matches!(config.mode, crate::config::PruningMode::Aggressive { keep_commitments: true, .. })
                    || matches!(config.mode, crate::config::PruningMode::Custom { keep_commitments: true, .. });
                if needs_commitments {
                    let commitment_store = match commitment_store::CommitmentStore::new(Arc::clone(&db)) {
                        Ok(store) => Arc::new(store),
                        // Degrade gracefully rather than failing startup.
                        Err(e) => {
                            warn!("Failed to create commitment store: {}. Pruning will continue without commitments.", e);
                            return Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)));
                        }
                    };
                    Arc::new(pruning::PruningManager::with_utxo_commitments(
                        config,
                        Arc::clone(&blockstore),
                        commitment_store,
                        Arc::clone(&utxostore),
                    ))
                } else {
                    Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
                }
            }
            #[cfg(not(feature = "utxo-commitments"))]
            {
                Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
            }
        });

        Ok(Self {
            db,
            blockstore,
            utxostore,
            chainstate,
            txindex,
            pruning_manager,
        })
    }

    /// Return a shared handle to the block store.
    pub fn blocks(&self) -> Arc<blockstore::BlockStore> {
        // Cheap: bumps the Arc refcount, no data is copied.
        let store = &self.blockstore;
        Arc::clone(store)
    }

    /// Borrow the UTXO store.
    pub fn utxos(&self) -> &utxostore::UtxoStore {
        // Borrow through the Arc without touching the refcount.
        self.utxostore.as_ref()
    }

    /// Return a shared handle to the UTXO store.
    pub fn utxos_arc(&self) -> Arc<utxostore::UtxoStore> {
        // Clones the Arc handle (refcount bump), not the store itself.
        self.utxostore.clone()
    }

    /// Borrow the chain state.
    pub fn chain(&self) -> &chainstate::ChainState {
        &self.chainstate
    }

    /// Load AssumeUTXO snapshot into storage.
    ///
    /// Persists `utxo_set` and then writes chain metadata for the snapshot
    /// tip. The tip header is taken from `tip_header` when supplied; otherwise
    /// it is looked up in the block store via `metadata.block_hash`.
    pub fn load_assumeutxo_snapshot(
        &self,
        utxo_set: &blvm_protocol::UtxoSet,
        metadata: &crate::storage::assumeutxo::SnapshotMetadata,
        tip_header: Option<&blvm_protocol::BlockHeader>,
    ) -> Result<()> {
        // Persist the UTXO set first; chain metadata follows once we have a header.
        self.utxostore.store_utxo_set(utxo_set)?;

        // Prefer the caller-supplied header; fall back to the block index.
        let header = if let Some(h) = tip_header {
            h.clone()
        } else {
            self.blockstore
                .get_header(&metadata.block_hash)?
                .ok_or_else(|| {
                    anyhow::anyhow!(
                        "AssumeUTXO requires block header. Block {} not in store. Run IBD first or include header in snapshot.",
                        hex::encode(metadata.block_hash)
                    )
                })?
        };

        let chain_info = chainstate::ChainInfo {
            tip_hash: metadata.block_hash,
            tip_header: header,
            height: metadata.block_height,
            total_work: 0, // Would need chainwork from header chain
            chain_params: chainstate::ChainParams::default(),
        };
        self.chainstate.store_chain_info(&chain_info)?;
        Ok(())
    }

    /// If `chain_info` is missing but the block index has blocks (e.g. crash before metadata
    /// flush, or legacy parallel IBD that never wrote `chain_info`), rebuild tip from the
    /// highest stored height so `get_height()` and IBD resume match on-disk state.
    ///
    /// No-op when `chain_info` already exists or when the block index is empty
    /// or internally inconsistent (missing height->hash or hash->block entry).
    ///
    /// # Errors
    ///
    /// Propagates storage read/write errors from the block store or chain state.
    pub fn recover_chain_tip_from_blockstore(&self) -> Result<()> {
        use crate::storage::chainstate::{ChainInfo, ChainParams};

        // Nothing to do if chain metadata is already present.
        if self.chain().load_chain_info()?.is_some() {
            return Ok(());
        }
        // Bail out quietly if any link in the index chain is missing.
        let Some(max_h) = self.blockstore.highest_stored_height()? else {
            return Ok(());
        };
        let Some(tip_hash) = self.blockstore.get_hash_by_height(max_h)? else {
            return Ok(());
        };
        let Some(block) = self.blockstore.get_block(&tip_hash)? else {
            return Ok(());
        };
        let genesis_hash = self.blockstore.get_hash_by_height(0)?.unwrap_or_default();
        // Struct-update syntax instead of default-then-mutate
        // (fixes clippy::field_reassign_with_default).
        let info = ChainInfo {
            tip_hash,
            tip_header: block.header.clone(),
            height: max_h,
            total_work: 0, // chainwork unknown without replaying the header chain
            chain_params: ChainParams {
                genesis_hash,
                ..ChainParams::default()
            },
        };
        self.chainstate.store_chain_info(&info)?;
        info!(
            "Recovered chain_info from block index (tip_height={}, tip_hash prefix {:02x}{:02x}{:02x}{:02x})",
            max_h, tip_hash[0], tip_hash[1], tip_hash[2], tip_hash[3]
        );
        Ok(())
    }

    /// Return a shared handle to the transaction index.
    pub fn transactions(&self) -> Arc<txindex::TxIndex> {
        // Cheap: bumps the Arc refcount only.
        let idx = &self.txindex;
        Arc::clone(idx)
    }

    /// Open a custom tree for application-specific data
    ///
    /// Lets modules persist their own key-value data in the shared database.
    /// `name` should be unique and descriptive (e.g. "payment_states", "vaults").
    pub fn open_tree(&self, name: &str) -> Result<Arc<dyn database::Tree>> {
        let tree = self.db.open_tree(name)?;
        Ok(Arc::from(tree))
    }

    /// Flush all pending database writes to disk.
    pub fn flush(&self) -> Result<()> {
        self.db.flush()
    }

    /// Propagate an IBD memory-pressure signal down to the database backend.
    ///
    /// Backends that opt in (RocksDB may reduce background jobs) can react;
    /// others are free to ignore the tick.
    #[inline]
    pub fn ibd_memory_pressure_tick(&self, level_u8: u8) {
        self.db.ibd_memory_pressure_tick(level_u8);
    }

    /// Get approximate disk size used by storage (in bytes)
    ///
    /// Derived from entry counts multiplied by per-entry byte heuristics.
    /// Count failures degrade gracefully (contribute zero) rather than
    /// erroring, and all arithmetic is clamped/saturating to avoid overflow.
    pub fn disk_size(&self) -> Result<u64> {
        // Clamp a raw entry count to a safety cap, then multiply by a
        // per-entry size estimate, saturating on overflow.
        fn estimate(count: usize, max_entries: u64, bytes_per_entry: u64) -> u64 {
            (count as u64).min(max_entries).saturating_mul(bytes_per_entry)
        }

        let mut size = 0u64;

        // Blocks: ~1MB each, capped at 10M entries.
        if let Ok(count) = self.blockstore.block_count() {
            size = size.saturating_add(estimate(count, 10_000_000, 1_024_000));
        }

        // UTXOs: ~100 bytes each, capped at 1B entries.
        if let Ok(count) = self.utxostore.utxo_count() {
            size = size.saturating_add(estimate(count, 1_000_000_000, 100));
        }

        // Transactions: ~500 bytes each, capped at 1B entries.
        if let Ok(count) = self.txindex.transaction_count() {
            size = size.saturating_add(estimate(count, 1_000_000_000, 500));
        }

        // Final sanity ceiling: never report more than 10TB.
        const MAX_DISK_SIZE: u64 = 10_000_000_000_000;
        Ok(size.min(MAX_DISK_SIZE))
    }

    /// Check storage bounds before operations
    ///
    /// Returns `true` while every store is below 80% of its hard safety limit;
    /// a warning is logged for each store that crosses the threshold.
    pub fn check_storage_bounds(&self) -> Result<bool> {
        const MAX_BLOCKS: usize = 10_000_000; // 10M blocks
        const MAX_UTXOS: usize = 1_000_000_000; // 1B UTXOs
        const MAX_TXS: usize = 1_000_000_000; // 1B transactions

        // Bounds checking is best-effort: a failed count reads as zero.
        let block_count = self.blockstore.block_count().unwrap_or(0);
        let utxo_count = self.utxostore.utxo_count().unwrap_or(0);
        let tx_count = self.txindex.transaction_count().unwrap_or(0);

        // Each store is checked against an 80% warning threshold; warnings
        // are emitted immediately after the corresponding check.
        let blocks_ok = block_count < (MAX_BLOCKS * 8 / 10);
        if !blocks_ok {
            warn!(
                "Storage bounds: block count ({}) approaching limit ({})",
                block_count, MAX_BLOCKS
            );
        }

        let utxos_ok = utxo_count < (MAX_UTXOS * 8 / 10);
        if !utxos_ok {
            warn!(
                "Storage bounds: UTXO count ({}) approaching limit ({})",
                utxo_count, MAX_UTXOS
            );
        }

        let txs_ok = tx_count < (MAX_TXS * 8 / 10);
        if !txs_ok {
            warn!(
                "Storage bounds: transaction count ({}) approaching limit ({})",
                tx_count, MAX_TXS
            );
        }

        Ok(blocks_ok && utxos_ok && txs_ok)
    }

    /// Number of transactions currently recorded in the transaction index.
    pub fn transaction_count(&self) -> Result<usize> {
        self.txindex.transaction_count()
    }

    /// Index a block's transactions (optimized batch indexing)
    ///
    /// Call this after the block has been stored so all of its transactions
    /// become queryable through the transaction index.
    pub fn index_block(
        &self,
        block: &blvm_protocol::Block,
        block_hash: &blvm_protocol::Hash,
        block_height: u64,
    ) -> Result<()> {
        // Straight delegation; the txindex performs the actual indexing.
        self.txindex.index_block(block, block_hash, block_height)
    }

    /// Get pruning manager (if pruning is configured)
    pub fn pruning(&self) -> Option<Arc<pruning::PruningManager>> {
        // Cloning an `Option<Arc<_>>` just bumps the refcount when present.
        self.pruning_manager.clone()
    }

    /// Check if pruning is enabled
    ///
    /// `false` when no pruning manager was configured at construction.
    pub fn is_pruning_enabled(&self) -> bool {
        match &self.pruning_manager {
            Some(pm) => pm.is_enabled(),
            None => false,
        }
    }
}