pub mod assumeutxo;
pub mod bitcoin_core_blocks;
pub mod bitcoin_core_detection;
pub mod bitcoin_core_format;
#[cfg(feature = "rocksdb")]
pub mod bitcoin_core_migrate;
pub mod bitcoin_core_storage;
pub mod blockstore;
pub mod buffered_store;
pub mod chainstate;
#[cfg(feature = "utxo-commitments")]
pub mod commitment_store;
pub mod database;
pub mod disk_utxo;
pub mod hashing;
pub mod ibd_autorepair;
#[cfg(feature = "production")]
pub mod ibd_utxo_muhash;
#[cfg(feature = "production")]
pub mod ibd_utxo_store;
pub mod pruning;
pub mod serialization_cache;
pub mod txindex;
pub mod utxostore;
pub mod wal;
use crate::config::PruningConfig;
use anyhow::Result;
use database::{create_database, default_backend, fallback_backend, Database, DatabaseBackend};
use std::path::Path;
use std::sync::Arc;
use tracing::{info, warn};
#[cfg(feature = "rocksdb")]
use bitcoin_core_detection::BitcoinCoreDetection;
#[cfg(feature = "rocksdb")]
use bitcoin_core_detection::BitcoinCoreNetwork;
#[cfg(feature = "rocksdb")]
use bitcoin_core_storage::BitcoinCoreStorage;
/// Aggregates the node's persistence components — block store, UTXO set,
/// chain state, and transaction index — all backed by one shared database.
pub struct Storage {
    // Shared database handle; every sub-store holds an `Arc` clone of it.
    db: Arc<dyn Database>,
    // Block headers/bodies keyed by block hash.
    blockstore: Arc<blockstore::BlockStore>,
    // UTXO set storage.
    utxostore: Arc<utxostore::UtxoStore>,
    // Chain tip and chain-parameter state.
    chainstate: chainstate::ChainState,
    // Transaction index (optionally with address/value indexes).
    txindex: Arc<txindex::TxIndex>,
    // Present only when pruning was configured at construction time.
    pruning_manager: Option<Arc<pruning::PruningManager>>,
}
impl Storage {
/// Opens storage at `data_dir` using the default database backend.
///
/// With the `rocksdb` feature enabled, first probes for an existing
/// Bitcoin Core–style database; if one maps to the RocksDB backend it is
/// opened directly (attaching a block-file reader when Core's `blocks/`
/// directory is present). Otherwise falls back to `with_backend` with the
/// default backend, retrying once with the fallback backend on failure.
///
/// # Errors
/// Returns an error if neither the default nor the fallback backend can
/// be initialized.
pub fn new<P: AsRef<Path>>(data_dir: P) -> Result<Self> {
    #[cfg(feature = "rocksdb")]
    {
        // `BitcoinCoreDetection`/`BitcoinCoreNetwork`/`BitcoinCoreStorage`
        // are imported at module level under the same cfg; the previous
        // function-local `use` statements were redundant and removed.
        if let Ok(Some(backend)) =
            BitcoinCoreStorage::detect_and_open(data_dir.as_ref(), BitcoinCoreNetwork::Mainnet)
        {
            if backend == DatabaseBackend::RocksDB {
                info!("Existing node data detected, opening with RocksDB backend");
                let db = Arc::from(BitcoinCoreStorage::open_bitcoin_core_database(
                    data_dir.as_ref(),
                    BitcoinCoreNetwork::Mainnet,
                )?);
                // Optionally attach a reader over Core's raw block files.
                let block_reader = if let Some(core_dir) =
                    BitcoinCoreDetection::detect_data_dir(BitcoinCoreNetwork::Mainnet)?
                {
                    let blocks_dir = core_dir.join("blocks");
                    if blocks_dir.exists() {
                        match bitcoin_core_blocks::BitcoinCoreBlockReader::new_with_cache(
                            &blocks_dir,
                            BitcoinCoreNetwork::Mainnet,
                            Some(data_dir.as_ref()),
                        ) {
                            Ok(reader) => {
                                info!("Block files detected, enabling block file reader with index cache");
                                Some(Arc::new(reader))
                            }
                            Err(e) => {
                                // Non-fatal: storage works without the reader.
                                warn!("Failed to initialize block file reader: {}", e);
                                None
                            }
                        }
                    } else {
                        None
                    }
                } else {
                    None
                };
                let blockstore =
                    Arc::new(blockstore::BlockStore::new_with_bitcoin_core_reader(
                        Arc::clone(&db),
                        block_reader,
                    )?);
                let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
                let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;
                let txindex = Arc::new(txindex::TxIndex::new(Arc::clone(&db))?);
                return Ok(Self {
                    db,
                    blockstore,
                    utxostore,
                    chainstate,
                    txindex,
                    pruning_manager: None,
                });
            }
        }
    }
    let default = default_backend();
    match Self::with_backend(data_dir.as_ref(), default) {
        Ok(storage) => Ok(storage),
        Err(e) => {
            // Local name `fallback` avoids shadowing the imported
            // `fallback_backend` function.
            if let Some(fallback) = fallback_backend(default) {
                warn!(
                    "Failed to initialize {:?} backend: {}. Falling back to {:?}.",
                    default, e, fallback
                );
                info!(
                    "Attempting to initialize storage with fallback backend: {:?}",
                    fallback
                );
                Self::with_backend(data_dir, fallback)
            } else {
                Err(anyhow::anyhow!(
                    "Failed to initialize {:?} backend: {}. No fallback backend available.",
                    default,
                    e
                ))
            }
        }
    }
}
/// Opens storage at `data_dir` with an explicit database `backend`;
/// pruning is disabled.
pub fn with_backend<P: AsRef<Path>>(data_dir: P, backend: DatabaseBackend) -> Result<Self> {
    Self::with_backend_and_pruning(data_dir, backend, None)
}
/// Opens storage with an explicit backend and optional pruning
/// configuration; indexing and storage tuning use their defaults.
pub fn with_backend_and_pruning<P: AsRef<Path>>(
    data_dir: P,
    backend: DatabaseBackend,
    pruning_config: Option<PruningConfig>,
) -> Result<Self> {
    Self::with_backend_pruning_and_indexing(data_dir, backend, pruning_config, None, None)
}
/// Opens storage with explicit backend, optional pruning, optional
/// transaction-indexing options, and optional backend tuning config.
///
/// With the `compression` feature enabled this forwards to
/// `with_backend_pruning_indexing_and_compression` (with no compression
/// config); otherwise all stores are constructed here directly.
pub fn with_backend_pruning_and_indexing<P: AsRef<Path>>(
    data_dir: P,
    backend: DatabaseBackend,
    pruning_config: Option<PruningConfig>,
    indexing_config: Option<crate::config::IndexingConfig>,
    storage_config: Option<&crate::config::StorageConfig>,
) -> Result<Self> {
    #[cfg(feature = "compression")]
    {
        Self::with_backend_pruning_indexing_and_compression(
            data_dir,
            backend,
            pruning_config,
            indexing_config,
            None,
            storage_config,
        )
    }
    #[cfg(not(feature = "compression"))]
    {
        let db = Arc::from(create_database(data_dir, backend, storage_config)?);
        let blockstore = Arc::new(blockstore::BlockStore::new(Arc::clone(&db))?);
        let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
        let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;
        // Address/value indexes are opt-in via the indexing config.
        let txindex = if let Some(indexing) = indexing_config {
            Arc::new(txindex::TxIndex::with_indexing(
                Arc::clone(&db),
                indexing.enable_address_index,
                indexing.enable_value_index,
            )?)
        } else {
            Arc::new(txindex::TxIndex::new(Arc::clone(&db))?)
        };
        let pruning_manager = pruning_config.map(|config| {
            #[cfg(feature = "utxo-commitments")]
            {
                // Only Aggressive/Custom modes with `keep_commitments`
                // need a commitment store.
                let needs_commitments = matches!(config.mode, crate::config::PruningMode::Aggressive { keep_commitments: true, .. })
                    || matches!(config.mode, crate::config::PruningMode::Custom { keep_commitments: true, .. });
                if needs_commitments {
                    let commitment_store = match commitment_store::CommitmentStore::new(Arc::clone(&db)) {
                        Ok(store) => Arc::new(store),
                        Err(e) => {
                            // Degrade gracefully: prune without commitments.
                            warn!("Failed to create commitment store: {}. Pruning will continue without commitments.", e);
                            return Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)));
                        }
                    };
                    Arc::new(pruning::PruningManager::with_utxo_commitments(
                        config,
                        Arc::clone(&blockstore),
                        commitment_store,
                        Arc::clone(&utxostore),
                    ))
                } else {
                    Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
                }
            }
            #[cfg(not(feature = "utxo-commitments"))]
            {
                Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
            }
        });
        Ok(Self {
            db,
            blockstore,
            utxostore,
            chainstate,
            txindex,
            pruning_manager,
        })
    }
}
/// Opens storage with full control: backend, optional pruning, optional
/// indexing options, optional block/witness compression settings, and
/// optional backend tuning config.
///
/// # Errors
/// Returns an error if the database or any sub-store fails to initialize.
#[cfg(feature = "compression")]
pub fn with_backend_pruning_indexing_and_compression<P: AsRef<Path>>(
    data_dir: P,
    backend: DatabaseBackend,
    pruning_config: Option<PruningConfig>,
    indexing_config: Option<crate::config::IndexingConfig>,
    compression_config: Option<crate::config::CompressionConfig>,
    storage_config: Option<&crate::config::StorageConfig>,
) -> Result<Self> {
    let db = Arc::from(create_database(data_dir, backend, storage_config)?);
    // The inner `#[cfg(feature = "compression")]` guard and the dead
    // `#[cfg(not(feature = "compression"))]` branch were removed: this
    // function only exists when the `compression` feature is enabled.
    let blockstore = {
        // Without an explicit config, compression is disabled; the level
        // values (3 / 2) are only defaults passed through to BlockStore.
        let (
            block_compression_enabled,
            block_compression_level,
            witness_compression_enabled,
            witness_compression_level,
        ) = if let Some(compression) = &compression_config {
            (
                compression.block_compression_enabled,
                compression.block_compression_level,
                compression.witness_compression_enabled,
                compression.witness_compression_level,
            )
        } else {
            (false, 3, false, 2)
        };
        Arc::new(blockstore::BlockStore::new_with_compression(
            Arc::clone(&db),
            block_compression_enabled,
            block_compression_level,
            witness_compression_enabled,
            witness_compression_level,
        )?)
    };
    let utxostore = Arc::new(utxostore::UtxoStore::new(Arc::clone(&db))?);
    let chainstate = chainstate::ChainState::new(Arc::clone(&db))?;
    // Address/value indexes are opt-in via the indexing config.
    let txindex = if let Some(indexing) = indexing_config {
        Arc::new(txindex::TxIndex::with_indexing(
            Arc::clone(&db),
            indexing.enable_address_index,
            indexing.enable_value_index,
        )?)
    } else {
        Arc::new(txindex::TxIndex::new(Arc::clone(&db))?)
    };
    let pruning_manager = pruning_config.map(|config| {
        #[cfg(feature = "utxo-commitments")]
        {
            // Only Aggressive/Custom modes with `keep_commitments` need a
            // commitment store.
            let needs_commitments = matches!(config.mode, crate::config::PruningMode::Aggressive { keep_commitments: true, .. })
                || matches!(config.mode, crate::config::PruningMode::Custom { keep_commitments: true, .. });
            if needs_commitments {
                let commitment_store = match commitment_store::CommitmentStore::new(Arc::clone(&db)) {
                    Ok(store) => Arc::new(store),
                    Err(e) => {
                        // Degrade gracefully: prune without commitments.
                        warn!("Failed to create commitment store: {}. Pruning will continue without commitments.", e);
                        return Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)));
                    }
                };
                Arc::new(pruning::PruningManager::with_utxo_commitments(
                    config,
                    Arc::clone(&blockstore),
                    commitment_store,
                    Arc::clone(&utxostore),
                ))
            } else {
                Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
            }
        }
        #[cfg(not(feature = "utxo-commitments"))]
        {
            Arc::new(pruning::PruningManager::new(config, Arc::clone(&blockstore)))
        }
    });
    Ok(Self {
        db,
        blockstore,
        utxostore,
        chainstate,
        txindex,
        pruning_manager,
    })
}
/// Returns a shared handle to the block store.
pub fn blocks(&self) -> Arc<blockstore::BlockStore> {
    self.blockstore.clone()
}
/// Returns a borrowed reference to the UTXO store.
pub fn utxos(&self) -> &utxostore::UtxoStore {
    self.utxostore.as_ref()
}
/// Returns a shared handle to the UTXO store.
pub fn utxos_arc(&self) -> Arc<utxostore::UtxoStore> {
    self.utxostore.clone()
}
/// Returns a borrowed reference to the chain state.
pub fn chain(&self) -> &chainstate::ChainState {
    &self.chainstate
}
/// Installs an AssumeUTXO snapshot: persists `utxo_set` and writes chain
/// info pointing at the snapshot's block.
///
/// If `tip_header` is not supplied, the header must already exist in the
/// block store; otherwise an error is returned.
pub fn load_assumeutxo_snapshot(
    &self,
    utxo_set: &blvm_protocol::UtxoSet,
    metadata: &crate::storage::assumeutxo::SnapshotMetadata,
    tip_header: Option<&blvm_protocol::BlockHeader>,
) -> Result<()> {
    self.utxostore.store_utxo_set(utxo_set)?;
    // Prefer the caller-supplied header; fall back to the block store.
    let header = if let Some(h) = tip_header {
        h.clone()
    } else {
        self.blockstore
            .get_header(&metadata.block_hash)?
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "AssumeUTXO requires block header. Block {} not in store. Run IBD first or include header in snapshot.",
                    hex::encode(metadata.block_hash)
                )
            })?
    };
    let chain_info = chainstate::ChainInfo {
        tip_hash: metadata.block_hash,
        tip_header: header,
        height: metadata.block_height,
        // total_work left at 0 — it is not derived from the snapshot here.
        total_work: 0,
        chain_params: chainstate::ChainParams::default(),
    };
    self.chainstate.store_chain_info(&chain_info)?;
    Ok(())
}
/// Rebuilds `chain_info` from the block index when chain state is missing.
///
/// No-op when chain info already exists, or when the block store lacks a
/// highest height, a tip hash, or the tip block itself.
///
/// # Errors
/// Propagates block-store and chain-state I/O errors.
pub fn recover_chain_tip_from_blockstore(&self) -> Result<()> {
    use crate::storage::chainstate::{ChainInfo, ChainParams};
    // Already have chain info — nothing to recover.
    if self.chain().load_chain_info()?.is_some() {
        return Ok(());
    }
    let Some(max_h) = self.blockstore.highest_stored_height()? else {
        return Ok(());
    };
    let Some(tip_hash) = self.blockstore.get_hash_by_height(max_h)? else {
        return Ok(());
    };
    let Some(block) = self.blockstore.get_block(&tip_hash)? else {
        return Ok(());
    };
    let genesis_hash = self.blockstore.get_hash_by_height(0)?.unwrap_or_default();
    // Struct-update syntax instead of default-then-mutate
    // (clippy::field_reassign_with_default).
    let params = ChainParams {
        genesis_hash,
        ..ChainParams::default()
    };
    let info = ChainInfo {
        tip_hash,
        tip_header: block.header.clone(),
        height: max_h,
        // Cumulative work is unknown at recovery time; recorded as 0.
        total_work: 0,
        chain_params: params,
    };
    self.chainstate.store_chain_info(&info)?;
    info!(
        "Recovered chain_info from block index (tip_height={}, tip_hash prefix {:02x}{:02x}{:02x}{:02x})",
        max_h, tip_hash[0], tip_hash[1], tip_hash[2], tip_hash[3]
    );
    Ok(())
}
/// Returns a shared handle to the transaction index.
pub fn transactions(&self) -> Arc<txindex::TxIndex> {
    self.txindex.clone()
}
/// Opens (or creates) the named tree in the underlying database and
/// returns it behind a shared handle.
pub fn open_tree(&self, name: &str) -> Result<Arc<dyn database::Tree>> {
    let tree = self.db.open_tree(name)?;
    Ok(Arc::from(tree))
}
/// Flushes pending writes in the underlying database.
pub fn flush(&self) -> Result<()> {
    self.db.flush()
}
/// Forwards an IBD memory-pressure signal to the database backend.
/// `level_u8` is passed through verbatim; its interpretation is defined
/// by the backend (NOTE(review): presumably higher = more pressure —
/// confirm against the backend implementation).
#[inline]
pub fn ibd_memory_pressure_tick(&self, level_u8: u8) {
    self.db.ibd_memory_pressure_tick(level_u8);
}
/// Estimates total on-disk size from record counts using fixed per-record
/// byte estimates, clamping each count and the final sum to sanity caps.
/// Stores whose counts cannot be read are simply skipped.
pub fn disk_size(&self) -> Result<u64> {
    // Per-record size estimates and sanity caps.
    const BYTES_PER_BLOCK: u64 = 1_024_000;
    const BYTES_PER_UTXO: u64 = 100;
    const BYTES_PER_TX: u64 = 500;
    const MAX_BLOCKS: u64 = 10_000_000;
    const MAX_UTXOS: u64 = 1_000_000_000;
    const MAX_TXS: u64 = 1_000_000_000;
    const MAX_DISK_SIZE: u64 = 10_000_000_000_000;

    // Clamp the count, then scale by the per-record estimate.
    let estimate =
        |count: usize, cap: u64, per_item: u64| (count as u64).min(cap).saturating_mul(per_item);

    let mut size = 0u64;
    if let Ok(count) = self.blockstore.block_count() {
        size = size.saturating_add(estimate(count, MAX_BLOCKS, BYTES_PER_BLOCK));
    }
    if let Ok(count) = self.utxostore.utxo_count() {
        size = size.saturating_add(estimate(count, MAX_UTXOS, BYTES_PER_UTXO));
    }
    if let Ok(count) = self.txindex.transaction_count() {
        size = size.saturating_add(estimate(count, MAX_TXS, BYTES_PER_TX));
    }
    Ok(size.min(MAX_DISK_SIZE))
}
/// Returns `Ok(true)` while every store is below 80% of its soft limit,
/// logging a warning for each store that has crossed the threshold.
/// Counts that cannot be read are treated as 0.
pub fn check_storage_bounds(&self) -> Result<bool> {
    const MAX_BLOCKS: usize = 10_000_000;
    const MAX_UTXOS: usize = 1_000_000_000;
    const MAX_TXS: usize = 1_000_000_000;

    let block_count = self.blockstore.block_count().unwrap_or(0);
    let utxo_count = self.utxostore.utxo_count().unwrap_or(0);
    let tx_count = self.txindex.transaction_count().unwrap_or(0);

    // 80% of each limit is the warning threshold.
    let mut within_bounds = true;
    if block_count >= MAX_BLOCKS * 8 / 10 {
        within_bounds = false;
        warn!(
            "Storage bounds: block count ({}) approaching limit ({})",
            block_count, MAX_BLOCKS
        );
    }
    if utxo_count >= MAX_UTXOS * 8 / 10 {
        within_bounds = false;
        warn!(
            "Storage bounds: UTXO count ({}) approaching limit ({})",
            utxo_count, MAX_UTXOS
        );
    }
    if tx_count >= MAX_TXS * 8 / 10 {
        within_bounds = false;
        warn!(
            "Storage bounds: transaction count ({}) approaching limit ({})",
            tx_count, MAX_TXS
        );
    }
    Ok(within_bounds)
}
/// Returns the number of transactions recorded in the transaction index.
pub fn transaction_count(&self) -> Result<usize> {
    self.txindex.transaction_count()
}
/// Indexes `block` (identified by `block_hash` at `block_height`) into
/// the transaction index.
pub fn index_block(
    &self,
    block: &blvm_protocol::Block,
    block_hash: &blvm_protocol::Hash,
    block_height: u64,
) -> Result<()> {
    self.txindex.index_block(block, block_hash, block_height)
}
/// Returns a shared handle to the pruning manager, if pruning was
/// configured at construction time.
pub fn pruning(&self) -> Option<Arc<pruning::PruningManager>> {
    // Cloning the Option clones the inner Arc (cheap refcount bump).
    self.pruning_manager.clone()
}
/// Reports whether a pruning manager exists and is currently enabled.
pub fn is_pruning_enabled(&self) -> bool {
    match &self.pruning_manager {
        Some(pm) => pm.is_enabled(),
        None => false,
    }
}
}