use std::{
collections::{BTreeMap, HashMap},
sync::Arc,
};
use zebra_chain::{
amount::NonNegative, block::Height, block_info::BlockInfo, history_tree::HistoryTree,
serialization::ZcashSerialize as _, transparent, value_balance::ValueBalance,
};
use crate::{
request::FinalizedBlock,
service::finalized_state::{
disk_db::DiskWriteBatch,
disk_format::{chain::HistoryTreeParts, RawBytes},
zebra_db::{metrics::value_pool_metrics, ZebraDb},
TypedColumnFamily,
},
HashOrHeight, ValidateContextError,
};
/// The name of the history tree column family.
pub const HISTORY_TREE: &str = "history_tree";
/// Typed handle for the current history tree format: a single entry under the
/// unit `()` key.
pub type HistoryTreePartsCf<'cf> = TypedColumnFamily<'cf, (), HistoryTreeParts>;
/// Typed handle for the legacy history tree format, which stored entries keyed
/// by block [`Height`] (the tip tree is the entry with the highest height).
pub type LegacyHistoryTreePartsCf<'cf> = TypedColumnFamily<'cf, Height, HistoryTreeParts>;
/// Raw-keyed handle to the same column family, used to iterate over every
/// entry regardless of which key format it was written with.
pub type RawHistoryTreePartsCf<'cf> = TypedColumnFamily<'cf, RawBytes, HistoryTreeParts>;
/// The name of the chain value pools column family.
pub const CHAIN_VALUE_POOLS: &str = "tip_chain_value_pool";
/// Typed handle for the chain value pools: a single entry under the unit `()`
/// key holding the non-negative pool balances at the finalized tip.
pub type ChainValuePoolsCf<'cf> = TypedColumnFamily<'cf, (), ValueBalance<NonNegative>>;
/// The name of the block info column family.
pub const BLOCK_INFO: &str = "block_info";
/// Typed handle for per-height [`BlockInfo`] records.
pub type BlockInfoCf<'cf> = TypedColumnFamily<'cf, Height, BlockInfo>;
impl ZebraDb {
    /// Returns a typed handle to the `history_tree` column family.
    pub(crate) fn history_tree_cf(&self) -> HistoryTreePartsCf<'_> {
        let cf = HistoryTreePartsCf::new(&self.db, HISTORY_TREE);
        cf.expect("column family was created when database was created")
    }

    /// Returns a height-keyed (legacy format) handle to the same
    /// `history_tree` column family.
    pub(crate) fn legacy_history_tree_cf(&self) -> LegacyHistoryTreePartsCf<'_> {
        let cf = LegacyHistoryTreePartsCf::new(&self.db, HISTORY_TREE);
        cf.expect("column family was created when database was created")
    }

    /// Returns a raw-keyed handle to the `history_tree` column family, for
    /// iterating over entries in any key format.
    pub(crate) fn raw_history_tree_cf(&self) -> RawHistoryTreePartsCf<'_> {
        let cf = RawHistoryTreePartsCf::new(&self.db, HISTORY_TREE);
        cf.expect("column family was created when database was created")
    }

    /// Returns a typed handle to the chain value pools column family.
    pub(crate) fn chain_value_pools_cf(&self) -> ChainValuePoolsCf<'_> {
        let cf = ChainValuePoolsCf::new(&self.db, CHAIN_VALUE_POOLS);
        cf.expect("column family was created when database was created")
    }

    /// Returns a typed handle to the `block_info` column family.
    pub(crate) fn block_info_cf(&self) -> BlockInfoCf<'_> {
        let cf = BlockInfoCf::new(&self.db, BLOCK_INFO);
        cf.expect("column family was created when database was created")
    }

    /// Returns the history tree of the finalized tip.
    ///
    /// If no tree parts are stored in either key format, the returned
    /// [`HistoryTree`] is built from `None`.
    pub fn history_tree(&self) -> Arc<HistoryTree> {
        // Try the current format first (a single entry under the unit key),
        // then fall back to the legacy height-keyed format, where the entry
        // with the highest height holds the tip tree.
        let tree_parts = self.history_tree_cf().zs_get(&()).or_else(|| {
            self.legacy_history_tree_cf()
                .zs_last_key_value()
                .map(|(_height, parts)| parts)
        });

        let tree = tree_parts.map(|parts| {
            parts.with_network(&self.db.network()).expect(
                "deserialization format should match the serialization format used by IntoDisk",
            )
        });

        Arc::new(HistoryTree::from(tree))
    }

    /// Returns every history tree entry in the column family, keyed by its
    /// raw database key, covering both current and legacy key formats.
    pub(crate) fn history_trees_full_tip(&self) -> BTreeMap<RawBytes, Arc<HistoryTree>> {
        let mut trees = BTreeMap::new();

        for (raw_key, parts) in self.raw_history_tree_cf().zs_forward_range_iter(..) {
            let tree = parts.with_network(&self.db.network()).expect(
                "deserialization format should match the serialization format used by IntoDisk",
            );
            trees.insert(raw_key, Arc::new(HistoryTree::from(tree)));
        }

        trees
    }

    /// Returns the stored chain value pool balances of the finalized tip,
    /// or zero balances if none have been stored yet.
    pub fn finalized_value_pool(&self) -> ValueBalance<NonNegative> {
        match self.chain_value_pools_cf().zs_get(&()) {
            Some(pools) => pools,
            None => ValueBalance::zero(),
        }
    }

    /// Returns the [`BlockInfo`] for the given hash or height, if that block
    /// is in the finalized state.
    pub fn block_info(&self, hash_or_height: HashOrHeight) -> Option<BlockInfo> {
        // Resolve a hash to its height first; bail out with `None` if the
        // block isn't in the finalized state.
        let height = hash_or_height.height_or_else(|hash| self.height(hash))?;
        self.block_info_cf().zs_get(&height)
    }
}
impl DiskWriteBatch {
    /// Queues a write of `tree` to the history tree column family (under the
    /// unit `()` key), as part of this batch.
    ///
    /// If the tree is empty (`tree.as_ref()` is `None`), nothing is written
    /// and any existing entry is left unchanged.
    pub fn update_history_tree(&mut self, db: &ZebraDb, tree: &HistoryTree) {
        let history_tree_cf = db.history_tree_cf().with_batch_for_writing(self);
        if let Some(tree) = tree.as_ref() {
            // Discard the returned handle: the write is already queued on `self`.
            let _ = history_tree_cf.zs_insert(&(), &HistoryTreeParts::from(tree));
        }
    }

    /// Queues deletes for legacy height-keyed history tree entries in the
    /// half-open range `[from, until_strictly_before)`, as part of this batch.
    pub fn delete_range_history_tree(
        &mut self,
        db: &ZebraDb,
        from: &Height,
        until_strictly_before: &Height,
    ) {
        let history_tree_cf = db.legacy_history_tree_cf().with_batch_for_writing(self);
        // Discard the returned handle: the deletes are already queued on `self`.
        let _ = history_tree_cf.zs_delete_range(from, until_strictly_before);
    }

    /// Queues writes of the updated chain value pools and the new per-height
    /// [`BlockInfo`] record for `finalized`, as part of this batch.
    ///
    /// `utxos_spent_by_block` must contain the transparent UTXOs spent by the
    /// block, and `value_pool` the chain value pool balances before this block.
    ///
    /// # Errors
    ///
    /// - [`ValidateContextError::CalculateBlockChainValueChange`] if the
    ///   block's value pool change can't be calculated.
    /// - [`ValidateContextError::AddValuePool`] if adding that change to
    ///   `value_pool` fails.
    pub fn prepare_chain_value_pools_batch(
        &mut self,
        db: &ZebraDb,
        finalized: &FinalizedBlock,
        utxos_spent_by_block: HashMap<transparent::OutPoint, transparent::Utxo>,
        value_pool: ValueBalance<NonNegative>,
    ) -> Result<(), ValidateContextError> {
        // Compute this block's net effect on the chain value pools from its
        // spent UTXOs and its deferred pool balance change.
        let block_value_pool_change = finalized
            .block
            .chain_value_pool_change(
                &utxos_spent_by_block,
                finalized.deferred_pool_balance_change,
            )
            .map_err(|value_balance_error| {
                // Include block context so the failure can be diagnosed.
                ValidateContextError::CalculateBlockChainValueChange {
                    value_balance_error,
                    height: finalized.height,
                    block_hash: finalized.hash,
                    transaction_count: finalized.transaction_hashes.len(),
                    spent_utxo_count: utxos_spent_by_block.len(),
                }
            })?;
        let new_value_pool = value_pool
            .add_chain_value_pool_change(block_value_pool_change)
            .map_err(|value_balance_error| ValidateContextError::AddValuePool {
                value_balance_error,
                chain_value_pools: Box::new(value_pool),
                block_value_pool_change: Box::new(block_value_pool_change),
                height: Some(finalized.height),
            })?;
        // Update metrics before queueing the database writes.
        value_pool_metrics(&new_value_pool);
        // Discard the returned handles below: both writes are queued on `self`.
        let _ = db
            .chain_value_pools_cf()
            .with_batch_for_writing(self)
            .zs_insert(&(), &new_value_pool);
        let block_size = finalized.block.zcash_serialized_size();
        // NOTE(review): `as u32` assumes the serialized block size fits in
        // u32 — presumably bounded by consensus block-size limits; confirm.
        let _ = db.block_info_cf().with_batch_for_writing(self).zs_insert(
            &finalized.height,
            &BlockInfo::new(new_value_pool, block_size as u32),
        );
        Ok(())
    }
}