use std::{
collections::HashMap,
fs,
ops::Deref,
path::{Path, PathBuf},
sync::Arc,
};
use jmt::{
JellyfishMerkleTree,
KeyHash,
mock::MockTreeStore,
storage::{TreeReader, TreeUpdateBatch, TreeWriter},
};
use tari_common::configuration::Network;
use tari_common_types::{
chain_metadata::ChainMetadata,
epoch::VnEpoch,
tari_address::TariAddress,
types::{BadBlock, CompressedCommitment, CompressedPublicKey, CompressedSignature, FixedHash, HashOutput},
};
use tari_node_components::blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader};
use tari_sidechain::ShardGroup;
use tari_storage::lmdb_store::LMDBConfig;
use tari_test_utils::paths::create_temporary_data_path;
use tari_transaction_components::{
consensus::consensus_constants::ConsensusConstantsBuilder,
crypto_factories::CryptoFactories,
key_manager::{KeyManager, TariKeyId},
tari_proof_of_work::{Difficulty, PowAlgorithm},
transaction_components::{RangeProofType, TransactionInput, TransactionKernel, TransactionOutput, WalletOutput},
};
use tari_utilities::ByteArray;
use super::{create_block, create_consensus_constants, mine_to_difficulty};
use crate::{
blocks::{BlockAccumulatedData, BlockHeaderAccumulatedDataBuilder},
chain_storage::{
AccumulatedDataRebuildStatus,
BlockAddResult,
BlockchainBackend,
BlockchainCheckRequest,
BlockchainCheckStatus,
BlockchainDatabase,
BlockchainDatabaseConfig,
ChainStorageError,
DbBasicStats,
DbKey,
DbTotalSizeStats,
DbTransaction,
DbValue,
HorizonData,
HorizonSyncOutputCheckpoint,
InputMinedInfo,
LMDBDatabase,
MinedInfo,
MmrTree,
OutputMinedInfo,
OwnedLmdbTreeReader,
PayrefRebuildStatus,
Reorg,
SmtHasher,
TemplateRegistrationEntry,
ValidatorNodeRegistrationInfo,
Validators,
create_lmdb_database,
},
consensus::{BaseNodeConsensusManager, chain_strength_comparer::ChainStrengthComparerBuilder},
proof_of_work::AchievedTargetDifficulty,
test_helpers::{BlockSpec, block_spec::BlockSpecs, create_consensus_rules, default_coinbase_entities},
validation::{
DifficultyCalculator,
block_body::{BlockBodyFullValidator, BlockBodyInternalConsistencyValidator},
mocks::MockValidator,
},
};
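/// Creates a new `BlockchainDatabase` on `Network::LocalNet`, backed by a temporary LMDB store.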
pub fn create_new_blockchain() -> BlockchainDatabase<TempDatabase> {
create_new_blockchain_with_network(Network::LocalNet)
}
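/// Creates a new `BlockchainDatabase` for the given network, using default consensus constants
/// and a height-based tie-breaker for chain-strength comparisons.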
pub fn create_new_blockchain_with_network(network: Network) -> BlockchainDatabase<TempDatabase> {
let consensus_constants = ConsensusConstantsBuilder::new(network).build();
let consensus_manager = BaseNodeConsensusManager::builder(network)
.add_consensus_constants(consensus_constants)
.on_ties(ChainStrengthComparerBuilder::new().by_height().build())
.build()
.unwrap();
create_custom_blockchain(consensus_manager)
}
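/// Creates a blockchain database with the given consensus rules and permissive mock validators
/// that accept every block.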
pub fn create_custom_blockchain(rules: BaseNodeConsensusManager) -> BlockchainDatabase<TempDatabase> {
let validators = Validators::new(
MockValidator::new(true),
MockValidator::new(true),
MockValidator::new(true),
);
create_store_with_consensus_and_validators(rules, validators)
}
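/// Creates a blockchain database with the given consensus rules and validators, using the
/// default `BlockchainDatabaseConfig`.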
pub fn create_store_with_consensus_and_validators(
rules: BaseNodeConsensusManager,
validators: Validators<TempDatabase>,
) -> BlockchainDatabase<TempDatabase> {
create_store_with_consensus_and_validators_and_config(rules, validators, BlockchainDatabaseConfig::default())
}
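/// Creates a blockchain database with the given consensus rules, validators and configuration,
/// backed by a fresh `TempDatabase`.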
pub fn create_store_with_consensus_and_validators_and_config(
rules: BaseNodeConsensusManager,
validators: Validators<TempDatabase>,
config: BlockchainDatabaseConfig,
) -> BlockchainDatabase<TempDatabase> {
let backend = create_test_db();
BlockchainDatabase::start_new(
backend,
rules.clone(),
validators,
config,
DifficultyCalculator::new(rules, Default::default()),
)
.unwrap()
}
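/// Creates a new blockchain database in a temporary directory using the supplied LMDB
/// configuration, with permissive mock validators.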
pub fn create_new_blockchain_with_lmdb_config(lmdb_config: LMDBConfig) -> BlockchainDatabase<TempDatabase> {
let rules = create_consensus_rules();
    let temp_path = create_temporary_data_path();
    let backend = TempDatabase::from_path_with_lmdb_config(&temp_path, lmdb_config);
let validators = Validators::new(
MockValidator::new(true),
MockValidator::new(true),
MockValidator::new(true),
);
let config = BlockchainDatabaseConfig::default();
BlockchainDatabase::start_new(
backend,
rules.clone(),
validators,
config,
DifficultyCalculator::new(rules, Default::default()),
)
.unwrap()
}
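/// Creates a blockchain database that uses a full block-body validator, an always-true mock
/// validator, and a block-body internal-consistency validator.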
pub fn create_store_with_consensus(rules: BaseNodeConsensusManager) -> BlockchainDatabase<TempDatabase> {
let factories = CryptoFactories::default();
let validators = Validators::new(
BlockBodyFullValidator::new(rules.clone(), true),
MockValidator::new(true),
BlockBodyInternalConsistencyValidator::new(rules.clone(), false, factories),
);
create_store_with_consensus_and_validators(rules, validators)
}
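/// Creates a test blockchain database with default consensus rules and real block-body
/// validation.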
pub fn create_test_blockchain_db() -> BlockchainDatabase<TempDatabase> {
let rules = create_consensus_rules();
create_store_with_consensus(rules)
}
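/// Creates an empty `TempDatabase` backed by a fresh temporary LMDB directory.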
pub fn create_test_db() -> TempDatabase {
TempDatabase::new()
}
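/// Opens a blockchain database at `path` with permissive mock validators. Orphan cleanup and
/// bad-block clearing at startup are disabled so the existing on-disk state is left untouched.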
pub fn open_blockchain_db_from_path<P: AsRef<Path>>(path: P) -> BlockchainDatabase<TempDatabase> {
let rules = create_consensus_rules();
let backend = TempDatabase::from_path(path);
let validators = Validators::new(
MockValidator::new(true),
MockValidator::new(true),
MockValidator::new(true),
);
let config = BlockchainDatabaseConfig {
cleanup_orphans_at_startup: false,
clear_bad_blocks_at_startup: false,
..Default::default()
};
BlockchainDatabase::start_new(
backend,
rules.clone(),
validators,
config,
DifficultyCalculator::new(rules, Default::default()),
)
.unwrap()
}
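/// An `LMDBDatabase` in a temporary directory. By default the directory is removed when the
/// database is dropped; see [`disable_delete_on_drop`](Self::disable_delete_on_drop).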
pub struct TempDatabase {
path: PathBuf,
db: Option<LMDBDatabase>,
delete_on_drop: bool,
}
impl TempDatabase {
pub fn new() -> Self {
let temp_path = create_temporary_data_path();
let rules = create_consensus_rules();
Self {
db: Some(create_lmdb_database(&temp_path, LMDBConfig::default(), rules).unwrap()),
path: temp_path,
delete_on_drop: true,
}
}
pub fn from_path<P: AsRef<Path>>(temp_path: P) -> Self {
let rules = create_consensus_rules();
Self {
db: Some(create_lmdb_database(&temp_path, LMDBConfig::default(), rules).unwrap()),
path: temp_path.as_ref().to_path_buf(),
delete_on_drop: true,
}
}
pub fn from_path_with_lmdb_config<P: AsRef<Path>>(temp_path: P, lmdb_config: LMDBConfig) -> Self {
let rules = create_consensus_rules();
Self {
db: Some(create_lmdb_database(&temp_path, lmdb_config, rules).unwrap()),
path: temp_path.as_ref().to_path_buf(),
delete_on_drop: true,
}
}
pub fn disable_delete_on_drop(&mut self) -> &mut Self {
self.delete_on_drop = false;
self
}
pub fn db(&self) -> &LMDBDatabase {
self.db.as_ref().unwrap()
}
pub fn path(&self) -> &Path {
&self.path
}
}
impl Default for TempDatabase {
fn default() -> Self {
Self::new()
}
}
impl Deref for TempDatabase {
type Target = LMDBDatabase;
fn deref(&self) -> &Self::Target {
self.db.as_ref().unwrap()
}
}
impl Drop for TempDatabase {
fn drop(&mut self) {
self.db = None;
        if self.delete_on_drop && self.path.exists() {
            fs::remove_dir_all(&self.path).expect("Could not delete temporary directory");
}
}
}
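// `TempDatabase` is a thin wrapper: every `BlockchainBackend` method delegates to the inner
// `LMDBDatabase`.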
impl BlockchainBackend for TempDatabase {
fn write(&mut self, tx: DbTransaction) -> Result<(), ChainStorageError> {
self.db.as_mut().unwrap().write(tx)
}
fn fetch_all_orphans(&self) -> Result<Vec<ChainHeader>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_all_orphans()
}
fn fetch(&self, key: &DbKey) -> Result<Option<DbValue>, ChainStorageError> {
self.db.as_ref().unwrap().fetch(key)
}
fn contains(&self, key: &DbKey) -> Result<bool, ChainStorageError> {
self.db.as_ref().unwrap().contains(key)
}
fn fetch_chain_header_by_height(&self, height: u64) -> Result<ChainHeader, ChainStorageError> {
self.db.as_ref().unwrap().fetch_chain_header_by_height(height)
}
fn fetch_header_accumulated_data(
&self,
hash: &HashOutput,
) -> Result<Option<BlockHeaderAccumulatedData>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_header_accumulated_data(hash)
}
fn fetch_chain_header_in_all_chains(&self, hash: &HashOutput) -> Result<ChainHeader, ChainStorageError> {
self.db.as_ref().unwrap().fetch_chain_header_in_all_chains(hash)
}
fn fetch_header_containing_kernel_mmr(&self, mmr_position: u64) -> Result<ChainHeader, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_header_containing_kernel_mmr(mmr_position)
}
fn is_empty(&self) -> Result<bool, ChainStorageError> {
self.db.as_ref().unwrap().is_empty()
}
fn fetch_block_accumulated_data(
&self,
header_hash: &HashOutput,
) -> Result<Option<BlockAccumulatedData>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_block_accumulated_data(header_hash)
}
fn fetch_block_accumulated_data_by_height(
&self,
height: u64,
) -> Result<Option<BlockAccumulatedData>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_block_accumulated_data_by_height(height)
}
fn fetch_kernels_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionKernel>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_kernels_in_block(header_hash)
}
fn fetch_bad_blocks(&self) -> Result<Vec<BadBlock>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_bad_blocks()
}
fn clear_all_bad_blocks(&mut self) -> Result<(), ChainStorageError> {
self.db.as_mut().unwrap().clear_all_bad_blocks()
}
fn fetch_kernel_by_excess_sig(
&self,
excess_sig: &CompressedSignature,
) -> Result<Option<(TransactionKernel, HashOutput)>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_kernel_by_excess_sig(excess_sig)
}
fn fetch_outputs_in_block_with_spend_state(
&self,
header_hash: &HashOutput,
spend_status_at_header: Option<&HashOutput>,
) -> Result<Vec<(TransactionOutput, bool)>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_outputs_in_block_with_spend_state(header_hash, spend_status_at_header)
}
fn fetch_output(&self, output_hash: &HashOutput) -> Result<Option<OutputMinedInfo>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_output(output_hash)
}
fn fetch_input(&self, output_hash: &HashOutput) -> Result<Option<InputMinedInfo>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_input(output_hash)
}
fn fetch_unspent_output_hash_by_commitment(
&self,
commitment: &CompressedCommitment,
) -> Result<Option<HashOutput>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_unspent_output_hash_by_commitment(commitment)
}
fn fetch_mined_info_by_payref(&self, payref: &FixedHash) -> Result<MinedInfo, ChainStorageError> {
self.db.as_ref().unwrap().fetch_mined_info_by_payref(payref)
}
fn fetch_mined_info_by_output_hash(&self, output_hash: &HashOutput) -> Result<MinedInfo, ChainStorageError> {
self.db.as_ref().unwrap().fetch_mined_info_by_output_hash(output_hash)
}
fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionOutput>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_outputs_in_block(header_hash)
}
fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionInput>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_inputs_in_block(header_hash)
}
fn fetch_mmr_size(&self, tree: MmrTree) -> Result<u64, ChainStorageError> {
self.db.as_ref().unwrap().fetch_mmr_size(tree)
}
fn orphan_count(&self) -> Result<usize, ChainStorageError> {
self.db.as_ref().unwrap().orphan_count()
}
fn fetch_last_header(&self) -> Result<BlockHeader, ChainStorageError> {
self.db.as_ref().unwrap().fetch_last_header()
}
fn clear_all_pending_headers(&self) -> Result<usize, ChainStorageError> {
self.db.as_ref().unwrap().clear_all_pending_headers()
}
fn fetch_last_chain_header(&self) -> Result<ChainHeader, ChainStorageError> {
self.db.as_ref().unwrap().fetch_last_chain_header()
}
fn fetch_tip_header(&self) -> Result<ChainHeader, ChainStorageError> {
self.db.as_ref().unwrap().fetch_tip_header()
}
fn fetch_chain_metadata(&self) -> Result<ChainMetadata, ChainStorageError> {
self.db.as_ref().unwrap().fetch_chain_metadata()
}
fn fetch_payref_rebuild_status(&self) -> Result<PayrefRebuildStatus, ChainStorageError> {
self.db.as_ref().unwrap().fetch_payref_rebuild_status()
}
fn fetch_accumulated_data_rebuild_status(&self) -> Result<AccumulatedDataRebuildStatus, ChainStorageError> {
self.db.as_ref().unwrap().fetch_accumulated_data_rebuild_status()
}
fn update_accumulated_data_check_status(
&self,
request: BlockchainCheckRequest,
) -> Result<BlockchainCheckStatus, ChainStorageError> {
self.db.as_ref().unwrap().update_accumulated_data_check_status(request)
}
fn update_blockchain_consistency_check_status(
&self,
request: BlockchainCheckRequest,
) -> Result<BlockchainCheckStatus, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.update_blockchain_consistency_check_status(request)
}
fn fetch_accumulated_data_check_status(&self) -> Result<Option<BlockchainCheckStatus>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_accumulated_data_check_status()
}
fn fetch_blockchain_consistency_check_status(&self) -> Result<Option<BlockchainCheckStatus>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_blockchain_consistency_check_status()
}
fn build_payref_indexes_for_height(
&self,
height: u64,
metadata_at_start: ChainMetadata,
initialize_stats: Option<u64>,
finalize: bool,
) -> Result<PayrefRebuildStatus, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.build_payref_indexes_for_height(height, metadata_at_start, initialize_stats, finalize)
}
fn update_accumulated_difficulty(
&self,
height: u64,
header_accum_data: BlockHeaderAccumulatedData,
last_chain_header: ChainHeader,
update_meta_data_db: bool,
) -> Result<AccumulatedDataRebuildStatus, ChainStorageError> {
self.db.as_ref().unwrap().update_accumulated_difficulty(
height,
header_accum_data,
last_chain_header,
update_meta_data_db,
)
}
fn utxo_count(&self) -> Result<usize, ChainStorageError> {
self.db.as_ref().unwrap().utxo_count()
}
fn kernel_count(&self) -> Result<usize, ChainStorageError> {
self.db.as_ref().unwrap().kernel_count()
}
fn fetch_orphan_chain_tip_by_hash(&self, hash: &HashOutput) -> Result<Option<ChainHeader>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_orphan_chain_tip_by_hash(hash)
}
fn fetch_strongest_orphan_chain_tips(&self) -> Result<Vec<ChainHeader>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_strongest_orphan_chain_tips()
}
fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result<Vec<Block>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_orphan_children_of(hash)
}
fn fetch_orphan_chain_block(&self, hash: HashOutput) -> Result<Option<ChainBlock>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_orphan_chain_block(hash)
}
fn delete_oldest_orphans(
&mut self,
horizon_height: u64,
orphan_storage_capacity: usize,
) -> Result<(), ChainStorageError> {
self.db
.as_mut()
.unwrap()
.delete_oldest_orphans(horizon_height, orphan_storage_capacity)
}
fn fetch_monero_seed_first_seen_height(&self, seed: &[u8]) -> Result<u64, ChainStorageError> {
self.db.as_ref().unwrap().fetch_monero_seed_first_seen_height(seed)
}
fn fetch_horizon_data(&self) -> Result<Option<HorizonData>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_horizon_data()
}
fn fetch_horizon_sync_output_checkpoint(&self) -> Result<Option<HorizonSyncOutputCheckpoint>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_horizon_sync_output_checkpoint()
}
fn verify_horizon_sync_output_root(
&self,
version: u64,
expected_root: HashOutput,
) -> Result<(), ChainStorageError> {
self.db
.as_ref()
.unwrap()
.verify_horizon_sync_output_root(version, expected_root)
}
fn get_stats(&self) -> Result<DbBasicStats, ChainStorageError> {
self.db.as_ref().unwrap().get_stats()
}
fn fetch_total_size_stats(&self) -> Result<DbTotalSizeStats, ChainStorageError> {
self.db.as_ref().unwrap().fetch_total_size_stats()
}
fn bad_block_exists(&self, block_hash: HashOutput) -> Result<(bool, String), ChainStorageError> {
self.db.as_ref().unwrap().bad_block_exists(block_hash)
}
fn fetch_all_reorgs(&self) -> Result<Vec<Reorg>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_all_reorgs()
}
fn fetch_all_active_validator_nodes(
&self,
height: u64,
) -> Result<Vec<ValidatorNodeRegistrationInfo>, ChainStorageError> {
self.db.as_ref().unwrap().fetch_all_active_validator_nodes(height)
}
fn fetch_active_validator_nodes(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
height: u64,
) -> Result<Vec<ValidatorNodeRegistrationInfo>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_active_validator_nodes(sidechain_pk, height)
}
fn fetch_validators_activating_in_epoch(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
epoch: VnEpoch,
) -> Result<Vec<ValidatorNodeRegistrationInfo>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_validators_activating_in_epoch(sidechain_pk, epoch)
}
fn fetch_validators_exiting_in_epoch(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
epoch: VnEpoch,
) -> Result<Vec<ValidatorNodeRegistrationInfo>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_validators_exiting_in_epoch(sidechain_pk, epoch)
}
fn validator_node_exists(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
end_epoch: VnEpoch,
validator_node_pk: &CompressedPublicKey,
) -> Result<bool, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.validator_node_exists(sidechain_pk, end_epoch, validator_node_pk)
}
fn validator_node_is_active(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
end_epoch: VnEpoch,
validator_node_pk: &CompressedPublicKey,
) -> Result<bool, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.validator_node_is_active(sidechain_pk, end_epoch, validator_node_pk)
}
fn validator_node_is_active_for_shard_group(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
end_epoch: VnEpoch,
validator_node_pk: &CompressedPublicKey,
shard_group: ShardGroup,
) -> Result<bool, ChainStorageError> {
self.db.as_ref().unwrap().validator_node_is_active_for_shard_group(
sidechain_pk,
end_epoch,
validator_node_pk,
shard_group,
)
}
fn validator_nodes_count_for_shard_group(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
end_epoch: VnEpoch,
shard_group: ShardGroup,
) -> Result<usize, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.validator_nodes_count_for_shard_group(sidechain_pk, end_epoch, shard_group)
}
fn get_validator_node(
&self,
sidechain_pk: Option<&CompressedPublicKey>,
public_key: CompressedPublicKey,
) -> Result<Option<ValidatorNodeRegistrationInfo>, ChainStorageError> {
self.db.as_ref().unwrap().get_validator_node(sidechain_pk, public_key)
}
fn fetch_template_registrations(
&self,
start_height: u64,
end_height: u64,
) -> Result<Vec<TemplateRegistrationEntry>, ChainStorageError> {
self.db
.as_ref()
.unwrap()
.fetch_template_registrations(start_height, end_height)
}
fn create_smt_reader(&self) -> Result<OwnedLmdbTreeReader<'_>, ChainStorageError> {
self.db.as_ref().unwrap().create_smt_reader()
}
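    // Progress-reporting stats hooks are not needed in tests, so they are no-ops.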
fn set_stats_total_height(&self, _total: u64) {}
fn update_stats_progress(&self, _current: u64) {}
}
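/// Creates (but does not add to the database) a chain of blocks from the given block specs,
/// rooted at `genesis_block`. Returns the block names in creation order and a map from name to
/// block, with the root block stored under the key "GB".
///
/// A minimal usage sketch (the spec construction is elided; see `BlockSpecs` for the
/// conversions that are actually supported):
/// ```ignore
/// let (names, blocks) = create_chained_blocks(&db, specs, genesis_block);
/// let first = &blocks[&names[0]];
/// ```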
pub fn create_chained_blocks<T: Into<BlockSpecs>, TDB: BlockchainBackend>(
db: &BlockchainDatabase<TDB>,
blocks: T,
genesis_block: Arc<ChainBlock>,
) -> (Vec<String>, HashMap<String, Arc<ChainBlock>>) {
let mut block_hashes = HashMap::new();
let gb_height = genesis_block.header().height;
block_hashes.insert("GB".to_string(), genesis_block);
let rules = BaseNodeConsensusManager::builder(Network::LocalNet).build().unwrap();
let km = KeyManager::new_random().unwrap();
let blocks: BlockSpecs = blocks.into();
let mut block_names = Vec::with_capacity(blocks.len());
let (script_key_id, wallet_payment_address) = default_coinbase_entities(&km);
let mock_store = MockTreeStore::new(true);
let jmt = JellyfishMerkleTree::<_, SmtHasher>::new(&mock_store);
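    // Replay all blocks up to and including the root block into the in-memory JMT so that the
    // sparse merkle tree state matches the database before new blocks are chained on top.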
for h in 0..=gb_height {
let mut batch = vec![];
let h_block = db.fetch_block(h, false).unwrap();
for output in h_block.block().body.outputs() {
if !output.is_burned() {
let smt_key = KeyHash(output.commitment.as_bytes().try_into().expect("commitment is 32 bytes"));
let smt_value = output.smt_hash(h_block.block().header.height);
batch.push((smt_key, Some(smt_value.to_vec())));
}
}
for input in h_block.block().body.inputs() {
            let smt_key = KeyHash(
                input
                    .commitment()
                    .unwrap()
                    .as_bytes()
                    .try_into()
                    .expect("commitment is 32 bytes"),
            );
batch.push((smt_key, None));
}
let (root, updates) = jmt.put_value_set(batch, h).unwrap();
mock_store.write_node_batch(&updates.node_batch).unwrap();
assert_eq!(root.0.as_slice(), h_block.block().header.output_mr.as_slice());
}
for block_spec in blocks {
let prev_block = block_hashes
.get(block_spec.parent)
.unwrap_or_else(|| panic!("Could not find block {}", block_spec.parent));
let name = block_spec.name;
let difficulty = block_spec.difficulty;
let (mut block, _) = create_block(
db,
&rules,
prev_block.block(),
block_spec,
&km,
&script_key_id,
&wallet_payment_address,
None,
);
let updates = update_block_and_smt(&mut block, &jmt);
mock_store.write_node_batch(&updates.node_batch).unwrap();
let block = mine_block(block, prev_block.accumulated_data(), difficulty);
block_names.push(name.to_string());
block_hashes.insert(name.to_string(), block);
}
(block_names, block_hashes)
}
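/// Mines the block to the requested difficulty and wraps it in a `ChainBlock` with accumulated
/// data derived from the previous block.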
fn mine_block(block: Block, prev_block_accum: &BlockHeaderAccumulatedData, difficulty: Difficulty) -> Arc<ChainBlock> {
let block = mine_to_difficulty(block, difficulty).unwrap();
let accum = BlockHeaderAccumulatedDataBuilder::from_previous(prev_block_accum)
.with_hash(block.hash())
.with_achieved_target_difficulty(
AchievedTargetDifficulty::try_construct(PowAlgorithm::Sha3x, difficulty, difficulty).unwrap(),
)
.with_total_kernel_offset(block.header.total_kernel_offset.clone())
.build(&create_consensus_constants(block.header.height))
.unwrap();
Arc::new(ChainBlock::try_construct(Arc::new(block), accum).unwrap())
}
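/// Creates a chain of blocks from the given specs, rooted at the stored genesis block, and adds
/// each block to the database, panicking if any block is rejected.
///
/// A minimal usage sketch (spec construction elided):
/// ```ignore
/// let db = create_new_blockchain();
/// let (names, chain) = create_main_chain(&db, specs);
/// ```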
pub fn create_main_chain<T: Into<BlockSpecs>>(
db: &BlockchainDatabase<TempDatabase>,
blocks: T,
) -> (Vec<String>, HashMap<String, Arc<ChainBlock>>) {
let genesis_block = db
.fetch_block(0, true)
.unwrap()
.try_into_chain_block()
.map(Arc::new)
.unwrap();
    let (names, chain) = create_chained_blocks(db, blocks, genesis_block);
names.iter().for_each(|name| {
let block = chain.get(name).unwrap();
db.add_block(block.to_arc_block()).unwrap();
});
(names, chain)
}
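/// Creates a chain of blocks rooted at `root_block` and inserts each one as a chained orphan in
/// a single database transaction, without extending the main chain.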
pub fn create_orphan_chain<T: Into<BlockSpecs>>(
db: &BlockchainDatabase<TempDatabase>,
blocks: T,
root_block: Arc<ChainBlock>,
) -> (Vec<String>, HashMap<String, Arc<ChainBlock>>) {
let (names, chain) = create_chained_blocks(db, blocks, root_block);
let mut txn = DbTransaction::new();
for name in &names {
let block = chain.get(name).unwrap().clone();
txn.insert_chained_orphan(block);
}
db.write(txn).unwrap();
(names, chain)
}
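/// Applies the block's outputs (insertions) and inputs (deletions) to the JMT, sets the block
/// header's output merkle root to the resulting root, and returns the node updates so the
/// caller can persist them to the tree store.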
pub fn update_block_and_smt<T: TreeReader>(
block: &mut Block,
jmt: &JellyfishMerkleTree<T, SmtHasher>,
) -> TreeUpdateBatch {
let mut batch = vec![];
for output in block.body.outputs() {
if !output.is_burned() {
let smt_key = KeyHash(output.commitment.as_bytes().try_into().expect("commitment is 32 bytes"));
let smt_value = output.smt_hash(block.header.height);
batch.push((smt_key, Some(smt_value.to_vec())));
}
}
for input in block.body.inputs() {
        let smt_key = KeyHash(
            input
                .commitment()
                .unwrap()
                .as_bytes()
                .try_into()
                .expect("commitment is 32 bytes"),
        );
batch.push((smt_key, None));
}
let (root, updates) = jmt.put_value_set(batch, block.header.height).unwrap();
block.header.output_mr = FixedHash::try_from(root.0.as_slice()).unwrap();
updates
}
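/// A convenience wrapper around a `BlockchainDatabase<TempDatabase>` that tracks named blocks,
/// making it easy to build chains and reorg scenarios in tests.
///
/// A minimal usage sketch (the `BlockSpec` construction is elided; see `BlockSpec` for its
/// builder API):
/// ```ignore
/// let mut blockchain = TestBlockchain::create(create_consensus_rules());
/// let (block, coinbase) = blockchain.append_to_tip(spec)?;
/// ```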
pub struct TestBlockchain {
db: BlockchainDatabase<TempDatabase>,
chain: Vec<(&'static str, Arc<ChainBlock>)>,
rules: BaseNodeConsensusManager,
pub km: KeyManager,
script_key_id: TariKeyId,
wallet_payment_address: TariAddress,
range_proof_type: RangeProofType,
}
impl TestBlockchain {
pub fn new(db: BlockchainDatabase<TempDatabase>, rules: BaseNodeConsensusManager) -> Self {
let genesis = db
.fetch_block(0, true)
.unwrap()
.try_into_chain_block()
.map(Arc::new)
.unwrap();
let km = KeyManager::new_random().unwrap();
let (script_key_id, wallet_payment_address) = default_coinbase_entities(&km);
let mut blockchain = Self {
db,
chain: Default::default(),
rules,
km,
script_key_id,
wallet_payment_address,
range_proof_type: RangeProofType::BulletProofPlus,
};
blockchain.chain.push(("GB", genesis));
blockchain
}
pub fn create(rules: BaseNodeConsensusManager) -> Self {
Self::new(create_custom_blockchain(rules.clone()), rules)
}
pub fn append_chain(
&mut self,
block_specs: BlockSpecs,
) -> Result<Vec<(Arc<ChainBlock>, WalletOutput)>, ChainStorageError> {
let mut blocks = Vec::with_capacity(block_specs.len());
for spec in block_specs {
blocks.push(self.append(spec)?);
}
Ok(blocks)
}
pub fn create_chain(&mut self, block_specs: BlockSpecs) -> Vec<(Arc<ChainBlock>, WalletOutput)> {
let mut result = Vec::new();
for spec in block_specs {
result.push(self.create_chained_block(spec));
}
result
}
pub fn add_blocks(&self, blocks: Vec<Arc<ChainBlock>>) -> Result<(), ChainStorageError> {
for block in blocks {
let result = self.db.add_block(block.to_arc_block())?;
assert!(result.is_added());
}
Ok(())
}
pub fn with_validators(validators: Validators<TempDatabase>) -> Self {
let rules = BaseNodeConsensusManager::builder(Network::LocalNet).build().unwrap();
let db = create_store_with_consensus_and_validators(rules.clone(), validators);
Self::new(db, rules)
}
pub fn rules(&self) -> &BaseNodeConsensusManager {
&self.rules
}
pub fn db(&self) -> &BlockchainDatabase<TempDatabase> {
&self.db
}
pub fn add_block(&mut self, block_spec: BlockSpec) -> Result<(Arc<ChainBlock>, WalletOutput), ChainStorageError> {
let name = block_spec.name;
let (block, coinbase) = self.create_chained_block(block_spec);
let result = self.append_block(name, block.clone())?;
assert!(result.is_added());
Ok((block, coinbase))
}
pub fn add_next_tip(&mut self, spec: BlockSpec) -> Result<(Arc<ChainBlock>, WalletOutput), ChainStorageError> {
let name = spec.name;
let (block, coinbase) = self.create_next_tip(spec);
let result = self.append_block(name, block.clone())?;
assert!(result.is_added());
Ok((block, coinbase))
}
pub fn append_block(
&mut self,
name: &'static str,
block: Arc<ChainBlock>,
) -> Result<BlockAddResult, ChainStorageError> {
let result = self.db.add_block(block.to_arc_block())?;
self.chain.push((name, block));
Ok(result)
}
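    /// Looks up a previously appended block by name. Note that despite the name, only the
    /// `ChainBlock` is returned; no per-block SMT is tracked.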
pub fn get_block_and_smt_by_name(&self, name: &'static str) -> Option<Arc<ChainBlock>> {
self.chain.iter().find(|(n, _)| *n == name).map(|(_, ch)| ch.clone())
}
pub fn get_tip_block(&self) -> (&'static str, Arc<ChainBlock>) {
self.chain.last().cloned().unwrap()
}
pub fn create_chained_block(&mut self, block_spec: BlockSpec) -> (Arc<ChainBlock>, WalletOutput) {
let parent = self.get_block_and_smt_by_name(block_spec.parent).unwrap();
let difficulty = block_spec.difficulty;
let Self {
db,
rules,
km,
script_key_id,
wallet_payment_address,
range_proof_type,
..
} = self;
let (block, coinbase) = create_block(
db,
rules,
parent.block(),
block_spec,
km,
script_key_id,
wallet_payment_address,
Some(*range_proof_type),
);
let block = mine_block(block, parent.accumulated_data(), difficulty);
(block, coinbase)
}
pub fn create_unmined_block(&mut self, block_spec: BlockSpec) -> (Block, WalletOutput) {
let parent = self.get_block_and_smt_by_name(block_spec.parent).unwrap();
let Self {
db,
rules,
km,
script_key_id,
wallet_payment_address,
range_proof_type,
..
} = self;
let (mut block, outputs) = create_block(
db,
rules,
parent.block(),
block_spec,
km,
script_key_id,
wallet_payment_address,
Some(*range_proof_type),
);
block.body.sort();
(block, outputs)
}
pub fn mine_block(&self, parent_name: &'static str, block: Block, difficulty: Difficulty) -> Arc<ChainBlock> {
let parent = self.get_block_and_smt_by_name(parent_name).unwrap();
mine_block(block, parent.accumulated_data(), difficulty)
}
pub fn create_next_tip(&mut self, spec: BlockSpec) -> (Arc<ChainBlock>, WalletOutput) {
let (name, _) = self.get_tip_block();
self.create_chained_block(spec.with_parent_block(name))
}
pub fn append_to_tip(&mut self, spec: BlockSpec) -> Result<(Arc<ChainBlock>, WalletOutput), ChainStorageError> {
let (tip, _) = self.get_tip_block();
self.append(spec.with_parent_block(tip))
}
pub fn append(&mut self, spec: BlockSpec) -> Result<(Arc<ChainBlock>, WalletOutput), ChainStorageError> {
let name = spec.name;
let (block, outputs) = self.create_chained_block(spec);
self.append_block(name, block.clone())?;
Ok((block, outputs))
}
pub fn get_genesis_block(&self) -> Arc<ChainBlock> {
self.chain.first().map(|(_, block)| block).unwrap().clone()
}
}