use crate::block::connect_block;
use crate::error::Result;
use crate::segwit::Witness;
use crate::types::*;
use blvm_spec_lock::spec_locked;
use std::collections::HashMap;
/// Reorganize from `current_chain` onto `new_chain` using empty witnesses.
///
/// Convenience wrapper around [`reorganize_chain_with_witnesses`] for callers
/// that have no segwit witness data, no header context, and no undo-log
/// storage callbacks.
///
/// # Errors
/// Propagates any error from [`reorganize_chain_with_witnesses`] (e.g. no
/// common ancestor, invalid block during connection).
#[spec_locked("11.3")]
pub fn reorganize_chain(
    new_chain: &[Block],
    current_chain: &[Block],
    current_utxo_set: UtxoSet,
    current_height: Natural,
    network: crate::types::Network,
) -> Result<ReorganizationResult> {
    // Preconditions (mirrored in the callee, but asserted here so failures
    // point at the public entry point).
    assert!(
        current_height <= i64::MAX as u64,
        "Current height {current_height} must fit in i64"
    );
    assert!(
        current_utxo_set.len() <= u32::MAX as usize,
        "Current UTXO set size {} exceeds maximum",
        current_utxo_set.len()
    );
    assert!(
        new_chain.len() <= 10_000,
        "New chain length {} must be reasonable",
        new_chain.len()
    );
    assert!(
        current_chain.len() <= 10_000,
        "Current chain length {} must be reasonable",
        current_chain.len()
    );
    // One empty witness stack per input of every transaction in every block.
    // Built by mapping over `new_chain`, so the outer length matches the
    // block count by construction (the callee re-checks and errors anyway).
    let empty_witnesses: Vec<Vec<Vec<Witness>>> = new_chain
        .iter()
        .map(|block| {
            block
                .transactions
                .iter()
                .map(|tx| tx.inputs.iter().map(|_| Vec::new()).collect())
                .collect()
        })
        .collect();
    reorganize_chain_with_witnesses(
        new_chain,
        &empty_witnesses,
        None,
        current_chain,
        current_utxo_set,
        current_height,
        None::<fn(&Block) -> Option<Vec<Witness>>>,
        None::<fn(Natural) -> Option<Vec<BlockHeader>>>,
        None::<fn(&Hash) -> Option<BlockUndoLog>>,
        None::<fn(&Hash, &BlockUndoLog) -> Result<()>>,
        network,
    )
}
/// Reorganize from `current_chain` onto `new_chain` with explicit witness
/// data for the new blocks.
///
/// Steps:
/// 1. Find the last block shared by both chains (the common ancestor).
/// 2. Disconnect every current-chain block after the ancestor, newest first,
///    replaying stored undo logs when `get_undo_log_for_block` provides them.
/// 3. Connect every new-chain block after the ancestor, validating each one
///    and persisting its undo log via `store_undo_log_for_block` if given.
///
/// `_get_witnesses_for_block` and `_get_headers_for_height` are currently
/// unused and kept only for interface stability.
///
/// # Errors
/// Returns an error when no common ancestor exists, when the witness count
/// does not match the block count, or when any new block fails validation.
#[allow(clippy::too_many_arguments)]
#[spec_locked("11.3")]
pub fn reorganize_chain_with_witnesses(
    new_chain: &[Block],
    new_chain_witnesses: &[Vec<Vec<Witness>>],
    new_chain_headers: Option<&[BlockHeader]>,
    current_chain: &[Block],
    current_utxo_set: UtxoSet,
    current_height: Natural,
    _get_witnesses_for_block: Option<impl Fn(&Block) -> Option<Vec<Witness>>>,
    _get_headers_for_height: Option<impl Fn(Natural) -> Option<Vec<BlockHeader>>>,
    get_undo_log_for_block: Option<impl Fn(&Hash) -> Option<BlockUndoLog>>,
    store_undo_log_for_block: Option<impl Fn(&Hash, &BlockUndoLog) -> Result<()>>,
    network: crate::types::Network,
) -> Result<ReorganizationResult> {
    // Caller-supplied size preconditions.
    assert!(
        current_height <= i64::MAX as u64,
        "Current height {current_height} must fit in i64"
    );
    assert!(
        current_utxo_set.len() <= u32::MAX as usize,
        "Current UTXO set size {} exceeds maximum",
        current_utxo_set.len()
    );
    assert!(
        new_chain.len() <= 10_000,
        "New chain length {} must be reasonable",
        new_chain.len()
    );
    assert!(
        current_chain.len() <= 10_000,
        "Current chain length {} must be reasonable",
        current_chain.len()
    );
    assert!(
        new_chain_witnesses.len() == new_chain.len(),
        "New chain witness count {} must match block count {}",
        new_chain_witnesses.len(),
        new_chain.len()
    );
    let common_ancestor = find_common_ancestor(new_chain, current_chain)?;
    let common_ancestor_header = common_ancestor.header;
    let common_ancestor_index = common_ancestor.new_chain_index;
    let current_ancestor_index = common_ancestor.current_chain_index;
    // Sanity-check the indices returned by find_common_ancestor.
    assert!(
        common_ancestor_index < new_chain.len(),
        "Common ancestor index {} must be < new chain length {}",
        common_ancestor_index,
        new_chain.len()
    );
    assert!(
        current_ancestor_index < current_chain.len(),
        "Common ancestor index {} must be < current chain length {}",
        current_ancestor_index,
        current_chain.len()
    );
    let mut utxo_set = current_utxo_set;
    // Disconnect current-chain blocks strictly after the ancestor, tip first.
    let disconnect_start = current_ancestor_index + 1;
    // NOTE(review): collected but never returned in ReorganizationResult —
    // confirm whether callers need the disconnected undo logs.
    let mut disconnected_undo_logs: HashMap<Hash, BlockUndoLog> = HashMap::new();
    for i in (disconnect_start..current_chain.len()).rev() {
        if let Some(block) = current_chain.get(i) {
            assert!(
                !block.transactions.is_empty(),
                "Block at index {i} must have at least one transaction"
            );
            let block_hash = calculate_block_hash(&block.header);
            assert!(block_hash != [0u8; 32], "Block hash must be non-zero");
            // Without a stored undo log we can only replay an empty one,
            // which leaves the UTXO set untouched for this block.
            let undo_log = if let Some(ref get_undo_log) = get_undo_log_for_block {
                get_undo_log(&block_hash).unwrap_or_else(BlockUndoLog::new)
            } else {
                BlockUndoLog::new()
            };
            utxo_set = disconnect_block(block, &undo_log, utxo_set, (i as Natural) + 1)?;
            disconnected_undo_logs.insert(block_hash, undo_log);
        }
    }
    // The ancestor's height is the current tip height minus the number of
    // blocks sitting above it on the current chain.
    let blocks_after_ancestor = (current_chain.len() - 1 - current_ancestor_index) as Natural;
    let common_ancestor_height = current_height.saturating_sub(blocks_after_ancestor);
    let mut new_height = common_ancestor_height;
    let mut connected_blocks = Vec::new();
    let mut connected_undo_logs: HashMap<Hash, BlockUndoLog> = HashMap::new();
    if new_chain_witnesses.len() != new_chain.len() {
        return Err(crate::error::ConsensusError::ConsensusRuleViolation(
            format!(
                "Witness count {} does not match block count {}",
                new_chain_witnesses.len(),
                new_chain.len()
            )
            .into(),
        ));
    }
    // Connect new-chain blocks strictly after the ancestor, oldest first.
    for (i, block) in new_chain.iter().enumerate().skip(common_ancestor_index + 1) {
        new_height += 1;
        // Fall back to empty witness stacks if none were supplied.
        let witnesses = new_chain_witnesses.get(i).cloned().unwrap_or_else(|| {
            block
                .transactions
                .iter()
                .map(|tx| tx.inputs.iter().map(|_| Vec::new()).collect())
                .collect()
        });
        let recent_headers = new_chain_headers;
        let network_time = block.header.timestamp;
        let context = crate::block::block_validation_context_for_connect_ibd(
            recent_headers,
            network_time,
            network,
        );
        let (validation_result, new_utxo_set, undo_log) =
            connect_block(block, &witnesses, utxo_set, new_height, &context)?;
        if !matches!(validation_result, ValidationResult::Valid) {
            return Err(crate::error::ConsensusError::ConsensusRuleViolation(
                format!("Invalid block at height {new_height} during reorganization").into(),
            ));
        }
        let block_hash = calculate_block_hash(&block.header);
        if let Some(ref store_undo_log) = store_undo_log_for_block {
            // Storage failure is non-fatal: the reorg proceeds, but the
            // block may not be cleanly disconnectable later.
            if let Err(e) = store_undo_log(&block_hash, &undo_log) {
                #[cfg(any(debug_assertions, feature = "profile"))]
                eprintln!("Warning: Failed to store undo log for block {block_hash:?}: {e}");
                #[cfg(not(any(debug_assertions, feature = "profile")))]
                let _ = e;
            }
        }
        connected_undo_logs.insert(block_hash, undo_log);
        utxo_set = new_utxo_set;
        connected_blocks.push(block.clone());
    }
    Ok(ReorganizationResult {
        new_utxo_set: utxo_set,
        new_height,
        common_ancestor: common_ancestor_header,
        disconnected_blocks: current_chain[disconnect_start..].to_vec(),
        connected_blocks,
        reorganization_depth: current_chain.len() - disconnect_start,
        connected_block_undo_logs: connected_undo_logs,
    })
}
/// Update the mempool after a chain reorganization.
///
/// Removes from the mempool (a) every transaction confirmed by a newly
/// connected block and (b), when `get_tx_by_id` is provided, every mempool
/// transaction that spends an outpoint already consumed by a connected block.
/// Returns the ids of all removed transactions.
///
/// NOTE(review): transactions from disconnected blocks are not re-added to
/// the mempool here — confirm callers handle resurrection separately.
#[spec_locked("11.3")]
pub fn update_mempool_after_reorg<F>(
    mempool: &mut crate::mempool::Mempool,
    reorg_result: &ReorganizationResult,
    utxo_set: &UtxoSet,
    get_tx_by_id: Option<F>,
) -> Result<Vec<Hash>>
where
    F: Fn(&Hash) -> Option<Transaction>,
{
    use crate::mempool::update_mempool_after_block;
    let mut all_removed = Vec::new();
    // Drop transactions that the connected blocks confirmed.
    for block in &reorg_result.connected_blocks {
        let removed = update_mempool_after_block(mempool, block, utxo_set)?;
        all_removed.extend(removed);
    }
    // Collect every outpoint spent by non-coinbase transactions in the
    // connected blocks; mempool entries spending any of them are conflicted.
    let mut spent_outpoints = std::collections::HashSet::new();
    for block in &reorg_result.connected_blocks {
        for tx in &block.transactions {
            if !crate::transaction::is_coinbase(tx) {
                for input in &tx.inputs {
                    spent_outpoints.insert(input.prevout);
                }
            }
        }
    }
    // Conflict detection needs transaction bodies, so it only runs when the
    // caller can resolve ids to transactions.
    if let Some(lookup) = get_tx_by_id {
        let mut invalid_tx_ids = Vec::new();
        for &tx_id in mempool.iter() {
            if let Some(tx) = lookup(&tx_id) {
                for input in &tx.inputs {
                    if spent_outpoints.contains(&input.prevout) {
                        invalid_tx_ids.push(tx_id);
                        break;
                    }
                }
            }
        }
        // Remove in a second pass to avoid mutating while iterating.
        for tx_id in invalid_tx_ids {
            if mempool.remove(&tx_id) {
                all_removed.push(tx_id);
            }
        }
    }
    Ok(all_removed)
}
/// Mempool update after a reorg without a transaction-lookup callback.
///
/// Equivalent to [`update_mempool_after_reorg`] with `get_tx_by_id = None`:
/// only transactions confirmed by the connected blocks are removed.
#[spec_locked("11.3")]
pub fn update_mempool_after_reorg_simple(
    mempool: &mut crate::mempool::Mempool,
    reorg_result: &ReorganizationResult,
    utxo_set: &UtxoSet,
) -> Result<Vec<Hash>> {
    // A concrete fn-pointer type is needed so `None` has a known `F`.
    let no_lookup = None::<fn(&Hash) -> Option<Transaction>>;
    update_mempool_after_reorg(mempool, reorg_result, utxo_set, no_lookup)
}
/// Location of the last block shared by both chains, as found by
/// `find_common_ancestor`.
struct CommonAncestorResult {
    // Header of the common ancestor block.
    header: BlockHeader,
    // Index of the ancestor within the new chain.
    new_chain_index: usize,
    // Index of the ancestor within the current chain.
    current_chain_index: usize,
}
/// Find the most recent block shared by `new_chain` and `current_chain`.
///
/// Blocks are compared by hash at equal distances from each chain's tip; if
/// no match is found that way, the two genesis blocks are compared as a
/// fallback for chains of different lengths.
///
/// # Errors
/// Returns an error if either chain is empty or the chains share no block.
#[spec_locked("11.3")]
fn find_common_ancestor(
    new_chain: &[Block],
    current_chain: &[Block],
) -> Result<CommonAncestorResult> {
    if new_chain.is_empty() || current_chain.is_empty() {
        return Err(crate::error::ConsensusError::ConsensusRuleViolation(
            "Cannot find common ancestor: empty chain".into(),
        ));
    }
    let min_len = new_chain.len().min(current_chain.len());
    // Walk back from the tips in lockstep, comparing block hashes.
    for distance_from_tip in 0..min_len {
        let new_idx = new_chain.len() - 1 - distance_from_tip;
        let current_idx = current_chain.len() - 1 - distance_from_tip;
        let new_hash = calculate_block_hash(&new_chain[new_idx].header);
        let current_hash = calculate_block_hash(&current_chain[current_idx].header);
        if new_hash == current_hash {
            return Ok(CommonAncestorResult {
                header: new_chain[new_idx].header.clone(),
                new_chain_index: new_idx,
                current_chain_index: current_idx,
            });
        }
    }
    // Fallback: chains of different lengths may still share a genesis block.
    // (Both chains are known non-empty — checked at the top.)
    let new_genesis_hash = calculate_block_hash(&new_chain[0].header);
    let current_genesis_hash = calculate_block_hash(&current_chain[0].header);
    if new_genesis_hash == current_genesis_hash {
        return Ok(CommonAncestorResult {
            header: new_chain[0].header.clone(),
            new_chain_index: 0,
            current_chain_index: 0,
        });
    }
    Err(crate::error::ConsensusError::ConsensusRuleViolation(
        "Chains do not share a common ancestor".into(),
    ))
}
/// Disconnect a block from the UTXO set by replaying its undo log.
///
/// Each undo entry removes the UTXO the block created at its outpoint (if
/// any) and restores the UTXO it had replaced or spent (if any). `block` and
/// `height` are used only for precondition checks.
#[spec_locked("11.3.1")]
fn disconnect_block(
    block: &Block,
    undo_log: &BlockUndoLog,
    mut utxo_set: UtxoSet,
    height: Natural,
) -> Result<UtxoSet> {
    assert!(
        !block.transactions.is_empty(),
        "Block must have at least one transaction"
    );
    assert!(
        height <= i64::MAX as u64,
        "Block height {height} must fit in i64"
    );
    assert!(
        utxo_set.len() <= u32::MAX as usize,
        "UTXO set size {} must not exceed maximum",
        utxo_set.len()
    );
    assert!(
        undo_log.entries.len() <= 10_000,
        "Undo log entry count {} must be reasonable",
        undo_log.entries.len()
    );
    for entry in &undo_log.entries {
        // Undo the block's effect on this outpoint: drop the output the
        // block created, then restore whatever it had replaced.
        if entry.new_utxo.is_some() {
            utxo_set.remove(&entry.outpoint);
        }
        if let Some(previous_utxo) = &entry.previous_utxo {
            utxo_set.insert(entry.outpoint, std::sync::Arc::clone(previous_utxo));
        }
    }
    Ok(utxo_set)
}
/// Decide whether `new_chain` should replace `current_chain`.
///
/// Policy: a strictly longer chain always wins; for equal lengths the chain
/// with strictly more total work wins; a shorter chain never wins.
///
/// # Errors
/// Propagates errors from [`calculate_chain_work`] (invalid `bits` encoding).
#[track_caller]
#[spec_locked("11.3")]
pub fn should_reorganize(new_chain: &[Block], current_chain: &[Block]) -> Result<bool> {
    assert!(
        new_chain.len() <= 10_000,
        "New chain length {} must be reasonable",
        new_chain.len()
    );
    assert!(
        current_chain.len() <= 10_000,
        "Current chain length {} must be reasonable",
        current_chain.len()
    );
    if new_chain.len() > current_chain.len() {
        return Ok(true);
    }
    if new_chain.len() == current_chain.len() {
        // Equal length: break the tie by accumulated proof-of-work.
        let new_work = calculate_chain_work(new_chain)?;
        let current_work = calculate_chain_work(current_chain)?;
        return Ok(new_work > current_work);
    }
    // Shorter chain: never reorganize.
    Ok(false)
}
/// Sum an approximation of the proof-of-work over all blocks in `chain`.
///
/// Per-block work is approximated as `u128::MAX / (target + 1)` — larger
/// targets mean easier blocks and therefore less work. The running total
/// saturates at `u128::MAX` rather than overflowing.
///
/// # Errors
/// Returns an error if any block's `bits` field fails to expand to a target.
#[spec_locked("11.3")]
fn calculate_chain_work(chain: &[Block]) -> Result<u128> {
    let mut total_work = 0u128;
    for block in chain {
        let target = expand_target(block.header.bits)?;
        if target > 0 {
            // target == u128::MAX would overflow `target + 1`, so it is
            // treated as contributing zero work explicitly. Otherwise the
            // divisor is in 2..=u128::MAX and the division cannot fail.
            let work_contribution = if target == u128::MAX {
                0
            } else {
                u128::MAX / (target + 1)
            };
            total_work = total_work.saturating_add(work_contribution);
        }
    }
    Ok(total_work)
}
/// Expand a compact-format `bits` value into a full 128-bit target.
///
/// The compact format stores an exponent in the top byte and a 24-bit
/// mantissa below it; exponents above 19 would shift past 128 bits and are
/// rejected.
fn expand_target(bits: Natural) -> Result<u128> {
    let exponent = (bits >> 24) as u8;
    let mantissa = (bits & 0x00ffffff) as u128;
    if exponent <= 3 {
        // Small exponents shift the mantissa down instead of up.
        let shift = 8 * (3 - exponent);
        return Ok(mantissa >> shift);
    }
    if exponent > 19 {
        return Err(crate::error::ConsensusError::InvalidProofOfWork(
            "Target too large".into(),
        ));
    }
    // 4 <= exponent <= 19, so shift is at most 128; checked_shl still guards
    // the boundary case where the shift equals the type width.
    let shift = u32::from(8 * (exponent - 3));
    mantissa.checked_shl(shift).ok_or_else(|| {
        crate::error::ConsensusError::InvalidProofOfWork("Target expansion overflow".into())
    })
}
/// Build a deterministic mock transaction id from a few transaction fields.
///
/// NOTE: this is not a real txid (no serialization or hashing) — it only
/// distinguishes transactions that differ in version, input/output counts,
/// or lock time. Used by tests in this file.
#[allow(dead_code)]
fn calculate_tx_id(tx: &Transaction) -> Hash {
    let fields = [
        (tx.version & 0xff) as u8,
        (tx.inputs.len() & 0xff) as u8,
        (tx.outputs.len() & 0xff) as u8,
        (tx.lock_time & 0xff) as u8,
    ];
    let mut hash = [0u8; 32];
    hash[..4].copy_from_slice(&fields);
    hash
}
/// Double-SHA256 of the 80-byte serialized block header.
fn calculate_block_hash(header: &BlockHeader) -> Hash {
    use sha2::{Digest, Sha256};
    // Stream the header fields (little-endian integers, wire order) straight
    // into the first SHA-256 round — no intermediate buffer needed.
    let mut hasher = Sha256::new();
    hasher.update(header.version.to_le_bytes());
    hasher.update(header.prev_block_hash);
    hasher.update(header.merkle_root);
    hasher.update(header.timestamp.to_le_bytes());
    hasher.update(header.bits.to_le_bytes());
    hasher.update(header.nonce.to_le_bytes());
    // Second SHA-256 round over the first digest.
    let digest = Sha256::digest(hasher.finalize());
    let mut hash = [0u8; 32];
    hash.copy_from_slice(&digest);
    hash
}
/// Serde adapter for `Option<Arc<UTXO>>` fields: the `Arc` wrapper is
/// transparent on the wire, so values serialize exactly like `Option<UTXO>`.
mod undo_entry_serde {
    use crate::types::UTXO;
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use std::sync::Arc;

    /// Serialize the inner `UTXO` (if any), ignoring the `Arc`.
    pub fn serialize<S: Serializer>(opt: &Option<Arc<UTXO>>, s: S) -> Result<S::Ok, S::Error> {
        opt.as_deref().serialize(s)
    }

    /// Deserialize an optional `UTXO` and wrap it in a fresh `Arc`.
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Arc<UTXO>>, D::Error> {
        let inner = Option::<UTXO>::deserialize(d)?;
        Ok(inner.map(Arc::new))
    }
}
/// One reversible UTXO-set change recorded while connecting a block.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct UndoEntry {
    /// Outpoint whose UTXO-set slot this entry describes.
    pub outpoint: OutPoint,
    /// UTXO that occupied the slot before the block connected; restored on
    /// disconnect, if present.
    #[serde(with = "undo_entry_serde")]
    pub previous_utxo: Option<std::sync::Arc<UTXO>>,
    /// UTXO the block created at this outpoint; removed on disconnect, if
    /// present.
    #[serde(with = "undo_entry_serde")]
    pub new_utxo: Option<std::sync::Arc<UTXO>>,
}
/// The UTXO-set changes recorded for one block; replayed by
/// `disconnect_block` to remove the block's effects.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct BlockUndoLog {
    /// Recorded changes for the block.
    pub entries: Vec<UndoEntry>,
}
impl BlockUndoLog {
    /// Create an empty undo log.
    pub fn new() -> Self {
        Self {
            entries: Vec::new(),
        }
    }

    /// Append one recorded UTXO-set change.
    pub fn push(&mut self, entry: UndoEntry) {
        self.entries.push(entry);
    }

    /// Number of recorded changes (companion to [`Self::is_empty`]).
    pub fn len(&self) -> usize {
        self.entries.len()
    }

    /// `true` when no changes have been recorded.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
}
impl Default for BlockUndoLog {
fn default() -> Self {
Self::new()
}
}
/// Outcome of a successful chain reorganization.
#[derive(Debug, Clone)]
pub struct ReorganizationResult {
    /// UTXO set after disconnecting the old branch and connecting the new one.
    pub new_utxo_set: UtxoSet,
    /// Chain height at the new tip.
    pub new_height: Natural,
    /// Header of the last block shared by both chains.
    pub common_ancestor: BlockHeader,
    /// Blocks removed from the old active chain (ancestor excluded).
    pub disconnected_blocks: Vec<Block>,
    /// Blocks added to the new active chain (ancestor excluded).
    pub connected_blocks: Vec<Block>,
    /// Number of blocks disconnected from the old chain.
    pub reorganization_depth: usize,
    /// Undo logs produced while connecting the new blocks, keyed by block hash.
    pub connected_block_undo_logs: HashMap<Hash, BlockUndoLog>,
}
/// Property-based tests for the reorganization decision logic.
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    /// Chain lengths for decision tests; shrunk under tarpaulin so coverage
    /// runs stay fast.
    fn chain_len_range() -> std::ops::Range<usize> {
        if std::env::var("CARGO_TARPAULIN").is_ok() || std::env::var("TARPAULIN").is_ok() {
            1..3
        } else {
            1..5
        }
    }

    /// Chain lengths (including empty chains) for determinism checks.
    fn chain_len_range_det() -> std::ops::Range<usize> {
        if std::env::var("CARGO_TARPAULIN").is_ok() || std::env::var("TARPAULIN").is_ok() {
            0..3
        } else {
            0..10
        }
    }

    /// Arbitrary empty block with a randomized difficulty (`bits`) field.
    fn arb_block() -> impl Strategy<Value = Block> {
        (0x00000000u64..0x1d00ffffu64).prop_map(|bits| Block {
            header: BlockHeader {
                version: 1,
                prev_block_hash: [0u8; 32],
                merkle_root: [0u8; 32],
                timestamp: 0,
                bits,
                nonce: 0,
            },
            transactions: Box::new([]),
        })
    }

    proptest! {
        // Longer chains always win; equal-length chains are decided by work.
        #[test]
        fn prop_should_reorganize_max_work(
            new_chain in proptest::collection::vec(arb_block(), chain_len_range()),
            current_chain in proptest::collection::vec(arb_block(), chain_len_range())
        ) {
            let new_work = calculate_chain_work(&new_chain);
            let current_work = calculate_chain_work(&current_chain);
            if let (Ok(new_w), Ok(current_w)) = (new_work, current_work) {
                let should_reorg = should_reorganize(&new_chain, &current_chain).unwrap_or(false);
                if new_chain.len() > current_chain.len() {
                    prop_assert!(should_reorg, "Must reorganize when new chain is longer");
                } else if new_chain.len() == current_chain.len() {
                    if new_w > current_w {
                        prop_assert!(should_reorg, "Must reorganize when new chain has more work (equal length)");
                    } else {
                        prop_assert!(!should_reorg, "Must not reorganize when new chain has less or equal work (equal length)");
                    }
                } else {
                    prop_assert!(!should_reorg, "Must not reorganize when new chain is shorter");
                }
            }
        }
    }

    proptest! {
        // The same chain must always produce the same total work.
        #[test]
        fn prop_calculate_chain_work_deterministic(
            chain in proptest::collection::vec(arb_block(), chain_len_range_det())
        ) {
            let work1 = calculate_chain_work(&chain);
            let work2 = calculate_chain_work(&chain);
            match (work1, work2) {
                (Ok(w1), Ok(w2)) => {
                    prop_assert_eq!(w1, w2, "Chain work calculation must be deterministic");
                },
                (Err(_), Err(_)) => {
                    // Consistent failure is also deterministic.
                },
                _ => {
                    prop_assert!(false, "Chain work calculation must be deterministic (both succeed or both fail)");
                }
            }
        }
    }

    proptest! {
        // expand_target must never panic across the generated bits range.
        #[test]
        fn prop_expand_target_valid_range(
            bits in 0x00000000u64..0x1d00ffffu64
        ) {
            let result = expand_target(bits);
            match result {
                Ok(target) => {
                    let _ = target;
                },
                Err(_) => {
                    // Oversized exponents are rejected with an error, not a panic.
                }
            }
        }
    }

    proptest! {
        // For equal-length chains, strictly more work decides the reorg.
        #[test]
        fn prop_should_reorganize_equal_length(
            chain1 in proptest::collection::vec(arb_block(), 1..3),
            chain2 in proptest::collection::vec(arb_block(), 1..3)
        ) {
            let len = chain1.len().min(chain2.len());
            let chain1 = &chain1[..len];
            let chain2 = &chain2[..len];
            let work1 = calculate_chain_work(chain1);
            let work2 = calculate_chain_work(chain2);
            if let (Ok(w1), Ok(w2)) = (work1, work2) {
                let should_reorg = should_reorganize(chain1, chain2).unwrap_or(false);
                if w1 > w2 {
                    prop_assert!(should_reorg, "Must reorganize when first chain has more work");
                } else {
                    prop_assert!(!should_reorg, "Must not reorganize when first chain has less or equal work");
                }
            }
        }
    }
}
/// Unit tests for reorganization, undo-log handling, and chain-work math.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_should_reorganize_longer_chain() {
        let new_chain = vec![create_test_block(), create_test_block()];
        let current_chain = vec![create_test_block()];
        assert!(should_reorganize(&new_chain, &current_chain).unwrap());
    }

    #[test]
    fn test_should_reorganize_same_length_more_work() {
        let mut new_chain = vec![create_test_block()];
        let mut current_chain = vec![create_test_block()];
        // Lower compact-target exponent => smaller target => more work.
        new_chain[0].header.bits = 0x0200ffff;
        current_chain[0].header.bits = 0x0300ffff;
        assert!(should_reorganize(&new_chain, &current_chain).unwrap());
    }

    #[test]
    fn test_should_not_reorganize_shorter_chain() {
        let new_chain = vec![create_test_block()];
        let current_chain = vec![create_test_block(), create_test_block()];
        assert!(!should_reorganize(&new_chain, &current_chain).unwrap());
    }

    #[test]
    fn test_find_common_ancestor() {
        let new_chain = vec![create_test_block()];
        let current_chain = vec![create_test_block()];
        let ancestor = find_common_ancestor(&new_chain, &current_chain).unwrap();
        assert_eq!(ancestor.header.version, 4);
        assert_eq!(ancestor.new_chain_index, 0);
        assert_eq!(ancestor.current_chain_index, 0);
    }

    #[test]
    fn test_find_common_ancestor_empty_chain() {
        let new_chain = vec![];
        let current_chain = vec![create_test_block()];
        let result = find_common_ancestor(&new_chain, &current_chain);
        assert!(result.is_err());
    }

    #[test]
    fn test_calculate_chain_work() {
        let mut block = create_test_block();
        block.header.bits = 0x0300ffff;
        let chain = vec![block];
        let work = calculate_chain_work(&chain).unwrap();
        assert!(work > 0);
    }

    #[test]
    fn test_reorganize_chain() {
        let ancestor = create_test_block_at_height(0);
        let mut new_block = create_test_block_at_height(1);
        new_block.header.nonce = 42;
        let new_chain = vec![ancestor.clone(), new_block];
        let current_chain = vec![ancestor];
        let utxo_set = UtxoSet::default();
        let result = reorganize_chain(
            &new_chain,
            &current_chain,
            utxo_set,
            1,
            crate::types::Network::Regtest,
        );
        match result {
            Ok(reorg_result) => {
                assert_eq!(reorg_result.new_height, 2);
                assert_eq!(reorg_result.connected_blocks.len(), 1);
                assert_eq!(reorg_result.connected_block_undo_logs.len(), 1);
            }
            Err(_) => {
                // Full validation may reject the synthetic test block.
            }
        }
    }

    #[test]
    fn test_reorganize_chain_deep_reorg() {
        let mut block1 = create_test_block();
        block1.header.nonce = 1;
        let mut block2 = create_test_block();
        block2.header.nonce = 2;
        let mut block3 = create_test_block();
        block3.header.nonce = 3;
        let new_chain = vec![block1, block2, block3];
        let mut current_block1 = create_test_block();
        current_block1.header.nonce = 10;
        let mut current_block2 = create_test_block();
        current_block2.header.nonce = 11;
        let current_chain = vec![current_block1, current_block2];
        let utxo_set = UtxoSet::default();
        let result = reorganize_chain(
            &new_chain,
            &current_chain,
            utxo_set,
            2,
            crate::types::Network::Regtest,
        );
        match result {
            Ok(reorg_result) => {
                assert_eq!(reorg_result.connected_blocks.len(), 3);
                assert_eq!(reorg_result.reorganization_depth, 2);
                assert_eq!(reorg_result.connected_block_undo_logs.len(), 3);
            }
            Err(_) => {
                // Distinct nonces mean the chains may share no ancestor, in
                // which case reorganization legitimately fails.
            }
        }
    }

    #[test]
    fn test_undo_log_storage_and_retrieval() {
        use crate::block::connect_block;
        use crate::segwit::Witness;
        let block = create_test_block_at_height(1);
        let mut utxo_set = UtxoSet::default();
        let tx_id = calculate_tx_id(&block.transactions[0]);
        let outpoint = OutPoint {
            hash: tx_id,
            index: 0,
        };
        let utxo = UTXO {
            value: 5_000_000_000,
            script_pubkey: vec![0x51].into(),
            height: 1,
            is_coinbase: false,
        };
        utxo_set.insert(outpoint.clone(), std::sync::Arc::new(utxo.clone()));
        // One empty witness stack per input.
        let witnesses: Vec<Vec<Witness>> = block
            .transactions
            .iter()
            .map(|tx| tx.inputs.iter().map(|_| Vec::new()).collect())
            .collect();
        let ctx = crate::block::BlockValidationContext::for_network(crate::types::Network::Regtest);
        let (result, new_utxo_set, undo_log) =
            connect_block(&block, &witnesses, utxo_set.clone(), 1, &ctx).unwrap();
        assert!(matches!(result, crate::types::ValidationResult::Valid));
        assert!(
            !undo_log.entries.is_empty(),
            "Undo log should contain entries"
        );
        let block_hash = calculate_block_hash(&block.header);
        let mut undo_log_storage: HashMap<Hash, BlockUndoLog> = HashMap::new();
        undo_log_storage.insert(block_hash, undo_log.clone());
        let retrieved_undo_log = undo_log_storage.get(&block_hash);
        assert!(
            retrieved_undo_log.is_some(),
            "Should be able to retrieve undo log"
        );
        assert_eq!(
            retrieved_undo_log.unwrap().entries.len(),
            undo_log.entries.len()
        );
        // Round trip: disconnecting must restore the pre-connection UTXO.
        let disconnected_utxo_set = disconnect_block(&block, &undo_log, new_utxo_set, 1).unwrap();
        assert!(
            disconnected_utxo_set.contains_key(&outpoint),
            "Disconnected UTXO set should contain restored UTXO"
        );
    }

    #[test]
    fn test_reorganize_with_undo_log_callback() {
        use crate::block::connect_block;
        use crate::segwit::Witness;
        let block = create_test_block_at_height(1);
        let utxo_set = UtxoSet::default();
        let witnesses: Vec<Vec<Witness>> = block
            .transactions
            .iter()
            .map(|tx| tx.inputs.iter().map(|_| Vec::new()).collect())
            .collect();
        let ctx = crate::block::BlockValidationContext::for_network(crate::types::Network::Regtest);
        let (result, connected_utxo_set, undo_log) =
            connect_block(&block, &witnesses, utxo_set.clone(), 1, &ctx).unwrap();
        if !matches!(result, crate::types::ValidationResult::Valid) {
            eprintln!("Block validation failed: {:?}", result);
        }
        assert!(matches!(result, crate::types::ValidationResult::Valid));
        let block_hash = calculate_block_hash(&block.header);
        let mut undo_log_storage: HashMap<Hash, BlockUndoLog> = HashMap::new();
        undo_log_storage.insert(block_hash, undo_log);
        // Lookup callback backed by the in-memory storage above.
        let get_undo_log =
            |hash: &Hash| -> Option<BlockUndoLog> { undo_log_storage.get(hash).cloned() };
        let mut new_block = create_test_block_at_height(2);
        new_block.header.nonce = 42;
        let new_chain = vec![block.clone(), new_block];
        let current_chain = vec![block];
        let empty_witnesses: Vec<Vec<Vec<Witness>>> = new_chain
            .iter()
            .map(|b| {
                b.transactions
                    .iter()
                    .map(|tx| tx.inputs.iter().map(|_| Vec::new()).collect())
                    .collect()
            })
            .collect();
        let reorg_result = reorganize_chain_with_witnesses(
            &new_chain,
            &empty_witnesses,
            None,
            &current_chain,
            connected_utxo_set,
            1,
            None::<fn(&Block) -> Option<Vec<Witness>>>,
            None::<fn(Natural) -> Option<Vec<BlockHeader>>>,
            Some(get_undo_log),
            None::<fn(&Hash, &BlockUndoLog) -> Result<()>>,
            crate::types::Network::Regtest,
        );
        match reorg_result {
            Ok(result) => {
                assert!(!result.connected_block_undo_logs.is_empty());
            }
            Err(_) => {
                // Full validation may reject the synthetic test blocks.
            }
        }
    }

    #[test]
    fn test_reorganize_chain_empty_new_chain() {
        let new_chain = vec![];
        let current_chain = vec![create_test_block()];
        let utxo_set = UtxoSet::default();
        let result = reorganize_chain(
            &new_chain,
            &current_chain,
            utxo_set,
            1,
            crate::types::Network::Regtest,
        );
        assert!(result.is_err());
    }

    #[test]
    fn test_reorganize_chain_empty_current_chain() {
        let new_chain = vec![create_test_block()];
        let current_chain = vec![];
        let utxo_set = UtxoSet::default();
        let result = reorganize_chain(
            &new_chain,
            &current_chain,
            utxo_set,
            0,
            crate::types::Network::Regtest,
        );
        assert!(result.is_err());
    }

    #[test]
    fn test_disconnect_block() {
        let block = create_test_block();
        let mut utxo_set = UtxoSet::default();
        let tx_id = calculate_tx_id(&block.transactions[0]);
        let outpoint = OutPoint {
            hash: tx_id,
            index: 0,
        };
        let utxo = UTXO {
            value: 50_000_000_000,
            script_pubkey: vec![0x51].into(),
            height: 1,
            is_coinbase: false,
        };
        utxo_set.insert(outpoint, std::sync::Arc::new(utxo));
        // An empty undo log must leave the UTXO set untouched.
        let empty_undo_log = BlockUndoLog::new();
        let result = disconnect_block(&block, &empty_undo_log, utxo_set, 1);
        assert!(result.is_ok());
    }

    #[test]
    fn test_calculate_chain_work_empty_chain() {
        let chain = vec![];
        let work = calculate_chain_work(&chain).unwrap();
        assert_eq!(work, 0);
    }

    #[test]
    fn test_calculate_chain_work_multiple_blocks() {
        let mut chain = vec![create_test_block(), create_test_block()];
        chain[0].header.bits = 0x0300ffff;
        chain[1].header.bits = 0x0200ffff;
        let work = calculate_chain_work(&chain).unwrap();
        assert!(work > 0);
    }

    #[test]
    fn test_expand_target_edge_cases() {
        let result = expand_target(0x00000000);
        assert!(result.is_ok());
        let result = expand_target(0x03ffffff);
        assert!(result.is_ok());
        // Exponent 0x14 (20) exceeds the 19-exponent limit.
        let result = expand_target(0x14000000);
        assert!(result.is_err());
    }

    #[test]
    fn test_calculate_tx_id_different_transactions() {
        let tx1 = Transaction {
            version: 1,
            inputs: vec![].into(),
            outputs: vec![].into(),
            lock_time: 0,
        };
        let tx2 = Transaction {
            version: 2,
            inputs: vec![].into(),
            outputs: vec![].into(),
            lock_time: 0,
        };
        let id1 = calculate_tx_id(&tx1);
        let id2 = calculate_tx_id(&tx2);
        assert_ne!(id1, id2);
    }

    /// Encode a block height as a BIP34-style coinbase scriptSig push.
    ///
    /// Heights are pushed as little-endian minimally-encoded numbers;
    /// padding bytes keep the scriptSig at the 2-byte minimum.
    fn encode_bip34_height(height: u64) -> Vec<u8> {
        if height == 0 {
            // Empty push plus a padding byte to reach the 2-byte minimum.
            return vec![0x00, 0xff];
        }
        let mut height_bytes = Vec::new();
        let mut n = height;
        while n > 0 {
            height_bytes.push((n & 0xff) as u8);
            n >>= 8;
        }
        // Add a sign byte when the top bit is set (script numbers are signed).
        if height_bytes.last().map_or(false, |&b| b & 0x80 != 0) {
            height_bytes.push(0x00);
        }
        let mut script_sig = Vec::with_capacity(1 + height_bytes.len() + 1);
        script_sig.push(height_bytes.len() as u8);
        script_sig.extend_from_slice(&height_bytes);
        if script_sig.len() < 2 {
            script_sig.push(0xff);
        }
        script_sig
    }

    /// Build a minimal block (coinbase only) claiming the given height.
    fn create_test_block_at_height(height: u64) -> Block {
        use crate::mining::calculate_merkle_root;
        let script_sig = encode_bip34_height(height);
        let coinbase_tx = Transaction {
            version: 1,
            inputs: vec![TransactionInput {
                prevout: OutPoint {
                    hash: [0; 32].into(),
                    index: 0xffffffff,
                },
                script_sig,
                sequence: 0xffffffff,
            }]
            .into(),
            outputs: vec![TransactionOutput {
                value: 5_000_000_000,
                script_pubkey: vec![0x51].into(),
            }]
            .into(),
            lock_time: 0,
        };
        let merkle_root =
            calculate_merkle_root(&[coinbase_tx.clone()]).expect("Failed to calculate merkle root");
        Block {
            header: BlockHeader {
                version: 4,
                prev_block_hash: [0; 32],
                merkle_root,
                timestamp: 1231006505,
                // Regtest-style minimal difficulty.
                bits: 0x207fffff,
                nonce: 0,
            },
            transactions: vec![coinbase_tx].into_boxed_slice(),
        }
    }

    /// Block at height 0 — the default fixture for most tests.
    fn create_test_block() -> Block {
        create_test_block_at_height(0)
    }
}