use crate::constants::*;
use crate::error::{ConsensusError, Result};
use crate::types::*;
use blvm_spec_lock::spec_locked;
use sha2::{Digest, Sha256};
/// Computes the compact difficulty bits for the next block using the
/// legacy (uncorrected) expected-timespan calculation (spec section 7.1).
///
/// `prev_headers` must contain at least two headers spanning the retarget
/// window; see [`get_next_work_required_internal`] for the shared algorithm.
///
/// # Errors
/// Returns `ConsensusError::InvalidProofOfWork` on insufficient headers,
/// out-of-order timestamps, or malformed compact bits.
#[spec_locked("7.1")]
pub fn get_next_work_required(
    _current_header: &BlockHeader,
    prev_headers: &[BlockHeader],
) -> Result<Natural> {
    get_next_work_required_internal(_current_header, prev_headers, false)
}
/// Computes the compact difficulty bits for the next block using the
/// corrected expected-timespan calculation (spec section 7.1), which
/// counts intervals between the supplied headers (`len - 1`) rather than
/// the full adjustment window.
///
/// # Errors
/// Same failure modes as [`get_next_work_required`].
#[spec_locked("7.1")]
pub fn get_next_work_required_corrected(
    _current_header: &BlockHeader,
    prev_headers: &[BlockHeader],
) -> Result<Natural> {
    get_next_work_required_internal(_current_header, prev_headers, true)
}
/// Shared retarget implementation behind both public entry points.
///
/// Measures the actual time span between the first and last of
/// `prev_headers`, clamps it to [expected/4, expected*4], scales the
/// previous target by `clamped / expected`, and re-compresses the result
/// to compact bits. On 256-bit overflow of the scaling multiply, the
/// previous bits are returned unchanged (difficulty left as-is).
///
/// `use_corrected` selects the expected-timespan formula: the corrected
/// variant uses `(num_headers - 1) * TARGET_TIME_PER_BLOCK`, the legacy
/// variant uses the full `DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK`.
fn get_next_work_required_internal(
    _current_header: &BlockHeader,
    prev_headers: &[BlockHeader],
    use_corrected: bool,
) -> Result<Natural> {
    // Need at least a first and a last header to bound a time span.
    if prev_headers.len() < 2 {
        return Err(ConsensusError::InvalidProofOfWork(
            "Insufficient headers for difficulty adjustment".into(),
        ));
    }
    let last_header = &prev_headers[prev_headers.len() - 1];
    let previous_bits = last_header.bits;
    let first_timestamp = prev_headers[0].timestamp;
    let last_timestamp = last_header.timestamp;
    // Reject windows where time runs backwards; also guards the u64
    // subtraction below against underflow.
    if last_timestamp < first_timestamp {
        return Err(ConsensusError::InvalidProofOfWork(
            "Invalid timestamp order in difficulty adjustment".into(),
        ));
    }
    let time_span = last_timestamp - first_timestamp;
    let expected_time = if use_corrected {
        let num_intervals = prev_headers.len() as u64;
        // NOTE(review): both branches evaluate to
        // (num_intervals - 1) * TARGET_TIME_PER_BLOCK when
        // num_intervals == DIFFICULTY_ADJUSTMENT_INTERVAL, so this
        // special case is redundant — kept as-is (spec-locked).
        if num_intervals == DIFFICULTY_ADJUSTMENT_INTERVAL {
            (DIFFICULTY_ADJUSTMENT_INTERVAL - 1) * TARGET_TIME_PER_BLOCK
        } else {
            (num_intervals - 1) * TARGET_TIME_PER_BLOCK
        }
    } else {
        DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK
    };
    // Limit the adjustment to a factor of 4 in either direction.
    let clamped_timespan = time_span.max(expected_time / 4).min(expected_time * 4);
    debug_assert!(
        clamped_timespan >= expected_time / 4,
        "Clamped timespan ({}) must be >= expected_time/4 ({})",
        clamped_timespan,
        expected_time / 4
    );
    debug_assert!(
        clamped_timespan <= expected_time * 4,
        "Clamped timespan ({}) must be <= expected_time*4 ({})",
        clamped_timespan,
        expected_time * 4
    );
    let old_target = expand_target(previous_bits)?;
    if old_target.is_zero() {
        return Err(ConsensusError::InvalidProofOfWork(
            "Previous block target is zero (invalid compact bits)".into(),
        ));
    }
    // new_target = old_target * clamped_timespan / expected_time.
    // If the multiply would overflow 256 bits, keep the previous bits.
    let multiplied_target = match old_target.checked_mul_u64(clamped_timespan) {
        Some(t) => t,
        None => {
            return Ok(previous_bits);
        }
    };
    debug_assert!(
        multiplied_target >= old_target || clamped_timespan < expected_time,
        "Multiplied target should be >= old target when timespan >= expected_time"
    );
    let new_target = multiplied_target.div_u64(expected_time);
    if new_target.is_zero() {
        return Err(ConsensusError::InvalidProofOfWork(
            "Difficulty adjustment produced zero expanded target".into(),
        ));
    }
    let new_bits = compress_target(&new_target)?;
    // NOTE(review): this clamps the *compact encoding* numerically against
    // MAX_TARGET. Compact values with a normalized mantissa compare in the
    // same order as their expanded targets, so this appears equivalent to
    // clamping the target itself — confirm against spec 7.1.
    let clamped_bits = new_bits.min(MAX_TARGET as Natural);
    debug_assert!(
        clamped_bits > 0,
        "Clamped bits ({clamped_bits}) must be positive"
    );
    debug_assert!(
        clamped_bits <= MAX_TARGET as Natural,
        "Clamped bits ({clamped_bits}) must be <= MAX_TARGET ({MAX_TARGET})"
    );
    // Defensive: compress_target never returns 0 for a non-zero target,
    // so this branch should be unreachable; kept as a release-mode guard.
    if clamped_bits == 0 {
        return Err(ConsensusError::InvalidProofOfWork(
            "Difficulty adjustment resulted in zero target".into(),
        ));
    }
    Ok(clamped_bits)
}
/// Verifies a single header's proof of work (spec section 7.2).
///
/// Computes double-SHA256 over the 80-byte serialized header, interprets
/// the digest as a little-endian 256-bit integer, and checks it against
/// the target expanded from `header.bits`.
///
/// # Errors
/// Propagates `expand_target` errors for malformed compact bits.
///
/// NOTE(review): comparison is strict (`hash < target`); Bitcoin Core
/// accepts `hash == target` as valid — confirm which the spec requires.
#[spec_locked("7.2")]
#[cfg_attr(feature = "production", inline(always))]
#[cfg_attr(not(feature = "production"), inline)]
pub fn check_proof_of_work(header: &BlockHeader) -> Result<bool> {
    let header_bytes = serialize_header(header);
    // Double SHA-256: hash of the hash of the raw header bytes.
    let hash1 = Sha256::digest(header_bytes);
    let hash2 = Sha256::digest(hash1);
    let mut hash_bytes = [0u8; 32];
    hash_bytes.copy_from_slice(&hash2);
    // Digest bytes are read little-endian, matching U256::from_bytes.
    let hash_value = U256::from_bytes(&hash_bytes);
    let target = expand_target(header.bits)?;
    Ok(hash_value < target)
}
/// Batch proof-of-work verification for many headers (production builds).
///
/// Serializes all headers (in parallel when the `rayon` feature is on),
/// double-hashes them through the SIMD batch path, then checks each digest
/// against its header's expanded target.
///
/// Per-header result: `(true, Some(hash))` when the proof of work is
/// valid, `(false, None)` otherwise. Note that a header with malformed
/// compact bits yields `(false, None)` rather than failing the whole
/// batch — unlike [`check_proof_of_work`], which returns an error.
#[cfg(feature = "production")]
#[spec_locked("7.2")]
pub fn batch_check_proof_of_work(headers: &[BlockHeader]) -> Result<Vec<(bool, Option<Hash>)>> {
    use crate::optimizations::simd_vectorization;
    if headers.is_empty() {
        return Ok(Vec::new());
    }
    // Serialize every header to its 80-byte wire form up front.
    let header_bytes_vec: Vec<[u8; 80]> = {
        #[cfg(feature = "rayon")]
        {
            use rayon::prelude::*;
            headers.par_iter().map(serialize_header).collect()
        }
        #[cfg(not(feature = "rayon"))]
        {
            headers.iter().map(serialize_header).collect()
        }
    };
    let header_refs: Vec<&[u8]> = header_bytes_vec.iter().map(|v| v.as_slice()).collect();
    // SIMD batch double-SHA256 over all serialized headers at once.
    let aligned_hashes = simd_vectorization::batch_double_sha256_aligned(&header_refs);
    let hashes: Vec<[u8; 32]> = aligned_hashes.iter().map(|h| *h.as_bytes()).collect();
    let mut results = Vec::with_capacity(headers.len());
    for (i, header) in headers.iter().enumerate() {
        let hash = hashes[i];
        let hash_value = U256::from_bytes(&hash);
        match expand_target(header.bits) {
            Ok(target) => {
                let is_valid = hash_value < target;
                results.push((is_valid, if is_valid { Some(hash) } else { None }));
            }
            Err(_e) => {
                // Malformed bits: report invalid instead of aborting the batch.
                results.push((false, None));
            }
        }
    }
    Ok(results)
}
/// Minimal unsigned 256-bit integer for proof-of-work target arithmetic.
///
/// Words are stored least-significant first: `self.0[0]` holds bits
/// 0..64 and `self.0[3]` holds bits 192..256.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct U256([u64; 4]);
impl U256 {
    /// The value 0.
    fn zero() -> Self {
        U256([0; 4])
    }
    /// Widens a `u32` into the low word.
    fn from_u32(value: u32) -> Self {
        U256([value as u64, 0, 0, 0])
    }
    /// Widens a `u64` into the low word (test helper).
    #[cfg(test)]
    fn from_u64(value: u64) -> Self {
        U256([value, 0, 0, 0])
    }
    /// Returns the least-significant 64 bits.
    fn get_low_64(&self) -> u64 {
        self.0[0]
    }
    /// Serializes to 32 little-endian bytes (test helper).
    #[cfg(test)]
    fn to_bytes(&self) -> [u8; 32] {
        let mut bytes = [0u8; 32];
        for (i, &word) in self.0.iter().enumerate() {
            bytes[i * 8..(i + 1) * 8].copy_from_slice(&word.to_le_bytes());
        }
        bytes
    }
    /// Logical left shift; shifts of 256 or more yield zero.
    fn shl(&self, shift: u32) -> Self {
        if shift >= 256 {
            return U256::zero();
        }
        let mut result = U256::zero();
        let word_shift = (shift / 64) as usize;
        let bit_shift = shift % 64;
        for i in 0..4 {
            if i + word_shift < 4 {
                result.0[i + word_shift] |= self.0[i] << bit_shift;
                // Carry the bits shifted out of this word into the next one.
                // The `bit_shift > 0` guard avoids the undefined `>> 64`.
                if bit_shift > 0 && i + word_shift + 1 < 4 {
                    result.0[i + word_shift + 1] |= self.0[i] >> (64 - bit_shift);
                }
            }
        }
        result
    }
    /// Logical right shift; shifts of 256 or more yield zero.
    fn shr(&self, shift: u32) -> Self {
        if shift >= 256 {
            return U256::zero();
        }
        let mut result = U256::zero();
        let word_shift = (shift / 64) as usize;
        let bit_shift = shift % 64;
        debug_assert!(
            word_shift < 4,
            "Word shift ({word_shift}) must be < 4 (shift: {shift})"
        );
        debug_assert!(
            bit_shift < 64,
            "Bit shift ({bit_shift}) must be < 64 (shift: {shift})"
        );
        if bit_shift == 0 {
            // Pure word move; splitting this case avoids the undefined `<< 64`.
            for i in word_shift..4 {
                result.0[i - word_shift] = self.0[i];
            }
        } else {
            for i in word_shift..4 {
                let mut word = self.0[i] >> bit_shift;
                if i + 1 < 4 {
                    // Bring down the low bits of the next-higher word.
                    word |= self.0[i + 1] << (64 - bit_shift);
                }
                result.0[i - word_shift] = word;
            }
        }
        result
    }
    /// Deserializes from 32 little-endian bytes.
    fn from_bytes(bytes: &[u8; 32]) -> Self {
        let mut words = [0u64; 4];
        for (i, word) in words.iter_mut().enumerate() {
            let start = i * 8;
            // The 8-byte chunk is in range by construction, so the
            // slice-to-array conversion cannot fail.
            *word = u64::from_le_bytes(bytes[start..start + 8].try_into().expect("8-byte chunk"));
        }
        U256(words)
    }
    /// Multiplies by a `u64`, returning `None` on 256-bit overflow.
    ///
    /// Schoolbook multiply: one 64x64 -> 128 partial product per word with
    /// carry propagation. (A manually unrolled `cfg(production)` copy with
    /// identical semantics previously duplicated this loop; LLVM unrolls a
    /// fixed 4-iteration loop on its own, so one implementation suffices.)
    fn checked_mul_u64(&self, rhs: u64) -> Option<Self> {
        let mut carry = 0u128;
        let mut result = U256::zero();
        for i in 0..4 {
            let product = (self.0[i] as u128) * (rhs as u128) + carry;
            result.0[i] = product as u64;
            carry = product >> 64;
        }
        // A carry out of the top word means the product needs > 256 bits.
        if carry > 0 {
            return None;
        }
        Some(result)
    }
    /// Divides by a `u64`; division by zero saturates to the maximum value
    /// instead of panicking (callers treat this as the "easiest" target).
    fn div_u64(&self, rhs: u64) -> Self {
        if rhs == 0 {
            return U256([u64::MAX; 4]);
        }
        let mut remainder = 0u128;
        let mut result = U256::zero();
        // Long division, most-significant word first. The per-word quotient
        // always fits in u64 because remainder < rhs going into each step.
        for i in (0..4).rev() {
            let dividend = (remainder << 64) | (self.0[i] as u128);
            let quotient = dividend / (rhs as u128);
            remainder = dividend % (rhs as u128);
            debug_assert!(
                quotient <= u64::MAX as u128,
                "Quotient ({quotient}) must fit in u64"
            );
            result.0[i] = quotient as u64;
        }
        debug_assert!(
            result <= *self,
            "Division result ({result:?}) must be <= dividend ({self:?})"
        );
        debug_assert!(
            remainder < rhs as u128,
            "Remainder ({remainder}) must be < divisor ({rhs})"
        );
        result
    }
    /// Index of the highest set bit (0-based), or `None` when zero.
    fn highest_set_bit(&self) -> Option<u32> {
        for (i, &word) in self.0.iter().rev().enumerate() {
            if word != 0 {
                let word_index = (3 - i) as u32;
                let bit_pos = word_index * 64 + (63 - word.leading_zeros());
                return Some(bit_pos);
            }
        }
        None
    }
    /// True when all four words are zero.
    fn is_zero(&self) -> bool {
        self.0.iter().all(|&x| x == 0)
    }
    /// Lossy conversion to `f64` (used only for difficulty display math).
    fn to_f64(&self) -> f64 {
        if self.is_zero() {
            return 0.0;
        }
        let mut result = 0.0_f64;
        result += self.0[0] as f64;
        result += (self.0[1] as f64) * 2.0_f64.powi(64);
        result += (self.0[2] as f64) * 2.0_f64.powi(128);
        result += (self.0[3] as f64) * 2.0_f64.powi(192);
        result
    }
}
impl PartialOrd for U256 {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for U256 {
    /// Lexicographic comparison from the most-significant word down.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.iter().rev().cmp(other.0.iter().rev())
    }
}
/// Converts compact bits into a floating-point difficulty value
/// (spec section 7.1): the ratio of the maximum target to this target,
/// floored at 1.0. Zero targets map to difficulty 1.0.
///
/// # Errors
/// Propagates `expand_target` errors for malformed compact bits.
#[spec_locked("7.1")]
pub fn difficulty_from_bits(bits: Natural) -> Result<f64> {
    // Maximum target (0x1d00ffff expanded), as 32 little-endian bytes.
    const MAX_TARGET_LE: [u8; 32] = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00, 0x00, 0xFF,
        0xFF, 0x00, 0x00, 0x00, 0x00,
    ];
    let target = expand_target(bits)?;
    if target.is_zero() {
        return Ok(1.0);
    }
    let numerator = U256::from_bytes(&MAX_TARGET_LE).to_f64();
    let denominator = target.to_f64();
    // Guard against a target that rounds to 0.0 in f64.
    if denominator == 0.0 {
        return Ok(1.0);
    }
    Ok((numerator / denominator).max(1.0))
}
/// Expands compact "nBits" into a full 256-bit target (spec section 7.1).
///
/// The high byte is a base-256 exponent and the low 23 bits are the
/// mantissa: `target = mantissa * 256^(exponent - 3)`.
///
/// # Errors
/// Returns `ConsensusError::InvalidProofOfWork` when the exponent is
/// outside `3..=32`, or when the resulting shift would exceed 256 bits.
#[spec_locked("7.1")]
pub fn expand_target(bits: Natural) -> Result<U256> {
    let exponent = (bits >> 24) as u8;
    let mantissa = bits & 0x007fffff;
    if exponent < 3 || exponent > 32 {
        return Err(ConsensusError::InvalidProofOfWork(
            "Invalid target exponent".into(),
        ));
    }
    // A zero mantissa encodes a zero target regardless of exponent.
    if mantissa == 0 {
        return Ok(U256::zero());
    }
    let value = U256::from_u32(mantissa as u32);
    if exponent <= 3 {
        // Only exponent == 3 reaches here, so this shift is always zero bits.
        let shift = 8 * (3 - exponent);
        Ok(value.shr(shift as u32))
    } else {
        let shift = 8u32 * (exponent as u32 - 3);
        // Defensive: exponent <= 32 caps the shift at 232, so this is
        // unreachable in practice.
        if shift >= 256 {
            return Err(ConsensusError::InvalidProofOfWork(
                "Target too large".into(),
            ));
        }
        Ok(value.shl(shift))
    }
}
/// Compresses a 256-bit target back into compact "nBits" form
/// (spec section 7.1). Inverse of [`expand_target`], truncating the
/// target to its three most-significant bytes.
///
/// NOTE(review): a zero target returns the sentinel 0x1d000000 (exponent
/// 29, zero mantissa) rather than 0 — confirm this matches the spec.
///
/// # Errors
/// Returns `ConsensusError::InvalidProofOfWork` when the normalized
/// exponent exceeds 29.
#[spec_locked("7.1")]
fn compress_target(target: &U256) -> Result<Natural> {
    if target.is_zero() {
        return Ok(0x1d000000);
    }
    // Non-zero here, so highest_set_bit cannot actually return None.
    let highest_bit = target
        .highest_set_bit()
        .ok_or_else(|| ConsensusError::InvalidProofOfWork("Cannot compress zero target".into()))?;
    // Number of significant bytes in the target.
    let n_size = (highest_bit + 1).div_ceil(8);
    let mut n_compact: u64;
    if n_size <= 3 {
        // Small value: left-align it within the 3-byte mantissa.
        let low_64 = target.get_low_64();
        let shift_bytes = 3 - n_size;
        n_compact = low_64 << (8 * shift_bytes);
    } else {
        // Keep only the top 3 significant bytes.
        let shift_bytes = n_size - 3;
        let shifted = target.shr(shift_bytes * 8);
        n_compact = shifted.get_low_64();
    }
    // Normalize: bit 23 is the compact format's sign bit, so if it is set
    // shift the mantissa down a byte and bump the exponent instead.
    let mut n_size_final = n_size;
    while (n_compact & 0x00800000) != 0 {
        n_compact >>= 8;
        n_size_final += 1;
    }
    let mantissa = (n_compact & 0x007fffff) as u32;
    if n_size_final > 29 {
        return Err(ConsensusError::InvalidProofOfWork(
            format!("Target too large: exponent {n_size_final} exceeds maximum 29").into(),
        ));
    }
    // Reassemble: exponent in the high byte, mantissa in the low 23 bits.
    let bits = (n_size_final << 24) | mantissa;
    Ok(bits as Natural)
}
/// Serializes a header into its 80-byte wire form:
/// version | prev_block_hash | merkle_root | timestamp | bits | nonce,
/// with all integer fields truncated to u32 and written little-endian.
fn serialize_header(header: &BlockHeader) -> [u8; 80] {
    let mut buf = [0u8; 80];
    // The four 4-byte integer fields, paired with their byte offsets.
    let words: [(usize, [u8; 4]); 4] = [
        (0, (header.version as u32).to_le_bytes()),
        (68, (header.timestamp as u32).to_le_bytes()),
        (72, (header.bits as u32).to_le_bytes()),
        (76, (header.nonce as u32).to_le_bytes()),
    ];
    for (offset, word) in words {
        buf[offset..offset + 4].copy_from_slice(&word);
    }
    // The two 32-byte hash fields occupy the middle of the layout.
    buf[4..36].copy_from_slice(&header.prev_block_hash);
    buf[36..68].copy_from_slice(&header.merkle_root);
    buf
}
/// Test helper: folds at most the first 16 bytes of `bytes` into a u128,
/// big-endian (byte `i` lands at bit position `8 * (15 - i)`).
#[cfg(test)]
fn u256_from_bytes(bytes: &[u8]) -> u128 {
    bytes
        .iter()
        .take(16)
        .enumerate()
        .fold(0u128, |acc, (i, &byte)| {
            acc | ((byte as u128) << (8 * (15 - i)))
        })
}
#[cfg(test)]
mod property_tests {
    //! Property-based tests for target expansion, PoW determinism, and
    //! retarget bounds.
    use super::*;
    use proptest::prelude::*;
    /// Strategy producing structurally valid headers whose compact bits
    /// fall in the range `expand_target` accepts.
    fn arb_block_header() -> impl Strategy<Value = BlockHeader> {
        (
            any::<i64>(),
            any::<[u8; 32]>(),
            any::<[u8; 32]>(),
            any::<u64>(),
            0x03000000u32..0x1d00ffffu32,
            any::<u64>(),
        )
            .prop_map(
                |(version, prev_block_hash, merkle_root, timestamp, bits, nonce)| BlockHeader {
                    version,
                    prev_block_hash,
                    merkle_root,
                    timestamp,
                    bits: bits as u64,
                    nonce,
                },
            )
    }
    proptest! {
        // expand_target over the valid bits range: zero mantissa <=> zero target.
        #[test]
        fn prop_expand_target_valid_range(
            bits in 0x03000000u32..0x1d00ffffu32
        ) {
            let result = expand_target(bits as u64);
            let mantissa = bits & 0x00ffffff;
            match result {
                Ok(target) => {
                    prop_assert!(target >= U256::zero(), "Target must be non-negative");
                    if mantissa == 0 {
                        prop_assert!(target.is_zero(), "Zero mantissa should produce zero target");
                    } else {
                        prop_assert!(!target.is_zero(), "Non-zero mantissa should produce non-zero target");
                    }
                },
                Err(_) => {
                }
            }
        }
    }
    proptest! {
        // Hashing the same header twice must give the same verdict.
        #[test]
        fn prop_check_proof_of_work_deterministic(
            header in arb_block_header()
        ) {
            let mut valid_header = header;
            valid_header.bits = 0x1d00ffff;
            let result1 = check_proof_of_work(&valid_header).unwrap_or(false);
            let result2 = check_proof_of_work(&valid_header).unwrap_or(false);
            prop_assert_eq!(result1, result2, "Proof of work check must be deterministic");
        }
    }
    proptest! {
        // Successful retargets stay within (0, MAX_TARGET].
        #[test]
        fn prop_get_next_work_required_bounds(
            current_header in arb_block_header(),
            prev_headers in proptest::collection::vec(arb_block_header(), 2..6)
        ) {
            let mut valid_headers = prev_headers;
            if let Some(first_header) = valid_headers.first_mut() {
                // saturating_sub: `timestamp` is an arbitrary u64, so a plain
                // subtraction would underflow and panic for values below
                // two weeks' worth of seconds.
                first_header.timestamp = current_header.timestamp.saturating_sub(86400 * 14);
            }
            let result = get_next_work_required(&current_header, &valid_headers);
            match result {
                Ok(work) => {
                    prop_assert!(work <= MAX_TARGET as Natural,
                        "Next work required must not exceed maximum target");
                    prop_assert!(work > 0, "Next work required must be positive");
                },
                Err(_) => {
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::constants::MAX_TARGET;
// Fewer than two previous headers cannot bound a retarget window -> error.
#[test]
fn test_get_next_work_required_insufficient_headers() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1231006505,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header.clone()];
    let result = get_next_work_required(&header, &prev_headers);
    assert!(result.is_err());
}
// Actual timespan exactly equals the expected timespan, so the difficulty
// (compact bits) must come out unchanged.
#[test]
fn test_get_next_work_required_normal_adjustment() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let header2 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        // One full expected interval after header1.
        timestamp: 1000000 + (DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK),
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header1, header2.clone()];
    let result = get_next_work_required(&header2, &prev_headers).unwrap();
    assert_eq!(result, 0x1d00ffff);
}
// The maximum-target bits should map to difficulty ~1.0, and a smaller
// (harder) target must map to a strictly higher difficulty.
#[test]
fn test_difficulty_from_bits() {
    let d = difficulty_from_bits(0x1d00ffff).unwrap();
    assert!(
        (d - 1.0).abs() < 0.01,
        "Genesis difficulty should be ~1.0, got {d}"
    );
    let d_harder = difficulty_from_bits(0x1d000800).unwrap();
    assert!(d_harder > d, "Harder target should have higher difficulty");
}
// Smallest valid exponent (3) with a non-zero mantissa expands to a
// non-zero target.
#[test]
fn test_expand_target() {
    let target = expand_target(0x0300ffff).unwrap();
    assert!(!target.is_zero());
}
// Smoke test: an exponent-3 target is valid input, so the check must not
// error; the boolean verdict itself is not asserted.
#[test]
fn test_check_proof_of_work_genesis() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1231006505,
        bits: 0x0300ffff,
        nonce: 0,
    };
    let result = check_proof_of_work(&header).unwrap();
    let _ = result;
}
// Blocks arriving in half the expected time: the target shrinks (harder),
// so the compact bits never exceed the starting value.
#[test]
fn test_get_next_work_required_fast_blocks() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let header2 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000 + (DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK / 2),
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header1, header2.clone()];
    let result = get_next_work_required(&header2, &prev_headers).unwrap();
    assert!(result <= 0x1d00ffff);
}
// Blocks taking twice the expected time: the target would grow, but the
// result is clamped to MAX_TARGET, so bits stay <= the starting value
// (assuming MAX_TARGET is 0x1d00ffff — see constants).
#[test]
fn test_get_next_work_required_slow_blocks() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let header2 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000 + (DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK * 2),
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header1, header2.clone()];
    let result = get_next_work_required(&header2, &prev_headers).unwrap();
    assert!(result <= 0x1d00ffff);
}
// 14x too fast: the timespan is clamped to expected/4 before scaling.
#[test]
fn test_get_next_work_required_extreme_fast_blocks() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let header2 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000 + (DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK / 14),
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header1, header2.clone()];
    let result = get_next_work_required(&header2, &prev_headers).unwrap();
    assert!(result <= 0x1d00ffff);
}
// 4x too slow: exactly at the upper clamp boundary.
#[test]
fn test_get_next_work_required_extreme_slow_blocks() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let header2 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1000000 + (DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK * 4),
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let prev_headers = vec![header1, header2.clone()];
    let result = get_next_work_required(&header2, &prev_headers).unwrap();
    assert!(result <= 0x1d00ffff);
}
// Zero mantissa encodes a zero target for any valid exponent.
#[test]
fn test_expand_target_zero_mantissa() {
    let result = expand_target(0x1d000000).unwrap();
    assert!(result.is_zero());
}
// Exponent 2 is below the accepted 3..=32 range.
#[test]
fn test_expand_target_invalid_exponent_too_small() {
    let result = expand_target(0x0200ffff);
    assert!(result.is_err());
}
// Exponent 33 is above the accepted 3..=32 range.
#[test]
fn test_expand_target_invalid_exponent_too_large() {
    let result = expand_target(0x2100ffff);
    assert!(result.is_err());
}
// High-but-valid exponents must expand to non-zero targets.
#[test]
fn test_expand_target_exponent_31() {
    let result = expand_target(0x1f00ffff).unwrap();
    assert!(!result.is_zero());
}
// Exponent 32 (regtest-style bits) is the top of the valid range.
#[test]
fn test_expand_target_exponent_32_regtest_bits() {
    let result = expand_target(0x2000ffff).unwrap();
    assert!(!result.is_zero());
}
// Exponent 3 is the bottom of the valid range (no byte shift).
#[test]
fn test_expand_target_exponent_3() {
    let result = expand_target(0x0300ffff).unwrap();
    assert!(!result.is_zero());
}
#[test]
fn test_expand_target_exponent_4() {
    let result = expand_target(0x0400ffff).unwrap();
    assert!(!result.is_zero());
}
// Exponent 29 corresponds to the familiar mainnet-style 0x1d00ffff bits.
#[test]
fn test_expand_target_exponent_29() {
    let result = expand_target(0x1d00ffff).unwrap();
    assert!(!result.is_zero());
}
// Invalid compact bits (exponent 2) must surface as an error from the
// PoW check rather than a false verdict.
#[test]
fn test_check_proof_of_work_invalid_target() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1231006505,
        bits: 0x0200ffff,
        nonce: 0,
    };
    let result = check_proof_of_work(&header);
    assert!(result.is_err());
}
// Valid bits must produce a boolean verdict without erroring; the
// verdict itself depends on the hash and is not asserted.
#[test]
fn test_check_proof_of_work_valid_target() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1231006505,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let result = check_proof_of_work(&header).unwrap();
    let _ = result;
}
// --- U256 primitive operations ---
#[test]
fn test_u256_zero() {
    let zero = U256::zero();
    assert!(zero.is_zero());
}
#[test]
fn test_u256_from_u32() {
    let value = U256::from_u32(0x12345678);
    assert!(!value.is_zero());
}
#[test]
fn test_u256_from_u64() {
    let value = U256::from_u64(0x123456789abcdef0);
    assert!(!value.is_zero());
}
// Shifting by zero is the identity.
#[test]
fn test_u256_shl_zero_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shl(0);
    assert_eq!(result, value);
}
// Shifts of 256 or more bits clear the value entirely.
#[test]
fn test_u256_shl_large_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shl(300);
    assert!(result.is_zero());
}
#[test]
fn test_u256_shr_zero_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shr(0);
    assert_eq!(result, value);
}
#[test]
fn test_u256_shr_large_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shr(300);
    assert!(result.is_zero());
}
// Small shifts must change the value without zeroing it.
#[test]
fn test_u256_shl_small_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shl(8);
    assert!(!result.is_zero());
    assert_ne!(result, value);
}
#[test]
fn test_u256_shr_small_shift() {
    let value = U256::from_u32(0x12345678);
    let result = value.shr(8);
    assert!(!result.is_zero());
    assert_ne!(result, value);
}
#[test]
fn test_u256_to_bytes() {
    let value = U256::from_u32(0x12345678);
    let bytes = value.to_bytes();
    assert_eq!(bytes.len(), 32);
}
// from_bytes reads little-endian, so low bytes populate the low word.
#[test]
fn test_u256_from_bytes() {
    let mut bytes = [0u8; 32];
    bytes[0] = 0x78;
    bytes[1] = 0x56;
    bytes[2] = 0x34;
    bytes[3] = 0x12;
    let value = U256::from_bytes(&bytes);
    assert!(!value.is_zero());
}
#[test]
fn test_u256_ordering() {
    let small = U256::from_u32(0x12345678);
    let large = U256::from_u32(0x87654321);
    assert!(small < large);
    assert!(large > small);
    assert_eq!(small.cmp(&small), std::cmp::Ordering::Equal);
}
// expand -> compress -> expand must be lossless for values already in
// normalized compact form: compression truncates to 3 significant bytes,
// so the re-expanded value can never exceed the original, and the two
// high words (where real targets live) must match exactly.
#[test]
fn test_expand_compress_round_trip() {
    let test_bits = vec![
        0x1d00ffff, 0x1b0404cb, 0x0300ffff,
    ];
    for &bits in &test_bits {
        let expanded = match expand_target(bits) {
            Ok(t) => t,
            Err(_) => continue,
        };
        let compressed = match compress_target(&expanded) {
            Ok(b) => b,
            Err(_) => {
                continue;
            }
        };
        let re_expanded = match expand_target(compressed) {
            Ok(t) => t,
            Err(_) => continue,
        };
        if re_expanded > expanded {
            panic!(
                "Round-trip failed for bits 0x{bits:08x}: re-expanded > original (compression should truncate, not add)"
            );
        }
        #[allow(clippy::eq_op)]
        let significant_words_match =
            expanded.0[2] == re_expanded.0[2] && expanded.0[3] == re_expanded.0[3];
        if !significant_words_match {
            panic!(
                "Round-trip failed for bits 0x{:08x}: significant bits differ (expanded: {:?}, re-expanded: {:?})",
                bits, expanded.0, re_expanded.0
            );
        }
    }
}
// The genesis-style bits round-trip exactly through compress/expand.
#[test]
fn test_compress_target_genesis() {
    let genesis_bits = 0x1d00ffff;
    let expanded = expand_target(genesis_bits).unwrap();
    let compressed = compress_target(&expanded).unwrap();
    assert!(compressed <= MAX_TARGET as u64);
    assert!(compressed > 0);
    let re_expanded = expand_target(compressed).unwrap();
    assert_eq!(expanded, re_expanded);
}
// --- serialize_header: wire format ---
#[test]
fn test_serialize_header() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [1; 32],
        merkle_root: [2; 32],
        timestamp: 1234567890,
        bits: 0x1d00ffff,
        nonce: 0x12345678,
    };
    let bytes = serialize_header(&header);
    assert_eq!(bytes.len(), 80);
}
#[test]
fn test_serialize_header_returns_fixed_80_bytes() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 0,
        bits: 0,
        nonce: 0,
    };
    // Type annotation pins the fixed-size array return.
    let bytes: [u8; 80] = serialize_header(&header);
    assert_eq!(bytes.len(), 80);
}
// Verifies the exact byte layout: version[0..4] | prev_hash[4..36] |
// merkle_root[36..68] | timestamp[68..72] | bits[72..76] | nonce[76..80],
// integer fields little-endian.
#[test]
fn test_serialize_header_field_layout() {
    let header = BlockHeader {
        version: 0x01020304,
        prev_block_hash: {
            let mut h = [0u8; 32];
            h[0] = 0xAA;
            h[31] = 0xBB;
            h
        },
        merkle_root: {
            let mut h = [0u8; 32];
            h[0] = 0xCC;
            h[31] = 0xDD;
            h
        },
        timestamp: 0x05060708,
        bits: 0x090A0B0C,
        nonce: 0x0D0E0F10,
    };
    let bytes = serialize_header(&header);
    // version, little-endian
    assert_eq!(bytes[0], 0x04);
    assert_eq!(bytes[1], 0x03);
    assert_eq!(bytes[2], 0x02);
    assert_eq!(bytes[3], 0x01);
    // prev_block_hash endpoints
    assert_eq!(bytes[4], 0xAA);
    assert_eq!(bytes[35], 0xBB);
    // merkle_root endpoints
    assert_eq!(bytes[36], 0xCC);
    assert_eq!(bytes[67], 0xDD);
    // timestamp, little-endian
    assert_eq!(bytes[68], 0x08);
    assert_eq!(bytes[69], 0x07);
    assert_eq!(bytes[70], 0x06);
    assert_eq!(bytes[71], 0x05);
    // bits, little-endian
    assert_eq!(bytes[72], 0x0C);
    assert_eq!(bytes[73], 0x0B);
    assert_eq!(bytes[74], 0x0A);
    assert_eq!(bytes[75], 0x09);
    // nonce, little-endian
    assert_eq!(bytes[76], 0x10);
    assert_eq!(bytes[77], 0x0F);
    assert_eq!(bytes[78], 0x0E);
    assert_eq!(bytes[79], 0x0D);
}
#[test]
fn test_serialize_header_deterministic() {
    let header = BlockHeader {
        version: 1,
        prev_block_hash: [0xFF; 32],
        merkle_root: [0xAA; 32],
        timestamp: 1231006505,
        bits: 0x1d00ffff,
        nonce: 2083236893,
    };
    let bytes1 = serialize_header(&header);
    let bytes2 = serialize_header(&header);
    assert_eq!(bytes1, bytes2, "Header serialization must be deterministic");
}
// Changing only the nonce must change only the final 4 bytes.
#[test]
fn test_serialize_header_different_headers_different_bytes() {
    let header1 = BlockHeader {
        version: 1,
        prev_block_hash: [0; 32],
        merkle_root: [0; 32],
        timestamp: 1231006505,
        bits: 0x1d00ffff,
        nonce: 0,
    };
    let mut header2 = header1.clone();
    header2.nonce = 1;
    let bytes1 = serialize_header(&header1);
    let bytes2 = serialize_header(&header2);
    assert_ne!(
        bytes1, bytes2,
        "Different nonces must produce different serializations"
    );
    assert_eq!(
        bytes1[..76],
        bytes2[..76],
        "Non-nonce bytes should be identical"
    );
    assert_ne!(bytes1[76..], bytes2[76..], "Nonce bytes should differ");
}
// --- u256_from_bytes helper (big-endian u128 over the first 16 bytes) ---
#[test]
fn test_u256_from_bytes_simple() {
    let bytes = [0u8; 32];
    let value = u256_from_bytes(&bytes);
    assert_eq!(value, 0);
}
// Byte 0 lands in the most-significant position (big-endian).
#[test]
fn test_u256_from_bytes_with_data() {
    let mut bytes = [0u8; 32];
    bytes[0] = 0x78;
    bytes[1] = 0x56;
    bytes[2] = 0x34;
    bytes[3] = 0x12;
    let value = u256_from_bytes(&bytes);
    assert_eq!(value, 0x78563412000000000000000000000000);
}
}