use std::{
cmp::{Ordering, PartialEq, PartialOrd},
fmt,
iter::Sum,
ops::{Add, Div, Mul},
};
use hex::{FromHex, ToHex};
use crate::{block, parameters::Network, serialization::BytesInDisplayOrder, BoxError};
pub use crate::work::u256::U256;
#[cfg(any(test, feature = "proptest-impl"))]
mod arbitrary;
#[cfg(test)]
mod tests;
/// A 32-bit compact ("nBits") encoding of a 256-bit difficulty target,
/// as stored in block headers.
///
/// Not every `u32` is a valid encoding: use [`CompactDifficulty::to_expanded`]
/// to validate and expand the value.
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))]
pub struct CompactDifficulty(pub(crate) u32);
/// A compact value that is guaranteed to be invalid: `u32::MAX` has the
/// mantissa sign bit set, so [`CompactDifficulty::to_expanded`] returns `None`.
pub const INVALID_COMPACT_DIFFICULTY: CompactDifficulty = CompactDifficulty(u32::MAX);
/// A 256-bit expanded difficulty target.
///
/// A block hash must compare less than or equal to this value (as a 256-bit
/// integer) to satisfy the proof of work — see the `PartialOrd<block::Hash>`
/// impls below.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ExpandedDifficulty(U256);
/// A 128-bit measure of the work represented by a single block at a given
/// difficulty, produced via `TryFrom<ExpandedDifficulty>` below.
#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct Work(u128);
impl Work {
pub fn zero() -> Self {
Self(0)
}
pub fn as_u128(self) -> u128 {
self.0
}
}
impl fmt::Debug for Work {
    /// Formats the work as hex, decimal, and an approximate `log2` value.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let work = self.0;
        let mut tuple = f.debug_tuple("Work");
        tuple.field(&format_args!("{work:#x}"));
        tuple.field(&format_args!("{work}"));
        tuple.field(&format_args!("{:.5}", (work as f64).log2()));
        tuple.finish()
    }
}
impl CompactDifficulty {
    /// The base of the floating-point encoding: each exponent step scales the
    /// mantissa by one byte (256).
    const BASE: u32 = 256;

    /// The exponent offset: the stored multiplier is `256^(exponent - OFFSET)`,
    /// because the mantissa itself holds 3 bytes.
    const OFFSET: i32 = 3;

    /// The number of bits in the mantissa field, including the sign bit.
    const PRECISION: u32 = 24;

    /// The mantissa sign bit: compact values with this bit set encode negative
    /// targets, which are invalid.
    const SIGN_BIT: u32 = 1 << (CompactDifficulty::PRECISION - 1);

    /// Mask selecting the unsigned mantissa (the low 23 bits).
    const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::SIGN_BIT - 1;

    /// Expands this compact value into a full 256-bit difficulty target:
    /// `mantissa * 256^(exponent)` after offset adjustment.
    ///
    /// Returns `None` for invalid encodings: a set sign bit, results that
    /// overflow 256 bits, or a zero result.
    #[allow(clippy::unwrap_in_result)]
    pub fn to_expanded(self) -> Option<ExpandedDifficulty> {
        // Local aliases to keep the arithmetic below readable.
        const BASE: u32 = CompactDifficulty::BASE;
        const OFFSET: i32 = CompactDifficulty::OFFSET;
        const PRECISION: u32 = CompactDifficulty::PRECISION;
        const SIGN_BIT: u32 = CompactDifficulty::SIGN_BIT;
        const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;

        // Negative targets are invalid.
        if self.0 & SIGN_BIT == SIGN_BIT {
            return None;
        }

        // Split the compact value into its mantissa and (offset-adjusted)
        // exponent fields.
        let mantissa = self.0 & UNSIGNED_MANTISSA_MASK;
        let exponent = i32::try_from(self.0 >> PRECISION).expect("fits in i32") - OFFSET;

        // Normalise so that `mantissa * 256^exponent` fits in a U256:
        let (mantissa, exponent) = match (mantissa, exponent) {
            // 256^32 overflows a U256 for any non-zero mantissa.
            (_, e) if (e >= 32) => return None,
            // 256^31 only fits if the mantissa is a single byte.
            (m, e) if (e == 31 && m > u8::MAX.into()) => return None,
            // Fold spare mantissa bytes into the exponent so the power fits.
            (m, e) if (e == 31 && m <= u8::MAX.into()) => (m << 16, e - 2),
            // 256^30 only fits if the mantissa is at most two bytes.
            (m, e) if (e == 30 && m > u16::MAX.into()) => return None,
            (m, e) if (e == 30 && m <= u16::MAX.into()) => (m << 8, e - 1),
            // Negative exponents shift the mantissa right instead
            // (one byte per exponent step).
            (m, e) if (e < 0) => (m >> ((e.abs() * 8) as u32), 0),
            (m, e) => (m, e),
        };

        let mantissa: U256 = mantissa.into();
        let base: U256 = BASE.into();
        let exponent: U256 = exponent.into();
        let result = mantissa * base.pow(exponent);

        // A zero target is invalid (nothing could ever satisfy it).
        if result == U256::zero() {
            None
        } else {
            Some(result.into())
        }
    }

    /// Converts this compact value into the equivalent block [`Work`].
    ///
    /// Returns `None` if the compact value is invalid, or if the work does not
    /// fit in a `u128`.
    pub fn to_work(self) -> Option<Work> {
        let expanded = self.to_expanded()?;
        Work::try_from(expanded).ok()
    }

    /// Returns the compact value as 4 big-endian (display order) bytes.
    pub fn bytes_in_display_order(&self) -> [u8; 4] {
        self.0.to_be_bytes()
    }

    /// Parses 4 big-endian bytes into a compact value, rejecting encodings
    /// that do not expand to a valid target.
    pub fn from_bytes_in_display_order(
        bytes_in_display_order: &[u8; 4],
    ) -> Result<CompactDifficulty, BoxError> {
        let internal_byte_order = u32::from_be_bytes(*bytes_in_display_order);
        let difficulty = CompactDifficulty(internal_byte_order);
        // Validate by attempting the expansion.
        if difficulty.to_expanded().is_none() {
            return Err("invalid difficulty value".into());
        }
        Ok(difficulty)
    }

    /// Returns the floating-point ratio of the network's difficulty limit to
    /// this difficulty, using mantissa/exponent arithmetic in the style of
    /// zcashd's `GetDifficulty()`.
    pub fn relative_to_network(&self, network: &Network) -> f64 {
        let network_difficulty = network.target_difficulty_limit().to_compact();

        // The most significant byte of a compact value is its exponent.
        let [mut n_shift, ..] = self.0.to_be_bytes();
        let [n_shift_amount, ..] = network_difficulty.0.to_be_bytes();

        // Shifting left by 8 discards the exponent byte, so this is the ratio
        // of the two mantissas.
        let mut d_diff = f64::from(network_difficulty.0 << 8) / f64::from(self.0 << 8);

        // Scale by 256 for each step of exponent difference.
        while n_shift < n_shift_amount {
            d_diff *= 256.0;
            n_shift += 1;
        }
        while n_shift > n_shift_amount {
            d_diff /= 256.0;
            n_shift -= 1;
        }

        d_diff
    }
}
impl fmt::Debug for CompactDifficulty {
    /// Shows the raw compact bits in hex, plus the expanded target (if valid).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let expanded = self.to_expanded();
        f.debug_tuple("CompactDifficulty")
            .field(&format_args!("{:#010x}", self.0))
            .field(&format_args!("{expanded:?}"))
            .finish()
    }
}
impl fmt::Display for CompactDifficulty {
    /// Displays the compact value as big-endian lowercase hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let hex: String = self.encode_hex();
        f.write_str(&hex)
    }
}
impl ToHex for &CompactDifficulty {
    /// Encodes the 4 display-order (big-endian) bytes as lowercase hex.
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex()
    }

    /// Encodes the 4 display-order (big-endian) bytes as uppercase hex.
    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex_upper()
    }
}
impl ToHex for CompactDifficulty {
    /// Encodes as lowercase hex, delegating to the `&CompactDifficulty` impl.
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex()
    }

    /// Encodes as uppercase hex, delegating to the `&CompactDifficulty` impl.
    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex_upper()
    }
}
impl FromHex for CompactDifficulty {
type Error = BoxError;
fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
let bytes_in_display_order = <[u8; 4]>::from_hex(hex)?;
CompactDifficulty::from_bytes_in_display_order(&bytes_in_display_order)
}
}
impl TryFrom<ExpandedDifficulty> for Work {
    type Error = ();

    /// Converts a difficulty target into its equivalent work,
    /// `floor(2^256 / (expanded + 1))`, computed without overflowing 256 bits
    /// as `(!expanded / (expanded + 1)) + 1`, since
    /// `!expanded == 2^256 - 1 - expanded`.
    ///
    /// Returns `Err(())` if the work does not fit in a `u128`.
    fn try_from(expanded: ExpandedDifficulty) -> Result<Self, Self::Error> {
        let result = (!expanded.0 / (expanded.0 + 1)) + 1;
        if result <= u128::MAX.into() {
            Ok(Work(result.as_u128()))
        } else {
            Err(())
        }
    }
}
impl From<ExpandedDifficulty> for CompactDifficulty {
fn from(value: ExpandedDifficulty) -> Self {
value.to_compact()
}
}
impl BytesInDisplayOrder for ExpandedDifficulty {
    /// Returns the target as 32 big-endian bytes.
    fn bytes_in_serialized_order(&self) -> [u8; 32] {
        self.0.to_big_endian()
    }

    /// Parses 32 big-endian bytes into a target.
    fn from_bytes_in_serialized_order(bytes: [u8; 32]) -> Self {
        ExpandedDifficulty(U256::from_big_endian(&bytes))
    }
}
impl ExpandedDifficulty {
    /// Reinterprets a block hash's little-endian bytes as a 256-bit value, so
    /// hashes can be compared directly against difficulty targets.
    pub(super) fn from_hash(hash: &block::Hash) -> ExpandedDifficulty {
        U256::from_little_endian(&hash.0).into()
    }

    /// Compresses this target into its compact ("nBits") encoding, keeping
    /// only the 3 most significant bytes as the mantissa.
    ///
    /// # Panics
    ///
    /// If `self` is zero, or if an internal invariant of the encoding is
    /// violated.
    pub fn to_compact(self) -> CompactDifficulty {
        assert!(self.0 > 0.into(), "Zero difficulty values are invalid");

        const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;
        const OFFSET: i32 = CompactDifficulty::OFFSET;

        // The exponent byte: one more than the byte position of the highest
        // set bit, so the mantissa's top byte never has its sign bit set.
        let size = self.0.bits() / 8 + 1;

        // Keep the 3 most significant bytes: shift small values left into
        // position, and larger values right (discarding low-order bytes).
        let mantissa = if self.0 <= UNSIGNED_MANTISSA_MASK.into() {
            self.0 << (8 * (3 - size))
        } else {
            self.0 >> (8 * (size - 3))
        };

        assert!(
            size < (31 + OFFSET) as _,
            "256^size (256^{size}) must fit in a u256, after the sign bit adjustment and offset"
        );
        let size = u32::try_from(size).expect("a 0-6 bit value fits in a u32");
        assert!(
            mantissa <= UNSIGNED_MANTISSA_MASK.into(),
            "mantissa {mantissa:x?} must fit in its compact field"
        );
        let mantissa = u32::try_from(mantissa).expect("a 0-23 bit value fits in a u32");

        // Pack the mantissa into the low 24 bits and the exponent into the
        // high byte. The mantissa is non-zero because `self` is non-zero.
        if mantissa > 0 {
            CompactDifficulty(mantissa + (size << 24))
        } else {
            unreachable!("converted CompactDifficulty values must be valid")
        }
    }
}
impl fmt::Display for ExpandedDifficulty {
    /// Displays the target as big-endian lowercase hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let hex: String = self.encode_hex();
        f.write_str(&hex)
    }
}
impl fmt::Debug for ExpandedDifficulty {
    /// Formats the target as a quoted hex string in a tuple wrapper.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let hex: String = self.encode_hex();
        f.debug_tuple("ExpandedDifficulty").field(&hex).finish()
    }
}
impl ToHex for &ExpandedDifficulty {
    /// Encodes the 32 display-order bytes as lowercase hex.
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex()
    }

    /// Encodes the 32 display-order bytes as uppercase hex.
    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex_upper()
    }
}
impl ToHex for ExpandedDifficulty {
    /// Encodes as lowercase hex, delegating to the `&ExpandedDifficulty` impl.
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex()
    }

    /// Encodes as uppercase hex, delegating to the `&ExpandedDifficulty` impl.
    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex_upper()
    }
}
impl FromHex for ExpandedDifficulty {
    type Error = <[u8; 32] as FromHex>::Error;

    /// Parses 64 hex digits (big-endian display order) into a target.
    fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
        let display_bytes = <[u8; 32]>::from_hex(hex)?;
        Ok(Self::from_bytes_in_display_order(&display_bytes))
    }
}
impl From<U256> for ExpandedDifficulty {
fn from(value: U256) -> Self {
ExpandedDifficulty(value)
}
}
impl From<ExpandedDifficulty> for U256 {
fn from(value: ExpandedDifficulty) -> Self {
value.0
}
}
impl Sum<ExpandedDifficulty> for ExpandedDifficulty {
    /// Sums the underlying 256-bit values of an iterator of difficulties.
    fn sum<I: Iterator<Item = ExpandedDifficulty>>(iter: I) -> Self {
        let mut total = U256::zero();
        for difficulty in iter {
            total = total + difficulty.0;
        }
        total.into()
    }
}
impl<T> Div<T> for ExpandedDifficulty
where
    T: Into<U256>,
{
    type Output = ExpandedDifficulty;

    /// Divides the underlying target value by `rhs`.
    fn div(self, rhs: T) -> Self::Output {
        let quotient = self.0 / rhs;
        ExpandedDifficulty(quotient)
    }
}
impl<T> Mul<T> for ExpandedDifficulty
where
    U256: Mul<T>,
    <U256 as Mul<T>>::Output: Into<U256>,
{
    type Output = ExpandedDifficulty;

    /// Multiplies the underlying target value by `rhs`.
    fn mul(self, rhs: T) -> ExpandedDifficulty {
        let product = self.0 * rhs;
        ExpandedDifficulty(product.into())
    }
}
impl PartialEq<block::Hash> for ExpandedDifficulty {
    /// A target equals a hash when their 256-bit values compare equal.
    fn eq(&self, other: &block::Hash) -> bool {
        matches!(self.partial_cmp(other), Some(Ordering::Equal))
    }
}
impl PartialOrd<block::Hash> for ExpandedDifficulty {
    /// Orders a target against a hash by comparing their 256-bit values.
    fn partial_cmp(&self, other: &block::Hash) -> Option<Ordering> {
        let other_as_difficulty = ExpandedDifficulty::from_hash(other);
        self.partial_cmp(&other_as_difficulty)
    }
}
impl PartialEq<ExpandedDifficulty> for block::Hash {
    /// Delegates to the symmetric `ExpandedDifficulty == block::Hash` impl.
    fn eq(&self, other: &ExpandedDifficulty) -> bool {
        *other == *self
    }
}
impl PartialOrd<ExpandedDifficulty> for block::Hash {
    /// Orders a hash against a target by reversing the symmetric comparison.
    #[allow(clippy::unwrap_in_result)]
    fn partial_cmp(&self, other: &ExpandedDifficulty) -> Option<Ordering> {
        Some(
            other
                .partial_cmp(self)
                // The symmetric impl compares both values as `U256`, which has
                // a total order, so it always returns `Some`.
                .expect("difficulties and hashes have a total order")
                .reverse(),
        )
    }
}
impl std::ops::Add for Work {
    type Output = PartialCumulativeWork;

    /// Adds two work values, widening the result into a cumulative total.
    fn add(self, rhs: Work) -> PartialCumulativeWork {
        let total = PartialCumulativeWork::from(self);
        total + rhs
    }
}
/// A partial or cumulative sum of [`Work`] values, stored as a `u128`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct PartialCumulativeWork(u128);
impl PartialCumulativeWork {
pub fn zero() -> Self {
Self(0)
}
pub fn as_u128(self) -> u128 {
self.0
}
pub fn difficulty_multiplier_for_display(&self, network: Network) -> f64 {
let pow_limit = network
.target_difficulty_limit()
.to_compact()
.to_work()
.expect("target difficult limit is valid work");
let pow_limit = pow_limit.as_u128() as f64;
let work = self.as_u128() as f64;
work / pow_limit
}
pub fn difficulty_bits_for_display(&self) -> f64 {
let work = self.as_u128() as f64;
work.log2()
}
}
/// Network parameters for proof-of-work difficulty.
pub trait ParameterDifficulty {
    /// Returns the easiest (numerically largest) valid difficulty target
    /// for this network.
    fn target_difficulty_limit(&self) -> ExpandedDifficulty;
}
impl ParameterDifficulty for Network {
    /// Returns the mainnet limit `2^243 - 1`, or delegates to the testnet
    /// parameters for testnet variants.
    fn target_difficulty_limit(&self) -> ExpandedDifficulty {
        let limit: U256 = match self {
            // Mainnet proof-of-work limit: 2^243 - 1.
            Network::Mainnet => (U256::one() << 243) - 1,
            Network::Testnet(params) => return params.target_difficulty_limit(),
        };

        // Round-trip through the compact encoding, so the limit matches the
        // precision that block headers can actually express.
        ExpandedDifficulty(limit)
            .to_compact()
            .to_expanded()
            .expect("difficulty limits are valid expanded values")
    }
}
impl From<Work> for PartialCumulativeWork {
    /// Widens a single block's work into a cumulative total.
    fn from(work: Work) -> Self {
        PartialCumulativeWork(work.as_u128())
    }
}
impl std::ops::Add<Work> for PartialCumulativeWork {
    type Output = PartialCumulativeWork;

    /// Adds a block's work to the running total, panicking on `u128` overflow.
    fn add(self, rhs: Work) -> Self::Output {
        let total = self
            .0
            .checked_add(rhs.0)
            .expect("Work values do not overflow");
        PartialCumulativeWork(total)
    }
}
impl std::ops::AddAssign<Work> for PartialCumulativeWork {
    /// In-place version of `PartialCumulativeWork + Work`.
    fn add_assign(&mut self, rhs: Work) {
        let updated = *self + rhs;
        *self = updated;
    }
}
impl std::ops::Sub<Work> for PartialCumulativeWork {
    type Output = PartialCumulativeWork;

    /// Removes previously-added work from the total, panicking on underflow.
    fn sub(self, rhs: Work) -> Self::Output {
        let remaining = self.0
            .checked_sub(rhs.0)
            .expect("PartialCumulativeWork values do not underflow: all subtracted Work values must have been previously added to the PartialCumulativeWork");
        PartialCumulativeWork(remaining)
    }
}
impl std::ops::SubAssign<Work> for PartialCumulativeWork {
    /// In-place version of `PartialCumulativeWork - Work`.
    fn sub_assign(&mut self, rhs: Work) {
        let updated = *self - rhs;
        *self = updated;
    }
}