1use crate::constants::*;
4use crate::error::{ConsensusError, Result};
5use crate::types::*;
6use blvm_spec_lock::spec_locked;
7use sha2::{Digest, Sha256};
8
/// Computes the compact difficulty `bits` for the next block using the
/// legacy (uncorrected) expected-timespan formula, i.e. the full
/// `DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK` window.
///
/// `_current_header` is currently unused; the calculation is driven entirely
/// by `prev_headers` (first/last timestamps and the last header's `bits`).
///
/// # Errors
/// Propagates the errors of `get_next_work_required_internal` (fewer than two
/// headers, out-of-order timestamps, or an invalid target encoding).
#[spec_locked("7.1")]
pub fn get_next_work_required(
    _current_header: &BlockHeader,
    prev_headers: &[BlockHeader],
) -> Result<Natural> {
    get_next_work_required_internal(_current_header, prev_headers, false)
}
40
/// Computes the compact difficulty `bits` for the next block using the
/// corrected expected-timespan formula: `N` headers span `N - 1` inter-block
/// intervals, so the expected time is `(N - 1) * TARGET_TIME_PER_BLOCK`.
///
/// `_current_header` is currently unused; the calculation is driven entirely
/// by `prev_headers` (first/last timestamps and the last header's `bits`).
///
/// # Errors
/// Propagates the errors of `get_next_work_required_internal` (fewer than two
/// headers, out-of-order timestamps, or an invalid target encoding).
#[spec_locked("7.1")]
pub fn get_next_work_required_corrected(
    _current_header: &BlockHeader,
    prev_headers: &[BlockHeader],
) -> Result<Natural> {
    get_next_work_required_internal(_current_header, prev_headers, true)
}
59
60fn get_next_work_required_internal(
65 _current_header: &BlockHeader,
66 prev_headers: &[BlockHeader],
67 use_corrected: bool,
68) -> Result<Natural> {
69 if prev_headers.len() < 2 {
71 return Err(ConsensusError::InvalidProofOfWork(
72 "Insufficient headers for difficulty adjustment".into(),
73 ));
74 }
75
76 let last_header = &prev_headers[prev_headers.len() - 1];
78 let previous_bits = last_header.bits;
79
80 let first_timestamp = prev_headers[0].timestamp;
83 let last_timestamp = last_header.timestamp;
84
85 if last_timestamp < first_timestamp {
87 return Err(ConsensusError::InvalidProofOfWork(
88 "Invalid timestamp order in difficulty adjustment".into(),
89 ));
90 }
91
92 let time_span = last_timestamp - first_timestamp;
93
94 let expected_time = if use_corrected {
99 let num_intervals = prev_headers.len() as u64;
103 if num_intervals == DIFFICULTY_ADJUSTMENT_INTERVAL {
104 (DIFFICULTY_ADJUSTMENT_INTERVAL - 1) * TARGET_TIME_PER_BLOCK
105 } else {
106 (num_intervals - 1) * TARGET_TIME_PER_BLOCK
108 }
109 } else {
110 DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK
112 };
113
114 let clamped_timespan = time_span.max(expected_time / 4).min(expected_time * 4);
117
118 debug_assert!(
120 clamped_timespan >= expected_time / 4,
121 "Clamped timespan ({}) must be >= expected_time/4 ({})",
122 clamped_timespan,
123 expected_time / 4
124 );
125 debug_assert!(
126 clamped_timespan <= expected_time * 4,
127 "Clamped timespan ({}) must be <= expected_time*4 ({})",
128 clamped_timespan,
129 expected_time * 4
130 );
131
132 let old_target = expand_target(previous_bits)?;
134
135 debug_assert!(!old_target.is_zero(), "Old target must be non-zero");
137
138 let multiplied_target = match old_target.checked_mul_u64(clamped_timespan) {
143 Some(t) => t,
144 None => {
145 return Ok(previous_bits);
146 }
147 };
148
149 debug_assert!(
151 multiplied_target >= old_target || clamped_timespan < expected_time,
152 "Multiplied target should be >= old target when timespan >= expected_time"
153 );
154
155 let new_target = multiplied_target.div_u64(expected_time);
157
158 debug_assert!(
160 !new_target.is_zero(),
161 "New target must be non-zero after division"
162 );
163
164 let new_bits = compress_target(&new_target)?;
166
167 let clamped_bits = new_bits.min(MAX_TARGET as Natural);
169
170 debug_assert!(
172 clamped_bits > 0,
173 "Clamped bits ({clamped_bits}) must be positive"
174 );
175 debug_assert!(
176 clamped_bits <= MAX_TARGET as Natural,
177 "Clamped bits ({clamped_bits}) must be <= MAX_TARGET ({MAX_TARGET})"
178 );
179
180 if clamped_bits == 0 {
182 return Err(ConsensusError::InvalidProofOfWork(
183 "Difficulty adjustment resulted in zero target".into(),
184 ));
185 }
186
187 Ok(clamped_bits)
188}
189
190#[spec_locked("7.2")]
195#[cfg_attr(feature = "production", inline(always))]
196#[cfg_attr(not(feature = "production"), inline)]
197pub fn check_proof_of_work(header: &BlockHeader) -> Result<bool> {
198 let header_bytes = serialize_header(header);
200
201 let hash1 = Sha256::digest(header_bytes);
203 let hash2 = Sha256::digest(hash1);
204
205 let mut hash_bytes = [0u8; 32];
207 hash_bytes.copy_from_slice(&hash2);
208 let hash_value = U256::from_bytes(&hash_bytes);
209
210 let target = expand_target(header.bits)?;
212
213 Ok(hash_value < target)
215}
216
/// Batch proof-of-work verification (production builds only).
///
/// Serializes all headers (in parallel when the `rayon` feature is on),
/// hashes them through the SIMD double-SHA256 helper, then checks each digest
/// against its header's target. Returns one `(is_valid, Option<hash>)` entry
/// per input header; the hash is only retained for headers that pass, and a
/// header whose `bits` fail to expand yields `(false, None)`.
#[cfg(feature = "production")]
#[spec_locked("7.2")]
pub fn batch_check_proof_of_work(headers: &[BlockHeader]) -> Result<Vec<(bool, Option<Hash>)>> {
    use crate::optimizations::simd_vectorization;

    if headers.is_empty() {
        return Ok(Vec::new());
    }

    // Serialize every header up front.
    let serialized: Vec<[u8; 80]> = {
        #[cfg(feature = "rayon")]
        {
            use rayon::prelude::*;
            headers.par_iter().map(serialize_header).collect()
        }
        #[cfg(not(feature = "rayon"))]
        {
            headers.iter().map(serialize_header).collect()
        }
    };

    let slices: Vec<&[u8]> = serialized.iter().map(|b| b.as_slice()).collect();
    let aligned = simd_vectorization::batch_double_sha256_aligned(&slices);
    let digests: Vec<[u8; 32]> = aligned.iter().map(|h| *h.as_bytes()).collect();

    let results = headers
        .iter()
        .enumerate()
        .map(|(i, header)| {
            let hash = digests[i];
            match expand_target(header.bits) {
                Ok(target) => {
                    let passes = U256::from_bytes(&hash) < target;
                    (passes, if passes { Some(hash) } else { None })
                }
                Err(_) => (false, None),
            }
        })
        .collect();

    Ok(results)
}
280
/// Minimal fixed-width 256-bit unsigned integer stored as four 64-bit limbs
/// in little-endian limb order: `self.0[0]` is least significant, `self.0[3]`
/// most significant.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct U256([u64; 4]);

impl U256 {
    /// The value 0.
    fn zero() -> Self {
        U256([0; 4])
    }

    /// Builds a `U256` from a `u32`; the value occupies the low limb.
    fn from_u32(value: u32) -> Self {
        U256([value as u64, 0, 0, 0])
    }

    /// Builds a `U256` from a `u64`; the value occupies the low limb.
    #[cfg(test)]
    fn from_u64(value: u64) -> Self {
        U256([value, 0, 0, 0])
    }

    /// Returns the least-significant 64 bits.
    fn get_low_64(&self) -> u64 {
        self.0[0]
    }

    /// Serializes to 32 little-endian bytes: limb `i` occupies bytes
    /// `[8i, 8i + 8)`.
    #[cfg(test)]
    fn to_bytes(&self) -> [u8; 32] {
        let mut bytes = [0u8; 32];
        for (i, &word) in self.0.iter().enumerate() {
            let word_bytes = word.to_le_bytes();
            bytes[i * 8..(i + 1) * 8].copy_from_slice(&word_bytes);
        }
        bytes
    }

    /// Logical left shift by `shift` bits; shifts of 256 or more yield zero.
    fn shl(&self, shift: u32) -> Self {
        if shift >= 256 {
            return U256::zero();
        }

        let mut result = U256::zero();
        let word_shift = (shift / 64) as usize;
        let bit_shift = shift % 64;

        for i in 0..4 {
            if i + word_shift < 4 {
                result.0[i + word_shift] |= self.0[i] << bit_shift;
                // Carry the bits shifted out of limb i into the next limb.
                // The `bit_shift > 0` guard avoids the undefined 64-bit
                // right-shift of `64 - 0`.
                if bit_shift > 0 && i + word_shift + 1 < 4 {
                    result.0[i + word_shift + 1] |= self.0[i] >> (64 - bit_shift);
                }
            }
        }

        result
    }

    /// Logical right shift by `shift` bits; shifts of 256 or more yield zero.
    fn shr(&self, shift: u32) -> Self {
        if shift >= 256 {
            return U256::zero();
        }

        let mut result = U256::zero();
        let word_shift = (shift / 64) as usize;
        let bit_shift = shift % 64;

        debug_assert!(
            word_shift < 4,
            "Word shift ({word_shift}) must be < 4 (shift: {shift})"
        );

        debug_assert!(
            bit_shift < 64,
            "Bit shift ({bit_shift}) must be < 64 (shift: {shift})"
        );

        if bit_shift == 0 {
            // Whole-limb shift: plain limb copy, no cross-limb carries.
            for i in word_shift..4 {
                result.0[i - word_shift] = self.0[i];
            }
        } else {
            // Each output limb takes the high part of limb i and the low
            // part of limb i + 1.
            for i in word_shift..4 {
                let mut word = self.0[i] >> bit_shift;
                if i + 1 < 4 {
                    word |= self.0[i + 1] << (64 - bit_shift);
                }
                result.0[i - word_shift] = word;
            }
        }

        result
    }

    /// Deserializes from 32 little-endian bytes (inverse of `to_bytes`).
    fn from_bytes(bytes: &[u8; 32]) -> Self {
        let mut words = [0u64; 4];
        for (i, word) in words.iter_mut().enumerate() {
            let start = i * 8;
            // `_end` is unused; kept as-is (doc-only pass).
            let _end = start + 8;
            *word = u64::from_le_bytes([
                bytes[start],
                bytes[start + 1],
                bytes[start + 2],
                bytes[start + 3],
                bytes[start + 4],
                bytes[start + 5],
                bytes[start + 6],
                bytes[start + 7],
            ]);
        }
        U256(words)
    }

    /// Schoolbook 256x64 multiplication; returns `None` if the product does
    /// not fit in 256 bits.
    fn checked_mul_u64(&self, rhs: u64) -> Option<Self> {
        let mut carry = 0u128;
        let mut result = U256::zero();

        // Production builds use a manually unrolled loop; both paths compute
        // limb-by-limb `self * rhs` with a 128-bit carry.
        #[cfg(feature = "production")]
        {
            let product = (self.0[0] as u128) * (rhs as u128) + carry;
            result.0[0] = product as u64;
            carry = product >> 64;

            let product = (self.0[1] as u128) * (rhs as u128) + carry;
            result.0[1] = product as u64;
            carry = product >> 64;

            let product = (self.0[2] as u128) * (rhs as u128) + carry;
            result.0[2] = product as u64;
            carry = product >> 64;

            let product = (self.0[3] as u128) * (rhs as u128) + carry;
            result.0[3] = product as u64;
            carry = product >> 64;

            // Any carry out of the top limb means 256-bit overflow.
            if carry > 0 {
                return None;
            }
        }

        #[cfg(not(feature = "production"))]
        {
            for i in 0..4 {
                let product = (self.0[i] as u128) * (rhs as u128) + carry;
                result.0[i] = product as u64;
                carry = product >> 64;

                // Any carry out of the top limb means 256-bit overflow.
                if i == 3 && carry > 0 {
                    return None;
                }
            }
        }

        Some(result)
    }

    /// Long division of a 256-bit value by a `u64`, most-significant limb
    /// first. Division by zero returns the saturated value `U256::MAX`
    /// (all bits set) rather than panicking.
    fn div_u64(&self, rhs: u64) -> Self {
        if rhs == 0 {
            return U256([u64::MAX; 4]);
        }

        let mut remainder = 0u128;
        let mut result = U256::zero();

        // Production builds use a manually unrolled loop; both paths perform
        // the same high-to-low 128-by-64 long division steps.
        #[cfg(feature = "production")]
        {
            let dividend = (remainder << 64) | (self.0[3] as u128);
            let quotient = dividend / (rhs as u128);
            remainder = dividend % (rhs as u128);
            debug_assert!(quotient <= u64::MAX as u128, "Quotient must fit in u64");
            result.0[3] = quotient as u64;

            let dividend = (remainder << 64) | (self.0[2] as u128);
            let quotient = dividend / (rhs as u128);
            remainder = dividend % (rhs as u128);
            debug_assert!(quotient <= u64::MAX as u128, "Quotient must fit in u64");
            result.0[2] = quotient as u64;

            let dividend = (remainder << 64) | (self.0[1] as u128);
            let quotient = dividend / (rhs as u128);
            remainder = dividend % (rhs as u128);
            debug_assert!(quotient <= u64::MAX as u128, "Quotient must fit in u64");
            result.0[1] = quotient as u64;

            let dividend = (remainder << 64) | (self.0[0] as u128);
            let quotient = dividend / (rhs as u128);
            remainder = dividend % (rhs as u128);
            debug_assert!(quotient <= u64::MAX as u128, "Quotient must fit in u64");
            result.0[0] = quotient as u64;
        }

        #[cfg(not(feature = "production"))]
        {
            for i in (0..4).rev() {
                let dividend = (remainder << 64) | (self.0[i] as u128);
                let quotient = dividend / (rhs as u128);
                remainder = dividend % (rhs as u128);
                debug_assert!(
                    quotient <= u64::MAX as u128,
                    "Quotient ({quotient}) must fit in u64"
                );
                result.0[i] = quotient as u64;
            }
        }

        debug_assert!(
            result <= *self,
            "Division result ({result:?}) must be <= dividend ({self:?})"
        );

        debug_assert!(
            remainder < rhs as u128,
            "Remainder ({remainder}) must be < divisor ({rhs})"
        );

        result
    }

    /// Returns the 0-based index of the most significant set bit, or `None`
    /// for zero. Scans limbs from most significant to least.
    fn highest_set_bit(&self) -> Option<u32> {
        for (i, &word) in self.0.iter().rev().enumerate() {
            if word != 0 {
                let word_index = (3 - i) as u32;
                let bit_pos = word_index * 64 + (63 - word.leading_zeros());
                return Some(bit_pos);
            }
        }
        None
    }

    /// True when all limbs are zero.
    fn is_zero(&self) -> bool {
        self.0.iter().all(|&x| x == 0)
    }

    /// Lossy conversion to `f64` (values above 2^53 lose precision); used
    /// only for human-facing difficulty ratios.
    fn to_f64(&self) -> f64 {
        if self.is_zero() {
            return 0.0;
        }
        let mut result = 0.0_f64;
        result += self.0[0] as f64;
        result += (self.0[1] as f64) * 2.0_f64.powi(64);
        result += (self.0[2] as f64) * 2.0_f64.powi(128);
        result += (self.0[3] as f64) * 2.0_f64.powi(192);
        result
    }
}
563
564impl PartialOrd for U256 {
565 fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
566 Some(self.cmp(other))
567 }
568}
569
570impl Ord for U256 {
571 fn cmp(&self, other: &Self) -> std::cmp::Ordering {
572 for (a, b) in self.0.iter().rev().zip(other.0.iter().rev()) {
573 match a.cmp(b) {
574 std::cmp::Ordering::Equal => continue,
575 other => return other,
576 }
577 }
578 std::cmp::Ordering::Equal
579 }
580}
581
582#[spec_locked("7.1")]
587pub fn difficulty_from_bits(bits: Natural) -> Result<f64> {
588 let target = expand_target(bits)?;
589 if target.is_zero() {
590 return Ok(1.0);
591 }
592 let max_target = U256::from_bytes(&[
594 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00, 0x00, 0xFF,
595 0xFF, 0x00, 0x00, 0x00, 0x00,
596 ]);
597 let max_f64 = max_target.to_f64();
598 let target_f64 = target.to_f64();
599 if target_f64 == 0.0 {
600 return Ok(1.0);
601 }
602 Ok((max_f64 / target_f64).max(1.0))
603}
604
605#[spec_locked("7.1")]
635pub fn expand_target(bits: Natural) -> Result<U256> {
636 let exponent = (bits >> 24) as u8;
648 let mantissa = bits & 0x007fffff;
650
651 if !(3..=32).contains(&exponent) {
654 return Err(ConsensusError::InvalidProofOfWork(
655 "Invalid target exponent".into(),
656 ));
657 }
658
659 if mantissa == 0 {
660 return Ok(U256::zero());
661 }
662
663 if exponent <= 3 {
665 let shift = 8 * (3 - exponent);
670 let mantissa_u256 = U256::from_u32(mantissa as u32);
671 Ok(mantissa_u256.shr(shift as u32))
672 } else {
673 let shift = 8u32 * (exponent as u32 - 3);
677 if shift >= 256 {
678 return Err(crate::error::ConsensusError::InvalidProofOfWork(
679 "Target too large".into(),
680 ));
681 }
682 let mantissa_u256 = U256::from_u32(mantissa as u32);
683 Ok(mantissa_u256.shl(shift))
684 }
685}
686
/// Compresses a 256-bit target into Bitcoin-style compact "bits":
/// high byte = size in bytes (exponent), low 23 bits = mantissa.
#[spec_locked("7.1")]
fn compress_target(target: &U256) -> Result<Natural> {
    if target.is_zero() {
        // Zero has no canonical compact form; return the conventional
        // zero-mantissa encoding with exponent 0x1d.
        return Ok(0x1d000000);
    }

    let highest_bit = target
        .highest_set_bit()
        .ok_or_else(|| ConsensusError::InvalidProofOfWork("Cannot compress zero target".into()))?;

    // Number of bytes needed to represent the target.
    let n_size = (highest_bit + 1).div_ceil(8);

    let mut n_compact: u64;

    if n_size <= 3 {
        // Small targets fit entirely in the mantissa; left-align into the
        // three mantissa bytes.
        let low_64 = target.get_low_64();
        let shift_bytes = 3 - n_size;
        n_compact = low_64 << (8 * shift_bytes);
    } else {
        // Keep only the top three bytes of the target as the mantissa.
        let shift_bytes = n_size - 3;
        let shifted = target.shr(shift_bytes * 8);
        n_compact = shifted.get_low_64();
    }

    // If the mantissa's sign bit (0x00800000) is set, shift one more byte
    // and bump the exponent so the mantissa stays "non-negative" — compact
    // encoding treats the mantissa as signed.
    let mut n_size_final = n_size;
    while (n_compact & 0x00800000) != 0 {
        n_compact >>= 8;
        n_size_final += 1;
    }

    let mantissa = (n_compact & 0x007fffff) as u32;

    // Project-specific bound: exponents above 29 are rejected.
    if n_size_final > 29 {
        return Err(ConsensusError::InvalidProofOfWork(
            format!("Target too large: exponent {n_size_final} exceeds maximum 29").into(),
        ));
    }

    // Pack exponent and mantissa into the compact form.
    let bits = (n_size_final << 24) | mantissa;

    Ok(bits as Natural)
}
774
775fn serialize_header(header: &BlockHeader) -> [u8; 80] {
777 let mut bytes = [0u8; 80];
779
780 bytes[0..4].copy_from_slice(&(header.version as u32).to_le_bytes());
781 bytes[4..36].copy_from_slice(&header.prev_block_hash);
782 bytes[36..68].copy_from_slice(&header.merkle_root);
783 bytes[68..72].copy_from_slice(&(header.timestamp as u32).to_le_bytes());
784 bytes[72..76].copy_from_slice(&(header.bits as u32).to_le_bytes());
785 bytes[76..80].copy_from_slice(&(header.nonce as u32).to_le_bytes());
786
787 bytes
788}
789
/// Test helper: folds the first 16 bytes of `bytes` into a `u128`,
/// big-endian (byte 0 becomes the most significant byte). Bytes past
/// index 15 are ignored.
#[cfg(test)]
fn u256_from_bytes(bytes: &[u8]) -> u128 {
    bytes
        .iter()
        .take(16)
        .enumerate()
        .fold(0u128, |acc, (i, &byte)| {
            acc | ((byte as u128) << (8 * (15 - i)))
        })
}
802
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    /// Strategy producing arbitrary headers whose compact `bits` stay within
    /// the exponent range accepted by `expand_target`.
    fn arb_block_header() -> impl Strategy<Value = BlockHeader> {
        (
            any::<i64>(),
            any::<[u8; 32]>(),
            any::<[u8; 32]>(),
            any::<u64>(),
            0x03000000u32..0x1d00ffffu32,
            any::<u64>(),
        )
            .prop_map(
                |(version, prev_block_hash, merkle_root, timestamp, bits, nonce)| BlockHeader {
                    version,
                    prev_block_hash,
                    merkle_root,
                    timestamp,
                    bits: bits as u64,
                    nonce,
                },
            )
    }

    proptest! {
        // Expanding any in-range compact encoding yields a target whose
        // zero-ness matches the mantissa's.
        #[test]
        fn prop_expand_target_valid_range(
            bits in 0x03000000u32..0x1d00ffffu32
        ) {
            let result = expand_target(bits as u64);
            let mantissa = bits & 0x00ffffff;

            match result {
                Ok(target) => {
                    prop_assert!(target >= U256::zero(), "Target must be non-negative");

                    if mantissa == 0 {
                        prop_assert!(target.is_zero(), "Zero mantissa should produce zero target");
                    } else {
                        prop_assert!(!target.is_zero(), "Non-zero mantissa should produce non-zero target");
                    }
                },
                Err(_) => {
                    // Encodings rejected by expand_target are acceptable here.
                }
            }
        }
    }

    proptest! {
        // The PoW check is a pure function of the header bytes.
        #[test]
        fn prop_check_proof_of_work_deterministic(
            header in arb_block_header()
        ) {
            let mut valid_header = header;
            valid_header.bits = 0x1d00ffff;
            let result1 = check_proof_of_work(&valid_header).unwrap_or(false);
            let result2 = check_proof_of_work(&valid_header).unwrap_or(false);

            prop_assert_eq!(result1, result2, "Proof of work check must be deterministic");
        }
    }

    proptest! {
        // Any successful adjustment stays within (0, MAX_TARGET].
        #[test]
        fn prop_get_next_work_required_bounds(
            current_header in arb_block_header(),
            prev_headers in proptest::collection::vec(arb_block_header(), 2..6)
        ) {
            let mut valid_headers = prev_headers;
            if let Some(first_header) = valid_headers.first_mut() {
                // BUGFIX: the original plain subtraction underflowed (panic in
                // debug builds) whenever proptest generated a timestamp below
                // 14 days (86400 * 14 = 1209600 seconds).
                first_header.timestamp = current_header.timestamp.saturating_sub(86400 * 14);
            }

            let result = get_next_work_required(&current_header, &valid_headers);

            match result {
                Ok(work) => {
                    prop_assert!(work <= MAX_TARGET as Natural,
                        "Next work required must not exceed maximum target");
                    prop_assert!(work > 0, "Next work required must be positive");
                },
                Err(_) => {
                    // Invalid header windows may legitimately be rejected.
                }
            }
        }
    }
}
921
#[cfg(test)]
mod tests {
    use super::*;
    use crate::constants::MAX_TARGET;

    /// Convenience constructor: a header with fixed placeholder fields and
    /// the given timestamp / compact bits.
    fn mk_header(timestamp: u64, bits: u64) -> BlockHeader {
        BlockHeader {
            version: 1,
            prev_block_hash: [0; 32],
            merkle_root: [0; 32],
            timestamp,
            bits,
            nonce: 0,
        }
    }

    /// Runs a two-header difficulty adjustment whose window spans `elapsed`
    /// seconds, with both headers carrying the genesis bits.
    fn adjust_after(elapsed: u64) -> Result<Natural> {
        let first = mk_header(1000000, 0x1d00ffff);
        let last = mk_header(1000000 + elapsed, 0x1d00ffff);
        get_next_work_required(&last, &[first, last.clone()])
    }

    #[test]
    fn test_get_next_work_required_insufficient_headers() {
        let header = mk_header(1231006505, 0x1d00ffff);
        let result = get_next_work_required(&header, &[header.clone()]);
        assert!(result.is_err());
    }

    #[test]
    fn test_get_next_work_required_normal_adjustment() {
        // Exactly on-schedule window: difficulty is unchanged.
        let result = adjust_after(DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK).unwrap();
        assert_eq!(result, 0x1d00ffff);
    }

    #[test]
    fn test_get_next_work_required_fast_blocks() {
        let result =
            adjust_after(DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK / 2).unwrap();
        assert!(result <= 0x1d00ffff);
    }

    #[test]
    fn test_get_next_work_required_slow_blocks() {
        let result =
            adjust_after(DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK * 2).unwrap();
        assert!(result <= 0x1d00ffff);
    }

    #[test]
    fn test_get_next_work_required_extreme_fast_blocks() {
        let result =
            adjust_after(DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK / 14).unwrap();
        assert!(result <= 0x1d00ffff);
    }

    #[test]
    fn test_get_next_work_required_extreme_slow_blocks() {
        let result =
            adjust_after(DIFFICULTY_ADJUSTMENT_INTERVAL * TARGET_TIME_PER_BLOCK * 4).unwrap();
        assert!(result <= 0x1d00ffff);
    }

    #[test]
    fn test_difficulty_from_bits() {
        let d = difficulty_from_bits(0x1d00ffff).unwrap();
        assert!(
            (d - 1.0).abs() < 0.01,
            "Genesis difficulty should be ~1.0, got {d}"
        );
        let d_harder = difficulty_from_bits(0x1d000800).unwrap();
        assert!(d_harder > d, "Harder target should have higher difficulty");
    }

    #[test]
    fn test_expand_target() {
        assert!(!expand_target(0x0300ffff).unwrap().is_zero());
    }

    #[test]
    fn test_check_proof_of_work_genesis() {
        // A tiny target: the check must run without error either way.
        let header = mk_header(1231006505, 0x0300ffff);
        let result = check_proof_of_work(&header).unwrap();
        let _ = result;
    }

    #[test]
    fn test_expand_target_zero_mantissa() {
        assert!(expand_target(0x1d000000).unwrap().is_zero());
    }

    #[test]
    fn test_expand_target_invalid_exponent_too_small() {
        assert!(expand_target(0x0200ffff).is_err());
    }

    #[test]
    fn test_expand_target_invalid_exponent_too_large() {
        assert!(expand_target(0x2100ffff).is_err());
    }

    #[test]
    fn test_expand_target_exponent_31() {
        assert!(!expand_target(0x1f00ffff).unwrap().is_zero());
    }

    #[test]
    fn test_expand_target_exponent_32_regtest_bits() {
        assert!(!expand_target(0x2000ffff).unwrap().is_zero());
    }

    #[test]
    fn test_expand_target_exponent_3() {
        assert!(!expand_target(0x0300ffff).unwrap().is_zero());
    }

    #[test]
    fn test_expand_target_exponent_4() {
        assert!(!expand_target(0x0400ffff).unwrap().is_zero());
    }

    #[test]
    fn test_expand_target_exponent_29() {
        assert!(!expand_target(0x1d00ffff).unwrap().is_zero());
    }

    #[test]
    fn test_check_proof_of_work_invalid_target() {
        // Exponent 0x02 is below the minimum accepted by expand_target.
        let header = mk_header(1231006505, 0x0200ffff);
        assert!(check_proof_of_work(&header).is_err());
    }

    #[test]
    fn test_check_proof_of_work_valid_target() {
        let header = mk_header(1231006505, 0x1d00ffff);
        let result = check_proof_of_work(&header).unwrap();
        let _ = result;
    }

    #[test]
    fn test_u256_zero() {
        assert!(U256::zero().is_zero());
    }

    #[test]
    fn test_u256_from_u32() {
        assert!(!U256::from_u32(0x12345678).is_zero());
    }

    #[test]
    fn test_u256_from_u64() {
        assert!(!U256::from_u64(0x123456789abcdef0).is_zero());
    }

    #[test]
    fn test_u256_shl_zero_shift() {
        let value = U256::from_u32(0x12345678);
        assert_eq!(value.shl(0), value);
    }

    #[test]
    fn test_u256_shl_large_shift() {
        // Shifting by >= 256 bits clears the value entirely.
        assert!(U256::from_u32(0x12345678).shl(300).is_zero());
    }

    #[test]
    fn test_u256_shr_zero_shift() {
        let value = U256::from_u32(0x12345678);
        assert_eq!(value.shr(0), value);
    }

    #[test]
    fn test_u256_shr_large_shift() {
        assert!(U256::from_u32(0x12345678).shr(300).is_zero());
    }

    #[test]
    fn test_u256_shl_small_shift() {
        let value = U256::from_u32(0x12345678);
        let shifted = value.shl(8);
        assert!(!shifted.is_zero());
        assert_ne!(shifted, value);
    }

    #[test]
    fn test_u256_shr_small_shift() {
        let value = U256::from_u32(0x12345678);
        let shifted = value.shr(8);
        assert!(!shifted.is_zero());
        assert_ne!(shifted, value);
    }

    #[test]
    fn test_u256_to_bytes() {
        let bytes = U256::from_u32(0x12345678).to_bytes();
        assert_eq!(bytes.len(), 32);
    }

    #[test]
    fn test_u256_from_bytes() {
        let mut bytes = [0u8; 32];
        bytes[..4].copy_from_slice(&[0x78, 0x56, 0x34, 0x12]);
        assert!(!U256::from_bytes(&bytes).is_zero());
    }

    #[test]
    fn test_u256_ordering() {
        let small = U256::from_u32(0x12345678);
        let large = U256::from_u32(0x87654321);

        assert!(small < large);
        assert!(large > small);
        assert_eq!(small.cmp(&small), std::cmp::Ordering::Equal);
    }

    #[test]
    fn test_expand_compress_round_trip() {
        let test_bits = [0x1d00ffff, 0x1b0404cb, 0x0300ffff];

        for &bits in &test_bits {
            let expanded = match expand_target(bits) {
                Ok(t) => t,
                Err(_) => continue,
            };
            let compressed = match compress_target(&expanded) {
                Ok(b) => b,
                Err(_) => continue,
            };
            let re_expanded = match expand_target(compressed) {
                Ok(t) => t,
                Err(_) => continue,
            };

            // Compression truncates; it must never increase the target.
            if re_expanded > expanded {
                panic!(
                    "Round-trip failed for bits 0x{bits:08x}: re-expanded > original (compression should truncate, not add)"
                );
            }
            // The two most-significant limbs must survive the round trip.
            #[allow(clippy::eq_op)]
            let significant_words_match =
                expanded.0[2] == re_expanded.0[2] && expanded.0[3] == re_expanded.0[3];
            if !significant_words_match {
                panic!(
                    "Round-trip failed for bits 0x{:08x}: significant bits differ (expanded: {:?}, re-expanded: {:?})",
                    bits, expanded.0, re_expanded.0
                );
            }
        }
    }

    #[test]
    fn test_compress_target_genesis() {
        let genesis_bits = 0x1d00ffff;
        let expanded = expand_target(genesis_bits).unwrap();
        let compressed = compress_target(&expanded).unwrap();

        assert!(compressed <= MAX_TARGET as u64);
        assert!(compressed > 0);

        // Genesis bits round-trip exactly.
        assert_eq!(expand_target(compressed).unwrap(), expanded);
    }

    #[test]
    fn test_serialize_header() {
        let header = BlockHeader {
            version: 1,
            prev_block_hash: [1; 32],
            merkle_root: [2; 32],
            timestamp: 1234567890,
            bits: 0x1d00ffff,
            nonce: 0x12345678,
        };
        assert_eq!(serialize_header(&header).len(), 80);
    }

    #[test]
    fn test_serialize_header_returns_fixed_80_bytes() {
        let bytes: [u8; 80] = serialize_header(&mk_header(0, 0));
        assert_eq!(bytes.len(), 80);
    }

    #[test]
    fn test_serialize_header_field_layout() {
        let mut prev = [0u8; 32];
        prev[0] = 0xAA;
        prev[31] = 0xBB;
        let mut merkle = [0u8; 32];
        merkle[0] = 0xCC;
        merkle[31] = 0xDD;

        let header = BlockHeader {
            version: 0x01020304,
            prev_block_hash: prev,
            merkle_root: merkle,
            timestamp: 0x05060708,
            bits: 0x090A0B0C,
            nonce: 0x0D0E0F10,
        };

        let bytes = serialize_header(&header);

        // Version: little-endian u32 at offset 0.
        assert_eq!(&bytes[0..4], &[0x04, 0x03, 0x02, 0x01]);
        // Hash fields are copied verbatim.
        assert_eq!(bytes[4], 0xAA);
        assert_eq!(bytes[35], 0xBB);
        assert_eq!(bytes[36], 0xCC);
        assert_eq!(bytes[67], 0xDD);
        // Timestamp, bits, nonce: little-endian u32s at 68, 72, 76.
        assert_eq!(&bytes[68..72], &[0x08, 0x07, 0x06, 0x05]);
        assert_eq!(&bytes[72..76], &[0x0C, 0x0B, 0x0A, 0x09]);
        assert_eq!(&bytes[76..80], &[0x10, 0x0F, 0x0E, 0x0D]);
    }

    #[test]
    fn test_serialize_header_deterministic() {
        let header = BlockHeader {
            version: 1,
            prev_block_hash: [0xFF; 32],
            merkle_root: [0xAA; 32],
            timestamp: 1231006505,
            bits: 0x1d00ffff,
            nonce: 2083236893,
        };

        let bytes1 = serialize_header(&header);
        let bytes2 = serialize_header(&header);
        assert_eq!(bytes1, bytes2, "Header serialization must be deterministic");
    }

    #[test]
    fn test_serialize_header_different_headers_different_bytes() {
        let header1 = mk_header(1231006505, 0x1d00ffff);
        let mut header2 = header1.clone();
        header2.nonce = 1;

        let bytes1 = serialize_header(&header1);
        let bytes2 = serialize_header(&header2);
        assert_ne!(
            bytes1, bytes2,
            "Different nonces must produce different serializations"
        );

        assert_eq!(
            bytes1[..76],
            bytes2[..76],
            "Non-nonce bytes should be identical"
        );
        assert_ne!(bytes1[76..], bytes2[76..], "Nonce bytes should differ");
    }

    #[test]
    fn test_u256_from_bytes_simple() {
        assert_eq!(u256_from_bytes(&[0u8; 32]), 0);
    }

    #[test]
    fn test_u256_from_bytes_with_data() {
        let mut bytes = [0u8; 32];
        bytes[..4].copy_from_slice(&[0x78, 0x56, 0x34, 0x12]);
        assert_eq!(u256_from_bytes(&bytes), 0x78563412000000000000000000000000);
    }
}