use crate::constants::*;
/// Constants folded at compile time so hot paths never recompute them.
#[cfg(feature = "production")]
pub mod precomputed_constants {
use super::*;
pub const U64_MAX: u64 = u64::MAX;
pub const MAX_MONEY_U64: u64 = MAX_MONEY as u64;
pub const BTC_PER_SATOSHI: f64 = 1.0 / (SATOSHIS_PER_BTC as f64);
pub const U32_MAX: u32 = u32::MAX;
pub const ONE_BTC_SATOSHIS: i64 = SATOSHIS_PER_BTC;
}
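// A minimal usage sketch for the precomputed constants (an illustration, not
// part of the public API; assumes the `production` feature and that
// `SATOSHIS_PER_BTC`/`MAX_MONEY` come from `crate::constants` as imported above).
#[cfg(all(test, feature = "production"))]
mod precomputed_constants_examples {
    use super::precomputed_constants::*;

    #[test]
    fn constants_are_consistent() {
        // One BTC, converted through the precomputed reciprocal, is 1.0 BTC.
        assert!((ONE_BTC_SATOSHIS as f64 * BTC_PER_SATOSHI - 1.0).abs() < 1e-9);
        // The monetary cap fits below the integer limit.
        assert!(MAX_MONEY_U64 < U64_MAX);
    }
}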
/// A 32-byte hash aligned to a 32-byte boundary so it never straddles a
/// cache line.
#[repr(align(32))]
#[derive(Clone)]
pub struct CacheAlignedHash([u8; 32]);
impl CacheAlignedHash {
#[inline]
pub fn new(hash: [u8; 32]) -> Self {
Self(hash)
}
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
&self.0
}
}
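// A minimal sketch (illustrative test, not part of the API) showing that the
// wrapper preserves its bytes and the requested 32-byte alignment.
#[cfg(test)]
mod cache_aligned_hash_examples {
    use super::CacheAlignedHash;

    #[test]
    fn wrapper_preserves_bytes_and_alignment() {
        let hash = CacheAlignedHash::new([0xab; 32]);
        assert_eq!(hash.as_bytes(), &[0xab; 32]);
        assert_eq!(std::mem::align_of::<CacheAlignedHash>(), 32);
    }
}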
/// Best-effort cache prefetch hints; no-ops on unsupported architectures.
#[cfg(feature = "production")]
pub mod prefetch {
#[cfg(target_arch = "x86_64")]
#[inline(always)]
pub unsafe fn prefetch_read(ptr: *const i8) {
use std::arch::x86_64::{_mm_prefetch, _MM_HINT_T0};
_mm_prefetch(ptr, _MM_HINT_T0);
}
#[cfg(target_arch = "aarch64")]
#[inline(always)]
pub unsafe fn prefetch_read(ptr: *const i8) {
use std::arch::aarch64::_prefetch;
_prefetch(ptr, 0, 0); }
    /// No-op fallback for architectures without a prefetch intrinsic.
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    #[inline(always)]
    pub unsafe fn prefetch_read(_ptr: *const i8) {}
    /// Prefetch the element at `index` if it is in bounds; otherwise a no-op.
    #[inline(always)]
    pub fn prefetch_slice<T>(slice: &[T], index: usize) {
        if index < slice.len() {
            // SAFETY: `index` is in bounds, so `add(index)` stays within the
            // slice allocation; the prefetch itself is only a hint.
            unsafe {
                let ptr = slice.as_ptr().add(index) as *const i8;
                prefetch_read(ptr);
            }
        }
    }
    /// Prefetch the element `offset` positions ahead of `index`, saturating
    /// instead of overflowing.
    #[inline(always)]
    pub fn prefetch_ahead<T>(slice: &[T], index: usize, offset: usize) {
        let prefetch_index = index.saturating_add(offset);
        prefetch_slice(slice, prefetch_index);
    }
}
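// A usage sketch for the prefetch helpers (illustrative test; assumes the
// `production` feature): prefetching is purely advisory, so out-of-range
// indices are ignored and the calls never affect correctness.
#[cfg(all(test, feature = "production"))]
mod prefetch_examples {
    use super::prefetch;

    #[test]
    fn prefetch_is_only_a_hint() {
        let data = vec![0u64; 256];
        // Typical pattern: while processing index i, warm the data a fixed
        // distance ahead.
        for i in 0..data.len() {
            prefetch::prefetch_ahead(&data, i, 16);
            std::hint::black_box(data[i]);
        }
        // Out-of-range prefetches are silent no-ops rather than UB.
        prefetch::prefetch_slice(&data, usize::MAX);
    }
}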
/// A 9-byte, `#[repr(C, packed)]` interpreter frame. Packing removes all
/// padding, so fields must be read by value: the compiler rejects taking a
/// reference to a misaligned field of a packed struct.
#[repr(C, packed)]
pub struct CompactStackFrame {
    pub opcode: u8,
    pub flags: u32,
    pub script_offset: u16,
    pub stack_height: u16,
}
impl CompactStackFrame {
#[inline]
pub fn new(opcode: u8, flags: u32, script_offset: u16, stack_height: u16) -> Self {
Self {
opcode,
flags,
script_offset,
stack_height,
}
}
}
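// A sketch of safe access to the packed frame (illustrative test with
// hypothetical field values): fields are copied out by value, since
// `&frame.flags` would not compile for a packed struct.
#[cfg(test)]
mod compact_stack_frame_examples {
    use super::CompactStackFrame;

    #[test]
    fn packed_frame_is_nine_bytes_and_read_by_value() {
        let frame = CompactStackFrame::new(0x76, 0, 4, 2);
        let (opcode, flags) = (frame.opcode, frame.flags);
        assert_eq!((opcode, flags), (0x76, 0));
        // 1 + 4 + 2 + 2 bytes, no padding.
        assert_eq!(std::mem::size_of::<CompactStackFrame>(), 9);
    }
}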
/// Wraps a complete `fn` item and stamps it with `#[inline(always)]` for
/// hot-path code.
#[macro_export]
#[cfg(feature = "production")]
macro_rules! hot_inline {
    ($(#[$attr:meta])* $vis:vis fn $($rest:tt)*) => {
        $(#[$attr])*
        #[inline(always)]
        $vis fn $($rest)*
    };
}
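// A usage sketch for `hot_inline!` (illustrative test with a hypothetical
// `add_one` helper; assumes the `production` feature): the macro takes a
// whole `fn` item and re-emits it with `#[inline(always)]` attached.
#[cfg(all(test, feature = "production"))]
mod hot_inline_examples {
    crate::hot_inline! {
        pub fn add_one(x: u64) -> u64 {
            x + 1
        }
    }

    #[test]
    fn macro_preserves_behavior() {
        assert_eq!(add_one(41), 42);
    }
}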
#[cfg(feature = "production")]
pub mod constant_folding {
    /// SHA-256 of the empty byte string.
    pub const EMPTY_STRING_HASH: [u8; 32] = [
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9,
        0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52,
        0xb8, 0x55,
    ];
    /// Double SHA-256 (SHA-256d) of the empty byte string.
    pub const EMPTY_STRING_DOUBLE_HASH: [u8; 32] = [
        0x5d, 0xf6, 0xe0, 0xe2, 0x76, 0x13, 0x59, 0xd3, 0x0a, 0x82, 0x75, 0x05, 0x8e, 0x29, 0x9f,
        0xcc, 0x03, 0x81, 0x53, 0x45, 0x45, 0xf5, 0x5c, 0xf4, 0x3e, 0x41, 0x98, 0x3f, 0x5d, 0x4c,
        0x94, 0x56,
    ];
#[inline(always)]
pub fn is_empty_hash(hash: &[u8; 32]) -> bool {
*hash == EMPTY_STRING_HASH
}
#[inline(always)]
pub fn is_empty_double_hash(hash: &[u8; 32]) -> bool {
*hash == EMPTY_STRING_DOUBLE_HASH
}
#[inline(always)]
pub fn is_zero_hash(hash: &[u8; 32]) -> bool {
hash.iter().all(|&b| b == 0)
}
}
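// A sketch checking the precomputed fast paths against live hashing
// (illustrative test; assumes the `production` feature and the crate's
// `OptimizedSha256` helper used elsewhere in this file).
#[cfg(all(test, feature = "production"))]
mod constant_folding_examples {
    use super::constant_folding::*;

    #[test]
    fn empty_input_hits_the_precomputed_fast_path() {
        let hasher = crate::crypto::OptimizedSha256::new();
        assert!(is_empty_hash(&hasher.hash(b"")));
        assert!(!is_empty_hash(&[0u8; 32]));
        assert!(is_zero_hash(&[0u8; 32]));
    }
}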
#[cfg(feature = "production")]
#[allow(dead_code)]
pub mod dead_code_elimination {
#[inline(never)]
#[cold]
pub fn mark_unused() {
}
#[inline(always)]
pub fn unlikely(condition: bool) -> bool {
condition
}
}
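// A usage sketch for the branch hint (illustrative test with a hypothetical
// `checked_step` helper; assumes the `production` feature): the hint never
// changes what the branch computes.
#[cfg(all(test, feature = "production"))]
mod branch_hint_examples {
    use super::dead_code_elimination::unlikely;

    fn checked_step(counter: u64) -> u64 {
        if unlikely(counter == u64::MAX) {
            // Rare overflow path.
            return 0;
        }
        counter + 1
    }

    #[test]
    fn hint_does_not_change_semantics() {
        assert_eq!(checked_step(41), 42);
        assert_eq!(checked_step(u64::MAX), 0);
    }
}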
#[cfg(feature = "production")]
pub mod simd_vectorization {
use crate::crypto::OptimizedSha256;
use digest::Digest;
use ripemd::Ripemd160;
const PARALLEL_THRESHOLD: usize = 8;
#[inline]
fn chunk_size() -> usize {
blvm_primitives::ibd_tuning::hash_batch_chunk_size()
}
    /// SHA-256 of every input. Batches at or above `PARALLEL_THRESHOLD`
    /// dispatch to the AVX2 kernel on x86_64 when available; everything else
    /// uses the scalar hasher.
    pub fn batch_sha256(inputs: &[&[u8]]) -> Vec<[u8; 32]> {
        if inputs.is_empty() {
            return Vec::new();
        }
        if inputs.len() >= PARALLEL_THRESHOLD {
            // Only large batches amortize the SIMD dispatch overhead.
            #[cfg(target_arch = "x86_64")]
            {
                use crate::crypto::{avx2_batch, sha256_avx2};
                if sha256_avx2::is_avx2_available() {
                    return avx2_batch::batch_sha256_avx2(inputs);
                }
            }
        }
        let hasher = OptimizedSha256::new();
        let mut results = Vec::with_capacity(inputs.len());
        for chunk in inputs.chunks(chunk_size()) {
            for input in chunk {
                results.push(hasher.hash(input));
            }
        }
        results
    }
    /// Double SHA-256 convenience wrapper returning plain byte arrays.
    pub fn batch_double_sha256(inputs: &[&[u8]]) -> Vec<[u8; 32]> {
        batch_double_sha256_aligned(inputs)
            .into_iter()
            .map(|h| *h.as_bytes())
            .collect()
    }
    /// Double SHA-256 into cache-aligned hashes, sharing one hasher across
    /// the whole batch.
    pub fn batch_double_sha256_aligned(inputs: &[&[u8]]) -> Vec<super::CacheAlignedHash> {
        if inputs.is_empty() {
            return Vec::new();
        }
        let hasher = OptimizedSha256::new();
        let mut results = Vec::with_capacity(inputs.len());
        for chunk in inputs.chunks(chunk_size()) {
            for input in chunk {
                results.push(super::CacheAlignedHash::new(hasher.hash256(input)));
            }
        }
        results
    }
    /// RIPEMD-160 of every input.
    pub fn batch_ripemd160(inputs: &[&[u8]]) -> Vec<[u8; 20]> {
        if inputs.is_empty() {
            return Vec::new();
        }
        let mut results = Vec::with_capacity(inputs.len());
        for chunk in inputs.chunks(chunk_size()) {
            for input in chunk {
                let hash = Ripemd160::digest(input);
                let mut result = [0u8; 20];
                result.copy_from_slice(&hash);
                results.push(result);
            }
        }
        results
    }
    /// HASH160 (RIPEMD-160 of SHA-256) of every input.
    pub fn batch_hash160(inputs: &[&[u8]]) -> Vec<[u8; 20]> {
        if inputs.is_empty() {
            return Vec::new();
        }
        let hasher = OptimizedSha256::new();
        let mut results = Vec::with_capacity(inputs.len());
        for chunk in inputs.chunks(chunk_size()) {
            for input in chunk {
                let sha256_hash: [u8; 32] = hasher.hash(input);
                let ripemd160_hash = Ripemd160::digest(sha256_hash);
                let mut result = [0u8; 20];
                result.copy_from_slice(&ripemd160_hash);
                results.push(result);
            }
        }
        results
    }
}
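// A usage sketch for the batch hashers (illustrative test; assumes the
// `production` feature): every entry point must agree with one-shot scalar
// hashing regardless of which internal path (scalar, chunked, AVX2) runs.
#[cfg(all(test, feature = "production"))]
mod simd_vectorization_examples {
    use super::simd_vectorization::*;

    #[test]
    fn batch_paths_agree_with_scalar_hashing() {
        // 16 inputs is above PARALLEL_THRESHOLD, so the SIMD path is
        // exercised on x86_64 when AVX2 is available.
        let inputs: Vec<Vec<u8>> = (0u8..16).map(|i| vec![i; 64]).collect();
        let refs: Vec<&[u8]> = inputs.iter().map(|v| v.as_slice()).collect();
        let hasher = crate::crypto::OptimizedSha256::new();
        let batched = batch_sha256(&refs);
        assert_eq!(batched.len(), refs.len());
        for (input, hash) in refs.iter().zip(&batched) {
            assert_eq!(*hash, hasher.hash(input));
        }
    }
}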
#[cfg(feature = "production")]
pub use constant_folding::*;
#[cfg(feature = "production")]
pub use precomputed_constants::*;
#[cfg(feature = "production")]
pub mod proven_bounds {
use crate::constants::{MAX_INPUTS, MAX_OUTPUTS};
    /// Upper bound on a serialized transaction, used to pre-size byte buffers.
    pub const MAX_TX_SIZE_PROVEN: usize = 100_000;
    /// Upper bound on a serialized block, used to pre-size byte buffers.
    pub const MAX_BLOCK_SIZE_PROVEN: usize = 4_000_000;
    pub const MAX_INPUTS_PROVEN: usize = MAX_INPUTS;
    pub const MAX_OUTPUTS_PROVEN: usize = MAX_OUTPUTS;
    pub const MAX_TRANSACTIONS_PROVEN: usize = 10_000;
    pub const MAX_PREV_HEADERS_PROVEN: usize = 5;
}
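// A compile-time sanity sketch over the proven bounds (an added check, not
// from the original code): a maximum-size transaction must fit inside a
// maximum-size block, so a violation fails the build instead of a test.
#[cfg(feature = "production")]
const _: () = assert!(proven_bounds::MAX_TX_SIZE_PROVEN <= proven_bounds::MAX_BLOCK_SIZE_PROVEN);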
#[cfg(feature = "production")]
pub mod optimized_access {
use super::proven_bounds;
    /// Equivalent to `slice.get(index)`, written with `get_unchecked` after
    /// an explicit bounds test so the optimizer sees a single comparison.
    #[inline(always)]
    pub fn get_proven<T>(slice: &[T], index: usize) -> Option<&T> {
        if index < slice.len() {
            // SAFETY: `index < slice.len()` was checked on the line above.
            unsafe { Some(slice.get_unchecked(index)) }
        } else {
            None
        }
    }
#[inline(always)]
pub fn prealloc_proven<T>(max_size: usize) -> Vec<T> {
Vec::with_capacity(max_size)
}
#[inline(always)]
pub fn prealloc_tx_buffer() -> Vec<u8> {
prealloc_proven::<u8>(proven_bounds::MAX_TX_SIZE_PROVEN)
}
#[inline(always)]
pub fn prealloc_block_buffer() -> Vec<u8> {
prealloc_proven::<u8>(proven_bounds::MAX_BLOCK_SIZE_PROVEN)
}
}
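// A usage sketch for the bounds-proven accessors (illustrative test; assumes
// the `production` feature): `get_proven` matches `slice.get`, and the
// prealloc helpers return empty buffers already at proven capacity.
#[cfg(all(test, feature = "production"))]
mod optimized_access_examples {
    use super::optimized_access::*;
    use super::proven_bounds;

    #[test]
    fn accessors_match_std_and_buffers_are_preallocated() {
        let data = [10u32, 20, 30];
        assert_eq!(get_proven(&data, 1), Some(&20));
        assert_eq!(get_proven(&data, 3), None);

        let buf = prealloc_tx_buffer();
        assert!(buf.is_empty());
        assert!(buf.capacity() >= proven_bounds::MAX_TX_SIZE_PROVEN);
    }
}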
#[cfg(feature = "production")]
pub mod _optimized_access {
use super::optimized_access;
#[inline(always)]
pub fn get_proven_by_<T>(slice: &[T], index: usize) -> Option<&T> {
optimized_access::get_proven(slice, index)
}
}
#[cfg(feature = "production")]
pub use optimized_access::{prealloc_block_buffer, prealloc_tx_buffer};
#[cfg(feature = "production")]
pub mod reference_implementations {
    /// Plain `slice.get`, kept as the oracle the optimized accessor is
    /// checked against.
    #[inline(always)]
    pub fn get_proven_reference<T>(slice: &[T], index: usize) -> Option<&T> {
        slice.get(index)
    }
}
#[cfg(all(
feature = "production",
any(debug_assertions, feature = "runtime-invariants")
))]
pub mod runtime_assertions {
    use super::optimized_access::get_proven;
    use super::reference_implementations::get_proven_reference;
    /// Runs the optimized and reference accessors side by side and asserts
    /// that they agree before returning the optimized result.
#[inline(always)]
pub fn get_proven_checked<T>(slice: &[T], index: usize) -> Option<&T> {
let result_optimized = get_proven(slice, index);
let result_reference = get_proven_reference(slice, index);
debug_assert_eq!(
result_optimized.is_some(),
result_reference.is_some(),
"Optimization correctness check failed: optimized and reference disagree on Some/None"
);
if let (Some(opt_val), Some(ref_val)) = (result_optimized, result_reference) {
debug_assert_eq!(
opt_val as *const T,
ref_val as *const T,
"Optimization correctness check failed: optimized and reference return different pointers"
);
}
result_optimized
}
}
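// A usage sketch for the checked accessor (illustrative test; only compiled
// when the runtime-invariant checks above are active): it returns exactly
// what `slice.get` would, while asserting agreement along the way.
#[cfg(all(
    test,
    feature = "production",
    any(debug_assertions, feature = "runtime-invariants")
))]
mod runtime_assertions_examples {
    use super::runtime_assertions::get_proven_checked;

    #[test]
    fn checked_accessor_agrees_with_slice_get() {
        let data = [1u8, 2, 3];
        assert_eq!(get_proven_checked(&data, 2), Some(&3));
        assert_eq!(get_proven_checked(&data, 9), None);
    }
}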