use super::EntropyCache;
use crate::interbar_types::TradeSnapshot;
use crate::normalization_lut::soft_clamp_hurst_lut;
use libm;
use opendeviationbar_hurst;
use smallvec::SmallVec;
use wide::f64x2;
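/// Kaufman Efficiency Ratio: |net price change| divided by the sum of
/// absolute one-step changes. Returns 1.0 for a perfectly trending series,
/// approaches 0.0 for pure noise, and 0.0 on degenerate input.
///
/// Illustrative usage (a sketch, not from the original call sites):
/// ```ignore
/// let er = compute_kaufman_er(&[1.0, 2.0, 3.0, 4.0]);
/// assert!((er - 1.0).abs() < 1e-12); // monotonic series is maximally efficient
/// ```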
pub fn compute_kaufman_er(prices: &[f64]) -> f64 {
    if prices.len() < 2 {
        return 0.0;
    }
    let n = prices.len();
    let net_movement = (prices[n - 1] - prices[0]).abs();
    use wide::f64x4;
    // Accumulate the absolute one-step changes four lanes at a time.
    let mut volatility_vec = f64x4::splat(0.0);
    let chunks = (n - 1) / 4;
    for chunk_idx in 0..chunks {
        let i = chunk_idx * 4 + 1;
        let diff1 = (prices[i] - prices[i - 1]).abs();
        let diff2 = (prices[i + 1] - prices[i]).abs();
        let diff3 = (prices[i + 2] - prices[i + 1]).abs();
        let diff4 = (prices[i + 3] - prices[i + 2]).abs();
        volatility_vec += f64x4::new([diff1, diff2, diff3, diff4]);
    }
    let arr: [f64; 4] = volatility_vec.into();
    let mut volatility = arr[0] + arr[1] + arr[2] + arr[3];
    // Scalar tail: the last (n - 1) % 4 steps. The range ends exactly at n,
    // so no per-iteration bounds guard is needed.
    for i in (chunks * 4 + 1)..n {
        volatility += (prices[i] - prices[i - 1]).abs();
    }
    if volatility > f64::EPSILON {
        net_movement / volatility
    } else {
        0.0
    }
}
// Normalization constants for permutation entropy: ln(m!) for m = 2 and m = 3.
const LN_2_FACTORIAL: f64 = std::f64::consts::LN_2; // ln(2!) = ln 2
const LN_3_FACTORIAL: f64 = 1.791_759_469_228_055; // ln(3!) = ln 6
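/// Hurst exponent via rescaled-range (R/S) analysis, soft-clamped through a
/// LUT. Windows shorter than 64 samples return 0.5, the random-walk prior,
/// since R/S estimates are unreliable at that length.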
#[inline]
pub fn compute_hurst_rescaled_range(prices: &[f64]) -> f64 {
const MIN_SAMPLES: usize = 64;
    if prices.len() < MIN_SAMPLES {
        // Too short for a stable R/S estimate; 0.5 is the random-walk prior.
        return 0.5;
    }
let h = opendeviationbar_hurst::rssimple(prices);
soft_clamp_hurst(h)
}
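/// LUT-backed soft clamp that keeps the raw Hurst estimate inside its
/// usable range without a hard cutoff.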
#[inline]
pub fn soft_clamp_hurst(h: f64) -> f64 {
soft_clamp_hurst_lut(h)
}
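/// Bandt-Pompe permutation entropy, normalized to [0, 1]. Dispatches on
/// window length: embedding order m = 3 for n >= 30, m = 2 for shorter
/// windows, and a neutral 1.0 when fewer than 10 samples are available.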
#[inline]
pub fn compute_permutation_entropy(prices: &[f64]) -> f64 {
let n = prices.len();
    if n < 10 {
        // Too few points for meaningful ordinal statistics; report maximum entropy.
        return 1.0;
    }
if n >= 30 {
compute_permutation_entropy_m3_cached_batch(prices)
} else {
compute_permutation_entropy_m2(prices)
}
}
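/// Thin wrapper retained for call-site naming; it currently delegates
/// directly to the unrolled m = 3 implementation.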
#[inline]
fn compute_permutation_entropy_m3_cached_batch(prices: &[f64]) -> f64 {
compute_permutation_entropy_m3_simd_batch(prices)
}
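/// Order-2 permutation entropy: counts non-decreasing vs. decreasing steps
/// and normalizes the Shannon entropy of the two frequencies by ln(2!).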
#[inline]
fn compute_permutation_entropy_m2(prices: &[f64]) -> f64 {
debug_assert!(prices.len() >= 10);
let mut all_ascending = true;
for i in 0..prices.len() - 1 {
if prices[i] > prices[i + 1] {
all_ascending = false;
break;
}
}
    if all_ascending {
        // A monotonic series produces a single pattern: zero entropy.
        return 0.0;
    }
    // m = 2 has two ordinal patterns: non-decreasing (index 0) and decreasing (index 1).
    let mut counts = [0u16; 2];
    let n_patterns = prices.len() - 1;
for i in 0..n_patterns {
let idx = if prices[i] <= prices[i + 1] { 0 } else { 1 };
counts[idx] += 1;
}
let total = n_patterns as f64;
let reciprocal = 1.0 / total;
    let entropy: f64 = counts.iter().fold(0.0, |acc, &c| {
        if c > 0 {
            let p = (c as f64) * reciprocal;
            acc + (-p * libm::log(p))
        } else {
            acc
        }
    });
    // Normalize by ln(2!) so the result lies in [0, 1].
    entropy / LN_2_FACTORIAL
}
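/// Order-3 permutation entropy over overlapping triples. Monotonic inputs
/// short-circuit to 0.0; otherwise the six ordinal-pattern frequencies are
/// tallied in an unrolled loop and their Shannon entropy is normalized by
/// ln(3!).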
#[inline]
fn compute_permutation_entropy_m3_simd_batch(prices: &[f64]) -> f64 {
let n = prices.len();
let n_patterns = n - 2;
let mut is_monotonic_inc = true;
let mut is_monotonic_dec = true;
for i in 0..n - 1 {
let cmp = (prices[i] > prices[i + 1]) as u8;
is_monotonic_inc &= cmp == 0;
is_monotonic_dec &= cmp == 1;
if !is_monotonic_inc && !is_monotonic_dec {
break;
}
}
    if is_monotonic_inc || is_monotonic_dec {
        return 0.0;
    }
    // 3! = 6 possible ordinal patterns at m = 3; u16 counters assume fewer
    // than 65 536 patterns per bucket, which holds for the window sizes used here.
    let mut pattern_counts: [u16; 6] = [0; 6];
    // Despite the "simd" name, the hot loop below is manual 16x unrolling
    // rather than explicit vector intrinsics.
    let simd_bulk_patterns = (n_patterns / 16) * 16;
let mut i = 0;
while i < simd_bulk_patterns {
let p0 = ordinal_pattern_index_m3(prices[i], prices[i + 1], prices[i + 2]);
let p1 = ordinal_pattern_index_m3(prices[i + 1], prices[i + 2], prices[i + 3]);
let p2 = ordinal_pattern_index_m3(prices[i + 2], prices[i + 3], prices[i + 4]);
let p3 = ordinal_pattern_index_m3(prices[i + 3], prices[i + 4], prices[i + 5]);
let p4 = ordinal_pattern_index_m3(prices[i + 4], prices[i + 5], prices[i + 6]);
let p5 = ordinal_pattern_index_m3(prices[i + 5], prices[i + 6], prices[i + 7]);
let p6 = ordinal_pattern_index_m3(prices[i + 6], prices[i + 7], prices[i + 8]);
let p7 = ordinal_pattern_index_m3(prices[i + 7], prices[i + 8], prices[i + 9]);
let p8 = ordinal_pattern_index_m3(prices[i + 8], prices[i + 9], prices[i + 10]);
let p9 = ordinal_pattern_index_m3(prices[i + 9], prices[i + 10], prices[i + 11]);
let p10 = ordinal_pattern_index_m3(prices[i + 10], prices[i + 11], prices[i + 12]);
let p11 = ordinal_pattern_index_m3(prices[i + 11], prices[i + 12], prices[i + 13]);
let p12 = ordinal_pattern_index_m3(prices[i + 12], prices[i + 13], prices[i + 14]);
let p13 = ordinal_pattern_index_m3(prices[i + 13], prices[i + 14], prices[i + 15]);
let p14 = ordinal_pattern_index_m3(prices[i + 14], prices[i + 15], prices[i + 16]);
let p15 = ordinal_pattern_index_m3(prices[i + 15], prices[i + 16], prices[i + 17]);
pattern_counts[p0] += 1;
pattern_counts[p1] += 1;
pattern_counts[p2] += 1;
pattern_counts[p3] += 1;
pattern_counts[p4] += 1;
pattern_counts[p5] += 1;
pattern_counts[p6] += 1;
pattern_counts[p7] += 1;
pattern_counts[p8] += 1;
pattern_counts[p9] += 1;
pattern_counts[p10] += 1;
pattern_counts[p11] += 1;
pattern_counts[p12] += 1;
pattern_counts[p13] += 1;
pattern_counts[p14] += 1;
pattern_counts[p15] += 1;
i += 16;
}
let remainder_patterns = n_patterns - simd_bulk_patterns;
let remainder_8x = (remainder_patterns / 8) * 8;
let mut j = simd_bulk_patterns;
while j < simd_bulk_patterns + remainder_8x {
let p0 = ordinal_pattern_index_m3(prices[j], prices[j + 1], prices[j + 2]);
let p1 = ordinal_pattern_index_m3(prices[j + 1], prices[j + 2], prices[j + 3]);
let p2 = ordinal_pattern_index_m3(prices[j + 2], prices[j + 3], prices[j + 4]);
let p3 = ordinal_pattern_index_m3(prices[j + 3], prices[j + 4], prices[j + 5]);
let p4 = ordinal_pattern_index_m3(prices[j + 4], prices[j + 5], prices[j + 6]);
let p5 = ordinal_pattern_index_m3(prices[j + 5], prices[j + 6], prices[j + 7]);
let p6 = ordinal_pattern_index_m3(prices[j + 6], prices[j + 7], prices[j + 8]);
let p7 = ordinal_pattern_index_m3(prices[j + 7], prices[j + 8], prices[j + 9]);
pattern_counts[p0] += 1;
pattern_counts[p1] += 1;
pattern_counts[p2] += 1;
pattern_counts[p3] += 1;
pattern_counts[p4] += 1;
pattern_counts[p5] += 1;
pattern_counts[p6] += 1;
pattern_counts[p7] += 1;
j += 8;
}
for k in (simd_bulk_patterns + remainder_8x)..n_patterns {
let pattern_idx = ordinal_pattern_index_m3(prices[k], prices[k + 1], prices[k + 2]);
pattern_counts[pattern_idx] += 1;
}
let inv_total = 1.0 / n_patterns as f64;
    let entropy: f64 = pattern_counts.iter().fold(0.0, |acc, &count| {
        if count > 0 {
            let p = count as f64 * inv_total;
            acc + (-p * libm::log(p))
        } else {
            acc
        }
    });
    // Normalize by ln(3!) so the result lies in [0, 1].
    entropy / LN_3_FACTORIAL
}
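/// Branchless ordinal pattern index for a triple (a, b, c): the three
/// pairwise comparisons form a 3-bit key into a table of the 3! = 6
/// possible orderings.
///
/// Illustrative mapping (a sketch of the encoding, easily verified):
/// ```ignore
/// assert_eq!(ordinal_pattern_index_m3(1.0, 2.0, 3.0), 0); // a <= b <= c
/// assert_eq!(ordinal_pattern_index_m3(3.0, 2.0, 1.0), 5); // a > b > c
/// ```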
#[inline(always)]
pub fn ordinal_pattern_index_m3(a: f64, b: f64, c: f64) -> usize {
    // Slots 0b001 and 0b110 encode logically impossible orderings
    // (e.g. a > b > c yet a <= c) and default to 0.
    const LOOKUP: [usize; 8] = [5, 0, 3, 2, 4, 1, 0, 0];
let ab = (a <= b) as usize;
let bc = (b <= c) as usize;
let ac = (a <= c) as usize;
LOOKUP[(ab << 2) | (bc << 1) | ac]
}
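/// Open/high/low/close of a lookback window of trades in a single fold;
/// all zeros for an empty window.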
#[inline]
pub fn extract_ohlc_batch(lookback: &[&TradeSnapshot]) -> (f64, f64, f64, f64) {
if lookback.is_empty() {
return (0.0, 0.0, 0.0, 0.0);
}
let n = lookback.len();
let open = lookback[0].price.to_f64();
let close = lookback[n - 1].price.to_f64();
let (high, low) = lookback.iter().fold((f64::MIN, f64::MAX), |acc, t| {
let p = t.price.to_f64();
(acc.0.max(p), acc.1.min(p))
});
(open, high, low, close)
}
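/// Single-pass variant that also materializes the price series (inline up
/// to 256 elements via SmallVec) alongside the OHLC tuple, so downstream
/// entropy and Hurst computations can reuse one buffer.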
#[inline]
pub fn extract_prices_and_ohlc_cached(
lookback: &[&TradeSnapshot],
) -> (SmallVec<[f64; 256]>, (f64, f64, f64, f64)) {
if lookback.is_empty() {
return (SmallVec::new(), (0.0, 0.0, 0.0, 0.0));
}
let n = lookback.len();
let open = lookback[0].price.to_f64();
let close = lookback[n - 1].price.to_f64();
let mut prices = SmallVec::with_capacity(lookback.len());
let mut high = f64::MIN;
let mut low = f64::MAX;
for trade in lookback {
let p = trade.price.to_f64();
prices.push(p);
if p > high {
high = p;
}
if p < low {
low = p;
}
}
(prices, (open, high, low, close))
}
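/// Approximate entropy (ApEn) with embedding dimension `m` and tolerance
/// `r`. This is a normalized variant: phi(m) - phi(m + 1) is divided by
/// ln(n) and clamped to [0, 1]. Note that `compute_phi` below scores the
/// overall match fraction (-c ln c) rather than the textbook per-template
/// log average, so values are not directly comparable to classical ApEn.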
#[inline]
pub fn compute_approximate_entropy(prices: &[f64], m: usize, r: f64) -> f64 {
let n = prices.len();
if n < m + 1 {
return 0.0;
}
    let phi_m = compute_phi(prices, m, r);
    let phi_m1 = compute_phi(prices, m + 1, r);
    // ApEn = phi(m) - phi(m + 1), here normalized by ln(n) and clamped to [0, 1].
    ((phi_m - phi_m1) / libm::log(n as f64)).clamp(0.0, 1.0)
}
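/// Chebyshev-distance match test between two m-length templates, with a
/// two-lane SIMD fast path for the common m = 2 case.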
#[inline]
fn patterns_within_distance_simd(p1: &[f64], p2: &[f64], r: f64, m: usize) -> bool {
if m == 2 && p1.len() >= 2 && p2.len() >= 2 {
let v1 = f64x2::new([p1[0], p1[1]]);
let v2 = f64x2::new([p2[0], p2[1]]);
        let diffs = (v1 - v2).abs().to_array();
        diffs[0] <= r && diffs[1] <= r
} else {
let mut is_within_distance = true;
for k in 0..m.min(p1.len()).min(p2.len()) {
if (p1[k] - p2[k]).abs() > r {
is_within_distance = false;
break;
}
}
is_within_distance
}
}
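/// Strided approximation of phi for long windows: only every k-th template
/// pair is compared (k in {2, 3, 4} by window size) and the match count is
/// scaled back up by k^2.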
fn compute_phi_sampled(prices: &[f64], m: usize, r: f64) -> f64 {
let n = prices.len();
if n < m {
return 0.0;
}
let num_patterns = n - m + 1;
    // Coarser stride on longer windows; match counts are rescaled by stride^2 below.
    let sample_interval = if num_patterns >= 1000 {
        4
    } else if num_patterns >= 500 {
        3
    } else if num_patterns >= 300 {
        2
    } else {
        1
    };
let mut count = 0usize;
if sample_interval == 1 {
for i in 0..num_patterns {
let p1 = &prices[i..i + m];
for j in (i + 1)..num_patterns {
let p2 = &prices[j..j + m];
if patterns_within_distance_simd(p1, p2, r, m) {
count += 1;
}
}
}
} else {
for i in (0..num_patterns).step_by(sample_interval) {
let p1 = &prices[i..i + m];
for j in ((i + sample_interval)..num_patterns).step_by(sample_interval) {
let p2 = &prices[j..j + m];
if patterns_within_distance_simd(p1, p2, r, m) {
count += 1;
}
}
}
        // Sampling every k-th template visits roughly 1/k^2 of all pairs,
        // so scale the count back up.
        let interval_f64 = sample_interval as f64;
        count = (count as f64 * (interval_f64 * interval_f64)).round() as usize;
}
if count == 0 {
return 0.0;
}
let inv_total_pairs = 2.0 / (num_patterns as f64 * (num_patterns - 1) as f64);
let c = count as f64 * inv_total_pairs;
    -c * libm::log(c)
}
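/// Exact phi over all template pairs; delegates to the strided
/// approximation once the window exceeds 300 templates, where the O(n^2)
/// pair loop becomes the bottleneck.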
fn compute_phi(prices: &[f64], m: usize, r: f64) -> f64 {
let n = prices.len();
if n < m {
return 0.0;
}
let num_patterns = n - m + 1;
if num_patterns > 300 {
return compute_phi_sampled(prices, m, r);
}
let mut count = 0usize;
for i in 0..num_patterns {
let p1 = &prices[i..i + m];
for j in (i + 1)..num_patterns {
let p2 = &prices[j..j + m];
if patterns_within_distance_simd(p1, p2, r, m) {
count += 1;
}
}
}
if count == 0 {
return 0.0;
}
let inv_total_pairs = 2.0 / (num_patterns as f64 * (num_patterns - 1) as f64);
let c = count as f64 * inv_total_pairs;
    -c * libm::log(c)
}
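/// Adaptive entropy: permutation entropy for windows under 500 samples,
/// approximate entropy (m = 2, r = 0.2 * sigma) for longer ones.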
#[inline]
pub fn compute_entropy_adaptive(prices: &[f64]) -> f64 {
let n = prices.len();
    // Short windows: permutation entropy is cheaper and more stable than ApEn.
    if n < 500 {
        return compute_permutation_entropy(prices);
    }
let n_inv = 1.0 / n as f64;
let mean = prices.iter().sum::<f64>() * n_inv;
let variance = prices
.iter()
.map(|p| {
let d = p - mean;
d * d
})
.sum::<f64>()
* n_inv;
    let std = variance.sqrt();
    // Conventional ApEn tolerance: r = 0.2 * standard deviation.
    let r = 0.2 * std;
compute_approximate_entropy(prices, 2, r)
}
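/// Read-only cache probe: returns the cached permutation entropy for short
/// windows, or None when the window is long (ApEn results are not cached)
/// or no entry exists.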
#[inline]
pub fn compute_entropy_adaptive_cached_readonly(
prices: &[f64],
cache: &EntropyCache,
) -> Option<f64> {
let n = prices.len();
if n < 500 {
cache.get(prices)
} else {
None
}
}
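/// Caching variant of `compute_entropy_adaptive`: short windows consult the
/// `EntropyCache` and insert on miss; long windows recompute ApEn each call.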
#[inline]
pub fn compute_entropy_adaptive_cached(prices: &[f64], cache: &mut EntropyCache) -> f64 {
let n = prices.len();
if n < 500 {
if let Some(cached_entropy) = cache.get(prices) {
return cached_entropy;
}
let entropy = compute_permutation_entropy(prices);
cache.insert(prices, entropy);
return entropy;
}
let n_inv = 1.0 / n as f64;
let mean = prices.iter().sum::<f64>() * n_inv;
let variance = prices
.iter()
.map(|p| {
let d = p - mean;
d * d
})
.sum::<f64>()
* n_inv;
    let std = variance.sqrt();
    // Same tolerance convention as `compute_entropy_adaptive`: r = 0.2 * sigma.
    let r = 0.2 * std;
compute_approximate_entropy(prices, 2, r)
}
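
#[cfg(test)]
mod tests {
    // Illustrative sanity checks, added as a sketch: the test names and
    // tolerances are assumptions, not part of the original test suite.
    use super::*;

    #[test]
    fn kaufman_er_is_one_for_monotonic_series() {
        let prices: Vec<f64> = (0..32).map(f64::from).collect();
        assert!((compute_kaufman_er(&prices) - 1.0).abs() < 1e-12);
    }

    #[test]
    fn permutation_entropy_is_zero_for_monotonic_series() {
        let prices: Vec<f64> = (0..64).map(f64::from).collect();
        assert_eq!(compute_permutation_entropy(&prices), 0.0);
    }

    #[test]
    fn ordinal_pattern_indices_cover_all_six_orderings() {
        let triples = [
            (1.0, 2.0, 3.0),
            (1.0, 3.0, 2.0),
            (2.0, 1.0, 3.0),
            (3.0, 1.0, 2.0),
            (2.0, 3.0, 1.0),
            (3.0, 2.0, 1.0),
        ];
        let mut seen = [false; 6];
        for &(a, b, c) in &triples {
            seen[ordinal_pattern_index_m3(a, b, c)] = true;
        }
        assert!(seen.iter().all(|&s| s));
    }
}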