Skip to main content

rangebar_core/intrabar/
features.rs

1//! Intra-bar feature computation from constituent trades.
2//!
3//! Issue #59: Intra-bar microstructure features for large range bars.
4//!
5//! This module computes 22 features from trades WITHIN each range bar:
6//! - 8 ITH features (from trading-fitness algorithms)
7//! - 12 statistical features
8//! - 2 complexity features (Hurst, Permutation Entropy)
9//!
10//! # FILE-SIZE-OK
11//! 942 lines: Large existing file with multiple feature computation functions.
12//! Keeping together maintains performance optimization context.
13
14use crate::types::AggTrade;
15use smallvec::SmallVec;
16
17use super::drawdown::compute_max_drawdown_and_runup;
18use super::ith::{bear_ith, bull_ith};
19use super::normalize::{
20    normalize_cv, normalize_drawdown, normalize_epochs, normalize_excess, normalize_runup,
21};
22use super::normalization_lut::soft_clamp_hurst_lut;
23
/// Pre-computed ln(3!) = ln(6) for permutation entropy normalization (m=3, Bandt-Pompe).
/// Avoids per-bar ln() call. Task #9.
///
/// Fix: ln(6) = 1.7917594692280550; the previous value ended in ...228_327
/// (a transcription typo in the last three digits, ~2.7e-13 off).
const MAX_ENTROPY_M3: f64 = 1.791_759_469_228_055;
27
/// All 22 intra-bar features computed from constituent trades.
///
/// All ITH-based features are normalized to [0, 1] for LSTM consumption.
/// Statistical features preserve their natural ranges.
/// Optional fields return None when insufficient data.
///
/// Populated by [`compute_intra_bar_features`]; bars with 0 or 1 trades take
/// the cold paths and get only the trivially-defined fields.
#[derive(Debug, Clone, Default)]
pub struct IntraBarFeatures {
    // === ITH Features (8) - All bounded [0, 1] ===
    /// Bull epoch density: sigmoid(epochs/trade_count, 0.5, 10)
    pub intra_bull_epoch_density: Option<f64>,
    /// Bear epoch density: sigmoid(epochs/trade_count, 0.5, 10)
    pub intra_bear_epoch_density: Option<f64>,
    /// Bull excess gain (sum): tanh-normalized to [0, 1]
    pub intra_bull_excess_gain: Option<f64>,
    /// Bear excess gain (sum): tanh-normalized to [0, 1]
    pub intra_bear_excess_gain: Option<f64>,
    /// Bull intervals CV: sigmoid-normalized to [0, 1]
    pub intra_bull_cv: Option<f64>,
    /// Bear intervals CV: sigmoid-normalized to [0, 1]
    pub intra_bear_cv: Option<f64>,
    /// Max drawdown in bar: already [0, 1]
    pub intra_max_drawdown: Option<f64>,
    /// Max runup in bar: already [0, 1]
    pub intra_max_runup: Option<f64>,

    // === Statistical Features (12) ===
    /// Number of trades in the bar (always Some once computed; 0 for empty bars)
    pub intra_trade_count: Option<u32>,
    /// Order Flow Imbalance: (buy_vol - sell_vol) / total_vol, [-1, 1]
    pub intra_ofi: Option<f64>,
    /// Duration of bar in microseconds (last minus first trade timestamp)
    pub intra_duration_us: Option<i64>,
    /// Trade intensity: trades per second (falls back to trade count for zero-duration bars)
    pub intra_intensity: Option<f64>,
    /// VWAP position within price range: [0, 1] (0.5 when the range is degenerate)
    pub intra_vwap_position: Option<f64>,
    /// Count imbalance: (buy_count - sell_count) / total_count, [-1, 1]
    pub intra_count_imbalance: Option<f64>,
    /// Kyle's Lambda proxy (normalized); None when volume imbalance is ~0
    pub intra_kyle_lambda: Option<f64>,
    /// Burstiness (Goh-Barabási): [-1, 1]; None for uniform inter-arrival times
    pub intra_burstiness: Option<f64>,
    /// Volume skewness (requires >= 3 trades and non-degenerate variance)
    pub intra_volume_skew: Option<f64>,
    /// Volume excess kurtosis (requires >= 3 trades and non-degenerate variance)
    pub intra_volume_kurt: Option<f64>,
    /// Kaufman Efficiency Ratio: [0, 1]
    pub intra_kaufman_er: Option<f64>,
    /// Garman-Klass volatility estimator (0.0 for degenerate bars)
    pub intra_garman_klass_vol: Option<f64>,

    // === Complexity Features (2) - Require many trades ===
    /// Hurst exponent via DFA (requires >= 64 trades)
    pub intra_hurst: Option<f64>,
    /// Permutation entropy (requires >= 60 trades)
    pub intra_permutation_entropy: Option<f64>,
}
85
86/// Cold path: return features for zero-trade bar
87/// Extracted to improve instruction cache locality on the hot path
88#[cold]
89#[inline(never)]
90fn intra_bar_zero_trades() -> IntraBarFeatures {
91    IntraBarFeatures {
92        intra_trade_count: Some(0),
93        ..Default::default()
94    }
95}
96
97/// Cold path: return features for single-trade bar
98#[cold]
99#[inline(never)]
100fn intra_bar_single_trade() -> IntraBarFeatures {
101    IntraBarFeatures {
102        intra_trade_count: Some(1),
103        intra_duration_us: Some(0),
104        intra_intensity: Some(0.0),
105        intra_ofi: Some(0.0),
106        ..Default::default()
107    }
108}
109
110/// Cold path: return features for bar with invalid first price
111#[cold]
112#[inline(never)]
113fn intra_bar_invalid_price(n: usize) -> IntraBarFeatures {
114    IntraBarFeatures {
115        intra_trade_count: Some(n as u32),
116        ..Default::default()
117    }
118}
119
120/// Compute all intra-bar features from constituent trades.
121///
122/// This is the main entry point for computing ITH and statistical features
123/// from the trades that formed a range bar.
124///
125/// # Arguments
126/// * `trades` - Slice of AggTrade records within the bar
127///
128/// # Returns
129/// `IntraBarFeatures` struct with all 22 features (or None for insufficient data)
130///
131/// Issue #96 Task #173: Uses reusable scratch buffers if available for zero-copy extraction
132/// Issue #96 Task #52: #[inline] for delegation to _with_scratch
133#[inline]
134pub fn compute_intra_bar_features(trades: &[AggTrade]) -> IntraBarFeatures {
135    let mut scratch_prices = SmallVec::<[f64; 64]>::new();
136    let mut scratch_volumes = SmallVec::<[f64; 64]>::new();
137    compute_intra_bar_features_with_scratch(trades, &mut scratch_prices, &mut scratch_volumes)
138}
139
/// Optimized version accepting reusable scratch buffers
/// Issue #96 Task #173: Avoids per-bar heap allocation by reusing buffers across bars
/// Issue #96 Task #88: #[inline] — per-bar dispatcher called from processor hot path
///
/// Both scratch buffers are cleared on entry, so callers need no pre-cleanup.
/// After the call, `scratch_prices` holds the raw per-trade prices and
/// `scratch_volumes` has been repurposed to hold the first-price-normalized
/// series (despite its name — it never holds volumes here).
#[inline]
pub fn compute_intra_bar_features_with_scratch(
    trades: &[AggTrade],
    scratch_prices: &mut SmallVec<[f64; 64]>,
    scratch_volumes: &mut SmallVec<[f64; 64]>,
) -> IntraBarFeatures {
    let n = trades.len();

    // Issue #96 Task #193: Early-exit dispatcher for small intra-bar feature computation
    // Skip only expensive complexity features (Hurst, PE) for bars with insufficient data
    // ITH computation is linear and inexpensive, always included for n >= 2
    if n == 0 {
        return intra_bar_zero_trades();
    }
    if n == 1 {
        return intra_bar_single_trade();
    }

    // Extract price series from trades, reusing scratch buffer (Issue #96 Task #173)
    scratch_prices.clear();
    scratch_prices.reserve(n);
    for trade in trades {
        scratch_prices.push(trade.price.to_f64());
    }

    // Normalize prices to start at 1.0 for ITH computation
    let first_price = scratch_prices[0];
    // Guard: a zero/negative/NaN/inf first price would poison every downstream
    // feature via the reciprocal below, so bail out with the count-only cold path.
    if first_price <= 0.0 || !first_price.is_finite() {
        return intra_bar_invalid_price(n);
    }
    // Reuse scratch buffer for normalized prices (Issue #96 Task #173)
    // Issue #96: Pre-compute reciprocal to replace per-element division with multiplication
    let inv_first_price = 1.0 / first_price;
    scratch_volumes.clear();
    scratch_volumes.reserve(n);
    for &p in scratch_prices.iter() {
        scratch_volumes.push(p * inv_first_price);
    }
    let normalized = scratch_volumes;  // Rebind for clarity

    // Compute max_drawdown and max_runup in single pass (Issue #96 Task #66: merged computation)
    let (max_dd, max_ru) = compute_max_drawdown_and_runup(normalized);

    // Compute Bull ITH with max_drawdown as TMAEG
    let bull_result = bull_ith(normalized, max_dd);

    // Compute Bear ITH with max_runup as TMAEG
    let bear_result = bear_ith(normalized, max_ru);

    // Sum excess gains for normalization
    let bull_excess_sum: f64 = bull_result.excess_gains.iter().sum();
    let bear_excess_sum: f64 = bear_result.excess_gains.iter().sum();

    // Compute statistical features (uses the raw, un-normalized prices)
    let stats = compute_statistical_features(trades, scratch_prices);

    // Compute complexity features (only if enough trades)
    // Thresholds: Hurst needs >= 64 points for a stable DFA fit; PE with m=3
    // needs >= 60 for adequately populated pattern counts.
    let hurst = if n >= 64 {
        Some(compute_hurst_dfa(normalized))
    } else {
        None
    };
    let pe = if n >= 60 {
        Some(compute_permutation_entropy(scratch_prices, 3))
    } else {
        None
    };

    IntraBarFeatures {
        // ITH features (normalized to [0, 1])
        intra_bull_epoch_density: Some(normalize_epochs(bull_result.num_of_epochs, n)),
        intra_bear_epoch_density: Some(normalize_epochs(bear_result.num_of_epochs, n)),
        intra_bull_excess_gain: Some(normalize_excess(bull_excess_sum)),
        intra_bear_excess_gain: Some(normalize_excess(bear_excess_sum)),
        intra_bull_cv: Some(normalize_cv(bull_result.intervals_cv)),
        intra_bear_cv: Some(normalize_cv(bear_result.intervals_cv)),
        intra_max_drawdown: Some(normalize_drawdown(bull_result.max_drawdown)),
        intra_max_runup: Some(normalize_runup(bear_result.max_runup)),

        // Statistical features
        intra_trade_count: Some(n as u32),
        intra_ofi: Some(stats.ofi),
        intra_duration_us: Some(stats.duration_us),
        intra_intensity: Some(stats.intensity),
        intra_vwap_position: Some(stats.vwap_position),
        intra_count_imbalance: Some(stats.count_imbalance),
        intra_kyle_lambda: stats.kyle_lambda,
        intra_burstiness: stats.burstiness,
        intra_volume_skew: stats.volume_skew,
        intra_volume_kurt: stats.volume_kurt,
        intra_kaufman_er: stats.kaufman_er,
        intra_garman_klass_vol: Some(stats.garman_klass_vol),

        // Complexity features
        intra_hurst: hurst,
        intra_permutation_entropy: pe,
    }
}
241
/// Intermediate struct for statistical features computation.
/// Internal carrier between `compute_statistical_features` and the
/// `IntraBarFeatures` assembly; Option fields mirror the public struct.
struct StatisticalFeatures {
    /// Order Flow Imbalance, [-1, 1]
    ofi: f64,
    /// Bar duration (last minus first trade timestamp)
    duration_us: i64,
    /// Trades per second (trade count for zero-duration bars)
    intensity: f64,
    /// VWAP position within [low, high], [0, 1]
    vwap_position: f64,
    /// (buy_count - sell_count) / total_count, [-1, 1]
    count_imbalance: f64,
    /// Price-impact proxy; None when n < 2 or the volume imbalance is ~0
    kyle_lambda: Option<f64>,
    /// Goh-Barabási burstiness; None for uniform or too-few intervals
    burstiness: Option<f64>,
    /// Volume skewness; None when n < 3 or variance is degenerate
    volume_skew: Option<f64>,
    /// Volume excess kurtosis; None when n < 3 or variance is degenerate
    volume_kurt: Option<f64>,
    /// Kaufman Efficiency Ratio; None when n < 2
    kaufman_er: Option<f64>,
    /// Garman-Klass volatility (0.0 for degenerate bars)
    garman_klass_vol: f64,
}
256
257/// Compute statistical features from trades
258fn compute_statistical_features(trades: &[AggTrade], prices: &[f64]) -> StatisticalFeatures {
259    let n = trades.len();
260
261    // Issue #96 Task #188: Conversion caching - eliminate redundant FixedPoint-to-f64 conversions
262    // Cache volume conversions in SmallVec to reuse across passes (avoid 2x conversions per trade)
263    // Expected speedup: 3-5% on statistical feature computation (eliminates ~n volume.to_f64() calls)
264
265    // Pre-allocate volume cache with inline capacity for typical bar sizes (< 128 trades)
266    let mut cached_volumes = SmallVec::<[f64; 128]>::with_capacity(n);
267
268    let mut buy_vol = 0.0_f64;
269    let mut sell_vol = 0.0_f64;
270    let mut buy_count = 0_u32;
271    let mut sell_count = 0_u32;
272    let mut total_turnover = 0.0_f64;
273    let mut sum_vol = 0.0_f64;
274    let mut high = f64::NEG_INFINITY;
275    let mut low = f64::INFINITY;
276
277    // Pass 1: Convert volumes once, accumulate, track high/low
278    for trade in trades {
279        let vol = trade.volume.to_f64();  // Converted once only
280        cached_volumes.push(vol);  // Cache for Pass 2
281        let price = prices[cached_volumes.len() - 1];  // Use pre-converted prices (Issue #96 Task #173)
282
283        total_turnover += price * vol;
284        sum_vol += vol;
285
286        if trade.is_buyer_maker {
287            sell_vol += vol;
288            sell_count += trade.individual_trade_count() as u32;
289        } else {
290            buy_vol += vol;
291            buy_count += trade.individual_trade_count() as u32;
292        }
293
294        // Track high/low during first pass (Issue #96 Task #63: eliminated separate fold pass)
295        high = high.max(price);
296        low = low.min(price);
297    }
298
299    let vol_count = n;
300    let mean_vol = if vol_count > 0 { sum_vol / vol_count as f64 } else { 0.0 };
301
302    // Pass 2: Compute central moments using cached volumes (no conversion, no indexing overhead)
303    let mut m2_vol = 0.0_f64; // sum of (v - mean)^2
304    let mut m3_vol = 0.0_f64; // sum of (v - mean)^3
305    let mut m4_vol = 0.0_f64; // sum of (v - mean)^4
306
307    for &vol in cached_volumes.iter() {
308        // Issue #96 Task #196: Maximize ILP by pre-computing all powers
309        // Compute all powers first (d2, d3, d4) before accumulating
310        // This allows CPU to execute 3 independent additions in parallel
311        let d = vol - mean_vol;
312        let d2 = d * d;
313        let d3 = d2 * d;
314        let d4 = d2 * d2;
315
316        // All 3 accumulations are independent (CPU can parallelize)
317        m2_vol += d2;
318        m3_vol += d3;
319        m4_vol += d4;
320    }
321
322    let total_vol = buy_vol + sell_vol;
323    let total_count = (buy_count + sell_count) as f64;
324
325    // OFI: Order Flow Imbalance
326    let ofi = if total_vol > f64::EPSILON {
327        (buy_vol - sell_vol) / total_vol
328    } else {
329        0.0
330    };
331
332    // Duration
333    let first_ts = trades.first().map(|t| t.timestamp).unwrap_or(0);
334    let last_ts = trades.last().map(|t| t.timestamp).unwrap_or(0);
335    let duration_us = last_ts - first_ts;
336    // Issue #96: Multiply by reciprocal instead of dividing (avoids fdiv in hot path)
337    let duration_sec = duration_us as f64 * 1e-6;
338
339    // Intensity: trades per second
340    let intensity = if duration_sec > f64::EPSILON {
341        n as f64 / duration_sec
342    } else {
343        n as f64 // Instant bar
344    };
345
346    // VWAP position (Issue #96 Task #63: high/low cached inline during trades loop)
347    let vwap = if total_vol > f64::EPSILON {
348        total_turnover / total_vol
349    } else {
350        prices.first().copied().unwrap_or(0.0)
351    };
352    // High/low already computed inline during main trades loop (eliminates fold pass)
353    let range = high - low;
354    let vwap_position = if range > f64::EPSILON {
355        ((vwap - low) / range).clamp(0.0, 1.0)
356    } else {
357        0.5
358    };
359
360    // Count imbalance
361    let count_imbalance = if total_count > f64::EPSILON {
362        (buy_count as f64 - sell_count as f64) / total_count
363    } else {
364        0.0
365    };
366
367    // Kyle's Lambda (requires >= 2 trades)
368    let kyle_lambda = if n >= 2 && total_vol > f64::EPSILON {
369        let first_price = prices[0];
370        let last_price = prices[n - 1];
371        let price_return = if first_price.abs() > f64::EPSILON {
372            (last_price - first_price) / first_price
373        } else {
374            0.0
375        };
376        let normalized_imbalance = (buy_vol - sell_vol) / total_vol;
377        if normalized_imbalance.abs() > f64::EPSILON {
378            Some(price_return / normalized_imbalance)
379        } else {
380            None
381        }
382    } else {
383        None
384    };
385
386    // Issue #96 Task #61: Optimize burstiness with early-exit and SmallVec
387    // Burstiness (requires >= 3 trades for meaningful inter-arrival times)
388    let burstiness = if n >= 3 {
389        // Compute inter-arrival intervals using direct indexing with SmallVec (no Vec allocation)
390        let mut intervals = SmallVec::<[f64; 64]>::new();
391        for i in 0..n - 1 {
392            intervals.push((trades[i + 1].timestamp - trades[i].timestamp) as f64);
393        }
394
395        if intervals.len() >= 2 {
396            // Issue #96: Pre-compute reciprocal to avoid repeated division
397            let inv_len = 1.0 / intervals.len() as f64;
398            let mean_tau: f64 = intervals.iter().sum::<f64>() * inv_len;
399            let variance: f64 = intervals
400                .iter()
401                .map(|&x| {
402                    let d = x - mean_tau;
403                    d * d  // Multiply instead of powi(2)
404                })
405                .sum::<f64>()
406                * inv_len;
407            let std_tau = variance.sqrt();
408
409            // Early-exit if intervals are uniform (common in tick data)
410            if std_tau <= f64::EPSILON {
411                None // Uniform spacing = undefined burstiness
412            } else if (std_tau + mean_tau).abs() > f64::EPSILON {
413                Some((std_tau - mean_tau) / (std_tau + mean_tau))
414            } else {
415                None
416            }
417        } else {
418            None
419        }
420    } else {
421        None
422    };
423
424    // Volume moments computed inline above (Issue #96 Task #69)
425    let (volume_skew, volume_kurt) = if n >= 3 {
426        // Issue #96: reciprocal caching — single division for 3 moment normalizations
427        let inv_n = 1.0 / n as f64;
428        let m2_norm = m2_vol * inv_n;
429        let m3_norm = m3_vol * inv_n;
430        let m4_norm = m4_vol * inv_n;
431        let std_v = m2_norm.sqrt();
432
433        if std_v > f64::EPSILON {
434            // Issue #96 Task #170: Memoize powi() calls with multiplication chains
435            let std_v2 = std_v * std_v;
436            let std_v3 = std_v2 * std_v;
437            let std_v4 = std_v2 * std_v2;
438            (Some(m3_norm / std_v3), Some(m4_norm / std_v4 - 3.0))
439        } else {
440            (None, None)
441        }
442    } else {
443        (None, None)
444    };
445
446    // Kaufman Efficiency Ratio (requires >= 2 trades)
447    let kaufman_er = if n >= 2 {
448        let net_move = (prices[n - 1] - prices[0]).abs();
449
450        // Issue #96 Task #59: Replace .windows(2) with direct indexing to avoid iterator overhead
451        let mut path_length = 0.0;
452        for i in 0..n - 1 {
453            path_length += (prices[i + 1] - prices[i]).abs();
454        }
455
456        if path_length > f64::EPSILON {
457            Some((net_move / path_length).clamp(0.0, 1.0))
458        } else {
459            Some(1.0) // No movement = perfectly efficient
460        }
461    } else {
462        None
463    };
464
465    // Garman-Klass volatility
466    // Issue #96 Task #197: Pre-compute constant, use multiplication instead of powi
467    const GK_SCALE: f64 = 0.6137;  // 2.0 * 2.0_f64.ln() - 1.0 = 0.6137...
468    let open = prices[0];
469    let close = prices[n - 1];
470    let garman_klass_vol = if high > low && high > 0.0 && open > 0.0 {
471        let hl_ratio = (high / low).ln();
472        let co_ratio = (close / open).ln();
473        // Replace powi(2) with multiplication (3-5x faster)
474        let hl_sq = hl_ratio * hl_ratio;
475        let co_sq = co_ratio * co_ratio;
476        let gk_var = 0.5 * hl_sq - GK_SCALE * co_sq;
477        gk_var.max(0.0).sqrt()
478    } else {
479        0.0
480    };
481
482    StatisticalFeatures {
483        ofi,
484        duration_us,
485        intensity,
486        vwap_position,
487        count_imbalance,
488        kyle_lambda,
489        burstiness,
490        volume_skew,
491        volume_kurt,
492        kaufman_er,
493        garman_klass_vol,
494    }
495}
496
/// Compute Hurst exponent via Detrended Fluctuation Analysis (DFA).
///
/// The Hurst exponent measures long-term memory:
/// - H < 0.5: Mean-reverting (anti-persistent)
/// - H = 0.5: Random walk
/// - H > 0.5: Trending (persistent)
///
/// Requires at least 64 observations for reliable estimation.
/// Returns the slope of the log-log fluctuation-vs-scale regression,
/// soft-clamped into [0, 1]; degenerate inputs fall back to 0.5.
fn compute_hurst_dfa(prices: &[f64]) -> f64 {
    let n = prices.len();
    if n < 64 {
        return 0.5; // Default to random walk for insufficient data
    }

    // Issue #96 Task #57: Use SmallVec for cumulative deviations
    // Compute cumulative deviation from mean (the DFA "profile" series)
    let mean: f64 = prices.iter().sum::<f64>() / n as f64;
    let mut y = SmallVec::<[f64; 256]>::new();
    let mut cumsum = 0.0;
    for &p in prices.iter() {
        cumsum += p - mean;
        y.push(cumsum);
    }

    // Scale range from n/4 (floored at 8) to n/2; scales grow geometrically
    // by a factor of 1.5 in the loop below, giving a handful of log-spaced points
    let min_scale = (n / 4).max(8);
    let max_scale = n / 2;

    // Issue #96 Task #57: SmallVec for log vectors — DFA has 8-12 scale points
    // Inline storage eliminates 2 heap allocations per DFA call
    let mut log_scales = SmallVec::<[f64; 12]>::new();
    let mut log_fluctuations = SmallVec::<[f64; 12]>::new();

    let mut scale = min_scale;
    while scale <= max_scale {
        let num_segments = n / scale;
        if num_segments < 2 {
            break;
        }

        // Issue #96 Task #192: Memoize x_mean computation outside segment loop
        // Only depends on scale, not on segment index, so compute once and reuse
        let x_mean = (scale - 1) as f64 / 2.0;
        // Issue #96: Pre-compute xx_sum analytically: sum_{i=0}^{n-1} (i - mean)^2 = n*(n^2-1)/12
        // Eliminates per-element (delta_x * delta_x) accumulation from inner loop
        let scale_f64 = scale as f64;
        let inv_scale = 1.0 / scale_f64;
        let xx_sum = scale_f64 * (scale_f64 * scale_f64 - 1.0) / 12.0;

        let mut total_fluctuation = 0.0;
        let mut segment_count = 0;

        for seg in 0..num_segments {
            let start = seg * scale;
            let end = start + scale;
            if end > n {
                break;
            }

            // Issue #96: Single-pass linear detrend + RMS via algebraic identity
            // Fuses two passes into one: accumulate xy_sum, y_sum, sum_y_sq in a single loop.
            // Then RMS = sqrt((yy_sum - xy_sum²/xx_sum) / n) where yy_sum = sum_y_sq - y_sum²/n
            let mut xy_sum = 0.0;
            let mut y_sum = 0.0;
            let mut sum_y_sq = 0.0;

            for (i, &yi) in y[start..end].iter().enumerate() {
                let delta_x = i as f64 - x_mean;
                xy_sum += delta_x * yi;
                y_sum += yi;
                sum_y_sq += yi * yi;
            }

            // Detrended RMS via closed-form: rms² = (yy - xy²/xx) / n
            let yy_sum = sum_y_sq - y_sum * y_sum * inv_scale;
            let rms = if xx_sum > f64::EPSILON {
                let rms_sq = yy_sum - xy_sum * xy_sum / xx_sum;
                // max(0.0) guards against tiny negative values from FP cancellation
                (rms_sq.max(0.0) * inv_scale).sqrt()
            } else {
                (yy_sum.max(0.0) * inv_scale).sqrt()
            };

            total_fluctuation += rms;
            segment_count += 1;
        }

        if segment_count > 0 {
            let avg_fluctuation = total_fluctuation / segment_count as f64;
            if avg_fluctuation > f64::EPSILON {
                log_scales.push((scale as f64).ln());
                log_fluctuations.push(avg_fluctuation.ln());
            }
        }

        scale = (scale as f64 * 1.5).ceil() as usize;
    }

    // Linear regression for Hurst exponent (slope of log F(s) vs log s)
    if log_scales.len() < 2 {
        return 0.5;
    }

    let n_points = log_scales.len() as f64;
    let inv_n_points = 1.0 / n_points;
    let x_mean: f64 = log_scales.iter().sum::<f64>() * inv_n_points;
    let y_mean: f64 = log_fluctuations.iter().sum::<f64>() * inv_n_points;

    let mut xy_sum = 0.0;
    let mut xx_sum = 0.0;
    for (&x, &y) in log_scales.iter().zip(log_fluctuations.iter()) {
        let dx = x - x_mean;
        xy_sum += dx * (y - y_mean);
        // Issue #96: powi(2) → multiplication for hot-path Hurst regression
        xx_sum += dx * dx;
    }

    let hurst = if xx_sum.abs() > f64::EPSILON {
        xy_sum / xx_sum
    } else {
        0.5
    };

    // Soft-clamp to [0, 1] using LUT (Task #198 → Task #8: O(1) lookup replaces exp())
    soft_clamp_hurst_lut(hurst)
}
622
/// Compute normalized permutation entropy.
///
/// Permutation entropy measures the complexity of a time series
/// by analyzing ordinal patterns (Bandt-Pompe). Returns value in [0, 1]:
/// 0 for a fully ordered series, 1 when all m! patterns are equally likely.
///
/// Requires at least `m! + (m-1)` observations where m is the embedding dimension.
/// Issue #96 Task #53: Optimized to use bounded array instead of HashMap<String>
/// Issue #96 Task #54: Hoisted SmallVec allocation and added early-exit for sorted sequences
fn compute_permutation_entropy(prices: &[f64], m: usize) -> f64 {
    let n = prices.len();
    let required = factorial(m) + m - 1;

    if n < required || m < 2 {
        return 0.5; // Default for insufficient data
    }

    // Bounded array for pattern counts (max 6 patterns for m=3)
    // Use factorial(m) as the size, but cap at 24 for m=4
    let max_patterns = factorial(m);
    if max_patterns > 24 {
        // Fallback for large m (shouldn't happen in practice, m≤3)
        return fallback_permutation_entropy(prices, m);
    }

    // Count ordinal patterns using bounded array
    let mut pattern_counts = [0usize; 24]; // Fixed size for all reasonable m values
    let num_patterns = n - m + 1;

    // OPTIMIZATION (Task #13): m=3 decision tree — 3 comparisons max, no sorting/SmallVec
    // Also fixes Lehmer code collision bug (factors [1,2,1] → correct bijection via decision tree)
    // Pattern indices agree with ordinal_indices_to_pattern_index's Lehmer numbering.
    if m == 3 {
        for i in 0..num_patterns {
            let (a, b, c) = (prices[i], prices[i + 1], prices[i + 2]);
            let idx = if a <= b {
                if b <= c { 0 }       // a ≤ b ≤ c → [0,1,2]
                else if a <= c { 1 }  // a ≤ c < b → [0,2,1]
                else { 4 }            // c < a ≤ b → [2,0,1]
            } else if a <= c { 2 }    // b < a ≤ c → [1,0,2]
            else if b <= c { 3 }      // b ≤ c < a → [1,2,0]
            else { 5 };               // c ≤ b < a → [2,1,0]
            pattern_counts[idx] += 1;
        }
    } else {
        // General path (m=2 or m=4): argsort each window, then Lehmer-encode
        let mut indices = SmallVec::<[usize; 4]>::new();
        for i in 0..num_patterns {
            let window = &prices[i..i + m];
            // Early-exit: a fully ascending window is always pattern 0 (no sort needed)
            let prices_ascending = window.windows(2).all(|w| w[0] <= w[1]);
            if prices_ascending {
                pattern_counts[0] += 1;
            } else {
                indices.clear();
                for j in 0..m {
                    indices.push(j);
                }
                indices.sort_by(|&a, &b| {
                    window[a]
                        .partial_cmp(&window[b])
                        .unwrap_or(std::cmp::Ordering::Equal)
                });
                let pattern_idx = ordinal_indices_to_pattern_index(&indices);
                pattern_counts[pattern_idx] += 1;
            }
        }
    }

    // Compute Shannon entropy from pattern counts
    // Issue #96: Pre-compute reciprocal — replaces per-pattern division with multiplication
    let inv_num_patterns = 1.0 / num_patterns as f64;
    let mut entropy = 0.0;
    for &count in &pattern_counts[..max_patterns] {
        if count > 0 {
            let p = count as f64 * inv_num_patterns;
            entropy -= p * p.ln();
        }
    }

    // Normalize by maximum entropy ln(m!) — use pre-computed constant for m=3 (Task #9)
    let max_entropy = if m == 3 {
        MAX_ENTROPY_M3
    } else {
        (max_patterns as f64).ln()
    };
    if max_entropy > f64::EPSILON {
        (entropy / max_entropy).clamp(0.0, 1.0)
    } else {
        0.5
    }
}
711
/// Issue #96 Task #58: Convert ordinal indices to pattern index using Lehmer code.
/// Optimized with specialization for m=2,3,4 to avoid unnecessary iterations.
/// For m=3: [0,1,2]→0, [0,2,1]→1, [1,0,2]→2, [1,2,0]→3, [2,0,1]→4, [2,1,0]→5
///
/// Generalized to accept any `&[usize]` (was `&SmallVec<[usize; 4]>`):
/// SmallVec call sites coerce via `Deref<Target = [usize]>`, and the helper
/// no longer depends on a specific container type.
///
/// # Arguments
/// * `indices` - argsort permutation of a window (each of 0..len appears once)
///
/// # Returns
/// Lehmer rank of the permutation in 0..len!; 0 for unsupported lengths.
#[inline]
fn ordinal_indices_to_pattern_index(indices: &[usize]) -> usize {
    match indices.len() {
        2 => {
            // m=2: 2 patterns - no Lehmer machinery needed
            if indices[0] < indices[1] { 0 } else { 1 }
        }
        3 => {
            // m=3: 6 patterns (3!) - unrolled Lehmer code, factors [2!, 1!] = [2, 1]
            // Position 0: count smaller elements to the right of indices[0]
            let lesser_0 = (indices[1] < indices[0]) as usize + (indices[2] < indices[0]) as usize;
            // Position 1: count smaller elements to the right of indices[1]
            let lesser_1 = (indices[2] < indices[1]) as usize;
            // Position 2 always contributes 0 (nothing to its right)
            lesser_0 * 2 + lesser_1
        }
        4 => {
            // m=4: 24 patterns (4!) - unrolled Lehmer code, factors [3!, 2!, 1!] = [6, 2, 1]
            let lesser_0 = (indices[1] < indices[0]) as usize
                         + (indices[2] < indices[0]) as usize
                         + (indices[3] < indices[0]) as usize;
            let lesser_1 = (indices[2] < indices[1]) as usize
                         + (indices[3] < indices[1]) as usize;
            let lesser_2 = (indices[3] < indices[2]) as usize;
            lesser_0 * 6 + lesser_1 * 2 + lesser_2
        }
        _ => 0, // Shouldn't happen: callers only use m in 2..=4
    }
}
765
/// Fallback permutation entropy for m > 4 (uses HashMap).
///
/// Only invoked by `compute_permutation_entropy` when `factorial(m) > 24`.
///
/// Fixes:
/// - Pattern key is now `Vec<usize>` instead of a concatenated digit String.
///   The old `"0112..."`-style key was ambiguous for m >= 11 (e.g. [1,11,..]
///   vs [11,1,..] produced identical strings), silently merging counts.
/// - Guards against `n < m` (the old `n - m + 1` underflowed for usize).
/// - Self-contained: factorial computed inline, and the (unreachable here)
///   m==3 special case is gone — `ln(m!)` covers all m uniformly.
fn fallback_permutation_entropy(prices: &[f64], m: usize) -> f64 {
    let n = prices.len();
    if m == 0 || n < m {
        return 0.5; // Degenerate input — same default as insufficient data
    }
    let num_patterns = n - m + 1;
    let mut pattern_counts: std::collections::HashMap<Vec<usize>, usize> =
        std::collections::HashMap::new();

    for i in 0..num_patterns {
        let window = &prices[i..i + m];
        // Argsort of the window = its ordinal pattern; stable sort keeps ties deterministic
        let mut indices: Vec<usize> = (0..m).collect();
        indices.sort_by(|&a, &b| {
            window[a]
                .partial_cmp(&window[b])
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        // Vec<usize> keys are a collision-free representation of the pattern
        *pattern_counts.entry(indices).or_insert(0usize) += 1;
    }

    // Issue #96: Pre-compute reciprocal — replaces per-pattern division with multiplication
    let inv_num_patterns = 1.0 / num_patterns as f64;
    let mut entropy = 0.0;
    for &count in pattern_counts.values() {
        if count > 0 {
            let p = count as f64 * inv_num_patterns;
            entropy -= p * p.ln();
        }
    }

    // Maximum possible entropy is ln(m!) — factorial computed inline
    let max_entropy = ((1..=m).product::<usize>() as f64).ln();
    if max_entropy > f64::EPSILON {
        (entropy / max_entropy).clamp(0.0, 1.0)
    } else {
        0.5
    }
}
805
/// Factorial of `n` for small integers; the empty product makes 0! == 1.
fn factorial(n: usize) -> usize {
    (1..=n).fold(1, |acc, k| acc * k)
}
810
811#[cfg(test)]
812mod tests {
813    use super::*;
814    use crate::fixed_point::FixedPoint;
815
    /// Build a synthetic AggTrade for tests.
    /// `timestamp` doubles as all three trade ids; price/volume are scaled
    /// by 1e8 into FixedPoint (8 implied decimal places).
    fn create_test_trade(
        price: f64,
        volume: f64,
        timestamp: i64,
        is_buyer_maker: bool,
    ) -> AggTrade {
        AggTrade {
            agg_trade_id: timestamp,
            price: FixedPoint((price * 1e8) as i64),
            volume: FixedPoint((volume * 1e8) as i64),
            first_trade_id: timestamp,
            last_trade_id: timestamp,
            timestamp,
            is_buyer_maker,
            is_best_match: None,
        }
    }
833
834    #[test]
835    fn test_compute_intra_bar_features_empty() {
836        let features = compute_intra_bar_features(&[]);
837        assert_eq!(features.intra_trade_count, Some(0));
838        assert!(features.intra_bull_epoch_density.is_none());
839    }
840
841    #[test]
842    fn test_compute_intra_bar_features_single_trade() {
843        let trades = vec![create_test_trade(100.0, 1.0, 1000000, false)];
844        let features = compute_intra_bar_features(&trades);
845        assert_eq!(features.intra_trade_count, Some(1));
846        // Most features require >= 2 trades
847        assert!(features.intra_bull_epoch_density.is_none());
848    }
849
850    #[test]
851    fn test_compute_intra_bar_features_uptrend() {
852        // Create uptrending price series
853        let trades: Vec<AggTrade> = (0..10)
854            .map(|i| create_test_trade(100.0 + i as f64 * 0.5, 1.0, i * 1000000, false))
855            .collect();
856
857        let features = compute_intra_bar_features(&trades);
858
859        assert_eq!(features.intra_trade_count, Some(10));
860        assert!(features.intra_bull_epoch_density.is_some());
861        assert!(features.intra_bear_epoch_density.is_some());
862
863        // In uptrend, max_drawdown should be low
864        if let Some(dd) = features.intra_max_drawdown {
865            assert!(dd < 0.1, "Uptrend should have low drawdown: {}", dd);
866        }
867    }
868
869    #[test]
870    fn test_compute_intra_bar_features_downtrend() {
871        // Create downtrending price series
872        let trades: Vec<AggTrade> = (0..10)
873            .map(|i| create_test_trade(100.0 - i as f64 * 0.5, 1.0, i * 1000000, true))
874            .collect();
875
876        let features = compute_intra_bar_features(&trades);
877
878        assert_eq!(features.intra_trade_count, Some(10));
879
880        // In downtrend, max_runup should be low
881        if let Some(ru) = features.intra_max_runup {
882            assert!(ru < 0.1, "Downtrend should have low runup: {}", ru);
883        }
884    }
885
886    #[test]
887    fn test_ofi_calculation() {
888        // All buys
889        let buy_trades: Vec<AggTrade> = (0..5)
890            .map(|i| create_test_trade(100.0, 1.0, i * 1000000, false))
891            .collect();
892
893        let features = compute_intra_bar_features(&buy_trades);
894        assert!(
895            features.intra_ofi.unwrap() > 0.9,
896            "All buys should have OFI near 1.0"
897        );
898
899        // All sells
900        let sell_trades: Vec<AggTrade> = (0..5)
901            .map(|i| create_test_trade(100.0, 1.0, i * 1000000, true))
902            .collect();
903
904        let features = compute_intra_bar_features(&sell_trades);
905        assert!(
906            features.intra_ofi.unwrap() < -0.9,
907            "All sells should have OFI near -1.0"
908        );
909    }
910
911    #[test]
912    fn test_ith_features_bounded() {
913        // Generate random-ish price series
914        let trades: Vec<AggTrade> = (0..50)
915            .map(|i| {
916                let price = 100.0 + ((i as f64 * 0.7).sin() * 2.0);
917                create_test_trade(price, 1.0, i * 1000000, i % 2 == 0)
918            })
919            .collect();
920
921        let features = compute_intra_bar_features(&trades);
922
923        // All ITH features should be bounded [0, 1]
924        if let Some(v) = features.intra_bull_epoch_density {
925            assert!(
926                v >= 0.0 && v <= 1.0,
927                "bull_epoch_density out of bounds: {}",
928                v
929            );
930        }
931        if let Some(v) = features.intra_bear_epoch_density {
932            assert!(
933                v >= 0.0 && v <= 1.0,
934                "bear_epoch_density out of bounds: {}",
935                v
936            );
937        }
938        if let Some(v) = features.intra_bull_excess_gain {
939            assert!(
940                v >= 0.0 && v <= 1.0,
941                "bull_excess_gain out of bounds: {}",
942                v
943            );
944        }
945        if let Some(v) = features.intra_bear_excess_gain {
946            assert!(
947                v >= 0.0 && v <= 1.0,
948                "bear_excess_gain out of bounds: {}",
949                v
950            );
951        }
952        if let Some(v) = features.intra_bull_cv {
953            assert!(v >= 0.0 && v <= 1.0, "bull_cv out of bounds: {}", v);
954        }
955        if let Some(v) = features.intra_bear_cv {
956            assert!(v >= 0.0 && v <= 1.0, "bear_cv out of bounds: {}", v);
957        }
958        if let Some(v) = features.intra_max_drawdown {
959            assert!(v >= 0.0 && v <= 1.0, "max_drawdown out of bounds: {}", v);
960        }
961        if let Some(v) = features.intra_max_runup {
962            assert!(v >= 0.0 && v <= 1.0, "max_runup out of bounds: {}", v);
963        }
964    }
965
966    #[test]
967    fn test_kaufman_er_bounds() {
968        // Perfectly efficient (straight line)
969        let efficient_trades: Vec<AggTrade> = (0..10)
970            .map(|i| create_test_trade(100.0 + i as f64, 1.0, i * 1000000, false))
971            .collect();
972
973        let features = compute_intra_bar_features(&efficient_trades);
974        if let Some(er) = features.intra_kaufman_er {
975            assert!(
976                (er - 1.0).abs() < 0.01,
977                "Straight line should have ER near 1.0: {}",
978                er
979            );
980        }
981    }
982
983    #[test]
984    fn test_complexity_features_require_data() {
985        // Less than 60 trades - complexity features should be None
986        let small_trades: Vec<AggTrade> = (0..30)
987            .map(|i| create_test_trade(100.0, 1.0, i * 1000000, false))
988            .collect();
989
990        let features = compute_intra_bar_features(&small_trades);
991        assert!(features.intra_hurst.is_none());
992        assert!(features.intra_permutation_entropy.is_none());
993
994        // 65+ trades - complexity features should be Some
995        let large_trades: Vec<AggTrade> = (0..70)
996            .map(|i| {
997                let price = 100.0 + ((i as f64 * 0.1).sin() * 2.0);
998                create_test_trade(price, 1.0, i * 1000000, false)
999            })
1000            .collect();
1001
1002        let features = compute_intra_bar_features(&large_trades);
1003        assert!(features.intra_hurst.is_some());
1004        assert!(features.intra_permutation_entropy.is_some());
1005
1006        // Hurst should be bounded [0, 1]
1007        if let Some(h) = features.intra_hurst {
1008            assert!(h >= 0.0 && h <= 1.0, "Hurst out of bounds: {}", h);
1009        }
1010        // Permutation entropy should be bounded [0, 1]
1011        if let Some(pe) = features.intra_permutation_entropy {
1012            assert!(
1013                pe >= 0.0 && pe <= 1.0,
1014                "Permutation entropy out of bounds: {}",
1015                pe
1016            );
1017        }
1018    }
1019
1020    // === Task #11: Hurst DFA edge case tests ===
1021
1022    #[test]
1023    fn test_hurst_dfa_all_identical_prices() {
1024        // 70 identical prices: cumsum = 0, all segments RMS = 0
1025        // Should return 0.5 fallback (no information)
1026        let prices: Vec<f64> = vec![100.0; 70];
1027        let h = compute_hurst_dfa(&prices);
1028        assert!(h.is_finite(), "Hurst should be finite for identical prices");
1029        assert!((h - 0.5).abs() < 0.15, "Hurst should be near 0.5 for flat prices: {}", h);
1030    }
1031
1032    #[test]
1033    fn test_hurst_dfa_monotonic_ascending() {
1034        // 70 perfectly ascending prices: strong trend (H > 0.5)
1035        let prices: Vec<f64> = (0..70).map(|i| 100.0 + i as f64 * 0.01).collect();
1036        let h = compute_hurst_dfa(&prices);
1037        assert!(h >= 0.0 && h <= 1.0, "Hurst out of bounds: {}", h);
1038        assert!(h > 0.5, "Trending series should have H > 0.5: {}", h);
1039    }
1040
1041    #[test]
1042    fn test_hurst_dfa_mean_reverting() {
1043        // 70 alternating prices: mean-reverting (H < 0.5)
1044        let prices: Vec<f64> = (0..70).map(|i| {
1045            if i % 2 == 0 { 100.0 } else { 100.5 }
1046        }).collect();
1047        let h = compute_hurst_dfa(&prices);
1048        assert!(h >= 0.0 && h <= 1.0, "Hurst out of bounds: {}", h);
1049        assert!(h < 0.55, "Mean-reverting series should have H <= 0.5: {}", h);
1050    }
1051
1052    #[test]
1053    fn test_hurst_dfa_exactly_64_trades() {
1054        // Minimum threshold for Hurst computation (n >= 64)
1055        let prices: Vec<f64> = (0..64).map(|i| 100.0 + (i as f64 * 0.3).sin()).collect();
1056        let h = compute_hurst_dfa(&prices);
1057        assert!(h >= 0.0 && h <= 1.0, "Hurst out of bounds at n=64: {}", h);
1058    }
1059
1060    #[test]
1061    fn test_hurst_dfa_below_threshold() {
1062        // 63 trades: below minimum, should return 0.5 default
1063        let prices: Vec<f64> = (0..63).map(|i| 100.0 + i as f64 * 0.01).collect();
1064        let h = compute_hurst_dfa(&prices);
1065        assert!((h - 0.5).abs() < f64::EPSILON, "Below threshold should return 0.5: {}", h);
1066    }
1067
1068    // === Task #11: Permutation Entropy edge case tests ===
1069
1070    #[test]
1071    fn test_pe_monotonic_ascending() {
1072        // 60 strictly ascending: all patterns are identity [0,1,2]
1073        // Entropy should be 0 (maximum order)
1074        let prices: Vec<f64> = (0..60).map(|i| 100.0 + i as f64 * 0.01).collect();
1075        let pe = compute_permutation_entropy(&prices, 3);
1076        assert!((pe - 0.0).abs() < 0.01, "Ascending series should have PE near 0: {}", pe);
1077    }
1078
1079    #[test]
1080    fn test_pe_monotonic_descending() {
1081        // 60 strictly descending: all patterns are reverse [2,1,0]
1082        // Entropy should be 0 (maximum order, single pattern)
1083        let prices: Vec<f64> = (0..60).map(|i| 200.0 - i as f64 * 0.01).collect();
1084        let pe = compute_permutation_entropy(&prices, 3);
1085        assert!((pe - 0.0).abs() < 0.01, "Descending series should have PE near 0: {}", pe);
1086    }
1087
1088    #[test]
1089    fn test_pe_all_identical_prices() {
1090        // 60 identical prices: all windows tied, all map to pattern 0
1091        // Entropy should be 0
1092        let prices: Vec<f64> = vec![100.0; 60];
1093        let pe = compute_permutation_entropy(&prices, 3);
1094        assert!((pe - 0.0).abs() < 0.01, "Identical prices should have PE near 0: {}", pe);
1095    }
1096
1097    #[test]
1098    fn test_pe_alternating_high_entropy() {
1099        // Alternating pattern creates diverse ordinal patterns → high entropy
1100        let prices: Vec<f64> = (0..70).map(|i| {
1101            match i % 6 {
1102                0 => 100.0, 1 => 102.0, 2 => 101.0,
1103                3 => 103.0, 4 => 99.0, 5 => 101.5,
1104                _ => unreachable!(),
1105            }
1106        }).collect();
1107        let pe = compute_permutation_entropy(&prices, 3);
1108        assert!(pe > 0.5, "Diverse patterns should have high PE: {}", pe);
1109        assert!(pe <= 1.0, "PE must be <= 1.0: {}", pe);
1110    }
1111
    #[test]
    fn test_pe_below_threshold() {
        // compute_permutation_entropy's internal minimum for m=3 is
        // factorial(3) + 3 - 1 = 8 prices; 7 is one short, so it must
        // return the 0.5 "no information" default. (The 60-trade gate
        // applied by the caller is covered by test_intrabar_pe_boundary_59_vs_60.)
        let prices: Vec<f64> = (0..7).map(|i| 100.0 + i as f64).collect();
        let pe = compute_permutation_entropy(&prices, 3);
        assert!((pe - 0.5).abs() < f64::EPSILON, "Below threshold should return 0.5: {}", pe);
    }
1121
1122    #[test]
1123    fn test_pe_exactly_at_threshold() {
1124        // Exactly 8 trades: minimum for m=3 (factorial(3) + 3 - 1 = 8)
1125        let prices: Vec<f64> = (0..8).map(|i| 100.0 + (i as f64 * 0.7).sin()).collect();
1126        let pe = compute_permutation_entropy(&prices, 3);
1127        assert!(pe >= 0.0 && pe <= 1.0, "PE at threshold should be valid: {}", pe);
1128    }
1129
    #[test]
    fn test_pe_decision_tree_all_six_patterns() {
        // Exercise the m=3 decision tree with a sequence whose overlapping
        // windows produce many distinct ordinal patterns. Note: because the
        // 13 windows of a 15-price series overlap, the six patterns are NOT
        // uniformly represented, so we only require PE to be clearly high
        // rather than exactly 1.0.
        let prices = vec![
            1.0, 2.0, 3.0,  // seed: ascending triple
            1.0, 3.0, 2.0,
            2.0, 1.0, 3.0,
            2.0, 3.0, 1.0,
            2.0, 1.0, 3.0,
        ];
        let pe = compute_permutation_entropy(&prices, 3);
        assert!(pe > 0.5, "Sequence with diverse patterns should have high PE: {}", pe);

        // Degenerate opposite: pure descending yields a single pattern
        // (the reverse permutation), so entropy collapses to ~0.
        let desc_prices: Vec<f64> = (0..20).map(|i| 100.0 - i as f64).collect();
        let pe_desc = compute_permutation_entropy(&desc_prices, 3);
        assert!(pe_desc < 0.1, "Pure descending should have PE near 0: {}", pe_desc);

        // Pure ascending likewise yields only the identity pattern → PE ≈ 0.
        let asc_prices: Vec<f64> = (0..20).map(|i| 100.0 + i as f64).collect();
        let pe_asc = compute_permutation_entropy(&asc_prices, 3);
        assert!(pe_asc < 0.1, "Pure ascending should have PE near 0: {}", pe_asc);
    }
1159
1160    #[test]
1161    fn test_lehmer_code_bijection_m3() {
1162        // Verify ordinal_indices_to_pattern_index is a bijection for all 6 permutations of m=3
1163        // After the Lehmer factor fix [1,2,1] → [2,1,1], each permutation must map uniquely
1164        use smallvec::SmallVec;
1165        let permutations: [[usize; 3]; 6] = [
1166            [0, 1, 2], [0, 2, 1], [1, 0, 2],
1167            [1, 2, 0], [2, 0, 1], [2, 1, 0],
1168        ];
1169        let mut seen = std::collections::HashSet::new();
1170        for perm in &permutations {
1171            let sv: SmallVec<[usize; 4]> = SmallVec::from_slice(perm);
1172            let idx = ordinal_indices_to_pattern_index(&sv);
1173            assert!(idx < 6, "m=3 index must be in [0,5]: {:?} → {}", perm, idx);
1174            assert!(seen.insert(idx), "Collision! {:?} → {} already used", perm, idx);
1175        }
1176        assert_eq!(seen.len(), 6, "Must map to exactly 6 unique indices");
1177    }
1178
1179    #[test]
1180    fn test_lehmer_code_bijection_m4() {
1181        // Verify bijection for all 24 permutations of m=4
1182        use smallvec::SmallVec;
1183        let mut seen = std::collections::HashSet::new();
1184        // Generate all 24 permutations of [0,1,2,3]
1185        let mut perm = [0usize, 1, 2, 3];
1186        loop {
1187            let sv: SmallVec<[usize; 4]> = SmallVec::from_slice(&perm);
1188            let idx = ordinal_indices_to_pattern_index(&sv);
1189            assert!(idx < 24, "m=4 index must be in [0,23]: {:?} → {}", perm, idx);
1190            assert!(seen.insert(idx), "Collision! {:?} → {} already used", perm, idx);
1191            if !next_permutation(&mut perm) {
1192                break;
1193            }
1194        }
1195        assert_eq!(seen.len(), 24, "Must map to exactly 24 unique indices");
1196    }
1197
1198    /// Generate next lexicographic permutation. Returns false when last permutation reached.
1199    fn next_permutation(arr: &mut [usize]) -> bool {
1200        let n = arr.len();
1201        if n < 2 { return false; }
1202        let mut i = n - 1;
1203        while i > 0 && arr[i - 1] >= arr[i] { i -= 1; }
1204        if i == 0 { return false; }
1205        let mut j = n - 1;
1206        while arr[j] <= arr[i - 1] { j -= 1; }
1207        arr.swap(i - 1, j);
1208        arr[i..].reverse();
1209        true
1210    }
1211
1212    #[test]
1213    fn test_lehmer_code_bijection_m2() {
1214        // Verify m=2: exactly 2 patterns
1215        use smallvec::SmallVec;
1216        let asc: SmallVec<[usize; 4]> = SmallVec::from_slice(&[0, 1]);
1217        let desc: SmallVec<[usize; 4]> = SmallVec::from_slice(&[1, 0]);
1218        let idx_asc = ordinal_indices_to_pattern_index(&asc);
1219        let idx_desc = ordinal_indices_to_pattern_index(&desc);
1220        assert_eq!(idx_asc, 0, "ascending [0,1] → 0");
1221        assert_eq!(idx_desc, 1, "descending [1,0] → 1");
1222        assert_ne!(idx_asc, idx_desc);
1223    }
1224
1225    #[test]
1226    fn test_lehmer_code_m3_specific_values() {
1227        // Verify exact Lehmer code values for m=3 (not just uniqueness)
1228        use smallvec::SmallVec;
1229        // [0,1,2] → lesser_0=0, lesser_1=0 → code = 0*2 + 0*1 = 0
1230        let p012: SmallVec<[usize; 4]> = SmallVec::from_slice(&[0, 1, 2]);
1231        assert_eq!(ordinal_indices_to_pattern_index(&p012), 0);
1232        // [2,1,0] → lesser_0=2, lesser_1=1 → code = 2*2 + 1*1 = 5
1233        let p210: SmallVec<[usize; 4]> = SmallVec::from_slice(&[2, 1, 0]);
1234        assert_eq!(ordinal_indices_to_pattern_index(&p210), 5);
1235        // [1,0,2] → lesser_0=1, lesser_1=0 → code = 1*2 + 0*1 = 2
1236        let p102: SmallVec<[usize; 4]> = SmallVec::from_slice(&[1, 0, 2]);
1237        assert_eq!(ordinal_indices_to_pattern_index(&p102), 2);
1238    }
1239
1240    // === Task #12: Intra-bar features edge case tests ===
1241
    #[test]
    fn test_intra_bar_nan_first_price() {
        // NOTE(review): despite the name, FixedPoint wraps an i64 and cannot
        // represent NaN — the fixture uses a ZERO first price, which trips
        // the same `first_price <= 0.0` invalid-price guard the NaN path
        // would. Consider renaming to test_intra_bar_zero_first_price.
        let trades = vec![
            AggTrade {
                agg_trade_id: 1,
                price: FixedPoint(0), // 0.0 → triggers first_price <= 0.0 guard
                volume: FixedPoint(100_000_000),
                first_trade_id: 1,
                last_trade_id: 1,
                timestamp: 1_000_000,
                is_buyer_maker: false,
                is_best_match: None,
            },
            create_test_trade(100.0, 1.0, 2_000_000, false),
        ];
        let features = compute_intra_bar_features(&trades);
        assert_eq!(features.intra_trade_count, Some(2));
        // All ITH features should be None (invalid price path)
        assert!(features.intra_bull_epoch_density.is_none());
        assert!(features.intra_hurst.is_none());
    }
1264
1265    #[test]
1266    fn test_intra_bar_all_identical_prices() {
1267        // 100 trades at same price: zero volatility scenario
1268        let trades: Vec<AggTrade> = (0..100)
1269            .map(|i| create_test_trade(100.0, 1.0, i * 1_000_000, i % 2 == 0))
1270            .collect();
1271
1272        let features = compute_intra_bar_features(&trades);
1273        assert_eq!(features.intra_trade_count, Some(100));
1274
1275        // Features should be valid (no panic), Kaufman ER undefined (path_length=0)
1276        if let Some(er) = features.intra_kaufman_er {
1277            // With zero path, ER is undefined → should return None or 0
1278            assert!(er.is_finite(), "Kaufman ER should be finite: {}", er);
1279        }
1280
1281        // Garman-Klass should handle zero high-low range
1282        if let Some(gk) = features.intra_garman_klass_vol {
1283            assert!(gk.is_finite(), "Garman-Klass should be finite: {}", gk);
1284        }
1285
1286        // Hurst should be near 0.5 for flat prices (n=100 >= 64)
1287        if let Some(h) = features.intra_hurst {
1288            assert!(h.is_finite(), "Hurst should be finite for flat prices: {}", h);
1289        }
1290    }
1291
1292    #[test]
1293    fn test_intra_bar_all_buys_count_imbalance() {
1294        // All buy trades: count_imbalance should saturate at 1.0
1295        let trades: Vec<AggTrade> = (0..20)
1296            .map(|i| create_test_trade(100.0 + i as f64 * 0.1, 1.0, i * 1_000_000, false))
1297            .collect();
1298
1299        let features = compute_intra_bar_features(&trades);
1300        if let Some(ci) = features.intra_count_imbalance {
1301            assert!(
1302                (ci - 1.0).abs() < 0.01,
1303                "All buys should have count_imbalance near 1.0: {}",
1304                ci
1305            );
1306        }
1307    }
1308
1309    #[test]
1310    fn test_intra_bar_all_sells_count_imbalance() {
1311        // All sell trades: count_imbalance should saturate at -1.0
1312        let trades: Vec<AggTrade> = (0..20)
1313            .map(|i| create_test_trade(100.0 - i as f64 * 0.1, 1.0, i * 1_000_000, true))
1314            .collect();
1315
1316        let features = compute_intra_bar_features(&trades);
1317        if let Some(ci) = features.intra_count_imbalance {
1318            assert!(
1319                (ci - (-1.0)).abs() < 0.01,
1320                "All sells should have count_imbalance near -1.0: {}",
1321                ci
1322            );
1323        }
1324    }
1325
1326    #[test]
1327    fn test_intra_bar_instant_bar_same_timestamp() {
1328        // All trades at same timestamp: duration=0
1329        let trades: Vec<AggTrade> = (0..10)
1330            .map(|i| create_test_trade(100.0 + i as f64 * 0.1, 1.0, 1_000_000, i % 2 == 0))
1331            .collect();
1332
1333        let features = compute_intra_bar_features(&trades);
1334        assert_eq!(features.intra_trade_count, Some(10));
1335
1336        // Burstiness requires inter-arrival intervals; with all same timestamps,
1337        // all intervals are 0, std_tau=0, burstiness should be None
1338        if let Some(b) = features.intra_burstiness {
1339            assert!(b.is_finite(), "Burstiness should be finite for instant bar: {}", b);
1340        }
1341
1342        // Intensity with duration=0 should still be finite
1343        if let Some(intensity) = features.intra_intensity {
1344            assert!(intensity.is_finite(), "Intensity should be finite: {}", intensity);
1345        }
1346    }
1347
1348    #[test]
1349    fn test_intra_bar_large_trade_count() {
1350        // 500 trades: stress test for memory and numerical stability
1351        let trades: Vec<AggTrade> = (0..500)
1352            .map(|i| {
1353                let price = 100.0 + (i as f64 * 0.1).sin() * 2.0;
1354                create_test_trade(price, 0.5 + (i as f64 * 0.03).cos(), i * 1_000_000, i % 3 == 0)
1355            })
1356            .collect();
1357
1358        let features = compute_intra_bar_features(&trades);
1359        assert_eq!(features.intra_trade_count, Some(500));
1360
1361        // All bounded features should be valid
1362        if let Some(h) = features.intra_hurst {
1363            assert!(h >= 0.0 && h <= 1.0, "Hurst out of bounds at n=500: {}", h);
1364        }
1365        if let Some(pe) = features.intra_permutation_entropy {
1366            assert!(pe >= 0.0 && pe <= 1.0, "PE out of bounds at n=500: {}", pe);
1367        }
1368        if let Some(ofi) = features.intra_ofi {
1369            assert!(ofi >= -1.0 && ofi <= 1.0, "OFI out of bounds at n=500: {}", ofi);
1370        }
1371    }
1372
1373    // === Issue #96: Intra-bar feature boundary and edge case tests ===
1374
1375    #[test]
1376    fn test_intrabar_exactly_2_trades_ith() {
1377        // Minimum threshold for ITH features (n >= 2)
1378        let trades = vec![
1379            create_test_trade(100.0, 1.0, 1_000_000, false),
1380            create_test_trade(100.5, 1.5, 2_000_000, true),
1381        ];
1382        let features = compute_intra_bar_features(&trades);
1383        assert_eq!(features.intra_trade_count, Some(2));
1384
1385        // ITH features should be present for n >= 2
1386        assert!(features.intra_bull_epoch_density.is_some(), "Bull epochs for n=2");
1387        assert!(features.intra_bear_epoch_density.is_some(), "Bear epochs for n=2");
1388        assert!(features.intra_max_drawdown.is_some(), "Max drawdown for n=2");
1389        assert!(features.intra_max_runup.is_some(), "Max runup for n=2");
1390
1391        // Complexity features must be None (need n >= 60/64)
1392        assert!(features.intra_hurst.is_none(), "Hurst requires n >= 64");
1393        assert!(features.intra_permutation_entropy.is_none(), "PE requires n >= 60");
1394
1395        // Kaufman ER for 2-trade straight line should be ~1.0
1396        if let Some(er) = features.intra_kaufman_er {
1397            assert!((er - 1.0).abs() < 0.01, "Straight line ER should be 1.0: {}", er);
1398        }
1399    }
1400
1401    #[test]
1402    fn test_intrabar_pe_boundary_59_vs_60() {
1403        // n=59: below PE threshold → None
1404        let trades_59: Vec<AggTrade> = (0..59)
1405            .map(|i| {
1406                let price = 100.0 + (i as f64 * 0.3).sin() * 2.0;
1407                create_test_trade(price, 1.0, i * 1_000_000, i % 2 == 0)
1408            })
1409            .collect();
1410        let f59 = compute_intra_bar_features(&trades_59);
1411        assert!(f59.intra_permutation_entropy.is_none(), "n=59 should not compute PE");
1412
1413        // n=60: at PE threshold → Some
1414        let trades_60: Vec<AggTrade> = (0..60)
1415            .map(|i| {
1416                let price = 100.0 + (i as f64 * 0.3).sin() * 2.0;
1417                create_test_trade(price, 1.0, i * 1_000_000, i % 2 == 0)
1418            })
1419            .collect();
1420        let f60 = compute_intra_bar_features(&trades_60);
1421        assert!(f60.intra_permutation_entropy.is_some(), "n=60 should compute PE");
1422        let pe60 = f60.intra_permutation_entropy.unwrap();
1423        assert!(pe60.is_finite() && pe60 >= 0.0 && pe60 <= 1.0, "PE(60) out of bounds: {}", pe60);
1424    }
1425
1426    #[test]
1427    fn test_intrabar_hurst_boundary_63_vs_64() {
1428        // n=63: below Hurst threshold → None
1429        let trades_63: Vec<AggTrade> = (0..63)
1430            .map(|i| {
1431                let price = 100.0 + (i as f64 * 0.2).sin() * 2.0;
1432                create_test_trade(price, 1.0, i * 1_000_000, i % 2 == 0)
1433            })
1434            .collect();
1435        let f63 = compute_intra_bar_features(&trades_63);
1436        assert!(f63.intra_hurst.is_none(), "n=63 should not compute Hurst");
1437
1438        // n=64: at Hurst threshold → Some
1439        let trades_64: Vec<AggTrade> = (0..64)
1440            .map(|i| {
1441                let price = 100.0 + (i as f64 * 0.2).sin() * 2.0;
1442                create_test_trade(price, 1.0, i * 1_000_000, i % 2 == 0)
1443            })
1444            .collect();
1445        let f64_features = compute_intra_bar_features(&trades_64);
1446        assert!(f64_features.intra_hurst.is_some(), "n=64 should compute Hurst");
1447        let h64 = f64_features.intra_hurst.unwrap();
1448        assert!(h64.is_finite() && h64 >= 0.0 && h64 <= 1.0, "Hurst(64) out of bounds: {}", h64);
1449    }
1450
1451    #[test]
1452    fn test_intrabar_constant_price_full_features() {
1453        // 100 trades at identical price — tests all features with zero-range input
1454        let trades: Vec<AggTrade> = (0..100)
1455            .map(|i| create_test_trade(42000.0, 1.0, i * 1_000_000, i % 2 == 0))
1456            .collect();
1457        let features = compute_intra_bar_features(&trades);
1458        assert_eq!(features.intra_trade_count, Some(100));
1459
1460        // OFI: equal buy/sell → near 0
1461        if let Some(ofi) = features.intra_ofi {
1462            assert!(ofi.abs() < 0.1, "Equal buy/sell → OFI near 0: {}", ofi);
1463        }
1464
1465        // Garman-Klass: zero price range → 0
1466        if let Some(gk) = features.intra_garman_klass_vol {
1467            assert!(gk.is_finite() && gk < 0.001, "Constant price → GK near 0: {}", gk);
1468        }
1469
1470        // Hurst: flat series → should be finite (may be 0.5 or NaN-clamped)
1471        if let Some(h) = features.intra_hurst {
1472            assert!(h.is_finite() && h >= 0.0 && h <= 1.0, "Hurst must be finite: {}", h);
1473        }
1474
1475        // PE: all identical ordinal patterns → low entropy
1476        if let Some(pe) = features.intra_permutation_entropy {
1477            assert!(pe.is_finite() && pe >= 0.0, "PE must be finite: {}", pe);
1478            assert!(pe < 0.05, "Constant prices → PE near 0: {}", pe);
1479        }
1480
1481        // Kaufman ER: no movement → ER = 1.0 (net = path = 0)
1482        if let Some(er) = features.intra_kaufman_er {
1483            assert!(er.is_finite(), "Kaufman ER finite for constant price: {}", er);
1484        }
1485    }
1486
1487    #[test]
1488    fn test_intrabar_all_buy_with_hurst_pe() {
1489        // 70 buy trades with ascending prices — triggers Hurst + PE computation
1490        let trades: Vec<AggTrade> = (0..70)
1491            .map(|i| create_test_trade(100.0 + i as f64 * 0.1, 1.0, i * 1_000_000, false))
1492            .collect();
1493        let features = compute_intra_bar_features(&trades);
1494
1495        // All buys → OFI = 1.0
1496        if let Some(ofi) = features.intra_ofi {
1497            assert!((ofi - 1.0).abs() < 0.01, "All buys → OFI=1.0: {}", ofi);
1498        }
1499
1500        // Hurst should be computable (n=70 >= 64) and trending
1501        assert!(features.intra_hurst.is_some(), "n=70 should compute Hurst");
1502        if let Some(h) = features.intra_hurst {
1503            assert!(h.is_finite() && h >= 0.0 && h <= 1.0, "Hurst bounded: {}", h);
1504        }
1505
1506        // PE should be computable (n=70 >= 60) and low (monotonic ascending)
1507        assert!(features.intra_permutation_entropy.is_some(), "n=70 should compute PE");
1508        if let Some(pe) = features.intra_permutation_entropy {
1509            assert!(pe.is_finite() && pe >= 0.0 && pe <= 1.0, "PE bounded: {}", pe);
1510            assert!(pe < 0.1, "Monotonic ascending → low PE: {}", pe);
1511        }
1512    }
1513
1514    #[test]
1515    fn test_intrabar_all_sell_with_hurst_pe() {
1516        // 70 sell trades with descending prices — symmetric to all-buy
1517        let trades: Vec<AggTrade> = (0..70)
1518            .map(|i| create_test_trade(100.0 - i as f64 * 0.1, 1.0, i * 1_000_000, true))
1519            .collect();
1520        let features = compute_intra_bar_features(&trades);
1521
1522        // All sells → OFI = -1.0
1523        if let Some(ofi) = features.intra_ofi {
1524            assert!((ofi - (-1.0)).abs() < 0.01, "All sells → OFI=-1.0: {}", ofi);
1525        }
1526
1527        // Hurst and PE should be computable
1528        assert!(features.intra_hurst.is_some(), "n=70 should compute Hurst");
1529        assert!(features.intra_permutation_entropy.is_some(), "n=70 should compute PE");
1530        if let Some(pe) = features.intra_permutation_entropy {
1531            assert!(pe < 0.1, "Monotonic descending → low PE: {}", pe);
1532        }
1533    }
1534
1535    #[test]
1536    fn test_intra_bar_zero_volume_trades() {
1537        // All trades have zero volume: tests division-by-zero handling in
1538        // OFI, VWAP, Kyle Lambda, volume_per_trade, turnover_imbalance
1539        let trades: Vec<AggTrade> = (0..20)
1540            .map(|i| create_test_trade(100.0 + i as f64 * 0.1, 0.0, i * 1_000_000, i % 2 == 0))
1541            .collect();
1542
1543        let features = compute_intra_bar_features(&trades);
1544
1545        // Should not panic — all features must be finite
1546        assert_eq!(features.intra_trade_count, Some(20));
1547
1548        // OFI: (0-0)/0 → guarded to 0.0
1549        if let Some(ofi) = features.intra_ofi {
1550            assert!(ofi.is_finite(), "OFI must be finite with zero volume: {}", ofi);
1551            assert!((ofi).abs() < f64::EPSILON, "OFI should be 0.0 with zero volume: {}", ofi);
1552        }
1553
1554        // VWAP position: zero total_vol → falls back to first_price for vwap
1555        if let Some(vp) = features.intra_vwap_position {
1556            assert!(vp.is_finite(), "VWAP position must be finite: {}", vp);
1557        }
1558
1559        // Kyle Lambda: total_vol=0 → None
1560        assert!(features.intra_kyle_lambda.is_none(), "Kyle Lambda undefined with zero volume");
1561
1562        // Duration and intensity should still be valid
1563        if let Some(d) = features.intra_duration_us {
1564            assert!(d > 0, "Duration should be positive: {}", d);
1565        }
1566        if let Some(intensity) = features.intra_intensity {
1567            assert!(intensity.is_finite() && intensity > 0.0, "Intensity finite: {}", intensity);
1568        }
1569    }
1570}
1571
/// Property-based tests for intra-bar feature bounds invariants.
/// Uses proptest to verify all features stay within documented ranges
/// for arbitrary trade inputs across various market conditions.
#[cfg(test)]
mod proptest_intrabar_bounds {
    use super::*;
    use crate::fixed_point::FixedPoint;
    use crate::types::AggTrade;
    use proptest::prelude::*;

    /// Builds a synthetic `AggTrade`; ids are derived from the timestamp
    /// so each generated trade is unique within a sequence.
    fn make_trade(price: f64, volume: f64, timestamp: i64, is_buyer_maker: bool) -> AggTrade {
        AggTrade {
            agg_trade_id: timestamp,
            price: FixedPoint((price * 1e8) as i64),
            volume: FixedPoint((volume * 1e8) as i64),
            first_trade_id: timestamp,
            last_trade_id: timestamp,
            timestamp,
            is_buyer_maker,
            is_best_match: None,
        }
    }

    /// Strategy: generate a valid trade sequence with varying parameters.
    ///
    /// Uses a seeded 64-bit LCG (Knuth MMIX constants) so each proptest case
    /// is deterministic. Two fixes versus the original generator:
    /// - the uniform sample divided a 31-bit value (`rng >> 33`) by
    ///   `u32::MAX`, so `r` only covered [0, 0.5); it now divides by 2^31
    ///   and spans the full [0, 1), restoring the intended ±5.0 price range;
    /// - `is_buyer_maker` used `rng % 2`, but the low bit of a
    ///   modulus-2^64 LCG alternates with period 2, so the flag strictly
    ///   alternated buy/sell; it now samples the high-order bit, which has
    ///   the full period.
    fn trade_sequence(min_n: usize, max_n: usize) -> impl Strategy<Value = Vec<AggTrade>> {
        (min_n..=max_n, 0_u64..10000).prop_map(|(n, seed)| {
            let mut rng = seed;
            let base_price = 100.0;
            (0..n)
                .map(|i| {
                    rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
                    // 31 high-order bits mapped onto [0, 1).
                    let r = ((rng >> 33) as f64) / ((1u64 << 31) as f64);
                    let price = base_price + (r - 0.5) * 10.0;
                    let volume = 0.1 + r * 5.0;
                    let ts = (i as i64) * 1_000_000; // 1 second apart
                    // High-order bit: LCG low bits are strongly periodic.
                    make_trade(price, volume, ts, (rng >> 63) == 0)
                })
                .collect()
        })
    }

    proptest! {
        /// All ITH features must be in [0, 1] for any valid trade sequence
        #[test]
        fn ith_features_always_bounded(trades in trade_sequence(2, 100)) {
            let features = compute_intra_bar_features(&trades);

            if let Some(v) = features.intra_bull_epoch_density {
                prop_assert!(v >= 0.0 && v <= 1.0, "bull_epoch_density={v}");
            }
            if let Some(v) = features.intra_bear_epoch_density {
                prop_assert!(v >= 0.0 && v <= 1.0, "bear_epoch_density={v}");
            }
            if let Some(v) = features.intra_bull_excess_gain {
                prop_assert!(v >= 0.0 && v <= 1.0, "bull_excess_gain={v}");
            }
            if let Some(v) = features.intra_bear_excess_gain {
                prop_assert!(v >= 0.0 && v <= 1.0, "bear_excess_gain={v}");
            }
            if let Some(v) = features.intra_bull_cv {
                prop_assert!(v >= 0.0 && v <= 1.0, "bull_cv={v}");
            }
            if let Some(v) = features.intra_bear_cv {
                prop_assert!(v >= 0.0 && v <= 1.0, "bear_cv={v}");
            }
            if let Some(v) = features.intra_max_drawdown {
                prop_assert!(v >= 0.0 && v <= 1.0, "max_drawdown={v}");
            }
            if let Some(v) = features.intra_max_runup {
                prop_assert!(v >= 0.0 && v <= 1.0, "max_runup={v}");
            }
        }

        /// Statistical features must respect their documented ranges
        #[test]
        fn statistical_features_bounded(trades in trade_sequence(3, 200)) {
            let features = compute_intra_bar_features(&trades);

            if let Some(ofi) = features.intra_ofi {
                prop_assert!(ofi >= -1.0 - f64::EPSILON && ofi <= 1.0 + f64::EPSILON,
                    "OFI={ofi} out of [-1, 1]");
            }
            if let Some(ci) = features.intra_count_imbalance {
                prop_assert!(ci >= -1.0 - f64::EPSILON && ci <= 1.0 + f64::EPSILON,
                    "count_imbalance={ci} out of [-1, 1]");
            }
            if let Some(b) = features.intra_burstiness {
                prop_assert!(b >= -1.0 - f64::EPSILON && b <= 1.0 + f64::EPSILON,
                    "burstiness={b} out of [-1, 1]");
            }
            if let Some(er) = features.intra_kaufman_er {
                prop_assert!(er >= 0.0 && er <= 1.0 + f64::EPSILON,
                    "kaufman_er={er} out of [0, 1]");
            }
            if let Some(vwap) = features.intra_vwap_position {
                prop_assert!(vwap >= 0.0 && vwap <= 1.0 + f64::EPSILON,
                    "vwap_position={vwap} out of [0, 1]");
            }
            if let Some(gk) = features.intra_garman_klass_vol {
                prop_assert!(gk >= 0.0, "garman_klass_vol={gk} negative");
            }
            if let Some(intensity) = features.intra_intensity {
                prop_assert!(intensity >= 0.0, "intensity={intensity} negative");
            }
        }

        /// Complexity features (Hurst, PE) bounded when present
        #[test]
        fn complexity_features_bounded(trades in trade_sequence(70, 300)) {
            let features = compute_intra_bar_features(&trades);

            if let Some(h) = features.intra_hurst {
                prop_assert!(h >= 0.0 && h <= 1.0,
                    "hurst={h} out of [0, 1] for n={}", trades.len());
            }
            if let Some(pe) = features.intra_permutation_entropy {
                prop_assert!(pe >= 0.0 && pe <= 1.0 + f64::EPSILON,
                    "permutation_entropy={pe} out of [0, 1] for n={}", trades.len());
            }
        }

        /// Trade count always equals input length
        #[test]
        fn trade_count_matches_input(trades in trade_sequence(0, 50)) {
            let features = compute_intra_bar_features(&trades);
            prop_assert_eq!(features.intra_trade_count, Some(trades.len() as u32));
        }
    }
}