// oxiphysics_core/time_series_analysis.rs

1#![allow(clippy::needless_range_loop)]
2// Copyright 2026 COOLJAPAN OU (Team KitaSan)
3// SPDX-License-Identifier: Apache-2.0
4
5//! Time-series analysis utilities: statistics, filtering, autocorrelation,
6//! trend decomposition, spectral analysis, change-point detection, and AR models.
7
8#![allow(dead_code)]
9
10use std::f64::consts::PI;
11
12// ─────────────────────────────────────────────────────────────────────────────
13// Core data structure
14// ─────────────────────────────────────────────────────────────────────────────
15
/// A time series with optional explicit timestamps.
///
/// Evenly-spaced data needs only `data` and `sampling_rate`; irregularly
/// sampled data can additionally carry one timestamp per sample.
#[derive(Debug, Clone)]
pub struct TimeSeries {
    /// Raw sample values, in acquisition order.
    pub data: Vec<f64>,
    /// Optional explicit timestamps (seconds).  When `None`, samples are
    /// assumed to be evenly spaced at `1.0 / sampling_rate`.
    pub timestamps: Option<Vec<f64>>,
    /// Nominal sampling rate in Hz.
    pub sampling_rate: f64,
}
27
28impl TimeSeries {
29    /// Create a new `TimeSeries` from evenly-spaced data.
30    pub fn new(data: Vec<f64>, sampling_rate: f64) -> Self {
31        Self {
32            data,
33            timestamps: None,
34            sampling_rate,
35        }
36    }
37
38    /// Create a `TimeSeries` with explicit timestamps.
39    pub fn with_timestamps(data: Vec<f64>, timestamps: Vec<f64>, sampling_rate: f64) -> Self {
40        Self {
41            data,
42            timestamps: Some(timestamps),
43            sampling_rate,
44        }
45    }
46
47    /// Return the number of samples.
48    pub fn len(&self) -> usize {
49        self.data.len()
50    }
51
52    /// Return `true` when there are no samples.
53    pub fn is_empty(&self) -> bool {
54        self.data.is_empty()
55    }
56}
57
58// ─────────────────────────────────────────────────────────────────────────────
59// Descriptive statistics
60// ─────────────────────────────────────────────────────────────────────────────
61
/// Descriptive statistics computed over a [`TimeSeries`].
///
/// Produced by [`compute_stats`]; requires at least two samples.
#[derive(Debug, Clone, PartialEq)]
pub struct TimeSeriesStats {
    /// Arithmetic mean.
    pub mean: f64,
    /// Sample variance (Bessel-corrected, N-1 denominator).
    pub variance: f64,
    /// Standard deviation (square root of `variance`).
    pub std: f64,
    /// Minimum value.
    pub min: f64,
    /// Maximum value.
    pub max: f64,
    /// Sample skewness (standardised third moment; 0 for a flat series).
    pub skewness: f64,
    /// Excess kurtosis (Fisher definition: normal distribution → 0).
    pub kurtosis: f64,
}
80
81/// Compute descriptive statistics for a [`TimeSeries`].
82///
83/// Returns `None` when the series is empty or has fewer than two samples.
84pub fn compute_stats(ts: &TimeSeries) -> Option<TimeSeriesStats> {
85    let n = ts.data.len();
86    if n < 2 {
87        return None;
88    }
89    let nf = n as f64;
90    let mean = ts.data.iter().copied().sum::<f64>() / nf;
91    let variance = ts.data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / (nf - 1.0);
92    let std = variance.sqrt();
93    let min = ts.data.iter().copied().fold(f64::INFINITY, f64::min);
94    let max = ts.data.iter().copied().fold(f64::NEG_INFINITY, f64::max);
95
96    let (skewness, kurtosis) = if std > 0.0 {
97        let m3 = ts
98            .data
99            .iter()
100            .map(|&x| ((x - mean) / std).powi(3))
101            .sum::<f64>()
102            / nf;
103        let m4 = ts
104            .data
105            .iter()
106            .map(|&x| ((x - mean) / std).powi(4))
107            .sum::<f64>()
108            / nf;
109        (m3, m4 - 3.0)
110    } else {
111        (0.0, 0.0)
112    };
113
114    Some(TimeSeriesStats {
115        mean,
116        variance,
117        std,
118        min,
119        max,
120        skewness,
121        kurtosis,
122    })
123}
124
125// ─────────────────────────────────────────────────────────────────────────────
126// Moving averages
127// ─────────────────────────────────────────────────────────────────────────────
128
/// Centred (symmetric) simple moving average.
///
/// Windows are clipped at the array boundaries, so edge samples average
/// over fewer points.  `window == 0` or `window > data.len()` returns the
/// input unchanged.
pub fn moving_average(data: &[f64], window: usize) -> Vec<f64> {
    let n = data.len();
    if window == 0 || window > n {
        return data.to_vec();
    }
    let half = window / 2;
    (0..n)
        .map(|i| {
            let start = i.saturating_sub(half);
            let end = (i + half + 1).min(n);
            let neighbourhood = &data[start..end];
            neighbourhood.iter().sum::<f64>() / neighbourhood.len() as f64
        })
        .collect()
}
149
/// Exponential moving average with smoothing factor `alpha` ∈ (0, 1].
///
/// `alpha` is clamped into `[1e-9, 1.0]`; `alpha = 1` reproduces the input.
pub fn ema(data: &[f64], alpha: f64) -> Vec<f64> {
    let Some((&first, rest)) = data.split_first() else {
        return Vec::new();
    };
    let a = alpha.clamp(1e-9, 1.0);
    let mut smoothed = first;
    let mut out = vec![first];
    for &x in rest {
        smoothed = a * x + (1.0 - a) * smoothed;
        out.push(smoothed);
    }
    out
}
166
167// ─────────────────────────────────────────────────────────────────────────────
168// Autocorrelation
169// ─────────────────────────────────────────────────────────────────────────────
170
/// Autocorrelation function (ACF) estimator.
#[derive(Debug, Clone)]
pub struct Autocorrelation {
    /// ACF values at lags 0, 1, …, `max_lag`.
    pub values: Vec<f64>,
}

impl Autocorrelation {
    /// Compute the normalized ACF of `data` up to `max_lag`.
    ///
    /// Lags are capped at `data.len() - 1`.  A zero-variance (constant)
    /// series yields 1.0 at every requested lag by convention; an empty
    /// series yields no values.
    pub fn compute(data: &[f64], max_lag: usize) -> Self {
        let n = data.len();
        if n == 0 {
            return Self { values: Vec::new() };
        }
        let mean = data.iter().sum::<f64>() / n as f64;
        let denom: f64 = data.iter().map(|&x| (x - mean) * (x - mean)).sum();
        if denom == 0.0 {
            // Constant signal: perfectly self-similar at every lag.
            return Self {
                values: vec![1.0; max_lag + 1],
            };
        }
        let capped = max_lag.min(n - 1);
        let mut values = Vec::with_capacity(capped + 1);
        for lag in 0..=capped {
            let mut cov = 0.0;
            for i in 0..n - lag {
                cov += (data[i] - mean) * (data[i + lag] - mean);
            }
            values.push(cov / denom);
        }
        Self { values }
    }
}
204
205// ─────────────────────────────────────────────────────────────────────────────
206// Cross-correlation
207// ─────────────────────────────────────────────────────────────────────────────
208
/// Cross-correlation estimator for two equal-length signals.
#[derive(Debug, Clone)]
pub struct CrossCorrelation {
    /// Cross-correlation values indexed by lag offset.
    pub values: Vec<f64>,
    /// Maximum lag used during computation.
    pub max_lag: usize,
}

impl CrossCorrelation {
    /// Compute the normalized cross-correlation between `x` and `y`.
    ///
    /// Both inputs are truncated to the shorter length; lags are capped at
    /// `len - 1`.  A zero-spread input produces zeros at every lag.
    pub fn compute(x: &[f64], y: &[f64], max_lag: usize) -> Self {
        let n = x.len().min(y.len());
        if n == 0 {
            return Self {
                values: Vec::new(),
                max_lag,
            };
        }
        let (x, y) = (&x[..n], &y[..n]);
        let mx = x.iter().sum::<f64>() / n as f64;
        let my = y.iter().sum::<f64>() / n as f64;
        let sx = x.iter().map(|&v| (v - mx) * (v - mx)).sum::<f64>().sqrt();
        let sy = y.iter().map(|&v| (v - my) * (v - my)).sum::<f64>().sqrt();
        let denom = sx * sy;
        let capped = max_lag.min(n - 1);
        let mut values = Vec::with_capacity(capped + 1);
        for lag in 0..=capped {
            // Pair x[i] with y[i + lag] over the overlapping region.
            let cov: f64 = x[..n - lag]
                .iter()
                .zip(&y[lag..])
                .map(|(&a, &b)| (a - mx) * (b - my))
                .sum();
            values.push(if denom > 0.0 { cov / denom } else { 0.0 });
        }
        Self {
            values,
            max_lag: capped,
        }
    }
}
248
249// ─────────────────────────────────────────────────────────────────────────────
250// Trend decomposition
251// ─────────────────────────────────────────────────────────────────────────────
252
253/// Classical additive decomposition of a time series into trend, seasonal, and
254/// residual components.
255///
256/// Returns `(trend, seasonal, residual)`.  Each output vector has the same
257/// length as `data`.  When the series is too short for the requested `period`,
258/// all three vectors are zeroed.
259pub fn decompose_trend(data: &[f64], period: usize) -> (Vec<f64>, Vec<f64>, Vec<f64>) {
260    let n = data.len();
261    if n < period * 2 || period == 0 {
262        return (vec![0.0; n], vec![0.0; n], data.to_vec());
263    }
264
265    // 1. Estimate trend via centred moving average of length `period`.
266    let trend = moving_average(data, period);
267
268    // 2. De-trended = data – trend.
269    let detrended: Vec<f64> = data
270        .iter()
271        .zip(trend.iter())
272        .map(|(&d, &t)| d - t)
273        .collect();
274
275    // 3. Seasonal component: average of de-trended values at each phase.
276    let mut phase_sum = vec![0.0_f64; period];
277    let mut phase_cnt = vec![0_usize; period];
278    for (i, &v) in detrended.iter().enumerate() {
279        phase_sum[i % period] += v;
280        phase_cnt[i % period] += 1;
281    }
282    let seasonal_pattern: Vec<f64> = phase_sum
283        .iter()
284        .zip(phase_cnt.iter())
285        .map(|(&s, &c)| if c > 0 { s / c as f64 } else { 0.0 })
286        .collect();
287
288    // Centre seasonal pattern so it sums to zero.
289    let seasonal_mean = seasonal_pattern.iter().copied().sum::<f64>() / period as f64;
290    let seasonal_pattern: Vec<f64> = seasonal_pattern
291        .iter()
292        .map(|&v| v - seasonal_mean)
293        .collect();
294
295    let seasonal: Vec<f64> = (0..n).map(|i| seasonal_pattern[i % period]).collect();
296
297    // 4. Residual = data – trend – seasonal.
298    let residual: Vec<f64> = data
299        .iter()
300        .zip(trend.iter())
301        .zip(seasonal.iter())
302        .map(|((&d, &t), &s)| d - t - s)
303        .collect();
304
305    (trend, seasonal, residual)
306}
307
308// ─────────────────────────────────────────────────────────────────────────────
309// Stationarity test (simplified ADF)
310// ─────────────────────────────────────────────────────────────────────────────
311
/// Simplified Augmented Dickey-Fuller test for stationarity.
///
/// Regresses the first difference Δyₜ on the lagged level yₜ₋₁ and returns
/// `(t_statistic, is_stationary)`, flagging stationarity when the statistic
/// falls below −1.94 (approximate 5 % critical value for a large sample).
pub fn adf_test(data: &[f64]) -> (f64, bool) {
    const CRITICAL_5PCT: f64 = -1.94;
    let n = data.len();
    if n < 3 {
        return (0.0, false);
    }
    // Regression pairs: (level yₜ₋₁, difference Δyₜ = yₜ - yₜ₋₁).
    let pairs: Vec<(f64, f64)> = data.windows(2).map(|w| (w[0], w[1] - w[0])).collect();
    let m = pairs.len() as f64;
    let mean_x = pairs.iter().map(|p| p.0).sum::<f64>() / m;
    let mean_y = pairs.iter().map(|p| p.1).sum::<f64>() / m;

    let mut ss_xx = 0.0;
    let mut ss_xy = 0.0;
    for &(x, y) in &pairs {
        ss_xx += (x - mean_x) * (x - mean_x);
        ss_xy += (x - mean_x) * (y - mean_y);
    }
    if ss_xx == 0.0 {
        // Constant lagged level: slope is undefined.
        return (0.0, false);
    }
    let beta = ss_xy / ss_xx;
    let alpha = mean_y - beta * mean_x;

    // Residual sum of squares of the fitted regression line.
    let sse: f64 = pairs
        .iter()
        .map(|&(x, y)| {
            let r = y - (alpha + beta * x);
            r * r
        })
        .sum();
    let se_beta = ((sse / (m - 2.0)) / ss_xx).sqrt();

    // Zero / NaN standard error (perfect fit or m == 2) yields a null result.
    let t_stat = if se_beta > 0.0 { beta / se_beta } else { 0.0 };
    (t_stat, t_stat < CRITICAL_5PCT)
}
355
356// ─────────────────────────────────────────────────────────────────────────────
357// Peak detection
358// ─────────────────────────────────────────────────────────────────────────────
359
/// Find local maxima whose prominence exceeds `min_prominence`.
///
/// Prominence is measured here as the peak height above the larger of the
/// two side floors (the minimum of everything left of the peak vs. the
/// minimum of everything right of it).
pub fn find_peaks(data: &[f64], min_prominence: f64) -> Vec<usize> {
    let n = data.len();
    if n < 3 {
        return Vec::new();
    }
    (1..n - 1)
        .filter(|&i| {
            let local_max = data[i] > data[i - 1] && data[i] > data[i + 1];
            if !local_max {
                return false;
            }
            let left_floor = data[..i].iter().copied().fold(f64::INFINITY, f64::min);
            let right_floor = data[i + 1..].iter().copied().fold(f64::INFINITY, f64::min);
            data[i] - left_floor.max(right_floor) >= min_prominence
        })
        .collect()
}
385
386// ─────────────────────────────────────────────────────────────────────────────
387// Zero-crossing rate
388// ─────────────────────────────────────────────────────────────────────────────
389
/// Zero-crossing rate: fraction of adjacent sample pairs whose sign differs.
///
/// A sample counts as non-negative when `>= 0.0`.  The result lies in
/// \[0, 1\]; series shorter than two samples score 0.
pub fn zero_crossing_rate(data: &[f64]) -> f64 {
    if data.len() < 2 {
        return 0.0;
    }
    let mut crossings = 0_usize;
    for pair in data.windows(2) {
        if (pair[0] >= 0.0) != (pair[1] >= 0.0) {
            crossings += 1;
        }
    }
    crossings as f64 / (data.len() - 1) as f64
}
404
405// ─────────────────────────────────────────────────────────────────────────────
406// Power spectrum (simple DFT)
407// ─────────────────────────────────────────────────────────────────────────────
408
/// One-sided power spectral density via a direct (O(n²)) DFT.
///
/// Returns `n/2 + 1` bins for a length-`n` signal.  Interior bins are
/// doubled to fold in the energy of the mirrored negative frequencies; the
/// DC bin and (for even `n`) the Nyquist bin are not.
pub fn compute_power_spectrum(data: &[f64]) -> Vec<f64> {
    let n = data.len();
    if n == 0 {
        return Vec::new();
    }
    let nf = n as f64;
    let nyquist_bin = if n % 2 == 0 { Some(n / 2) } else { None };
    (0..n / 2 + 1)
        .map(|k| {
            let mut re = 0.0_f64;
            let mut im = 0.0_f64;
            for (t, &x) in data.iter().enumerate() {
                let phase = -2.0 * PI * k as f64 * t as f64 / nf;
                re += x * phase.cos();
                im += x * phase.sin();
            }
            let power = (re * re + im * im) / (nf * nf);
            if k == 0 || nyquist_bin == Some(k) {
                power
            } else {
                2.0 * power
            }
        })
        .collect()
}
436
437// ─────────────────────────────────────────────────────────────────────────────
438// Lomb-Scargle periodogram
439// ─────────────────────────────────────────────────────────────────────────────
440
/// Lomb-Scargle periodogram for unevenly-sampled data.
#[derive(Debug, Clone)]
pub struct Periodogram {
    /// Frequencies (Hz, i.e. cycles per time unit — not angular) at which
    /// the power was evaluated.
    pub frequencies: Vec<f64>,
    /// Normalised Lomb-Scargle power at each frequency.
    pub power: Vec<f64>,
}

impl Periodogram {
    /// Compute the Lomb-Scargle periodogram of (`times`, `values`).
    ///
    /// Inputs are truncated to the shorter length.  `n_freqs` frequencies
    /// are sampled linearly from `1/T` towards `n/(2T)` (exclusive), where
    /// `T = times[n-1] - times[0]`.  Returns empty vectors when there are
    /// fewer than two samples, `n_freqs == 0`, the time span is
    /// non-positive, or the values have zero variance.
    pub fn compute(times: &[f64], values: &[f64], n_freqs: usize) -> Self {
        let n = times.len().min(values.len());
        if n < 2 || n_freqs == 0 {
            return Self {
                frequencies: vec![],
                power: vec![],
            };
        }
        let mean = values[..n].iter().sum::<f64>() / n as f64;
        let variance = values[..n].iter().map(|&v| (v - mean).powi(2)).sum::<f64>() / n as f64;
        let t_span = times[n - 1] - times[0];
        if t_span <= 0.0 || variance == 0.0 {
            return Self {
                frequencies: vec![],
                power: vec![],
            };
        }
        let f_min = 1.0 / t_span;
        let f_max = (n as f64) / (2.0 * t_span);

        let mut frequencies = Vec::with_capacity(n_freqs);
        let mut power = Vec::with_capacity(n_freqs);

        for fi in 0..n_freqs {
            // `n_freqs >= 1` is guaranteed by the guard above.
            let freq = f_min + (f_max - f_min) * fi as f64 / n_freqs as f64;
            let omega = 2.0 * PI * freq;

            // Time offset τ that makes the shifted sine/cosine bases
            // orthogonal: tan(2ωτ) = Σ sin(2ωtᵢ) / Σ cos(2ωtᵢ).
            let sin2: f64 = (0..n).map(|i| (2.0 * omega * times[i]).sin()).sum();
            let cos2: f64 = (0..n).map(|i| (2.0 * omega * times[i]).cos()).sum();
            let tau = (sin2 / cos2).atan() / (2.0 * omega);

            // Basis norms (cc, ss) and projections of the centred data
            // (yc, ys) onto the τ-shifted bases, in a single pass.
            let mut cc = 0.0_f64;
            let mut ss = 0.0_f64;
            let mut yc = 0.0_f64;
            let mut ys = 0.0_f64;
            for i in 0..n {
                let arg = omega * (times[i] - tau);
                let (s, c) = arg.sin_cos();
                cc += c * c;
                ss += s * s;
                let dv = values[i] - mean;
                yc += dv * c;
                ys += dv * s;
            }

            // Classical variance-normalised Lomb-Scargle power.
            let p = 0.5 / variance
                * (if cc > 0.0 { yc * yc / cc } else { 0.0 }
                    + if ss > 0.0 { ys * ys / ss } else { 0.0 });

            frequencies.push(freq);
            power.push(p);
        }
        Self { frequencies, power }
    }
}
515
516// ─────────────────────────────────────────────────────────────────────────────
517// Hurst exponent (R/S analysis)
518// ─────────────────────────────────────────────────────────────────────────────
519
/// Estimate the Hurst exponent via R/S (rescaled range) analysis.
///
/// Splits the series into dyadic windows (length 4, 8, 16, …), averages the
/// rescaled range R/S per window length, and regresses ln(R/S) on ln(N);
/// the OLS slope is the Hurst estimate.  Returns `None` for series shorter
/// than 8 samples or when the regression is degenerate.
pub fn hurst_exponent(data: &[f64]) -> Option<f64> {
    let n = data.len();
    if n < 8 {
        return None;
    }
    let max_pow = (n as f64).log2().floor() as u32;
    if max_pow < 2 {
        return None;
    }

    let mut xs = Vec::new(); // ln(window length)
    let mut ys = Vec::new(); // ln(mean R/S at that length)

    for pow in 2..=max_pow {
        let win = 1_usize << pow;
        if win > n {
            break;
        }
        let n_windows = n / win;
        let mut rs_total = 0.0_f64;
        for w in 0..n_windows {
            let seg = &data[w * win..(w + 1) * win];
            let mean = seg.iter().sum::<f64>() / win as f64;
            let std =
                (seg.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / win as f64).sqrt();
            if std == 0.0 {
                // A flat window has zero range; skip it rather than divide by 0.
                continue;
            }
            // Range of the running sum of deviations from the window mean.
            let mut run = 0.0_f64;
            let mut lo = f64::INFINITY;
            let mut hi = f64::NEG_INFINITY;
            for &x in seg {
                run += x - mean;
                lo = lo.min(run);
                hi = hi.max(run);
            }
            rs_total += (hi - lo) / std;
        }
        let rs_mean = rs_total / n_windows as f64;
        if rs_mean > 0.0 {
            xs.push((win as f64).ln());
            ys.push(rs_mean.ln());
        }
    }

    if xs.len() < 2 {
        return None;
    }
    // OLS slope of ln(R/S) against ln(N).
    let m = xs.len() as f64;
    let mx = xs.iter().sum::<f64>() / m;
    let my = ys.iter().sum::<f64>() / m;
    let mut num = 0.0;
    let mut den = 0.0;
    for (&x, &y) in xs.iter().zip(&ys) {
        num += (x - mx) * (y - my);
        den += (x - mx) * (x - mx);
    }
    if den == 0.0 { None } else { Some(num / den) }
}
586
587// ─────────────────────────────────────────────────────────────────────────────
588// Change-point detection (CUSUM)
589// ─────────────────────────────────────────────────────────────────────────────
590
/// CUSUM-based change-point detector.
#[derive(Debug, Clone)]
pub struct ChangePointDetection {
    /// Detection threshold, in multiples of the series' standard deviation.
    pub threshold: f64,
}

impl ChangePointDetection {
    /// Create a detector that fires when either CUSUM arm exceeds
    /// `threshold` standard deviations.
    pub fn new(threshold: f64) -> Self {
        Self { threshold }
    }

    /// Detect change points in `data`.
    ///
    /// Runs two one-sided CUSUM accumulators (upward and downward shifts)
    /// against the global mean; both reset after each detection.  Returns
    /// the indices at which either accumulator crossed the limit.
    pub fn detect(&self, data: &[f64]) -> Vec<usize> {
        let n = data.len();
        if n < 2 {
            return Vec::new();
        }
        let mean = data.iter().sum::<f64>() / n as f64;
        let std = (data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / n as f64).sqrt();
        if std == 0.0 {
            // A constant series can never shift level.
            return Vec::new();
        }
        let limit = self.threshold * std;
        let mut up = 0.0_f64;
        let mut down = 0.0_f64;
        let mut hits = Vec::new();
        for (i, &x) in data.iter().enumerate() {
            up = (up + x - mean).max(0.0);
            down = (down - x + mean).max(0.0);
            if up > limit || down > limit {
                hits.push(i);
                up = 0.0;
                down = 0.0;
            }
        }
        hits
    }
}
634
635// ─────────────────────────────────────────────────────────────────────────────
636// Filters
637// ─────────────────────────────────────────────────────────────────────────────
638
/// Filter types available through [`apply_filter`].
#[derive(Debug, Clone)]
pub enum TimeSeriesFilter {
    /// Simplified first-order Butterworth low-pass filter parameterised by
    /// normalised cut-off frequency `fc` ∈ (0, 0.5).
    ButterworthLowpass {
        /// Normalised cut-off frequency in (0, 0.5); clamped internally.
        fc: f64,
    },
    /// Running median filter of the given window size.
    Median {
        /// Window size (must be odd; widened by one internally when even).
        window: usize,
    },
    /// Simplified Savitzky-Golay smoothing using a polynomial of degree 2
    /// over the specified window.
    SavitzkyGolay {
        /// Half-window size (total window = `2 * half_window + 1`).
        half_window: usize,
    },
}
660
661/// Apply a [`TimeSeriesFilter`] to `data` and return the filtered signal.
662pub fn apply_filter(data: &[f64], filter: &TimeSeriesFilter) -> Vec<f64> {
663    match filter {
664        TimeSeriesFilter::ButterworthLowpass { fc } => butterworth_lowpass(data, *fc),
665        TimeSeriesFilter::Median { window } => median_filter(data, *window),
666        TimeSeriesFilter::SavitzkyGolay { half_window } => savitzky_golay(data, *half_window),
667    }
668}
669
/// First-order IIR low-pass step approximating a Butterworth response.
///
/// `fc` is the normalised cut-off frequency, clamped into
/// `[1e-6, 0.4999]`; the smoothing factor is α = 2πfc / (2πfc + 1).
fn butterworth_lowpass(data: &[f64], fc: f64) -> Vec<f64> {
    if data.is_empty() {
        return Vec::new();
    }
    let fc = fc.clamp(1e-6, 0.4999);
    let w = 2.0 * PI * fc;
    let alpha = w / (w + 1.0);
    let mut state = data[0];
    let mut out = vec![state];
    for &x in &data[1..] {
        state += alpha * (x - state);
        out.push(state);
    }
    out
}
685
/// Running median with an odd-width window (even widths are widened by one).
///
/// Windows are clipped at the array edges, so boundary outputs use fewer
/// samples.  NaNs compare as equal, so they do not panic the sort.
fn median_filter(data: &[f64], window: usize) -> Vec<f64> {
    let width = if window % 2 == 0 { window + 1 } else { window }.max(1);
    let half = width / 2;
    let n = data.len();
    let mut out = Vec::with_capacity(n);
    for i in 0..n {
        let lo = i.saturating_sub(half);
        let hi = (i + half + 1).min(n);
        let mut vals = data[lo..hi].to_vec();
        vals.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        out.push(vals[vals.len() / 2]);
    }
    out
}
706
/// Simplified Savitzky-Golay smoothing: a least-squares even quadratic
/// (`y = c0 + c2·t²`) is fitted over each window and evaluated at the
/// window centre (`t = 0`), i.e. the smoothed value is `c0`.
///
/// For a symmetric (interior) window the odd-order moments vanish, so this
/// equals the full degree-2 regression value; edge windows use the same
/// even-only fit over the clipped range.  `half_window == 0` or empty
/// input returns the data unchanged.
fn savitzky_golay(data: &[f64], half_window: usize) -> Vec<f64> {
    let n = data.len();
    if half_window == 0 || n == 0 {
        return data.to_vec();
    }
    let m = half_window as i64;

    (0..n)
        .map(|i| {
            let lo = (i as i64 - m).max(0) as usize;
            let hi = (i as i64 + m + 1).min(n as i64) as usize;
            let segment = &data[lo..hi];
            let k = segment.len() as f64;
            let mean = segment.iter().copied().sum::<f64>() / k;
            // Offsets of each sample relative to the evaluation point `i`.
            let offset: Vec<f64> = (lo..hi).map(|j| j as f64 - i as f64).collect();
            // Normal-equation sums for the even-power design [1, t²].
            let s0 = k;
            let s2: f64 = offset.iter().map(|&t| t * t).sum();
            let s4: f64 = offset.iter().map(|&t| t.powi(4)).sum();
            let sy: f64 = segment.iter().copied().sum::<f64>();
            let st2y: f64 = segment
                .iter()
                .zip(offset.iter())
                .map(|(&y, &t)| y * t * t)
                .sum();
            let det = s0 * s4 - s2 * s2;
            if det == 0.0 {
                // Degenerate window (reduced to a single offset): plain mean.
                mean
            } else {
                // Cramer's rule for c0 of the fitted quadratic.
                (s4 * sy - s2 * st2y) / det
            }
        })
        .collect()
}
748
749// ─────────────────────────────────────────────────────────────────────────────
750// AR(p) model
751// ─────────────────────────────────────────────────────────────────────────────
752
/// Autoregressive model of order `p`.
///
/// Fit with [`ArModel::fit`] (Yule-Walker via Levinson-Durbin), then
/// predict with [`ArModel::forecast`].
#[derive(Debug, Clone)]
pub struct ArModel {
    /// AR coefficients φ₁ … φₚ (φ₁ multiplies the most recent lag).
    pub coefficients: Vec<f64>,
    /// Estimated noise variance.
    pub noise_variance: f64,
}
761
impl ArModel {
    /// Fit an AR(p) model to `data` using the Yule-Walker equations.
    ///
    /// The sample ACF is rescaled by the population variance to recover an
    /// autocovariance sequence `r`, and the resulting Toeplitz system is
    /// solved with the Levinson-Durbin recursion.
    ///
    /// Returns `None` when `data.len() <= p` or `p == 0`.
    pub fn fit(data: &[f64], p: usize) -> Option<Self> {
        if p == 0 || data.len() <= p {
            return None;
        }
        let acf = Autocorrelation::compute(data, p);
        if acf.values.len() < p + 1 {
            // ACF was truncated (series shorter than the requested lags).
            return None;
        }
        // Build Toeplitz system R * φ = r
        // R[i][j] = acf[|i-j|] * acf[0]   (use sample variance as scale)
        let n = data.len() as f64;
        let mean = data.iter().copied().sum::<f64>() / n;
        let var = data.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / n;

        // Levinson-Durbin recursion for the Yule-Walker equations.
        // `r[k]` is the lag-k autocovariance; `err` is the prediction-error
        // variance, shrinking as each model order is added.
        let r: Vec<f64> = acf.values.iter().map(|&v| v * var).collect();
        let mut phi = vec![0.0_f64; p];
        let mut err = r[0];
        let mut phi_prev = vec![0.0_f64; p];

        for i in 0..p {
            // Innovation not yet explained by the order-i model.
            let mut lambda = r[i + 1];
            for k in 0..i {
                lambda -= phi_prev[k] * r[i - k];
            }
            if err.abs() < 1e-15 {
                // Signal already perfectly predicted; stop raising the order.
                break;
            }
            // Reflection coefficient for the order update.
            let kappa = lambda / err;
            let mut phi_new = vec![0.0_f64; p];
            phi_new[i] = kappa;
            // Earlier coefficients are corrected by the reversed tail.
            for k in 0..i {
                phi_new[k] = phi_prev[k] - kappa * phi_prev[i - 1 - k];
            }
            err *= 1.0 - kappa * kappa;
            phi = phi_new.clone();
            phi_prev = phi_new;
        }

        Some(Self {
            coefficients: phi,
            noise_variance: err.max(0.0),
        })
    }

    /// Forecast `steps` steps ahead using the fitted model.
    ///
    /// `history` must contain at least `p` values; otherwise (or when
    /// `steps == 0`) an empty vector is returned.  Each prediction is
    /// appended to the working buffer, so later steps use earlier forecasts.
    pub fn forecast(&self, history: &[f64], steps: usize) -> Vec<f64> {
        let p = self.coefficients.len();
        if history.len() < p || steps == 0 {
            return vec![];
        }
        // Seed with the `p` most recent observations.
        let mut buf: Vec<f64> = history[history.len() - p..].to_vec();
        let mut out = Vec::with_capacity(steps);
        for _ in 0..steps {
            // φ₁ multiplies the most recent value, φₚ the oldest.
            let pred: f64 = self
                .coefficients
                .iter()
                .enumerate()
                .map(|(i, &c)| c * buf[buf.len() - 1 - i])
                .sum();
            out.push(pred);
            buf.push(pred);
        }
        out
    }
}
834
835// ─────────────────────────────────────────────────────────────────────────────
836// Missing-value interpolation
837// ─────────────────────────────────────────────────────────────────────────────
838
/// Fill `NaN` gaps in `data` by straight-line interpolation between the
/// nearest valid neighbours.
///
/// Leading `NaN`s copy the first valid sample and trailing `NaN`s the last
/// one; an all-`NaN` input is returned untouched.
pub fn interpolate_linear(data: &[f64]) -> Vec<f64> {
    let n = data.len();
    let mut out = data.to_vec();
    let mut i = 0;
    while i < n {
        if !out[i].is_nan() {
            i += 1;
            continue;
        }
        // Nearest valid samples on each side of the gap containing `i`.
        let left = (0..i).rev().find(|&k| !out[k].is_nan());
        let right = (i + 1..n).find(|&k| !out[k].is_nan());
        match (left, right) {
            (Some(a), Some(b)) => {
                // Interior gap: linear ramp from out[a] to out[b].
                let step = (out[b] - out[a]) / (b - a) as f64;
                for j in a + 1..b {
                    out[j] = out[a] + step * (j - a) as f64;
                }
                i = b + 1;
            }
            (None, Some(b)) => {
                // Leading gap: back-fill with the first valid value.
                let v = out[b];
                out[..b].fill(v);
                i = b + 1;
            }
            (Some(a), None) => {
                // Trailing gap: forward-fill with the last valid value.
                let v = out[a];
                out[a + 1..].fill(v);
                i = n;
            }
            // No valid samples at all: nothing to interpolate against.
            (None, None) => i = n,
        }
    }
    out
}
881
/// Fill `NaN` values in `data` using simple cubic-spline-like interpolation
/// (Catmull-Rom tangents) between the nearest valid neighbours.
///
/// Gaps before the first / after the last valid sample are filled with the
/// first / last valid value.  When every sample is `NaN`, the input is
/// returned unchanged.
pub fn interpolate_spline(data: &[f64]) -> Vec<f64> {
    // Collect valid (index, value) pairs — the spline knots.
    let knots: Vec<(usize, f64)> = data
        .iter()
        .enumerate()
        .filter(|&(_, v)| !v.is_nan())
        .map(|(i, &v)| (i, v))
        .collect();
    if knots.is_empty() {
        return data.to_vec();
    }
    let n = data.len();
    let mut out = data.to_vec();
    for i in 0..n {
        if !out[i].is_nan() {
            continue;
        }
        // Number of knots at or before `i`; brackets the gap containing `i`.
        let right = knots.partition_point(|&(ki, _)| ki <= i);
        if right == 0 {
            // Before the first knot: hold the first valid value.
            out[i] = knots[0].1;
        } else if right >= knots.len() {
            // After the last knot: hold the last valid value.
            out[i] = knots[knots.len() - 1].1;
        } else {
            let (x0, y0) = knots[right - 1];
            let (x1, y1) = knots[right];
            let span = (x1 - x0) as f64;
            let t = (i - x0) as f64 / span;
            // Catmull-Rom tangents from neighbouring knots, falling back to the
            // one-sided secant slope at the boundaries.  All tangents are in
            // slope units (dy per index); dividing the boundary difference by
            // `span` keeps it consistent with the interior case, since the
            // Hermite evaluation below multiplies tangents by `span`.
            // (Previously the raw difference `y1 - y0` was used, over-scaling
            // boundary tangents by a factor of `span` and causing overshoot.)
            let m0 = if right >= 2 {
                let (xp, yp) = knots[right - 2];
                (y1 - yp) / (x1 - xp) as f64
            } else {
                (y1 - y0) / span
            };
            let m1 = if right + 1 < knots.len() {
                let (xn, yn) = knots[right + 1];
                (yn - y0) / (xn - x0) as f64
            } else {
                (y1 - y0) / span
            };
            // Cubic Hermite basis functions.
            let h00 = 2.0 * t.powi(3) - 3.0 * t.powi(2) + 1.0;
            let h10 = t.powi(3) - 2.0 * t.powi(2) + t;
            let h01 = -2.0 * t.powi(3) + 3.0 * t.powi(2);
            let h11 = t.powi(3) - t.powi(2);
            out[i] = h00 * y0 + h10 * span * m0 + h01 * y1 + h11 * span * m1;
        }
    }
    out
}
935
936// ─────────────────────────────────────────────────────────────────────────────
937// Tests
938// ─────────────────────────────────────────────────────────────────────────────
939
#[cfg(test)]
mod tests {
    // Unit tests for the time-series utilities defined in this module, grouped
    // by the function or type under test (see the section dividers below).
    use super::*;

    // ── helpers ────────────────────────────────────────────────────────────

    /// Build `n` samples of a unit-amplitude sine at `freq` Hz, sampled at `sr` Hz.
    fn sine_wave(n: usize, freq: f64, sr: f64) -> Vec<f64> {
        (0..n)
            .map(|i| (2.0 * PI * freq * i as f64 / sr).sin())
            .collect()
    }

    /// `n` evenly spaced points from `start` to `end` (inclusive at both ends;
    /// empty for `n == 0`, a single `start` for `n == 1`).
    fn linspace(start: f64, end: f64, n: usize) -> Vec<f64> {
        if n == 0 {
            return vec![];
        }
        if n == 1 {
            return vec![start];
        }
        (0..n)
            .map(|i| start + (end - start) * i as f64 / (n - 1) as f64)
            .collect()
    }

    // ── TimeSeries ──────────────────────────────────────────────────────────

    #[test]
    fn test_time_series_new() {
        let ts = TimeSeries::new(vec![1.0, 2.0, 3.0], 100.0);
        assert_eq!(ts.len(), 3);
        assert!(!ts.is_empty());
    }

    #[test]
    fn test_time_series_empty() {
        let ts = TimeSeries::new(vec![], 100.0);
        assert!(ts.is_empty());
    }

    #[test]
    fn test_time_series_with_timestamps() {
        let ts = TimeSeries::with_timestamps(vec![1.0, 2.0], vec![0.0, 0.01], 100.0);
        assert!(ts.timestamps.is_some());
    }

    // ── compute_stats ───────────────────────────────────────────────────────

    #[test]
    fn test_stats_basic() {
        let ts = TimeSeries::new(vec![2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0], 1.0);
        let s = compute_stats(&ts).unwrap();
        assert!((s.mean - 5.0).abs() < 1e-9);
        // 4.571428... is the sample (n-1) variance of this data set.
        assert!((s.variance - 4.571_428_571).abs() < 1e-6);
        assert_eq!(s.min, 2.0);
        assert_eq!(s.max, 9.0);
    }

    #[test]
    fn test_stats_constant() {
        let ts = TimeSeries::new(vec![3.0; 10], 1.0);
        let s = compute_stats(&ts).unwrap();
        assert!((s.mean - 3.0).abs() < 1e-12);
        assert_eq!(s.std, 0.0);
        // Higher moments are defined as 0 for a zero-variance series.
        assert_eq!(s.skewness, 0.0);
        assert_eq!(s.kurtosis, 0.0);
    }

    #[test]
    fn test_stats_too_short() {
        let ts = TimeSeries::new(vec![1.0], 1.0);
        assert!(compute_stats(&ts).is_none());
    }

    // ── moving_average ──────────────────────────────────────────────────────

    #[test]
    fn test_moving_average_window_1() {
        // A window of 1 is the identity transform.
        let data = vec![1.0, 2.0, 3.0, 4.0];
        let ma = moving_average(&data, 1);
        assert_eq!(ma, data);
    }

    #[test]
    fn test_moving_average_preserves_length() {
        let data = sine_wave(64, 5.0, 100.0);
        let ma = moving_average(&data, 5);
        assert_eq!(ma.len(), data.len());
    }

    #[test]
    fn test_moving_average_constant_input() {
        // Averaging a constant signal must not change it.
        let data = vec![7.0_f64; 20];
        let ma = moving_average(&data, 5);
        for v in ma {
            assert!((v - 7.0).abs() < 1e-12);
        }
    }

    // ── ema ─────────────────────────────────────────────────────────────────

    #[test]
    fn test_ema_alpha_one() {
        // With alpha = 1 the EMA tracks the input exactly (no smoothing).
        let data = vec![1.0, 3.0, 5.0, 2.0];
        let e = ema(&data, 1.0);
        for (a, b) in e.iter().zip(data.iter()) {
            assert!((a - b).abs() < 1e-12);
        }
    }

    #[test]
    fn test_ema_length() {
        let data = vec![1.0; 50];
        assert_eq!(ema(&data, 0.3).len(), 50);
    }

    #[test]
    fn test_ema_empty() {
        assert!(ema(&[], 0.5).is_empty());
    }

    // ── Autocorrelation ─────────────────────────────────────────────────────

    #[test]
    fn test_acf_lag_zero_is_one() {
        // Any signal is perfectly correlated with itself at lag 0.
        let data = sine_wave(128, 5.0, 128.0);
        let acf = Autocorrelation::compute(&data, 20);
        assert!((acf.values[0] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_acf_length() {
        // max_lag = 10 produces lags 0..=10, i.e. 11 values.
        let data: Vec<f64> = (0..50).map(|i| i as f64).collect();
        let acf = Autocorrelation::compute(&data, 10);
        assert_eq!(acf.values.len(), 11);
    }

    #[test]
    fn test_acf_empty() {
        let acf = Autocorrelation::compute(&[], 5);
        assert!(acf.values.is_empty());
    }

    // ── CrossCorrelation ────────────────────────────────────────────────────

    #[test]
    fn test_xcorr_identical_lag_zero() {
        let data = sine_wave(64, 2.0, 64.0);
        let xcorr = CrossCorrelation::compute(&data, &data, 0);
        // lag-0 should be 1.0
        assert!((xcorr.values[0] - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_xcorr_empty() {
        let xcorr = CrossCorrelation::compute(&[], &[], 5);
        assert!(xcorr.values.is_empty());
    }

    // ── decompose_trend ─────────────────────────────────────────────────────

    #[test]
    fn test_decompose_trend_additive() {
        // Synthetic additive series: linear trend + periodic seasonal term.
        let n = 100;
        let period = 10;
        let trend: Vec<f64> = (0..n).map(|i| i as f64 * 0.1).collect();
        let seasonal: Vec<f64> = (0..n).map(|i| ((i % period) as f64).sin()).collect();
        let data: Vec<f64> = trend
            .iter()
            .zip(seasonal.iter())
            .map(|(t, s)| t + s)
            .collect();
        let (tr, _se, _re) = decompose_trend(&data, period);
        assert_eq!(tr.len(), n);
    }

    #[test]
    fn test_decompose_trend_lengths() {
        // All three components must have the same length as the input.
        let data: Vec<f64> = (0..48).map(|i| i as f64).collect();
        let (tr, se, re) = decompose_trend(&data, 12);
        assert_eq!(tr.len(), 48);
        assert_eq!(se.len(), 48);
        assert_eq!(re.len(), 48);
    }

    #[test]
    fn test_decompose_trend_too_short() {
        // When the series is shorter than the period, the trend is zero and
        // the residual carries the whole signal.
        let data = vec![1.0, 2.0, 3.0];
        let (tr, _se, re) = decompose_trend(&data, 10);
        assert_eq!(tr, vec![0.0, 0.0, 0.0]);
        assert_eq!(re, data);
    }

    // ── adf_test ────────────────────────────────────────────────────────────

    #[test]
    fn test_adf_stationary_white_noise() {
        // White noise is stationary; may or may not pass simplified test, but should not panic.
        let data: Vec<f64> = (0..200).map(|i| ((i * 7 + 3) % 17) as f64 - 8.0).collect();
        let (stat, _) = adf_test(&data);
        assert!(stat.is_finite());
    }

    #[test]
    fn test_adf_short() {
        let (stat, is_stat) = adf_test(&[1.0, 2.0]);
        assert_eq!(stat, 0.0);
        assert!(!is_stat);
    }

    // ── find_peaks ──────────────────────────────────────────────────────────

    #[test]
    fn test_find_peaks_simple() {
        let data = vec![0.0, 1.0, 0.0, 1.5, 0.0, 2.0, 0.0];
        let peaks = find_peaks(&data, 0.5);
        assert!(peaks.contains(&1));
        assert!(peaks.contains(&3));
        assert!(peaks.contains(&5));
    }

    #[test]
    fn test_find_peaks_monotone() {
        // A strictly increasing ramp has no interior local maxima.
        let data: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let peaks = find_peaks(&data, 0.1);
        assert!(peaks.is_empty());
    }

    #[test]
    fn test_find_peaks_no_false_positives() {
        let data = vec![3.0, 2.0, 1.0, 0.5, 1.0, 2.0, 1.0];
        // Only index 1 (value 2.0) is a local maximum here.
        // There should be no peak at the shoulders with high prominence threshold.
        let peaks = find_peaks(&data, 2.5);
        // With high threshold, expect no prominent peaks.
        assert!(peaks.is_empty());
    }

    // ── zero_crossing_rate ──────────────────────────────────────────────────

    #[test]
    fn test_zcr_sine() {
        let data = sine_wave(100, 10.0, 100.0);
        let zcr = zero_crossing_rate(&data);
        assert!(zcr > 0.0 && zcr <= 1.0);
    }

    #[test]
    fn test_zcr_constant_positive() {
        // A signal that never changes sign has zero crossing rate.
        let data = vec![1.0_f64; 20];
        assert_eq!(zero_crossing_rate(&data), 0.0);
    }

    #[test]
    fn test_zcr_alternating() {
        // Sign flips at every sample: the rate saturates at 1.0.
        let data = vec![1.0, -1.0, 1.0, -1.0, 1.0];
        let zcr = zero_crossing_rate(&data);
        assert!((zcr - 1.0).abs() < 1e-10);
    }

    // ── compute_power_spectrum ──────────────────────────────────────────────

    #[test]
    fn test_power_spectrum_length() {
        let data = sine_wave(64, 5.0, 64.0);
        let psd = compute_power_spectrum(&data);
        assert_eq!(psd.len(), 33); // 64/2 + 1
    }

    #[test]
    fn test_power_spectrum_non_negative() {
        // Power is a squared magnitude and can never be negative.
        let data = sine_wave(32, 3.0, 32.0);
        for &p in &compute_power_spectrum(&data) {
            assert!(p >= 0.0);
        }
    }

    #[test]
    fn test_power_spectrum_empty() {
        assert!(compute_power_spectrum(&[]).is_empty());
    }

    // ── Periodogram (Lomb-Scargle) ──────────────────────────────────────────

    #[test]
    fn test_periodogram_length() {
        let times = linspace(0.0, 10.0, 50);
        let values = sine_wave(50, 2.0, 5.0);
        let pg = Periodogram::compute(&times, &values, 20);
        assert_eq!(pg.frequencies.len(), 20);
        assert_eq!(pg.power.len(), 20);
    }

    #[test]
    fn test_periodogram_non_negative_power() {
        let times = linspace(0.0, 5.0, 30);
        let values = sine_wave(30, 1.5, 6.0);
        let pg = Periodogram::compute(&times, &values, 10);
        for &p in &pg.power {
            assert!(p >= 0.0);
        }
    }

    // ── hurst_exponent ──────────────────────────────────────────────────────

    #[test]
    fn test_hurst_range() {
        // Alternating ±1 signal: each segment has constant R/S => H ≈ 0.0 (anti-persistent).
        // We just check the estimate is in the valid range [0, 2).
        let mut data = vec![0.0_f64; 256];
        for i in 1..256 {
            data[i] = data[i - 1] + if i % 2 == 0 { 1.0 } else { -1.0 };
        }
        if let Some(h) = hurst_exponent(&data) {
            assert!((0.0..2.0).contains(&h), "hurst={h}");
        }
    }

    #[test]
    fn test_hurst_too_short() {
        assert!(hurst_exponent(&[1.0, 2.0, 3.0]).is_none());
    }

    // ── ChangePointDetection ────────────────────────────────────────────────

    #[test]
    fn test_change_point_step() {
        // Step function: flat at 0, jumps to 10 at index 25.
        let mut data = vec![0.0_f64; 50];
        for v in &mut data[25..] {
            *v = 10.0;
        }
        let cpd = ChangePointDetection::new(1.0);
        let cps = cpd.detect(&data);
        assert!(!cps.is_empty());
        // Change point should be somewhere around index 25.
        let any_near = cps.iter().any(|&cp| (20..=35).contains(&cp));
        assert!(any_near, "change points: {cps:?}");
    }

    #[test]
    fn test_change_point_constant() {
        let data = vec![5.0_f64; 100];
        let cpd = ChangePointDetection::new(1.0);
        assert!(cpd.detect(&data).is_empty());
    }

    // ── apply_filter ────────────────────────────────────────────────────────

    #[test]
    fn test_butterworth_preserves_length() {
        let data = sine_wave(100, 5.0, 100.0);
        let f = TimeSeriesFilter::ButterworthLowpass { fc: 0.1 };
        assert_eq!(apply_filter(&data, &f).len(), 100);
    }

    #[test]
    fn test_median_filter_preserves_length() {
        let data: Vec<f64> = (0..40).map(|i| i as f64).collect();
        let f = TimeSeriesFilter::Median { window: 5 };
        assert_eq!(apply_filter(&data, &f).len(), 40);
    }

    #[test]
    fn test_savitzky_golay_preserves_length() {
        let data = sine_wave(50, 3.0, 50.0);
        let f = TimeSeriesFilter::SavitzkyGolay { half_window: 5 };
        assert_eq!(apply_filter(&data, &f).len(), 50);
    }

    #[test]
    fn test_butterworth_smooths_noise() {
        let clean = sine_wave(200, 1.0, 200.0);
        // Add high-frequency "noise" by mixing a higher-freq component.
        let noisy: Vec<f64> = clean
            .iter()
            .enumerate()
            .map(|(i, &v)| v + 0.5 * (2.0 * PI * 50.0 * i as f64 / 200.0).sin())
            .collect();
        let filtered = apply_filter(&noisy, &TimeSeriesFilter::ButterworthLowpass { fc: 0.05 });
        // The filtered signal should have lower variance than the noisy one.
        let var_noisy: f64 = noisy.iter().map(|&x| x * x).sum::<f64>() / noisy.len() as f64;
        let var_filtered: f64 =
            filtered.iter().map(|&x| x * x).sum::<f64>() / filtered.len() as f64;
        assert!(var_filtered <= var_noisy + 1e-6);
    }

    // ── ArModel ─────────────────────────────────────────────────────────────

    #[test]
    fn test_ar_fit_returns_coefficients() {
        // An order-3 model yields exactly 3 coefficients.
        let data: Vec<f64> = (0..100).map(|i| (i as f64 * 0.1).sin()).collect();
        let model = ArModel::fit(&data, 3).unwrap();
        assert_eq!(model.coefficients.len(), 3);
    }

    #[test]
    fn test_ar_forecast_length() {
        let data: Vec<f64> = (0..100).map(|i| i as f64 * 0.01).collect();
        let model = ArModel::fit(&data, 2).unwrap();
        let fc = model.forecast(&data, 5);
        assert_eq!(fc.len(), 5);
    }

    #[test]
    fn test_ar_too_short() {
        // Too few samples for the requested order, empty input, and order 0
        // must all be rejected.
        assert!(ArModel::fit(&[1.0, 2.0], 3).is_none());
        assert!(ArModel::fit(&[], 1).is_none());
        assert!(ArModel::fit(&[1.0], 0).is_none());
    }

    // ── interpolate_linear ──────────────────────────────────────────────────

    #[test]
    fn test_interpolate_linear_basic() {
        let data = vec![1.0, f64::NAN, 3.0];
        let result = interpolate_linear(&data);
        assert!((result[1] - 2.0).abs() < 1e-10);
    }

    #[test]
    fn test_interpolate_linear_leading_nan() {
        // Leading NaNs are back-filled with the first valid value.
        let data = vec![f64::NAN, f64::NAN, 5.0];
        let result = interpolate_linear(&data);
        assert_eq!(result[0], 5.0);
        assert_eq!(result[1], 5.0);
    }

    #[test]
    fn test_interpolate_linear_no_nan() {
        let data = vec![1.0, 2.0, 3.0];
        let result = interpolate_linear(&data);
        assert_eq!(result, data);
    }

    // ── interpolate_spline ──────────────────────────────────────────────────

    #[test]
    fn test_interpolate_spline_basic() {
        let data = vec![0.0, f64::NAN, 2.0, f64::NAN, 4.0];
        let result = interpolate_spline(&data);
        // The filled values should be finite.
        for &v in &result {
            assert!(v.is_finite(), "spline result contains non-finite value");
        }
    }

    #[test]
    fn test_interpolate_spline_no_nan() {
        let data = vec![1.0, 2.0, 3.0, 4.0];
        let result = interpolate_spline(&data);
        assert_eq!(result, data);
    }

    #[test]
    fn test_interpolate_spline_all_nan() {
        let data = vec![f64::NAN; 5];
        let result = interpolate_spline(&data);
        // All NaN, no valid data — output is the same as input.
        assert!(result.iter().all(|v| v.is_nan()));
    }
}
1399}