ghostflow_ml/time_series.rs

//! Time Series - exponential smoothing (simple, Holt, Holt-Winters), a simplified AR(p)-based ARIMA, and moving-average smoothers

use ghostflow_core::Tensor;

/// Simple Exponential Smoothing (SES): a single smoothed level; forecasts are flat at the final level.
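///
/// A minimal usage sketch. The `ghostflow_ml::time_series` path is an assumption about how
/// this module is exported, so the example is marked `ignore` rather than run as a doctest.
///
/// ```ignore
/// use ghostflow_core::Tensor;
/// use ghostflow_ml::time_series::SimpleExponentialSmoothing;
///
/// let y = Tensor::from_slice(&[10.0f32, 12.0, 11.0, 13.0, 12.0], &[5]).unwrap();
/// let mut ses = SimpleExponentialSmoothing::new(0.3);
/// ses.fit(&y);
/// let forecast = ses.predict(3); // flat forecast: three copies of the final smoothed level
/// ```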
pub struct SimpleExponentialSmoothing {
    pub alpha: f32,
    level_: Option<f32>,
    fitted_: Option<Vec<f32>>,
}

impl SimpleExponentialSmoothing {
    pub fn new(alpha: f32) -> Self {
        SimpleExponentialSmoothing {
            alpha: alpha.clamp(0.0, 1.0),
            level_: None,
            fitted_: None,
        }
    }

    pub fn fit(&mut self, y: &Tensor) {
        let y_data = y.data_f32();
        let n = y_data.len();
        assert!(n > 0, "Cannot fit on an empty series");

        // Smoothing recursion: level_t = alpha * y_t + (1 - alpha) * level_{t-1}
        let mut level = y_data[0];
        let mut fitted = vec![level];

        for i in 1..n {
            level = self.alpha * y_data[i] + (1.0 - self.alpha) * level;
            fitted.push(level);
        }

        self.level_ = Some(level);
        self.fitted_ = Some(fitted);
    }

    /// Forecast `steps` values; SES forecasts are flat at the final smoothed level.
    pub fn predict(&self, steps: usize) -> Tensor {
        let level = self.level_.expect("Model not fitted");
        let predictions = vec![level; steps];
        Tensor::from_slice(&predictions, &[steps]).unwrap()
    }

    pub fn fitted_values(&self) -> Option<&Vec<f32>> {
        self.fitted_.as_ref()
    }
}

/// Holt's Linear Trend (Double Exponential Smoothing): a smoothed level plus a smoothed trend.
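///
/// Update equations, as implemented in `fit` below:
/// `level_t = alpha * y_t + (1 - alpha) * (level_{t-1} + trend_{t-1})`
/// `trend_t = beta * (level_t - level_{t-1}) + (1 - beta) * trend_{t-1}`
/// and the h-step-ahead forecast is `level_n + h * trend_n`.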
pub struct HoltLinear {
    pub alpha: f32,
    pub beta: f32,
    level_: Option<f32>,
    trend_: Option<f32>,
    fitted_: Option<Vec<f32>>,
}

impl HoltLinear {
    pub fn new(alpha: f32, beta: f32) -> Self {
        HoltLinear {
            alpha: alpha.clamp(0.0, 1.0),
            beta: beta.clamp(0.0, 1.0),
            level_: None,
            trend_: None,
            fitted_: None,
        }
    }

    pub fn fit(&mut self, y: &Tensor) {
        let y_data = y.data_f32();
        let n = y_data.len();
        assert!(n > 0, "Cannot fit on an empty series");

        // Initialize the level at the first observation and the trend at the first difference.
        let mut level = y_data[0];
        let mut trend = if n > 1 { y_data[1] - y_data[0] } else { 0.0 };
        let mut fitted = vec![level];

        for i in 1..n {
            let prev_level = level;
            level = self.alpha * y_data[i] + (1.0 - self.alpha) * (level + trend);
            trend = self.beta * (level - prev_level) + (1.0 - self.beta) * trend;
            fitted.push(level + trend);
        }

        self.level_ = Some(level);
        self.trend_ = Some(trend);
        self.fitted_ = Some(fitted);
    }

    /// Forecast `steps` values by extrapolating the final level and trend.
    pub fn predict(&self, steps: usize) -> Tensor {
        let level = self.level_.expect("Model not fitted");
        let trend = self.trend_.unwrap();

        let predictions: Vec<f32> = (1..=steps)
            .map(|h| level + h as f32 * trend)
            .collect();

        Tensor::from_slice(&predictions, &[steps]).unwrap()
    }
}

/// Holt-Winters (Triple Exponential Smoothing): level, trend, and a seasonal component with period `seasonal_periods`.
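///
/// A minimal usage sketch (module path assumed as in the examples above; marked `ignore`):
///
/// ```ignore
/// use ghostflow_core::Tensor;
/// use ghostflow_ml::time_series::HoltWinters;
///
/// // Two full seasons of period-4 data are the minimum `fit` accepts.
/// let y = Tensor::from_slice(&[10.0f32, 20.0, 30.0, 40.0, 12.0, 22.0, 32.0, 42.0], &[8]).unwrap();
/// let mut hw = HoltWinters::new(4).alpha(0.3).beta(0.1).gamma(0.2);
/// hw.fit(&y);
/// let forecast = hw.predict(4);
/// ```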
pub struct HoltWinters {
    pub alpha: f32,
    pub beta: f32,
    pub gamma: f32,
    pub seasonal_periods: usize,
    pub seasonal: SeasonalType,
    level_: Option<f32>,
    trend_: Option<f32>,
    seasonal_: Option<Vec<f32>>,
    fitted_: Option<Vec<f32>>,
}

#[derive(Clone, Copy)]
pub enum SeasonalType {
    Additive,
    Multiplicative,
}

impl HoltWinters {
    pub fn new(seasonal_periods: usize) -> Self {
        HoltWinters {
            alpha: 0.2,
            beta: 0.1,
            gamma: 0.1,
            seasonal_periods,
            seasonal: SeasonalType::Additive,
            level_: None,
            trend_: None,
            seasonal_: None,
            fitted_: None,
        }
    }

    pub fn alpha(mut self, a: f32) -> Self { self.alpha = a.clamp(0.0, 1.0); self }
    pub fn beta(mut self, b: f32) -> Self { self.beta = b.clamp(0.0, 1.0); self }
    pub fn gamma(mut self, g: f32) -> Self { self.gamma = g.clamp(0.0, 1.0); self }

    pub fn fit(&mut self, y: &Tensor) {
        let y_data = y.data_f32();
        let n = y_data.len();
        let m = self.seasonal_periods;

        assert!(m > 0, "seasonal_periods must be at least 1");
        if n < 2 * m {
            panic!("Need at least 2 seasonal periods of data");
        }

        // Initialize the level as the first-season mean and the trend as the average
        // per-step change between the first and second seasons.
        let mut level: f32 = y_data[..m].iter().sum::<f32>() / m as f32;
        let mut trend = (y_data[m..2 * m].iter().sum::<f32>() - y_data[..m].iter().sum::<f32>())
            / (m * m) as f32;

        // Initialize seasonal components from the first season.
        let mut seasonal: Vec<f32> = match self.seasonal {
            SeasonalType::Additive => (0..m).map(|i| y_data[i] - level).collect(),
            SeasonalType::Multiplicative => (0..m).map(|i| y_data[i] / level.max(1e-10)).collect(),
        };

        let mut fitted = Vec::with_capacity(n);

        for i in 0..n {
            let s_idx = i % m;

            let forecast = match self.seasonal {
                SeasonalType::Additive => level + trend + seasonal[s_idx],
                SeasonalType::Multiplicative => (level + trend) * seasonal[s_idx],
            };
            fitted.push(forecast);

            // Only update the components once a full season of data has been seen.
            if i >= m {
                let prev_level = level;

                match self.seasonal {
                    SeasonalType::Additive => {
                        level = self.alpha * (y_data[i] - seasonal[s_idx])
                            + (1.0 - self.alpha) * (level + trend);
                        trend = self.beta * (level - prev_level) + (1.0 - self.beta) * trend;
                        seasonal[s_idx] = self.gamma * (y_data[i] - level)
                            + (1.0 - self.gamma) * seasonal[s_idx];
                    }
                    SeasonalType::Multiplicative => {
                        level = self.alpha * (y_data[i] / seasonal[s_idx].max(1e-10))
                            + (1.0 - self.alpha) * (level + trend);
                        trend = self.beta * (level - prev_level) + (1.0 - self.beta) * trend;
                        seasonal[s_idx] = self.gamma * (y_data[i] / level.max(1e-10))
                            + (1.0 - self.gamma) * seasonal[s_idx];
                    }
                }
            }
        }

        self.level_ = Some(level);
        self.trend_ = Some(trend);
        self.seasonal_ = Some(seasonal);
        self.fitted_ = Some(fitted);
    }

    /// Forecast `steps` values, continuing the seasonal cycle from the end of the training series.
    pub fn predict(&self, steps: usize) -> Tensor {
        let level = self.level_.expect("Model not fitted");
        let trend = self.trend_.unwrap();
        let seasonal = self.seasonal_.as_ref().unwrap();
        let m = self.seasonal_periods;
        // The seasonal position at horizon h is offset by the training length,
        // so the forecast picks up the cycle where the data left off.
        let n = self.fitted_.as_ref().unwrap().len();

        let predictions: Vec<f32> = (1..=steps)
            .map(|h| {
                let s_idx = (n + h - 1) % m;
                match self.seasonal {
                    SeasonalType::Additive => level + h as f32 * trend + seasonal[s_idx],
                    SeasonalType::Multiplicative => (level + h as f32 * trend) * seasonal[s_idx],
                }
            })
            .collect();

        Tensor::from_slice(&predictions, &[steps]).unwrap()
    }
}

/// Simple ARIMA (AutoRegressive Integrated Moving Average).
///
/// Simplified implementation: the series is differenced `d` times and an AR(p) model is
/// fitted via the Yule-Walker equations; the MA(`q`) component is not implemented.
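///
/// With sample autocovariances `r[0..=p]` of the differenced, mean-centered series, the AR
/// coefficients `phi` solve the Toeplitz system `R * phi = r[1..=p]`, where `R[i][j] = r[|i - j|]`;
/// the sample mean of the differenced series serves as the intercept (see `fit` below).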
pub struct ARIMA {
    pub p: usize, // AR order
    pub d: usize, // Differencing order
    pub q: usize, // MA order (declared but not implemented)
    pub max_iter: usize, // Not used by the current Yule-Walker fit
    ar_coef_: Option<Vec<f32>>,
    intercept_: f32,
    // Original (undifferenced) training series, kept so forecasts can be undifferenced.
    original_values_: Option<Vec<f32>>,
}

impl ARIMA {
    pub fn new(p: usize, d: usize, q: usize) -> Self {
        ARIMA {
            p,
            d,
            q,
            max_iter: 100,
            ar_coef_: None,
            intercept_: 0.0,
            original_values_: None,
        }
    }

    /// Apply `d` rounds of first differencing.
    fn difference(y: &[f32], d: usize) -> Vec<f32> {
        let mut result = y.to_vec();
        for _ in 0..d {
            let diff: Vec<f32> = result.windows(2).map(|w| w[1] - w[0]).collect();
            result = diff;
        }
        result
    }

    /// Integrate `d`-times-differenced forecasts back to the original scale,
    /// using the last value of each intermediate differenced series.
    fn undifference(diff: &[f32], original: &[f32], d: usize) -> Vec<f32> {
        if d == 0 {
            return diff.to_vec();
        }

        // Build the chain of successively differenced series so each integration
        // step starts from the correct last observed value.
        let mut levels: Vec<Vec<f32>> = vec![original.to_vec()];
        for k in 1..d {
            let next: Vec<f32> = levels[k - 1].windows(2).map(|w| w[1] - w[0]).collect();
            levels.push(next);
        }

        let mut result = diff.to_vec();
        for k in (0..d).rev() {
            let mut acc = *levels[k].last().unwrap();
            for v in result.iter_mut() {
                acc += *v;
                *v = acc;
            }
        }
        result
    }

    pub fn fit(&mut self, y: &Tensor) {
        let y_data = y.data_f32();

        // Difference the series d times, but keep the original for undifferencing later.
        let y_diff = Self::difference(&y_data, self.d);
        self.original_values_ = Some(y_data.clone());

        if y_diff.len() <= self.p {
            panic!("Not enough data after differencing");
        }

        // Fit the AR(p) part with the Yule-Walker equations.
        let n = y_diff.len();
        let mean = y_diff.iter().sum::<f32>() / n as f32;
        let y_centered: Vec<f32> = y_diff.iter().map(|&v| v - mean).collect();

        // Sample autocovariances r[0..=p].
        let mut r = vec![0.0f32; self.p + 1];
        for k in 0..=self.p {
            for i in k..n {
                r[k] += y_centered[i] * y_centered[i - k];
            }
            r[k] /= n as f32;
        }

        // Solve the Toeplitz system R * phi = r[1..=p].
        if self.p > 0 {
            let mut toeplitz = vec![0.0f32; self.p * self.p];
            for i in 0..self.p {
                for j in 0..self.p {
                    toeplitz[i * self.p + j] = r[(i as i32 - j as i32).unsigned_abs() as usize];
                }
            }

            let rhs: Vec<f32> = r[1..=self.p].to_vec();
            self.ar_coef_ = Some(solve_linear(&toeplitz, &rhs, self.p));
        } else {
            self.ar_coef_ = Some(vec![]);
        }

        self.intercept_ = mean;
    }

    pub fn predict(&self, steps: usize) -> Tensor {
        let ar_coef = self.ar_coef_.as_ref().expect("Model not fitted");
        let original = self.original_values_.as_ref().unwrap();

        let y_diff = Self::difference(original, self.d);
        let n = y_diff.len();

        // Iterate the AR recursion forward on the differenced scale.
        let mut predictions = Vec::with_capacity(steps);
        let mut history: Vec<f32> = y_diff[n.saturating_sub(self.p)..].to_vec();

        for _ in 0..steps {
            let mut pred = self.intercept_;
            for (i, &coef) in ar_coef.iter().enumerate() {
                if i < history.len() {
                    // AR recursion on the mean-centered series, matching how the
                    // coefficients were estimated in `fit`.
                    pred += coef * (history[history.len() - 1 - i] - self.intercept_);
                }
            }
            predictions.push(pred);
            history.push(pred);
        }

        // Integrate back to the original scale.
        let final_pred = Self::undifference(&predictions, original, self.d);

        Tensor::from_slice(&final_pred, &[steps]).unwrap()
    }
}

/// Trailing (simple) Moving Average; early positions average over however many points are available.
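///
/// A minimal usage sketch (module path assumed as above; marked `ignore`):
///
/// ```ignore
/// use ghostflow_core::Tensor;
/// use ghostflow_ml::time_series::MovingAverage;
///
/// let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0], &[5]).unwrap();
/// let smoothed = MovingAverage::new(3).transform(&y);
/// // Early values average over fewer points: [1.0, 1.5, 2.0, 3.0, 4.0]
/// ```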
pub struct MovingAverage {
    pub window: usize,
}

impl MovingAverage {
    pub fn new(window: usize) -> Self {
        assert!(window > 0, "window must be at least 1");
        MovingAverage { window }
    }

    pub fn transform(&self, y: &Tensor) -> Tensor {
        let y_data = y.data_f32();
        let n = y_data.len();

        let mut result = Vec::with_capacity(n);

        // Trailing window: average the last `window` points (fewer at the start of the series).
        for i in 0..n {
            let start = i.saturating_sub(self.window - 1);
            let sum: f32 = y_data[start..=i].iter().sum();
            let count = i - start + 1;
            result.push(sum / count as f32);
        }

        Tensor::from_slice(&result, &[n]).unwrap()
    }
}

/// Exponentially Weighted Moving Average with smoothing factor `alpha = 2 / (span + 1)`.
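///
/// A minimal usage sketch (module path assumed as above; marked `ignore`):
///
/// ```ignore
/// use ghostflow_core::Tensor;
/// use ghostflow_ml::time_series::EWMA;
///
/// let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0], &[4]).unwrap();
/// // span = 3 gives alpha = 0.5, so each output averages the new value
/// // with the previous smoothed value.
/// let smoothed = EWMA::new(3).transform(&y);
/// ```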
pub struct EWMA {
    pub span: usize,
}

impl EWMA {
    pub fn new(span: usize) -> Self {
        assert!(span > 0, "span must be at least 1");
        EWMA { span }
    }

    pub fn transform(&self, y: &Tensor) -> Tensor {
        let y_data = y.data_f32();
        let n = y_data.len();
        let alpha = 2.0 / (self.span as f32 + 1.0);

        // Seed with the first observation, then apply the EWMA recursion.
        let mut result = vec![y_data[0]];

        for i in 1..n {
            let ewma = alpha * y_data[i] + (1.0 - alpha) * result[i - 1];
            result.push(ewma);
        }

        Tensor::from_slice(&result, &[n]).unwrap()
    }
}

/// Solve the dense `n x n` system `a * x = b` by Gaussian elimination with partial pivoting.
/// Near-singular pivots are skipped rather than causing a panic.
fn solve_linear(a: &[f32], b: &[f32], n: usize) -> Vec<f32> {
    // Build the augmented matrix [a | b] in row-major order.
    let mut aug = vec![0.0f32; n * (n + 1)];
    for i in 0..n {
        for j in 0..n {
            aug[i * (n + 1) + j] = a[i * n + j];
        }
        aug[i * (n + 1) + n] = b[i];
    }

    // Gauss-Jordan elimination with partial pivoting.
    for i in 0..n {
        // Find the row with the largest pivot in column i.
        let mut max_row = i;
        for k in (i + 1)..n {
            if aug[k * (n + 1) + i].abs() > aug[max_row * (n + 1) + i].abs() {
                max_row = k;
            }
        }

        // Swap the pivot row into place.
        for j in 0..=n {
            aug.swap(i * (n + 1) + j, max_row * (n + 1) + j);
        }

        let pivot = aug[i * (n + 1) + i];
        if pivot.abs() < 1e-10 { continue; }

        // Normalize the pivot row.
        for j in i..=n {
            aug[i * (n + 1) + j] /= pivot;
        }

        // Eliminate column i from all other rows.
        for k in 0..n {
            if k != i {
                let factor = aug[k * (n + 1) + i];
                for j in i..=n {
                    aug[k * (n + 1) + j] -= factor * aug[i * (n + 1) + j];
                }
            }
        }
    }

    // The solution is the final augmented column.
    (0..n).map(|i| aug[i * (n + 1) + n]).collect()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_simple_exp_smoothing() {
        let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0], &[5]).unwrap();
        let mut ses = SimpleExponentialSmoothing::new(0.3);
        ses.fit(&y);
        let pred = ses.predict(3);
        assert_eq!(pred.dims(), &[3]);
    }

    #[test]
    fn test_holt_linear() {
        let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0], &[5]).unwrap();
        let mut holt = HoltLinear::new(0.3, 0.1);
        holt.fit(&y);
        let pred = holt.predict(3);
        assert_eq!(pred.dims(), &[3]);
    }

    #[test]
    fn test_moving_average() {
        let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0], &[5]).unwrap();
        let ma = MovingAverage::new(3);
        let result = ma.transform(&y);
        assert_eq!(result.dims(), &[5]);
    }
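
    // Smoke tests for the remaining estimators; like the tests above they only
    // check the output shape, not numerical accuracy.
    #[test]
    fn test_holt_winters() {
        // Two full seasons of period-4 data, the minimum `fit` accepts.
        let y = Tensor::from_slice(
            &[10.0f32, 20.0, 30.0, 40.0, 12.0, 22.0, 32.0, 42.0],
            &[8],
        )
        .unwrap();
        let mut hw = HoltWinters::new(4).alpha(0.3).beta(0.1).gamma(0.2);
        hw.fit(&y);
        let pred = hw.predict(4);
        assert_eq!(pred.dims(), &[4]);
    }

    #[test]
    fn test_arima() {
        let y = Tensor::from_slice(
            &[1.0f32, 2.0, 1.5, 3.0, 2.5, 4.0, 3.5, 5.0, 4.5, 6.0],
            &[10],
        )
        .unwrap();
        let mut model = ARIMA::new(2, 1, 0);
        model.fit(&y);
        let pred = model.predict(3);
        assert_eq!(pred.dims(), &[3]);
    }

    #[test]
    fn test_ewma() {
        let y = Tensor::from_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0], &[5]).unwrap();
        let result = EWMA::new(3).transform(&y);
        assert_eq!(result.dims(), &[5]);
    }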
}