1use crate::error::Result;
2use diffx_core::{diff, DiffResult};
3
/// A single observation in a time series: a value sampled at a point in time.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// Position on the time axis (units are caller-defined; e.g. an index —
    /// see `create_timeseries_from_values`).
    pub timestamp: f64,
    /// Observed measurement at `timestamp`.
    pub value: f64,
}
10
/// Aggregated output of `analyze_timeseries`: one field per analysis stage.
#[derive(Debug, Clone)]
pub struct TimeSeriesAnalysis {
    /// Linear trend fit (slope, intercept, fit quality, direction).
    pub trend: TrendAnalysis,
    /// Detected periodicity, if any.
    pub seasonality: SeasonalityAnalysis,
    /// Indices where the series' level or variance shifts abruptly.
    pub changepoints: Vec<ChangePoint>,
    /// Linear extrapolation beyond the last observed timestamp.
    pub forecasts: Vec<ForecastPoint>,
    /// Points flagged as statistical outliers within a sliding window.
    pub anomalies: Vec<TimeSeriesAnomaly>,
    /// Autocorrelation, stationarity, noise, and data-quality summaries.
    pub statistics: TimeSeriesStatistics,
}
21
/// Ordinary-least-squares linear fit of value against timestamp.
#[derive(Debug, Clone)]
pub struct TrendAnalysis {
    /// Change in value per unit of timestamp.
    pub slope: f64,
    /// Fitted value at timestamp 0.
    pub intercept: f64,
    /// Coefficient of determination of the fit (1.0 = perfect line).
    pub r_squared: f64,
    /// Combined measure: `r_squared * |slope|`.
    pub trend_strength: f64,
    /// Qualitative classification derived from the slope.
    pub direction: TrendDirection,
}
31
/// Qualitative trend classification.
#[derive(Debug, Clone)]
pub enum TrendDirection {
    /// Positive slope (|slope| >= 0.01).
    Increasing,
    /// Negative slope (|slope| >= 0.01).
    Decreasing,
    /// Near-zero slope (|slope| < 0.01).
    Stable,
    /// Reserved: `analyze_trend` currently never produces this variant.
    Volatile,
}
40
/// Result of the periodicity search in `detect_seasonality`.
#[derive(Debug, Clone)]
pub struct SeasonalityAnalysis {
    /// True when the strongest candidate period's strength exceeds 0.3.
    pub detected: bool,
    /// Best candidate period length (in samples), if any candidate scored > 0.
    pub period: Option<f64>,
    /// Strength of the best period (between-season / total variance ratio).
    pub strength: f64,
    /// Per-sample seasonal mean (all zeros when no seasonality was detected).
    pub seasonal_components: Vec<f64>,
}
49
/// A location where the series' statistics shift abruptly.
#[derive(Debug, Clone)]
pub struct ChangePoint {
    /// Timestamp of the detected change.
    pub timestamp: f64,
    /// Index into the original series.
    pub index: usize,
    /// Detection score: max of the mean shift (in pooled std devs) and the
    /// variance ratio between the before/after windows.
    pub significance: f64,
    /// Kind of change detected.
    pub change_type: ChangeType,
    /// Mean of the window before the change point.
    pub before_value: f64,
    /// Mean of the window after the change point.
    pub after_value: f64,
}
60
/// Classification of a change point.
#[derive(Debug, Clone)]
pub enum ChangeType {
    /// Shift in the mean level (default classification).
    LevelShift,
    /// Reserved: `detect_changepoints` currently never produces this variant.
    TrendChange,
    /// Variance/std-dev ratio between windows exceeded 2x.
    VarianceChange,
}
68
/// One extrapolated point produced by `generate_forecasts`.
#[derive(Debug, Clone)]
pub struct ForecastPoint {
    /// Forecast timestamp (past the end of the observed series).
    pub timestamp: f64,
    /// Value of the fitted trend line at `timestamp`.
    pub predicted_value: f64,
    /// (lower, upper) bounds: predicted value ± 1.96 × uncertainty.
    pub confidence_interval: (f64, f64),
    /// Residual std dev, inflated the further out the forecast is.
    pub uncertainty: f64,
}
77
/// A point flagged as anomalous relative to its local window.
#[derive(Debug, Clone)]
pub struct TimeSeriesAnomaly {
    /// Timestamp of the anomalous observation.
    pub timestamp: f64,
    /// Index into the original series.
    pub index: usize,
    /// Observed (anomalous) value.
    pub value: f64,
    /// Local window mean the value was compared against.
    pub expected_value: f64,
    /// |z-score| / 3.0 — values above 1.0 exceeded the 3-sigma threshold.
    pub anomaly_score: f64,
    /// Kind of anomaly detected.
    pub anomaly_type: AnomalyType,
}
88
/// Classification of a detected anomaly.
#[derive(Debug, Clone)]
pub enum AnomalyType {
    /// Single outlier value (the only variant `detect_timeseries_anomalies`
    /// currently produces).
    PointAnomaly,
    /// Reserved: not currently produced.
    SequentialAnomaly,
    /// Reserved: not currently produced.
    SeasonalAnomaly,
}
96
/// Statistical summary of the series.
#[derive(Debug, Clone)]
pub struct TimeSeriesStatistics {
    /// Autocorrelation at lags 0..max (lag 0 is 1.0 for non-constant data).
    pub autocorrelation: Vec<f64>,
    /// Currently a copy of `autocorrelation` — a true PACF is not computed
    /// (see `calculate_timeseries_statistics`).
    pub partial_autocorrelation: Vec<f64>,
    /// Heuristic stationarity check based on variance of first differences.
    pub stationarity_test: StationarityResult,
    /// RMS of the second difference (zero for a perfect straight line).
    pub noise_level: f64,
    /// Heuristic quality scores for the raw values.
    pub data_quality: DataQuality,
}
106
/// Outcome of the heuristic stationarity check in `test_stationarity`.
#[derive(Debug, Clone)]
pub struct StationarityResult {
    /// True when the differenced/original variance ratio is below 0.8.
    pub is_stationary: bool,
    /// Variance of first differences divided by variance of the raw series.
    pub test_statistic: f64,
    /// Heuristic placeholder (0.01 or 0.99), not a real distributional p-value.
    pub p_value: f64,
    /// 0 if judged stationary, otherwise 1.
    pub differencing_required: usize,
}
115
/// Heuristic data-quality scores, each nominally in [0, 1].
#[derive(Debug, Clone)]
pub struct DataQuality {
    /// Currently hard-coded to 1.0 (no gap information is available).
    pub completeness: f64,
    /// 1 minus the capped mean absolute relative step change.
    pub consistency: f64,
    /// Currently hard-coded to 1.0 (no sampling-interval information).
    pub regularity: f64,
    /// Fraction of values beyond 3 standard deviations from the mean.
    pub outlier_ratio: f64,
}
124
125pub fn analyze_timeseries(data: &[TimeSeriesPoint]) -> Result<TimeSeriesAnalysis> {
127 if data.len() < 10 {
128 return Err(crate::error::BenfError::InsufficientData(data.len()));
129 }
130
131 let values: Vec<f64> = data.iter().map(|p| p.value).collect();
132 let timestamps: Vec<f64> = data.iter().map(|p| p.timestamp).collect();
133
134 Ok(TimeSeriesAnalysis {
135 trend: analyze_trend(×tamps, &values)?,
136 seasonality: detect_seasonality(&values)?,
137 changepoints: detect_changepoints(×tamps, &values)?,
138 forecasts: generate_forecasts(×tamps, &values, 5)?,
139 anomalies: detect_timeseries_anomalies(×tamps, &values)?,
140 statistics: calculate_timeseries_statistics(&values)?,
141 })
142}
143
144fn analyze_trend(timestamps: &[f64], values: &[f64]) -> Result<TrendAnalysis> {
146 let n = values.len() as f64;
147
148 let sum_x: f64 = timestamps.iter().sum();
150 let sum_y: f64 = values.iter().sum();
151 let sum_xy: f64 = timestamps
152 .iter()
153 .zip(values.iter())
154 .map(|(x, y)| x * y)
155 .sum();
156 let sum_x2: f64 = timestamps.iter().map(|x| x * x).sum();
157
158 let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x);
159 let intercept = (sum_y - slope * sum_x) / n;
160
161 let mean_y = sum_y / n;
163 let ss_tot: f64 = values.iter().map(|y| (y - mean_y).powi(2)).sum();
164 let ss_res: f64 = timestamps
165 .iter()
166 .zip(values.iter())
167 .map(|(x, y)| {
168 let predicted = slope * x + intercept;
169 (y - predicted).powi(2)
170 })
171 .sum();
172
173 let r_squared = 1.0 - (ss_res / ss_tot);
174
175 let trend_strength = r_squared * slope.abs();
177 let direction = if slope.abs() < 0.01 {
178 TrendDirection::Stable
179 } else if slope > 0.0 {
180 TrendDirection::Increasing
181 } else {
182 TrendDirection::Decreasing
183 };
184
185 Ok(TrendAnalysis {
186 slope,
187 intercept,
188 r_squared,
189 trend_strength,
190 direction,
191 })
192}
193
194fn detect_seasonality(values: &[f64]) -> Result<SeasonalityAnalysis> {
196 let n = values.len();
197 let mut best_period = None;
198 let mut best_strength = 0.0;
199
200 for period in 2..=(n / 4) {
202 let strength = calculate_seasonal_strength(values, period);
203 if strength > best_strength {
204 best_strength = strength;
205 best_period = Some(period as f64);
206 }
207 }
208
209 let detected = best_strength > 0.3; let seasonal_components = if detected {
211 calculate_seasonal_components(values, best_period.unwrap() as usize)
212 } else {
213 vec![0.0; n]
214 };
215
216 Ok(SeasonalityAnalysis {
217 detected,
218 period: best_period,
219 strength: best_strength,
220 seasonal_components,
221 })
222}
223
/// Scans the series with a sliding split point and flags indices where the
/// statistics of the window before differ markedly from the window after.
///
/// Two signals are combined:
/// - a mean shift measured in pooled standard deviations (significance > 2.0
///   flags a `LevelShift`), and
/// - a variance/std-dev ratio between the windows, extracted from a
///   `diffx_core::diff` of the two summary JSON objects (ratio > 2.0
///   reclassifies the change as `VarianceChange`).
///
/// `ChangeType::TrendChange` is declared but never produced here.
fn detect_changepoints(timestamps: &[f64], values: &[f64]) -> Result<Vec<ChangePoint>> {
    let mut changepoints = Vec::new();
    // Window on each side of the split: 10% of the series, clamped to [5, 20].
    let window_size = (values.len() / 10).clamp(5, 20);

    for i in window_size..(values.len() - window_size) {
        let before_window = &values[(i - window_size)..i];
        let after_window = &values[i..(i + window_size)];

        let before_mean: f64 = before_window.iter().sum::<f64>() / before_window.len() as f64;
        let after_mean: f64 = after_window.iter().sum::<f64>() / after_window.len() as f64;

        // Population variance of each window.
        let before_var: f64 = before_window
            .iter()
            .map(|x| (x - before_mean).powi(2))
            .sum::<f64>()
            / before_window.len() as f64;
        let after_var: f64 = after_window
            .iter()
            .map(|x| (x - after_mean).powi(2))
            .sum::<f64>()
            / after_window.len() as f64;

        // Summarize each window as JSON so the generic differ can compare them.
        // "cv" is the coefficient of variation (std dev / |mean|), 0 for a zero mean.
        let before_stats = serde_json::json!({
            "mean": before_mean,
            "variance": before_var,
            "std_dev": before_var.sqrt(),
            "cv": if before_mean.abs() > 0.0 { before_var.sqrt() / before_mean.abs() } else { 0.0 }
        });

        let after_stats = serde_json::json!({
            "mean": after_mean,
            "variance": after_var,
            "std_dev": after_var.sqrt(),
            "cv": if after_mean.abs() > 0.0 { after_var.sqrt() / after_mean.abs() } else { 0.0 }
        });

        // NOTE(review): `Some(0.1)` is presumably diffx's numeric tolerance
        // (epsilon) argument — confirm against the diffx_core API.
        let diff_results = diff(&before_stats, &after_stats, None, Some(0.1), None);

        // Mean shift expressed in units of the pooled standard deviation.
        let mean_change = (after_mean - before_mean).abs();
        let pooled_std = ((before_var + after_var) / 2.0).sqrt();

        if pooled_std > 0.0 {
            let significance = mean_change / pooled_std;

            let mut change_type = ChangeType::LevelShift;
            let mut max_change_ratio = 0.0;

            // Inspect the diff output for variance-related fields; a large
            // ratio in either direction reclassifies the change.
            for diff_result in &diff_results {
                if let DiffResult::Modified(path, old_val, new_val) = diff_result {
                    if path.contains("variance") || path.contains("std_dev") {
                        if let (Some(old), Some(new)) = (old_val.as_f64(), new_val.as_f64()) {
                            // Symmetric ratio (>= 1); the 0.001 floor avoids
                            // division by zero for near-zero variances.
                            let ratio = (new / old.max(0.001)).max(old / new.max(0.001));
                            if ratio > max_change_ratio {
                                max_change_ratio = ratio;
                                if ratio > 2.0 {
                                    change_type = ChangeType::VarianceChange;
                                }
                            }
                        }
                    }
                }
            }

            if significance > 2.0 || max_change_ratio > 2.0 {
                changepoints.push(ChangePoint {
                    timestamp: timestamps[i],
                    index: i,
                    significance: significance.max(max_change_ratio),
                    change_type,
                    before_value: before_mean,
                    after_value: after_mean,
                });
            }
        }
    }

    Ok(changepoints)
}
308
309fn generate_forecasts(
311 timestamps: &[f64],
312 values: &[f64],
313 steps: usize,
314) -> Result<Vec<ForecastPoint>> {
315 let trend = analyze_trend(timestamps, values)?;
316 let last_timestamp = timestamps.last().unwrap();
317 let time_step = if timestamps.len() > 1 {
318 (timestamps[timestamps.len() - 1] - timestamps[0]) / (timestamps.len() - 1) as f64
319 } else {
320 1.0
321 };
322
323 let mut forecasts = Vec::new();
324
325 let residuals: Vec<f64> = timestamps
327 .iter()
328 .zip(values.iter())
329 .map(|(x, y)| {
330 let predicted = trend.slope * x + trend.intercept;
331 y - predicted
332 })
333 .collect();
334
335 let residual_std = {
336 let mean_residual = residuals.iter().sum::<f64>() / residuals.len() as f64;
337 let variance = residuals
338 .iter()
339 .map(|r| (r - mean_residual).powi(2))
340 .sum::<f64>()
341 / residuals.len() as f64;
342 variance.sqrt()
343 };
344
345 for i in 1..=steps {
346 let future_timestamp = last_timestamp + (i as f64 * time_step);
347 let predicted_value = trend.slope * future_timestamp + trend.intercept;
348
349 let uncertainty = residual_std * (1.0 + i as f64 * 0.1); let confidence_interval = (
352 predicted_value - 1.96 * uncertainty,
353 predicted_value + 1.96 * uncertainty,
354 );
355
356 forecasts.push(ForecastPoint {
357 timestamp: future_timestamp,
358 predicted_value,
359 confidence_interval,
360 uncertainty,
361 });
362 }
363
364 Ok(forecasts)
365}
366
367fn detect_timeseries_anomalies(
369 timestamps: &[f64],
370 values: &[f64],
371) -> Result<Vec<TimeSeriesAnomaly>> {
372 let mut anomalies = Vec::new();
373 let window_size = (values.len() / 20).clamp(3, 10);
374
375 for i in window_size..(values.len() - window_size) {
376 let window = &values[(i - window_size)..(i + window_size + 1)];
378 let mean: f64 = window.iter().sum::<f64>() / window.len() as f64;
379 let std: f64 = {
380 let variance =
381 window.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / window.len() as f64;
382 variance.sqrt()
383 };
384
385 if std > 0.0 {
386 let z_score = (values[i] - mean) / std;
387
388 if z_score.abs() > 3.0 {
389 let expected_value = mean;
390 let anomaly_score = z_score.abs() / 3.0;
391
392 anomalies.push(TimeSeriesAnomaly {
393 timestamp: timestamps[i],
394 index: i,
395 value: values[i],
396 expected_value,
397 anomaly_score,
398 anomaly_type: AnomalyType::PointAnomaly,
399 });
400 }
401 }
402 }
403
404 Ok(anomalies)
405}
406
407fn calculate_timeseries_statistics(values: &[f64]) -> Result<TimeSeriesStatistics> {
409 let n = values.len();
410
411 let max_lags = (n / 4).min(20);
413 let mut autocorrelation = Vec::new();
414
415 for lag in 0..max_lags {
416 let correlation = calculate_autocorrelation(values, lag);
417 autocorrelation.push(correlation);
418 }
419
420 let partial_autocorrelation = autocorrelation.clone(); let stationarity_test = test_stationarity(values);
425
426 let noise_level = calculate_noise_level(values);
428
429 let data_quality = assess_data_quality(values);
431
432 Ok(TimeSeriesStatistics {
433 autocorrelation,
434 partial_autocorrelation,
435 stationarity_test,
436 noise_level,
437 data_quality,
438 })
439}
440
/// Ratio of between-season variance to total variance for a candidate period.
/// Returns 0.0 when the period is not shorter than the series.
fn calculate_seasonal_strength(values: &[f64], period: usize) -> f64 {
    if period >= values.len() {
        return 0.0;
    }

    // Accumulate the sum and count of observations at each phase of the cycle.
    let mut sums = vec![0.0; period];
    let mut counts = vec![0usize; period];
    for (i, &v) in values.iter().enumerate() {
        sums[i % period] += v;
        counts[i % period] += 1;
    }

    let seasonal_means: Vec<f64> = sums
        .iter()
        .zip(&counts)
        .map(|(&s, &c)| if c > 0 { s / c as f64 } else { s })
        .collect();

    // NOTE: the reference mean is the mean of the per-phase means, not the raw
    // data mean; the two differ when `period` does not divide the length evenly.
    let overall_mean: f64 = seasonal_means.iter().sum::<f64>() / seasonal_means.len() as f64;

    let between_variance: f64 = seasonal_means
        .iter()
        .map(|m| (m - overall_mean).powi(2))
        .sum::<f64>()
        / seasonal_means.len() as f64;

    let total_variance: f64 = values
        .iter()
        .map(|v| (v - overall_mean).powi(2))
        .sum::<f64>()
        / values.len() as f64;

    if total_variance > 0.0 {
        between_variance / total_variance
    } else {
        0.0
    }
}
483
/// Expands the per-phase seasonal means back out to the full series length:
/// each position receives the mean of all observations sharing its phase.
fn calculate_seasonal_components(values: &[f64], period: usize) -> Vec<f64> {
    let mut seasonal_means = vec![0.0; period];
    let mut counts = vec![0usize; period];

    for (i, &v) in values.iter().enumerate() {
        seasonal_means[i % period] += v;
        counts[i % period] += 1;
    }
    for (mean, &count) in seasonal_means.iter_mut().zip(&counts) {
        if count > 0 {
            *mean /= count as f64;
        }
    }

    // Each position simply repeats its phase's mean.
    (0..values.len())
        .map(|i| seasonal_means[i % period])
        .collect()
}
510
/// Sample autocorrelation at the given lag, normalized by the total variance.
/// Returns 0.0 for an out-of-range lag or a constant series.
fn calculate_autocorrelation(values: &[f64], lag: usize) -> f64 {
    if lag >= values.len() {
        return 0.0;
    }

    let mean: f64 = values.iter().sum::<f64>() / values.len() as f64;
    let pairs = values.len() - lag;

    // Sum of products of mean-centered pairs `lag` apart.
    let numerator: f64 = values[..pairs]
        .iter()
        .zip(&values[lag..])
        .map(|(a, b)| (a - mean) * (b - mean))
        .sum();

    let denominator: f64 = values.iter().map(|v| (v - mean).powi(2)).sum();

    if denominator > 0.0 {
        numerator / denominator
    } else {
        0.0
    }
}
531
532fn test_stationarity(values: &[f64]) -> StationarityResult {
533 let n = values.len();
535 if n < 3 {
536 return StationarityResult {
537 is_stationary: false,
538 test_statistic: 0.0,
539 p_value: 1.0,
540 differencing_required: 1,
541 };
542 }
543
544 let diff: Vec<f64> = (1..n).map(|i| values[i] - values[i - 1]).collect();
546
547 let original_var: f64 = {
549 let mean = values.iter().sum::<f64>() / values.len() as f64;
550 values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / values.len() as f64
551 };
552
553 let diff_var: f64 = {
554 let mean = diff.iter().sum::<f64>() / diff.len() as f64;
555 diff.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / diff.len() as f64
556 };
557
558 let test_statistic = if original_var > 0.0 {
559 diff_var / original_var
560 } else {
561 1.0
562 };
563 let is_stationary = test_statistic < 0.8;
564 let p_value = if is_stationary { 0.01 } else { 0.99 };
565
566 StationarityResult {
567 is_stationary,
568 test_statistic,
569 p_value,
570 differencing_required: if is_stationary { 0 } else { 1 },
571 }
572}
573
/// Estimates the noise level as the RMS of the second difference
/// `x[i] - 2x[i-1] + x[i-2]`, which is zero for any straight line.
/// Returns 0.0 for series shorter than 3 points.
fn calculate_noise_level(values: &[f64]) -> f64 {
    if values.len() < 3 {
        return 0.0;
    }

    let second_diffs: Vec<f64> = values
        .windows(3)
        .map(|w| w[2] - 2.0 * w[1] + w[0])
        .collect();

    let mean_square =
        second_diffs.iter().map(|d| d * d).sum::<f64>() / second_diffs.len() as f64;
    mean_square.sqrt()
}
587
588fn assess_data_quality(values: &[f64]) -> DataQuality {
589 let n = values.len();
590
591 let completeness = 1.0;
593
594 let changes: Vec<f64> = (1..n)
596 .map(|i| ((values[i] - values[i - 1]) / values[i - 1].abs().max(1e-10)).abs())
597 .collect();
598
599 let consistency = 1.0 - (changes.iter().sum::<f64>() / changes.len() as f64).min(1.0);
600
601 let regularity = 1.0;
603
604 let mean = values.iter().sum::<f64>() / n as f64;
606 let std = {
607 let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n as f64;
608 variance.sqrt()
609 };
610
611 let outlier_count = values
612 .iter()
613 .filter(|&&x| (x - mean).abs() > 3.0 * std)
614 .count();
615
616 let outlier_ratio = outlier_count as f64 / n as f64;
617
618 DataQuality {
619 completeness,
620 consistency,
621 regularity,
622 outlier_ratio,
623 }
624}
625
626pub fn create_timeseries_from_values(values: &[f64]) -> Vec<TimeSeriesPoint> {
628 values
629 .iter()
630 .enumerate()
631 .map(|(i, &value)| TimeSeriesPoint {
632 timestamp: i as f64,
633 value,
634 })
635 .collect()
636}
637
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_create_timeseries_from_values() {
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let timeseries = create_timeseries_from_values(&values);

        assert_eq!(timeseries.len(), 5);
        assert_eq!(timeseries[0].value, 1.0);
        assert_eq!(timeseries[0].timestamp, 0.0);
        assert_eq!(timeseries[4].value, 5.0);
        assert_eq!(timeseries[4].timestamp, 4.0);
    }

    #[test]
    fn test_analyze_timeseries_basic() {
        // Perfect linear ramp: timestamps 0..10, values 1..=10.
        let data: Vec<TimeSeriesPoint> = (0..10)
            .map(|i| TimeSeriesPoint {
                timestamp: i as f64,
                value: (i + 1) as f64,
            })
            .collect();

        let result = analyze_timeseries(&data).unwrap();

        assert!(result.trend.slope > 0.0);
        assert!(result.trend.r_squared > 0.9);
        // Fixed: a bare `matches!` discards its boolean result, so the
        // original line asserted nothing. Wrap it in `assert!`.
        assert!(matches!(result.trend.direction, TrendDirection::Increasing));

        assert_eq!(result.forecasts.len(), 5);
        assert!(result.forecasts[0].predicted_value > 10.0);
        assert!(!result.statistics.autocorrelation.is_empty());
    }

    #[test]
    fn test_analyze_trend() {
        let timestamps = vec![0.0, 1.0, 2.0, 3.0, 4.0];
        let values = vec![1.0, 3.0, 5.0, 7.0, 9.0];
        let trend = analyze_trend(&timestamps, &values).unwrap();

        assert!(trend.slope > 1.5);
        assert!(trend.slope < 2.5);
        assert!(trend.r_squared > 0.99);
        // Fixed: previously a bare `matches!` with no assertion.
        assert!(matches!(trend.direction, TrendDirection::Increasing));
    }

    #[test]
    fn test_detect_seasonality() {
        let values = vec![
            0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0,
        ];

        let seasonality = detect_seasonality(&values).unwrap();

        if seasonality.detected {
            assert!(seasonality.period.unwrap() >= 2.0);
            assert!(seasonality.strength > 0.0);
        }
    }

    #[test]
    fn test_detect_changepoints() {
        let timestamps = vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
        // Level shift from ~1.0 to ~10.0 at index 5.
        let values = vec![1.0, 1.1, 0.9, 1.05, 0.95, 10.0, 9.9, 10.1, 9.95, 10.05];

        let changepoints = detect_changepoints(&timestamps, &values).unwrap();

        if !changepoints.is_empty() {
            let major_changepoint = &changepoints[0];
            assert!(major_changepoint.index >= 4);
            assert!(major_changepoint.index <= 6);
            assert!(major_changepoint.significance > 2.0);
        }
    }

    #[test]
    fn test_generate_forecasts() {
        let timestamps = vec![0.0, 1.0, 2.0, 3.0, 4.0];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let forecasts = generate_forecasts(&timestamps, &values, 3).unwrap();

        assert_eq!(forecasts.len(), 3);

        // A rising trend must extrapolate to strictly increasing forecasts.
        assert!(forecasts[0].predicted_value > 5.0);
        assert!(forecasts[1].predicted_value > forecasts[0].predicted_value);
        assert!(forecasts[2].predicted_value > forecasts[1].predicted_value);

        assert!(forecasts[0].confidence_interval.0 <= forecasts[0].confidence_interval.1);
        assert!(forecasts[0].uncertainty >= 0.0);
    }

    #[test]
    fn test_detect_timeseries_anomalies() {
        let timestamps = vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
        // One obvious spike at index 3.
        let values = vec![1.0, 2.0, 3.0, 100.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
        let anomalies = detect_timeseries_anomalies(&timestamps, &values).unwrap();

        if !anomalies.is_empty() {
            let anomaly = &anomalies[0];
            assert!(anomaly.value == 100.0 || anomaly.anomaly_score > 3.0);
            // Fixed: previously a bare `matches!` with no assertion.
            assert!(matches!(anomaly.anomaly_type, AnomalyType::PointAnomaly));
        }
    }

    #[test]
    fn test_calculate_timeseries_statistics() {
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0, 4.0, 3.0, 2.0, 1.0, 2.0];

        let stats = calculate_timeseries_statistics(&values).unwrap();

        assert!(!stats.autocorrelation.is_empty());
        assert!(!stats.partial_autocorrelation.is_empty());
        assert!(stats.noise_level >= 0.0);
        assert!(stats.data_quality.completeness > 0.0);
        assert!(stats.data_quality.consistency >= 0.0);
        assert!(stats.data_quality.outlier_ratio >= 0.0);
    }

    #[test]
    fn test_insufficient_data_error() {
        // Two points is below the 10-point minimum.
        let data = vec![
            TimeSeriesPoint {
                timestamp: 0.0,
                value: 1.0,
            },
            TimeSeriesPoint {
                timestamp: 1.0,
                value: 2.0,
            },
        ];
        let result = analyze_timeseries(&data);
        assert!(result.is_err());
    }

    #[test]
    fn test_stable_trend_detection() {
        let timestamps = vec![0.0, 1.0, 2.0, 3.0, 4.0];
        // Tiny fluctuations around 5.0 → slope well under the 0.01 threshold.
        let values = vec![5.0, 5.01, 4.99, 5.005, 4.995];
        let trend = analyze_trend(&timestamps, &values).unwrap();

        // Fixed: previously a bare `matches!` with no assertion.
        assert!(matches!(trend.direction, TrendDirection::Stable));
        assert!(trend.slope.abs() < 0.1);
    }
}