use scirs2_core::ndarray::{s, Array1, ArrayView1};
use scirs2_core::numeric::{Float, FromPrimitive};
use std::fmt::Debug;

use super::config::TurningPointsConfig;
use crate::error::{Result, TimeSeriesError};

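/// Aggregated result of spectral peak analysis; see the producing routine
/// for the exact field layout.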
pub type SpectralPeakResult<F> = (Vec<F>, Vec<F>, Vec<F>, Vec<F>, usize, F, F, Vec<F>);

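/// Aggregated result of phase spectrum analysis, bundling the raw phase
/// quantities with [`PhaseSpectrumFeatures`] and [`BispectrumFeatures`].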
pub type PhaseSpectrumResult<F> = (
    Vec<F>,
    Vec<F>,
    F,
    PhaseSpectrumFeatures<F>,
    BispectrumFeatures<F>,
);

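/// Scalar summary of frequency-domain features.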
pub type FrequencyFeatureResult<F> = (F, F, F, F, F, F, F, F);

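/// Result of multiscale spectral analysis: per-scale values and
/// [`ScaleSpectralFeatures`], plus aggregate statistics.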
pub type MultiscaleSpectralResult<F> = (Vec<F>, Vec<ScaleSpectralFeatures<F>>, Vec<F>, F);

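/// Scalar and vector summaries produced by cross-frequency coupling analysis.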
pub type CrossFrequencyCouplingResult<F> = (F, F, F, F, F, F, F, Vec<F>, Vec<F>, F);

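/// Summary statistics (mean, standard deviation, entropy) of a phase spectrum.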
#[derive(Debug, Clone, Default)]
pub struct PhaseSpectrumFeatures<F> {
    pub phase_mean: F,
    pub phase_std: F,
    pub phase_entropy: F,
}

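/// Summary statistics (peak value, entropy) of a bispectrum.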
#[derive(Debug, Clone, Default)]
pub struct BispectrumFeatures<F> {
    pub bispectrum_peak: F,
    pub bispectrum_entropy: F,
}

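/// Spectral features (energy, entropy) computed at one scale of a
/// multiscale decomposition.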
#[derive(Debug, Clone, Default)]
pub struct ScaleSpectralFeatures<F> {
    pub scale: F,
    pub energy: F,
    pub entropy: F,
}

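/// Returns the minimum and maximum of the series in a single pass.
/// An empty series yields `(inf, -inf)`.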
#[allow(dead_code)]
pub fn find_min_max<F>(ts: &Array1<F>) -> (F, F)
where
    F: Float + FromPrimitive,
{
    let mut min_val = F::infinity();
    let mut max_val = F::neg_infinity();

    for &x in ts.iter() {
        if x < min_val {
            min_val = x;
        }
        if x > max_val {
            max_val = x;
        }
    }

    (min_val, max_val)
}

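/// Median computed by sorting a copy of the series; the two middle values
/// are averaged for even-length input. Returns zero for an empty series.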
#[allow(dead_code)]
pub fn calculate_median<F>(ts: &Array1<F>) -> F
where
    F: Float + FromPrimitive + Clone,
{
    let n = ts.len();
    if n == 0 {
        // Guard: indexing below would panic on an empty series.
        return F::zero();
    }

    let mut sorted: Vec<F> = ts.iter().cloned().collect();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    if n.is_multiple_of(2) {
        (sorted[n / 2 - 1] + sorted[n / 2]) / F::from(2.0).unwrap()
    } else {
        sorted[n / 2]
    }
}

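/// Population standard deviation (normalized by `n`, not `n - 1`).
/// Returns zero for an empty series.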
#[allow(dead_code)]
pub fn calculate_std_dev<F>(ts: &Array1<F>) -> F
where
    F: Float + FromPrimitive,
{
    let n = ts.len();
    if n == 0 {
        // Guard: dividing by n below would produce NaN for an empty series.
        return F::zero();
    }

    let mean = ts.sum() / F::from(n).unwrap();
    let variance = ts.mapv(|x| (x - mean) * (x - mean)).sum() / F::from(n).unwrap();
    variance.sqrt()
}

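/// Linearly interpolated percentile (0-100) of an already sorted slice.
/// Returns zero for an empty slice.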
#[allow(dead_code)]
pub fn calculate_percentile<F>(sorted: &[F], percentile: f64) -> F
where
    F: Float + FromPrimitive,
{
    let n = sorted.len();
    if n == 0 {
        return F::zero();
    }

    let index = (percentile / 100.0) * (n - 1) as f64;
    let lower_index = index.floor() as usize;
    let upper_index = index.ceil() as usize;

    if lower_index == upper_index {
        sorted[lower_index]
    } else {
        let fraction = F::from(index - lower_index as f64).unwrap();
        sorted[lower_index] + fraction * (sorted[upper_index] - sorted[lower_index])
    }
}

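/// Ordinary least-squares line fit returning `(slope, intercept)`.
/// Yields `(0, 0)` for fewer than two points and `(0, mean(y))` when all
/// `x` values coincide.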
#[allow(dead_code)]
pub fn linear_fit<F>(x: &[F], y: &[F]) -> (F, F)
where
    F: Float + FromPrimitive,
{
    let n = x.len() as f64;
    if n < 2.0 {
        return (F::zero(), F::zero());
    }

    let n_f = F::from(n).unwrap();
    let sum_x = x.iter().fold(F::zero(), |acc, &xi| acc + xi);
    let sum_y = y.iter().fold(F::zero(), |acc, &yi| acc + yi);
    let sum_xy = x
        .iter()
        .zip(y.iter())
        .fold(F::zero(), |acc, (&xi, &yi)| acc + xi * yi);
    let sum_x2 = x.iter().fold(F::zero(), |acc, &xi| acc + xi * xi);

    let denominator = n_f * sum_x2 - sum_x * sum_x;
    if denominator == F::zero() {
        return (F::zero(), sum_y / n_f);
    }

    let slope = (n_f * sum_xy - sum_x * sum_y) / denominator;
    let intercept = (sum_y - slope * sum_x) / n_f;

    (slope, intercept)
}

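/// Pearson correlation coefficient between two equal-length series.
/// Returns zero when either series has zero variance.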
#[allow(dead_code)]
pub fn calculate_pearson_correlation<F>(x: &Array1<F>, y: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + std::iter::Sum,
{
    if x.len() != y.len() {
        return Err(TimeSeriesError::FeatureExtractionError(
            "Arrays must have the same length for correlation calculation".to_string(),
        ));
    }

    let n = x.len();
    if n < 2 {
        return Err(TimeSeriesError::FeatureExtractionError(
            "At least 2 points required for correlation calculation".to_string(),
        ));
    }

    let n_f = F::from_usize(n).unwrap();

    let mean_x = x.iter().fold(F::zero(), |acc, &val| acc + val) / n_f;
    let mean_y = y.iter().fold(F::zero(), |acc, &val| acc + val) / n_f;

    let mut numerator = F::zero();
    let mut sum_sq_x = F::zero();
    let mut sum_sq_y = F::zero();

    for i in 0..n {
        let dx = x[i] - mean_x;
        let dy = y[i] - mean_y;
        numerator = numerator + dx * dy;
        sum_sq_x = sum_sq_x + dx * dx;
        sum_sq_y = sum_sq_y + dy * dy;
    }

    let denominator = (sum_sq_x * sum_sq_y).sqrt();
    if denominator == F::zero() {
        Ok(F::zero())
    } else {
        Ok(numerator / denominator)
    }
}

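/// Discretizes the series into `nbins` equal-width bins and returns the
/// empirical probability of each bin. When all values are identical, the
/// full probability mass is assigned to the first bin.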
#[allow(dead_code)]
pub fn discretize_and_get_probabilities<F>(ts: &Array1<F>, nbins: usize) -> Result<Vec<F>>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let (min_val, max_val) = find_min_max(ts);
    if min_val == max_val {
        // Degenerate case: every value lands in the first bin, so put the
        // full probability mass there rather than returning a vector of ones.
        let mut probabilities = vec![F::zero(); nbins];
        if nbins > 0 {
            probabilities[0] = F::one();
        }
        return Ok(probabilities);
    }

    let mut counts = vec![0; nbins];
    for &value in ts.iter() {
        let bin = discretize_value(value, min_val, max_val, nbins);
        counts[bin] += 1;
    }

    let n_f = F::from(ts.len()).unwrap();
    let probabilities = counts
        .into_iter()
        .map(|count| F::from(count).unwrap() / n_f)
        .collect();

    Ok(probabilities)
}

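/// Maps a value to its equal-width bin index in `[0, nbins - 1]`.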
#[allow(dead_code)]
pub fn discretize_value<F>(value: F, min_val: F, max_val: F, nbins: usize) -> usize
where
    F: Float + FromPrimitive,
{
    let range = max_val - min_val;
    if range == F::zero() {
        return 0;
    }

    let normalized = (value - min_val) / range;
    let bin = (normalized * F::from(nbins).unwrap())
        .to_usize()
        .unwrap_or(0);
    bin.min(nbins - 1)
}

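/// Coarse-grains the series by averaging non-overlapping windows of length
/// `scale`, as used in multiscale entropy analysis.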
#[allow(dead_code)]
pub fn coarse_grain_series<F>(ts: &Array1<F>, scale: usize) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    // `<= 1` also covers scale == 0, which would otherwise divide by zero.
    if scale <= 1 {
        return Ok(ts.clone());
    }

    let n = ts.len() / scale;
    let mut coarse_grained = Vec::with_capacity(n);

    for i in 0..n {
        let start = i * scale;
        let end = (start + scale).min(ts.len());
        let sum = (start..end).fold(F::zero(), |acc, j| acc + ts[j]);
        coarse_grained.push(sum / F::from(end - start).unwrap());
    }

    Ok(Array1::from_vec(coarse_grained))
}

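/// Coarse-grains the series starting from `offset`, yielding one of the
/// `scale` shifted variants used by refined/composite multiscale methods.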
#[allow(dead_code)]
pub fn refined_coarse_grain_series<F>(
    ts: &Array1<F>,
    scale: usize,
    offset: usize,
) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    // `<= 1` also covers scale == 0, which would otherwise loop forever below.
    if scale <= 1 {
        return Ok(ts.clone());
    }

    let mut coarse_grained = Vec::new();
    let mut i = offset;

    while i + scale <= ts.len() {
        let sum = (i..i + scale).fold(F::zero(), |acc, j| acc + ts[j]);
        coarse_grained.push(sum / F::from(scale).unwrap());
        i += scale;
    }

    Ok(Array1::from_vec(coarse_grained))
}

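/// Downsamples by keeping every `factor`-th sample (no anti-aliasing filter).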
#[allow(dead_code)]
pub fn downsample_signal<F>(ts: &Array1<F>, factor: usize) -> Result<Array1<F>>
where
    F: Float + Clone,
{
    if factor <= 1 {
        return Ok(ts.clone());
    }

    let downsampled: Vec<F> = ts.iter().step_by(factor).cloned().collect();

    Ok(Array1::from_vec(downsampled))
}

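/// Downsamples by keeping every `factor`-th sample; identical in effect to
/// [`downsample_signal`] but with stricter trait bounds.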
#[allow(dead_code)]
pub fn downsample_series<F>(ts: &Array1<F>, factor: usize) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    // `<= 1` also covers factor == 0, which would make `step_by` panic.
    if factor <= 1 {
        return Ok(ts.clone());
    }

    let downsampled: Vec<F> = ts.iter().step_by(factor).cloned().collect();
    Ok(Array1::from_vec(downsampled))
}

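/// Returns the ordinal (permutation) pattern of a window: the index
/// permutation that sorts it ascending, as used in permutation entropy.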
#[allow(dead_code)]
pub fn get_ordinal_pattern<F>(window: &ArrayView1<F>) -> Vec<usize>
where
    F: Float + FromPrimitive,
{
    let mut indices: Vec<usize> = (0..window.len()).collect();
    // Treat incomparable values (NaN) as equal instead of panicking.
    indices.sort_by(|&i, &j| {
        window[i]
            .partial_cmp(&window[j])
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    indices
}

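/// Finds strict local maxima (or minima when `findmaxima` is false) over a
/// three-point neighborhood; endpoints are never reported.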
#[allow(dead_code)]
pub fn find_local_extrema<F>(signal: &Array1<F>, findmaxima: bool) -> Result<(Vec<usize>, Vec<F>)>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = signal.len();
    let mut indices = Vec::new();
    let mut values = Vec::new();

    if n < 3 {
        return Ok((indices, values));
    }

    for i in 1..(n - 1) {
        let is_extremum = if findmaxima {
            signal[i] > signal[i - 1] && signal[i] > signal[i + 1]
        } else {
            signal[i] < signal[i - 1] && signal[i] < signal[i + 1]
        };

        if is_extremum {
            indices.push(i);
            values.push(signal[i]);
        }
    }

    Ok((indices, values))
}

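/// Detects turning points as windowed local extrema whose jump from the
/// previous sample exceeds a threshold relative to the series range.
/// Returns `(turning_points, local_maxima, local_minima)` as index vectors.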
#[allow(dead_code)]
pub fn detect_turning_points<F>(
    ts: &Array1<F>,
    config: &TurningPointsConfig,
) -> Result<(Vec<usize>, Vec<usize>, Vec<usize>)>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    let window_size = config.extrema_window_size;
    let threshold = F::from(config.min_turning_point_threshold).unwrap();

    let mut turning_points = Vec::new();
    let mut local_maxima = Vec::new();
    let mut local_minima = Vec::new();

    // Too short to place a full window around any interior point; this also
    // guards against the usize underflow in `n - window_size` below.
    if n <= 2 * window_size {
        return Ok((turning_points, local_maxima, local_minima));
    }

    let min_val = ts.iter().fold(F::infinity(), |a, &b| a.min(b));
    let max_val = ts.iter().fold(F::neg_infinity(), |a, &b| a.max(b));
    let range = max_val - min_val;
    let abs_threshold = threshold * range;

    for i in window_size..n - window_size {
        let current = ts[i];
        let window_start = i - window_size;
        let window_end = i + window_size + 1;

        let is_local_max = ts
            .slice(s![window_start..window_end])
            .iter()
            .all(|&x| current >= x);

        let is_local_min = ts
            .slice(s![window_start..window_end])
            .iter()
            .all(|&x| current <= x);

        if is_local_max && (i == 0 || (current - ts[i - 1]).abs() >= abs_threshold) {
            local_maxima.push(i);
            turning_points.push(i);
        } else if is_local_min && (i == 0 || (current - ts[i - 1]).abs() >= abs_threshold) {
            // `else if` prevents a flat window from being registered as both
            // a maximum and a minimum (and duplicated in `turning_points`).
            local_minima.push(i);
            turning_points.push(i);
        }
    }

    Ok((turning_points, local_maxima, local_minima))
}

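/// Euclidean distance between two equal-length subsequences of the same
/// series; positions falling outside the series are skipped.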
#[allow(dead_code)]
pub fn euclidean_distance_subsequence<F>(
    ts: &Array1<F>,
    start1: usize,
    start2: usize,
    length: usize,
) -> F
where
    F: Float + FromPrimitive,
{
    let mut sum = F::zero();
    for i in 0..length {
        if start1 + i < ts.len() && start2 + i < ts.len() {
            let diff = ts[start1 + i] - ts[start2 + i];
            sum = sum + diff * diff;
        }
    }
    sum.sqrt()
}

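/// Linearly interpolates the value at position `x` from known
/// `(index, value)` pairs. Positions outside the covered range fall back to
/// the line through the first and last points.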
#[allow(dead_code)]
pub fn linear_interpolate<F>(x: usize, indices: &[usize], values: &[F]) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    if indices.is_empty() {
        return Ok(F::zero());
    }

    if indices.len() == 1 {
        return Ok(values[0]);
    }

    let mut left_idx = 0;
    let mut right_idx = indices.len() - 1;

    for i in 0..(indices.len() - 1) {
        if indices[i] <= x && x <= indices[i + 1] {
            left_idx = i;
            right_idx = i + 1;
            break;
        }
    }

    let x1 = F::from(indices[left_idx]).unwrap();
    let x2 = F::from(indices[right_idx]).unwrap();
    let y1 = values[left_idx];
    let y2 = values[right_idx];

    if x2 == x1 {
        return Ok(y1);
    }

    let x_f = F::from(x).unwrap();
    let interpolated = y1 + (y2 - y1) * (x_f - x1) / (x2 - x1);

    Ok(interpolated)
}

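/// Placeholder for cubic interpolation; currently delegates to
/// [`linear_interpolate`].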
#[allow(dead_code)]
pub fn cubic_interpolate<F>(x: usize, indices: &[usize], values: &[F]) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    // No cubic implementation yet; fall back to linear interpolation.
    linear_interpolate(x, indices, values)
}

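/// SAX breakpoints dividing the standard normal distribution into
/// `alphabet_size` equiprobable regions: tabulated for sizes 2-8, computed
/// from the normal quantile function otherwise.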
#[allow(dead_code)]
pub fn gaussian_breakpoints(alphabet_size: usize) -> Vec<f64> {
    match alphabet_size {
        2 => vec![0.0],
        3 => vec![-0.43, 0.43],
        4 => vec![-0.67, 0.0, 0.67],
        5 => vec![-0.84, -0.25, 0.25, 0.84],
        6 => vec![-0.97, -0.43, 0.0, 0.43, 0.97],
        7 => vec![-1.07, -0.57, -0.18, 0.18, 0.57, 1.07],
        8 => vec![-1.15, -0.67, -0.32, 0.0, 0.32, 0.67, 1.15],
        _ => {
            // Fall back to computing the quantiles directly.
            let mut breakpoints = Vec::new();
            for i in 1..alphabet_size {
                let p = i as f64 / alphabet_size as f64;
                breakpoints.push(standard_normal_quantile(p));
            }
            breakpoints
        }
    }
}

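/// Quantile function (inverse CDF) of the standard normal distribution,
/// evaluated as `sqrt(2) * erfinv(2p - 1)` with a polynomial approximation
/// of `erfinv` (roughly single-precision accuracy).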
#[allow(dead_code)]
pub fn standard_normal_quantile(p: f64) -> f64 {
    if p <= 0.0 {
        return f64::NEG_INFINITY;
    }
    if p >= 1.0 {
        return f64::INFINITY;
    }

    // Phi^{-1}(p) = sqrt(2) * erfinv(2p - 1), with erfinv evaluated by
    // M. Giles' polynomial approximation ("Approximating the erfinv
    // function", 2012).
    let x = 2.0 * p - 1.0;
    if x == 0.0 {
        return 0.0;
    }

    let w = -((1.0 - x) * (1.0 + x)).ln();
    let z = if w < 5.0 {
        // Central region: polynomial in w - 2.5.
        let w = w - 2.5;
        let mut z = 2.81022636e-08;
        z = 3.43273939e-07 + z * w;
        z = -3.5233877e-06 + z * w;
        z = -4.39150654e-06 + z * w;
        z = 0.00021858087 + z * w;
        z = -0.00125372503 + z * w;
        z = -0.00417768164 + z * w;
        z = 0.246640727 + z * w;
        z = 1.50140941 + z * w;
        z
    } else {
        // Tail region: polynomial in sqrt(w) - 3.
        let w = w.sqrt() - 3.0;
        let mut z = -0.000200214257;
        z = 0.000100950558 + z * w;
        z = 0.00134934322 + z * w;
        z = -0.00367342844 + z * w;
        z = 0.00573950773 + z * w;
        z = -0.0076224613 + z * w;
        z = 0.00943887047 + z * w;
        z = 1.00167406 + z * w;
        z = 2.83297682 + z * w;
        z
    };

    std::f64::consts::SQRT_2 * z * x
}

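/// Shannon entropy (in nats) of a two-class distribution given the class counts.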
#[allow(dead_code)]
pub fn calculate_entropy(class1_count: usize, class2_count: usize) -> f64 {
    let total = class1_count + class2_count;
    if total == 0 {
        return 0.0;
    }

    let p1 = class1_count as f64 / total as f64;
    let p2 = class2_count as f64 / total as f64;

    let mut entropy = 0.0;
    if p1 > 0.0 {
        entropy -= p1 * p1.ln();
    }
    if p2 > 0.0 {
        entropy -= p2 * p2.ln();
    }

    entropy
}

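/// Median absolute deviation of the series from the supplied `median`.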
#[allow(dead_code)]
pub fn calculate_mad<F>(ts: &Array1<F>, median: F) -> Result<F>
where
    F: Float + FromPrimitive,
{
    let n = ts.len();
    if n == 0 {
        return Ok(F::zero());
    }

    let mut deviations: Vec<F> = ts.iter().map(|&x| (x - median).abs()).collect();
    deviations.sort_by(|a, b| a.partial_cmp(b).unwrap());

    Ok(if n.is_multiple_of(2) {
        (deviations[n / 2 - 1] + deviations[n / 2]) / F::from(2.0).unwrap()
    } else {
        deviations[n / 2]
    })
}

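/// Mean of the series after dropping the lowest and highest `trim_fraction`
/// of sorted values; falls back to the median element when everything is
/// trimmed.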
#[allow(dead_code)]
pub fn calculate_trimmed_mean<F>(ts: &Array1<F>, trim_fraction: f64) -> Result<F>
where
    F: Float + FromPrimitive,
{
    let n = ts.len();
    if n == 0 {
        return Ok(F::zero());
    }

    let mut sorted = ts.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());

    // Clamp so the subtraction below cannot underflow for fractions >= 1.
    let trim_count = ((n as f64 * trim_fraction).floor() as usize).min(n / 2);
    let start = trim_count;
    let end = n - trim_count;

    if start >= end {
        // Everything was trimmed away; fall back to the median element.
        return Ok(sorted[n / 2]);
    }

    let sum = sorted[start..end].iter().fold(F::zero(), |acc, &x| acc + x);
    let count = F::from(end - start).unwrap();

    Ok(sum / count)
}

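/// Mean of the series after clamping the lowest and highest
/// `winsor_fraction` of values to the nearest retained order statistics.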
#[allow(dead_code)]
pub fn calculate_winsorized_mean<F>(ts: &Array1<F>, winsor_fraction: f64) -> Result<F>
where
    F: Float + FromPrimitive,
{
    let n = ts.len();
    if n == 0 {
        return Ok(F::zero());
    }

    let mut sorted = ts.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());

    // Clamp so the bound indices below stay in range for fractions >= 0.5.
    let winsor_count = ((n as f64 * winsor_fraction).floor() as usize).min((n - 1) / 2);

    let lower_bound = sorted[winsor_count];
    let upper_bound = sorted[n - winsor_count - 1];

    let winsorized: Vec<F> = ts
        .iter()
        .map(|&x| {
            if x < lower_bound {
                lower_bound
            } else if x > upper_bound {
                upper_bound
            } else {
                x
            }
        })
        .collect();

    let sum = winsorized.iter().fold(F::zero(), |acc, &x| acc + x);
    let count = F::from(n).unwrap();

    Ok(sum / count)
}

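/// Simplified power-spectrum proxy: the element-wise square of the
/// autocorrelation values. This is not the Wiener-Khinchin spectrum, which
/// would require a Fourier transform of the ACF.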
#[allow(dead_code)]
pub fn compute_power_spectrum<F>(acf: &Array1<F>) -> Array1<F>
where
    F: Float + FromPrimitive + Clone,
{
    acf.mapv(|x| x * x)
}