use crate::error::{Result, TimeSeriesError};
use scirs2_core::ndarray::{s, Array1};
use scirs2_core::numeric::{Float, FromPrimitive};
use std::collections::HashMap;
use std::fmt::Debug;

use super::utils::{
    calculate_std_dev, coarse_grain_series, discretize_and_get_probabilities, discretize_value,
    find_min_max, get_ordinal_pattern, linear_fit, refined_coarse_grain_series,
};

/// Calculates the approximate entropy (ApEn) of a time series.
///
/// ApEn quantifies regularity by comparing the log-frequency of matching
/// templates of length `m` against templates of length `m + 1`, where two
/// templates match when every pair of points differs by at most `r`.
#[allow(dead_code)]
pub fn calculate_approximate_entropy<F>(ts: &Array1<F>, m: usize, r: F) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    if ts.len() < m + 1 {
        return Err(TimeSeriesError::FeatureExtractionError(
            "Time series too short for approximate entropy calculation".to_string(),
        ));
    }

    let n = ts.len();

    let mut phi_m = F::zero();
    let mut phi_m_plus_1 = F::zero();

    // Phi(m): average log-frequency of template matches of length m.
    for i in 0..=n - m {
        let mut count = F::zero();

        for j in 0..=n - m {
            let mut is_match = true;

            for k in 0..m {
                let x = ts[i + k];
                let y = ts[j + k];
                if (x - y).abs() > r {
                    is_match = false;
                    break;
                }
            }

            if is_match {
                count = count + F::one();
            }
        }

        phi_m = phi_m + (count / F::from_usize(n - m + 1).unwrap()).ln();
    }

    phi_m = phi_m / F::from_usize(n - m + 1).unwrap();

    // Phi(m + 1): the same statistic for templates of length m + 1.
    for i in 0..=n - m - 1 {
        let mut count = F::zero();

        for j in 0..=n - m - 1 {
            let mut is_match = true;

            for k in 0..m + 1 {
                let x = ts[i + k];
                let y = ts[j + k];
                if (x - y).abs() > r {
                    is_match = false;
                    break;
                }
            }

            if is_match {
                count = count + F::one();
            }
        }

        phi_m_plus_1 = phi_m_plus_1 + (count / F::from_usize(n - m).unwrap()).ln();
    }

    phi_m_plus_1 = phi_m_plus_1 / F::from_usize(n - m).unwrap();

    Ok(phi_m - phi_m_plus_1)
}

/// Calculates the sample entropy (SampEn) of a time series.
///
/// SampEn is `-ln(A / B)`, where `B` counts template matches of length `m`
/// and `A` counts matches of length `m + 1`, excluding self-matches.
#[allow(dead_code)]
pub fn calculate_sample_entropy<F>(ts: &Array1<F>, m: usize, r: F) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    if ts.len() < m + 2 {
        return Err(TimeSeriesError::FeatureExtractionError(
            "Time series too short for sample entropy calculation".to_string(),
        ));
    }

    let n = ts.len();

    let mut a = F::zero();
    let mut b = F::zero();

    for i in 0..n - m {
        for j in i + 1..n - m {
            let mut is_match_m = true;

            for k in 0..m {
                let x = ts[i + k];
                let y = ts[j + k];
                if (x - y).abs() > r {
                    is_match_m = false;
                    break;
                }
            }

            if is_match_m {
                b = b + F::one();

                // Check whether the match extends to length m + 1.
                let x = ts[i + m];
                let y = ts[j + m];
                if (x - y).abs() <= r {
                    a = a + F::one();
                }
            }
        }
    }

    // No template matches at all: fall back to an upper bound of ln(n).
    if b == F::zero() {
        return Ok(F::from_f64(n as f64).unwrap().ln());
    }

    // Matches at length m but none at m + 1: cap the entropy at a large value.
    if a == F::zero() {
        return Ok(F::from_f64(100.0).unwrap());
    }

    Ok(-((a / b).ln()))
}

/// Calculates the permutation entropy of a time series for a given `order`.
///
/// Each window of `order` consecutive points is mapped to its ordinal pattern
/// (the permutation that sorts it); the Shannon entropy of the pattern
/// distribution is returned in nats.
#[allow(dead_code)]
pub fn calculate_permutation_entropy<F>(ts: &Array1<F>, order: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let n = ts.len();
    if n < order {
        return Err(TimeSeriesError::InsufficientData {
            message: "Time series too short for permutation entropy".to_string(),
            required: order,
            actual: n,
        });
    }

    let mut pattern_counts = HashMap::new();
    let mut total_patterns = 0;

    for i in 0..=(n - order) {
        let mut indices: Vec<usize> = (0..order).collect();
        let window: Vec<F> = (0..order).map(|j| ts[i + j]).collect();

        // Sort the indices by the values they point to; the resulting index
        // order is the window's ordinal pattern.
        indices.sort_by(|&a, &b| {
            window[a]
                .partial_cmp(&window[b])
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        let pattern = indices.iter().map(|&x| x as u8).collect::<Vec<u8>>();
        *pattern_counts.entry(pattern).or_insert(0) += 1;
        total_patterns += 1;
    }

    let mut entropy = F::zero();
    for &count in pattern_counts.values() {
        if count > 0 {
            let p = F::from(count).unwrap() / F::from(total_patterns).unwrap();
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Calculates the Lempel-Ziv complexity of a time series.
///
/// The series is binarized around its median and parsed into phrases in the
/// spirit of LZ76: each phrase is the shortest substring that has not yet
/// appeared in the history. The phrase count is normalized by the length.
#[allow(dead_code)]
pub fn calculate_lempel_ziv_complexity<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let median = {
        let mut sorted: Vec<F> = ts.iter().cloned().collect();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let n = sorted.len();
        if n.is_multiple_of(2) {
            (sorted[n / 2 - 1] + sorted[n / 2]) / F::from(2.0).unwrap()
        } else {
            sorted[n / 2]
        }
    };

    let binary_seq: Vec<u8> = ts
        .iter()
        .map(|&x| if x >= median { 1 } else { 0 })
        .collect();

    let mut complexity = 0;
    let mut i = 0;
    let n = binary_seq.len();

    while i < n {
        let mut l = 1;

        // Grow the candidate phrase while it can still be found in the
        // history; the first length at which it cannot is a new phrase.
        while i + l <= n {
            let pattern = &binary_seq[i..i + l];
            let mut found = false;

            for j in 0..i {
                if j + l <= i && &binary_seq[j..j + l] == pattern {
                    found = true;
                    break;
                }
            }

            if found {
                l += 1;
            } else {
                break;
            }
        }

        if i + l > n {
            l = n - i;
        }

        i += l;
        complexity += 1;
    }

    Ok(F::from(complexity).unwrap() / F::from(n).unwrap())
}

/// Calculates the Higuchi fractal dimension of a time series.
///
/// For each delay `k` up to `kmax`, the average normalized curve length
/// `L(k)` is computed; the dimension is the negated slope of `ln L(k)`
/// against `ln k`.
#[allow(dead_code)]
pub fn calculate_higuchi_fractal_dimension<F>(ts: &Array1<F>, kmax: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let n = ts.len();
    let mut log_k_vec = Vec::new();
    let mut log_l_vec = Vec::new();

    for k in 1..=kmax.min(n / 4) {
        let mut l_m = F::zero();

        for m in 1..=k {
            let mut l_mk = F::zero();
            let max_i = (n - m) / k;

            if max_i == 0 {
                continue;
            }

            for i in 1..=max_i {
                let idx1 = m + i * k - 1;
                let idx2 = m + (i - 1) * k - 1;
                if idx1 < n && idx2 < n {
                    l_mk = l_mk + (ts[idx1] - ts[idx2]).abs();
                }
            }

            // Normalize the curve length for this (m, k) pair.
            l_mk = l_mk * F::from(n - 1).unwrap() / F::from(max_i * k).unwrap();
            l_m = l_m + l_mk;
        }

        l_m = l_m / F::from(k).unwrap();

        if l_m > F::zero() {
            log_k_vec.push(F::from(k).unwrap().ln());
            log_l_vec.push(l_m.ln());
        }
    }

    if log_k_vec.len() < 2 {
        return Ok(F::zero());
    }

    // The fractal dimension is the negated slope of ln L(k) vs. ln k.
    let (slope, _intercept) = linear_fit(&log_k_vec, &log_l_vec);

    Ok(-slope)
}

/// Calculates the Hurst exponent via rescaled-range (R/S) analysis.
///
/// Returns 0.5 (the value for uncorrelated noise) when the series is too
/// short for a meaningful estimate.
#[allow(dead_code)]
pub fn calculate_hurst_exponent<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let n = ts.len();
    if n < 20 {
        return Ok(F::from(0.5).unwrap());
    }

    estimate_hurst_exponent(ts)
}

/// Calculates the detrended fluctuation analysis (DFA) scaling exponent.
///
/// The series is integrated, split into windows, linearly detrended per
/// window, and the exponent is the slope of `ln F(s)` against `ln s`, where
/// `F(s)` is the root-mean-square fluctuation at window size `s`.
#[allow(dead_code)]
pub fn calculate_dfa_exponent<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let n = ts.len();
    if n < 20 {
        return Ok(F::zero());
    }

    let mean = ts.sum() / F::from(n).unwrap();

    // Integrate the mean-centered series (the "profile").
    let mut integrated = Array1::zeros(n);
    let mut sum = F::zero();
    for i in 0..n {
        sum = sum + (ts[i] - mean);
        integrated[i] = sum;
    }

    let mut log_f_vec = Vec::new();
    let mut log_n_vec = Vec::new();

    let max_window = n / 4;
    for window_size in (4..=max_window).step_by(2) {
        let num_windows = n / window_size;
        if num_windows == 0 {
            continue;
        }

        let mut fluctuation_sum = F::zero();

        for i in 0..num_windows {
            let start = i * window_size;
            let end = start + window_size;

            if end > n {
                break;
            }

            let window = integrated.slice(s![start..end]);
            let x_vals: Array1<F> = (0..window_size).map(|j| F::from(j).unwrap()).collect();

            // Least-squares linear trend within the window.
            let x_mean = x_vals.sum() / F::from(window_size).unwrap();
            let y_mean = window.sum() / F::from(window_size).unwrap();

            let mut num = F::zero();
            let mut den = F::zero();
            for j in 0..window_size {
                let x_dev = x_vals[j] - x_mean;
                let y_dev = window[j] - y_mean;
                num = num + x_dev * y_dev;
                den = den + x_dev * x_dev;
            }

            let slope = if den > F::zero() {
                num / den
            } else {
                F::zero()
            };
            let intercept = y_mean - slope * x_mean;

            // Sum of squared deviations from the local trend.
            let mut fluctuation = F::zero();
            for j in 0..window_size {
                let trend_val = intercept + slope * x_vals[j];
                let deviation = window[j] - trend_val;
                fluctuation = fluctuation + deviation * deviation;
            }

            fluctuation_sum = fluctuation_sum + fluctuation;
        }

        let avg_fluctuation =
            (fluctuation_sum / F::from(num_windows * window_size).unwrap()).sqrt();

        if avg_fluctuation > F::zero() {
            log_f_vec.push(avg_fluctuation.ln());
            log_n_vec.push(F::from(window_size).unwrap().ln());
        }
    }

    if log_f_vec.len() < 2 {
        return Ok(F::zero());
    }

    // The DFA exponent is the slope of ln F(s) vs. ln s.
    let (dfa_exponent, _intercept) = linear_fit(&log_n_vec, &log_f_vec);

    Ok(dfa_exponent)
}

/// Calculates the spectral entropy of a power spectrum.
///
/// The spectrum is normalized to a probability distribution and its Shannon
/// entropy (in nats) is returned.
#[allow(dead_code)]
pub fn calculate_spectral_entropy<F>(power_spectrum: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let total_power: F = power_spectrum.sum();
    if total_power == F::zero() {
        return Ok(F::zero());
    }

    let mut entropy = F::zero();
    for &power in power_spectrum.iter() {
        if power > F::zero() {
            let p = power / total_power;
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Calculates the Shannon entropy of a time series discretized into `nbins`.
#[allow(dead_code)]
pub fn calculate_shannon_entropy<F>(ts: &Array1<F>, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let probabilities = discretize_and_get_probabilities(ts, nbins)?;

    let mut entropy = F::zero();
    for &p in probabilities.iter() {
        if p > F::zero() {
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Calculates the Rényi entropy of order `alpha`; reduces to Shannon entropy
/// as `alpha` approaches 1.
#[allow(dead_code)]
pub fn calculate_renyi_entropy<F>(ts: &Array1<F>, nbins: usize, alpha: f64) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    if alpha == 1.0 {
        return calculate_shannon_entropy(ts, nbins);
    }

    let probabilities = discretize_and_get_probabilities(ts, nbins)?;
    let alpha_f = F::from(alpha).unwrap();

    let mut sum = F::zero();
    for &p in probabilities.iter() {
        if p > F::zero() {
            sum = sum + p.powf(alpha_f);
        }
    }

    if sum == F::zero() {
        return Ok(F::zero());
    }

    // H_alpha = ln(sum p^alpha) / (1 - alpha)
    let entropy = (F::one() / (F::one() - alpha_f)) * sum.ln();
    Ok(entropy)
}

/// Calculates the Tsallis entropy with parameter `q`; reduces to Shannon
/// entropy as `q` approaches 1.
#[allow(dead_code)]
pub fn calculate_tsallis_entropy<F>(ts: &Array1<F>, nbins: usize, q: f64) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    if q == 1.0 {
        return calculate_shannon_entropy(ts, nbins);
    }

    let probabilities = discretize_and_get_probabilities(ts, nbins)?;
    let q_f = F::from(q).unwrap();

    let mut sum = F::zero();
    for &p in probabilities.iter() {
        if p > F::zero() {
            sum = sum + p.powf(q_f);
        }
    }

    // S_q = (1 - sum p^q) / (q - 1)
    let entropy = (F::one() - sum) / (q_f - F::one());
    Ok(entropy)
}

/// Calculates the KL divergence of the discretized series from the uniform
/// distribution over `nbins` (a measure of non-uniformity).
#[allow(dead_code)]
pub fn calculate_relative_entropy<F>(ts: &Array1<F>, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let probabilities = discretize_and_get_probabilities(ts, nbins)?;
    let uniform_prob = F::one() / F::from(nbins).unwrap();

    let mut kl_div = F::zero();
    for &p in probabilities.iter() {
        if p > F::zero() {
            kl_div = kl_div + p * (p / uniform_prob).ln();
        }
    }

    Ok(kl_div)
}

/// Calculates the differential entropy under a Gaussian assumption:
/// `0.5 * ln(2 * pi * e * sigma^2)`.
#[allow(dead_code)]
pub fn calculate_differential_entropy<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < 2 {
        return Ok(F::zero());
    }

    let std_dev = calculate_std_dev(ts);
    if std_dev == F::zero() {
        return Ok(F::neg_infinity());
    }

    let pi = F::from(std::f64::consts::PI).unwrap();
    let e = F::from(std::f64::consts::E).unwrap();
    let two = F::from(2.0).unwrap();

    let entropy = F::from(0.5).unwrap() * (two * pi * e * std_dev * std_dev).ln();
    Ok(entropy)
}

/// Calculates the weighted permutation entropy, where each ordinal pattern is
/// weighted by the standard deviation of its window.
#[allow(dead_code)]
pub fn calculate_weighted_permutation_entropy<F>(ts: &Array1<F>, order: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < order + 1 {
        return Ok(F::zero());
    }

    let mut pattern_weights = HashMap::new();
    let mut total_weight = F::zero();

    for i in 0..=n - order {
        let window = &ts.slice(s![i..i + order]);

        // Weight each window by its standard deviation.
        let mean = window.iter().fold(F::zero(), |acc, &x| acc + x) / F::from(order).unwrap();
        let variance = window.iter().fold(F::zero(), |acc, &x| {
            let diff = x - mean;
            acc + diff * diff
        }) / F::from(order).unwrap();

        let weight = variance.sqrt();

        let pattern = get_ordinal_pattern(window);

        let entry = pattern_weights.entry(pattern).or_insert(F::zero());
        *entry = *entry + weight;
        total_weight = total_weight + weight;
    }

    if total_weight == F::zero() {
        return Ok(F::zero());
    }

    let mut entropy = F::zero();
    for &weight in pattern_weights.values() {
        let p = weight / total_weight;
        if p > F::zero() {
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Calculates the multiscale entropy: sample entropy of coarse-grained
/// versions of the series at scales `1..=n_scales`.
#[allow(dead_code)]
pub fn calculate_multiscale_entropy<F>(
    ts: &Array1<F>,
    n_scales: usize,
    m: usize,
    tolerance_fraction: f64,
) -> Result<Vec<F>>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let mut entropies = Vec::new();
    let std_dev = calculate_std_dev(ts);
    let tolerance = F::from(tolerance_fraction).unwrap() * std_dev;

    for scale in 1..=n_scales {
        let coarse_grained = coarse_grain_series(ts, scale)?;
        // Skip scales where the coarse-grained series is too short for a
        // reliable sample-entropy estimate.
        let entropy = if coarse_grained.len() >= 10 {
            calculate_sample_entropy(&coarse_grained, m, tolerance)?
        } else {
            F::zero()
        };
        entropies.push(entropy);
    }

    Ok(entropies)
}

/// Calculates the refined composite multiscale entropy (RCMSE): the sample
/// entropy averaged over all coarse-graining offsets at each scale.
#[allow(dead_code)]
pub fn calculate_refined_composite_multiscale_entropy<F>(
    ts: &Array1<F>,
    n_scales: usize,
) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let std_dev = calculate_std_dev(ts);
    let tolerance = F::from(0.15).unwrap() * std_dev;

    let mut all_entropies = Vec::new();

    for scale in 1..=n_scales {
        // Average over every possible coarse-graining offset at this scale.
        for j in 0..scale {
            let coarse_grained = refined_coarse_grain_series(ts, scale, j)?;
            if coarse_grained.len() >= 10 {
                let entropy = calculate_sample_entropy(&coarse_grained, 2, tolerance)?;
                all_entropies.push(entropy);
            }
        }
    }

    if all_entropies.is_empty() {
        return Ok(F::zero());
    }

    let sum = all_entropies.iter().fold(F::zero(), |acc, &x| acc + x);
    Ok(sum / F::from(all_entropies.len()).unwrap())
}

/// Calculates an effective-complexity score: Lempel-Ziv complexity modulated
/// by normalized Shannon entropy, peaking between perfect order and noise.
#[allow(dead_code)]
pub fn calculate_effective_complexity<F>(ts: &Array1<F>, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let lz_complexity = calculate_lempel_ziv_complexity(ts)?;
    let entropy = calculate_shannon_entropy(ts, nbins)?;

    // Normalize entropy by its maximum possible value, ln(nbins).
    let max_entropy = F::from(nbins as f64).unwrap().ln();
    let normalized_entropy = if max_entropy > F::zero() {
        entropy / max_entropy
    } else {
        F::zero()
    };

    // The h * (1 - h) factor vanishes for both fully ordered and fully random
    // series, so the score peaks at intermediate disorder.
    let complexity = lz_complexity * normalized_entropy * (F::one() - normalized_entropy);
    Ok(complexity)
}

/// Calculates an entropy-like score from the estimated fractal dimension,
/// using the binary-entropy function of the dimension normalized to [0, 1].
#[allow(dead_code)]
pub fn calculate_fractal_entropy<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let fractal_dim = estimate_fractal_dimension(ts)?;
    let max_dim = F::from(2.0).unwrap();

    // Clamp to [0, 1] so the binary-entropy terms below stay well defined.
    let p = (fractal_dim / max_dim).max(F::zero()).min(F::one());
    if p == F::zero() || p == F::one() {
        return Ok(F::zero());
    }

    let entropy = -p * p.ln() - (F::one() - p) * (F::one() - p).ln();
    Ok(entropy)
}

/// Calculates an entropy-like score `-a * ln(a)` from the estimated DFA
/// exponent `a`.
#[allow(dead_code)]
pub fn calculate_dfa_entropy<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let dfa_exponent = estimate_dfa_exponent(ts)?;

    // Guard against ln(0); the estimator clamps the exponent to [0, 1].
    if dfa_exponent <= F::zero() {
        return Ok(F::zero());
    }

    let entropy = -dfa_exponent * dfa_exponent.ln();
    Ok(entropy.abs())
}

/// Calculates the spread (max minus min) of Shannon entropies across coarse-
/// graining scales, a rough proxy for multifractal width.
#[allow(dead_code)]
pub fn calculate_multifractal_entropy_width<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let mut entropies = Vec::new();

    for scale in 2..=8 {
        let coarse_grained = coarse_grain_series(ts, scale)?;
        if coarse_grained.len() > 10 {
            let entropy = calculate_shannon_entropy(&coarse_grained, 8)?;
            entropies.push(entropy);
        }
    }

    if entropies.len() < 2 {
        return Ok(F::zero());
    }

    let max_entropy = entropies.iter().fold(F::neg_infinity(), |a, &b| a.max(b));
    let min_entropy = entropies.iter().fold(F::infinity(), |a, &b| a.min(b));

    Ok(max_entropy - min_entropy)
}

/// Calculates an entropy-like score from the Hurst exponent: 1 when the
/// series looks like uncorrelated noise (H = 0.5), falling to 0 as H
/// approaches 0 or 1.
#[allow(dead_code)]
pub fn calculate_hurst_entropy<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let hurst_exponent = estimate_hurst_exponent(ts)?;

    let deviation = (hurst_exponent - F::from(0.5).unwrap()).abs();
    let entropy = F::one() - deviation * F::from(2.0).unwrap();

    Ok(entropy.max(F::zero()))
}

/// Estimates the entropy rate as the conditional entropy of a value given
/// the value `maxlag` steps earlier, H(X_{t+lag} | X_t).
#[allow(dead_code)]
pub fn calculate_entropy_rate<F>(ts: &Array1<F>, maxlag: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < maxlag + 2 {
        return Ok(F::zero());
    }

    let n_bins = 10;

    // H(X_{t+lag} | X_t) = H(X_t, X_{t+lag}) - H(X_t); the conditional
    // entropy below computes this directly from the joint and marginal
    // bin counts.
    calculate_conditional_entropy(ts, maxlag, n_bins)
}

/// Calculates the conditional entropy H(X_{t+lag} | X_t) of the discretized
/// series.
#[allow(dead_code)]
pub fn calculate_conditional_entropy<F>(ts: &Array1<F>, lag: usize, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < lag + 1 {
        return Ok(F::zero());
    }

    let mut joint_counts = HashMap::new();
    let mut marginal_counts = HashMap::new();

    let (min_val, max_val) = find_min_max(ts);

    for i in 0..n - lag {
        let current_bin = discretize_value(ts[i], min_val, max_val, nbins);
        let future_bin = discretize_value(ts[i + lag], min_val, max_val, nbins);

        *joint_counts.entry((current_bin, future_bin)).or_insert(0) += 1;
        *marginal_counts.entry(current_bin).or_insert(0) += 1;
    }

    let total = (n - lag) as f64;
    let mut conditional_entropy = F::zero();

    // H(Y | X) = -sum_{x,y} p(x, y) * ln p(y | x)
    for ((x_bin, _y_bin), &joint_count) in joint_counts.iter() {
        let marginal_count = marginal_counts[x_bin];

        let p_xy = F::from(joint_count as f64 / total).unwrap();
        let p_x = F::from(marginal_count as f64 / total).unwrap();
        let p_y_given_x = p_xy / p_x;

        if p_y_given_x > F::zero() {
            conditional_entropy = conditional_entropy - p_xy * p_y_given_x.ln();
        }
    }

    Ok(conditional_entropy)
}

/// Calculates the mutual information between the series and itself at the
/// given `lag`: I = H(current) + H(future) - H(joint).
#[allow(dead_code)]
pub fn calculate_mutual_information_lag<F>(ts: &Array1<F>, lag: usize, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < lag + 1 {
        return Ok(F::zero());
    }

    let current_entropy = calculate_shannon_entropy(&ts.slice(s![0..n - lag]).to_owned(), nbins)?;
    let future_entropy = calculate_shannon_entropy(&ts.slice(s![lag..]).to_owned(), nbins)?;
    let joint_entropy = calculate_joint_entropy(ts, lag, nbins)?;

    Ok(current_entropy + future_entropy - joint_entropy)
}

/// Approximates a self-transfer-entropy score as the absolute difference
/// between the conditional entropies at lag 1 and at `lag`. True transfer
/// entropy is defined between two series; for a single series this serves as
/// a rough proxy for lag-dependent information flow.
#[allow(dead_code)]
pub fn calculate_transfer_entropy<F>(ts: &Array1<F>, lag: usize, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let conditional_entropy_single = calculate_conditional_entropy(ts, 1, nbins)?;
    let conditional_entropy_multi = calculate_conditional_entropy(ts, lag, nbins)?;

    Ok((conditional_entropy_single - conditional_entropy_multi).abs())
}

/// Estimates the excess entropy from block entropies: the block entropy at
/// the largest block size minus the linear extrapolation `maxlag * h`, where
/// `h` is the estimated per-symbol entropy rate.
#[allow(dead_code)]
pub fn calculate_excess_entropy<F>(ts: &Array1<F>, maxlag: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n_bins = 10;
    let mut block_entropies = Vec::new();

    for block_size in 1..=maxlag {
        let entropy = calculate_block_entropy(ts, block_size, n_bins)?;
        block_entropies.push(entropy);
    }

    if block_entropies.len() < 2 {
        return Ok(F::zero());
    }

    // Entropy rate from the average slope of the block-entropy curve.
    let entropy_rate = (block_entropies[block_entropies.len() - 1] - block_entropies[0])
        / F::from(block_entropies.len() - 1).unwrap();
    let excess =
        block_entropies[block_entropies.len() - 1] - F::from(maxlag).unwrap() * entropy_rate;

    Ok(excess.max(F::zero()))
}

/// Calculates the joint entropy H(X_t, X_{t+lag}) of the discretized series.
#[allow(dead_code)]
fn calculate_joint_entropy<F>(ts: &Array1<F>, lag: usize, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < lag + 1 {
        return Ok(F::zero());
    }

    let (min_val, max_val) = find_min_max(ts);
    let mut joint_counts = HashMap::new();

    for i in 0..n - lag {
        let current_bin = discretize_value(ts[i], min_val, max_val, nbins);
        let future_bin = discretize_value(ts[i + lag], min_val, max_val, nbins);
        *joint_counts.entry((current_bin, future_bin)).or_insert(0) += 1;
    }

    let total = (n - lag) as f64;
    let mut entropy = F::zero();

    for &count in joint_counts.values() {
        if count > 0 {
            let p = F::from(count as f64 / total).unwrap();
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Calculates the Shannon entropy of discretized blocks of `block_size`
/// consecutive values.
#[allow(dead_code)]
fn calculate_block_entropy<F>(ts: &Array1<F>, block_size: usize, nbins: usize) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < block_size {
        return Ok(F::zero());
    }

    let mut block_counts = HashMap::new();
    let (min_val, max_val) = find_min_max(ts);

    for i in 0..=n - block_size {
        let mut block_pattern = Vec::new();
        for j in 0..block_size {
            block_pattern.push(discretize_value(ts[i + j], min_val, max_val, nbins));
        }
        *block_counts.entry(block_pattern).or_insert(0) += 1;
    }

    let total = (n - block_size + 1) as f64;
    let mut entropy = F::zero();

    for &count in block_counts.values() {
        if count > 0 {
            let p = F::from(count as f64 / total).unwrap();
            entropy = entropy - p * p.ln();
        }
    }

    Ok(entropy)
}

/// Estimates the fractal dimension with a simple box-counting scheme: for
/// each time scale, count the value-axis boxes needed to cover the curve,
/// then fit the slope of ln(count) against ln(scale). (The previous version
/// counted only `n / scale` windows, which is independent of the data and
/// always yields a dimension of 1.)
#[allow(dead_code)]
fn estimate_fractal_dimension<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < 4 {
        return Ok(F::one());
    }

    let (min_val, max_val) = find_min_max(ts);
    let value_range = max_val - min_val;
    if value_range <= F::zero() {
        // A constant series is a line: dimension 1.
        return Ok(F::one());
    }

    let mut box_counts = Vec::new();
    let mut scales = Vec::new();

    for scale in 2..=8 {
        let n_windows = n / scale;
        if n_windows == 0 {
            continue;
        }

        // Box height chosen so the grid is square in normalized coordinates.
        let box_height = value_range * F::from(scale).unwrap() / F::from(n).unwrap();

        let mut count = 0.0f64;
        for w in 0..n_windows {
            let mut lo = F::infinity();
            let mut hi = F::neg_infinity();
            for j in w * scale..(w + 1) * scale {
                lo = lo.min(ts[j]);
                hi = hi.max(ts[j]);
            }
            // Number of vertical boxes this window's excursion spans.
            let span = ((hi - lo) / box_height).to_f64().unwrap_or(0.0);
            count += span.ceil().max(1.0);
        }

        scales.push(scale as f64);
        box_counts.push(count);
    }

    if scales.len() < 2 {
        return Ok(F::one());
    }

    let log_scales: Vec<f64> = scales.iter().map(|x| x.ln()).collect();
    let log_counts: Vec<f64> = box_counts.iter().map(|x| x.ln()).collect();

    let n_points = log_scales.len() as f64;
    let sum_x: f64 = log_scales.iter().sum();
    let sum_y: f64 = log_counts.iter().sum();
    let sum_xy: f64 = log_scales
        .iter()
        .zip(log_counts.iter())
        .map(|(x, y)| x * y)
        .sum();
    let sum_x2: f64 = log_scales.iter().map(|x| x * x).sum();

    let slope = (n_points * sum_xy - sum_x * sum_y) / (n_points * sum_x2 - sum_x * sum_x);

    // Dimension is the negated slope, clamped to a physically plausible range.
    Ok(F::from(-slope)
        .unwrap()
        .max(F::zero())
        .min(F::from(3.0).unwrap()))
}

/// Estimates the DFA exponent by fitting the scaling of the detrended
/// fluctuation against window size on a log-log plot.
#[allow(dead_code)]
fn estimate_dfa_exponent<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < 10 {
        return Ok(F::from(0.5).unwrap());
    }

    // Integrate the mean-centered series.
    let mean = ts.iter().fold(F::zero(), |acc, &x| acc + x) / F::from(n).unwrap();
    let mut cumsum = Vec::with_capacity(n);
    let mut sum = F::zero();

    for &x in ts.iter() {
        sum = sum + (x - mean);
        cumsum.push(sum);
    }

    let mut fluctuations = Vec::new();
    let mut window_sizes = Vec::new();

    for window_size in (4..n / 4).step_by(4) {
        let n_windows = n / window_size;
        let mut mse_sum = F::zero();

        for i in 0..n_windows {
            let start = i * window_size;
            let end = start + window_size;

            let x_vals: Vec<F> = (0..window_size).map(|j| F::from(j).unwrap()).collect();
            let y_vals: Vec<F> = cumsum[start..end].to_vec();

            let (slope, intercept) = linear_fit(&x_vals, &y_vals);

            // Mean squared residual around the local linear trend.
            let mut mse = F::zero();
            for (j, &y_val) in y_vals.iter().enumerate().take(window_size) {
                let predicted = slope * F::from(j).unwrap() + intercept;
                let residual = y_val - predicted;
                mse = mse + residual * residual;
            }
            mse_sum = mse_sum + mse / F::from(window_size).unwrap();
        }

        let fluctuation = (mse_sum / F::from(n_windows).unwrap()).sqrt();
        // Skip zero fluctuations, which would make the ln() below blow up.
        if fluctuation > F::zero() {
            fluctuations.push(fluctuation);
            window_sizes.push(window_size);
        }
    }

    if fluctuations.len() < 2 {
        return Ok(F::from(0.5).unwrap());
    }

    let log_sizes: Vec<f64> = window_sizes.iter().map(|&x| (x as f64).ln()).collect();
    let log_flucts: Vec<f64> = fluctuations
        .iter()
        .map(|x| x.to_f64().unwrap_or(1.0).ln())
        .collect();

    let n_points = log_sizes.len() as f64;
    let sum_x: f64 = log_sizes.iter().sum();
    let sum_y: f64 = log_flucts.iter().sum();
    let sum_xy: f64 = log_sizes
        .iter()
        .zip(log_flucts.iter())
        .map(|(x, y)| x * y)
        .sum();
    let sum_x2: f64 = log_sizes.iter().map(|x| x * x).sum();

    let slope = (n_points * sum_xy - sum_x * sum_y) / (n_points * sum_x2 - sum_x * sum_x);

    // Clamp into [0, 1], the usual range of interest for DFA exponents.
    Ok(F::from(slope).unwrap().max(F::zero()).min(F::one()))
}

/// Estimates the Hurst exponent via rescaled-range (R/S) analysis: the slope
/// of ln(R/S) against ln(window size).
#[allow(dead_code)]
fn estimate_hurst_exponent<F>(ts: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    let n = ts.len();
    if n < 10 {
        return Ok(F::from(0.5).unwrap());
    }

    let mut rs_values = Vec::new();
    let mut window_sizes = Vec::new();

    for window_size in (10..n / 2).step_by(10) {
        let n_windows = n / window_size;
        let mut rs_sum = F::zero();

        for i in 0..n_windows {
            let start = i * window_size;
            let end = start + window_size;
            let window = &ts.slice(s![start..end]);

            let window_mean =
                window.iter().fold(F::zero(), |acc, &x| acc + x) / F::from(window_size).unwrap();

            // Cumulative deviations from the window mean.
            let mut cumulative_devs = Vec::with_capacity(window_size);
            let mut sum_dev = F::zero();

            for &x in window.iter() {
                sum_dev = sum_dev + (x - window_mean);
                cumulative_devs.push(sum_dev);
            }

            // Range R of the cumulative deviations.
            let max_dev = cumulative_devs
                .iter()
                .fold(F::neg_infinity(), |a, &b| a.max(b));
            let min_dev = cumulative_devs.iter().fold(F::infinity(), |a, &b| a.min(b));
            let range = max_dev - min_dev;

            // Sample standard deviation S of the window.
            let variance = window.iter().fold(F::zero(), |acc, &x| {
                let diff = x - window_mean;
                acc + diff * diff
            }) / F::from(window_size - 1).unwrap();
            let std_dev = variance.sqrt();

            if std_dev > F::zero() {
                rs_sum = rs_sum + range / std_dev;
            }
        }

        // Skip degenerate windows where R/S is zero, which would break ln().
        if n_windows > 0 && rs_sum > F::zero() {
            rs_values.push(rs_sum / F::from(n_windows).unwrap());
            window_sizes.push(window_size);
        }
    }

    if rs_values.len() < 2 {
        return Ok(F::from(0.5).unwrap());
    }

    let log_sizes: Vec<f64> = window_sizes.iter().map(|&x| (x as f64).ln()).collect();
    let log_rs: Vec<f64> = rs_values
        .iter()
        .map(|x| x.to_f64().unwrap_or(1.0).ln())
        .collect();

    let n_points = log_sizes.len() as f64;
    let sum_x: f64 = log_sizes.iter().sum();
    let sum_y: f64 = log_rs.iter().sum();
    let sum_xy: f64 = log_sizes
        .iter()
        .zip(log_rs.iter())
        .map(|(x, y)| x * y)
        .sum();
    let sum_x2: f64 = log_sizes.iter().map(|x| x * x).sum();

    let hurst = (n_points * sum_xy - sum_x * sum_y) / (n_points * sum_x2 - sum_x * sum_x);

    // Clamp to [0, 1], the valid range for a Hurst exponent.
    Ok(F::from(hurst).unwrap().max(F::zero()).min(F::one()))
}

/// Calculates permutation entropy with a default order of 3.
#[allow(dead_code)]
pub fn calculate_permutation_entropy_simple<F>(signal: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    calculate_permutation_entropy(signal, 3)
}

/// Calculates sample entropy with the conventional defaults m = 2 and
/// r = 0.2 times the standard deviation.
#[allow(dead_code)]
pub fn calculate_sample_entropy_simple<F>(signal: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug,
{
    let std_dev = calculate_std_dev(signal);
    let tolerance = F::from(0.2).unwrap() * std_dev;
    calculate_sample_entropy(signal, 2, tolerance)
}

/// Calculates the cross entropy `-sum(p1 * ln p2)` between the discretized
/// distributions of two equal-length signals, skipping empty bins.
#[allow(dead_code)]
pub fn calculate_cross_entropy_simple<F>(signal1: &Array1<F>, signal2: &Array1<F>) -> Result<F>
where
    F: Float + FromPrimitive + Debug + Clone,
{
    if signal1.len() != signal2.len() {
        return Err(TimeSeriesError::FeatureExtractionError(
            "Signals must have the same length for cross entropy".to_string(),
        ));
    }

    let n_bins = 10;
    let prob1 = discretize_and_get_probabilities(signal1, n_bins)?;
    let prob2 = discretize_and_get_probabilities(signal2, n_bins)?;

    let mut cross_entropy = F::zero();
    for (p1, p2) in prob1.iter().zip(prob2.iter()) {
        if *p1 > F::zero() && *p2 > F::zero() {
            cross_entropy = cross_entropy - (*p1) * p2.ln();
        }
    }

    Ok(cross_entropy)
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array1;

    #[test]
    fn test_approximate_entropy() {
        let data = Array1::from_vec(vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0]);
        let result = calculate_approximate_entropy(&data, 2, 0.1);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
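
    // A property-style check added as an illustrative sketch: a flat power
    // spectrum is maximally disordered, so `calculate_spectral_entropy`
    // should return ln(k) for k equal bins.
    #[test]
    fn test_spectral_entropy_uniform_spectrum() {
        let spectrum = Array1::from_vec(vec![1.0, 1.0, 1.0, 1.0]);
        let entropy = calculate_spectral_entropy(&spectrum).unwrap();
        assert!((entropy - 4.0f64.ln()).abs() < 1e-10);
    }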

    #[test]
    fn test_sample_entropy() {
        let data = Array1::from_vec(vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0]);
        let result = calculate_sample_entropy(&data, 2, 0.1);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
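
    // Usage sketch for the convenience wrapper: `calculate_sample_entropy_simple`
    // picks m = 2 and r = 0.2 * std_dev internally, so only the signal is needed.
    #[test]
    fn test_sample_entropy_simple() {
        let data = Array1::from_vec(vec![1.0, 2.0, 1.5, 2.5, 1.0, 2.0, 1.5, 2.5, 1.0, 2.0]);
        let result = calculate_sample_entropy_simple(&data);
        assert!(result.is_ok());
        // SampEn is -ln(A / B) with A <= B, so it can never be negative.
        assert!(result.unwrap() >= 0.0);
    }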

    #[test]
    fn test_permutation_entropy() {
        let data = Array1::from_vec(vec![1.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0]);
        let result = calculate_permutation_entropy(&data, 3);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
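
    // Light sanity sketch: weighted permutation entropy is non-negative, and
    // for a constant series every window has zero weight, so the function
    // falls back to zero.
    #[test]
    fn test_weighted_permutation_entropy() {
        let data = Array1::from_vec(vec![1.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0]);
        let entropy = calculate_weighted_permutation_entropy(&data, 3).unwrap();
        assert!(entropy >= 0.0);

        let constant = Array1::from_vec(vec![2.0; 8]);
        let zero = calculate_weighted_permutation_entropy(&constant, 3).unwrap();
        assert_eq!(zero, 0.0);
    }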

    #[test]
    fn test_lempel_ziv_complexity() {
        let data = Array1::from_vec(vec![1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0]);
        let result = calculate_lempel_ziv_complexity(&data);
        assert!(result.is_ok());
        let value = result.unwrap();
        assert!(value >= 0.0);
        assert!(value <= 1.0);
    }
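
    // Edge-case sketch for the LZ76-style parser: a constant series binarizes
    // to all ones, which compresses into few phrases, so the normalized
    // complexity stays strictly inside (0, 1].
    #[test]
    fn test_lempel_ziv_complexity_constant_series() {
        let data = Array1::from_vec(vec![5.0; 16]);
        let value = calculate_lempel_ziv_complexity(&data).unwrap();
        assert!(value > 0.0);
        assert!(value <= 1.0);
    }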

    #[test]
    fn test_higuchi_fractal_dimension() {
        let data = Array1::from_vec(vec![1.0, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5, 1.0]);
        let result = calculate_higuchi_fractal_dimension(&data, 5);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
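
    // Sanity sketch for the fractal-dimension-based score: the binary-entropy
    // construction in `calculate_fractal_entropy` is bounded by ln(2).
    #[test]
    fn test_fractal_entropy_bounds() {
        let data = Array1::from_vec(vec![1.0, 2.5, 1.5, 3.0, 0.5, 2.0, 1.0, 2.5, 1.5, 3.0]);
        let entropy = calculate_fractal_entropy(&data).unwrap();
        assert!(entropy >= 0.0);
        assert!(entropy <= 2.0f64.ln() + 1e-12);
    }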

    #[test]
    fn test_hurst_exponent() {
        let data = Array1::from_vec(vec![
            1.0, 1.1, 1.2, 1.15, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75,
            1.8, 1.85, 1.9, 1.95, 2.0,
        ]);
        let result = calculate_hurst_exponent(&data);
        assert!(result.is_ok());
        let hurst = result.unwrap();
        assert!(hurst >= 0.0);
        assert!(hurst <= 1.0);
    }
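
    // `calculate_hurst_entropy` maps the Hurst exponent into [0, 1], peaking
    // when the series looks like uncorrelated noise; a bounds check is enough
    // here.
    #[test]
    fn test_hurst_entropy_bounds() {
        let data = Array1::from_vec(vec![
            1.0, 1.1, 1.2, 1.15, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75,
            1.8, 1.85, 1.9, 1.95, 2.0,
        ]);
        let entropy = calculate_hurst_entropy(&data).unwrap();
        assert!(entropy >= 0.0);
        assert!(entropy <= 1.0);
    }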

    #[test]
    fn test_dfa_exponent() {
        let data = Array1::from_vec(vec![
            1.0, 1.1, 1.2, 1.15, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75,
            1.8, 1.85, 1.9, 1.95, 2.0,
        ]);
        let result = calculate_dfa_exponent(&data);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
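
    // Companion sketch for `calculate_dfa_entropy`: -a * ln(a) on a clamped
    // exponent a in [0, 1] is bounded by 1/e.
    #[test]
    fn test_dfa_entropy_bounds() {
        let data = Array1::from_vec(vec![
            1.0, 1.1, 1.2, 1.15, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75,
            1.8, 1.85, 1.9, 1.95, 2.0,
        ]);
        let entropy = calculate_dfa_entropy(&data).unwrap();
        assert!(entropy >= 0.0);
        assert!(entropy <= 1.0 / std::f64::consts::E + 1e-12);
    }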

    #[test]
    fn test_shannon_entropy() {
        let data = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]);
        let result = calculate_shannon_entropy(&data, 8);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }
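
    // Usage sketch for the generalized entropies: Rényi with alpha = 2
    // ("collision entropy") and Tsallis with q = 2, which for a discrete
    // distribution equals 1 - sum(p^2) and is therefore confined to [0, 1).
    #[test]
    fn test_renyi_and_tsallis_entropy() {
        let data = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]);

        let renyi = calculate_renyi_entropy(&data, 8, 2.0).unwrap();
        assert!(renyi >= 0.0);

        let tsallis = calculate_tsallis_entropy(&data, 8, 2.0).unwrap();
        assert!(tsallis >= 0.0);
        assert!(tsallis < 1.0);
    }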

    #[test]
    fn test_multiscale_entropy() {
        let data = Array1::from_vec(vec![
            1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0,
        ]);
        let result = calculate_multiscale_entropy(&data, 3, 2, 0.1);
        assert!(result.is_ok());
        let entropies = result.unwrap();
        assert_eq!(entropies.len(), 3);
        for entropy in entropies {
            assert!(entropy >= 0.0);
        }
    }
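
    // A small sketch tying the information-theoretic helpers together:
    // conditional entropy (and the entropy rate built on it) is non-negative,
    // and lagged mutual information should at least come out finite.
    #[test]
    fn test_conditional_entropy_and_entropy_rate() {
        let data = Array1::from_vec(vec![
            1.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0,
        ]);

        let cond = calculate_conditional_entropy(&data, 1, 4).unwrap();
        assert!(cond >= 0.0);

        let rate = calculate_entropy_rate(&data, 1).unwrap();
        assert!(rate >= 0.0);

        let mi = calculate_mutual_information_lag(&data, 1, 4).unwrap();
        assert!(mi.is_finite());
    }

    // Companion sketch for the refined composite variant, which averages
    // sample entropy over all coarse-graining offsets; the result should be
    // a single non-negative value.
    #[test]
    fn test_refined_composite_multiscale_entropy() {
        let data = Array1::from_vec(vec![
            1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0,
        ]);
        let result = calculate_refined_composite_multiscale_entropy(&data, 2);
        assert!(result.is_ok());
        assert!(result.unwrap() >= 0.0);
    }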
}