#![allow(dead_code)]
#![allow(clippy::too_many_arguments)]

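/// Histogram approximation of an invariant (ergodic) measure on
/// `[x_min, x_max)`.
///
/// Usage sketch (illustrative only; fenced as `ignore` so it is not run as a
/// doctest, since the crate path is not fixed here):
///
/// ```ignore
/// // Accumulate a logistic-map orbit at r = 4 and compare the time average
/// // of f(x) = x^2 with the space average under the empirical measure.
/// let mut em = ErgodicMeasure::new(0.0, 1.0, 64);
/// let mut x = 0.3_f64;
/// let mut orbit = Vec::new();
/// for _ in 0..10_000 {
///     x = 4.0 * x * (1.0 - x);
///     em.update(x);
///     orbit.push(x);
/// }
/// let (time_avg, space_avg, diff) = em.compare_averages(&orbit, &|x| x * x);
/// assert!(diff < 1e-2); // ergodicity: the two averages should agree
/// ```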
#[derive(Debug, Clone)]
pub struct ErgodicMeasure {
    pub x_min: f64,
    pub x_max: f64,
    pub num_bins: usize,
    pub counts: Vec<u64>,
    pub total: u64,
}

impl ErgodicMeasure {
    pub fn new(x_min: f64, x_max: f64, num_bins: usize) -> Self {
        assert!(num_bins > 0, "num_bins must be > 0");
        assert!(x_min < x_max, "x_min must be < x_max");
        Self {
            x_min,
            x_max,
            num_bins,
            counts: vec![0u64; num_bins],
            total: 0,
        }
    }

    /// Records a sample, ignoring points outside `[x_min, x_max)`.
    pub fn update(&mut self, x: f64) {
        if x < self.x_min || x >= self.x_max {
            return;
        }
        let frac = (x - self.x_min) / (self.x_max - self.x_min);
        let bin = ((frac * self.num_bins as f64) as usize).min(self.num_bins - 1);
        self.counts[bin] += 1;
        self.total += 1;
    }

    /// Normalized density estimate: each bin count divided by
    /// `total * bin_width`, so the histogram integrates to 1.
    pub fn density(&self) -> Vec<f64> {
        if self.total == 0 {
            return vec![0.0; self.num_bins];
        }
        let bin_width = (self.x_max - self.x_min) / self.num_bins as f64;
        self.counts
            .iter()
            .map(|&c| c as f64 / (self.total as f64 * bin_width))
            .collect()
    }

    /// Average of the observable `f` with respect to the empirical measure,
    /// evaluating `f` at each bin midpoint and weighting by bin counts.
    pub fn space_average<F>(&self, f: F) -> f64
    where
        F: Fn(f64) -> f64,
    {
        if self.total == 0 {
            return 0.0;
        }
        let bin_width = (self.x_max - self.x_min) / self.num_bins as f64;
        let mut acc = 0.0_f64;
        for (k, &c) in self.counts.iter().enumerate() {
            let mid = self.x_min + (k as f64 + 0.5_f64) * bin_width;
            acc += f(mid) * c as f64;
        }
        acc / self.total as f64
    }

    /// Returns `(time_average, space_average, |difference|)` for the
    /// observable `f`; for an ergodic system the two averages should agree.
    pub fn compare_averages<F>(&self, trajectory: &[f64], f: &F) -> (f64, f64, f64)
    where
        F: Fn(f64) -> f64,
    {
        let time_avg = if trajectory.is_empty() {
            0.0_f64
        } else {
            trajectory.iter().map(|&x| f(x)).sum::<f64>() / trajectory.len() as f64
        };
        let space_avg = self.space_average(f);
        let diff = (time_avg - space_avg).abs();
        (time_avg, space_avg, diff)
    }
}

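/// Streaming Birkhoff (running time) average of an observable along an orbit.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// let mut ba = BirkhoffAverage::new();
/// let mut x = 0.3_f64;
/// for _ in 0..10_000 {
///     x = 4.0 * x * (1.0 - x); // logistic map at r = 4
///     ba.update(x);
/// }
/// // `history` holds the running average after each update, so its tail
/// // shows how fast the time average settles; for r = 4 it tends to ~0.5.
/// println!("time average of x: {}", ba.current());
/// ```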
#[derive(Debug, Clone)]
pub struct BirkhoffAverage {
    pub sum: f64,
    pub count: usize,
    pub history: Vec<f64>,
}

impl BirkhoffAverage {
    pub fn new() -> Self {
        Self {
            sum: 0.0,
            count: 0,
            history: Vec::new(),
        }
    }

    /// Adds one observable value and records the running average.
    pub fn update(&mut self, fx: f64) {
        self.sum += fx;
        self.count += 1;
        self.history.push(self.sum / self.count as f64);
    }

    /// Current running average, or 0.0 if no values have been seen.
    pub fn current(&self) -> f64 {
        if self.count == 0 {
            0.0
        } else {
            self.sum / self.count as f64
        }
    }

    /// Applies `f` to every trajectory point and folds the results in.
    pub fn consume_trajectory<F>(&mut self, trajectory: &[f64], f: &F)
    where
        F: Fn(f64) -> f64,
    {
        for &x in trajectory {
            self.update(f(x));
        }
    }
}

impl Default for BirkhoffAverage {
    fn default() -> Self {
        Self::new()
    }
}

/// Plain time average of a signal: the Birkhoff average of the identity
/// observable. Returns 0.0 for an empty signal.
pub fn birkhoff_average(signal: &[f64]) -> f64 {
    if signal.is_empty() {
        return 0.0;
    }
    signal.iter().sum::<f64>() / signal.len() as f64
}

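/// Lyapunov-spectrum accumulator based on repeated QR re-orthonormalization
/// of Jacobians (a Benettin-style scheme).
///
/// Usage sketch (illustrative only, fenced as `ignore`). For a constant
/// diagonal Jacobian the exponents are just the logs of the diagonal entries:
///
/// ```ignore
/// let mut ls = LyapunovSpectrum::new(2);
/// let jacobian = [2.0, 0.0, 0.0, 0.5]; // row-major 2x2
/// for _ in 0..1_000 {
///     ls.update_qr(&jacobian);
/// }
/// let exps = ls.exponents(1.0); // ~[ln 2, -ln 2] = [0.693, -0.693]
/// ```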
pub struct LyapunovSpectrum {
    pub dim: usize,
    pub log_sums: Vec<f64>,
    pub steps: usize,
}

impl LyapunovSpectrum {
    pub fn new(dim: usize) -> Self {
        Self {
            dim,
            log_sums: vec![0.0_f64; dim],
            steps: 0,
        }
    }

    /// One QR step of the spectrum estimate: the row-major `jacobian` is
    /// decomposed by modified Gram-Schmidt and `ln |R_jj|` is accumulated for
    /// each column. The caller is expected to supply the Jacobian acting on
    /// the current tangent frame at each step.
    pub fn update_qr(&mut self, jacobian: &[f64]) {
        let d = self.dim;
        assert_eq!(jacobian.len(), d * d, "jacobian size mismatch");

        // Extract the columns of the row-major matrix.
        let mut cols: Vec<Vec<f64>> = (0..d)
            .map(|j| (0..d).map(|i| jacobian[i * d + j]).collect())
            .collect();

        for j in 0..d {
            // Normalize column j; its norm is the diagonal entry R_jj.
            let norm = cols[j].iter().map(|&v| v * v).sum::<f64>().sqrt();
            if norm > 1e-300 {
                self.log_sums[j] += norm.ln();
                for v in cols[j].iter_mut() {
                    *v /= norm;
                }
            }
            // Orthogonalize the remaining columns against column j.
            let cj = cols[j].clone();
            for k in (j + 1)..d {
                let dot: f64 = cj.iter().zip(cols[k].iter()).map(|(&a, &b)| a * b).sum();
                for (val, cj_val) in cols[k].iter_mut().zip(cj.iter()) {
                    *val -= dot * cj_val;
                }
            }
        }
        self.steps += 1;
    }

    /// Exponents averaged over total elapsed time `steps * dt`; all zeros if
    /// no time has elapsed.
    pub fn exponents(&self, dt: f64) -> Vec<f64> {
        let total = self.steps as f64 * dt;
        if total < 1e-300 {
            return vec![0.0_f64; self.dim];
        }
        self.log_sums.iter().map(|&s| s / total).collect()
    }
}

pub struct LyapunovAnalyzer {
    pub dim: usize,
}

impl LyapunovAnalyzer {
    pub fn new(dim: usize) -> Self {
        Self { dim }
    }
}

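/// Rough per-coordinate exponent estimate from a single trajectory: it
/// averages `ln |x_k(t+1) - x_k(t)|` over time and returns the results
/// sorted in descending order. This is a crude finite-difference proxy
/// rather than a tangent-space computation; see `LyapunovSpectrum` for the
/// QR-based route.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // Increments grow geometrically, so the leading estimate is positive.
/// let traj: Vec<Vec<f64>> = (0..=20).map(|t| vec![2.0_f64.powi(t)]).collect();
/// let exps = lyapunov_exponents(&traj, 1.0);
/// assert!(exps[0] > 0.0);
/// ```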
pub fn lyapunov_exponents(trajectory: &[Vec<f64>], dt: f64) -> Vec<f64> {
    assert!(!trajectory.is_empty(), "trajectory must not be empty");
    assert!(dt > 0.0, "dt must be positive");

    let n = trajectory[0].len();
    if n == 0 || trajectory.len() < 2 {
        return vec![0.0; n];
    }

    let steps = trajectory.len() - 1;
    let mut sums = vec![0.0_f64; n];

    for t in 0..steps {
        let prev = &trajectory[t];
        let next = &trajectory[t + 1];
        for k in 0..n.min(prev.len()).min(next.len()) {
            let delta = (next[k] - prev[k]).abs();
            if delta > 1e-300 {
                sums[k] += delta.ln();
            }
        }
    }

    let total_time = steps as f64 * dt;
    let mut exponents: Vec<f64> = sums.iter().map(|&s| s / total_time).collect();
    exponents.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
    exponents
}

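/// Lyapunov exponent of the logistic map `x -> r * x * (1 - x)`, computed by
/// averaging `ln |f'(x)| = ln |r - 2 r x|` along an orbit of length `n`
/// started at `x0`.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // At r = 4 the exact value is ln 2 ≈ 0.693 (fully chaotic regime).
/// let le = logistic_lyapunov(4.0, 0.3, 100_000);
/// assert!((le - 0.693).abs() < 0.05);
/// ```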
pub fn logistic_lyapunov(r: f64, x0: f64, n: usize) -> f64 {
    let mut x = x0;
    let mut sum = 0.0_f64;
    for _ in 0..n {
        let deriv = (r - 2.0_f64 * r * x).abs();
        if deriv > 1e-300 {
            sum += deriv.ln();
        }
        x = r * x * (1.0_f64 - x);
    }
    if n == 0 { 0.0_f64 } else { sum / n as f64 }
}

#[derive(Debug, Clone)]
pub struct PoincareSection {
    pub plane_normal: [f64; 3],
    pub plane_point: [f64; 3],
    pub crossings: Vec<[f64; 3]>,
}

impl PoincareSection {
    pub fn new(normal: [f64; 3], point: [f64; 3]) -> Self {
        Self {
            plane_normal: normal,
            plane_point: point,
            crossings: Vec::new(),
        }
    }
}

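/// Collects the points where a trajectory crosses the given plane in the
/// upward direction (signed distance going from negative to non-negative),
/// linearly interpolating between consecutive samples.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // A vertical sine motion pierces the z = 0 plane once per period.
/// let traj: Vec<[f64; 3]> = (0..1_000)
///     .map(|i| [0.0, 0.0, (0.1 * i as f64).sin()])
///     .collect();
/// let hits = poincare_section(&traj, [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
/// assert!(!hits.is_empty());
/// for p in &hits {
///     assert!(p[2].abs() < 1e-2); // crossings lie (approximately) on the plane
/// }
/// ```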
pub fn poincare_section(
    trajectory: &[[f64; 3]],
    normal: [f64; 3],
    point: [f64; 3],
) -> Vec<[f64; 3]> {
    if trajectory.len() < 2 {
        return Vec::new();
    }

    let dot3 = |a: &[f64; 3], b: &[f64; 3]| a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    let sub3 = |a: &[f64; 3], b: &[f64; 3]| [a[0] - b[0], a[1] - b[1], a[2] - b[2]];
    let signed_dist = |p: &[f64; 3]| dot3(&sub3(p, &point), &normal);

    let mut crossings = Vec::new();
    let mut prev_dist = signed_dist(&trajectory[0]);

    for i in 1..trajectory.len() {
        let curr_dist = signed_dist(&trajectory[i]);
        if prev_dist < 0.0 && curr_dist >= 0.0 {
            let t = if (curr_dist - prev_dist).abs() < 1e-15 {
                0.5_f64
            } else {
                (-prev_dist) / (curr_dist - prev_dist)
            };
            let p = &trajectory[i - 1];
            let q = &trajectory[i];
            let crossing = [
                p[0] + t * (q[0] - p[0]),
                p[1] + t * (q[1] - p[1]),
                p[2] + t * (q[2] - p[2]),
            ];
            crossings.push(crossing);
        }
        prev_dist = curr_dist;
    }
    crossings
}

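/// Binary recurrence matrix of a scalar signal: entry (i, j) marks
/// `|signal[i] - signal[j]| <= threshold`.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // A periodic signal produces diagonal line structures, which the RQA
/// // measures pick up as high determinism.
/// let sig: Vec<f64> = (0..200).map(|i| (0.2 * i as f64).sin()).collect();
/// let rp = RecurrencePlot::new(&sig, 0.25);
/// let rqa = RecurrenceAnalysis::compute(&rp, 2);
/// println!("RR = {}, DET = {}", rqa.recurrence_rate, rqa.determinism);
/// ```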
#[derive(Debug, Clone)]
pub struct RecurrencePlot {
    pub size: usize,
    pub threshold: f64,
    pub matrix: Vec<Vec<bool>>,
}

impl RecurrencePlot {
    /// Builds the full n x n recurrence matrix.
    pub fn new(signal: &[f64], threshold: f64) -> Self {
        let n = signal.len();
        let mut matrix = vec![vec![false; n]; n];
        for i in 0..n {
            for j in 0..n {
                matrix[i][j] = (signal[i] - signal[j]).abs() <= threshold;
            }
        }
        Self {
            size: n,
            threshold,
            matrix,
        }
    }
}

#[derive(Debug, Clone)]
pub struct RecurrenceAnalysis {
    pub recurrence_rate: f64,
    pub determinism: f64,
    pub mean_diagonal_length: f64,
    pub max_diagonal_length: usize,
    pub diagonal_entropy: f64,
    pub laminarity: f64,
    pub trapping_time: f64,
}

impl RecurrenceAnalysis {
    /// Computes the standard RQA measures from a recurrence plot, counting
    /// only lines of length >= `min_line` (the diagonal entropy is in nats).
    pub fn compute(rp: &RecurrencePlot, min_line: usize) -> Self {
        let rr = recurrence_rate(rp);
        let det = determinism(rp);
        let total_recurrent = (rp.size * rp.size) as f64 * rr;

        let diag_lengths = diagonal_line_lengths(rp, min_line);
        let total_on_diag: usize = diag_lengths.iter().copied().sum();
        let mean_diag = if diag_lengths.is_empty() {
            0.0_f64
        } else {
            total_on_diag as f64 / diag_lengths.len() as f64
        };
        let max_diag = diag_lengths.iter().cloned().max().unwrap_or(0);

        // Shannon entropy of the diagonal line-length distribution.
        let diag_entropy = if diag_lengths.is_empty() {
            0.0_f64
        } else {
            let n_lines = diag_lengths.len() as f64;
            let max_l = *diag_lengths.iter().max().unwrap_or(&1);
            let mut hist = vec![0usize; max_l + 1];
            for &l in &diag_lengths {
                hist[l] += 1;
            }
            hist.iter()
                .filter(|&&c| c > 0)
                .map(|&c| {
                    let p = c as f64 / n_lines;
                    -p * p.ln()
                })
                .sum()
        };

        // Laminarity and trapping time come from vertical line structures.
        let vert_lengths = vertical_line_lengths(rp, min_line);
        let total_on_vert: usize = vert_lengths.iter().copied().sum();
        let lam = if total_recurrent < 1.0 {
            0.0_f64
        } else {
            total_on_vert as f64 / total_recurrent
        };
        let trapping = if vert_lengths.is_empty() {
            0.0_f64
        } else {
            total_on_vert as f64 / vert_lengths.len() as f64
        };

        Self {
            recurrence_rate: rr,
            determinism: det,
            mean_diagonal_length: mean_diag,
            max_diagonal_length: max_diag,
            diagonal_entropy: diag_entropy,
            laminarity: lam,
            trapping_time: trapping,
        }
    }
}

/// Lengths of all diagonal line segments (including the main diagonal) of
/// length >= `min_line`.
fn diagonal_line_lengths(rp: &RecurrencePlot, min_line: usize) -> Vec<usize> {
    let n = rp.size;
    let mut lengths = Vec::new();
    for k in -(n as isize - 1)..=(n as isize - 1) {
        let mut run = 0usize;
        let i_start = if k < 0 { (-k) as usize } else { 0 };
        let i_end = if k < 0 {
            n
        } else {
            n.saturating_sub(k as usize)
        };
        for i in i_start..i_end {
            let j = (i as isize + k) as usize;
            if rp.matrix[i][j] {
                run += 1;
            } else {
                if run >= min_line {
                    lengths.push(run);
                }
                run = 0;
            }
        }
        if run >= min_line {
            lengths.push(run);
        }
    }
    lengths
}

/// Lengths of all vertical line segments of length >= `min_line`.
fn vertical_line_lengths(rp: &RecurrencePlot, min_line: usize) -> Vec<usize> {
    let n = rp.size;
    let mut lengths = Vec::new();
    for j in 0..n {
        let mut run = 0usize;
        for i in 0..n {
            if rp.matrix[i][j] {
                run += 1;
            } else {
                if run >= min_line {
                    lengths.push(run);
                }
                run = 0;
            }
        }
        if run >= min_line {
            lengths.push(run);
        }
    }
    lengths
}

/// Fraction of matrix entries that are recurrent.
pub fn recurrence_rate(rp: &RecurrencePlot) -> f64 {
    if rp.size == 0 {
        return 0.0;
    }
    let total = (rp.size * rp.size) as f64;
    let recurrent: usize = rp
        .matrix
        .iter()
        .flat_map(|row| row.iter())
        .filter(|&&v| v)
        .count();
    recurrent as f64 / total
}

/// Fraction of recurrent points that lie on diagonal lines of length >= 2.
pub fn determinism(rp: &RecurrencePlot) -> f64 {
    if rp.size == 0 {
        return 0.0;
    }
    let n = rp.size;
    let mut on_diag = 0usize;
    let mut total_recurrent = 0usize;

    for i in 0..n {
        for j in 0..n {
            if rp.matrix[i][j] {
                total_recurrent += 1;
            }
        }
    }

    if total_recurrent == 0 {
        return 0.0;
    }

    for k in -(n as isize - 1)..=(n as isize - 1) {
        let mut run = 0usize;
        let i_start = if k < 0 { (-k) as usize } else { 0 };
        let i_end = if k < 0 { n } else { n - k as usize };
        for i in i_start..i_end {
            let j = (i as isize + k) as usize;
            if rp.matrix[i][j] {
                run += 1;
            } else {
                if run >= 2 {
                    on_diag += run;
                }
                run = 0;
            }
        }
        if run >= 2 {
            on_diag += run;
        }
    }

    on_diag as f64 / total_recurrent as f64
}

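/// Bundle of entropy estimates computed from a symbol sequence.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // A period-4 sequence has (nearly) zero entropy rate once words of
/// // length >= 2 are considered; a chaotic source gives a positive rate.
/// let syms: Vec<usize> = (0..400).map(|i| i % 4).collect();
/// let ea = EntropicAnalysis::from_symbols(&syms, 3, 4);
/// assert!(ea.ks_entropy < 1e-3);
/// ```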
#[derive(Debug, Clone)]
pub struct EntropicAnalysis {
    pub word_length: usize,
    pub alphabet_size: usize,
    pub ks_entropy: f64,
    pub topological_entropy: f64,
}

impl EntropicAnalysis {
    pub fn from_symbols(symbols: &[usize], word_length: usize, alphabet_size: usize) -> Self {
        let ks = kolmogorov_sinai_entropy(symbols, word_length);
        let topo = topological_entropy_estimate(symbols, word_length);
        Self {
            word_length,
            alphabet_size,
            ks_entropy: ks,
            topological_entropy: topo,
        }
    }
}

/// Kolmogorov-Sinai entropy rate estimate in bits per symbol: the difference
/// between the block entropies at word lengths `word_length + 1` and
/// `word_length`, clamped at zero.
pub fn kolmogorov_sinai_entropy(symbols: &[usize], word_length: usize) -> f64 {
    if symbols.len() <= word_length + 1 || word_length == 0 {
        return 0.0_f64;
    }
    let h_l = block_entropy(symbols, word_length);
    let h_l1 = block_entropy(symbols, word_length + 1);
    (h_l1 - h_l).max(0.0_f64)
}

/// Shannon entropy (in bits) of the empirical distribution of words of the
/// given length, taken over all overlapping windows.
pub fn block_entropy(symbols: &[usize], word_length: usize) -> f64 {
    use std::collections::HashMap;
    if symbols.len() < word_length || word_length == 0 {
        return 0.0_f64;
    }
    let mut counts: HashMap<Vec<usize>, usize> = HashMap::new();
    let num_words = symbols.len() - word_length + 1;
    for i in 0..num_words {
        let word = symbols[i..i + word_length].to_vec();
        *counts.entry(word).or_insert(0) += 1;
    }
    let total = num_words as f64;
    counts
        .values()
        .map(|&c| {
            let p = c as f64 / total;
            -p * p.log2()
        })
        .sum()
}

/// Topological entropy estimate: `log2` of the number of distinct observed
/// words, divided by the word length.
pub fn topological_entropy_estimate(symbols: &[usize], word_length: usize) -> f64 {
    use std::collections::HashSet;
    if symbols.len() < word_length || word_length == 0 {
        return 0.0_f64;
    }
    let mut words: HashSet<Vec<usize>> = HashSet::new();
    let num_words = symbols.len() - word_length + 1;
    for i in 0..num_words {
        words.insert(symbols[i..i + word_length].to_vec());
    }
    let distinct = words.len() as f64;
    if distinct <= 1.0_f64 {
        0.0_f64
    } else {
        distinct.log2() / word_length as f64
    }
}

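/// Maps each sample to one of `num_symbols` equal-width cells covering
/// `[x_min, x_max]`; out-of-range values are clamped into the end cells.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// let syms = symbolic_encode(&[0.05, 0.30, 0.55, 0.80], 0.0, 1.0, 4);
/// assert_eq!(syms, vec![0, 1, 2, 3]);
/// ```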
pub fn symbolic_encode(signal: &[f64], x_min: f64, x_max: f64, num_symbols: usize) -> Vec<usize> {
    assert!(num_symbols > 0, "num_symbols must be > 0");
    let range = x_max - x_min;
    if range <= 0.0_f64 {
        return vec![0usize; signal.len()];
    }
    signal
        .iter()
        .map(|&x| {
            let frac = (x - x_min) / range;
            let idx = (frac * num_symbols as f64) as usize;
            idx.min(num_symbols - 1)
        })
        .collect()
}

#[derive(Debug, Clone)]
pub struct MixingRate {
    pub correlation_decay: Vec<f64>,
    pub decay_rate: f64,
    pub mixing_timescale: f64,
}

impl MixingRate {
    /// Fits an exponential decay rate to an autocorrelation sequence; the
    /// mixing timescale is its reciprocal (infinite if no decay is found).
    pub fn from_autocorr(corr: Vec<f64>) -> Self {
        let rate = estimate_decay_rate(&corr);
        let timescale = if rate > 1e-300 {
            1.0_f64 / rate
        } else {
            f64::INFINITY
        };
        Self {
            correlation_decay: corr,
            decay_rate: rate,
            mixing_timescale: timescale,
        }
    }

    pub fn mixing_time(&self, threshold: f64) -> Option<usize> {
        mixing_time(&self.correlation_decay, threshold)
    }
}

/// Least-squares slope of `ln c_k` against lag `k`, restricted to positive
/// correlation values; returns `max(-slope, 0)` as the decay rate.
pub fn estimate_decay_rate(corr: &[f64]) -> f64 {
    if corr.len() < 2 {
        return 0.0_f64;
    }
    let mut sum_k = 0.0_f64;
    let mut sum_log_c = 0.0_f64;
    let mut sum_k2 = 0.0_f64;
    let mut sum_k_log_c = 0.0_f64;
    let mut count = 0.0_f64;
    for (k, &c) in corr.iter().enumerate() {
        if c > 0.0_f64 {
            let log_c = c.ln();
            let kf = k as f64;
            sum_k += kf;
            sum_log_c += log_c;
            sum_k2 += kf * kf;
            sum_k_log_c += kf * log_c;
            count += 1.0_f64;
        }
    }
    if count < 2.0_f64 {
        return 0.0_f64;
    }
    let slope = (count * sum_k_log_c - sum_k * sum_log_c) / (count * sum_k2 - sum_k * sum_k);
    (-slope).max(0.0_f64)
}

#[derive(Debug, Clone)]
pub struct MixingAnalyzer {
    pub correlation_decay: Vec<f64>,
}

impl MixingAnalyzer {
    pub fn new(correlation_decay: Vec<f64>) -> Self {
        Self { correlation_decay }
    }

    pub fn mixing_time(&self, threshold: f64) -> Option<usize> {
        mixing_time(&self.correlation_decay, threshold)
    }
}

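/// Normalized autocorrelation of a signal up to `max_lag` (biased estimator,
/// dividing by `n` at every lag; lag 0 is always 1).
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// // Estimate how quickly correlations decay along an orbit.
/// let sig: Vec<f64> = (0..500).map(|i| (0.7 * i as f64).sin()).collect();
/// let ac = autocorrelation(&sig, 50);
/// let mr = MixingRate::from_autocorr(ac.clone());
/// println!("decay rate = {}", mr.decay_rate);
/// println!("mixing time = {:?}", mixing_time(&ac, 0.1));
/// ```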
pub fn autocorrelation(signal: &[f64], max_lag: usize) -> Vec<f64> {
    let n = signal.len();
    if n == 0 {
        return Vec::new();
    }
    let mu = signal.iter().sum::<f64>() / n as f64;
    let c0: f64 = signal.iter().map(|&x| (x - mu) * (x - mu)).sum::<f64>() / n as f64;
    // A (near-)constant signal is perfectly correlated with itself at
    // every lag.
    if c0 < 1e-300 {
        return vec![1.0_f64; (max_lag + 1).min(n)];
    }

    let lags = (max_lag + 1).min(n);
    let mut result = Vec::with_capacity(lags);
    for k in 0..lags {
        let len = n - k;
        let ck: f64 = (0..len)
            .map(|t| (signal[t] - mu) * (signal[t + k] - mu))
            .sum::<f64>()
            / n as f64;
        result.push(ck / c0);
    }
    result
}

/// First lag at which the autocorrelation drops below `threshold`, if any.
pub fn mixing_time(autocorr: &[f64], threshold: f64) -> Option<usize> {
    autocorr.iter().position(|&v| v < threshold)
}

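/// Running mean and variance via Welford's online algorithm (the raw samples
/// are also retained in `values`).
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// let mut em = ErgodicMean::new();
/// for x in [1.0, 2.0, 3.0, 4.0, 5.0] {
///     em.update(x);
/// }
/// assert!((em.mean() - 3.0).abs() < 1e-12);
/// assert!((em.variance() - 2.5).abs() < 1e-12); // sample variance
/// ```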
#[derive(Debug, Clone)]
pub struct ErgodicMean {
    pub values: Vec<f64>,
    mean_acc: f64,
    m2_acc: f64,
}

impl ErgodicMean {
    pub fn new() -> Self {
        Self {
            values: Vec::new(),
            mean_acc: 0.0,
            m2_acc: 0.0,
        }
    }

    /// Welford's online update for the running mean and sum of squared
    /// deviations.
    pub fn update(&mut self, x: f64) {
        self.values.push(x);
        let n = self.values.len() as f64;
        let delta = x - self.mean_acc;
        self.mean_acc += delta / n;
        let delta2 = x - self.mean_acc;
        self.m2_acc += delta * delta2;
    }

    pub fn mean(&self) -> f64 {
        self.mean_acc
    }

    /// Sample variance (n - 1 denominator); 0.0 with fewer than two samples.
    pub fn variance(&self) -> f64 {
        let n = self.values.len();
        if n < 2 {
            0.0
        } else {
            self.m2_acc / (n as f64 - 1.0)
        }
    }
}

impl Default for ErgodicMean {
    fn default() -> Self {
        Self::new()
    }
}

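/// One step of the baker's map on the unit square: stretch by 2 in `x`, cut
/// at `x = 1`, and stack the right half on top of the left half.
///
/// Usage sketch (illustrative only, fenced as `ignore`):
///
/// ```ignore
/// let traj = bakers_map_trajectory(0.123, 0.456, 1_000);
/// for &(x, y) in &traj {
///     assert!((0.0..1.0).contains(&x) && (0.0..1.0).contains(&y));
/// }
/// // Caveat: in f64 arithmetic the x-coordinate (a binary shift map)
/// // collapses to exactly 0 after ~53 steps, so long orbits are not a
/// // faithful sample of the uniform invariant measure.
/// ```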
pub fn bakers_map_step(x: f64, y: f64) -> (f64, f64) {
    // Assumes (x, y) lies in the unit square [0, 1) x [0, 1).
    let two_x = 2.0_f64 * x;
    let floor = two_x.floor();
    let xn = two_x - floor;
    let yn = (y + floor) / 2.0_f64;
    (xn, yn)
}

pub fn bakers_map_trajectory(x0: f64, y0: f64, n: usize) -> Vec<(f64, f64)> {
    let mut traj = Vec::with_capacity(n + 1);
    let mut x = x0;
    let mut y = y0;
    traj.push((x, y));
    for _ in 0..n {
        let (xn, yn) = bakers_map_step(x, y);
        x = xn;
        y = yn;
        traj.push((x, y));
    }
    traj
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::f64::consts::PI;

    #[test]
    fn test_ergodic_measure_update_and_density() {
        let mut em = ErgodicMeasure::new(0.0, 1.0, 10);
        for i in 0..100 {
            em.update(i as f64 / 100.0);
        }
        assert_eq!(em.total, 100);
        let dens = em.density();
        assert_eq!(dens.len(), 10);
        for &d in &dens {
            assert!(d > 0.0, "density should be positive");
        }
    }

    #[test]
    fn test_ergodic_measure_out_of_bounds_dropped() {
        let mut em = ErgodicMeasure::new(0.0, 1.0, 5);
        em.update(-0.5);
        em.update(1.5);
        assert_eq!(em.total, 0);
    }

    #[test]
    fn test_ergodic_measure_density_zero_when_empty() {
        let em = ErgodicMeasure::new(0.0, 10.0, 5);
        let dens = em.density();
        for &d in &dens {
            assert_eq!(d, 0.0);
        }
    }

    #[test]
    fn test_ergodic_measure_space_average_identity() {
        let mut em = ErgodicMeasure::new(0.0, 1.0, 100);
        for i in 0..1000 {
            em.update(i as f64 / 1000.0);
        }
        let avg = em.space_average(|x| x);
        assert!((avg - 0.5).abs() < 0.05, "space average ≈ 0.5, got {}", avg);
    }

    #[test]
    fn test_ergodic_measure_compare_averages() {
        let mut em = ErgodicMeasure::new(0.0, 1.0, 50);
        let traj: Vec<f64> = (0..500).map(|i| i as f64 / 500.0).collect();
        for &x in &traj {
            em.update(x);
        }
        let (_ta, _sa, diff) = em.compare_averages(&traj, &|x| x);
        assert!(
            diff < 0.1,
            "time and space averages should be close: diff = {}",
            diff
        );
    }

    #[test]
    fn test_ergodic_measure_panics_zero_bins() {
        let result = std::panic::catch_unwind(|| ErgodicMeasure::new(0.0, 1.0, 0));
        assert!(result.is_err());
    }

    #[test]
    fn test_birkhoff_average_empty_current() {
        let ba = BirkhoffAverage::new();
        assert_eq!(ba.current(), 0.0);
    }

    #[test]
    fn test_birkhoff_average_single_update() {
        let mut ba = BirkhoffAverage::new();
        ba.update(7.0);
        assert!((ba.current() - 7.0).abs() < 1e-12);
    }

    #[test]
    fn test_birkhoff_average_convergence() {
        let mut ba = BirkhoffAverage::new();
        for i in 1..=100 {
            ba.update(i as f64);
        }
        assert!((ba.current() - 50.5).abs() < 1e-10);
    }

    #[test]
    fn test_birkhoff_average_history_length() {
        let mut ba = BirkhoffAverage::new();
        for _ in 0..20 {
            ba.update(1.0);
        }
        assert_eq!(ba.history.len(), 20);
    }

    #[test]
    fn test_birkhoff_average_consume_trajectory() {
        let mut ba = BirkhoffAverage::new();
        let traj: Vec<f64> = vec![2.0, 4.0, 6.0, 8.0];
        ba.consume_trajectory(&traj, &|x| x);
        assert!((ba.current() - 5.0).abs() < 1e-12);
    }

    #[test]
    fn test_birkhoff_average_observable() {
        let mut ba = BirkhoffAverage::new();
        let traj: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0];
        ba.consume_trajectory(&traj, &|x| x * x);
        assert!((ba.current() - 7.5).abs() < 1e-12);
    }

    #[test]
    fn test_birkhoff_average_default() {
        let ba = BirkhoffAverage::default();
        assert_eq!(ba.count, 0);
    }

    #[test]
    fn test_lyapunov_spectrum_new() {
        let ls = LyapunovSpectrum::new(2);
        assert_eq!(ls.dim, 2);
        assert_eq!(ls.steps, 0);
    }

    #[test]
    fn test_lyapunov_spectrum_identity_jacobian() {
        let mut ls = LyapunovSpectrum::new(2);
        let identity = [1.0, 0.0, 0.0, 1.0];
        for _ in 0..10 {
            ls.update_qr(&identity);
        }
        let exp = ls.exponents(1.0);
        for &e in &exp {
            assert!(
                e.abs() < 0.5,
                "exponent should be near 0 for identity: {}",
                e
            );
        }
    }

    #[test]
    fn test_lyapunov_spectrum_diagonal_stretching() {
        let mut ls = LyapunovSpectrum::new(2);
        let diag = [2.0, 0.0, 0.0, 0.5];
        for _ in 0..100 {
            ls.update_qr(&diag);
        }
        let exp = ls.exponents(1.0);
        assert_eq!(exp.len(), 2);
        assert!(
            exp[0] > 0.0,
            "first exponent should be positive: {}",
            exp[0]
        );
    }

    #[test]
    fn test_lyapunov_spectrum_exponents_zero_dt() {
        let ls = LyapunovSpectrum::new(3);
        let exp = ls.exponents(0.0);
        for &e in &exp {
            assert_eq!(e, 0.0);
        }
    }

    #[test]
    fn test_logistic_lyapunov_r4_positive() {
        let le = logistic_lyapunov(4.0, 0.3, 10_000);
        assert!(le > 0.5, "logistic LE at r=4 should be > 0.5, got {}", le);
        assert!(le < 1.0, "logistic LE at r=4 should be < 1.0, got {}", le);
    }

    #[test]
    fn test_logistic_lyapunov_r2_negative() {
        let le = logistic_lyapunov(2.0, 0.4, 5_000);
        assert!(
            le < 0.0,
            "logistic LE at r=2 should be negative, got {}",
            le
        );
    }

    #[test]
    fn test_logistic_lyapunov_zero_steps() {
        assert_eq!(logistic_lyapunov(4.0, 0.3, 0), 0.0);
    }

    #[test]
    fn test_lyapunov_empty_trajectory_panics() {
        let result = std::panic::catch_unwind(|| lyapunov_exponents(&[], 0.01));
        assert!(result.is_err());
    }

    #[test]
    fn test_lyapunov_zero_dt_panics() {
        let traj = vec![vec![1.0, 0.0], vec![1.1, 0.0]];
        let result = std::panic::catch_unwind(|| lyapunov_exponents(&traj, 0.0));
        assert!(result.is_err());
    }

    #[test]
    fn test_lyapunov_single_step_returns_vector() {
        let traj = vec![vec![1.0, 0.0], vec![2.0, 0.0]];
        let exps = lyapunov_exponents(&traj, 1.0);
        assert_eq!(exps.len(), 2);
    }

    #[test]
    fn test_lyapunov_growing_trajectory_positive() {
        let n = 20;
        let dt = 1.0;
        let traj: Vec<Vec<f64>> = (0..=n).map(|t| vec![2.0_f64.powi(t)]).collect();
        let exps = lyapunov_exponents(&traj, dt);
        assert!(!exps.is_empty());
        assert!(
            exps[0] > 0.0,
            "leading exponent should be positive: {}",
            exps[0]
        );
    }

    #[test]
    fn test_lyapunov_constant_trajectory_near_zero() {
        let traj: Vec<Vec<f64>> = vec![vec![1.0, 2.0]; 10];
        let exps = lyapunov_exponents(&traj, 0.1);
        for e in &exps {
            assert!(e.abs() < 1.0, "exponent {} should be near zero", e);
        }
    }

    #[test]
    fn test_lyapunov_sorted_descending() {
        let traj: Vec<Vec<f64>> = (0..20)
            .map(|t| vec![2.0_f64.powi(t), 1.5_f64.powi(t), 0.9_f64.powi(t)])
            .collect();
        let exps = lyapunov_exponents(&traj, 1.0);
        for i in 1..exps.len() {
            assert!(
                exps[i - 1] >= exps[i],
                "exponents not sorted at index {}",
                i
            );
        }
    }

    #[test]
    fn test_lyapunov_analyzer_new() {
        let la = LyapunovAnalyzer::new(3);
        assert_eq!(la.dim, 3);
    }

    #[test]
    fn test_poincare_empty_trajectory() {
        let crossings = poincare_section(&[], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
        assert!(crossings.is_empty());
    }

    #[test]
    fn test_poincare_single_point_no_crossings() {
        let crossings = poincare_section(&[[0.0, 0.0, 1.0]], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
        assert!(crossings.is_empty());
    }

    #[test]
    fn test_poincare_one_crossing_through_z_plane() {
        let traj = [[0.0, 0.0, -0.5], [0.0, 0.0, 0.5]];
        let crossings = poincare_section(&traj, [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
        assert_eq!(crossings.len(), 1);
        assert!((crossings[0][2]).abs() < 1e-10, "crossing z should be ~0");
    }

    #[test]
    fn test_poincare_multiple_crossings() {
        let n = 200;
        let traj: Vec<[f64; 3]> = (0..n)
            .map(|i| [0.0, 0.0, (2.0 * PI * i as f64 / 100.0 + PI).sin()])
            .collect();
        let crossings = poincare_section(&traj, [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
        assert!(!crossings.is_empty(), "expected at least one crossing");
    }

    #[test]
    fn test_poincare_section_struct_new() {
        let ps = PoincareSection::new([1.0, 0.0, 0.0], [0.0, 0.0, 0.0]);
        assert!(ps.crossings.is_empty());
    }

    #[test]
    fn test_poincare_downward_not_counted() {
        let traj = [[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]];
        let crossings = poincare_section(&traj, [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]);
        assert!(
            crossings.is_empty(),
            "downward crossing should not be counted"
        );
    }

    #[test]
    fn test_recurrence_rate_constant_signal() {
        let sig = vec![5.0; 10];
        let rp = RecurrencePlot::new(&sig, 0.1);
        let rate = recurrence_rate(&rp);
        assert!((rate - 1.0).abs() < 1e-10, "rate = {}", rate);
    }

    #[test]
    fn test_recurrence_rate_sparse() {
        let sig: Vec<f64> = (0..10).map(|i| i as f64).collect();
        let rp = RecurrencePlot::new(&sig, 0.5);
        let rate = recurrence_rate(&rp);
        assert!((rate - 0.1).abs() < 1e-10, "rate = {}", rate);
    }

    #[test]
    fn test_recurrence_rate_bounds() {
        let sig: Vec<f64> = (0..20).map(|i| (i as f64).sin()).collect();
        let rp = RecurrencePlot::new(&sig, 0.3);
        let rate = recurrence_rate(&rp);
        assert!((0.0..=1.0).contains(&rate), "rate out of bounds: {}", rate);
    }

    #[test]
    fn test_recurrence_rate_empty() {
        let rp = RecurrencePlot {
            size: 0,
            threshold: 0.1,
            matrix: Vec::new(),
        };
        assert_eq!(recurrence_rate(&rp), 0.0);
    }

    #[test]
    fn test_determinism_constant_signal() {
        let sig = vec![1.0; 5];
        let rp = RecurrencePlot::new(&sig, 0.01);
        let det = determinism(&rp);
        assert!(det > 0.0, "determinism should be > 0 for constant signal");
    }

    #[test]
    fn test_determinism_bounds() {
        let sig: Vec<f64> = (0..15).map(|i| (i as f64 * 0.5).sin()).collect();
        let rp = RecurrencePlot::new(&sig, 0.3);
        let det = determinism(&rp);
        assert!(
            (0.0..=1.0).contains(&det),
            "determinism out of bounds: {}",
            det
        );
    }

    #[test]
    fn test_determinism_empty() {
        let rp = RecurrencePlot {
            size: 0,
            threshold: 0.1,
            matrix: Vec::new(),
        };
        assert_eq!(determinism(&rp), 0.0);
    }

    #[test]
    fn test_rqa_compute_sine_wave() {
        let sig: Vec<f64> = (0..50)
            .map(|i| (2.0 * PI * i as f64 / 25.0).sin())
            .collect();
        let rp = RecurrencePlot::new(&sig, 0.3);
        let rqa = RecurrenceAnalysis::compute(&rp, 2);
        assert!(rqa.recurrence_rate > 0.0);
        assert!(rqa.determinism >= 0.0 && rqa.determinism <= 1.0);
    }

    #[test]
    fn test_rqa_max_diagonal_length() {
        let sig = vec![1.0_f64; 8];
        let rp = RecurrencePlot::new(&sig, 0.01);
        let rqa = RecurrenceAnalysis::compute(&rp, 2);
        assert!(
            rqa.max_diagonal_length >= 6,
            "max diag = {}",
            rqa.max_diagonal_length
        );
    }

    #[test]
    fn test_symbolic_encode_uniform() {
        let sig: Vec<f64> = (0..10).map(|i| i as f64 / 10.0).collect();
        let syms = symbolic_encode(&sig, 0.0, 1.0, 10);
        assert_eq!(syms.len(), 10);
        for (i, &s) in syms.iter().enumerate() {
            assert_eq!(s, i, "symbol mismatch at index {}", i);
        }
    }

    #[test]
    fn test_block_entropy_constant() {
        let syms = vec![0usize; 20];
        let h = block_entropy(&syms, 3);
        assert!(h.abs() < 1e-10, "entropy of constant = {}", h);
    }

    #[test]
    fn test_block_entropy_uniform() {
        let syms: Vec<usize> = (0..40).map(|i| i % 4).collect();
        let h = block_entropy(&syms, 2);
        assert!(h > 0.0, "entropy should be > 0 for varied signal");
    }

    #[test]
    fn test_ks_entropy_logistic_chaos() {
        let mut x = 0.3_f64;
        let mut traj = Vec::with_capacity(1000);
        for _ in 0..1000 {
            x = 4.0 * x * (1.0 - x);
            traj.push(x);
        }
        let syms = symbolic_encode(&traj, 0.0, 1.0, 8);
        let ks = kolmogorov_sinai_entropy(&syms, 4);
        assert!(ks >= 0.0, "KS entropy must be non-negative: {}", ks);
    }

    #[test]
    fn test_topological_entropy_constant() {
        let syms = vec![0usize; 30];
        let te = topological_entropy_estimate(&syms, 3);
        assert_eq!(te, 0.0, "topo entropy of constant signal should be 0");
    }

    #[test]
    fn test_entropic_analysis_from_symbols() {
        let syms: Vec<usize> = (0..100).map(|i| i % 4).collect();
        let ea = EntropicAnalysis::from_symbols(&syms, 3, 4);
        assert!(ea.ks_entropy >= 0.0);
        assert!(ea.topological_entropy >= 0.0);
    }

    #[test]
    fn test_autocorrelation_lag0_is_one() {
        let sig: Vec<f64> = (0..50).map(|i| (i as f64).sin()).collect();
        let ac = autocorrelation(&sig, 10);
        assert!(
            (ac[0] - 1.0).abs() < 1e-10,
            "lag-0 autocorrelation must be 1"
        );
    }

    #[test]
    fn test_autocorrelation_constant_signal() {
        let sig = vec![3.0; 20];
        let ac = autocorrelation(&sig, 5);
        for &v in &ac {
            assert!((v - 1.0).abs() < 1e-10, "constant signal AC should be 1");
        }
    }

    #[test]
    fn test_autocorrelation_sine_half_period() {
        let n = 200usize;
        let sig: Vec<f64> = (0..n)
            .map(|i| (2.0 * PI * i as f64 / n as f64).sin())
            .collect();
        let ac = autocorrelation(&sig, n / 2);
        let ac_half = ac[n / 2];
        assert!(
            ac_half < -0.45,
            "AC at half-period should be strongly negative, got {}",
            ac_half
        );
    }

    #[test]
    fn test_autocorrelation_empty_signal() {
        let ac = autocorrelation(&[], 5);
        assert!(ac.is_empty());
    }

    #[test]
    fn test_autocorrelation_length() {
        let sig: Vec<f64> = (0..30).map(|i| i as f64).collect();
        let ac = autocorrelation(&sig, 10);
        assert_eq!(ac.len(), 11);
    }

    #[test]
    fn test_mixing_time_found() {
        let ac = vec![1.0, 0.5, 0.2, 0.05, -0.01];
        assert_eq!(mixing_time(&ac, 0.1), Some(3));
    }

    #[test]
    fn test_mixing_time_not_found() {
        let ac = vec![1.0, 0.9, 0.8, 0.7];
        assert_eq!(mixing_time(&ac, 0.1), None);
    }

    #[test]
    fn test_mixing_time_immediately() {
        let ac = vec![0.0, 0.5, 1.0];
        assert_eq!(mixing_time(&ac, 0.1), Some(0));
    }

    #[test]
    fn test_mixing_analyzer_new() {
        let ma = MixingAnalyzer::new(vec![1.0, 0.5, 0.1]);
        assert_eq!(ma.correlation_decay.len(), 3);
        assert_eq!(ma.mixing_time(0.2), Some(2));
    }

    #[test]
    fn test_mixing_rate_from_exponential_decay() {
        let corr: Vec<f64> = (0..20).map(|k| (-0.5_f64 * k as f64).exp()).collect();
        let mr = MixingRate::from_autocorr(corr);
        assert!(
            (mr.decay_rate - 0.5).abs() < 0.1,
            "decay rate = {}",
            mr.decay_rate
        );
        assert!(mr.mixing_timescale > 0.0);
    }

    #[test]
    fn test_ergodic_mean_empty() {
        let em = ErgodicMean::new();
        assert_eq!(em.mean(), 0.0);
        assert_eq!(em.variance(), 0.0);
    }

    #[test]
    fn test_ergodic_mean_single() {
        let mut em = ErgodicMean::new();
        em.update(5.0);
        assert!((em.mean() - 5.0).abs() < 1e-12);
        assert_eq!(em.variance(), 0.0);
    }

    #[test]
    fn test_ergodic_mean_convergence() {
        let mut em = ErgodicMean::new();
        for i in 1..=100 {
            em.update(i as f64);
        }
        assert!((em.mean() - 50.5).abs() < 1e-10, "mean = {}", em.mean());
    }

    #[test]
    fn test_ergodic_variance() {
        let mut em = ErgodicMean::new();
        for x in [1.0, 2.0, 3.0, 4.0, 5.0] {
            em.update(x);
        }
        assert!(
            (em.variance() - 2.5).abs() < 1e-10,
            "variance = {}",
            em.variance()
        );
    }

    #[test]
    fn test_ergodic_mean_default() {
        let em = ErgodicMean::default();
        assert_eq!(em.mean(), 0.0);
    }

    #[test]
    fn test_birkhoff_average_empty() {
        assert_eq!(birkhoff_average(&[]), 0.0);
    }

    #[test]
    fn test_birkhoff_average_constant() {
        let sig = vec![7.0; 100];
        assert!((birkhoff_average(&sig) - 7.0).abs() < 1e-12);
    }

    #[test]
    fn test_birkhoff_average_integers() {
        let sig: Vec<f64> = (1..=10).map(|i| i as f64).collect();
        assert!((birkhoff_average(&sig) - 5.5).abs() < 1e-12);
    }

    #[test]
    fn test_birkhoff_average_zero_mean() {
        let sig: Vec<f64> = (0..100)
            .map(|i| if i % 2 == 0 { 1.0 } else { -1.0 })
            .collect();
        assert!(birkhoff_average(&sig).abs() < 1e-12);
    }

    #[test]
    fn test_bakers_map_step_in_unit_square() {
        let (xn, yn) = bakers_map_step(0.3, 0.7);
        assert!((0.0..1.0).contains(&xn), "x out of [0,1): {}", xn);
        assert!((0.0..1.0).contains(&yn), "y out of [0,1): {}", yn);
    }

    #[test]
    fn test_bakers_map_trajectory_length() {
        let traj = bakers_map_trajectory(0.2, 0.5, 50);
        assert_eq!(traj.len(), 51);
    }

    #[test]
    fn test_bakers_map_trajectory_first_point() {
        let traj = bakers_map_trajectory(0.3, 0.6, 10);
        assert!((traj[0].0 - 0.3).abs() < 1e-14);
        assert!((traj[0].1 - 0.6).abs() < 1e-14);
    }

    #[test]
    fn test_bakers_map_stays_in_unit_square() {
        let traj = bakers_map_trajectory(0.123, 0.456, 5000);
        for (i, &(x, y)) in traj.iter().enumerate() {
            assert!(
                (0.0..1.0).contains(&x),
                "x out of [0,1) at step {}: {}",
                i,
                x
            );
            assert!(
                (0.0..1.0).contains(&y),
                "y out of [0,1) at step {}: {}",
                i,
                y
            );
        }
    }
}