1#![allow(clippy::needless_range_loop)]
2#![allow(dead_code)]
35
/// The Lorenz system: x' = sigma·(y − x), y' = x·(rho − z) − y, z' = x·y − beta·z.
#[derive(Debug, Clone)]
pub struct LorenzSystem {
    /// Parameter sigma (classic chaotic value: 10).
    pub sigma: f64,
    /// Parameter rho (classic chaotic value: 28).
    pub rho: f64,
    /// Parameter beta (classic chaotic value: 8/3).
    pub beta: f64,
}
56
57impl LorenzSystem {
58 pub fn new(sigma: f64, rho: f64, beta: f64) -> Self {
60 Self { sigma, rho, beta }
61 }
62
63 pub fn classic() -> Self {
65 Self::new(10.0, 28.0, 8.0 / 3.0)
66 }
67
68 pub fn deriv(&self, state: [f64; 3]) -> [f64; 3] {
70 let [x, y, z] = state;
71 [
72 self.sigma * (y - x),
73 x * (self.rho - z) - y,
74 x * y - self.beta * z,
75 ]
76 }
77
78 pub fn integrate(&self, initial: [f64; 3], dt: f64, steps: usize) -> Vec<[f64; 3]> {
80 let mut traj = Vec::with_capacity(steps + 1);
81 let mut s = initial;
82 traj.push(s);
83 for _ in 0..steps {
84 s = rk4_step(|st| self.deriv(st), s, dt);
85 traj.push(s);
86 }
87 traj
88 }
89
90 pub fn fixed_points(&self) -> [[f64; 3]; 3] {
92 let c = (self.beta * (self.rho - 1.0)).sqrt();
93 [
94 [0.0, 0.0, 0.0],
95 [c, c, self.rho - 1.0],
96 [-c, -c, self.rho - 1.0],
97 ]
98 }
99
100 pub fn jacobian(&self, state: [f64; 3]) -> [[f64; 3]; 3] {
102 let [x, y, z] = state;
103 [
104 [-self.sigma, self.sigma, 0.0],
105 [self.rho - z, -1.0, -x],
106 [y, x, -self.beta],
107 ]
108 }
109}
110
/// The Rössler system: x' = −y − z, y' = x + a·y, z' = b + z·(x − c).
#[derive(Debug, Clone)]
pub struct RosslerSystem {
    /// Parameter a (classic chaotic value: 0.2).
    pub a: f64,
    /// Parameter b (classic chaotic value: 0.2).
    pub b: f64,
    /// Parameter c (classic chaotic value: 5.7).
    pub c: f64,
}
131
132impl RosslerSystem {
133 pub fn new(a: f64, b: f64, c: f64) -> Self {
135 Self { a, b, c }
136 }
137
138 pub fn classic() -> Self {
140 Self::new(0.2, 0.2, 5.7)
141 }
142
143 pub fn deriv(&self, state: [f64; 3]) -> [f64; 3] {
145 let [x, y, z] = state;
146 [-y - z, x + self.a * y, self.b + z * (x - self.c)]
147 }
148
149 pub fn integrate(&self, initial: [f64; 3], dt: f64, steps: usize) -> Vec<[f64; 3]> {
151 let mut traj = Vec::with_capacity(steps + 1);
152 let mut s = initial;
153 traj.push(s);
154 for _ in 0..steps {
155 s = rk4_step(|st| self.deriv(st), s, dt);
156 traj.push(s);
157 }
158 traj
159 }
160
161 pub fn approximate_period(&self) -> f64 {
163 std::f64::consts::TAU
165 }
166}
167
/// The logistic map x ↦ r·x·(1 − x).
#[derive(Debug, Clone)]
pub struct LogisticMap {
    /// Growth-rate parameter r.
    pub r: f64,
}
176
177impl LogisticMap {
178 pub fn new(r: f64) -> Self {
180 Self { r }
181 }
182
183 pub fn iterate(&self, x0: f64, n: usize) -> Vec<f64> {
185 let mut orbit = Vec::with_capacity(n + 1);
186 let mut x = x0;
187 orbit.push(x);
188 for _ in 0..n {
189 x = self.r * x * (1.0 - x);
190 orbit.push(x);
191 }
192 orbit
193 }
194
195 pub fn attractor(&self, x0: f64, transient: usize, n: usize) -> Vec<f64> {
197 let orbit = self.iterate(x0, transient + n);
198 orbit[transient..].to_vec()
199 }
200
201 pub fn lyapunov_exponent(&self, x0: f64, n: usize) -> f64 {
203 let mut x = x0;
204 let mut sum = 0.0;
205 for _ in 0..n {
206 let deriv = (self.r * (1.0 - 2.0 * x)).abs();
207 if deriv > 1e-15 {
208 sum += deriv.ln();
209 }
210 x = self.r * x * (1.0 - x);
211 }
212 sum / n as f64
213 }
214
215 pub fn bifurcation_diagram(
217 r_values: &[f64],
218 x0: f64,
219 transient: usize,
220 n_attractor: usize,
221 ) -> Vec<(f64, Vec<f64>)> {
222 r_values
223 .iter()
224 .map(|&r| {
225 let lm = LogisticMap::new(r);
226 let attr = lm.attractor(x0, transient, n_attractor);
227 (r, attr)
228 })
229 .collect()
230 }
231}
232
/// Forced Duffing oscillator:
/// x'' + delta·x' + alpha·x + beta·x³ = gamma·cos(omega·t).
#[derive(Debug, Clone)]
pub struct DuffingOscillator {
    /// Linear damping coefficient.
    pub delta: f64,
    /// Linear stiffness (negative for the double-well potential).
    pub alpha: f64,
    /// Cubic stiffness.
    pub beta: f64,
    /// Forcing amplitude.
    pub gamma: f64,
    /// Forcing angular frequency.
    pub omega: f64,
}
255
256impl DuffingOscillator {
257 #[allow(clippy::too_many_arguments)]
259 pub fn new(delta: f64, alpha: f64, beta: f64, gamma: f64, omega: f64) -> Self {
260 Self {
261 delta,
262 alpha,
263 beta,
264 gamma,
265 omega,
266 }
267 }
268
269 pub fn classic_chaotic() -> Self {
271 Self::new(0.3, -1.0, 1.0, 0.37, 1.2)
272 }
273
274 pub fn deriv(&self, state: [f64; 2], t: f64) -> [f64; 2] {
276 let [x, v] = state;
277 let accel = -self.delta * v - self.alpha * x - self.beta * x.powi(3)
278 + self.gamma * (self.omega * t).cos();
279 [v, accel]
280 }
281
282 pub fn integrate(&self, initial: [f64; 2], t0: f64, dt: f64, steps: usize) -> Vec<[f64; 3]> {
284 let mut result = Vec::with_capacity(steps + 1);
285 let mut state = initial;
286 let mut t = t0;
287 result.push([state[0], state[1], t]);
288 for _ in 0..steps {
289 let k1 = self.deriv(state, t);
290 let s2 = [state[0] + 0.5 * dt * k1[0], state[1] + 0.5 * dt * k1[1]];
291 let k2 = self.deriv(s2, t + 0.5 * dt);
292 let s3 = [state[0] + 0.5 * dt * k2[0], state[1] + 0.5 * dt * k2[1]];
293 let k3 = self.deriv(s3, t + 0.5 * dt);
294 let s4 = [state[0] + dt * k3[0], state[1] + dt * k3[1]];
295 let k4 = self.deriv(s4, t + dt);
296 state[0] += dt / 6.0 * (k1[0] + 2.0 * k2[0] + 2.0 * k3[0] + k4[0]);
297 state[1] += dt / 6.0 * (k1[1] + 2.0 * k2[1] + 2.0 * k3[1] + k4[1]);
298 t += dt;
299 result.push([state[0], state[1], t]);
300 }
301 result
302 }
303
304 pub fn natural_frequency(&self) -> Option<f64> {
306 if self.alpha < 0.0 && self.beta > 0.0 {
308 let x_star = (-self.alpha / self.beta).sqrt();
309 let omega_n = (2.0 * self.beta * x_star * x_star).sqrt();
310 Some(omega_n)
311 } else if self.alpha > 0.0 {
312 Some(self.alpha.sqrt())
313 } else {
314 None
315 }
316 }
317}
318
/// Two-trajectory estimator for the largest Lyapunov exponent of a flow.
#[derive(Debug, Clone)]
pub struct LyapunovExponent {
    /// Total number of integration steps.
    pub n_steps: usize,
    /// Integration step size.
    pub dt: f64,
    /// Steps between renormalisations of the companion trajectory.
    pub renorm_interval: usize,
    /// Initial (and renormalised) separation between the trajectories.
    pub epsilon: f64,
}
334
impl LyapunovExponent {
    /// Creates an estimator with the given step count, step size,
    /// renormalisation interval (in steps) and initial separation.
    pub fn new(n_steps: usize, dt: f64, renorm_interval: usize, epsilon: f64) -> Self {
        Self {
            n_steps,
            dt,
            renorm_interval,
            epsilon,
        }
    }

    /// Settings that work well for the classic Lorenz system.
    pub fn default_lorenz() -> Self {
        Self::new(50_000, 0.01, 10, 1e-8)
    }

    /// Estimates the largest Lyapunov exponent of the flow `f`: evolve a
    /// reference orbit and a companion offset by `epsilon` (along x only),
    /// periodically record ln(d / epsilon) of the grown separation and
    /// renormalise the companion back to distance `epsilon` along the
    /// current separation direction.
    ///
    /// Returns the mean log growth per unit time, or 0.0 when no
    /// renormalisation ever happened (e.g. the separation collapsed).
    pub fn estimate<F>(&self, f: F, initial: [f64; 3]) -> f64
    where
        F: Fn([f64; 3]) -> [f64; 3],
    {
        let mut x = initial;
        // Companion trajectory, perturbed along x by epsilon.
        let mut xp = [initial[0] + self.epsilon, initial[1], initial[2]];
        let mut lambda_sum = 0.0;
        let mut n_renorms = 0usize;

        for step in 0..self.n_steps {
            x = rk4_step(&f, x, self.dt);
            xp = rk4_step(&f, xp, self.dt);

            if (step + 1) % self.renorm_interval == 0 {
                let d = dist3(x, xp);
                // Skip degenerate separations to avoid ln(0).
                if d > 1e-15 {
                    lambda_sum += (d / self.epsilon).ln();
                    n_renorms += 1;
                    // Rescale the separation vector back to length epsilon.
                    let scale = self.epsilon / d;
                    xp[0] = x[0] + (xp[0] - x[0]) * scale;
                    xp[1] = x[1] + (xp[1] - x[1]) * scale;
                    xp[2] = x[2] + (xp[2] - x[2]) * scale;
                }
            }
        }

        if n_renorms == 0 {
            return 0.0;
        }
        // Total log growth divided by the total time covered by the
        // counted renormalisation windows.
        lambda_sum / (n_renorms as f64 * self.renorm_interval as f64 * self.dt)
    }
}
388
/// Estimator for the full Lyapunov spectrum of a 3-D flow via repeated
/// tangent-frame propagation and QR re-orthonormalisation.
#[derive(Debug, Clone)]
pub struct LyapunovSpectrum {
    /// Integration step size.
    pub dt: f64,
    /// Integration steps between successive QR re-orthonormalisations.
    pub steps_per_qr: usize,
    /// Number of QR rounds.
    pub n_qr: usize,
}
403
impl LyapunovSpectrum {
    /// Creates a spectrum estimator: `n_qr` rounds of `steps_per_qr`
    /// integration steps of size `dt`, with a Gram–Schmidt
    /// re-orthonormalisation after each round.
    pub fn new(dt: f64, steps_per_qr: usize, n_qr: usize) -> Self {
        Self {
            dt,
            steps_per_qr,
            n_qr,
        }
    }

    /// Convenience wrapper computing the spectrum for a Lorenz system.
    pub fn compute_lorenz(&self, sigma: f64, rho: f64, beta: f64, initial: [f64; 3]) -> [f64; 3] {
        let sys = LorenzSystem::new(sigma, rho, beta);
        self.compute(|s| sys.deriv(s), |s| sys.jacobian(s), initial)
    }

    /// Estimates all three Lyapunov exponents by evolving an orthonormal
    /// tangent frame alongside the trajectory (forward Euler for the
    /// variational equation, RK4 for the base flow) and accumulating the
    /// log of the Gram–Schmidt residual norms at each QR round.
    pub fn compute<F, J>(&self, f: F, jac: J, initial: [f64; 3]) -> [f64; 3]
    where
        F: Fn([f64; 3]) -> [f64; 3],
        J: Fn([f64; 3]) -> [[f64; 3]; 3],
    {
        let mut x = initial;
        // Tangent frame; q[col] holds one tangent vector.
        let mut q: [[f64; 3]; 3] = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]];
        let mut lambda = [0.0f64; 3];

        for _ in 0..self.n_qr {
            for _ in 0..self.steps_per_qr {
                let j = jac(x);
                let mut q_new = [[0.0f64; 3]; 3];
                // Euler step of the variational equation v' = J(x)·v for
                // each tangent vector.
                for col in 0..3 {
                    for row in 0..3 {
                        let mut s = 0.0;
                        for k in 0..3 {
                            s += j[row][k] * q[col][k];
                        }
                        q_new[col][row] = q[col][row] + self.dt * s;
                    }
                }
                x = rk4_step(&f, x, self.dt);
                q = q_new;
            }
            // The residual norms measure the growth along each direction
            // since the previous re-orthonormalisation.
            let (q_new, r_diag) = gram_schmidt_qr(q);
            q = q_new;
            for i in 0..3 {
                lambda[i] += r_diag[i].abs().ln();
            }
        }

        let total_time = self.n_qr as f64 * self.steps_per_qr as f64 * self.dt;
        [
            lambda[0] / total_time,
            lambda[1] / total_time,
            lambda[2] / total_time,
        ]
    }
}
467
/// Poincaré section of a 3-D trajectory at a constant-z plane.
#[derive(Debug, Clone)]
pub struct PoincareSection {
    /// Height of the section plane z = z_section.
    pub z_section: f64,
    /// If true, keep only crossings where z is increasing.
    pub upward_only: bool,
}
478
479impl PoincareSection {
480 pub fn new(z_section: f64, upward_only: bool) -> Self {
482 Self {
483 z_section,
484 upward_only,
485 }
486 }
487
488 pub fn extract(&self, traj: &[[f64; 3]]) -> Vec<[f64; 2]> {
492 let mut crossings = Vec::new();
493 for i in 1..traj.len() {
494 let prev = traj[i - 1];
495 let curr = traj[i];
496 let dz_prev = prev[2] - self.z_section;
497 let dz_curr = curr[2] - self.z_section;
498 if dz_prev * dz_curr < 0.0 {
500 let upward = dz_curr > dz_prev;
501 if !self.upward_only || upward {
502 let t = dz_prev.abs() / (dz_prev.abs() + dz_curr.abs());
504 let x = prev[0] + t * (curr[0] - prev[0]);
505 let y = prev[1] + t * (curr[1] - prev[1]);
506 crossings.push([x, y]);
507 }
508 }
509 }
510 crossings
511 }
512}
513
/// Grassberger–Procaccia correlation-dimension estimator.
#[derive(Debug, Clone)]
pub struct CorrelationDimension {
    /// Number of logarithmically spaced radii to evaluate.
    pub n_radii: usize,
    /// Smallest radius.
    pub r_min: f64,
    /// Largest radius.
    pub r_max: f64,
}
528
529impl CorrelationDimension {
530 pub fn new(n_radii: usize, r_min: f64, r_max: f64) -> Self {
532 Self {
533 n_radii,
534 r_min,
535 r_max,
536 }
537 }
538
539 pub fn correlation_integral(&self, data: &[[f64; 3]]) -> Vec<[f64; 2]> {
543 let n = data.len();
544 if n < 2 {
545 return vec![];
546 }
547
548 let log_r_min = self.r_min.ln();
549 let log_r_max = self.r_max.ln();
550 let step = (log_r_max - log_r_min) / (self.n_radii as f64 - 1.0);
551
552 let radii: Vec<f64> = (0..self.n_radii)
553 .map(|i| (log_r_min + i as f64 * step).exp())
554 .collect();
555
556 let n_pairs = (n * (n - 1)) as f64;
557 radii
558 .iter()
559 .map(|&r| {
560 let count = data
561 .iter()
562 .enumerate()
563 .flat_map(|(i, a)| data[i + 1..].iter().map(move |b| dist3(*a, *b)))
564 .filter(|&d| d < r)
565 .count();
566 let c = 2.0 * count as f64 / n_pairs;
567 [r.ln(), if c > 0.0 { c.ln() } else { f64::NEG_INFINITY }]
568 })
569 .collect()
570 }
571
572 pub fn estimate_dimension(&self, data: &[[f64; 3]]) -> f64 {
574 let pairs = self.correlation_integral(data);
575 let valid: Vec<[f64; 2]> = pairs.into_iter().filter(|p| p[1].is_finite()).collect();
576 if valid.len() < 2 {
577 return 0.0;
578 }
579 linear_regression_slope(&valid)
580 }
581}
582
/// Box-counting (capacity) dimension estimator.
#[derive(Debug, Clone)]
pub struct BoxCounting {
    /// Box edge lengths to evaluate, largest first.
    pub box_sizes: Vec<f64>,
}
593
594impl BoxCounting {
595 pub fn new(n_sizes: usize, size_min: f64, size_max: f64) -> Self {
597 let log_min = size_min.ln();
598 let log_max = size_max.ln();
599 let step = (log_max - log_min) / (n_sizes as f64 - 1.0);
600 let box_sizes = (0..n_sizes)
601 .map(|i| (log_max - i as f64 * step).exp())
602 .collect();
603 Self { box_sizes }
604 }
605
606 pub fn count_boxes(&self, data: &[[f64; 3]], eps: f64) -> usize {
608 use std::collections::HashSet;
609 let mut occupied: HashSet<(i64, i64, i64)> = HashSet::new();
610 for &[x, y, z] in data {
611 let bx = (x / eps).floor() as i64;
612 let by = (y / eps).floor() as i64;
613 let bz = (z / eps).floor() as i64;
614 occupied.insert((bx, by, bz));
615 }
616 occupied.len()
617 }
618
619 pub fn estimate_dimension(&self, data: &[[f64; 3]]) -> f64 {
621 let pairs: Vec<[f64; 2]> = self
622 .box_sizes
623 .iter()
624 .map(|&eps| {
625 let n = self.count_boxes(data, eps) as f64;
626 [(1.0 / eps).ln(), if n > 0.0 { n.ln() } else { 0.0 }]
627 })
628 .collect();
629 linear_regression_slope(&pairs)
630 }
631}
632
/// Recurrence plot and basic recurrence-quantification measures.
#[derive(Debug, Clone)]
pub struct RecurrencePlot {
    /// Distance threshold: two points recur when closer than this.
    pub epsilon: f64,
}
641
impl RecurrencePlot {
    /// Creates a recurrence plot with distance threshold `epsilon`.
    pub fn new(epsilon: f64) -> Self {
        Self { epsilon }
    }

    /// Builds the n×n recurrence matrix flattened row-major: entry
    /// `i * n + j` is true when points i and j are within `epsilon`.
    pub fn build(&self, data: &[[f64; 3]]) -> Vec<bool> {
        let n = data.len();
        let mut mat = vec![false; n * n];
        for i in 0..n {
            for j in 0..n {
                mat[i * n + j] = dist3(data[i], data[j]) < self.epsilon;
            }
        }
        mat
    }

    /// Fraction of recurrent entries in the matrix (the main diagonal,
    /// which is always recurrent, is included).
    pub fn recurrence_rate(&self, data: &[[f64; 3]]) -> f64 {
        let n = data.len();
        if n == 0 {
            return 0.0;
        }
        let mat = self.build(data);
        let count = mat.iter().filter(|&&b| b).count();
        count as f64 / (n * n) as f64
    }

    /// Determinism (DET): fraction of recurrent points lying on diagonal
    /// line segments of length >= `min_line`.
    ///
    /// NOTE(review): the main diagonal (line of identity) is included in
    /// the diagonal scan — confirm this matches the intended RQA
    /// convention, which often excludes it.
    pub fn determinism(&self, data: &[[f64; 3]], min_line: usize) -> f64 {
        let n = data.len();
        if n == 0 {
            return 0.0;
        }
        let mat = self.build(data);
        let total_rec: usize = mat.iter().filter(|&&b| b).count();
        if total_rec == 0 {
            return 0.0;
        }

        let mut in_diag = 0usize;
        // Walk every diagonal of the matrix (offsets -(n-1) ..= n-1) and
        // accumulate runs of consecutive recurrent entries >= min_line.
        for diag in (-(n as isize) + 1)..(n as isize) {
            let mut run = 0usize;
            let i_start = (-diag).max(0) as usize;
            let j_start = diag.max(0) as usize;
            let len = n.saturating_sub(i_start.max(j_start));
            for k in 0..len {
                let i = i_start + k;
                let j = j_start + k;
                if mat[i * n + j] {
                    run += 1;
                } else {
                    if run >= min_line {
                        in_diag += run;
                    }
                    run = 0;
                }
            }
            // Flush a run that reaches the end of the diagonal.
            if run >= min_line {
                in_diag += run;
            }
        }
        in_diag as f64 / total_rec as f64
    }
}
710
/// Histogram-based time-delayed mutual-information estimator.
#[derive(Debug, Clone)]
pub struct MutualInformation {
    /// Number of histogram bins per axis.
    pub n_bins: usize,
}
721
722impl MutualInformation {
723 pub fn new(n_bins: usize) -> Self {
725 Self { n_bins }
726 }
727
728 pub fn compute(&self, data: &[f64], tau: usize) -> f64 {
730 if tau >= data.len() {
731 return 0.0;
732 }
733 let n = data.len() - tau;
734 let min_val = data.iter().cloned().fold(f64::INFINITY, f64::min);
735 let max_val = data.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
736 let range = (max_val - min_val).max(1e-15);
737 let nb = self.n_bins;
738
739 let mut joint = vec![vec![0usize; nb]; nb];
741 let mut marg_x = vec![0usize; nb];
742 let mut marg_y = vec![0usize; nb];
743
744 for i in 0..n {
745 let bx = ((data[i] - min_val) / range * nb as f64).floor() as usize;
746 let by = ((data[i + tau] - min_val) / range * nb as f64).floor() as usize;
747 let bx = bx.min(nb - 1);
748 let by = by.min(nb - 1);
749 joint[bx][by] += 1;
750 marg_x[bx] += 1;
751 marg_y[by] += 1;
752 }
753
754 let mut mi = 0.0f64;
755 for bx in 0..nb {
756 for by in 0..nb {
757 let pxy = joint[bx][by] as f64 / n as f64;
758 let px = marg_x[bx] as f64 / n as f64;
759 let py = marg_y[by] as f64 / n as f64;
760 if pxy > 0.0 && px > 0.0 && py > 0.0 {
761 mi += pxy * (pxy / (px * py)).ln();
762 }
763 }
764 }
765 mi
766 }
767
768 pub fn ami_curve(&self, data: &[f64], max_tau: usize) -> Vec<(usize, f64)> {
770 (1..=max_tau)
771 .map(|tau| (tau, self.compute(data, tau)))
772 .collect()
773 }
774
775 pub fn optimal_lag(&self, data: &[f64], max_tau: usize) -> usize {
777 let curve = self.ami_curve(data, max_tau);
778 for i in 1..curve.len().saturating_sub(1) {
779 if curve[i].1 < curve[i - 1].1 && curve[i].1 < curve[i + 1].1 {
780 return curve[i].0;
781 }
782 }
783 1
784 }
785}
786
/// False-nearest-neighbours estimator for the minimum embedding dimension.
#[derive(Debug, Clone)]
pub struct EmbeddingDimension {
    /// Delay (in samples) between embedding coordinates.
    pub tau: usize,
    /// Relative distance-growth threshold for a false neighbour.
    pub r_tol: f64,
    /// Absolute threshold, in standard deviations of the data.
    pub a_tol: f64,
}
801
802impl EmbeddingDimension {
803 pub fn new(tau: usize, r_tol: f64, a_tol: f64) -> Self {
805 Self { tau, r_tol, a_tol }
806 }
807
808 pub fn default_params() -> Self {
810 Self::new(1, 15.0, 2.0)
811 }
812
813 pub fn embed(&self, data: &[f64], d: usize) -> Vec<Vec<f64>> {
815 if data.len() < d * self.tau {
816 return vec![];
817 }
818 let n = data.len() - (d - 1) * self.tau;
819 (0..n)
820 .map(|i| (0..d).map(|k| data[i + k * self.tau]).collect())
821 .collect()
822 }
823
824 pub fn fnn_fraction(&self, data: &[f64], d: usize) -> f64 {
826 let embedded_d = self.embed(data, d);
827 let embedded_d1 = self.embed(data, d + 1);
828 let n = embedded_d.len().min(embedded_d1.len());
829 if n < 2 {
830 return 0.0;
831 }
832 let mean = data.iter().sum::<f64>() / data.len() as f64;
834 let std = (data.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / data.len() as f64)
835 .sqrt()
836 .max(1e-15);
837
838 let mut n_fnn = 0usize;
839 let mut n_total = 0usize;
840 for i in 0..n {
841 let mut nn_dist = f64::INFINITY;
843 let mut nn_idx = 0usize;
844 for j in 0..n {
845 if i == j {
846 continue;
847 }
848 let dist = embedded_d[i]
849 .iter()
850 .zip(&embedded_d[j])
851 .map(|(a, b)| (a - b).powi(2))
852 .sum::<f64>()
853 .sqrt();
854 if dist < nn_dist {
855 nn_dist = dist;
856 nn_idx = j;
857 }
858 }
859 if nn_dist < 1e-15 {
860 continue;
861 }
862 let new_component = (embedded_d1[i][d] - embedded_d1[nn_idx][d]).abs();
864 let dist_d1 = (nn_dist * nn_dist + new_component * new_component).sqrt();
865
866 n_total += 1;
867 if new_component / nn_dist > self.r_tol || dist_d1 / std > self.a_tol {
868 n_fnn += 1;
869 }
870 }
871 if n_total == 0 {
872 0.0
873 } else {
874 n_fnn as f64 / n_total as f64
875 }
876 }
877
878 pub fn estimate(&self, data: &[f64], max_d: usize, threshold: f64) -> usize {
880 for d in 1..=max_d {
881 if self.fnn_fraction(data, d) < threshold {
882 return d;
883 }
884 }
885 max_d
886 }
887}
888
/// Utilities around the Feigenbaum period-doubling constant.
#[derive(Debug, Clone)]
pub struct FeigenbaumConstant;
896
impl FeigenbaumConstant {
    /// The Feigenbaum delta (ratio of successive period-doubling intervals).
    pub const DELTA: f64 = 4.669_201_609_102_99;

    /// Bisection search over r in [2.5, 4.0] for the smallest r at which
    /// the settled logistic-map orbit shows a detected period >= `period`.
    ///
    /// `transient` iterations are discarded, `n_check` attractor points are
    /// examined; 60 bisection rounds give effectively f64-limited precision.
    pub fn bifurcation_point(period: usize, x0: f64, transient: usize, n_check: usize) -> f64 {
        let mut r_lo = 2.5f64;
        let mut r_hi = 4.0f64;
        for _ in 0..60 {
            let r_mid = 0.5 * (r_lo + r_hi);
            let lm = LogisticMap::new(r_mid);
            let attr = lm.attractor(x0, transient, n_check);
            let detected = detect_period(&attr, 0.001);
            if detected >= period {
                // Target period already present: onset is at or below r_mid.
                r_hi = r_mid;
            } else {
                r_lo = r_mid;
            }
        }
        0.5 * (r_lo + r_hi)
    }

    /// Crude delta estimate from the first three doubling points:
    /// (r2 − r1) / (r3 − r2), denominator floored at 1e-12 to avoid
    /// division by zero.
    pub fn estimate_delta(x0: f64) -> f64 {
        let r1 = Self::bifurcation_point(2, x0, 1000, 256);
        let r2 = Self::bifurcation_point(4, x0, 1000, 512);
        let r3 = Self::bifurcation_point(8, x0, 1000, 1024);
        (r2 - r1) / (r3 - r2).max(1e-12)
    }
}
930
/// Finite-time Lyapunov exponent (FTLE) field on a 2-D seed grid.
#[derive(Debug, Clone)]
pub struct FtleField {
    /// Grid resolution along x.
    pub nx: usize,
    /// Grid resolution along y.
    pub ny: usize,
    /// Integration horizon; negative values integrate backward in time.
    pub t_integration: f64,
    /// Integration step size (magnitude).
    pub dt: f64,
}
947
impl FtleField {
    /// Creates an `nx` × `ny` FTLE grid integrated over `t_integration`
    /// (negative for backward time) with step `dt`.
    pub fn new(nx: usize, ny: usize, t_integration: f64, dt: f64) -> Self {
        Self {
            nx,
            ny,
            t_integration,
            dt,
        }
    }

    /// FTLE field for the Lorenz flow, seeded on a regular x–y grid at
    /// height `z_fixed`.
    ///
    /// For each seed, the flow-map gradient is approximated by one-sided
    /// finite differences of two nearby perturbed seeds, from which the
    /// 2-D Cauchy–Green tensor C = (∇F)ᵀ∇F is formed; the FTLE is
    /// ln(λ_max(C)) / (2·|T|), floored at 0 for λ_max <= 1. Only the x
    /// and y components of the advected points enter the tensor.
    pub fn compute_lorenz(
        &self,
        x_range: (f64, f64),
        y_range: (f64, f64),
        z_fixed: f64,
        sigma: f64,
        rho: f64,
        beta: f64,
    ) -> Vec<Vec<f64>> {
        let lorenz = LorenzSystem::new(sigma, rho, beta);
        let steps = (self.t_integration / self.dt).abs() as usize;
        // Backward-time FTLE uses a negative step.
        let dt = if self.t_integration > 0.0 {
            self.dt
        } else {
            -self.dt
        };
        let dx = (x_range.1 - x_range.0) / (self.nx as f64 - 1.0);
        let dy = (y_range.1 - y_range.0) / (self.ny as f64 - 1.0);
        // Perturbation well below the grid spacing.
        let eps = dx.min(dy) * 0.01;

        let mut ftle = vec![vec![0.0f64; self.ny]; self.nx];
        for ix in 0..self.nx {
            for iy in 0..self.ny {
                let x0 = x_range.0 + ix as f64 * dx;
                let y0 = y_range.0 + iy as f64 * dy;
                let p0 = [x0, y0, z_fixed];
                let px = [x0 + eps, y0, z_fixed];
                let py = [x0, y0 + eps, z_fixed];

                // Advect the seed and its two perturbed companions.
                let mut p0f = p0;
                let mut pxf = px;
                let mut pyf = py;
                for _ in 0..steps {
                    p0f = rk4_step(|s| lorenz.deriv(s), p0f, dt);
                    pxf = rk4_step(|s| lorenz.deriv(s), pxf, dt);
                    pyf = rk4_step(|s| lorenz.deriv(s), pyf, dt);
                }

                // One-sided finite-difference flow-map gradient (x, y only).
                let dfdx = [(pxf[0] - p0f[0]) / eps, (pxf[1] - p0f[1]) / eps];
                let dfdy = [(pyf[0] - p0f[0]) / eps, (pyf[1] - p0f[1]) / eps];
                // Entries of the symmetric 2x2 Cauchy–Green tensor.
                let c11 = dfdx[0] * dfdx[0] + dfdx[1] * dfdx[1];
                let c12 = dfdx[0] * dfdy[0] + dfdx[1] * dfdy[1];
                let c22 = dfdy[0] * dfdy[0] + dfdy[1] * dfdy[1];
                let tr = c11 + c22;
                let det = c11 * c22 - c12 * c12;
                // Largest eigenvalue of the 2x2 tensor; discriminant clamped
                // at 0 against round-off.
                let disc = (0.25 * tr * tr - det).max(0.0);
                let lambda_max = 0.5 * tr + disc.sqrt();
                ftle[ix][iy] = if lambda_max > 1.0 {
                    lambda_max.ln() / (2.0 * self.t_integration.abs())
                } else {
                    0.0
                };
            }
        }
        ftle
    }
}
1022
/// Helpers for analysing the period-doubling route to chaos.
#[derive(Debug, Clone)]
pub struct PeriodDoubling;
1028
1029impl PeriodDoubling {
1030 pub fn detect_period(series: &[f64], tol: f64, max_period: usize) -> usize {
1034 detect_period(series, tol).min(max_period)
1035 }
1036
1037 pub fn period_diagram(
1039 r_values: &[f64],
1040 x0: f64,
1041 transient: usize,
1042 check_len: usize,
1043 ) -> Vec<(f64, usize)> {
1044 r_values
1045 .iter()
1046 .map(|&r| {
1047 let lm = LogisticMap::new(r);
1048 let attr = lm.attractor(x0, transient, check_len);
1049 let p = detect_period(&attr, 0.001);
1050 (r, p)
1051 })
1052 .collect()
1053 }
1054
1055 pub fn count_doublings(r_max: f64, x0: f64) -> usize {
1057 let mut count = 0usize;
1058 let mut period = 1usize;
1059 for i in 1..=6 {
1060 let target = 1usize << i;
1061 let r = FeigenbaumConstant::bifurcation_point(target, x0, 500, 512);
1062 if r < r_max {
1063 count += 1;
1064 period = target;
1065 }
1066 }
1067 let _ = period;
1068 count
1069 }
1070}
1071
/// High-level chaos analysis of a scalar time series via delay embedding.
#[derive(Debug, Clone)]
pub struct ChaosAnalysis {
    /// Embedding delay (in samples).
    pub tau: usize,
    /// Embedding dimension.
    pub dim: usize,
}
1082
impl ChaosAnalysis {
    /// Creates an analyser using delay `tau` and embedding dimension `dim`.
    pub fn new(tau: usize, dim: usize) -> Self {
        Self { tau, dim }
    }

    /// Largest-Lyapunov estimate from a scalar series by nearest-neighbour
    /// divergence: delay-embed, find each point's nearest neighbour outside
    /// a Theiler window, evolve both forward `evolve_steps` indices and
    /// average the log separation growth per step (units: per sample).
    pub fn largest_lyapunov(&self, data: &[f64], evolve_steps: usize) -> f64 {
        let emb = EmbeddingDimension::new(self.tau, 15.0, 2.0);
        let vecs = emb.embed(data, self.dim);
        let n = vecs.len();
        if n < 2 {
            return 0.0;
        }

        // Theiler window: exclude temporally close neighbours.
        let min_sep = self.dim * self.tau + 1;
        let mut sum = 0.0f64;
        let mut count = 0usize;

        for i in 0..n {
            // Brute-force nearest neighbour of i outside the window.
            let mut nn_dist = f64::INFINITY;
            let mut nn_j = 0usize;
            for j in 0..n {
                if (i as isize - j as isize).unsigned_abs() < min_sep {
                    continue;
                }
                let d: f64 = vecs[i]
                    .iter()
                    .zip(&vecs[j])
                    .map(|(a, b)| (a - b).powi(2))
                    .sum::<f64>()
                    .sqrt();
                if d < nn_dist {
                    nn_dist = d;
                    nn_j = j;
                }
            }
            if nn_dist < 1e-15 {
                continue; // coincident points carry no growth information
            }

            let end_i = (i + evolve_steps).min(n - 1);
            let end_j = (nn_j + evolve_steps).min(n - 1);
            if end_i == i || end_j == nn_j {
                continue; // no room left to evolve forward
            }

            let d_evolved: f64 = vecs[end_i]
                .iter()
                .zip(&vecs[end_j])
                .map(|(a, b)| (a - b).powi(2))
                .sum::<f64>()
                .sqrt();
            if d_evolved > 1e-15 {
                sum += (d_evolved / nn_dist).ln();
                count += 1;
            }
        }

        if count == 0 {
            0.0
        } else {
            // Mean log growth per evolution step.
            sum / (count as f64 * evolve_steps as f64)
        }
    }

    /// Correlation dimension of the delay-embedded series.
    ///
    /// NOTE(review): only the first three embedding coordinates are used
    /// (missing ones padded with 0.0, extra ones silently dropped), so for
    /// dim > 3 this underuses the embedding — confirm this is intended.
    pub fn correlation_dimension(&self, data: &[f64]) -> f64 {
        let emb = EmbeddingDimension::new(self.tau, 15.0, 2.0);
        let vecs = emb.embed(data, self.dim);
        if vecs.len() < 10 {
            return 0.0;
        }
        let data3: Vec<[f64; 3]> = vecs
            .iter()
            .map(|v| {
                let x = v.first().copied().unwrap_or(0.0);
                let y = v.get(1).copied().unwrap_or(0.0);
                let z = v.get(2).copied().unwrap_or(0.0);
                [x, y, z]
            })
            .collect();
        let cd = CorrelationDimension::new(20, 0.01, 5.0);
        cd.estimate_dimension(&data3)
    }

    /// One-line human-readable summary of both invariants.
    pub fn summary(&self, data: &[f64]) -> String {
        let lambda = self.largest_lyapunov(data, 5);
        let d_corr = self.correlation_dimension(data);
        format!(
            "Largest Lyapunov: {:.6}, Correlation dim: {:.6}",
            lambda, d_corr
        )
    }
}
1183
/// One classical fourth-order Runge–Kutta step for a 3-D autonomous ODE:
/// evaluates the vector field `f` four times and advances `state` by `dt`.
fn rk4_step<F>(f: F, state: [f64; 3], dt: f64) -> [f64; 3]
where
    F: Fn([f64; 3]) -> [f64; 3],
{
    let half = 0.5 * dt;
    let k1 = f(state);
    let k2 = f([
        state[0] + k1[0] * half,
        state[1] + k1[1] * half,
        state[2] + k1[2] * half,
    ]);
    let k3 = f([
        state[0] + k2[0] * half,
        state[1] + k2[1] * half,
        state[2] + k2[2] * half,
    ]);
    let k4 = f([
        state[0] + k3[0] * dt,
        state[1] + k3[1] * dt,
        state[2] + k3[2] * dt,
    ]);
    let sixth = dt / 6.0;
    let mut next = state;
    for i in 0..3 {
        next[i] += sixth * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]);
    }
    next
}
1204
/// Euclidean distance between two points in R^3.
fn dist3(a: [f64; 3], b: [f64; 3]) -> f64 {
    let sq_sum: f64 = a
        .iter()
        .zip(b.iter())
        .map(|(p, q)| (p - q).powi(2))
        .sum();
    sq_sum.sqrt()
}
1209
/// Component-wise sum of two 3-vectors.
fn add3(a: [f64; 3], b: [f64; 3]) -> [f64; 3] {
    let mut out = a;
    for (o, x) in out.iter_mut().zip(b.iter()) {
        *o += x;
    }
    out
}
1214
/// Multiplies each component of a 3-vector by the scalar `s`.
fn scale3(v: [f64; 3], s: f64) -> [f64; 3] {
    let mut out = v;
    for o in out.iter_mut() {
        *o *= s;
    }
    out
}
1219
/// Modified Gram–Schmidt orthonormalisation of three 3-vectors.
///
/// Returns the orthonormal vectors and the diagonal of the R factor (the
/// residual norms). A numerically zero residual (norm <= 1e-15) is replaced
/// by the corresponding standard basis vector, with a zero R entry.
fn gram_schmidt_qr(cols: [[f64; 3]; 3]) -> ([[f64; 3]; 3], [f64; 3]) {
    let dot = |a: [f64; 3], b: [f64; 3]| a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    let mut basis = [[0.0f64; 3]; 3];
    let mut norms = [0.0f64; 3];

    for (j, col) in cols.iter().enumerate() {
        let mut residual = *col;
        // Remove the components along the already-accepted directions.
        for k in 0..j {
            let coeff = dot(residual, basis[k]);
            for (r, b) in residual.iter_mut().zip(basis[k].iter()) {
                *r -= coeff * b;
            }
        }
        let norm = dot(residual, residual).sqrt();
        norms[j] = norm;
        basis[j] = if norm > 1e-15 {
            [residual[0] / norm, residual[1] / norm, residual[2] / norm]
        } else {
            // Degenerate direction: fall back to a unit basis vector.
            let mut e = [0.0; 3];
            e[j] = 1.0;
            e
        };
    }
    (basis, norms)
}
1249
/// Dot product of two 3-vectors.
fn dot3(a: [f64; 3], b: [f64; 3]) -> f64 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}
1254
/// Least-squares slope of y on x for a set of [x, y] pairs.
///
/// Returns 0.0 for fewer than two points or a degenerate (vertical) fit.
fn linear_regression_slope(pairs: &[[f64; 2]]) -> f64 {
    if pairs.len() < 2 {
        return 0.0;
    }
    let n = pairs.len() as f64;
    let (mut sx, mut sy, mut sxx, mut sxy) = (0.0f64, 0.0f64, 0.0f64, 0.0f64);
    for &[x, y] in pairs {
        sx += x;
        sy += y;
        sxx += x * x;
        sxy += x * y;
    }
    let denom = n * sxx - sx * sx;
    if denom.abs() < 1e-15 {
        0.0
    } else {
        (n * sxy - sx * sy) / denom
    }
}
1271
/// Smallest period `p` (up to half the length) such that shifting the
/// series by `p` changes no element by more than `tol`.
///
/// Returns the series length when no such period exists, and 1 for series
/// shorter than 4 samples.
fn detect_period(series: &[f64], tol: f64) -> usize {
    let n = series.len();
    if n < 4 {
        return 1;
    }
    (1..=n / 2)
        .find(|&p| {
            series
                .iter()
                .take(n - p)
                .zip(series.iter().skip(p))
                .all(|(a, b)| (a - b).abs() < tol)
        })
        .unwrap_or(n)
}
1289
// Unit tests: one group per public type, ordered as in the file above.
#[cfg(test)]
mod tests {
    use super::*;

    // --- Lorenz system ---

    #[test]
    fn test_lorenz_deriv_at_origin() {
        let sys = LorenzSystem::classic();
        let d = sys.deriv([0.0, 0.0, 0.0]);
        assert!(d[0].abs() < 1e-12, "dx/dt at origin should be 0");
        assert!(d[1].abs() < 1e-12, "dy/dt at origin should be 0");
        assert!(d[2].abs() < 1e-12, "dz/dt at origin should be 0");
    }

    #[test]
    fn test_lorenz_integrate_length() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 100);
        assert_eq!(traj.len(), 101);
    }

    #[test]
    fn test_lorenz_fixed_points_symmetric() {
        let sys = LorenzSystem::classic();
        let fps = sys.fixed_points();
        // Origin plus the mirror-symmetric pair C+/C-.
        assert!(fps[0].iter().all(|&v| v.abs() < 1e-12));
        assert!((fps[1][0] + fps[2][0]).abs() < 1e-12);
        assert!((fps[1][1] + fps[2][1]).abs() < 1e-12);
        assert!((fps[1][2] - fps[2][2]).abs() < 1e-12);
    }

    #[test]
    fn test_lorenz_fixed_point_is_fixed() {
        let sys = LorenzSystem::classic();
        let fps = sys.fixed_points();
        for fp in fps.iter().skip(1) {
            let d = sys.deriv(*fp);
            let mag = (d[0].powi(2) + d[1].powi(2) + d[2].powi(2)).sqrt();
            assert!(
                mag < 1e-10,
                "Fixed point derivative should be near zero, got {mag}"
            );
        }
    }

    #[test]
    fn test_lorenz_sensitivity() {
        let sys = LorenzSystem::classic();
        // Two nearby initial conditions must diverge over time.
        let t1 = sys.integrate([1.0, 1.0, 1.0], 0.01, 2000);
        let t2 = sys.integrate([1.0 + 1e-3, 1.0, 1.0], 0.01, 2000);
        let end1 = t1.last().unwrap();
        let end2 = t2.last().unwrap();
        let sep = dist3(*end1, *end2);
        assert!(
            sep > 1e-6,
            "Lorenz should show sensitivity, separation={sep}"
        );
    }

    // --- Rössler system ---

    #[test]
    fn test_rossler_integrate_no_nan() {
        let sys = RosslerSystem::classic();
        let traj = sys.integrate([1.0, 0.0, 0.0], 0.01, 200);
        for pt in &traj {
            assert!(
                pt.iter().all(|v| v.is_finite()),
                "Rössler trajectory must be finite"
            );
        }
    }

    #[test]
    fn test_rossler_period_positive() {
        let sys = RosslerSystem::classic();
        assert!(sys.approximate_period() > 0.0);
    }

    // --- Logistic map ---

    #[test]
    fn test_logistic_fixed_point_r3() {
        let lm = LogisticMap::new(3.0);
        // At r = 3 the orbit converges (slowly) to the fixed point 2/3.
        let attr = lm.attractor(0.5, 1000, 10);
        for &v in &attr {
            assert!(
                (v - 2.0 / 3.0).abs() < 0.01,
                "r=3 should approach fixed point, got {v}"
            );
        }
    }

    #[test]
    fn test_logistic_period2_r35() {
        let lm = LogisticMap::new(3.5);
        let attr = lm.attractor(0.5, 2000, 32);
        let p = detect_period(&attr, 0.001);
        assert!(p >= 2, "r=3.5 should have period ≥ 2, got {p}");
    }

    #[test]
    fn test_logistic_lyapunov_chaotic() {
        let lm = LogisticMap::new(3.9);
        let lambda = lm.lyapunov_exponent(0.5, 10000);
        assert!(lambda > 0.0, "r=3.9 should be chaotic, λ={lambda}");
    }

    #[test]
    fn test_logistic_lyapunov_stable() {
        let lm = LogisticMap::new(2.5);
        let lambda = lm.lyapunov_exponent(0.3, 5000);
        assert!(lambda <= 0.0, "r=2.5 should be stable (λ≤0), λ={lambda}");
    }

    // --- Duffing oscillator ---

    #[test]
    fn test_duffing_integrate_length() {
        let duff = DuffingOscillator::classic_chaotic();
        let traj = duff.integrate([1.0, 0.0], 0.0, 0.01, 100);
        assert_eq!(traj.len(), 101);
    }

    #[test]
    fn test_duffing_natural_freq_double_well() {
        let duff = DuffingOscillator::new(0.0, -1.0, 1.0, 0.0, 1.0);
        let freq = duff.natural_frequency();
        assert!(freq.is_some(), "Double-well should have natural frequency");
        // For alpha = -1, beta = 1 the well frequency is sqrt(2).
        assert!((freq.unwrap() - 2.0_f64.sqrt()).abs() < 0.01);
    }

    #[test]
    fn test_duffing_no_nan() {
        let duff = DuffingOscillator::classic_chaotic();
        let traj = duff.integrate([0.5, 0.0], 0.0, 0.01, 500);
        for pt in &traj {
            assert!(
                pt.iter().all(|v| v.is_finite()),
                "Duffing trajectory must be finite"
            );
        }
    }

    // --- Lyapunov exponent estimators ---

    #[test]
    fn test_lyapunov_lorenz_positive() {
        let est = LyapunovExponent::new(20_000, 0.01, 10, 1e-8);
        let sys = LorenzSystem::classic();
        let lambda = est.estimate(|s| sys.deriv(s), [0.1, 0.1, 0.1]);
        assert!(
            lambda > 0.0,
            "Lorenz largest Lyapunov should be positive, got {lambda}"
        );
    }

    #[test]
    fn test_lyapunov_spectrum_sum_negative() {
        let spec = LyapunovSpectrum::new(0.01, 5, 200);
        let exps = spec.compute_lorenz(10.0, 28.0, 8.0 / 3.0, [1.0, 1.0, 1.0]);
        let sum = exps[0] + exps[1] + exps[2];
        assert!(
            sum < 0.0,
            "Lyapunov spectrum sum should be negative (volume contraction), got {sum}"
        );
    }

    // --- Poincaré sections ---

    #[test]
    fn test_poincare_section_count() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 20.0], 0.01, 5000);
        let poincare = PoincareSection::new(27.0, true);
        let crossings = poincare.extract(&traj);
        assert!(
            !crossings.is_empty(),
            "Lorenz should have Poincaré crossings"
        );
    }

    #[test]
    fn test_poincare_section_upward_only() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 25.0], 0.01, 3000);
        let all = PoincareSection::new(27.0, false).extract(&traj);
        let up_only = PoincareSection::new(27.0, true).extract(&traj);
        assert!(
            up_only.len() <= all.len(),
            "Upward-only should have ≤ crossings than all"
        );
    }

    // --- Fractal dimension estimators ---

    #[test]
    fn test_correlation_dimension_line() {
        let data: Vec<[f64; 3]> = (0..100).map(|i| [i as f64 * 0.1, 0.0, 0.0]).collect();
        let cd = CorrelationDimension::new(10, 0.1, 5.0);
        let dim = cd.estimate_dimension(&data);
        assert!(dim < 2.0, "Line should have dimension < 2, got {dim}");
    }

    #[test]
    fn test_box_counting_cube() {
        let mut data = Vec::new();
        for i in 0..5 {
            for j in 0..5 {
                for k in 0..5 {
                    data.push([i as f64, j as f64, k as f64]);
                }
            }
        }
        let bc = BoxCounting::new(5, 0.5, 4.0);
        let dim = bc.estimate_dimension(&data);
        assert!(
            dim > 1.5 && dim < 4.0,
            "Cube should have dimension ≈ 3, got {dim}"
        );
    }

    // --- Recurrence plots ---

    #[test]
    fn test_recurrence_rate_between_0_and_1() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 99);
        let rp = RecurrencePlot::new(5.0);
        let rr = rp.recurrence_rate(&traj);
        assert!(
            (0.0..=1.0).contains(&rr),
            "Recurrence rate should be in [0,1], got {rr}"
        );
    }

    #[test]
    fn test_recurrence_diagonal_is_always_1() {
        let data: Vec<[f64; 3]> = (0..10).map(|i| [i as f64, 0.0, 0.0]).collect();
        let rp = RecurrencePlot::new(0.5);
        let mat = rp.build(&data);
        let n = data.len();
        for i in 0..n {
            assert!(mat[i * n + i], "Diagonal should always be true");
        }
    }

    // --- Mutual information ---

    #[test]
    fn test_mutual_information_zero_lag_max() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 0.5, 1.5], 0.01, 300);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let mi = MutualInformation::new(10);
        let ami1 = mi.compute(&x_series, 1);
        let ami_large = mi.compute(&x_series, 50);
        assert!(
            ami1 >= ami_large - 0.5,
            "Short-lag AMI should not be less than long-lag AMI by more than 0.5"
        );
    }

    // --- Delay embedding ---

    #[test]
    fn test_embedding_embed_shape() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let emb = EmbeddingDimension::new(1, 15.0, 2.0);
        let vecs = emb.embed(&data, 3);
        assert_eq!(vecs.len(), 18);
        for v in &vecs {
            assert_eq!(v.len(), 3);
        }
    }

    // --- Feigenbaum constant ---

    #[test]
    fn test_feigenbaum_delta_known_value() {
        assert!(
            (FeigenbaumConstant::DELTA - 4.6692).abs() < 0.001,
            "Feigenbaum delta should be ≈ 4.6692"
        );
    }

    #[test]
    fn test_feigenbaum_bifurcation_r2() {
        let r = FeigenbaumConstant::bifurcation_point(2, 0.5, 1000, 256);
        assert!(
            (r - 3.0).abs() < 0.1,
            "Period-2 bifurcation should be near r=3.0, got {r}"
        );
    }

    // --- FTLE field ---

    #[test]
    fn test_ftle_field_non_negative() {
        let ftle = FtleField::new(5, 5, 0.5, 0.05);
        let field = ftle.compute_lorenz((-5.0, 5.0), (-5.0, 5.0), 25.0, 10.0, 28.0, 8.0 / 3.0);
        for row in &field {
            for &v in row {
                assert!(v >= 0.0, "FTLE values should be non-negative, got {v}");
            }
        }
    }

    // --- Period doubling ---

    #[test]
    fn test_period_detection_simple() {
        let series = vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0];
        let p = PeriodDoubling::detect_period(&series, 0.001, 10);
        assert_eq!(p, 2, "Simple alternating series should have period 2");
    }

    #[test]
    fn test_period_doubling_count() {
        let count = PeriodDoubling::count_doublings(3.6, 0.5);
        assert!(
            count >= 2,
            "Should see ≥ 2 doublings before r=3.6, got {count}"
        );
    }

    // --- High-level chaos analysis ---

    #[test]
    fn test_chaos_analysis_summary_format() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 200);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let ca = ChaosAnalysis::new(1, 3);
        let s = ca.summary(&x_series);
        assert!(
            s.contains("Largest Lyapunov"),
            "Summary should contain Lyapunov info"
        );
        assert!(
            s.contains("Correlation dim"),
            "Summary should contain dimension info"
        );
    }

    #[test]
    fn test_chaos_analysis_lorenz_positive_lyapunov() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([0.1, 0.1, 0.1], 0.01, 1000);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let ca = ChaosAnalysis::new(1, 3);
        let lambda = ca.largest_lyapunov(&x_series, 5);
        assert!(
            lambda > -1.0,
            "Chaos analysis Lyapunov should not be very negative, got {lambda}"
        );
    }
}