Skip to main content

oxiphysics_core/
chaos_theory.rs

1#![allow(clippy::needless_range_loop)]
2// Copyright 2026 COOLJAPAN OU (Team KitaSan)
3// SPDX-License-Identifier: Apache-2.0
4
5//! Chaos theory and nonlinear dynamics.
6//!
7//! This module provides tools for analysing chaotic dynamical systems, including:
8//!
9//! - [`LorenzSystem`]: The classic Lorenz attractor (σ, ρ, β parameters).
10//! - [`RosslerSystem`]: Rössler system with spiral chaos.
11//! - [`LogisticMap`]: Logistic map and period-doubling bifurcations.
12//! - [`DuffingOscillator`]: Periodically-forced Duffing oscillator.
13//! - [`LyapunovExponent`]: Largest Lyapunov exponent estimation.
14//! - [`LyapunovSpectrum`]: Full spectrum via QR-based algorithm.
15//! - [`FtleField`]: Finite-time Lyapunov exponents on a grid.
16//! - [`PoincareSection`]: Poincaré section sampler.
17//! - [`CorrelationDimension`]: Grassberger-Procaccia correlation dimension.
18//! - [`BoxCounting`]: Box-counting fractal dimension.
19//! - [`RecurrencePlot`]: Recurrence plot and recurrence quantification.
20//! - [`MutualInformation`]: Average mutual information for embedding.
21//! - [`EmbeddingDimension`]: False nearest neighbours (Taken's embedding).
22//! - [`FeigenbaumConstant`]: Numerical Feigenbaum δ from period-doubling.
23//! - [`ChaosAnalysis`]: Convenience wrapper combining several diagnostics.
24//!
25//! # References
26//! - Lorenz, E.N. (1963). Deterministic nonperiodic flow. *J. Atmos. Sci.*, 20, 130–141.
27//! - Rössler, O.E. (1976). An equation for continuous chaos. *Phys. Lett. A*, 57, 397–398.
28//! - Grassberger, P. & Procaccia, I. (1983). Characterization of strange attractors.
29//!   *Phys. Rev. Lett.*, 50, 346–349.
30//! - Takens, F. (1981). Detecting strange attractors in turbulence. *Lect. Notes Math.*, 898.
31//! - Feigenbaum, M.J. (1978). Quantitative universality for a class of nonlinear transformations.
32//!   *J. Stat. Phys.*, 19, 25–52.
33
34#![allow(dead_code)]
35
36// ─── Lorenz System ───────────────────────────────────────────────────────────
37
/// Parameters of the Lorenz attractor.
///
/// Governing equations:
/// ```text
/// dx/dt = σ(y − x)
/// dy/dt = x(ρ − z) − y
/// dz/dt = xy − βz
/// ```
/// The classic chaotic regime uses σ = 10, ρ = 28, β = 8/3.
#[derive(Clone, Debug)]
pub struct LorenzSystem {
    /// Prandtl number σ.
    pub sigma: f64,
    /// Rayleigh number ρ.
    pub rho: f64,
    /// Geometric factor β.
    pub beta: f64,
}
56
57impl LorenzSystem {
58    /// Creates a new Lorenz system with the given parameters.
59    pub fn new(sigma: f64, rho: f64, beta: f64) -> Self {
60        Self { sigma, rho, beta }
61    }
62
63    /// Creates the classic chaotic Lorenz system (σ=10, ρ=28, β=8/3).
64    pub fn classic() -> Self {
65        Self::new(10.0, 28.0, 8.0 / 3.0)
66    }
67
68    /// Computes the time derivative `[dx/dt, dy/dt, dz/dt]` at state `[x, y, z]`.
69    pub fn deriv(&self, state: [f64; 3]) -> [f64; 3] {
70        let [x, y, z] = state;
71        [
72            self.sigma * (y - x),
73            x * (self.rho - z) - y,
74            x * y - self.beta * z,
75        ]
76    }
77
78    /// Integrates using the 4th-order Runge-Kutta method for `steps` steps of size `dt`.
79    pub fn integrate(&self, initial: [f64; 3], dt: f64, steps: usize) -> Vec<[f64; 3]> {
80        let mut traj = Vec::with_capacity(steps + 1);
81        let mut s = initial;
82        traj.push(s);
83        for _ in 0..steps {
84            s = rk4_step(|st| self.deriv(st), s, dt);
85            traj.push(s);
86        }
87        traj
88    }
89
90    /// Returns the three fixed points of the Lorenz system (origin and two symmetric points).
91    pub fn fixed_points(&self) -> [[f64; 3]; 3] {
92        let c = (self.beta * (self.rho - 1.0)).sqrt();
93        [
94            [0.0, 0.0, 0.0],
95            [c, c, self.rho - 1.0],
96            [-c, -c, self.rho - 1.0],
97        ]
98    }
99
100    /// Computes the Jacobian matrix at state `[x, y, z]` as a row-major 3×3 array.
101    pub fn jacobian(&self, state: [f64; 3]) -> [[f64; 3]; 3] {
102        let [x, y, z] = state;
103        [
104            [-self.sigma, self.sigma, 0.0],
105            [self.rho - z, -1.0, -x],
106            [y, x, -self.beta],
107        ]
108    }
109}
110
111// ─── Rössler System ──────────────────────────────────────────────────────────
112
/// Parameters of the Rössler system.
///
/// Governing equations:
/// ```text
/// dx/dt = −y − z
/// dy/dt = x + ay
/// dz/dt = b + z(x − c)
/// ```
/// Typical chaotic parameters: a = 0.2, b = 0.2, c = 5.7.
#[derive(Clone, Debug)]
pub struct RosslerSystem {
    /// Parameter a (spiral rate).
    pub a: f64,
    /// Parameter b (z-coupling).
    pub b: f64,
    /// Parameter c (nonlinear threshold).
    pub c: f64,
}
131
132impl RosslerSystem {
133    /// Creates a new Rössler system with the given parameters.
134    pub fn new(a: f64, b: f64, c: f64) -> Self {
135        Self { a, b, c }
136    }
137
138    /// Creates the classic chaotic Rössler system (a=0.2, b=0.2, c=5.7).
139    pub fn classic() -> Self {
140        Self::new(0.2, 0.2, 5.7)
141    }
142
143    /// Computes the time derivative `[dx/dt, dy/dt, dz/dt]`.
144    pub fn deriv(&self, state: [f64; 3]) -> [f64; 3] {
145        let [x, y, z] = state;
146        [-y - z, x + self.a * y, self.b + z * (x - self.c)]
147    }
148
149    /// Integrates using RK4 for `steps` steps of size `dt`.
150    pub fn integrate(&self, initial: [f64; 3], dt: f64, steps: usize) -> Vec<[f64; 3]> {
151        let mut traj = Vec::with_capacity(steps + 1);
152        let mut s = initial;
153        traj.push(s);
154        for _ in 0..steps {
155            s = rk4_step(|st| self.deriv(st), s, dt);
156            traj.push(s);
157        }
158        traj
159    }
160
161    /// Approximate period of the spiral for small-amplitude oscillations.
162    pub fn approximate_period(&self) -> f64 {
163        // Frequency ≈ 1 for small a.
164        std::f64::consts::TAU
165    }
166}
167
168// ─── Logistic Map ────────────────────────────────────────────────────────────
169
/// The logistic map x_{n+1} = r·x_n·(1 − x_n).
#[derive(Clone, Debug)]
pub struct LogisticMap {
    /// Growth parameter r ∈ \[0, 4\].
    pub r: f64,
}
176
177impl LogisticMap {
178    /// Creates a new logistic map with the given growth rate.
179    pub fn new(r: f64) -> Self {
180        Self { r }
181    }
182
183    /// Iterates the map for `n` steps starting from `x0`.
184    pub fn iterate(&self, x0: f64, n: usize) -> Vec<f64> {
185        let mut orbit = Vec::with_capacity(n + 1);
186        let mut x = x0;
187        orbit.push(x);
188        for _ in 0..n {
189            x = self.r * x * (1.0 - x);
190            orbit.push(x);
191        }
192        orbit
193    }
194
195    /// Returns the attractor (after discarding `transient` steps) of length `n`.
196    pub fn attractor(&self, x0: f64, transient: usize, n: usize) -> Vec<f64> {
197        let orbit = self.iterate(x0, transient + n);
198        orbit[transient..].to_vec()
199    }
200
201    /// Computes the Lyapunov exponent of the map.
202    pub fn lyapunov_exponent(&self, x0: f64, n: usize) -> f64 {
203        let mut x = x0;
204        let mut sum = 0.0;
205        for _ in 0..n {
206            let deriv = (self.r * (1.0 - 2.0 * x)).abs();
207            if deriv > 1e-15 {
208                sum += deriv.ln();
209            }
210            x = self.r * x * (1.0 - x);
211        }
212        sum / n as f64
213    }
214
215    /// Generates a bifurcation diagram: for each r in `r_values` returns (r, attractor_points).
216    pub fn bifurcation_diagram(
217        r_values: &[f64],
218        x0: f64,
219        transient: usize,
220        n_attractor: usize,
221    ) -> Vec<(f64, Vec<f64>)> {
222        r_values
223            .iter()
224            .map(|&r| {
225                let lm = LogisticMap::new(r);
226                let attr = lm.attractor(x0, transient, n_attractor);
227                (r, attr)
228            })
229            .collect()
230    }
231}
232
233// ─── Duffing Oscillator ──────────────────────────────────────────────────────
234
/// Periodically-forced Duffing oscillator.
///
/// Equation of motion:
/// ```text
/// ẍ + δẋ + αx + βx³ = γ cos(ωt)
/// ```
/// Trajectories are reported as `[x, ẋ, t]` (time carried along for the
/// forcing phase).
#[derive(Clone, Debug)]
pub struct DuffingOscillator {
    /// Linear damping coefficient δ.
    pub delta: f64,
    /// Linear stiffness α (negative for the double-well potential).
    pub alpha: f64,
    /// Nonlinear stiffness β.
    pub beta: f64,
    /// Forcing amplitude γ.
    pub gamma: f64,
    /// Forcing frequency ω.
    pub omega: f64,
}
255
256impl DuffingOscillator {
257    /// Creates a new Duffing oscillator with the given parameters.
258    #[allow(clippy::too_many_arguments)]
259    pub fn new(delta: f64, alpha: f64, beta: f64, gamma: f64, omega: f64) -> Self {
260        Self {
261            delta,
262            alpha,
263            beta,
264            gamma,
265            omega,
266        }
267    }
268
269    /// Classic chaotic Duffing parameters (δ=0.3, α=−1, β=1, γ=0.37, ω=1.2).
270    pub fn classic_chaotic() -> Self {
271        Self::new(0.3, -1.0, 1.0, 0.37, 1.2)
272    }
273
274    /// Computes `[ẋ, ẍ]` given `[x, ẋ]` and time `t`.
275    pub fn deriv(&self, state: [f64; 2], t: f64) -> [f64; 2] {
276        let [x, v] = state;
277        let accel = -self.delta * v - self.alpha * x - self.beta * x.powi(3)
278            + self.gamma * (self.omega * t).cos();
279        [v, accel]
280    }
281
282    /// Integrates for `steps` steps of size `dt` starting from `initial` at `t0`.
283    pub fn integrate(&self, initial: [f64; 2], t0: f64, dt: f64, steps: usize) -> Vec<[f64; 3]> {
284        let mut result = Vec::with_capacity(steps + 1);
285        let mut state = initial;
286        let mut t = t0;
287        result.push([state[0], state[1], t]);
288        for _ in 0..steps {
289            let k1 = self.deriv(state, t);
290            let s2 = [state[0] + 0.5 * dt * k1[0], state[1] + 0.5 * dt * k1[1]];
291            let k2 = self.deriv(s2, t + 0.5 * dt);
292            let s3 = [state[0] + 0.5 * dt * k2[0], state[1] + 0.5 * dt * k2[1]];
293            let k3 = self.deriv(s3, t + 0.5 * dt);
294            let s4 = [state[0] + dt * k3[0], state[1] + dt * k3[1]];
295            let k4 = self.deriv(s4, t + dt);
296            state[0] += dt / 6.0 * (k1[0] + 2.0 * k2[0] + 2.0 * k3[0] + k4[0]);
297            state[1] += dt / 6.0 * (k1[1] + 2.0 * k2[1] + 2.0 * k3[1] + k4[1]);
298            t += dt;
299            result.push([state[0], state[1], t]);
300        }
301        result
302    }
303
304    /// Returns the natural frequency for small oscillations around the stable fixed point.
305    pub fn natural_frequency(&self) -> Option<f64> {
306        // For double-well (α < 0, β > 0): fixed points at x = ±√(−α/β).
307        if self.alpha < 0.0 && self.beta > 0.0 {
308            let x_star = (-self.alpha / self.beta).sqrt();
309            let omega_n = (2.0 * self.beta * x_star * x_star).sqrt();
310            Some(omega_n)
311        } else if self.alpha > 0.0 {
312            Some(self.alpha.sqrt())
313        } else {
314            None
315        }
316    }
317}
318
319// ─── Lyapunov Exponent ───────────────────────────────────────────────────────
320
/// Largest-Lyapunov-exponent estimator for a continuous flow, following the
/// Wolf et al. orbit-separation / renormalization scheme.
#[derive(Clone, Debug)]
pub struct LyapunovExponent {
    /// Number of integration steps in total.
    pub n_steps: usize,
    /// Integration time step.
    pub dt: f64,
    /// Steps between successive renormalizations.
    pub renorm_interval: usize,
    /// Magnitude of the initial perturbation.
    pub epsilon: f64,
}
334
335impl LyapunovExponent {
336    /// Creates a new estimator with the given settings.
337    pub fn new(n_steps: usize, dt: f64, renorm_interval: usize, epsilon: f64) -> Self {
338        Self {
339            n_steps,
340            dt,
341            renorm_interval,
342            epsilon,
343        }
344    }
345
346    /// Default settings for the Lorenz system.
347    pub fn default_lorenz() -> Self {
348        Self::new(50_000, 0.01, 10, 1e-8)
349    }
350
351    /// Estimates the largest Lyapunov exponent for a generic 3D flow `f`.
352    ///
353    /// `f(state) -> derivative` must be the vector field.
354    pub fn estimate<F>(&self, f: F, initial: [f64; 3]) -> f64
355    where
356        F: Fn([f64; 3]) -> [f64; 3],
357    {
358        let mut x = initial;
359        // Perturbed trajectory.
360        let mut xp = [initial[0] + self.epsilon, initial[1], initial[2]];
361        let mut lambda_sum = 0.0;
362        let mut n_renorms = 0usize;
363
364        for step in 0..self.n_steps {
365            x = rk4_step(&f, x, self.dt);
366            xp = rk4_step(&f, xp, self.dt);
367
368            if (step + 1) % self.renorm_interval == 0 {
369                let d = dist3(x, xp);
370                if d > 1e-15 {
371                    lambda_sum += (d / self.epsilon).ln();
372                    n_renorms += 1;
373                    // Rescale xp back to distance epsilon from x.
374                    let scale = self.epsilon / d;
375                    xp[0] = x[0] + (xp[0] - x[0]) * scale;
376                    xp[1] = x[1] + (xp[1] - x[1]) * scale;
377                    xp[2] = x[2] + (xp[2] - x[2]) * scale;
378                }
379            }
380        }
381
382        if n_renorms == 0 {
383            return 0.0;
384        }
385        lambda_sum / (n_renorms as f64 * self.renorm_interval as f64 * self.dt)
386    }
387}
388
389// ─── Lyapunov Spectrum ───────────────────────────────────────────────────────
390
/// Full Lyapunov spectrum via repeated QR re-orthonormalization —
/// the Benettin–Galgani–Giorgilli–Strelcyn (BGGS) algorithm.
#[derive(Clone, Debug)]
pub struct LyapunovSpectrum {
    /// Integration time step.
    pub dt: f64,
    /// Integration steps between consecutive QR steps.
    pub steps_per_qr: usize,
    /// Total number of QR steps.
    pub n_qr: usize,
}
403
impl LyapunovSpectrum {
    /// Creates a new spectrum calculator.
    pub fn new(dt: f64, steps_per_qr: usize, n_qr: usize) -> Self {
        Self {
            dt,
            steps_per_qr,
            n_qr,
        }
    }

    /// Computes the 3 Lyapunov exponents for the Lorenz system.
    pub fn compute_lorenz(&self, sigma: f64, rho: f64, beta: f64, initial: [f64; 3]) -> [f64; 3] {
        let sys = LorenzSystem::new(sigma, rho, beta);
        self.compute(|s| sys.deriv(s), |s| sys.jacobian(s), initial)
    }

    /// Computes the Lyapunov spectrum for a general 3D system with Jacobian.
    ///
    /// `f(state) -> derivative`, `jac(state) -> 3×3 Jacobian`.
    ///
    /// The exponents are accumulated as logs of the diagonal R entries from
    /// periodic Gram-Schmidt re-orthonormalizations, then divided by the
    /// total integration time.
    pub fn compute<F, J>(&self, f: F, jac: J, initial: [f64; 3]) -> [f64; 3]
    where
        F: Fn([f64; 3]) -> [f64; 3],
        J: Fn([f64; 3]) -> [[f64; 3]; 3],
    {
        let mut x = initial;
        // Q is stored column-major: Q[col][row]. Initialized to the identity.
        let mut q: [[f64; 3]; 3] = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]];
        let mut lambda = [0.0f64; 3];

        for _ in 0..self.n_qr {
            // Integrate the tangent vectors.
            for _ in 0..self.steps_per_qr {
                let j = jac(x);
                // Update each column of Q: dQ/dt = J·Q, advanced with one
                // forward-Euler step (q_new = q + dt·J·q).
                // NOTE(review): the tangent map uses forward Euler while the
                // base trajectory below is advanced with RK4 — confirm this
                // mixed order is intentional; it limits accuracy to O(dt).
                let mut q_new = [[0.0f64; 3]; 3];
                for col in 0..3 {
                    for row in 0..3 {
                        let mut s = 0.0;
                        for k in 0..3 {
                            // (J·q_col)[row] = Σ_k J[row][k] · q_col[k].
                            s += j[row][k] * q[col][k];
                        }
                        q_new[col][row] = q[col][row] + self.dt * s;
                    }
                }
                x = rk4_step(&f, x, self.dt);
                q = q_new;
            }
            // Re-orthonormalize: QR decomposition via Gram-Schmidt. The
            // diagonal of R holds the per-interval stretch factors.
            let (q_new, r_diag) = gram_schmidt_qr(q);
            q = q_new;
            for i in 0..3 {
                lambda[i] += r_diag[i].abs().ln();
            }
        }

        // Convert accumulated log-stretch to average rate per unit time.
        let total_time = self.n_qr as f64 * self.steps_per_qr as f64 * self.dt;
        [
            lambda[0] / total_time,
            lambda[1] / total_time,
            lambda[2] / total_time,
        ]
    }
}
467
468// ─── Poincaré Section ────────────────────────────────────────────────────────
469
/// Samples the Poincaré section of a 3D trajectory through the plane
/// z = `z_section`.
#[derive(Clone, Debug)]
pub struct PoincareSection {
    /// z-coordinate of the section plane.
    pub z_section: f64,
    /// When true, only crossings moving in the +z direction are recorded.
    pub upward_only: bool,
}
478
479impl PoincareSection {
480    /// Creates a new Poincaré section at the given z level.
481    pub fn new(z_section: f64, upward_only: bool) -> Self {
482        Self {
483            z_section,
484            upward_only,
485        }
486    }
487
488    /// Extracts crossing points from a 3D trajectory.
489    ///
490    /// Returns `(x, y)` coordinates at each crossing.
491    pub fn extract(&self, traj: &[[f64; 3]]) -> Vec<[f64; 2]> {
492        let mut crossings = Vec::new();
493        for i in 1..traj.len() {
494            let prev = traj[i - 1];
495            let curr = traj[i];
496            let dz_prev = prev[2] - self.z_section;
497            let dz_curr = curr[2] - self.z_section;
498            // Detect sign change (crossing).
499            if dz_prev * dz_curr < 0.0 {
500                let upward = dz_curr > dz_prev;
501                if !self.upward_only || upward {
502                    // Linear interpolation.
503                    let t = dz_prev.abs() / (dz_prev.abs() + dz_curr.abs());
504                    let x = prev[0] + t * (curr[0] - prev[0]);
505                    let y = prev[1] + t * (curr[1] - prev[1]);
506                    crossings.push([x, y]);
507                }
508            }
509        }
510        crossings
511    }
512}
513
514// ─── Correlation Dimension ───────────────────────────────────────────────────
515
/// Grassberger-Procaccia correlation dimension estimator.
///
/// The scaling C(r) ~ r^D of the correlation integral yields the
/// correlation dimension D.
#[derive(Clone, Debug)]
pub struct CorrelationDimension {
    /// How many radii to evaluate C(r) at.
    pub n_radii: usize,
    /// Smallest radius (log-spaced grid).
    pub r_min: f64,
    /// Largest radius (log-spaced grid).
    pub r_max: f64,
}
528
529impl CorrelationDimension {
530    /// Creates a new correlation dimension estimator.
531    pub fn new(n_radii: usize, r_min: f64, r_max: f64) -> Self {
532        Self {
533            n_radii,
534            r_min,
535            r_max,
536        }
537    }
538
539    /// Computes the correlation integral for 3D data.
540    ///
541    /// Returns `(log_r, log_C)` pairs suitable for linear regression.
542    pub fn correlation_integral(&self, data: &[[f64; 3]]) -> Vec<[f64; 2]> {
543        let n = data.len();
544        if n < 2 {
545            return vec![];
546        }
547
548        let log_r_min = self.r_min.ln();
549        let log_r_max = self.r_max.ln();
550        let step = (log_r_max - log_r_min) / (self.n_radii as f64 - 1.0);
551
552        let radii: Vec<f64> = (0..self.n_radii)
553            .map(|i| (log_r_min + i as f64 * step).exp())
554            .collect();
555
556        let n_pairs = (n * (n - 1)) as f64;
557        radii
558            .iter()
559            .map(|&r| {
560                let count = data
561                    .iter()
562                    .enumerate()
563                    .flat_map(|(i, a)| data[i + 1..].iter().map(move |b| dist3(*a, *b)))
564                    .filter(|&d| d < r)
565                    .count();
566                let c = 2.0 * count as f64 / n_pairs;
567                [r.ln(), if c > 0.0 { c.ln() } else { f64::NEG_INFINITY }]
568            })
569            .collect()
570    }
571
572    /// Estimates the slope (dimension) from the linear part of the log-log plot.
573    pub fn estimate_dimension(&self, data: &[[f64; 3]]) -> f64 {
574        let pairs = self.correlation_integral(data);
575        let valid: Vec<[f64; 2]> = pairs.into_iter().filter(|p| p[1].is_finite()).collect();
576        if valid.len() < 2 {
577            return 0.0;
578        }
579        linear_regression_slope(&valid)
580    }
581}
582
583// ─── Box-Counting Dimension ──────────────────────────────────────────────────
584
/// Box-counting fractal dimension estimator:
/// D = lim_{ε→0} log N(ε) / log(1/ε).
#[derive(Clone, Debug)]
pub struct BoxCounting {
    /// Box edge lengths, ordered from largest to smallest.
    pub box_sizes: Vec<f64>,
}
593
594impl BoxCounting {
595    /// Creates a new box-counting estimator with logarithmically spaced sizes.
596    pub fn new(n_sizes: usize, size_min: f64, size_max: f64) -> Self {
597        let log_min = size_min.ln();
598        let log_max = size_max.ln();
599        let step = (log_max - log_min) / (n_sizes as f64 - 1.0);
600        let box_sizes = (0..n_sizes)
601            .map(|i| (log_max - i as f64 * step).exp())
602            .collect();
603        Self { box_sizes }
604    }
605
606    /// Counts occupied boxes of size `eps` for 3D data.
607    pub fn count_boxes(&self, data: &[[f64; 3]], eps: f64) -> usize {
608        use std::collections::HashSet;
609        let mut occupied: HashSet<(i64, i64, i64)> = HashSet::new();
610        for &[x, y, z] in data {
611            let bx = (x / eps).floor() as i64;
612            let by = (y / eps).floor() as i64;
613            let bz = (z / eps).floor() as i64;
614            occupied.insert((bx, by, bz));
615        }
616        occupied.len()
617    }
618
619    /// Estimates the fractal dimension via linear regression on log-log plot.
620    pub fn estimate_dimension(&self, data: &[[f64; 3]]) -> f64 {
621        let pairs: Vec<[f64; 2]> = self
622            .box_sizes
623            .iter()
624            .map(|&eps| {
625                let n = self.count_boxes(data, eps) as f64;
626                [(1.0 / eps).ln(), if n > 0.0 { n.ln() } else { 0.0 }]
627            })
628            .collect();
629        linear_regression_slope(&pairs)
630    }
631}
632
633// ─── Recurrence Plot ─────────────────────────────────────────────────────────
634
/// Recurrence plot builder with recurrence quantification analysis (RQA).
#[derive(Clone, Debug)]
pub struct RecurrencePlot {
    /// Recurrence threshold radius ε.
    pub epsilon: f64,
}
641
impl RecurrencePlot {
    /// Creates a new recurrence plot with the given threshold.
    pub fn new(epsilon: f64) -> Self {
        Self { epsilon }
    }

    /// Builds the recurrence matrix for 3D data.
    ///
    /// Returns a flattened row-major boolean matrix of size `n × n`, where
    /// entry (i, j) is true iff `dist3(data[i], data[j]) < epsilon`. The main
    /// diagonal is therefore always true (every point recurs with itself).
    pub fn build(&self, data: &[[f64; 3]]) -> Vec<bool> {
        let n = data.len();
        let mut mat = vec![false; n * n];
        for i in 0..n {
            for j in 0..n {
                mat[i * n + j] = dist3(data[i], data[j]) < self.epsilon;
            }
        }
        mat
    }

    /// Recurrence rate RR = (number of recurrences) / N².
    ///
    /// Self-recurrences on the main diagonal are included in the count.
    pub fn recurrence_rate(&self, data: &[[f64; 3]]) -> f64 {
        let n = data.len();
        if n == 0 {
            return 0.0;
        }
        let mat = self.build(data);
        let count = mat.iter().filter(|&&b| b).count();
        count as f64 / (n * n) as f64
    }

    /// Determinism DET = fraction of recurrences forming diagonal lines ≥ min_line.
    ///
    /// Scans every diagonal of the matrix — including the main diagonal,
    /// whose length-n run always qualifies when n ≥ min_line — and counts
    /// recurrence points lying in runs of at least `min_line`.
    pub fn determinism(&self, data: &[[f64; 3]], min_line: usize) -> f64 {
        let n = data.len();
        if n == 0 {
            return 0.0;
        }
        let mat = self.build(data);
        let total_rec: usize = mat.iter().filter(|&&b| b).count();
        if total_rec == 0 {
            return 0.0;
        }

        let mut in_diag = 0usize;
        // `diag` is the offset j − i, ranging over −(n−1) .. n−1.
        for diag in (-(n as isize) + 1)..(n as isize) {
            let mut run = 0usize;
            let i_start = (-diag).max(0) as usize;
            let j_start = diag.max(0) as usize;
            let len = n.saturating_sub(i_start.max(j_start));
            for k in 0..len {
                let i = i_start + k;
                let j = j_start + k;
                if mat[i * n + j] {
                    run += 1;
                } else {
                    // A run just ended: count its points if it was long enough.
                    if run >= min_line {
                        in_diag += run;
                    }
                    run = 0;
                }
            }
            // Flush a run that extends to the end of the diagonal.
            if run >= min_line {
                in_diag += run;
            }
        }
        in_diag as f64 / total_rec as f64
    }
}
710
711// ─── Mutual Information ──────────────────────────────────────────────────────
712
/// Average mutual information (AMI), used to pick the delay τ for
/// Takens' embedding.
#[derive(Clone, Debug)]
pub struct MutualInformation {
    /// Number of equal-width histogram bins.
    pub n_bins: usize,
}
721
722impl MutualInformation {
723    /// Creates a new mutual information calculator with the given number of bins.
724    pub fn new(n_bins: usize) -> Self {
725        Self { n_bins }
726    }
727
728    /// Computes the average mutual information between `x[t]` and `x[t+tau]`.
729    pub fn compute(&self, data: &[f64], tau: usize) -> f64 {
730        if tau >= data.len() {
731            return 0.0;
732        }
733        let n = data.len() - tau;
734        let min_val = data.iter().cloned().fold(f64::INFINITY, f64::min);
735        let max_val = data.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
736        let range = (max_val - min_val).max(1e-15);
737        let nb = self.n_bins;
738
739        // Build joint histogram.
740        let mut joint = vec![vec![0usize; nb]; nb];
741        let mut marg_x = vec![0usize; nb];
742        let mut marg_y = vec![0usize; nb];
743
744        for i in 0..n {
745            let bx = ((data[i] - min_val) / range * nb as f64).floor() as usize;
746            let by = ((data[i + tau] - min_val) / range * nb as f64).floor() as usize;
747            let bx = bx.min(nb - 1);
748            let by = by.min(nb - 1);
749            joint[bx][by] += 1;
750            marg_x[bx] += 1;
751            marg_y[by] += 1;
752        }
753
754        let mut mi = 0.0f64;
755        for bx in 0..nb {
756            for by in 0..nb {
757                let pxy = joint[bx][by] as f64 / n as f64;
758                let px = marg_x[bx] as f64 / n as f64;
759                let py = marg_y[by] as f64 / n as f64;
760                if pxy > 0.0 && px > 0.0 && py > 0.0 {
761                    mi += pxy * (pxy / (px * py)).ln();
762                }
763            }
764        }
765        mi
766    }
767
768    /// Returns the mutual information vs lag for lags 1..=max_tau.
769    pub fn ami_curve(&self, data: &[f64], max_tau: usize) -> Vec<(usize, f64)> {
770        (1..=max_tau)
771            .map(|tau| (tau, self.compute(data, tau)))
772            .collect()
773    }
774
775    /// Finds the first local minimum of AMI (optimal embedding lag).
776    pub fn optimal_lag(&self, data: &[f64], max_tau: usize) -> usize {
777        let curve = self.ami_curve(data, max_tau);
778        for i in 1..curve.len().saturating_sub(1) {
779            if curve[i].1 < curve[i - 1].1 && curve[i].1 < curve[i + 1].1 {
780                return curve[i].0;
781            }
782        }
783        1
784    }
785}
786
787// ─── Embedding Dimension (FNN) ───────────────────────────────────────────────
788
/// False nearest neighbours (FNN) estimator for the minimal embedding
/// dimension (Kennel et al.), a numerical realization of Takens' theorem.
#[derive(Clone, Debug)]
pub struct EmbeddingDimension {
    /// Embedding lag τ.
    pub tau: usize,
    /// Neighbour-distance ratio threshold R_tol (typically 10–15).
    pub r_tol: f64,
    /// Attractor-size threshold A_tol.
    pub a_tol: f64,
}
801
802impl EmbeddingDimension {
803    /// Creates a new FNN estimator.
804    pub fn new(tau: usize, r_tol: f64, a_tol: f64) -> Self {
805        Self { tau, r_tol, a_tol }
806    }
807
808    /// Default parameters (τ=1, R_tol=15.0, A_tol=2.0).
809    pub fn default_params() -> Self {
810        Self::new(1, 15.0, 2.0)
811    }
812
813    /// Embeds a scalar time series into d-dimensional delay vectors.
814    pub fn embed(&self, data: &[f64], d: usize) -> Vec<Vec<f64>> {
815        if data.len() < d * self.tau {
816            return vec![];
817        }
818        let n = data.len() - (d - 1) * self.tau;
819        (0..n)
820            .map(|i| (0..d).map(|k| data[i + k * self.tau]).collect())
821            .collect()
822    }
823
824    /// Computes the fraction of false nearest neighbours for embedding dimension `d`.
825    pub fn fnn_fraction(&self, data: &[f64], d: usize) -> f64 {
826        let embedded_d = self.embed(data, d);
827        let embedded_d1 = self.embed(data, d + 1);
828        let n = embedded_d.len().min(embedded_d1.len());
829        if n < 2 {
830            return 0.0;
831        }
832        // Std deviation of data as attractor size estimate.
833        let mean = data.iter().sum::<f64>() / data.len() as f64;
834        let std = (data.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / data.len() as f64)
835            .sqrt()
836            .max(1e-15);
837
838        let mut n_fnn = 0usize;
839        let mut n_total = 0usize;
840        for i in 0..n {
841            // Find nearest neighbour in d dimensions.
842            let mut nn_dist = f64::INFINITY;
843            let mut nn_idx = 0usize;
844            for j in 0..n {
845                if i == j {
846                    continue;
847                }
848                let dist = embedded_d[i]
849                    .iter()
850                    .zip(&embedded_d[j])
851                    .map(|(a, b)| (a - b).powi(2))
852                    .sum::<f64>()
853                    .sqrt();
854                if dist < nn_dist {
855                    nn_dist = dist;
856                    nn_idx = j;
857                }
858            }
859            if nn_dist < 1e-15 {
860                continue;
861            }
862            // Distance in d+1 dimensions.
863            let new_component = (embedded_d1[i][d] - embedded_d1[nn_idx][d]).abs();
864            let dist_d1 = (nn_dist * nn_dist + new_component * new_component).sqrt();
865
866            n_total += 1;
867            if new_component / nn_dist > self.r_tol || dist_d1 / std > self.a_tol {
868                n_fnn += 1;
869            }
870        }
871        if n_total == 0 {
872            0.0
873        } else {
874            n_fnn as f64 / n_total as f64
875        }
876    }
877
878    /// Finds the minimal embedding dimension where FNN fraction < threshold.
879    pub fn estimate(&self, data: &[f64], max_d: usize, threshold: f64) -> usize {
880        for d in 1..=max_d {
881            if self.fnn_fraction(data, d) < threshold {
882                return d;
883            }
884        }
885        max_d
886    }
887}
888
889// ─── Feigenbaum Constant ─────────────────────────────────────────────────────
890
891/// Numerical estimation of the Feigenbaum constant δ ≈ 4.6692016…
892///
893/// Computed from the period-doubling cascade of the logistic map.
894#[derive(Debug, Clone)]
895pub struct FeigenbaumConstant;
896
897impl FeigenbaumConstant {
898    /// Known theoretical value of the Feigenbaum constant δ.
899    pub const DELTA: f64 = 4.669_201_609_102_99;
900
901    /// Finds the r-value where the logistic map first shows period-2k behaviour.
902    ///
903    /// Returns the approximate bifurcation point via iterated cobweb detection.
904    pub fn bifurcation_point(period: usize, x0: f64, transient: usize, n_check: usize) -> f64 {
905        // Binary search for the r value at which period 2k first appears.
906        let mut r_lo = 2.5f64;
907        let mut r_hi = 4.0f64;
908        for _ in 0..60 {
909            let r_mid = 0.5 * (r_lo + r_hi);
910            let lm = LogisticMap::new(r_mid);
911            let attr = lm.attractor(x0, transient, n_check);
912            let detected = detect_period(&attr, 0.001);
913            if detected >= period {
914                r_hi = r_mid;
915            } else {
916                r_lo = r_mid;
917            }
918        }
919        0.5 * (r_lo + r_hi)
920    }
921
922    /// Estimates δ from the first few bifurcation points.
923    pub fn estimate_delta(x0: f64) -> f64 {
924        let r1 = Self::bifurcation_point(2, x0, 1000, 256);
925        let r2 = Self::bifurcation_point(4, x0, 1000, 512);
926        let r3 = Self::bifurcation_point(8, x0, 1000, 1024);
927        (r2 - r1) / (r3 - r2).max(1e-12)
928    }
929}
930
931// ─── FTLE Field ───────────────────────────────────────────────────────────────
932
/// Finite-Time Lyapunov Exponent (FTLE) field on a regular 2D grid.
///
/// Computes the FTLE by tracking the deformation of a grid of initial conditions.
///
/// Grid spacing is derived as `range / (n − 1)` per axis in `compute_lorenz`.
/// NOTE(review): `nx == 1` or `ny == 1` makes that divisor zero — confirm callers
/// always pass resolutions ≥ 2.
#[derive(Debug, Clone)]
pub struct FtleField {
    /// Grid resolution in x.
    pub nx: usize,
    /// Grid resolution in y.
    pub ny: usize,
    /// Integration time T (positive → forward FTLE, negative → backward).
    pub t_integration: f64,
    /// Sub-step size dt (treated as a magnitude; the step's sign is taken
    /// from `t_integration` inside `compute_lorenz`).
    pub dt: f64,
}
947
948impl FtleField {
949    /// Creates a new FTLE field calculator.
950    pub fn new(nx: usize, ny: usize, t_integration: f64, dt: f64) -> Self {
951        Self {
952            nx,
953            ny,
954            t_integration,
955            dt,
956        }
957    }
958
959    /// Computes the FTLE field for the Lorenz system projected onto the xy plane.
960    ///
961    /// `x_range = (x_min, x_max)`, `y_range = (y_min, y_max)`, z is fixed.
962    pub fn compute_lorenz(
963        &self,
964        x_range: (f64, f64),
965        y_range: (f64, f64),
966        z_fixed: f64,
967        sigma: f64,
968        rho: f64,
969        beta: f64,
970    ) -> Vec<Vec<f64>> {
971        let lorenz = LorenzSystem::new(sigma, rho, beta);
972        let steps = (self.t_integration / self.dt).abs() as usize;
973        let dt = if self.t_integration > 0.0 {
974            self.dt
975        } else {
976            -self.dt
977        };
978        let dx = (x_range.1 - x_range.0) / (self.nx as f64 - 1.0);
979        let dy = (y_range.1 - y_range.0) / (self.ny as f64 - 1.0);
980        let eps = dx.min(dy) * 0.01;
981
982        let mut ftle = vec![vec![0.0f64; self.ny]; self.nx];
983        for ix in 0..self.nx {
984            for iy in 0..self.ny {
985                let x0 = x_range.0 + ix as f64 * dx;
986                let y0 = y_range.0 + iy as f64 * dy;
987                let p0 = [x0, y0, z_fixed];
988                let px = [x0 + eps, y0, z_fixed];
989                let py = [x0, y0 + eps, z_fixed];
990
991                let mut p0f = p0;
992                let mut pxf = px;
993                let mut pyf = py;
994                for _ in 0..steps {
995                    p0f = rk4_step(|s| lorenz.deriv(s), p0f, dt);
996                    pxf = rk4_step(|s| lorenz.deriv(s), pxf, dt);
997                    pyf = rk4_step(|s| lorenz.deriv(s), pyf, dt);
998                }
999
1000                // Deformation gradient columns.
1001                let dfdx = [(pxf[0] - p0f[0]) / eps, (pxf[1] - p0f[1]) / eps];
1002                let dfdy = [(pyf[0] - p0f[0]) / eps, (pyf[1] - p0f[1]) / eps];
1003                // Cauchy-Green strain tensor C = F^T F (2×2).
1004                let c11 = dfdx[0] * dfdx[0] + dfdx[1] * dfdx[1];
1005                let c12 = dfdx[0] * dfdy[0] + dfdx[1] * dfdy[1];
1006                let c22 = dfdy[0] * dfdy[0] + dfdy[1] * dfdy[1];
1007                // Largest eigenvalue of C.
1008                let tr = c11 + c22;
1009                let det = c11 * c22 - c12 * c12;
1010                let disc = (0.25 * tr * tr - det).max(0.0);
1011                let lambda_max = 0.5 * tr + disc.sqrt();
1012                ftle[ix][iy] = if lambda_max > 1.0 {
1013                    lambda_max.ln() / (2.0 * self.t_integration.abs())
1014                } else {
1015                    0.0
1016                };
1017            }
1018        }
1019        ftle
1020    }
1021}
1022
1023// ─── Period-Doubling Route to Chaos ─────────────────────────────────────────
1024
1025/// Analysis of the period-doubling cascade in a map.
1026#[derive(Debug, Clone)]
1027pub struct PeriodDoubling;
1028
1029impl PeriodDoubling {
1030    /// Detects the period of a time series (up to max_period).
1031    ///
1032    /// Returns the smallest period p such that `|x[i] − x[i+p]| < tol` for all i.
1033    pub fn detect_period(series: &[f64], tol: f64, max_period: usize) -> usize {
1034        detect_period(series, tol).min(max_period)
1035    }
1036
1037    /// Returns `(r, period)` pairs for the logistic map over a range of r values.
1038    pub fn period_diagram(
1039        r_values: &[f64],
1040        x0: f64,
1041        transient: usize,
1042        check_len: usize,
1043    ) -> Vec<(f64, usize)> {
1044        r_values
1045            .iter()
1046            .map(|&r| {
1047                let lm = LogisticMap::new(r);
1048                let attr = lm.attractor(x0, transient, check_len);
1049                let p = detect_period(&attr, 0.001);
1050                (r, p)
1051            })
1052            .collect()
1053    }
1054
1055    /// Counts the number of period-doubling bifurcations up to r_max.
1056    pub fn count_doublings(r_max: f64, x0: f64) -> usize {
1057        let mut count = 0usize;
1058        let mut period = 1usize;
1059        for i in 1..=6 {
1060            let target = 1usize << i;
1061            let r = FeigenbaumConstant::bifurcation_point(target, x0, 500, 512);
1062            if r < r_max {
1063                count += 1;
1064                period = target;
1065            }
1066        }
1067        let _ = period;
1068        count
1069    }
1070}
1071
1072// ─── Chaotic Time Series Analysis ────────────────────────────────────────────
1073
/// Comprehensive chaotic time series analysis toolkit.
///
/// Bundles the delay-embedding parameters (`tau`, `dim`) shared by the
/// Lyapunov-exponent and correlation-dimension estimators.
#[derive(Debug, Clone)]
pub struct ChaosAnalysis {
    /// Embedding lag.
    pub tau: usize,
    /// Embedding dimension.
    pub dim: usize,
}
1082
1083impl ChaosAnalysis {
1084    /// Creates a new analysis instance with the given embedding parameters.
1085    pub fn new(tau: usize, dim: usize) -> Self {
1086        Self { tau, dim }
1087    }
1088
1089    /// Computes the largest Lyapunov exponent from a scalar time series
1090    /// (Rosenstein et al. algorithm).
1091    ///
1092    /// Returns an estimate of the largest Lyapunov exponent in nats/step.
1093    pub fn largest_lyapunov(&self, data: &[f64], evolve_steps: usize) -> f64 {
1094        let emb = EmbeddingDimension::new(self.tau, 15.0, 2.0);
1095        let vecs = emb.embed(data, self.dim);
1096        let n = vecs.len();
1097        if n < 2 {
1098            return 0.0;
1099        }
1100
1101        let min_sep = self.dim * self.tau + 1;
1102        let mut sum = 0.0f64;
1103        let mut count = 0usize;
1104
1105        for i in 0..n {
1106            let mut nn_dist = f64::INFINITY;
1107            let mut nn_j = 0usize;
1108            for j in 0..n {
1109                if (i as isize - j as isize).unsigned_abs() < min_sep {
1110                    continue;
1111                }
1112                let d: f64 = vecs[i]
1113                    .iter()
1114                    .zip(&vecs[j])
1115                    .map(|(a, b)| (a - b).powi(2))
1116                    .sum::<f64>()
1117                    .sqrt();
1118                if d < nn_dist {
1119                    nn_dist = d;
1120                    nn_j = j;
1121                }
1122            }
1123            if nn_dist < 1e-15 {
1124                continue;
1125            }
1126
1127            let end_i = (i + evolve_steps).min(n - 1);
1128            let end_j = (nn_j + evolve_steps).min(n - 1);
1129            if end_i == i || end_j == nn_j {
1130                continue;
1131            }
1132
1133            let d_evolved: f64 = vecs[end_i]
1134                .iter()
1135                .zip(&vecs[end_j])
1136                .map(|(a, b)| (a - b).powi(2))
1137                .sum::<f64>()
1138                .sqrt();
1139            if d_evolved > 1e-15 {
1140                sum += (d_evolved / nn_dist).ln();
1141                count += 1;
1142            }
1143        }
1144
1145        if count == 0 {
1146            0.0
1147        } else {
1148            sum / (count as f64 * evolve_steps as f64)
1149        }
1150    }
1151
1152    /// Estimates the correlation dimension of a scalar time series.
1153    pub fn correlation_dimension(&self, data: &[f64]) -> f64 {
1154        let emb = EmbeddingDimension::new(self.tau, 15.0, 2.0);
1155        let vecs = emb.embed(data, self.dim);
1156        if vecs.len() < 10 {
1157            return 0.0;
1158        }
1159        // Convert to [f64; 3] by taking first 3 components (pad if needed).
1160        let data3: Vec<[f64; 3]> = vecs
1161            .iter()
1162            .map(|v| {
1163                let x = v.first().copied().unwrap_or(0.0);
1164                let y = v.get(1).copied().unwrap_or(0.0);
1165                let z = v.get(2).copied().unwrap_or(0.0);
1166                [x, y, z]
1167            })
1168            .collect();
1169        let cd = CorrelationDimension::new(20, 0.01, 5.0);
1170        cd.estimate_dimension(&data3)
1171    }
1172
1173    /// Prints a summary of chaos diagnostics.
1174    pub fn summary(&self, data: &[f64]) -> String {
1175        let lambda = self.largest_lyapunov(data, 5);
1176        let d_corr = self.correlation_dimension(data);
1177        format!(
1178            "Largest Lyapunov: {:.6}, Correlation dim: {:.6}",
1179            lambda, d_corr
1180        )
1181    }
1182}
1183
1184// ─── Internal Helpers ─────────────────────────────────────────────────────────
1185
/// Classic 4th-order Runge-Kutta step for a 3D autonomous ODE x' = f(x).
fn rk4_step<F>(f: F, state: [f64; 3], dt: f64) -> [f64; 3]
where
    F: Fn([f64; 3]) -> [f64; 3],
{
    // Four RK4 slope evaluations; the two midpoint stages use half a step.
    let half = 0.5 * dt;
    let k1 = f(state);
    let k2 = f([
        state[0] + half * k1[0],
        state[1] + half * k1[1],
        state[2] + half * k1[2],
    ]);
    let k3 = f([
        state[0] + half * k2[0],
        state[1] + half * k2[1],
        state[2] + half * k2[2],
    ]);
    let k4 = f([
        state[0] + dt * k3[0],
        state[1] + dt * k3[1],
        state[2] + dt * k3[2],
    ]);
    // Weighted slope average: (k1 + 2k2 + 2k3 + k4) / 6.
    std::array::from_fn(|i| state[i] + dt / 6.0 * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]))
}
1204
/// Euclidean distance between two 3D points.
fn dist3(a: [f64; 3], b: [f64; 3]) -> f64 {
    a.iter()
        .zip(b.iter())
        .map(|(x, y)| (x - y) * (x - y))
        .sum::<f64>()
        .sqrt()
}
1209
/// Component-wise sum of two 3D vectors.
fn add3(a: [f64; 3], b: [f64; 3]) -> [f64; 3] {
    std::array::from_fn(|i| a[i] + b[i])
}
1214
/// Multiplies every component of a 3D vector by the scalar `s`.
fn scale3(v: [f64; 3], s: f64) -> [f64; 3] {
    std::array::from_fn(|i| v[i] * s)
}
1219
/// Gram-Schmidt QR factorisation of 3 column vectors.
///
/// Returns (orthonormal Q, diagonal of R). A (near-)zero residual column is
/// replaced by the corresponding standard basis vector.
fn gram_schmidt_qr(cols: [[f64; 3]; 3]) -> ([[f64; 3]; 3], [f64; 3]) {
    let dot = |a: &[f64; 3], b: &[f64; 3]| a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    let mut q = [[0.0f64; 3]; 3];
    let mut r_diag = [0.0f64; 3];

    for j in 0..3 {
        // Strip out the components along all previously orthonormalised columns.
        let mut w = cols[j];
        for k in 0..j {
            let coeff = dot(&w, &q[k]);
            for i in 0..3 {
                w[i] -= coeff * q[k][i];
            }
        }
        let len = dot(&w, &w).sqrt();
        r_diag[j] = len;
        if len > 1e-15 {
            for i in 0..3 {
                q[j][i] = w[i] / len;
            }
        } else {
            // Degenerate (linearly dependent) column: substitute e_j.
            q[j] = [0.0; 3];
            q[j][j] = 1.0;
        }
    }
    (q, r_diag)
}
1249
/// Dot product of two 3D vectors.
fn dot3(a: [f64; 3], b: [f64; 3]) -> f64 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}
1254
/// Least-squares slope of y on x over a set of (x, y) pairs.
///
/// Returns 0.0 for fewer than two points or when the x values are
/// (numerically) all identical.
fn linear_regression_slope(pairs: &[[f64; 2]]) -> f64 {
    if pairs.len() < 2 {
        return 0.0;
    }
    let n = pairs.len() as f64;
    let (mut sx, mut sy, mut sxx, mut sxy) = (0.0f64, 0.0f64, 0.0f64, 0.0f64);
    for p in pairs {
        sx += p[0];
        sy += p[1];
        sxx += p[0] * p[0];
        sxy += p[0] * p[1];
    }
    let denom = n * sxx - sx * sx;
    if denom.abs() < 1e-15 {
        0.0
    } else {
        (n * sxy - sx * sy) / denom
    }
}
1271
/// Smallest period p (≤ n/2) such that x[i] and x[i+p] agree within `tol`
/// for every valid i; returns the series length n when no such p exists.
fn detect_period(series: &[f64], tol: f64) -> usize {
    let n = series.len();
    if n < 4 {
        return 1;
    }
    (1..=n / 2)
        .find(|&p| {
            // zip truncates to the shorter side, which is exactly the
            // series[..n-p] vs series[p..] overlap.
            series
                .iter()
                .zip(&series[p..])
                .all(|(a, b)| (a - b).abs() < tol)
        })
        .unwrap_or(n) // Aperiodic ("chaotic"): report the full length.
}
1289
1290// ─── Tests ───────────────────────────────────────────────────────────────────
1291
#[cfg(test)]
mod tests {
    use super::*;

    // ── Lorenz system ─────────────────────────────────────────────────────────

    #[test]
    fn test_lorenz_deriv_at_origin() {
        let sys = LorenzSystem::classic();
        let d = sys.deriv([0.0, 0.0, 0.0]);
        assert!(d[0].abs() < 1e-12, "dx/dt at origin should be 0");
        assert!(d[1].abs() < 1e-12, "dy/dt at origin should be 0");
        assert!(d[2].abs() < 1e-12, "dz/dt at origin should be 0");
    }

    #[test]
    fn test_lorenz_integrate_length() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 100);
        // n steps yield n + 1 samples (the initial condition is included).
        assert_eq!(traj.len(), 101);
    }

    #[test]
    fn test_lorenz_fixed_points_symmetric() {
        let sys = LorenzSystem::classic();
        let fps = sys.fixed_points();
        // Origin.
        assert!(fps[0].iter().all(|&v| v.abs() < 1e-12));
        // The two non-trivial fixed points should be symmetric.
        assert!((fps[1][0] + fps[2][0]).abs() < 1e-12);
        assert!((fps[1][1] + fps[2][1]).abs() < 1e-12);
        assert!((fps[1][2] - fps[2][2]).abs() < 1e-12);
    }

    #[test]
    fn test_lorenz_fixed_point_is_fixed() {
        let sys = LorenzSystem::classic();
        let fps = sys.fixed_points();
        for fp in fps.iter().skip(1) {
            let d = sys.deriv(*fp);
            let mag = (d[0].powi(2) + d[1].powi(2) + d[2].powi(2)).sqrt();
            assert!(
                mag < 1e-10,
                "Fixed point derivative should be near zero, got {mag}"
            );
        }
    }

    #[test]
    fn test_lorenz_sensitivity() {
        let sys = LorenzSystem::classic();
        // Use a large perturbation so divergence is detectable within a reasonable time.
        let t1 = sys.integrate([1.0, 1.0, 1.0], 0.01, 2000);
        let t2 = sys.integrate([1.0 + 1e-3, 1.0, 1.0], 0.01, 2000);
        // After 2000 steps (t=20) even a 1e-3 perturbation should have grown in a chaotic system.
        // We just check that the two trajectories are not identical.
        let end1 = t1.last().unwrap();
        let end2 = t2.last().unwrap();
        let sep = dist3(*end1, *end2);
        assert!(
            sep > 1e-6,
            "Lorenz should show sensitivity, separation={sep}"
        );
    }

    // ── Rössler system ────────────────────────────────────────────────────────

    #[test]
    fn test_rossler_integrate_no_nan() {
        let sys = RosslerSystem::classic();
        let traj = sys.integrate([1.0, 0.0, 0.0], 0.01, 200);
        for pt in &traj {
            assert!(
                pt.iter().all(|v| v.is_finite()),
                "Rössler trajectory must be finite"
            );
        }
    }

    #[test]
    fn test_rossler_period_positive() {
        let sys = RosslerSystem::classic();
        assert!(sys.approximate_period() > 0.0);
    }

    // ── Logistic map ──────────────────────────────────────────────────────────

    #[test]
    fn test_logistic_fixed_point_r3() {
        // For r=3.0 the fixed point is x*=(r-1)/r = 2/3.
        let lm = LogisticMap::new(3.0);
        let attr = lm.attractor(0.5, 1000, 10);
        for &v in &attr {
            assert!(
                (v - 2.0 / 3.0).abs() < 0.01,
                "r=3 should approach fixed point, got {v}"
            );
        }
    }

    #[test]
    fn test_logistic_period2_r35() {
        // r=3.5 → period 4; verify at least period ≥ 2.
        let lm = LogisticMap::new(3.5);
        let attr = lm.attractor(0.5, 2000, 32);
        let p = detect_period(&attr, 0.001);
        assert!(p >= 2, "r=3.5 should have period ≥ 2, got {p}");
    }

    #[test]
    fn test_logistic_lyapunov_chaotic() {
        // r=3.9 is deeply chaotic → λ > 0.
        let lm = LogisticMap::new(3.9);
        let lambda = lm.lyapunov_exponent(0.5, 10000);
        assert!(lambda > 0.0, "r=3.9 should be chaotic, λ={lambda}");
    }

    #[test]
    fn test_logistic_lyapunov_stable() {
        // r=2.5: stable fixed point → λ ≤ 0 (derivative |r(1-2x*)| < 1 at x*=(r-1)/r).
        let lm = LogisticMap::new(2.5);
        let lambda = lm.lyapunov_exponent(0.3, 5000);
        assert!(lambda <= 0.0, "r=2.5 should be stable (λ≤0), λ={lambda}");
    }

    // ── Duffing oscillator ────────────────────────────────────────────────────

    #[test]
    fn test_duffing_integrate_length() {
        let duff = DuffingOscillator::classic_chaotic();
        let traj = duff.integrate([1.0, 0.0], 0.0, 0.01, 100);
        assert_eq!(traj.len(), 101);
    }

    #[test]
    fn test_duffing_natural_freq_double_well() {
        let duff = DuffingOscillator::new(0.0, -1.0, 1.0, 0.0, 1.0);
        let freq = duff.natural_frequency();
        assert!(freq.is_some(), "Double-well should have natural frequency");
        assert!((freq.unwrap() - 2.0_f64.sqrt()).abs() < 0.01);
    }

    #[test]
    fn test_duffing_no_nan() {
        let duff = DuffingOscillator::classic_chaotic();
        let traj = duff.integrate([0.5, 0.0], 0.0, 0.01, 500);
        for pt in &traj {
            assert!(
                pt.iter().all(|v| v.is_finite()),
                "Duffing trajectory must be finite"
            );
        }
    }

    // ── Lyapunov exponent ─────────────────────────────────────────────────────

    #[test]
    fn test_lyapunov_lorenz_positive() {
        let est = LyapunovExponent::new(20_000, 0.01, 10, 1e-8);
        let sys = LorenzSystem::classic();
        let lambda = est.estimate(|s| sys.deriv(s), [0.1, 0.1, 0.1]);
        assert!(
            lambda > 0.0,
            "Lorenz largest Lyapunov should be positive, got {lambda}"
        );
    }

    // ── Lyapunov spectrum ─────────────────────────────────────────────────────

    #[test]
    fn test_lyapunov_spectrum_sum_negative() {
        // For Lorenz: λ1 + λ2 + λ3 = −(σ + 1 + β) < 0 (volume contraction).
        let spec = LyapunovSpectrum::new(0.01, 5, 200);
        let exps = spec.compute_lorenz(10.0, 28.0, 8.0 / 3.0, [1.0, 1.0, 1.0]);
        let sum = exps[0] + exps[1] + exps[2];
        // The sum should be near −(σ + 1 + β) ≈ −13.67, but allow numerical tolerance.
        assert!(
            sum < 0.0,
            "Lyapunov spectrum sum should be negative (volume contraction), got {sum}"
        );
    }

    // ── Poincaré section ──────────────────────────────────────────────────────

    #[test]
    fn test_poincare_section_count() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 20.0], 0.01, 5000);
        let poincare = PoincareSection::new(27.0, true);
        let crossings = poincare.extract(&traj);
        assert!(
            !crossings.is_empty(),
            "Lorenz should have Poincaré crossings"
        );
    }

    #[test]
    fn test_poincare_section_upward_only() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 25.0], 0.01, 3000);
        let all = PoincareSection::new(27.0, false).extract(&traj);
        let up_only = PoincareSection::new(27.0, true).extract(&traj);
        assert!(
            up_only.len() <= all.len(),
            "Upward-only should have ≤ crossings than all"
        );
    }

    // ── Correlation dimension ─────────────────────────────────────────────────

    #[test]
    fn test_correlation_dimension_line() {
        // Points on a line should have dimension ≈ 1.
        // NOTE(review): the bound below (< 2.0) is intentionally loose to absorb
        // estimator noise at only 100 points.
        let data: Vec<[f64; 3]> = (0..100).map(|i| [i as f64 * 0.1, 0.0, 0.0]).collect();
        let cd = CorrelationDimension::new(10, 0.1, 5.0);
        let dim = cd.estimate_dimension(&data);
        assert!(dim < 2.0, "Line should have dimension < 2, got {dim}");
    }

    // ── Box-counting dimension ────────────────────────────────────────────────

    #[test]
    fn test_box_counting_cube() {
        // Points filling a cube → dimension ≈ 3.
        let mut data = Vec::new();
        for i in 0..5 {
            for j in 0..5 {
                for k in 0..5 {
                    data.push([i as f64, j as f64, k as f64]);
                }
            }
        }
        let bc = BoxCounting::new(5, 0.5, 4.0);
        let dim = bc.estimate_dimension(&data);
        assert!(
            dim > 1.5 && dim < 4.0,
            "Cube should have dimension ≈ 3, got {dim}"
        );
    }

    // ── Recurrence plot ───────────────────────────────────────────────────────

    #[test]
    fn test_recurrence_rate_between_0_and_1() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 99);
        let rp = RecurrencePlot::new(5.0);
        let rr = rp.recurrence_rate(&traj);
        assert!(
            (0.0..=1.0).contains(&rr),
            "Recurrence rate should be in [0,1], got {rr}"
        );
    }

    #[test]
    fn test_recurrence_diagonal_is_always_1() {
        let data: Vec<[f64; 3]> = (0..10).map(|i| [i as f64, 0.0, 0.0]).collect();
        let rp = RecurrencePlot::new(0.5);
        let mat = rp.build(&data);
        let n = data.len();
        // Row-major n×n matrix: entry (i, i) lives at index i*n + i.
        for i in 0..n {
            assert!(mat[i * n + i], "Diagonal should always be true");
        }
    }

    // ── Mutual information ────────────────────────────────────────────────────

    #[test]
    fn test_mutual_information_zero_lag_max() {
        // At lag=1 vs lag=large, AMI at lag=1 should generally not be zero for a chaotic series.
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 0.5, 1.5], 0.01, 300);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let mi = MutualInformation::new(10);
        let ami1 = mi.compute(&x_series, 1);
        let ami_large = mi.compute(&x_series, 50);
        // NOTE(review): the 0.5 slack makes this a loose smoke check rather than
        // a strict AMI-decay assertion — consider tightening.
        assert!(
            ami1 >= ami_large - 0.5,
            "Short-lag AMI should not be less than long-lag AMI by more than 0.5"
        );
    }

    // ── Embedding dimension ───────────────────────────────────────────────────

    #[test]
    fn test_embedding_embed_shape() {
        let data: Vec<f64> = (0..20).map(|i| i as f64).collect();
        let emb = EmbeddingDimension::new(1, 15.0, 2.0);
        let vecs = emb.embed(&data, 3);
        // Should have 20 - 2*1 = 18 vectors, each of length 3.
        assert_eq!(vecs.len(), 18);
        for v in &vecs {
            assert_eq!(v.len(), 3);
        }
    }

    // ── Feigenbaum constant ───────────────────────────────────────────────────

    #[test]
    fn test_feigenbaum_delta_known_value() {
        // The known value is ≈ 4.669.
        assert!(
            (FeigenbaumConstant::DELTA - 4.6692).abs() < 0.001,
            "Feigenbaum delta should be ≈ 4.6692"
        );
    }

    #[test]
    fn test_feigenbaum_bifurcation_r2() {
        // Period-2 bifurcation at r ≈ 3.0.
        let r = FeigenbaumConstant::bifurcation_point(2, 0.5, 1000, 256);
        assert!(
            (r - 3.0).abs() < 0.1,
            "Period-2 bifurcation should be near r=3.0, got {r}"
        );
    }

    // ── FTLE field ────────────────────────────────────────────────────────────

    #[test]
    fn test_ftle_field_non_negative() {
        // Non-negativity holds by construction: compute_lorenz clamps cells
        // with λ_max ≤ 1 to exactly 0.
        let ftle = FtleField::new(5, 5, 0.5, 0.05);
        let field = ftle.compute_lorenz((-5.0, 5.0), (-5.0, 5.0), 25.0, 10.0, 28.0, 8.0 / 3.0);
        for row in &field {
            for &v in row {
                assert!(v >= 0.0, "FTLE values should be non-negative, got {v}");
            }
        }
    }

    // ── Period doubling ───────────────────────────────────────────────────────

    #[test]
    fn test_period_detection_simple() {
        let series = vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0];
        let p = PeriodDoubling::detect_period(&series, 0.001, 10);
        assert_eq!(p, 2, "Simple alternating series should have period 2");
    }

    #[test]
    fn test_period_doubling_count() {
        // There should be at least 2 period-doubling bifurcations before r=3.6.
        let count = PeriodDoubling::count_doublings(3.6, 0.5);
        assert!(
            count >= 2,
            "Should see ≥ 2 doublings before r=3.6, got {count}"
        );
    }

    // ── Chaos analysis ────────────────────────────────────────────────────────

    #[test]
    fn test_chaos_analysis_summary_format() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([1.0, 1.0, 1.0], 0.01, 200);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let ca = ChaosAnalysis::new(1, 3);
        let s = ca.summary(&x_series);
        assert!(
            s.contains("Largest Lyapunov"),
            "Summary should contain Lyapunov info"
        );
        assert!(
            s.contains("Correlation dim"),
            "Summary should contain dimension info"
        );
    }

    #[test]
    fn test_chaos_analysis_lorenz_positive_lyapunov() {
        let sys = LorenzSystem::classic();
        let traj = sys.integrate([0.1, 0.1, 0.1], 0.01, 1000);
        let x_series: Vec<f64> = traj.iter().map(|pt| pt[0]).collect();
        let ca = ChaosAnalysis::new(1, 3);
        let lambda = ca.largest_lyapunov(&x_series, 5);
        // Should be positive for chaotic series.
        // NOTE(review): despite the test name, the bound below (λ > −1.0) only
        // rules out a strongly negative estimate — consider tightening to λ > 0
        // once the estimator is validated at this trajectory length.
        assert!(
            lambda > -1.0,
            "Chaos analysis Lyapunov should not be very negative, got {lambda}"
        );
    }
}