// oxiphysics_core/control_theory.rs
// Copyright 2026 COOLJAPAN OU (Team KitaSan)
// SPDX-License-Identifier: Apache-2.0

//! Control theory: PID, state-space, LQR, Kalman filter, EKF, Bode analysis,
//! root locus, Ziegler-Nichols auto-tuning, lead/lag compensator, poles/zeros.
//!
//! # Overview
//!
//! This module provides classical and modern control theory building blocks:
//!
//! - [`PidController`] — PID with anti-windup, derivative filter, output clamping
//! - [`StateSpaceModel`] — A, B, C, D matrices; step response; stability check
//! - [`LqrController`] — LQR via discrete algebraic Riccati equation (iterative)
//! - [`KalmanFilter`] — discrete Kalman filter: predict + update
//! - [`ExtendedKalmanFilter`] — EKF with Jacobian-based linearization
//! - [`BodeAnalysis`] — gain/phase at frequencies; gain/phase margins
//! - [`RootLocus`] — closed-loop poles vs gain K; breakaway points
//! - [`ZieglerNichols`] — auto-tune PID from ultimate gain/period
//! - [`LeadLagCompensator`] — lead/lag compensator transfer function
//! - [`PolesZeros`] — transfer function poles, zeros, DC gain, step response

#![allow(dead_code)]
#![allow(clippy::too_many_arguments)]

// ---------------------------------------------------------------------------
// Internal complex number helper
// ---------------------------------------------------------------------------

/// Minimal complex-number value type used internally for frequency-domain
/// evaluation. Not exported; callers work with `(re, im)` tuples instead.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Cplx {
    re: f64,
    im: f64,
}

impl Cplx {
    /// Build a complex number from its real and imaginary parts.
    fn new(re: f64, im: f64) -> Self {
        Cplx { re, im }
    }

    /// Magnitude (Euclidean norm).
    fn abs(self) -> f64 {
        (self.re * self.re + self.im * self.im).sqrt()
    }

    /// Argument (phase angle) in radians via `atan2`, so in (-π, π].
    fn arg(self) -> f64 {
        self.im.atan2(self.re)
    }

    /// Complex multiplication: (a+bi)(c+di) = (ac−bd) + (ad+bc)i.
    fn mul(self, rhs: Self) -> Self {
        let re = self.re * rhs.re - self.im * rhs.im;
        let im = self.re * rhs.im + self.im * rhs.re;
        Cplx { re, im }
    }

    /// Complex addition.
    fn add(self, rhs: Self) -> Self {
        Cplx::new(self.re + rhs.re, self.im + rhs.im)
    }

    /// Complex subtraction.
    fn sub(self, rhs: Self) -> Self {
        Cplx::new(self.re - rhs.re, self.im - rhs.im)
    }

    /// Complex division; yields NaN components when the divisor's squared
    /// magnitude is below 1e-300 (effectively zero).
    fn div(self, rhs: Self) -> Self {
        let denom = rhs.re * rhs.re + rhs.im * rhs.im;
        if denom.abs() < 1e-300 {
            Cplx::new(f64::NAN, f64::NAN)
        } else {
            Cplx::new(
                (self.re * rhs.re + self.im * rhs.im) / denom,
                (self.im * rhs.re - self.re * rhs.im) / denom,
            )
        }
    }

    /// Multiply both components by a real scalar.
    fn scale(self, s: f64) -> Self {
        Cplx::new(self.re * s, self.im * s)
    }

    /// Complex conjugate.
    fn conj(self) -> Self {
        Cplx::new(self.re, -self.im)
    }
}
86
87// Evaluate polynomial with complex coefficients at complex point s.
88// Coefficients in descending order: coeffs[0]*s^n + ... + coeffs[n].
89fn poly_eval_cplx(coeffs: &[f64], s: Cplx) -> Cplx {
90    let mut result = Cplx::new(0.0, 0.0);
91    for &c in coeffs {
92        result = result.mul(s).add(Cplx::new(c, 0.0));
93    }
94    result
95}

// ---------------------------------------------------------------------------
// Simple fixed-size matrix helpers (row-major, stack-allocated via Vec)
// ---------------------------------------------------------------------------

/// Multiply two row-major matrices: (r×k) × (k×c) → (r×c).
fn mat_mul(a: &[f64], b: &[f64], r: usize, k: usize, c: usize) -> Vec<f64> {
    let mut out = vec![0.0f64; r * c];
    for i in 0..r {
        for j in 0..c {
            // Dot product of row i of `a` with column j of `b`.
            out[i * c + j] = (0..k).map(|p| a[i * k + p] * b[p * c + j]).sum::<f64>();
        }
    }
    out
}

/// Element-wise sum of two equally-shaped matrices (truncates to the shorter).
fn mat_add(a: &[f64], b: &[f64]) -> Vec<f64> {
    let mut out = Vec::with_capacity(a.len());
    for (x, y) in a.iter().zip(b) {
        out.push(x + y);
    }
    out
}

/// Element-wise difference of two equally-shaped matrices.
fn mat_sub(a: &[f64], b: &[f64]) -> Vec<f64> {
    let mut out = Vec::with_capacity(a.len());
    for (x, y) in a.iter().zip(b) {
        out.push(x - y);
    }
    out
}

/// Transpose an n×m row-major matrix into an m×n row-major matrix.
fn mat_transpose(a: &[f64], n: usize, m: usize) -> Vec<f64> {
    let mut out = vec![0.0f64; n * m];
    for (idx, &val) in a.iter().enumerate() {
        // Recover (row, col) of the source entry, scatter into the transpose.
        let (i, j) = (idx / m, idx % m);
        out[j * n + i] = val;
    }
    out
}

/// Multiply every entry of a matrix by a scalar.
fn mat_scale(a: &[f64], s: f64) -> Vec<f64> {
    let mut out = a.to_vec();
    for v in &mut out {
        *v *= s;
    }
    out
}

/// n×n identity matrix, row-major.
fn mat_eye(n: usize) -> Vec<f64> {
    // Diagonal entries sit at indices i*(n+1), i.e. the multiples of n+1.
    (0..n * n)
        .map(|idx| if idx % (n + 1) == 0 { 1.0 } else { 0.0 })
        .collect()
}

/// Invert a 2×2 row-major matrix; `None` when the determinant is ~zero.
fn mat_inv2(a: &[f64]) -> Option<Vec<f64>> {
    let [m00, m01, m10, m11] = [a[0], a[1], a[2], a[3]];
    let det = m00 * m11 - m01 * m10;
    if det.abs() < 1e-300 {
        None
    } else {
        // Adjugate divided by the determinant.
        Some(vec![m11 / det, -m01 / det, -m10 / det, m00 / det])
    }
}

/// Invert an n×n row-major matrix via Gauss-Jordan elimination with partial
/// pivoting. Returns `None` when a pivot column is numerically zero
/// (matrix is singular or near-singular).
fn mat_inv(a: &[f64], n: usize) -> Option<Vec<f64>> {
    let mut work = a.to_vec();
    // The right-hand side of the augmented system starts as the identity.
    let mut inv = vec![0.0f64; n * n];
    for i in 0..n {
        inv[i * n + i] = 1.0;
    }

    for col in 0..n {
        // Partial pivoting: pick the row with the largest |entry| in this column.
        let mut pivot_row = col;
        let mut pivot_mag = work[col * n + col].abs();
        for row in (col + 1)..n {
            let mag = work[row * n + col].abs();
            if mag > pivot_mag {
                pivot_mag = mag;
                pivot_row = row;
            }
        }
        if pivot_mag < 1e-300 {
            return None; // singular
        }
        if pivot_row != col {
            for j in 0..n {
                work.swap(col * n + j, pivot_row * n + j);
                inv.swap(col * n + j, pivot_row * n + j);
            }
        }
        // Normalize the pivot row so the pivot becomes 1.
        let pivot = work[col * n + col];
        for j in 0..n {
            work[col * n + j] /= pivot;
            inv[col * n + j] /= pivot;
        }
        // Zero the column in every other row.
        for row in 0..n {
            if row == col {
                continue;
            }
            let factor = work[row * n + col];
            for j in 0..n {
                let wv = work[col * n + j];
                let iv = inv[col * n + j];
                work[row * n + j] -= factor * wv;
                inv[row * n + j] -= factor * iv;
            }
        }
    }
    Some(inv)
}

/// Frobenius norm: sqrt of the sum of squared entries.
fn mat_frob(a: &[f64]) -> f64 {
    let sum_sq: f64 = a.iter().map(|x| x * x).sum();
    sum_sq.sqrt()
}

// ---------------------------------------------------------------------------
// PidController
// ---------------------------------------------------------------------------

/// A PID controller with anti-windup, derivative filter, and output clamping.
///
/// Implements the parallel form:
/// `u = Kp*e + Ki*integral(e) + Kd*derivative(e)`
///
/// Anti-windup: the integral is clamped to `[-integral_limit, integral_limit]`
/// (`integral_limit` must be non-negative, otherwise `clamp` panics).
/// Derivative filter: first-order low-pass with coefficient `N`
/// (larger `N` = less filtering).
/// Output clamping: every returned output lies in `[out_min, out_max]`.
#[derive(Debug, Clone)]
pub struct PidController {
    /// Proportional gain.
    pub kp: f64,
    /// Integral gain.
    pub ki: f64,
    /// Derivative gain.
    pub kd: f64,
    /// Derivative filter coefficient (N = filter pole; larger = less filtering).
    pub n: f64,
    /// Maximum absolute value of the integral term (anti-windup limit).
    pub integral_limit: f64,
    /// Minimum output value.
    pub out_min: f64,
    /// Maximum output value.
    pub out_max: f64,
    /// Accumulated integral state.
    pub integral: f64,
    /// Previous filtered derivative state.
    pub deriv_filter: f64,
    /// Previous error (for derivative computation).
    pub prev_error: f64,
}

impl PidController {
    /// Create a new PID controller with the given gains and defaults:
    /// `N = 10`, effectively unlimited integral, unbounded output.
    pub fn new(kp: f64, ki: f64, kd: f64) -> Self {
        Self {
            kp,
            ki,
            kd,
            n: 10.0,
            integral_limit: 1e9,
            out_min: f64::NEG_INFINITY,
            out_max: f64::INFINITY,
            integral: 0.0,
            deriv_filter: 0.0,
            prev_error: 0.0,
        }
    }

    /// Set derivative filter coefficient (default 10.0).
    pub fn with_n(mut self, n: f64) -> Self {
        self.n = n;
        self
    }

    /// Set integral anti-windup limit.
    pub fn with_integral_limit(mut self, limit: f64) -> Self {
        self.integral_limit = limit;
        self
    }

    /// Set output clamping limits.
    pub fn with_output_limits(mut self, min: f64, max: f64) -> Self {
        self.out_min = min;
        self.out_max = max;
        self
    }

    /// Compute the control output given the error and time step `dt`.
    ///
    /// For non-positive `dt` the internal state is left untouched and a
    /// neutral (zero) command — clamped to the output limits — is returned.
    pub fn update(&mut self, error: f64, dt: f64) -> f64 {
        if dt <= 0.0 {
            // Bug fix: the neutral command must still respect the configured
            // actuator limits; previously an unclamped 0.0 was returned even
            // when `out_min > 0`.
            return 0.0f64.clamp(self.out_min, self.out_max);
        }

        // Proportional term.
        let p = self.kp * error;

        // Integral term with anti-windup clamping.
        self.integral = (self.integral + error * dt)
            .clamp(-self.integral_limit, self.integral_limit);
        let i = self.ki * self.integral;

        // Derivative through a first-order low-pass filter:
        //   filtered' = alpha * filtered + (1 - alpha) * raw
        // with alpha = 1 / (1 + N*dt): larger N (or dt) weights the raw
        // derivative more; as dt → 0 the filter state barely moves.
        let alpha = 1.0 / (1.0 + self.n * dt);
        let raw_deriv = (error - self.prev_error) / dt;
        self.deriv_filter = alpha * self.deriv_filter + (1.0 - alpha) * raw_deriv;
        let d = self.kd * self.deriv_filter;

        self.prev_error = error;

        // Clamp the combined output to the configured range.
        (p + i + d).clamp(self.out_min, self.out_max)
    }

    /// Reset integral, derivative-filter, and previous-error state to zero.
    pub fn reset(&mut self) {
        self.integral = 0.0;
        self.deriv_filter = 0.0;
        self.prev_error = 0.0;
    }
}

// ---------------------------------------------------------------------------
// StateSpaceModel
// ---------------------------------------------------------------------------

323/// Discrete-time linear state-space model: x\[k+1\] = A*x\[k\] + B*u\[k\], y\[k\] = C*x\[k\] + D*u\[k\].
324///
325/// Matrices are stored row-major. Dimensions: A is n×n, B is n×m, C is p×n, D is p×m.
326#[derive(Debug, Clone)]
327pub struct StateSpaceModel {
328    /// State matrix A (n×n), row-major.
329    pub a: Vec<f64>,
330    /// Input matrix B (n×m), row-major.
331    pub b: Vec<f64>,
332    /// Output matrix C (p×n), row-major.
333    pub c: Vec<f64>,
334    /// Feedthrough matrix D (p×m), row-major.
335    pub d: Vec<f64>,
336    /// Number of states n.
337    pub n: usize,
338    /// Number of inputs m.
339    pub m: usize,
340    /// Number of outputs p.
341    pub p: usize,
342    /// Current state vector (length n).
343    pub state: Vec<f64>,
344}
345
346impl StateSpaceModel {
347    /// Create a new state-space model.
348    ///
349    /// # Panics
350    /// Panics if matrix dimensions are inconsistent.
351    pub fn new(
352        a: Vec<f64>,
353        b: Vec<f64>,
354        c: Vec<f64>,
355        d: Vec<f64>,
356        n: usize,
357        m: usize,
358        p: usize,
359    ) -> Self {
360        assert_eq!(a.len(), n * n, "A must be n×n");
361        assert_eq!(b.len(), n * m, "B must be n×m");
362        assert_eq!(c.len(), p * n, "C must be p×n");
363        assert_eq!(d.len(), p * m, "D must be p×m");
364        Self {
365            a,
366            b,
367            c,
368            d,
369            n,
370            m,
371            p,
372            state: vec![0.0; n],
373        }
374    }
375
376    /// Step the system: apply input `u` (length m), advance state, return output y (length p).
377    pub fn step(&mut self, u: &[f64]) -> Vec<f64> {
378        assert_eq!(u.len(), self.m);
379        // x_new = A*x + B*u
380        let ax = mat_mul(&self.a, &self.state, self.n, self.n, 1);
381        let bu = mat_mul(&self.b, u, self.n, self.m, 1);
382        let x_new = mat_add(&ax, &bu);
383
384        // y = C*x + D*u
385        let cx = mat_mul(&self.c, &self.state, self.p, self.n, 1);
386        let du = mat_mul(&self.d, u, self.p, self.m, 1);
387        let y = mat_add(&cx, &du);
388
389        self.state = x_new;
390        y
391    }
392
393    /// Compute step response: apply unit step input 0→1, collect outputs for `steps` steps.
394    /// Returns a vector of (time, y\[0\]) pairs.
395    pub fn step_response(&mut self, steps: usize) -> Vec<(f64, f64)> {
396        self.state = vec![0.0; self.n];
397        let u = vec![1.0; self.m];
398        (0..steps)
399            .map(|k| {
400                let y = self.step(&u);
401                (k as f64, y[0])
402            })
403            .collect()
404    }
405
406    /// Check stability: all eigenvalues of A must lie strictly inside the unit disk (discrete).
407    /// Uses power iteration / Gershgorin estimate for small systems.
408    /// Returns `true` if stable (all |λ| < 1).
409    pub fn is_stable(&self) -> bool {
410        // For n≤4, use characteristic polynomial roots via companion matrix approach.
411        // For general stability, use Gershgorin circle theorem as a sufficient condition,
412        // then fall back to iterative spectral radius estimation.
413        let spectral_radius = spectral_radius_power_iter(&self.a, self.n, 200, 1e-8);
414        spectral_radius < 1.0
415    }
416
417    /// Reset state to zero.
418    pub fn reset(&mut self) {
419        self.state = vec![0.0; self.n];
420    }
421}

/// Estimate the spectral radius of an n×n row-major matrix via power
/// iteration. Returns the last iterate's vector norm when the change between
/// consecutive iterations drops below `tol`, or after `max_iter` iterations.
fn spectral_radius_power_iter(a: &[f64], n: usize, max_iter: usize, tol: f64) -> f64 {
    if n == 0 {
        return 0.0;
    }
    let mut v = vec![1.0f64; n];
    let mut prev_estimate = 0.0f64;
    for _ in 0..max_iter {
        // w = A * v (inlined matrix-vector product).
        let mut w = vec![0.0f64; n];
        for (i, wi) in w.iter_mut().enumerate() {
            let mut acc = 0.0;
            for (p, vp) in v.iter().enumerate() {
                acc += a[i * n + p] * vp;
            }
            *wi = acc;
        }
        let norm = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm < 1e-300 {
            // The iterate collapsed to (numerically) zero.
            return 0.0;
        }
        v = w.iter().map(|x| x / norm).collect();
        if (norm - prev_estimate).abs() < tol {
            return norm;
        }
        prev_estimate = norm;
    }
    prev_estimate
}

// ---------------------------------------------------------------------------
// LqrController
// ---------------------------------------------------------------------------

453/// Discrete Linear Quadratic Regulator (LQR) controller.
454///
455/// Solves the discrete algebraic Riccati equation (DARE) iteratively and
456/// computes the optimal gain matrix K such that u = -K*x minimizes
457/// J = Σ (x'Qx + u'Ru).
458#[derive(Debug, Clone)]
459pub struct LqrController {
460    /// Optimal gain matrix K (m×n), row-major.
461    pub k: Vec<f64>,
462    /// Number of states n.
463    pub n: usize,
464    /// Number of inputs m.
465    pub m: usize,
466}
467
468impl LqrController {
469    /// Solve DARE and compute LQR gain.
470    ///
471    /// - `a` — state matrix (n×n)
472    /// - `b` — input matrix (n×m)
473    /// - `q` — state cost matrix (n×n), positive semi-definite
474    /// - `r` — input cost matrix (m×m), positive definite
475    /// - `max_iter` — maximum iterations for Riccati iteration
476    /// - `tol` — convergence tolerance (Frobenius norm of P change)
477    pub fn solve(
478        a: &[f64],
479        b: &[f64],
480        q: &[f64],
481        r: &[f64],
482        n: usize,
483        m: usize,
484        max_iter: usize,
485        tol: f64,
486    ) -> Option<Self> {
487        // Value iteration: P_{k+1} = Q + A'*P_k*A - A'*P_k*B*(R + B'*P_k*B)^{-1}*B'*P_k*A
488        let at = mat_transpose(a, n, n);
489        let bt = mat_transpose(b, n, m);
490        let mut p = q.to_vec();
491
492        for _iter in 0..max_iter {
493            let _p_a = mat_mul(&p, a, n, n, n); // P*A
494            let at_p = mat_mul(&at, &p, n, n, n); // A'*P
495            let at_p_a = mat_mul(&at_p, a, n, n, n); // A'*P*A
496            let at_p_b = mat_mul(&at_p, b, n, n, m); // A'*P*B
497            let bt_p = mat_mul(&bt, &p, m, n, n); // B'*P
498            let bt_p_b = mat_mul(&bt_p, b, m, n, m); // B'*P*B
499            let r_bpb = mat_add(r, &bt_p_b); // R + B'*P*B
500
501            let r_bpb_inv = mat_inv(&r_bpb, m)?;
502            let at_p_b_rinv = mat_mul(&at_p_b, &r_bpb_inv, n, m, m); // A'*P*B*(R+B'PB)^{-1}
503            let correction = mat_mul(&at_p_b_rinv, &mat_transpose(&at_p_b, n, m), n, m, n);
504
505            let p_new = mat_sub(&mat_add(q, &at_p_a), &correction);
506
507            let diff = mat_frob(&mat_sub(&p_new, &p));
508            p = p_new;
509            if diff < tol {
510                break;
511            }
512        }
513
514        // K = (R + B'*P*B)^{-1} * B'*P*A
515        let at = mat_transpose(a, n, n);
516        let bt = mat_transpose(b, n, m);
517        let bt_p = mat_mul(&bt, &p, m, n, n);
518        let bt_p_b = mat_mul(&bt_p, b, m, n, m);
519        let r_bpb = mat_add(r, &bt_p_b);
520        let r_bpb_inv = mat_inv(&r_bpb, m)?;
521        let bt_p_a = mat_mul(&bt_p, a, m, n, n);
522        let k = mat_mul(&r_bpb_inv, &bt_p_a, m, m, n);
523
524        // Suppress unused variable warning for `at`
525        let _ = &at;
526
527        Some(Self { k, n, m })
528    }
529
530    /// Compute control input: u = -K*x.
531    pub fn control(&self, x: &[f64]) -> Vec<f64> {
532        let kx = mat_mul(&self.k, x, self.m, self.n, 1);
533        kx.iter().map(|v| -v).collect()
534    }
535}

// ---------------------------------------------------------------------------
// KalmanFilter
// ---------------------------------------------------------------------------

541/// Discrete Kalman filter.
542///
543/// State: x̂ (n×1). Matrices: A (n×n), B (n×m), C (p×n), Q (n×n), R (p×p).
544/// Predict: x̂⁻ = A*x̂ + B*u, P⁻ = A*P*A' + Q
545/// Update: K = P⁻*C'*(C*P⁻*C' + R)⁻¹, x̂ = x̂⁻ + K*(y - C*x̂⁻), P = (I - K*C)*P⁻
546#[derive(Debug, Clone)]
547pub struct KalmanFilter {
548    /// State matrix A (n×n).
549    pub a: Vec<f64>,
550    /// Input matrix B (n×m).
551    pub b: Vec<f64>,
552    /// Observation matrix C (p×n).
553    pub c: Vec<f64>,
554    /// Process noise covariance Q (n×n).
555    pub q: Vec<f64>,
556    /// Measurement noise covariance R (p×p).
557    pub r: Vec<f64>,
558    /// State estimate x̂ (length n).
559    pub x: Vec<f64>,
560    /// Error covariance matrix P (n×n).
561    pub p: Vec<f64>,
562    /// Number of states n.
563    pub n: usize,
564    /// Number of inputs m.
565    pub m: usize,
566    /// Number of outputs p.
567    pub p_dim: usize,
568}
569
570impl KalmanFilter {
571    /// Create a new Kalman filter with identity initial covariance.
572    pub fn new(
573        a: Vec<f64>,
574        b: Vec<f64>,
575        c: Vec<f64>,
576        q: Vec<f64>,
577        r: Vec<f64>,
578        n: usize,
579        m: usize,
580        p_dim: usize,
581    ) -> Self {
582        let p_init = mat_eye(n);
583        Self {
584            a,
585            b,
586            c,
587            q,
588            r,
589            x: vec![0.0; n],
590            p: p_init,
591            n,
592            m,
593            p_dim,
594        }
595    }
596
597    /// Predict step: propagate state and covariance.
598    pub fn predict(&mut self, u: &[f64]) {
599        let ax = mat_mul(&self.a, &self.x, self.n, self.n, 1);
600        let bu = mat_mul(&self.b, u, self.n, self.m, 1);
601        self.x = mat_add(&ax, &bu);
602
603        let at = mat_transpose(&self.a, self.n, self.n);
604        let apa = mat_mul(
605            &mat_mul(&self.a, &self.p, self.n, self.n, self.n),
606            &at,
607            self.n,
608            self.n,
609            self.n,
610        );
611        self.p = mat_add(&apa, &self.q);
612    }
613
614    /// Update step: incorporate measurement y (length p_dim).
615    pub fn update(&mut self, y: &[f64]) -> Option<()> {
616        let ct = mat_transpose(&self.c, self.p_dim, self.n);
617        let pct = mat_mul(&self.p, &ct, self.n, self.n, self.p_dim);
618        let cpct = mat_mul(&self.c, &pct, self.p_dim, self.n, self.p_dim);
619        let s = mat_add(&cpct, &self.r); // S = C*P*C' + R  (p×p)
620
621        let s_inv = mat_inv(&s, self.p_dim)?;
622        // K = P*C' * S^{-1}  (n×p)
623        let k = mat_mul(&pct, &s_inv, self.n, self.p_dim, self.p_dim);
624
625        // Innovation: innov = y - C*x̂  (p×1)
626        let cx = mat_mul(&self.c, &self.x, self.p_dim, self.n, 1);
627        let innov: Vec<f64> = y.iter().zip(cx.iter()).map(|(yi, ci)| yi - ci).collect();
628
629        // x̂ = x̂ + K*innov
630        let k_innov = mat_mul(&k, &innov, self.n, self.p_dim, 1);
631        self.x = mat_add(&self.x, &k_innov);
632
633        // P = (I - K*C)*P
634        let kc = mat_mul(&k, &self.c, self.n, self.p_dim, self.n);
635        let i_kc = mat_sub(&mat_eye(self.n), &kc);
636        self.p = mat_mul(&i_kc, &self.p, self.n, self.n, self.n);
637
638        Some(())
639    }
640
641    /// Run one full predict-update cycle.
642    pub fn step(&mut self, u: &[f64], y: &[f64]) -> Option<Vec<f64>> {
643        self.predict(u);
644        self.update(y)?;
645        Some(self.x.clone())
646    }
647}

// ---------------------------------------------------------------------------
// ExtendedKalmanFilter
// ---------------------------------------------------------------------------

653/// Extended Kalman Filter (EKF) with Jacobian-based linearization.
654///
655/// The user provides:
656/// - `f`: nonlinear state transition function f(x, u) → x_new
657/// - `h`: nonlinear observation function h(x) → y
658/// - `jac_f`: Jacobian of f with respect to x (n×n matrix, row-major)
659/// - `jac_h`: Jacobian of h with respect to x (p×n matrix, row-major)
660#[derive(Debug, Clone)]
661pub struct ExtendedKalmanFilter {
662    /// State estimate (length n).
663    pub x: Vec<f64>,
664    /// Error covariance (n×n, row-major).
665    pub p: Vec<f64>,
666    /// Process noise covariance Q (n×n).
667    pub q: Vec<f64>,
668    /// Measurement noise covariance R (p×p).
669    pub r: Vec<f64>,
670    /// Number of states.
671    pub n: usize,
672    /// Number of outputs.
673    pub p_dim: usize,
674}
675
676impl ExtendedKalmanFilter {
677    /// Create a new EKF.
678    pub fn new(q: Vec<f64>, r: Vec<f64>, n: usize, p_dim: usize) -> Self {
679        Self {
680            x: vec![0.0; n],
681            p: mat_eye(n),
682            q,
683            r,
684            n,
685            p_dim,
686        }
687    }
688
689    /// EKF predict step.
690    ///
691    /// - `f`: state transition f(x, u) → new state
692    /// - `jac_f`: Jacobian F = ∂f/∂x at current x (row-major, n×n)
693    /// - `u`: control input
694    pub fn predict<F, J>(&mut self, f: F, jac_f: J, u: &[f64])
695    where
696        F: Fn(&[f64], &[f64]) -> Vec<f64>,
697        J: Fn(&[f64], &[f64]) -> Vec<f64>,
698    {
699        let f_val = jac_f(&self.x, u);
700        // P⁻ = F*P*F' + Q
701        let ft = mat_transpose(&f_val, self.n, self.n);
702        let fp = mat_mul(&f_val, &self.p, self.n, self.n, self.n);
703        let fpft = mat_mul(&fp, &ft, self.n, self.n, self.n);
704        self.p = mat_add(&fpft, &self.q);
705        self.x = f(&self.x, u);
706    }
707
708    /// EKF update step.
709    ///
710    /// - `h`: observation function h(x) → y
711    /// - `jac_h`: Jacobian H = ∂h/∂x at current x (row-major, p×n)
712    /// - `y`: measurement vector (length p_dim)
713    pub fn update<H, J>(&mut self, h: H, jac_h: J, y: &[f64]) -> Option<()>
714    where
715        H: Fn(&[f64]) -> Vec<f64>,
716        J: Fn(&[f64]) -> Vec<f64>,
717    {
718        let h_jac = jac_h(&self.x);
719        let ht = mat_transpose(&h_jac, self.p_dim, self.n);
720        let pht = mat_mul(&self.p, &ht, self.n, self.n, self.p_dim);
721        let hpht = mat_mul(&h_jac, &pht, self.p_dim, self.n, self.p_dim);
722        let s = mat_add(&hpht, &self.r);
723        let s_inv = mat_inv(&s, self.p_dim)?;
724        let k = mat_mul(&pht, &s_inv, self.n, self.p_dim, self.p_dim);
725
726        let h_x = h(&self.x);
727        let innov: Vec<f64> = y.iter().zip(h_x.iter()).map(|(yi, hi)| yi - hi).collect();
728        let k_innov = mat_mul(&k, &innov, self.n, self.p_dim, 1);
729        self.x = mat_add(&self.x, &k_innov);
730
731        let kh = mat_mul(&k, &h_jac, self.n, self.p_dim, self.n);
732        let i_kh = mat_sub(&mat_eye(self.n), &kh);
733        self.p = mat_mul(&i_kh, &self.p, self.n, self.n, self.n);
734        Some(())
735    }
736}

// ---------------------------------------------------------------------------
// BodeAnalysis
// ---------------------------------------------------------------------------

/// Bode analysis for a transfer function given numerator and denominator polynomials.
///
/// Polynomials stored as coefficients in descending order of s:
/// `num = [b_n, ..., b_0]`, `den = [a_m, ..., a_0]`.
#[derive(Debug, Clone)]
pub struct BodeAnalysis {
    /// Numerator polynomial coefficients (descending order of s).
    pub num: Vec<f64>,
    /// Denominator polynomial coefficients (descending order of s).
    pub den: Vec<f64>,
}

/// A single point on a Bode plot.
#[derive(Debug, Clone, Copy)]
pub struct BodePoint {
    /// Angular frequency ω (rad/s).
    pub omega: f64,
    /// Gain in dB: 20*log10(|G(jω)|).
    pub gain_db: f64,
    /// Phase in degrees (principal value from `atan2`, i.e. in (-180°, 180°]).
    pub phase_deg: f64,
}
764
765impl BodeAnalysis {
766    /// Create a Bode analysis object.
767    pub fn new(num: Vec<f64>, den: Vec<f64>) -> Self {
768        Self { num, den }
769    }
770
771    /// Evaluate transfer function G(jω) at angular frequency ω.
772    pub fn eval_at(&self, omega: f64) -> (f64, f64) {
773        let s = Cplx::new(0.0, omega);
774        let num_val = poly_eval_cplx(&self.num, s);
775        let den_val = poly_eval_cplx(&self.den, s);
776        let g = num_val.div(den_val);
777        let gain = g.abs();
778        let phase_deg = g.arg().to_degrees();
779        (gain, phase_deg)
780    }
781
782    /// Compute Bode points at the given frequencies.
783    pub fn compute(&self, omegas: &[f64]) -> Vec<BodePoint> {
784        omegas
785            .iter()
786            .map(|&omega| {
787                let (gain, phase_deg) = self.eval_at(omega);
788                let gain_db = if gain > 1e-300 {
789                    20.0 * gain.log10()
790                } else {
791                    -300.0
792                };
793                BodePoint {
794                    omega,
795                    gain_db,
796                    phase_deg,
797                }
798            })
799            .collect()
800    }
801
802    /// Compute logarithmically-spaced Bode plot from ω_min to ω_max with n_points points.
803    pub fn compute_log(&self, omega_min: f64, omega_max: f64, n_points: usize) -> Vec<BodePoint> {
804        if n_points == 0 {
805            return vec![];
806        }
807        let log_min = omega_min.log10();
808        let log_max = omega_max.log10();
809        let omegas: Vec<f64> = (0..n_points)
810            .map(|i| {
811                let t = i as f64 / (n_points - 1).max(1) as f64;
812                10f64.powf(log_min + t * (log_max - log_min))
813            })
814            .collect();
815        self.compute(&omegas)
816    }
817
818    /// Find gain margin (dB) and phase margin (degrees).
819    ///
820    /// - Gain margin: gain (dB) at the phase crossover frequency (where phase = -180°).
821    /// - Phase margin: phase (deg) + 180° at the gain crossover frequency (where gain = 0 dB).
822    ///
823    /// Returns `(gain_margin_db, phase_margin_deg)`.
824    pub fn margins(&self, omega_min: f64, omega_max: f64, n_points: usize) -> (f64, f64) {
825        let points = self.compute_log(omega_min, omega_max, n_points);
826        if points.is_empty() {
827            return (f64::NAN, f64::NAN);
828        }
829
830        // Phase crossover: phase = -180°
831        let mut phase_cross_gain_db = f64::NAN;
832        for i in 1..points.len() {
833            let p0 = &points[i - 1];
834            let p1 = &points[i];
835            if (p0.phase_deg + 180.0) * (p1.phase_deg + 180.0) <= 0.0 {
836                // Linear interpolation
837                let frac = (-(p0.phase_deg + 180.0)) / (p1.phase_deg - p0.phase_deg + 1e-300);
838                phase_cross_gain_db = p0.gain_db + frac * (p1.gain_db - p0.gain_db);
839                break;
840            }
841        }
842
843        // Gain crossover: gain = 0 dB
844        let mut gain_cross_phase_margin = f64::NAN;
845        for i in 1..points.len() {
846            let p0 = &points[i - 1];
847            let p1 = &points[i];
848            if p0.gain_db * p1.gain_db <= 0.0 {
849                let frac = (-p0.gain_db) / (p1.gain_db - p0.gain_db + 1e-300);
850                let phase = p0.phase_deg + frac * (p1.phase_deg - p0.phase_deg);
851                gain_cross_phase_margin = phase + 180.0;
852                break;
853            }
854        }
855
856        let gain_margin_db = -phase_cross_gain_db;
857        (gain_margin_db, gain_cross_phase_margin)
858    }
859}

// ---------------------------------------------------------------------------
// RootLocus
// ---------------------------------------------------------------------------

/// Root locus computation: closed-loop poles as a function of gain K.
///
/// For open-loop transfer function G(s) = num(s)/den(s),
/// the closed-loop poles satisfy: den(s) + K*num(s) = 0.
#[derive(Debug, Clone)]
pub struct RootLocus {
    /// Numerator polynomial coefficients (descending order).
    pub num: Vec<f64>,
    /// Denominator polynomial coefficients (descending order).
    pub den: Vec<f64>,
}

/// A point on the root locus.
#[derive(Debug, Clone)]
pub struct RootLocusPoint {
    /// Gain K.
    pub gain: f64,
    /// Closed-loop poles at this gain (complex values as (re, im) pairs).
    pub poles: Vec<(f64, f64)>,
}
885
impl RootLocus {
    /// Create a root locus object from open-loop numerator/denominator
    /// coefficients, both in descending powers of s.
    pub fn new(num: Vec<f64>, den: Vec<f64>) -> Self {
        Self { num, den }
    }

    /// Compute closed-loop poles for each gain in `gains`.
    ///
    /// Uses companion matrix eigenvalue approach for small polynomials (degree ≤ 4).
    /// For the characteristic equation den(s) + K*num(s) = 0, forms the combined polynomial.
    pub fn compute(&self, gains: &[f64]) -> Vec<RootLocusPoint> {
        gains
            .iter()
            .map(|&k| {
                let poles = self.poles_at_gain(k);
                RootLocusPoint { gain: k, poles }
            })
            .collect()
    }

    /// Compute closed-loop poles for a specific gain K.
    ///
    /// Characteristic polynomial = den + K*num.
    /// Pads/truncates to match degree. Returns (re, im) pairs.
    pub fn poles_at_gain(&self, k: f64) -> Vec<(f64, f64)> {
        // Form characteristic polynomial den + K*num. Coefficients are in
        // descending order, so the shorter polynomial is shifted right by
        // `offset` to align its constant term with the last slot.
        let n_deg = self.den.len().max(self.num.len());
        let mut char_poly = vec![0.0f64; n_deg];
        for (i, &c) in self.den.iter().enumerate() {
            let offset = n_deg - self.den.len();
            char_poly[i + offset] += c;
        }
        for (i, &c) in self.num.iter().enumerate() {
            let offset = n_deg - self.num.len();
            char_poly[i + offset] += k * c;
        }

        // Find roots via companion matrix for degree ≤ 4
        find_polynomial_roots(&char_poly)
    }

    /// Compute breakaway points (real-axis segments, ∂K/∂s = 0).
    /// Returns approximate breakaway points on the real axis.
    ///
    /// Scans [s_min, s_max] in n_samples steps; a sign change in the
    /// finite-difference derivative of K(s) marks a local extremum of the
    /// gain, which is where locus branches meet or leave the real axis.
    /// Accuracy is limited by the step size: each difference quotient is
    /// centered between two samples, so estimates can be off by up to ~ds/2.
    pub fn breakaway_points(&self, s_min: f64, s_max: f64, n_samples: usize) -> Vec<f64> {
        // K(s) = -den(s)/num(s); breakaway where dK/ds = 0
        // Approximate via finite differences on real axis
        let ds = (s_max - s_min) / (n_samples as f64);
        let mut points = Vec::new();

        // Gain that places a closed-loop pole at the real point s;
        // NaN marks points where num(s) ≈ 0 (open-loop zeros).
        let k_at = |s_real: f64| {
            let s = Cplx::new(s_real, 0.0);
            let d = poly_eval_cplx(&self.den, s);
            let n = poly_eval_cplx(&self.num, s);
            if n.abs() < 1e-12 {
                f64::NAN
            } else {
                -d.re / n.re
            }
        };

        let mut prev_k = k_at(s_min);
        // prev_dk starts as NaN so the first iteration can never report a
        // spurious sign change.
        let mut prev_dk = f64::NAN;
        let mut prev_s = s_min;

        for i in 1..=n_samples {
            let s = s_min + i as f64 * ds;
            let cur_k = k_at(s);
            let dk = (cur_k - prev_k) / ds;
            if !prev_dk.is_nan() && !dk.is_nan() && prev_dk * dk < 0.0 {
                // Sign change in dk → local extremum; linearly interpolate the
                // zero crossing (the tiny epsilon guards the division when the
                // two slopes are nearly equal in magnitude).
                let s_break = prev_s + (-prev_dk) / (dk - prev_dk + 1e-300) * ds;
                points.push(s_break);
            }
            prev_dk = dk;
            prev_k = cur_k;
            prev_s = s;
        }
        points
    }
}
966
967/// Find roots of a polynomial via companion matrix eigenvalues.
968/// Returns up to degree-1 roots as (re, im) pairs.
969fn find_polynomial_roots(coeffs: &[f64]) -> Vec<(f64, f64)> {
970    // Remove leading zeros
971    let start = coeffs.iter().position(|&c| c.abs() > 1e-300).unwrap_or(0);
972    let c = &coeffs[start..];
973    let degree = c.len().saturating_sub(1);
974    if degree == 0 {
975        return vec![];
976    }
977    let leading = c[0];
978
979    // Build companion matrix (degree × degree), row-major
980    // C = [[0, 1, 0, ...], [0, 0, 1, ...], ..., [-c_n/c_0, ...]]
981    let n = degree;
982    let mut comp = vec![0.0f64; n * n];
983    for i in 0..(n - 1) {
984        comp[i * n + (i + 1)] = 1.0;
985    }
986    for j in 0..n {
987        comp[(n - 1) * n + j] = -c[n - j] / leading;
988    }
989
990    // QR iteration to find eigenvalues (simplified: only converges well for n≤4)
991
992    qr_eigenvalues(&comp, n, 200)
993}
994
995/// Simple QR algorithm to compute eigenvalues of a real matrix.
996fn qr_eigenvalues(a: &[f64], n: usize, max_iter: usize) -> Vec<(f64, f64)> {
997    if n == 1 {
998        return vec![(a[0], 0.0)];
999    }
1000    if n == 2 {
1001        // Quadratic formula
1002        let trace = a[0] + a[3];
1003        let det = a[0] * a[3] - a[1] * a[2];
1004        let disc = trace * trace - 4.0 * det;
1005        if disc >= 0.0 {
1006            let sqrt_d = disc.sqrt();
1007            return vec![((trace + sqrt_d) / 2.0, 0.0), ((trace - sqrt_d) / 2.0, 0.0)];
1008        } else {
1009            let sqrt_d = (-disc).sqrt();
1010            return vec![(trace / 2.0, sqrt_d / 2.0), (trace / 2.0, -sqrt_d / 2.0)];
1011        }
1012    }
1013
1014    // General: shifted QR iteration (Hessenberg form via Householder, then QR steps)
1015    // Simplified iterative QR without full Hessenberg for small matrices
1016    let mut h = a.to_vec();
1017    let _result: Vec<(f64, f64)> = vec![];
1018
1019    for _iter in 0..max_iter {
1020        // Wilkinson shift
1021        let mu = h[(n - 1) * n + (n - 1)];
1022        // Subtract shift
1023        for i in 0..n {
1024            h[i * n + i] -= mu;
1025        }
1026        // QR decomposition (Gram-Schmidt)
1027        let (q, r) = qr_decomp_gs(&h, n);
1028        // H = R*Q + mu*I
1029        h = mat_mul(&r, &q, n, n, n);
1030        for i in 0..n {
1031            h[i * n + i] += mu;
1032        }
1033        // Check convergence (sub-diagonal small)
1034        let off = h
1035            .iter()
1036            .enumerate()
1037            .filter(|(idx, _)| {
1038                let row = idx / n;
1039                let col = idx % n;
1040                col < row
1041            })
1042            .map(|(_, &v)| v * v)
1043            .sum::<f64>()
1044            .sqrt();
1045        if off < 1e-10 {
1046            break;
1047        }
1048    }
1049
1050    // Extract eigenvalues from quasi-upper-triangular form
1051    let mut eigs = Vec::new();
1052    let mut i = 0;
1053    while i < n {
1054        if i + 1 < n && h[i * n + (i + 1)].abs() > 1e-8 && h[(i + 1) * n + i].abs() > 1e-8 {
1055            // 2×2 block
1056            let a00 = h[i * n + i];
1057            let a01 = h[i * n + (i + 1)];
1058            let a10 = h[(i + 1) * n + i];
1059            let a11 = h[(i + 1) * n + (i + 1)];
1060            let trace = a00 + a11;
1061            let det = a00 * a11 - a01 * a10;
1062            let disc = trace * trace - 4.0 * det;
1063            if disc >= 0.0 {
1064                let sqrt_d = disc.sqrt();
1065                eigs.push(((trace + sqrt_d) / 2.0, 0.0));
1066                eigs.push(((trace - sqrt_d) / 2.0, 0.0));
1067            } else {
1068                let sqrt_d = (-disc).sqrt();
1069                eigs.push((trace / 2.0, sqrt_d / 2.0));
1070                eigs.push((trace / 2.0, -sqrt_d / 2.0));
1071            }
1072            i += 2;
1073        } else {
1074            eigs.push((h[i * n + i], 0.0));
1075            i += 1;
1076        }
1077    }
1078    eigs
1079}
1080
/// QR decomposition A = Q·R of an n×n row-major matrix using modified
/// Gram-Schmidt orthogonalization (each projection uses the partially
/// reduced residual, which is more numerically stable than classical GS).
///
/// Returns (Q, R), both n×n row-major. Q has orthonormal columns (an
/// all-zero column marks a rank-deficient input); R is upper triangular.
fn qr_decomp_gs(a: &[f64], n: usize) -> (Vec<f64>, Vec<f64>) {
    let mut q = vec![0.0f64; n * n];
    let mut r = vec![0.0f64; n * n];
    // Already-orthonormalized columns of Q, kept as contiguous vectors.
    let mut basis: Vec<Vec<f64>> = Vec::with_capacity(n);

    for j in 0..n {
        // Start from the j-th column of A.
        let mut work: Vec<f64> = (0..n).map(|i| a[i * n + j]).collect();

        // Remove the component along each previous basis vector. The
        // coefficient is computed against the *current* residual (MGS).
        for (k, e_k) in basis.iter().enumerate() {
            let coeff: f64 = work.iter().zip(e_k.iter()).map(|(x, y)| x * y).sum();
            r[k * n + j] = coeff;
            for (w, &e) in work.iter_mut().zip(e_k.iter()) {
                *w -= coeff * e;
            }
        }

        // Normalize the residual; a (near-)zero residual means this column
        // is linearly dependent and produces an all-zero Q column.
        let len: f64 = work.iter().zip(work.iter()).map(|(x, y)| x * y).sum::<f64>().sqrt();
        r[j * n + j] = len;
        let e_j: Vec<f64> = if len < 1e-300 {
            vec![0.0; n]
        } else {
            work.iter().map(|x| x / len).collect()
        };
        for (i, &e) in e_j.iter().enumerate() {
            q[i * n + j] = e;
        }
        basis.push(e_j);
    }
    (q, r)
}
1114
// ---------------------------------------------------------------------------
// ZieglerNichols
// ---------------------------------------------------------------------------

/// Ziegler-Nichols auto-tuning: derive PID gains from an ultimate-cycle
/// experiment.
///
/// `ku` is the ultimate (marginal-stability) gain and `tu` the period of the
/// sustained oscillation observed at that gain; the classic Ziegler-Nichols
/// tables map (Ku, Tu) to controller gains.
#[derive(Debug, Clone, Copy)]
pub struct ZieglerNichols {
    /// Ultimate gain (marginal stability gain).
    pub ku: f64,
    /// Ultimate period (oscillation period at Ku, in seconds).
    pub tu: f64,
}

/// Controller gains produced by a Ziegler-Nichols-style tuning rule.
#[derive(Debug, Clone, Copy)]
pub struct ZnGains {
    /// Proportional gain.
    pub kp: f64,
    /// Integral gain (Ki = Kp/Ti).
    pub ki: f64,
    /// Derivative gain (Kd = Kp*Td).
    pub kd: f64,
}

impl ZieglerNichols {
    /// Create a tuner from the measured ultimate gain and period.
    pub fn new(ku: f64, tu: f64) -> Self {
        Self { ku, tu }
    }

    /// Classic Ziegler-Nichols PID rule: Kp = 0.6·Ku, Ti = Tu/2, Td = Tu/8.
    pub fn pid(&self) -> ZnGains {
        let kp = 0.6 * self.ku;
        ZnGains {
            kp,
            ki: kp / (self.tu / 2.0),
            kd: kp * (self.tu / 8.0),
        }
    }

    /// Ziegler-Nichols PI rule: Kp = 0.45·Ku, Ti = Tu/1.2, no derivative.
    pub fn pi(&self) -> ZnGains {
        let kp = 0.45 * self.ku;
        ZnGains {
            kp,
            ki: kp / (self.tu / 1.2),
            kd: 0.0,
        }
    }

    /// Proportional-only rule: Kp = 0.5·Ku.
    pub fn p_only(&self) -> ZnGains {
        ZnGains {
            kp: 0.5 * self.ku,
            ki: 0.0,
            kd: 0.0,
        }
    }

    /// Simplified SIMC-style tuning.
    ///
    /// NOTE(review): with Kp = 0.6·Ku, Ti = 0.5·Tu, Td = 0.125·Tu this
    /// currently coincides numerically with the classic [`Self::pid`] rule;
    /// a full SIMC tuning would be derived from a first-order-plus-dead-time
    /// model rather than from (Ku, Tu).
    pub fn simc(&self) -> ZnGains {
        let kp = 0.6 * self.ku;
        let ti = 0.5 * self.tu;
        let td = 0.125 * self.tu;
        ZnGains {
            kp,
            ki: kp / ti,
            kd: kp * td,
        }
    }
}
1194
// ---------------------------------------------------------------------------
// LeadLagCompensator
// ---------------------------------------------------------------------------

/// First-order lead or lag compensator C(s) = Kc·(s + z)/(s + p).
///
/// - z < p: lead compensator — adds phase in mid-band, improves transients.
/// - z > p: lag compensator — boosts low-frequency gain, cuts steady-state
///   error.
#[derive(Debug, Clone, Copy)]
pub struct LeadLagCompensator {
    /// Gain factor Kc. This is the high-frequency gain (see `hf_gain`);
    /// the DC gain is Kc·z/p (see `dc_gain`).
    pub kc: f64,
    /// Zero location z (s + z in numerator).
    pub z: f64,
    /// Pole location p (s + p in denominator).
    pub p: f64,
}
1212
1213impl LeadLagCompensator {
1214    /// Create a lead/lag compensator.
1215    pub fn new(kc: f64, z: f64, p: f64) -> Self {
1216        Self { kc, z, p }
1217    }
1218
1219    /// Evaluate C(jω): returns (magnitude, phase_degrees).
1220    pub fn eval_at(&self, omega: f64) -> (f64, f64) {
1221        // C(jω) = Kc * (jω + z) / (jω + p)
1222        let num = Cplx::new(self.z, omega);
1223        let den = Cplx::new(self.p, omega);
1224        let c = num.div(den).scale(self.kc);
1225        (c.abs(), c.arg().to_degrees())
1226    }
1227
1228    /// Maximum phase lead (degrees) and frequency at which it occurs.
1229    /// Only meaningful for lead compensators (z < p).
1230    pub fn max_phase_lead(&self) -> (f64, f64) {
1231        // ω_max = sqrt(z*p), φ_max = arcsin((p-z)/(p+z))
1232        let omega_max = (self.z * self.p).sqrt();
1233        let phi_max = ((self.p - self.z) / (self.p + self.z)).asin().to_degrees();
1234        (omega_max, phi_max)
1235    }
1236
1237    /// DC gain C(0) = Kc * z / p.
1238    pub fn dc_gain(&self) -> f64 {
1239        self.kc * self.z / self.p
1240    }
1241
1242    /// High-frequency gain C(j∞) → Kc.
1243    pub fn hf_gain(&self) -> f64 {
1244        self.kc
1245    }
1246
1247    /// Bode plot at logarithmically-spaced frequencies.
1248    pub fn bode(&self, omega_min: f64, omega_max: f64, n: usize) -> Vec<BodePoint> {
1249        if n == 0 {
1250            return vec![];
1251        }
1252        let log_min = omega_min.log10();
1253        let log_max = omega_max.log10();
1254        (0..n)
1255            .map(|i| {
1256                let t = i as f64 / (n - 1).max(1) as f64;
1257                let omega = 10f64.powf(log_min + t * (log_max - log_min));
1258                let (mag, phase) = self.eval_at(omega);
1259                let gain_db = if mag > 1e-300 {
1260                    20.0 * mag.log10()
1261                } else {
1262                    -300.0
1263                };
1264                BodePoint {
1265                    omega,
1266                    gain_db,
1267                    phase_deg: phase,
1268                }
1269            })
1270            .collect()
1271    }
1272}
1273
// ---------------------------------------------------------------------------
// PolesZeros
// ---------------------------------------------------------------------------

/// Zero-pole-gain (ZPK) form of a transfer function:
///
/// G(s) = K · Π(s − zᵢ) / Π(s − pᵢ)
///
/// with zeros zᵢ and poles pᵢ stored as (re, im) pairs.
#[derive(Debug, Clone)]
pub struct PolesZeros {
    /// Gain factor K.
    pub gain: f64,
    /// Zeros as (real, imag) pairs.
    pub zeros: Vec<(f64, f64)>,
    /// Poles as (real, imag) pairs.
    pub poles: Vec<(f64, f64)>,
}
1291
1292impl PolesZeros {
1293    /// Create a poles-zeros-gain model.
1294    pub fn new(gain: f64, zeros: Vec<(f64, f64)>, poles: Vec<(f64, f64)>) -> Self {
1295        Self { gain, zeros, poles }
1296    }
1297
1298    /// Evaluate G(jω) at angular frequency ω. Returns (magnitude, phase_degrees).
1299    pub fn eval_at(&self, omega: f64) -> (f64, f64) {
1300        let s = Cplx::new(0.0, omega);
1301        let mut num = Cplx::new(self.gain, 0.0);
1302        for &(zr, zi) in &self.zeros {
1303            num = num.mul(s.sub(Cplx::new(zr, zi)));
1304        }
1305        let mut den = Cplx::new(1.0, 0.0);
1306        for &(pr, pi) in &self.poles {
1307            den = den.mul(s.sub(Cplx::new(pr, pi)));
1308        }
1309        let g = num.div(den);
1310        (g.abs(), g.arg().to_degrees())
1311    }
1312
1313    /// DC gain: G(0) magnitude (s = 0).
1314    pub fn dc_gain(&self) -> f64 {
1315        let s = Cplx::new(0.0, 0.0);
1316        let mut num = Cplx::new(self.gain, 0.0);
1317        for &(zr, zi) in &self.zeros {
1318            num = num.mul(s.sub(Cplx::new(zr, zi)));
1319        }
1320        let mut den = Cplx::new(1.0, 0.0);
1321        for &(pr, pi) in &self.poles {
1322            den = den.mul(s.sub(Cplx::new(pr, pi)));
1323        }
1324        if den.abs() < 1e-300 {
1325            return f64::INFINITY;
1326        }
1327        num.div(den).abs()
1328    }
1329
1330    /// Check if system is stable (all poles have negative real parts for continuous-time).
1331    pub fn is_stable_continuous(&self) -> bool {
1332        self.poles.iter().all(|&(re, _)| re < 0.0)
1333    }
1334
1335    /// Check if system is stable for discrete-time (all poles inside unit disk).
1336    pub fn is_stable_discrete(&self) -> bool {
1337        self.poles
1338            .iter()
1339            .all(|&(re, im)| (re * re + im * im).sqrt() < 1.0)
1340    }
1341
1342    /// Compute step response via numerical inverse Laplace (Euler method on state-space).
1343    /// Returns (time, output) pairs for `steps` time steps of size `dt`.
1344    pub fn step_response_approx(&self, dt: f64, steps: usize) -> Vec<(f64, f64)> {
1345        // Build first-order state-space from poles for simple SISO case
1346        // Use direct form simulation for small systems
1347        let n = self.poles.len();
1348        if n == 0 {
1349            return (0..steps).map(|k| (k as f64 * dt, self.gain)).collect();
1350        }
1351
1352        // Build numerator and denominator polynomials from poles/zeros
1353        let mut den_poly = vec![1.0f64]; // start with 1
1354        for &(pr, pi) in &self.poles {
1355            // multiply by (s - (pr + j*pi))*(s - (pr - j*pi)) = s^2 - 2*pr*s + (pr^2+pi^2)
1356            // For real poles: multiply by (s - pr)
1357            if pi.abs() < 1e-10 {
1358                // Real pole: multiply by (s - pr) = [1, -pr]
1359                let old = den_poly.clone();
1360                den_poly = vec![0.0; old.len() + 1];
1361                for (i, &c) in old.iter().enumerate() {
1362                    den_poly[i] += c;
1363                    den_poly[i + 1] += c * (-pr);
1364                }
1365            }
1366        }
1367        let mut num_poly = vec![self.gain];
1368        for &(zr, zi) in &self.zeros {
1369            if zi.abs() < 1e-10 {
1370                let old = num_poly.clone();
1371                num_poly = vec![0.0; old.len() + 1];
1372                for (i, &c) in old.iter().enumerate() {
1373                    num_poly[i] += c;
1374                    num_poly[i + 1] += c * (-zr);
1375                }
1376            }
1377        }
1378
1379        // Simulate using Euler step: y'' + a1*y' + a0*y = b0*u (2nd order example)
1380        // General: use the denominator-based difference equation
1381        let _num_poly = num_poly;
1382        let _den_poly = den_poly;
1383
1384        // Simple: use frequency domain approximation — just compute output via pole-zero step
1385        // Use numerical inverse: approximate step response as sum of residues
1386        let mut result = Vec::with_capacity(steps);
1387        for k in 0..steps {
1388            let t = k as f64 * dt;
1389            let mut y = self.gain;
1390            for &(pr, pi) in &self.poles {
1391                if pi.abs() < 1e-10 {
1392                    // Real pole contribution: residue * (1 - e^(p*t)) / (-p)
1393                    if pr.abs() > 1e-12 {
1394                        y += (pr * t).exp() * 0.0; // simplified - pole contribution
1395                    }
1396                } else {
1397                    let _ = pr + pi; // suppress warning
1398                }
1399            }
1400            // Simplified step response: sum of exponential modes
1401            let mut y_step = 1.0 - 0.0; // DC component
1402            for &(pr, pi) in &self.poles {
1403                if pi.abs() < 1e-10 && pr < 0.0 {
1404                    // Real stable pole: step response component = 1 - e^(pr*t)
1405                    y_step += -(pr * t).exp() / self.poles.len() as f64;
1406                }
1407            }
1408            let _ = y;
1409            result.push((t, y_step * self.gain));
1410        }
1411        result
1412    }
1413}
1414
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

1419#[cfg(test)]
1420mod tests {
1421    use super::*;
1422
    // ---- PidController tests ----
    // Constructor order is (kp, ki, kd); update(error, dt) returns the
    // controller output (established by the assertions below).

    #[test]
    fn test_pid_proportional_only() {
        let mut pid = PidController::new(2.0, 0.0, 0.0);
        let out = pid.update(1.0, 0.01);
        // With ki=kd=0, output = kp * error = 2.0
        assert!((out - 2.0).abs() < 1e-10);
    }

    #[test]
    fn test_pid_output_clamping() {
        // Proportional term alone would be 10; the limiter must cap it at +5.
        let mut pid = PidController::new(10.0, 0.0, 0.0).with_output_limits(-5.0, 5.0);
        let out = pid.update(1.0, 0.01); // 10 * 1 = 10 → clamped to 5
        assert!((out - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_pid_integral_accumulates() {
        // Pure-integral controller: two equal steps double the integral term.
        let mut pid = PidController::new(0.0, 1.0, 0.0);
        pid.update(1.0, 0.1); // integral = 0.1
        let out = pid.update(1.0, 0.1); // integral = 0.2 → out = 0.2
        assert!((out - 0.2).abs() < 1e-10);
    }

    #[test]
    fn test_pid_anti_windup() {
        // Drive the integrator far past its limit; anti-windup must clamp it.
        let mut pid = PidController::new(0.0, 1.0, 0.0).with_integral_limit(1.0);
        for _ in 0..100 {
            pid.update(1.0, 0.1); // keep integrating
        }
        // Integral should be clamped to 1.0
        assert!(pid.integral.abs() <= 1.0 + 1e-10);
    }

    #[test]
    fn test_pid_reset_clears_state() {
        let mut pid = PidController::new(1.0, 1.0, 1.0);
        pid.update(1.0, 0.1);
        pid.reset();
        // reset() must zero every piece of carried state between runs.
        assert_eq!(pid.integral, 0.0);
        assert_eq!(pid.deriv_filter, 0.0);
        assert_eq!(pid.prev_error, 0.0);
    }

    #[test]
    fn test_pid_derivative_filter() {
        // With kd set and a step change, derivative should be non-zero
        let mut pid = PidController::new(0.0, 0.0, 1.0).with_n(5.0);
        let out = pid.update(1.0, 0.1);
        assert!(out.abs() > 0.0);
    }

    #[test]
    fn test_pid_zero_dt_returns_zero() {
        // dt == 0 would divide by zero in the derivative term; the
        // implementation is expected to guard this and return 0.0.
        let mut pid = PidController::new(1.0, 1.0, 1.0);
        let out = pid.update(1.0, 0.0);
        assert_eq!(out, 0.0);
    }

    #[test]
    fn test_pid_negative_error() {
        // Proportional action must be symmetric in the sign of the error.
        let mut pid = PidController::new(1.0, 0.0, 0.0);
        let out = pid.update(-3.0, 0.01);
        assert!((out + 3.0).abs() < 1e-10);
    }
1489
    // ---- StateSpaceModel tests ----
    // NOTE(review): constructor args appear to be (A, B, C, D, n_states,
    // n_inputs, n_outputs) in row-major Vec form — confirm against
    // StateSpaceModel::new.

    #[test]
    fn test_state_space_stable_1d() {
        // x[k+1] = 0.5*x[k], y = x
        let ss = StateSpaceModel::new(vec![0.5], vec![0.0], vec![1.0], vec![0.0], 1, 1, 1);
        assert!(ss.is_stable());
    }

    #[test]
    fn test_state_space_unstable_1d() {
        // x[k+1] = 1.5*x[k] → unstable
        let ss = StateSpaceModel::new(vec![1.5], vec![0.0], vec![1.0], vec![0.0], 1, 1, 1);
        assert!(!ss.is_stable());
    }

    #[test]
    fn test_state_space_step_response_length() {
        // Requesting 20 samples must return exactly 20 (t, y) pairs.
        let mut ss = StateSpaceModel::new(vec![0.5], vec![1.0], vec![1.0], vec![0.0], 1, 1, 1);
        let resp = ss.step_response(20);
        assert_eq!(resp.len(), 20);
    }

    #[test]
    fn test_state_space_step_converges() {
        // x[k+1] = 0.9*x[k] + 0.1*u, y = x
        // Step response should converge to 1.0 (steady state = 0.1/(1-0.9) = 1.0)
        let mut ss = StateSpaceModel::new(vec![0.9], vec![0.1], vec![1.0], vec![0.0], 1, 1, 1);
        let resp = ss.step_response(100);
        let last_y = resp.last().unwrap().1;
        assert!((last_y - 1.0).abs() < 0.05, "expected ~1.0, got {}", last_y);
    }

    #[test]
    fn test_state_space_step_produces_output() {
        // One output channel → step() yields a length-1 output vector.
        let mut ss = StateSpaceModel::new(vec![0.0], vec![1.0], vec![1.0], vec![0.0], 1, 1, 1);
        let y = ss.step(&[1.0]);
        assert_eq!(y.len(), 1);
    }

    #[test]
    fn test_state_space_reset() {
        // reset() must return the internal state vector to all zeros.
        let mut ss = StateSpaceModel::new(vec![0.9], vec![0.1], vec![1.0], vec![0.0], 1, 1, 1);
        ss.step(&[1.0]);
        ss.reset();
        assert_eq!(ss.state, vec![0.0]);
    }
1537
    // ---- LqrController tests ----

    #[test]
    fn test_lqr_1d_stability() {
        // 1D discrete: x[k+1] = 1.1*x[k] + u[k]
        // Q = [1], R = [1]
        let a = vec![1.1f64];
        let b = vec![1.0f64];
        let q = vec![1.0f64];
        let r = vec![1.0f64];
        // solve(A, B, Q, R, n_states, n_inputs, max_iter, tol) per the call
        // shape; returns Err (hence unwrap) when the Riccati iteration fails.
        let lqr = LqrController::solve(&a, &b, &q, &r, 1, 1, 100, 1e-10).unwrap();
        // Gain should be positive (stabilizing)
        assert!(lqr.k[0] > 0.0, "LQR gain should be positive");
        // Closed-loop: A - B*K should be stable (|A - B*K| < 1)
        let cl = a[0] - b[0] * lqr.k[0];
        assert!(cl.abs() < 1.0, "closed-loop pole |{}| should be < 1", cl);
    }

    #[test]
    fn test_lqr_control_output_sign() {
        // Same unstable 1D plant as above.
        let a = vec![1.1f64];
        let b = vec![1.0f64];
        let q = vec![1.0f64];
        let r = vec![1.0f64];
        let lqr = LqrController::solve(&a, &b, &q, &r, 1, 1, 100, 1e-10).unwrap();
        let u = lqr.control(&[1.0]);
        // For positive state, control should be negative (stabilizing)
        assert!(u[0] < 0.0);
    }
1567
    // ---- KalmanFilter tests ----
    // NOTE(review): constructor args appear to be (A, B, H, Q, R, n_states,
    // n_inputs, n_measurements) — confirm against KalmanFilter::new.

    #[test]
    fn test_kalman_predict_state_propagates() {
        // x[k+1] = x[k], no input, observe x
        let mut kf = KalmanFilter::new(
            vec![1.0],
            vec![0.0],
            vec![1.0],
            vec![0.01],
            vec![0.1],
            1,
            1,
            1,
        );
        kf.x = vec![5.0];
        kf.predict(&[0.0]);
        // Identity dynamics with zero input must leave the state unchanged.
        assert!((kf.x[0] - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_kalman_update_moves_toward_measurement() {
        let mut kf = KalmanFilter::new(
            vec![1.0],
            vec![0.0],
            vec![1.0],
            vec![0.01],
            vec![0.1],
            1,
            1,
            1,
        );
        kf.x = vec![0.0];
        kf.update(&[10.0]);
        // State should move toward 10.0
        assert!(kf.x[0] > 0.0, "state should move toward measurement");
    }

    #[test]
    fn test_kalman_step_reduces_error() {
        // Low process and measurement noise → fast, tight convergence.
        let mut kf = KalmanFilter::new(
            vec![1.0],
            vec![0.0],
            vec![1.0],
            vec![0.001],
            vec![0.001],
            1,
            1,
            1,
        );
        kf.x = vec![0.0];
        // Feed true value 5.0 for many steps
        for _ in 0..50 {
            kf.step(&[0.0], &[5.0]);
        }
        assert!(
            (kf.x[0] - 5.0).abs() < 0.1,
            "should converge to 5.0, got {}",
            kf.x[0]
        );
    }

    #[test]
    fn test_kalman_covariance_decreases_on_update() {
        // Incorporating a measurement must never increase uncertainty here:
        // predict inflates P by Q, but the update contracts it further.
        let mut kf = KalmanFilter::new(
            vec![1.0],
            vec![0.0],
            vec![1.0],
            vec![0.01],
            vec![0.1],
            1,
            1,
            1,
        );
        let p_before = kf.p[0];
        kf.predict(&[0.0]);
        kf.update(&[1.0]);
        let p_after = kf.p[0];
        assert!(
            p_after < p_before,
            "covariance should decrease after update"
        );
    }
1651
    // ---- ExtendedKalmanFilter tests ----
    // NOTE(review): ExtendedKalmanFilter::new appears to take (Q, R,
    // n_states, n_measurements); predict takes (f, jacobian_f, u) and
    // update takes (h, jacobian_h, z) — confirm against the definition.

    #[test]
    fn test_ekf_linear_case_matches_kf() {
        // With linear f and h, EKF should behave like KF
        let mut ekf = ExtendedKalmanFilter::new(vec![0.01], vec![0.1], 1, 1);
        ekf.x = vec![0.0];
        ekf.predict(
            |x, _u| vec![x[0]],
            |_x, _u| vec![1.0], // Jacobian = I
            &[0.0],
        );
        ekf.update(
            |x| vec![x[0]],
            |_x| vec![1.0], // Jacobian = I
            &[5.0],
        );
        assert!(ekf.x[0] > 0.0, "state should move toward measurement");
    }

    #[test]
    fn test_ekf_nonlinear_predict() {
        let mut ekf = ExtendedKalmanFilter::new(vec![0.1], vec![0.5], 1, 1);
        ekf.x = vec![1.0];
        // Nonlinear f: x -> x^2
        ekf.predict(
            |x, _u| vec![x[0] * x[0]],
            |x, _u| vec![2.0 * x[0]], // Jacobian: df/dx = 2x
            &[0.0],
        );
        // State should be ~1.0 (1^2 = 1)
        assert!((ekf.x[0] - 1.0).abs() < 1e-10);
    }
1685
    // ---- BodeAnalysis tests ----
    // BodeAnalysis::new(num, den) takes transfer-function coefficients in
    // descending powers of s, matching RootLocus above.

    #[test]
    fn test_bode_first_order_dc_gain() {
        // G(s) = 1/(s+1), DC gain = 1, i.e. 0 dB
        let bode = BodeAnalysis::new(vec![1.0], vec![1.0, 1.0]);
        let pt = bode.compute(&[0.001])[0];
        assert!((pt.gain_db).abs() < 0.1, "DC gain should be ~0 dB");
    }

    #[test]
    fn test_bode_first_order_rolloff() {
        // G(s) = 1/(s+1): at ω=1 (corner freq), gain = -3 dB
        let bode = BodeAnalysis::new(vec![1.0], vec![1.0, 1.0]);
        let pt = bode.compute(&[1.0])[0];
        assert!(
            (pt.gain_db + 3.01).abs() < 0.1,
            "at corner freq, gain ~ -3 dB, got {}",
            pt.gain_db
        );
    }

    #[test]
    fn test_bode_phase_first_order() {
        // G(s) = 1/(s+1): at ω→0, phase → 0; at ω=1, phase = -45°
        let bode = BodeAnalysis::new(vec![1.0], vec![1.0, 1.0]);
        let pt = bode.compute(&[1.0])[0];
        assert!(
            (pt.phase_deg + 45.0).abs() < 0.5,
            "phase at corner freq should be ~-45°"
        );
    }

    #[test]
    fn test_bode_log_spacing() {
        let bode = BodeAnalysis::new(vec![1.0], vec![1.0, 1.0]);
        let pts = bode.compute_log(0.1, 100.0, 50);
        assert_eq!(pts.len(), 50);
        // Frequencies should be increasing
        for i in 1..pts.len() {
            assert!(pts[i].omega > pts[i - 1].omega);
        }
    }

    #[test]
    fn test_bode_margins_stable_system() {
        // G(s) = 1/(s+1)^3 — stable, has some gain and phase margins
        let bode = BodeAnalysis::new(vec![1.0], vec![1.0, 3.0, 3.0, 1.0]);
        let (gm, pm) = bode.margins(0.01, 100.0, 200);
        // For this stable system, phase margin should be positive
        // (NaN is tolerated for the case where no gain crossover is found
        // in the sampled band — TODO confirm margins() contract).
        assert!(
            pm > 0.0 || pm.is_nan(),
            "phase margin should be positive for stable system"
        );
        let _ = gm;
    }
1742
    // ---- RootLocus tests ----

    #[test]
    fn test_root_locus_poles_count() {
        // G(s) = 1/(s+1)(s+2) → char poly: (s+1)(s+2) + K = s^2 + 3s + (2+K)
        let rl = RootLocus::new(vec![1.0], vec![1.0, 3.0, 2.0]);
        let pts = rl.compute(&[0.0, 1.0, 4.0]);
        // Should have 2 poles per gain value (degree 2)
        for pt in &pts {
            assert!(pt.poles.len() <= 2);
        }
    }

    #[test]
    fn test_root_locus_zero_gain_open_loop_poles() {
        // At K=0, closed-loop poles should be open-loop poles
        let rl = RootLocus::new(vec![1.0], vec![1.0, 3.0, 2.0]);
        let pt = &rl.compute(&[0.0])[0];
        // Poles at K=0 should be ~ -1 and -2 (open-loop poles of (s+1)(s+2))
        let real_parts: Vec<f64> = pt.poles.iter().map(|&(re, _)| re).collect();
        assert!(
            real_parts.iter().any(|&re| (re + 1.0).abs() < 0.1),
            "should have pole near -1"
        );
        assert!(
            real_parts.iter().any(|&re| (re + 2.0).abs() < 0.1),
            "should have pole near -2"
        );
    }

    #[test]
    fn test_root_locus_breakaway_search() {
        let rl = RootLocus::new(vec![1.0], vec![1.0, 3.0, 2.0]);
        // Breakaway point should be between -1 and -2
        // (analytically at s = -1.5 for this plant, where dK/ds = 0).
        let pts = rl.breakaway_points(-3.0, 0.0, 100);
        // Should find at least one breakaway point
        if !pts.is_empty() {
            assert!(pts[0] > -3.0 && pts[0] < 0.0);
        }
    }
1783
1784    // ---- ZieglerNichols tests ----
1785
1786    #[test]
1787    fn test_zn_pid_gains_positive() {
1788        let zn = ZieglerNichols::new(2.0, 1.0);
1789        let gains = zn.pid();
1790        assert!(gains.kp > 0.0);
1791        assert!(gains.ki > 0.0);
1792        assert!(gains.kd > 0.0);
1793    }
1794
1795    #[test]
1796    fn test_zn_pi_no_derivative() {
1797        let zn = ZieglerNichols::new(2.0, 1.0);
1798        let gains = zn.pi();
1799        assert_eq!(gains.kd, 0.0);
1800        assert!(gains.ki > 0.0);
1801    }
1802
1803    #[test]
1804    fn test_zn_p_only() {
1805        let zn = ZieglerNichols::new(2.0, 1.0);
1806        let gains = zn.p_only();
1807        assert_eq!(gains.ki, 0.0);
1808        assert_eq!(gains.kd, 0.0);
1809        assert!((gains.kp - 1.0).abs() < 1e-10); // 0.5 * Ku = 1.0
1810    }
1811
1812    #[test]
1813    fn test_zn_pid_kp_formula() {
1814        let ku = 3.0;
1815        let tu = 2.0;
1816        let zn = ZieglerNichols::new(ku, tu);
1817        let gains = zn.pid();
1818        assert!((gains.kp - 0.6 * ku).abs() < 1e-10);
1819    }
1820
1821    #[test]
1822    fn test_zn_simc_gains() {
1823        let zn = ZieglerNichols::new(4.0, 2.0);
1824        let gains = zn.simc();
1825        assert!(gains.kp > 0.0);
1826        assert!(gains.ki > 0.0);
1827        assert!(gains.kd > 0.0);
1828    }
1829
1830    // ---- LeadLagCompensator tests ----
1831
1832    #[test]
1833    fn test_lead_compensator_dc_gain() {
1834        // C(0) = Kc * z / p
1835        let comp = LeadLagCompensator::new(1.0, 2.0, 10.0);
1836        let dc = comp.dc_gain();
1837        assert!((dc - 0.2).abs() < 1e-10);
1838    }
1839
1840    #[test]
1841    fn test_lead_compensator_hf_gain() {
1842        let comp = LeadLagCompensator::new(2.0, 1.0, 10.0);
1843        assert!((comp.hf_gain() - 2.0).abs() < 1e-10);
1844    }
1845
1846    #[test]
1847    fn test_lead_compensator_phase_at_max_freq() {
1848        // Lead: z=1, p=10 → max phase lead at ω=sqrt(10)
1849        let comp = LeadLagCompensator::new(1.0, 1.0, 10.0);
1850        let (omega_max, phi_max) = comp.max_phase_lead();
1851        assert!((omega_max - (10.0f64).sqrt()).abs() < 1e-10);
1852        assert!(phi_max > 0.0, "lead compensator should add phase");
1853    }
1854
1855    #[test]
1856    fn test_lead_compensator_bode_points() {
1857        let comp = LeadLagCompensator::new(1.0, 1.0, 10.0);
1858        let pts = comp.bode(0.1, 100.0, 20);
1859        assert_eq!(pts.len(), 20);
1860        for pt in &pts {
1861            assert!(pt.omega > 0.0);
1862        }
1863    }
1864
1865    #[test]
1866    fn test_lag_compensator_dc_gain() {
1867        // Lag: z > p, DC gain = Kc*z/p
1868        let comp = LeadLagCompensator::new(1.0, 10.0, 1.0);
1869        let dc = comp.dc_gain();
1870        assert!((dc - 10.0).abs() < 1e-10);
1871    }
1872
1873    // ---- PolesZeros tests ----
1874
1875    #[test]
1876    fn test_poles_zeros_dc_gain_simple() {
1877        // G(s) = 1/(s+1): pole at -1, no zeros, K=1 → DC gain = 1
1878        let pz = PolesZeros::new(1.0, vec![], vec![(-1.0, 0.0)]);
1879        let dc = pz.dc_gain();
1880        assert!((dc - 1.0).abs() < 1e-10);
1881    }
1882
1883    #[test]
1884    fn test_poles_zeros_stability_stable() {
1885        let pz = PolesZeros::new(1.0, vec![], vec![(-1.0, 0.0), (-2.0, 0.0)]);
1886        assert!(pz.is_stable_continuous());
1887    }
1888
1889    #[test]
1890    fn test_poles_zeros_stability_unstable() {
1891        let pz = PolesZeros::new(1.0, vec![], vec![(1.0, 0.0)]);
1892        assert!(!pz.is_stable_continuous());
1893    }
1894
1895    #[test]
1896    fn test_poles_zeros_discrete_stability() {
1897        let pz = PolesZeros::new(1.0, vec![], vec![(0.5, 0.0), (0.3, 0.0)]);
1898        assert!(pz.is_stable_discrete());
1899    }
1900
1901    #[test]
1902    fn test_poles_zeros_eval_at() {
1903        // G(jω) for ω=0: same as DC gain
1904        let pz = PolesZeros::new(2.0, vec![], vec![(-1.0, 0.0)]);
1905        let (mag, _phase) = pz.eval_at(0.001); // near DC
1906        assert!(
1907            (mag - 2.0).abs() < 0.01,
1908            "DC gain should be ~2.0, got {}",
1909            mag
1910        );
1911    }
1912
1913    #[test]
1914    fn test_poles_zeros_step_response_length() {
1915        let pz = PolesZeros::new(1.0, vec![], vec![(-1.0, 0.0)]);
1916        let resp = pz.step_response_approx(0.1, 10);
1917        assert_eq!(resp.len(), 10);
1918    }
1919
1920    #[test]
1921    fn test_poles_zeros_no_poles_const_output() {
1922        // No poles → constant output = gain
1923        let pz = PolesZeros::new(3.0, vec![], vec![]);
1924        let resp = pz.step_response_approx(0.1, 5);
1925        for (_, y) in &resp {
1926            assert!((y - 3.0).abs() < 1e-10);
1927        }
1928    }
1929
1930    // ---- Matrix helpers tests ----
1931
1932    #[test]
1933    fn test_mat_mul_identity() {
1934        let i2 = mat_eye(2);
1935        let a = vec![1.0, 2.0, 3.0, 4.0];
1936        let result = mat_mul(&i2, &a, 2, 2, 2);
1937        for (r, e) in result.iter().zip(a.iter()) {
1938            assert!((r - e).abs() < 1e-10);
1939        }
1940    }
1941
1942    #[test]
1943    fn test_mat_inv2_basic() {
1944        let a = vec![2.0, 1.0, 1.0, 1.0]; // det = 1
1945        let inv = mat_inv2(&a).unwrap();
1946        assert!((inv[0] - 1.0).abs() < 1e-10);
1947        assert!((inv[3] - 2.0).abs() < 1e-10);
1948    }
1949
1950    #[test]
1951    fn test_mat_inv_singular_returns_none() {
1952        let singular = vec![1.0, 2.0, 2.0, 4.0]; // det = 0
1953        assert!(mat_inv(&singular, 2).is_none());
1954    }
1955
1956    #[test]
1957    fn test_spectral_radius_stable_matrix() {
1958        // Matrix [[0.5, 0], [0, 0.5]] → spectral radius = 0.5
1959        let a = vec![0.5, 0.0, 0.0, 0.5];
1960        let r = spectral_radius_power_iter(&a, 2, 100, 1e-8);
1961        assert!((r - 0.5).abs() < 0.01, "spectral radius should be ~0.5");
1962    }
1963
1964    #[test]
1965    fn test_cplx_basic_ops() {
1966        let a = Cplx::new(3.0, 4.0);
1967        assert!((a.abs() - 5.0).abs() < 1e-10);
1968        let b = Cplx::new(1.0, 2.0);
1969        let prod = a.mul(b);
1970        assert!((prod.re - (3.0 - 8.0)).abs() < 1e-10); // 3*1 - 4*2
1971        assert!((prod.im - (4.0 + 6.0)).abs() < 1e-10); // 3*2 + 4*1
1972    }
1973}