oxiphysics_core/control.rs

1#![allow(clippy::needless_range_loop)]
2// Copyright 2026 COOLJAPAN OU (Team KitaSan)
3// SPDX-License-Identifier: Apache-2.0
4
5//! Control theory and systems: PID, state-space, transfer functions, Bode analysis,
6//! LQR, Kalman filter, MPC, adaptive control, fuzzy control, and neural controller.
7
8#![allow(dead_code)]
9#![allow(clippy::too_many_arguments)]
10
11use std::f64::consts::PI;
12
13// ─────────────────────────────────────────────────────────────────────────────
14// Helper math utilities
15// ─────────────────────────────────────────────────────────────────────────────
16
/// Matrix-vector multiply: y = A * b, where A is n×k and b has length k.
18fn mat_vec_mul(a: &[Vec<f64>], b: &[f64]) -> Vec<f64> {
19    let n = a.len();
20    let mut out = vec![0.0; n];
21    for i in 0..n {
22        for j in 0..b.len() {
23            out[i] += a[i][j] * b[j];
24        }
25    }
26    out
27}
28
29/// Matrix-matrix multiply: C = A * B.
30fn mat_mul(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
31    let rows = a.len();
32    let cols = b[0].len();
33    let inner = b.len();
34    let mut c = vec![vec![0.0; cols]; rows];
35    for i in 0..rows {
36        for j in 0..cols {
37            for k in 0..inner {
38                c[i][j] += a[i][k] * b[k][j];
39            }
40        }
41    }
42    c
43}
44
45/// Transpose of a matrix.
46fn mat_transpose(a: &[Vec<f64>]) -> Vec<Vec<f64>> {
47    if a.is_empty() {
48        return vec![];
49    }
50    let rows = a.len();
51    let cols = a[0].len();
52    let mut t = vec![vec![0.0; rows]; cols];
53    for i in 0..rows {
54        for j in 0..cols {
55            t[j][i] = a[i][j];
56        }
57    }
58    t
59}
60
61/// Add two vectors element-wise.
62fn vec_add(a: &[f64], b: &[f64]) -> Vec<f64> {
63    a.iter().zip(b.iter()).map(|(x, y)| x + y).collect()
64}
65
66/// Scale a vector.
67fn vec_scale(a: &[f64], s: f64) -> Vec<f64> {
68    a.iter().map(|x| x * s).collect()
69}
70
71/// Dot product.
72fn dot(a: &[f64], b: &[f64]) -> f64 {
73    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
74}
75
76/// Identity matrix n×n.
77fn eye(n: usize) -> Vec<Vec<f64>> {
78    let mut m = vec![vec![0.0; n]; n];
79    for i in 0..n {
80        m[i][i] = 1.0;
81    }
82    m
83}
84
85/// Add two matrices element-wise.
86fn mat_add(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
87    let rows = a.len();
88    let cols = a[0].len();
89    let mut c = vec![vec![0.0; cols]; rows];
90    for i in 0..rows {
91        for j in 0..cols {
92            c[i][j] = a[i][j] + b[i][j];
93        }
94    }
95    c
96}
97
98/// Scale a matrix.
99fn mat_scale(a: &[Vec<f64>], s: f64) -> Vec<Vec<f64>> {
100    a.iter()
101        .map(|row| row.iter().map(|x| x * s).collect())
102        .collect()
103}
104
105/// Simple matrix inverse using Gauss-Jordan elimination (n×n).
106fn mat_inv(a: &[Vec<f64>]) -> Option<Vec<Vec<f64>>> {
107    let n = a.len();
108    let mut aug: Vec<Vec<f64>> = a
109        .iter()
110        .enumerate()
111        .map(|(i, row)| {
112            let mut r = row.clone();
113            for j in 0..n {
114                r.push(if i == j { 1.0 } else { 0.0 });
115            }
116            r
117        })
118        .collect();
119    for col in 0..n {
120        let pivot = (col..n).max_by(|&i, &j| {
121            aug[i][col]
122                .abs()
123                .partial_cmp(&aug[j][col].abs())
124                .unwrap_or(std::cmp::Ordering::Equal)
125        })?;
126        aug.swap(col, pivot);
127        let diag = aug[col][col];
128        if diag.abs() < 1e-14 {
129            return None;
130        }
131        for j in 0..(2 * n) {
132            aug[col][j] /= diag;
133        }
134        for row in 0..n {
135            if row != col {
136                let factor = aug[row][col];
137                for j in 0..(2 * n) {
138                    let val = aug[col][j] * factor;
139                    aug[row][j] -= val;
140                }
141            }
142        }
143    }
144    Some(aug.iter().map(|row| row[n..].to_vec()).collect())
145}
146
147// ─────────────────────────────────────────────────────────────────────────────
148// PID Controller
149// ─────────────────────────────────────────────────────────────────────────────
150
151/// Proportional-integral-derivative (PID) controller with anti-windup and
152/// derivative filter.
153#[derive(Debug, Clone)]
154pub struct Pid {
155    /// Proportional gain.
156    pub kp: f64,
157    /// Integral gain.
158    pub ki: f64,
159    /// Derivative gain.
160    pub kd: f64,
161    /// Integral accumulator.
162    pub integral: f64,
163    /// Previous error for derivative computation.
164    pub prev_error: f64,
165    /// Filtered derivative state.
166    pub derivative_filter_state: f64,
167    /// Derivative filter coefficient N (cutoff factor).
168    pub filter_coeff: f64,
169    /// Anti-windup: maximum integral magnitude.
170    pub integral_limit: f64,
171    /// Output saturation maximum.
172    pub output_max: f64,
173    /// Output saturation minimum.
174    pub output_min: f64,
175}
176
177impl Pid {
178    /// Create a new PID controller with given gains.
179    pub fn new(kp: f64, ki: f64, kd: f64) -> Self {
180        Self {
181            kp,
182            ki,
183            kd,
184            integral: 0.0,
185            prev_error: 0.0,
186            derivative_filter_state: 0.0,
187            filter_coeff: 10.0,
188            integral_limit: f64::MAX,
189            output_max: f64::MAX,
190            output_min: f64::MIN,
191        }
192    }
193
194    /// Set gains.
195    pub fn set_gains(&mut self, kp: f64, ki: f64, kd: f64) {
196        self.kp = kp;
197        self.ki = ki;
198        self.kd = kd;
199    }
200
201    /// Set anti-windup integral limit.
202    pub fn set_integral_limit(&mut self, limit: f64) {
203        self.integral_limit = limit.abs();
204    }
205
206    /// Set output saturation limits.
207    pub fn set_output_limits(&mut self, min: f64, max: f64) {
208        self.output_min = min;
209        self.output_max = max;
210    }
211
    /// Compute the PID output for the current `error` and time step `dt`; non-positive `dt` returns 0.0.
213    pub fn compute(&mut self, error: f64, dt: f64) -> f64 {
214        if dt <= 0.0 {
215            return 0.0;
216        }
217        // Proportional
218        let p = self.kp * error;
219
220        // Integral with anti-windup
221        self.integral += error * dt;
222        self.integral = self
223            .integral
224            .clamp(-self.integral_limit, self.integral_limit);
225        let i = self.ki * self.integral;
226
        // Derivative with first-order low-pass filter (forward Euler discretization)
228        // Filtered derivative: d_f = N*(e - x_f), x_f += dt * d_f
229        let n = self.filter_coeff;
230        let d_f = n * (error - self.derivative_filter_state);
231        self.derivative_filter_state += dt * d_f;
232        let d = self.kd * d_f;
233
234        self.prev_error = error;
235
236        let output = p + i + d;
237        output.clamp(self.output_min, self.output_max)
238    }
239
240    /// Reset internal state.
241    pub fn reset(&mut self) {
242        self.integral = 0.0;
243        self.prev_error = 0.0;
244        self.derivative_filter_state = 0.0;
245    }
246}
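
// Illustrative sketch (not part of the original module): closing the loop with
// `Pid` around a simple first-order plant. The plant model, gains, limits, and
// step count below are assumptions chosen for demonstration only.
fn example_pid_closed_loop() {
    let mut pid = Pid::new(2.0, 1.0, 0.05);
    pid.set_output_limits(-10.0, 10.0);
    let (setpoint, dt) = (1.0_f64, 0.01_f64);
    let mut y = 0.0_f64; // plant output
    for _ in 0..2000 {
        let u = pid.compute(setpoint - y, dt);
        // Assumed plant: first-order lag ẏ = -y + u, integrated with forward Euler.
        y += dt * (-y + u);
    }
    // With these gains the output should have settled near the setpoint.
    assert!((y - setpoint).abs() < 0.05);
}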
247
248// ─────────────────────────────────────────────────────────────────────────────
249// State-Space Model
250// ─────────────────────────────────────────────────────────────────────────────
251
252/// Continuous or discrete linear state-space model: ẋ = Ax + Bu, y = Cx + Du.
253#[derive(Debug, Clone)]
254pub struct StateSpaceModel {
255    /// State matrix (n×n).
256    pub a: Vec<Vec<f64>>,
257    /// Input matrix (n×m).
258    pub b: Vec<Vec<f64>>,
259    /// Output matrix (p×n).
260    pub c: Vec<Vec<f64>>,
261    /// Feedthrough matrix (p×m).
262    pub d: Vec<Vec<f64>>,
263    /// Current state vector (length n).
264    pub state: Vec<f64>,
265    /// Whether this model is discrete-time.
266    pub is_discrete: bool,
267}
268
269impl StateSpaceModel {
270    /// Create a new state-space model.
271    pub fn new(a: Vec<Vec<f64>>, b: Vec<Vec<f64>>, c: Vec<Vec<f64>>, d: Vec<Vec<f64>>) -> Self {
272        let n = a.len();
273        Self {
274            a,
275            b,
276            c,
277            d,
278            state: vec![0.0; n],
279            is_discrete: false,
280        }
281    }
282
283    /// Set initial state.
284    pub fn set_state(&mut self, x0: Vec<f64>) {
285        self.state = x0;
286    }
287
    /// Advance the model one step (Euler integration for continuous models,
    /// exact state update for discrete models); returns the output y.
289    pub fn step(&mut self, u: &[f64], dt: f64) -> Vec<f64> {
290        if self.is_discrete {
291            // x[k+1] = A*x[k] + B*u[k]
292            let ax = mat_vec_mul(&self.a, &self.state);
293            let bu = mat_vec_mul(&self.b, u);
294            self.state = vec_add(&ax, &bu);
295        } else {
296            // Euler: x += dt*(A*x + B*u)
297            let ax = mat_vec_mul(&self.a, &self.state);
298            let bu = mat_vec_mul(&self.b, u);
299            let dx = vec_add(&ax, &bu);
300            let dx_dt = vec_scale(&dx, dt);
301            self.state = vec_add(&self.state, &dx_dt);
302        }
303        // y = C*x + D*u
304        let cx = mat_vec_mul(&self.c, &self.state);
305        let du = mat_vec_mul(&self.d, u);
306        vec_add(&cx, &du)
307    }
308
    /// Discretize with a zero-order-hold (ZOH) approximation: A_d = e^(A·dt), B_d ≈ dt·B.
310    pub fn discretize_zoh(&self, dt: f64) -> Self {
311        let ad = zoh_discretize(&self.a, dt);
312        // B_d ≈ dt * B (first-order ZOH approximation)
313        let bd = mat_scale(&self.b, dt);
314        let mut disc = Self::new(ad, bd, self.c.clone(), self.d.clone());
315        disc.is_discrete = true;
316        disc
317    }
318
319    /// Number of states.
320    pub fn n_states(&self) -> usize {
321        self.a.len()
322    }
323
324    /// Number of inputs.
325    pub fn n_inputs(&self) -> usize {
326        if self.b.is_empty() {
327            0
328        } else {
329            self.b[0].len()
330        }
331    }
332
333    /// Number of outputs.
334    pub fn n_outputs(&self) -> usize {
335        self.c.len()
336    }
337}
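
// Illustrative sketch (not part of the original module): building a two-state
// model, stepping it, and producing a discrete-time version. The double
// integrator and the 10 ms time step are assumptions for demonstration.
fn example_state_space_usage() {
    // Unit-mass double integrator: ẋ1 = x2, ẋ2 = u, y = x1.
    let a = vec![vec![0.0, 1.0], vec![0.0, 0.0]];
    let b = vec![vec![0.0], vec![1.0]];
    let c = vec![vec![1.0, 0.0]];
    let d = vec![vec![0.0]];
    let mut sys = StateSpaceModel::new(a, b, c, d);
    let y = sys.step(&[1.0], 0.01); // one Euler step under a unit force
    assert_eq!(y.len(), sys.n_outputs());

    // Discrete-time version (A_d = e^(A·dt), B_d ≈ dt·B) for digital control.
    let sys_d = sys.discretize_zoh(0.01);
    assert!(sys_d.is_discrete);
}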
338
339// ─────────────────────────────────────────────────────────────────────────────
340// Transfer Function
341// ─────────────────────────────────────────────────────────────────────────────
342
343/// SISO transfer function defined by numerator and denominator polynomials
344/// (descending powers).
345#[derive(Debug, Clone)]
346pub struct TransferFunction {
347    /// Numerator polynomial coefficients (highest power first).
348    pub num: Vec<f64>,
349    /// Denominator polynomial coefficients (highest power first).
350    pub den: Vec<f64>,
351}
352
353impl TransferFunction {
354    /// Create a new transfer function.
355    pub fn new(num: Vec<f64>, den: Vec<f64>) -> Self {
356        Self { num, den }
357    }
358
    /// Create from PID gains: C(s) = (kd*s^2 + kp*s + ki) / s.
360    pub fn from_pid(kp: f64, ki: f64, kd: f64) -> Self {
361        // C(s) = (kd*s^2 + kp*s + ki) / s
362        Self::new(vec![kd, kp, ki], vec![1.0, 0.0])
363    }
364
365    /// Evaluate the polynomial at complex point s = (re, im).
366    fn eval_poly_complex(coeffs: &[f64], re: f64, im: f64) -> (f64, f64) {
367        let mut re_acc = 0.0_f64;
368        let mut im_acc = 0.0_f64;
369        for &c in coeffs {
370            // multiply by s = re + j*im and add c
371            let new_re = re_acc * re - im_acc * im + c;
372            let new_im = re_acc * im + im_acc * re;
373            re_acc = new_re;
374            im_acc = new_im;
375        }
376        (re_acc, im_acc)
377    }
378
379    /// Evaluate H(jω) and return (magnitude, phase_deg).
380    pub fn frequency_response(&self, omega: f64) -> (f64, f64) {
381        let (nr, ni) = Self::eval_poly_complex(&self.num, 0.0, omega);
382        let (dr, di) = Self::eval_poly_complex(&self.den, 0.0, omega);
383        let num_mag = (nr * nr + ni * ni).sqrt();
384        let den_mag = (dr * dr + di * di).sqrt();
385        if den_mag < 1e-30 {
386            return (f64::INFINITY, 0.0);
387        }
388        let mag = num_mag / den_mag;
389        let phase = ni.atan2(nr) - di.atan2(dr);
390        (mag, phase.to_degrees())
391    }
392
393    /// DC gain (ω → 0), i.e., H(0).
394    pub fn dc_gain(&self) -> f64 {
395        let n_val = self.num.last().copied().unwrap_or(0.0);
396        let d_val = self.den.last().copied().unwrap_or(1.0);
397        if d_val.abs() < 1e-30 {
398            f64::INFINITY
399        } else {
400            n_val / d_val
401        }
402    }
403
    /// Compute the real poles (roots of the denominator) via Newton iteration
    /// with polynomial deflation; complex poles are not returned.
406    pub fn poles(&self) -> Vec<f64> {
407        poly_roots_real(&self.den)
408    }
409
410    /// Compute zeros (roots of numerator).
411    pub fn zeros(&self) -> Vec<f64> {
412        poly_roots_real(&self.num)
413    }
414}
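
// Illustrative sketch (not part of the original module): frequency response of
// an assumed first-order low-pass plant H(s) = 1/(s + 1).
fn example_transfer_function_usage() {
    let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0]);
    // At ω = 1 rad/s a first-order lag gives |H| = 1/√2 and a phase of -45°.
    let (mag, phase_deg) = tf.frequency_response(1.0);
    assert!((mag - std::f64::consts::FRAC_1_SQRT_2).abs() < 1e-9);
    assert!((phase_deg + 45.0).abs() < 1e-9);
    // DC gain and the single real pole at s = -1.
    assert!((tf.dc_gain() - 1.0).abs() < 1e-12);
    assert_eq!(tf.poles().len(), 1);
}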
415
416/// Find real roots of a polynomial with descending-power coefficients using
417/// numerical deflation (simple Newton-Raphson approximation).
418fn poly_roots_real(coeffs: &[f64]) -> Vec<f64> {
419    let n = coeffs.len();
420    if n <= 1 {
421        return vec![];
422    }
423    // Normalize
424    let leading = coeffs[0];
425    if leading.abs() < 1e-30 {
426        return poly_roots_real(&coeffs[1..]);
427    }
428    let p: Vec<f64> = coeffs.iter().map(|&c| c / leading).collect();
429    let degree = p.len() - 1;
430    if degree == 0 {
431        return vec![];
432    }
433    if degree == 1 {
434        return vec![-p[1]];
435    }
436    if degree == 2 {
437        let disc = p[1] * p[1] - 4.0 * p[2];
438        if disc >= 0.0 {
439            return vec![(-p[1] + disc.sqrt()) / 2.0, (-p[1] - disc.sqrt()) / 2.0];
440        }
441        return vec![];
442    }
443    // For higher degree, use Newton on the polynomial
444    let mut roots = Vec::new();
445    let mut remaining = p.clone();
446    while remaining.len() > 2 {
447        let mut x = 0.1_f64;
448        for _ in 0..200 {
449            let (fx, dfx) = eval_poly_and_deriv(&remaining, x);
450            if dfx.abs() < 1e-30 {
451                x += 0.01;
452                continue;
453            }
454            let dx = fx / dfx;
455            x -= dx;
456            if dx.abs() < 1e-12 {
457                break;
458            }
459        }
460        roots.push(x);
461        // deflate
462        remaining = poly_deflate(&remaining, x);
463    }
464    if remaining.len() == 2 {
465        roots.push(-remaining[1] / remaining[0]);
466    }
467    roots
468}
469
470fn eval_poly_and_deriv(p: &[f64], x: f64) -> (f64, f64) {
471    let mut f = p[0];
472    let mut df = 0.0_f64;
473    for &c in &p[1..] {
474        df = df * x + f;
475        f = f * x + c;
476    }
477    (f, df)
478}
479
480fn poly_deflate(p: &[f64], root: f64) -> Vec<f64> {
481    let n = p.len();
482    let mut q = vec![0.0; n - 1];
483    q[0] = p[0];
484    for i in 1..n - 1 {
485        q[i] = p[i] + q[i - 1] * root;
486    }
487    q
488}
489
490// ─────────────────────────────────────────────────────────────────────────────
491// Bode Analysis
492// ─────────────────────────────────────────────────────────────────────────────
493
494/// Bode frequency response analysis for a SISO transfer function.
495#[derive(Debug, Clone)]
496pub struct BodeAnalysis {
497    /// The transfer function being analyzed.
498    pub tf: TransferFunction,
499}
500
501impl BodeAnalysis {
502    /// Create from a transfer function.
503    pub fn new(tf: TransferFunction) -> Self {
504        Self { tf }
505    }
506
507    /// Compute magnitude in dB at angular frequency ω.
508    pub fn magnitude_db(&self, omega: f64) -> f64 {
509        let (mag, _) = self.tf.frequency_response(omega);
510        20.0 * mag.log10()
511    }
512
513    /// Compute phase in degrees at angular frequency ω.
514    pub fn phase_deg(&self, omega: f64) -> f64 {
515        let (_, phase) = self.tf.frequency_response(omega);
516        phase
517    }
518
519    /// Sweep frequencies \[ω_min, ω_max\] with `n` points (log-spaced).
520    /// Returns `(omegas, magnitudes_db, phases_deg)`.
521    pub fn sweep(
522        &self,
523        omega_min: f64,
524        omega_max: f64,
525        n: usize,
526    ) -> (Vec<f64>, Vec<f64>, Vec<f64>) {
527        let log_min = omega_min.ln();
528        let log_max = omega_max.ln();
529        let omegas: Vec<f64> = (0..n)
530            .map(|i| (log_min + (log_max - log_min) * i as f64 / (n - 1) as f64).exp())
531            .collect();
532        let mags: Vec<f64> = omegas.iter().map(|&w| self.magnitude_db(w)).collect();
533        let phases: Vec<f64> = omegas.iter().map(|&w| self.phase_deg(w)).collect();
534        (omegas, mags, phases)
535    }
536
537    /// Estimate gain margin and phase margin.
538    /// Returns `(gain_margin_db, phase_crossover_freq, phase_margin_deg, gain_crossover_freq)`.
539    pub fn stability_margins(
540        &self,
541        omega_min: f64,
542        omega_max: f64,
543        n: usize,
544    ) -> (f64, f64, f64, f64) {
545        let (omegas, mags, phases) = self.sweep(omega_min, omega_max, n);
546
547        // Phase crossover: where phase ≈ -180°
548        let mut phase_crossover_freq = 0.0;
549        let mut gain_at_phase_cross = 0.0;
550        for i in 1..n {
551            if phases[i - 1] > -180.0 && phases[i] <= -180.0 {
552                let t = (-180.0 - phases[i - 1]) / (phases[i] - phases[i - 1]);
553                phase_crossover_freq = omegas[i - 1] + t * (omegas[i] - omegas[i - 1]);
554                gain_at_phase_cross = mags[i - 1] + t * (mags[i] - mags[i - 1]);
555                break;
556            }
557        }
558        let gain_margin = -gain_at_phase_cross;
559
560        // Gain crossover: where magnitude ≈ 0 dB
561        let mut gain_crossover_freq = 0.0;
562        let mut phase_at_gain_cross = 0.0;
563        for i in 1..n {
564            if mags[i - 1] > 0.0 && mags[i] <= 0.0 {
565                let t = (0.0 - mags[i - 1]) / (mags[i] - mags[i - 1]);
566                gain_crossover_freq = omegas[i - 1] + t * (omegas[i] - omegas[i - 1]);
567                phase_at_gain_cross = phases[i - 1] + t * (phases[i] - phases[i - 1]);
568                break;
569            }
570        }
571        let phase_margin = 180.0 + phase_at_gain_cross;
572
573        (
574            gain_margin,
575            phase_crossover_freq,
576            phase_margin,
577            gain_crossover_freq,
578        )
579    }
580}
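
// Illustrative sketch (not part of the original module): sweeping an assumed
// open-loop plant L(s) = 4/(s + 1)³ and reading off its phase margin. The
// plant, frequency range, and point count are assumptions.
fn example_bode_usage() {
    let tf = TransferFunction::new(vec![4.0], vec![1.0, 3.0, 3.0, 1.0]);
    let bode = BodeAnalysis::new(tf);
    let (omegas, mags_db, phases_deg) = bode.sweep(0.01, 100.0, 400);
    assert_eq!(omegas.len(), 400);
    assert_eq!(mags_db.len(), phases_deg.len());

    // Gain crossover sits near ω ≈ 1.2 rad/s where the phase is about -153°,
    // so the estimated phase margin should come out positive (roughly 25–30°).
    let (_gm_db, _w_180, pm_deg, w_gc) = bode.stability_margins(0.01, 100.0, 400);
    assert!(pm_deg > 0.0 && w_gc > 0.0);
}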
581
582// ─────────────────────────────────────────────────────────────────────────────
583// LQR Controller
584// ─────────────────────────────────────────────────────────────────────────────
585
586/// Linear Quadratic Regulator (LQR): computes optimal gain matrix K via
587/// Discrete Algebraic Riccati Equation (DARE).
588#[derive(Debug, Clone)]
589pub struct LqrController {
590    /// Gain matrix K (m×n).
591    pub k: Vec<Vec<f64>>,
592    /// State weighting matrix Q (n×n).
593    pub q: Vec<Vec<f64>>,
594    /// Input weighting matrix R (m×m).
595    pub r: Vec<Vec<f64>>,
596}
597
598impl LqrController {
599    /// Solve DARE and compute LQR gain K for discrete system (A, B).
600    /// Returns `None` if the Riccati equation does not converge.
601    pub fn solve(
602        a: &[Vec<f64>],
603        b: &[Vec<f64>],
604        q: Vec<Vec<f64>>,
605        r: Vec<Vec<f64>>,
606    ) -> Option<Self> {
607        let k = riccati_solve_dare(a, b, &q, &r, 1000, 1e-10)?;
608        Some(Self { k, q, r })
609    }
610
611    /// Compute optimal control: u = -K * x.
612    pub fn compute(&self, x: &[f64]) -> Vec<f64> {
613        let kx = mat_vec_mul(&self.k, x);
614        vec_scale(&kx, -1.0)
615    }
616}
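
// Illustrative sketch (not part of the original module): LQR state feedback for
// an assumed discrete double integrator (sample time 0.1 s). The weights are
// arbitrary demonstration values.
fn example_lqr_usage() {
    let a = vec![vec![1.0, 0.1], vec![0.0, 1.0]];
    let b = vec![vec![0.005], vec![0.1]];
    let q = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
    let r = vec![vec![0.1]];
    if let Some(lqr) = LqrController::solve(&a, &b, q, r) {
        // u = -K·x: a positive position error should yield a restoring (negative) input.
        let u = lqr.compute(&[1.0, 0.0]);
        assert_eq!(u.len(), 1);
        assert!(u[0] < 0.0);
    }
}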
617
618// ─────────────────────────────────────────────────────────────────────────────
619// Kalman Filter
620// ─────────────────────────────────────────────────────────────────────────────
621
622/// Full linear Kalman filter with predict and update steps.
623#[derive(Debug, Clone)]
624pub struct KalmanFilter {
625    /// State estimate (length n).
626    pub x_hat: Vec<f64>,
627    /// Error covariance (n×n).
628    pub p: Vec<Vec<f64>>,
629}
630
631impl KalmanFilter {
632    /// Create with initial state estimate and covariance.
633    pub fn new(x0: Vec<f64>, p0: Vec<Vec<f64>>) -> Self {
634        Self { x_hat: x0, p: p0 }
635    }
636
637    /// Prediction step: x̂⁻ = A*x̂ + B*u, P⁻ = A*P*Aᵀ + Q.
638    pub fn predict(&mut self, a: &[Vec<f64>], b: &[Vec<f64>], u: &[f64], q: &[Vec<f64>]) {
639        let ax = mat_vec_mul(a, &self.x_hat);
640        let bu = mat_vec_mul(b, u);
641        self.x_hat = vec_add(&ax, &bu);
642
643        let at = mat_transpose(a);
644        let ap = mat_mul(a, &self.p);
645        let apat = mat_mul(&ap, &at);
646        self.p = mat_add(&apat, q);
647    }
648
649    /// Update step: K = P⁻Hᵀ(H*P⁻*Hᵀ+R)⁻¹, x̂ = x̂⁻ + K*(z - H*x̂⁻), P = (I-KH)*P⁻.
650    pub fn update(&mut self, h: &[Vec<f64>], r: &[Vec<f64>], z: &[f64]) -> Option<()> {
651        let n = self.x_hat.len();
652        let ht = mat_transpose(h);
653        let ph_t = mat_mul(&self.p, &ht);
654        let hph_t = mat_mul(h, &ph_t);
655        let s = mat_add(&hph_t, r);
656        let s_inv = mat_inv(&s)?;
657        // Kalman gain K = P*Hᵀ * S⁻¹
658        let k = mat_mul(&ph_t, &s_inv);
659        // Innovation: z - H*x̂
660        let hx = mat_vec_mul(h, &self.x_hat);
661        let innov: Vec<f64> = z.iter().zip(hx.iter()).map(|(zi, hxi)| zi - hxi).collect();
662        // State update
663        let k_innov = mat_vec_mul(&k, &innov);
664        self.x_hat = vec_add(&self.x_hat, &k_innov);
665        // Covariance update: P = (I - K*H) * P
666        let kh = mat_mul(&k, h);
667        let i = eye(n);
668        let i_minus_kh = mat_add(&i, &mat_scale(&kh, -1.0));
669        self.p = mat_mul(&i_minus_kh, &self.p);
670        Some(())
671    }
672}
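
// Illustrative sketch (not part of the original module): estimating a scalar
// random-walk state from noisy direct measurements. All covariances and the
// measurement sequence are assumptions.
fn example_kalman_usage() {
    let mut kf = KalmanFilter::new(vec![0.0], vec![vec![1.0]]);
    let a = vec![vec![1.0]]; // random walk: x[k+1] = x[k]
    let b = vec![vec![0.0]]; // no control input
    let q = vec![vec![0.01]]; // process noise covariance
    let h = vec![vec![1.0]]; // the state is measured directly
    let r = vec![vec![0.5]]; // measurement noise covariance
    for &z in &[0.9, 1.1, 1.0, 0.95] {
        kf.predict(&a, &b, &[0.0], &q);
        kf.update(&h, &r, &[z])
            .expect("1x1 innovation covariance should be invertible");
    }
    // The estimate should have been pulled toward the measurements near 1.0.
    assert!(kf.x_hat[0] > 0.5 && kf.x_hat[0] < 1.5);
}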
673
674// ─────────────────────────────────────────────────────────────────────────────
675// Model Predictive Control
676// ─────────────────────────────────────────────────────────────────────────────
677
678/// Simple linear MPC with prediction horizon N, tracking and input penalties.
679#[derive(Debug, Clone)]
680pub struct ModelPredictiveControl {
681    /// System A matrix (n×n).
682    pub a: Vec<Vec<f64>>,
683    /// System B matrix (n×m).
684    pub b: Vec<Vec<f64>>,
685    /// Prediction horizon.
686    pub horizon: usize,
687    /// State tracking weight (diagonal Q).
688    pub q_weight: f64,
689    /// Input weight (diagonal R).
690    pub r_weight: f64,
691    /// Input constraint: max magnitude.
692    pub u_max: f64,
693}
694
695impl ModelPredictiveControl {
696    /// Create a new MPC controller.
697    pub fn new(
698        a: Vec<Vec<f64>>,
699        b: Vec<Vec<f64>>,
700        horizon: usize,
701        q_weight: f64,
702        r_weight: f64,
703    ) -> Self {
704        Self {
705            a,
706            b,
707            horizon,
708            q_weight,
709            r_weight,
710            u_max: f64::MAX,
711        }
712    }
713
714    /// Set input constraint.
715    pub fn set_input_constraint(&mut self, u_max: f64) {
716        self.u_max = u_max;
717    }
718
719    /// Compute first optimal control action via unconstrained MPC (gradient step).
720    /// `x` is current state, `x_ref` is reference trajectory (constant horizon×n).
721    pub fn compute(&self, x: &[f64], x_ref: &[f64]) -> Vec<f64> {
722        let n = x.len();
723        let m = if self.b.is_empty() {
724            0
725        } else {
726            self.b[0].len()
727        };
728        if m == 0 {
729            return vec![];
730        }
731
732        // Receding horizon: predict states and compute gradient numerically
733        // Simple steepest-descent for unconstrained linear MPC
734        let mut u_seq: Vec<Vec<f64>> = vec![vec![0.0; m]; self.horizon];
735        let alpha = 0.01; // step size
736
737        for _iter in 0..50 {
738            // Evaluate cost gradient
739            let mut x_pred = x.to_vec();
740            let mut grads: Vec<Vec<f64>> = vec![vec![0.0; m]; self.horizon];
741
742            for k in 0..self.horizon {
743                let ax = mat_vec_mul(&self.a, &x_pred);
744                let bu = mat_vec_mul(&self.b, &u_seq[k]);
745                x_pred = vec_add(&ax, &bu);
746                // State error
747                let err: Vec<f64> = x_pred
748                    .iter()
749                    .zip(x_ref.iter().take(n))
750                    .map(|(xi, ri)| xi - ri)
751                    .collect();
752                let cost_grad_x = vec_scale(&err, 2.0 * self.q_weight);
753                // Propagate gradient back to u: dJ/du_k ≈ Bᵀ * dJ/dx_{k+1}
754                let bt = mat_transpose(&self.b);
755                let gu = mat_vec_mul(&bt, &cost_grad_x);
756                // Add input penalty gradient
757                let gu_r: Vec<f64> = u_seq[k]
758                    .iter()
759                    .zip(gu.iter())
760                    .map(|(ui, gi)| gi + 2.0 * self.r_weight * ui)
761                    .collect();
762                grads[k] = gu_r;
763            }
764            // Update u_seq
765            for k in 0..self.horizon {
766                for j in 0..m {
767                    u_seq[k][j] -= alpha * grads[k][j];
768                    u_seq[k][j] = u_seq[k][j].clamp(-self.u_max, self.u_max);
769                }
770            }
771        }
772
773        u_seq[0].clone()
774    }
775}
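
// Illustrative sketch (not part of the original module): regulating an assumed
// discrete double integrator toward the origin with a short horizon. Weights,
// horizon, and the input limit are demonstration values.
fn example_mpc_usage() {
    let a = vec![vec![1.0, 0.1], vec![0.0, 1.0]];
    let b = vec![vec![0.005], vec![0.1]];
    let mut mpc = ModelPredictiveControl::new(a, b, 10, 1.0, 0.01);
    mpc.set_input_constraint(2.0);
    // Current state: unit position error, zero velocity; reference is the origin.
    let u = mpc.compute(&[1.0, 0.0], &[0.0, 0.0]);
    assert_eq!(u.len(), 1);
    assert!(u[0].abs() <= 2.0); // first move respects the input constraint
}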
776
777// ─────────────────────────────────────────────────────────────────────────────
778// Adaptive Controller (MRAS / MIT rule)
779// ─────────────────────────────────────────────────────────────────────────────
780
781/// Model Reference Adaptive System (MRAS) controller using the MIT rule for
782/// parameter adaptation.
783#[derive(Debug, Clone)]
784pub struct AdaptiveController {
785    /// Reference model numerator (first order: \[b_m\]).
786    pub bm: f64,
787    /// Reference model denominator pole (am).
788    pub am: f64,
789    /// Adaptive parameter θ (scalar for SISO first order).
790    pub theta: f64,
791    /// Adaptation gain γ.
792    pub gamma: f64,
793    /// Reference model state.
794    pub ym: f64,
795    /// Plant output.
796    pub y: f64,
797    /// Integral of sensitivity signal.
798    pub sens_int: f64,
799}
800
801impl AdaptiveController {
802    /// Create with reference model parameters and adaptation gain.
803    pub fn new(bm: f64, am: f64, gamma: f64) -> Self {
804        Self {
805            bm,
806            am,
807            theta: 1.0,
808            gamma,
809            ym: 0.0,
810            y: 0.0,
811            sens_int: 0.0,
812        }
813    }
814
815    /// Update the adaptive controller.
816    /// `r` is reference input, `y_plant` is measured plant output, `dt` is time step.
817    /// Returns the control signal u.
818    pub fn update(&mut self, r: f64, y_plant: f64, dt: f64) -> f64 {
819        // Reference model: ẏ_m = -a_m * y_m + b_m * r
820        let dym = -self.am * self.ym + self.bm * r;
821        self.ym += dym * dt;
822
823        // Error
824        let e = y_plant - self.ym;
825
826        // MIT rule: dθ/dt = -γ * e * ∂e/∂θ ≈ -γ * e * y_m
827        let dtheta = -self.gamma * e * self.ym;
828        self.theta += dtheta * dt;
829
830        self.y = y_plant;
831        // Control signal
832        self.theta * r
833    }
834
835    /// Reset state.
836    pub fn reset(&mut self) {
837        self.ym = 0.0;
838        self.y = 0.0;
839        self.theta = 1.0;
840        self.sens_int = 0.0;
841    }
842}
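
// Illustrative sketch (not part of the original module): adapting a single
// feedforward gain so an unknown static plant y = k·u tracks the reference
// model. The plant gain, adaptation gain, and run length are assumptions.
fn example_adaptive_usage() {
    let mut mras = AdaptiveController::new(2.0, 2.0, 0.5);
    let plant_gain = 3.0; // unknown to the controller
    let (r, dt) = (1.0_f64, 0.01_f64);
    let mut u = 0.0_f64;
    for _ in 0..1000 {
        let y_plant = plant_gain * u; // assumed memoryless plant
        u = mras.update(r, y_plant, dt);
    }
    // Reference model steady state is (bm/am)·r = 1.0; after adaptation the
    // plant output k·u should be close to it.
    assert!((plant_gain * u - 1.0).abs() < 0.05);
}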
843
844// ─────────────────────────────────────────────────────────────────────────────
845// Fuzzy Controller (Mamdani)
846// ─────────────────────────────────────────────────────────────────────────────
847
848/// Triangular membership function: zero outside \[a, c\], peak at b.
849#[derive(Debug, Clone)]
850pub struct TriangleMf {
851    /// Left foot.
852    pub a: f64,
853    /// Peak.
854    pub b: f64,
855    /// Right foot.
856    pub c: f64,
857}
858
859impl TriangleMf {
860    /// Create a triangular membership function.
861    pub fn new(a: f64, b: f64, c: f64) -> Self {
862        Self { a, b, c }
863    }
864
865    /// Evaluate μ(x).
866    pub fn eval(&self, x: f64) -> f64 {
867        if x <= self.a || x >= self.c {
868            0.0
869        } else if x <= self.b {
870            (x - self.a) / (self.b - self.a)
871        } else {
872            (self.c - x) / (self.c - self.b)
873        }
874    }
875}
876
877/// Fuzzy rule: (input_mf_index) → (output_mf_index).
878#[derive(Debug, Clone)]
879pub struct FuzzyRule {
880    /// Index into input membership functions.
881    pub input_idx: usize,
882    /// Index into output membership functions.
883    pub output_idx: usize,
884}
885
886/// Mamdani fuzzy controller with triangular MFs and centroid defuzzification.
887#[derive(Debug, Clone)]
888pub struct FuzzyController {
889    /// Input membership functions.
890    pub input_mfs: Vec<TriangleMf>,
891    /// Output membership functions.
892    pub output_mfs: Vec<TriangleMf>,
893    /// Fuzzy rules.
894    pub rules: Vec<FuzzyRule>,
895    /// Output universe lower bound.
896    pub out_min: f64,
897    /// Output universe upper bound.
898    pub out_max: f64,
899    /// Number of discretization points for defuzzification.
900    pub n_points: usize,
901}
902
903impl FuzzyController {
904    /// Create a new fuzzy controller.
905    pub fn new(
906        input_mfs: Vec<TriangleMf>,
907        output_mfs: Vec<TriangleMf>,
908        rules: Vec<FuzzyRule>,
909        out_min: f64,
910        out_max: f64,
911    ) -> Self {
912        Self {
913            input_mfs,
914            output_mfs,
915            rules,
916            out_min,
917            out_max,
918            n_points: 100,
919        }
920    }
921
922    /// Compute output via Mamdani inference and centroid defuzzification.
923    pub fn compute(&self, input: f64) -> f64 {
924        // Fuzzify input
925        let memberships: Vec<f64> = self.input_mfs.iter().map(|mf| mf.eval(input)).collect();
926
        // Activate output MFs: each rule fires with its input membership; aggregate per output MF with max
928        let mut activated = vec![0.0_f64; self.output_mfs.len()];
929        for rule in &self.rules {
930            if rule.input_idx < memberships.len() && rule.output_idx < activated.len() {
931                activated[rule.output_idx] =
932                    activated[rule.output_idx].max(memberships[rule.input_idx]);
933            }
934        }
935
936        // Centroid defuzzification
937        let step = (self.out_max - self.out_min) / self.n_points as f64;
938        let mut num = 0.0_f64;
939        let mut den = 0.0_f64;
940        for i in 0..self.n_points {
941            let y = self.out_min + (i as f64 + 0.5) * step;
942            // Aggregate: max of clipped output MFs
943            let mu = self
944                .output_mfs
945                .iter()
946                .enumerate()
947                .fold(0.0_f64, |acc, (j, mf)| {
948                    let clipped = mf.eval(y).min(activated[j]);
949                    acc.max(clipped)
950                });
951            num += y * mu;
952            den += mu;
953        }
954        if den < 1e-12 {
955            (self.out_min + self.out_max) / 2.0
956        } else {
957            num / den
958        }
959    }
960}
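
// Illustrative sketch (not part of the original module): a three-rule
// negative / zero / positive error-to-correction map. The membership functions,
// rule table, and output universe are assumptions.
fn example_fuzzy_usage() {
    let error_mfs = vec![
        TriangleMf::new(-2.0, -1.0, 0.0), // negative error
        TriangleMf::new(-1.0, 0.0, 1.0),  // roughly zero error
        TriangleMf::new(0.0, 1.0, 2.0),   // positive error
    ];
    let correction_mfs = vec![
        TriangleMf::new(-2.0, -1.0, 0.0), // push down
        TriangleMf::new(-1.0, 0.0, 1.0),  // hold
        TriangleMf::new(0.0, 1.0, 2.0),   // push up
    ];
    let rules = vec![
        FuzzyRule { input_idx: 0, output_idx: 0 },
        FuzzyRule { input_idx: 1, output_idx: 1 },
        FuzzyRule { input_idx: 2, output_idx: 2 },
    ];
    let ctrl = FuzzyController::new(error_mfs, correction_mfs, rules, -2.0, 2.0);
    // A clearly positive error should defuzzify to a positive correction.
    assert!(ctrl.compute(1.0) > 0.0);
    // A clearly negative error should defuzzify to a negative correction.
    assert!(ctrl.compute(-1.0) < 0.0);
}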
961
962// ─────────────────────────────────────────────────────────────────────────────
963// Neural Controller (MLP)
964// ─────────────────────────────────────────────────────────────────────────────
965
966/// Simple experience replay buffer for neural controller training.
967#[derive(Debug, Clone)]
968pub struct ReplayBuffer {
969    /// Stored (state, action, reward) tuples.
970    pub buffer: Vec<(Vec<f64>, Vec<f64>, f64)>,
971    /// Maximum capacity.
972    pub capacity: usize,
973    /// Write index.
974    pub idx: usize,
975}
976
977impl ReplayBuffer {
978    /// Create a new replay buffer with given capacity.
979    pub fn new(capacity: usize) -> Self {
980        Self {
981            buffer: Vec::with_capacity(capacity),
982            capacity,
983            idx: 0,
984        }
985    }
986
987    /// Push a new experience.
988    pub fn push(&mut self, state: Vec<f64>, action: Vec<f64>, reward: f64) {
989        let entry = (state, action, reward);
990        if self.buffer.len() < self.capacity {
991            self.buffer.push(entry);
992        } else {
993            self.buffer[self.idx % self.capacity] = entry;
994        }
995        self.idx += 1;
996    }
997
998    /// Number of stored experiences.
999    pub fn len(&self) -> usize {
1000        self.buffer.len()
1001    }
1002
1003    /// Check if buffer is empty.
1004    pub fn is_empty(&self) -> bool {
1005        self.buffer.is_empty()
1006    }
1007}
1008
1009/// Simple multi-layer perceptron (MLP) neural network controller.
1010#[derive(Debug, Clone)]
1011pub struct NeuralController {
1012    /// Layer weights: each element is a weight matrix (out×in).
1013    pub weights: Vec<Vec<Vec<f64>>>,
1014    /// Layer biases: each element is a bias vector.
1015    pub biases: Vec<Vec<f64>>,
1016    /// Learning rate.
1017    pub lr: f64,
1018    /// Replay buffer.
1019    pub replay: ReplayBuffer,
1020}
1021
1022impl NeuralController {
1023    /// Create an MLP controller with given layer sizes.
1024    /// `layer_sizes` includes input and output dimensions.
1025    pub fn new(layer_sizes: &[usize], lr: f64) -> Self {
1026        let n_layers = layer_sizes.len() - 1;
1027        let mut weights = Vec::with_capacity(n_layers);
1028        let mut biases = Vec::with_capacity(n_layers);
1029        for i in 0..n_layers {
1030            let rows = layer_sizes[i + 1];
1031            let cols = layer_sizes[i];
            // Xavier-scaled, deterministic initialization (sine pattern in place of an RNG)
1033            let scale = (2.0 / (rows + cols) as f64).sqrt();
1034            let w: Vec<Vec<f64>> = (0..rows)
1035                .map(|r| {
1036                    (0..cols)
1037                        .map(|c| ((r * cols + c) as f64 * 0.1).sin() * scale)
1038                        .collect()
1039                })
1040                .collect();
1041            weights.push(w);
1042            biases.push(vec![0.0; rows]);
1043        }
1044        Self {
1045            weights,
1046            biases,
1047            lr,
1048            replay: ReplayBuffer::new(10000),
1049        }
1050    }
1051
1052    /// ReLU activation.
1053    fn relu(x: f64) -> f64 {
1054        x.max(0.0)
1055    }
1056
1057    /// Tanh activation.
1058    fn tanh(x: f64) -> f64 {
1059        x.tanh()
1060    }
1061
1062    /// Forward pass: returns output and all layer activations (for backprop).
1063    pub fn forward(&self, input: &[f64]) -> (Vec<f64>, Vec<Vec<f64>>) {
1064        let mut activations = vec![input.to_vec()];
1065        let n_layers = self.weights.len();
1066        for layer in 0..n_layers {
1067            let w = &self.weights[layer];
1068            let b = &self.biases[layer];
1069            let rows = w.len();
1070            let z: Vec<f64> = (0..rows)
1071                .map(|i| {
1072                    let s: f64 = w[i]
1073                        .iter()
1074                        .zip(activations.last().expect("activations is non-empty").iter())
1075                        .map(|(wi, ai)| wi * ai)
1076                        .sum();
1077                    s + b[i]
1078                })
1079                .collect();
1080            // Use tanh on hidden, linear on output
1081            let a = if layer < n_layers - 1 {
1082                z.iter().map(|&zi| Self::tanh(zi)).collect()
1083            } else {
1084                z
1085            };
1086            activations.push(a);
1087        }
1088        let out = activations
1089            .last()
1090            .expect("activations is non-empty")
1091            .clone();
1092        (out, activations)
1093    }
1094
    /// Supervised gradient step: one backpropagation update of the squared-error loss.
1096    /// `target` is the desired output, `input` is the current state.
1097    pub fn gradient_update(&mut self, input: &[f64], target: &[f64]) {
1098        let n_layers = self.weights.len();
1099        let (_, activations) = self.forward(input);
1100
1101        // MSE loss gradient at output layer
1102        let out = &activations[n_layers];
1103        let mut delta: Vec<f64> = out.iter().zip(target.iter()).map(|(o, t)| o - t).collect();
1104
1105        for layer in (0..n_layers).rev() {
1106            let a_prev = &activations[layer];
1107            let rows = self.weights[layer].len();
1108            // Weight gradient
1109            let new_delta: Vec<f64> = if layer > 0 {
1110                let cols = self.weights[layer][0].len();
1111                let mut nd = vec![0.0; cols];
1112                for i in 0..rows {
1113                    for j in 0..cols {
1114                        nd[j] += self.weights[layer][i][j] * delta[i];
1115                    }
1116                }
1117                // Backprop through tanh: (1 - a²)
1118                nd.iter()
1119                    .zip(activations[layer].iter())
1120                    .map(|(d, a)| d * (1.0 - a * a))
1121                    .collect()
1122            } else {
1123                vec![0.0; a_prev.len()]
1124            };
1125            // Update weights and biases
1126            for i in 0..rows {
1127                for j in 0..a_prev.len() {
1128                    self.weights[layer][i][j] -= self.lr * delta[i] * a_prev[j];
1129                }
1130                self.biases[layer][i] -= self.lr * delta[i];
1131            }
1132            delta = new_delta;
1133        }
1134    }
1135
1136    /// Store experience in replay buffer.
1137    pub fn remember(&mut self, state: Vec<f64>, action: Vec<f64>, reward: f64) {
1138        self.replay.push(state, action, reward);
1139    }
1140
1141    /// Compute control output for given state.
1142    pub fn compute(&self, state: &[f64]) -> Vec<f64> {
1143        let (out, _) = self.forward(state);
1144        out
1145    }
1146}
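
// Illustrative sketch (not part of the original module): fitting the MLP to a
// single (state, target) pair with repeated gradient steps. Layer sizes,
// learning rate, data, and tolerance are assumptions.
fn example_neural_controller_usage() {
    let mut nn = NeuralController::new(&[2, 8, 1], 0.05);
    let state = [0.5, -0.25];
    let target = [0.3];
    for _ in 0..500 {
        nn.gradient_update(&state, &target);
    }
    let out = nn.compute(&state);
    // After many supervised steps the output should sit near the target.
    assert!((out[0] - target[0]).abs() < 0.05);
    // Experiences can also be stored for later batch-style training.
    nn.remember(state.to_vec(), out, 0.0);
}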
1147
1148// ─────────────────────────────────────────────────────────────────────────────
1149// Helper Functions
1150// ─────────────────────────────────────────────────────────────────────────────
1151
/// Zero-order hold (ZOH) discretization of a continuous-time A matrix.
/// Uses a truncated Taylor series of the matrix exponential:
/// e^(A*dt) ≈ I + A*dt + (A*dt)²/2! + … (10 terms).
1154pub fn zoh_discretize(a: &[Vec<f64>], dt: f64) -> Vec<Vec<f64>> {
1155    let n = a.len();
1156    let mut result = eye(n);
1157    let mut term = eye(n);
1158    let a_dt = mat_scale(a, dt);
1159    for k in 1..=10 {
1160        term = mat_mul(&term, &a_dt);
1161        let scaled = mat_scale(&term, 1.0 / factorial(k) as f64);
1162        result = mat_add(&result, &scaled);
1163    }
1164    result
1165}
1166
1167fn factorial(n: u64) -> u64 {
1168    (1..=n).product()
1169}
1170
1171/// Solve the Discrete Algebraic Riccati Equation (DARE) iteratively.
1172/// Returns gain matrix K = (R + BᵀPB)⁻¹ * BᵀPA.
1173pub fn riccati_solve_dare(
1174    a: &[Vec<f64>],
1175    b: &[Vec<f64>],
1176    q: &[Vec<f64>],
1177    r: &[Vec<f64>],
1178    max_iter: usize,
1179    tol: f64,
1180) -> Option<Vec<Vec<f64>>> {
1181    let n = a.len();
    if b.is_empty() {
        return None;
    }
1188
1189    let at = mat_transpose(a);
1190    let bt = mat_transpose(b);
1191
1192    let mut p = q.to_vec();
1193    for _ in 0..max_iter {
1194        // K = (R + Bᵀ P B)⁻¹ Bᵀ P A
1195        let pb = mat_mul(&p, b);
1196        let btpb = mat_mul(&bt, &pb);
1197        let r_btpb = mat_add(r, &btpb);
1198        let r_btpb_inv = mat_inv(&r_btpb)?;
1199        let btp = mat_mul(&bt, &p);
1200        let btpa = mat_mul(&btp, a);
1201        let k = mat_mul(&r_btpb_inv, &btpa);
1202
1203        // P_new = Q + Aᵀ P A - Aᵀ P B K
1204        let atpa = mat_mul(&at, &mat_mul(&p, a));
1205        let atpbk = mat_mul(&at, &mat_mul(&pb, &k));
1206        let p_new = mat_add(q, &mat_add(&atpa, &mat_scale(&atpbk, -1.0)));
1207
1208        // Check convergence
1209        let mut max_diff = 0.0_f64;
1210        for i in 0..n {
1211            for j in 0..n {
1212                max_diff = max_diff.max((p_new[i][j] - p[i][j]).abs());
1213            }
1214        }
1215        p = p_new;
1216        if max_diff < tol {
1217            // Compute final K
1218            let pb2 = mat_mul(&p, b);
1219            let btpb2 = mat_mul(&bt, &pb2);
1220            let r_btpb2 = mat_add(r, &btpb2);
1221            let r_btpb2_inv = mat_inv(&r_btpb2)?;
1222            let btp2 = mat_mul(&bt, &p);
1223            let btpa2 = mat_mul(&btp2, a);
1224            let k_final = mat_mul(&r_btpb2_inv, &btpa2);
1225            return Some(k_final);
1226        }
1227    }
1228    None
1229}
1230
1231/// Compute controllability matrix: \[B, AB, A²B, ..., A^(n-1)B\].
1232pub fn controllability_matrix(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
1233    let n = a.len();
1234    let m = if b.is_empty() {
1235        return vec![];
1236    } else {
1237        b[0].len()
1238    };
1239    let total_cols = n * m;
1240    let mut c_mat = vec![vec![0.0; total_cols]; n];
1241
1242    let mut ak_b = b.to_vec();
1243    for k in 0..n {
1244        for i in 0..n {
1245            for j in 0..m {
1246                c_mat[i][k * m + j] = ak_b[i][j];
1247            }
1248        }
1249        if k < n - 1 {
1250            ak_b = mat_mul(a, &ak_b);
1251        }
1252    }
1253    c_mat
1254}
1255
1256/// Pole placement: compute state feedback gain K such that eigenvalues of
1257/// (A - B*K) match desired poles (Ackermann's formula, SISO only).
1258pub fn pole_placement(a: &[Vec<f64>], b: &[Vec<f64>], poles: &[f64]) -> Option<Vec<Vec<f64>>> {
1259    let n = a.len();
1260    if b.is_empty() || b[0].len() != 1 || poles.len() != n {
1261        return None; // Only SISO supported
1262    }
1263
1264    // Desired characteristic polynomial
1265    let mut p_coeffs = vec![1.0_f64];
1266    for &pole in poles {
1267        // Multiply by (s - pole): shift and subtract
1268        let mut new_coeffs = vec![0.0; p_coeffs.len() + 1];
1269        for (i, &c) in p_coeffs.iter().enumerate() {
1270            new_coeffs[i] += c;
1271            new_coeffs[i + 1] -= c * pole;
1272        }
1273        p_coeffs = new_coeffs;
1274    }
1275
1276    // Controllability matrix
1277    let c_mat = controllability_matrix(a, b);
1278
1279    // Characteristic polynomial of A (Cayley-Hamilton)
1280    let a_poly = char_poly(a);
1281
1282    // Ackermann's formula: K = e_n * C^-1 * phi(A)
1283    // phi(A) = A^n + alpha_1 * A^(n-1) + ... + alpha_n * I
1284    let c_inv = mat_inv(&c_mat)?;
1285
1286    // Evaluate phi(A)
1287    let mut phi_a = mat_scale(&eye(n), p_coeffs[n]);
1288    let mut ak = eye(n);
1289    for k in (0..n).rev() {
1290        ak = mat_mul(&ak, a);
1291        phi_a = mat_add(&phi_a, &mat_scale(&ak, p_coeffs[k]));
1292    }
1293    let _ = a_poly;
1294
1295    // e_n = last row of identity
1296    let en: Vec<Vec<f64>> = vec![(0..n).map(|j| if j == n - 1 { 1.0 } else { 0.0 }).collect()];
1297    let en_cinv = mat_mul(&en, &c_inv);
1298    let k = mat_mul(&en_cinv, &phi_a);
1299    Some(k)
1300}
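
// Illustrative sketch (not part of the original module): placing both
// closed-loop poles of an assumed discrete double integrator at z = 0.5 with
// the Ackermann routine above.
fn example_pole_placement_usage() {
    let a = vec![vec![1.0, 0.1], vec![0.0, 1.0]];
    let b = vec![vec![0.005], vec![0.1]];
    if let Some(k) = pole_placement(&a, &b, &[0.5, 0.5]) {
        // K comes back as a 1×n row matrix for this SISO system; u = -K·x.
        assert_eq!(k.len(), 1);
        assert_eq!(k[0].len(), 2);
    }
}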
1301
1302/// Compute characteristic polynomial coefficients of matrix A (length n+1, leading 1).
1303fn char_poly(a: &[Vec<f64>]) -> Vec<f64> {
1304    let n = a.len();
1305    // Use Faddeev-LeVerrier algorithm
1306    let mut m = eye(n);
1307    let mut coeffs = vec![1.0_f64];
1308    for k in 1..=n {
1309        let am = mat_mul(a, &m);
1310        let tr: f64 = (0..n).map(|i| am[i][i]).sum();
1311        let c = -tr / k as f64;
1312        coeffs.push(c);
1313        m = mat_add(&am, &mat_scale(&eye(n), c));
1314    }
1315    coeffs
1316}
1317
/// Constant 2π exported for convenience.
1319pub const TWO_PI: f64 = 2.0 * PI;
1320
1321// ─────────────────────────────────────────────────────────────────────────────
1322// Tests
1323// ─────────────────────────────────────────────────────────────────────────────
1324
1325#[cfg(test)]
1326mod tests {
1327    use super::*;
1328
1329    // ── PID tests ────────────────────────────────────────────────────────────
1330
1331    #[test]
1332    fn pid_proportional_only() {
1333        let mut pid = Pid::new(2.0, 0.0, 0.0);
1334        let out = pid.compute(1.0, 0.01);
1335        assert!((out - 2.0).abs() < 1e-10, "P-only: out={out}");
1336    }
1337
1338    #[test]
1339    fn pid_integral_accumulates() {
1340        let mut pid = Pid::new(0.0, 1.0, 0.0);
1341        pid.compute(1.0, 0.1);
1342        pid.compute(1.0, 0.1);
1343        let out = pid.compute(1.0, 0.1);
1344        // integral ≈ 0.3, output = ki * integral = 0.3
1345        assert!((out - 0.3).abs() < 1e-6, "I-only: out={out}");
1346    }
1347
1348    #[test]
1349    fn pid_anti_windup_clamps_integral() {
1350        let mut pid = Pid::new(0.0, 1.0, 0.0);
1351        pid.set_integral_limit(1.0);
1352        for _ in 0..100 {
1353            pid.compute(1.0, 0.1);
1354        }
1355        assert!(pid.integral <= 1.0 + 1e-10);
1356    }
1357
1358    #[test]
1359    fn pid_output_saturation() {
1360        let mut pid = Pid::new(100.0, 0.0, 0.0);
1361        pid.set_output_limits(-1.0, 1.0);
1362        let out = pid.compute(10.0, 0.01);
1363        assert!((out - 1.0).abs() < 1e-10, "saturated output: {out}");
1364    }
1365
1366    #[test]
1367    fn pid_reset_clears_state() {
1368        let mut pid = Pid::new(1.0, 1.0, 1.0);
1369        pid.compute(5.0, 0.1);
1370        pid.reset();
1371        assert!(pid.integral.abs() < 1e-12);
1372        assert!(pid.prev_error.abs() < 1e-12);
1373    }
1374
1375    #[test]
1376    fn pid_zero_dt_returns_zero() {
1377        let mut pid = Pid::new(1.0, 1.0, 1.0);
1378        let out = pid.compute(1.0, 0.0);
1379        assert!((out).abs() < 1e-12);
1380    }
1381
1382    #[test]
1383    fn pid_set_gains() {
1384        let mut pid = Pid::new(1.0, 0.0, 0.0);
1385        pid.set_gains(2.0, 3.0, 4.0);
1386        assert!((pid.kp - 2.0).abs() < 1e-12);
1387        assert!((pid.ki - 3.0).abs() < 1e-12);
1388        assert!((pid.kd - 4.0).abs() < 1e-12);
1389    }
1390
1391    // ── StateSpaceModel tests ────────────────────────────────────────────────
1392
1393    #[test]
1394    fn state_space_step_first_order() {
        // First-order lag: ẋ = -x + u, y = x
1396        let a = vec![vec![-1.0]];
1397        let b = vec![vec![1.0]];
1398        let c = vec![vec![1.0]];
1399        let d = vec![vec![0.0]];
1400        let mut ss = StateSpaceModel::new(a, b, c, d);
1401        let y = ss.step(&[1.0], 0.01);
1402        // x += dt*(-x+u) = 0 + 0.01*(0+1) = 0.01
1403        assert!((y[0] - 0.01).abs() < 1e-10, "y={}", y[0]);
1404    }
1405
1406    #[test]
1407    fn state_space_n_states() {
1408        let a = vec![vec![0.0, 1.0], vec![-1.0, 0.0]];
1409        let b = vec![vec![0.0], vec![1.0]];
1410        let c = vec![vec![1.0, 0.0]];
1411        let d = vec![vec![0.0]];
1412        let ss = StateSpaceModel::new(a, b, c, d);
1413        assert_eq!(ss.n_states(), 2);
1414        assert_eq!(ss.n_inputs(), 1);
1415        assert_eq!(ss.n_outputs(), 1);
1416    }
1417
1418    #[test]
1419    fn state_space_discrete_step() {
1420        let a = vec![vec![0.5]];
1421        let b = vec![vec![1.0]];
1422        let c = vec![vec![1.0]];
1423        let d = vec![vec![0.0]];
1424        let mut ss = StateSpaceModel::new(a, b, c, d);
1425        ss.is_discrete = true;
1426        ss.set_state(vec![1.0]);
1427        let y = ss.step(&[0.0], 0.01);
1428        // x_new = 0.5 * 1 + 1.0 * 0 = 0.5
1429        assert!((y[0] - 0.5).abs() < 1e-10, "y={}", y[0]);
1430    }
1431
1432    // ── TransferFunction tests ───────────────────────────────────────────────
1433
1434    #[test]
1435    fn tf_dc_gain_unity() {
1436        let tf = TransferFunction::new(vec![1.0], vec![1.0]);
1437        assert!((tf.dc_gain() - 1.0).abs() < 1e-10);
1438    }
1439
1440    #[test]
1441    fn tf_dc_gain_ratio() {
1442        let tf = TransferFunction::new(vec![2.0], vec![4.0]);
1443        assert!((tf.dc_gain() - 0.5).abs() < 1e-10);
1444    }
1445
1446    #[test]
1447    fn tf_from_pid_structure() {
1448        let tf = TransferFunction::from_pid(1.0, 2.0, 0.5);
1449        // num = [0.5, 1, 2], den = [1, 0]
1450        assert_eq!(tf.num.len(), 3);
1451        assert_eq!(tf.den.len(), 2);
1452    }
1453
1454    #[test]
1455    fn tf_frequency_response_at_zero() {
1456        let tf = TransferFunction::new(vec![3.0], vec![1.0]);
1457        let (mag, phase) = tf.frequency_response(0.001);
1458        assert!((mag - 3.0).abs() < 0.01, "mag={mag}");
1459        assert!(phase.abs() < 1.0, "phase={phase}");
1460    }
1461
1462    // ── BodeAnalysis tests ───────────────────────────────────────────────────
1463
1464    #[test]
1465    fn bode_magnitude_db_at_low_freq() {
1466        let tf = TransferFunction::new(vec![10.0], vec![1.0]);
1467        let bode = BodeAnalysis::new(tf);
1468        let db = bode.magnitude_db(0.001);
1469        // Should be ~20 dB
1470        assert!((db - 20.0).abs() < 1.0, "db={db}");
1471    }
1472
1473    #[test]
1474    fn bode_sweep_returns_correct_length() {
1475        let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0]);
1476        let bode = BodeAnalysis::new(tf);
1477        let (omegas, mags, phases) = bode.sweep(0.1, 100.0, 50);
1478        assert_eq!(omegas.len(), 50);
1479        assert_eq!(mags.len(), 50);
1480        assert_eq!(phases.len(), 50);
1481    }
1482
1483    // ── KalmanFilter tests ───────────────────────────────────────────────────
1484
1485    #[test]
1486    fn kalman_predict_zero_input() {
1487        let mut kf = KalmanFilter::new(vec![1.0], vec![vec![1.0]]);
1488        let a = vec![vec![1.0]];
1489        let b = vec![vec![0.0]];
1490        let q = vec![vec![0.01]];
1491        kf.predict(&a, &b, &[0.0], &q);
1492        // x_hat should still be 1.0
1493        assert!((kf.x_hat[0] - 1.0).abs() < 1e-10);
1494    }
1495
1496    #[test]
1497    fn kalman_update_corrects_state() {
1498        let mut kf = KalmanFilter::new(vec![0.0], vec![vec![1.0]]);
1499        let h = vec![vec![1.0]];
1500        let r = vec![vec![1.0]];
1501        kf.update(&h, &r, &[10.0]).unwrap();
1502        // Should move toward measurement
1503        assert!(
1504            kf.x_hat[0] > 0.0,
1505            "state should increase toward measurement"
1506        );
1507    }
1508
1509    #[test]
1510    fn kalman_update_reduces_covariance() {
1511        let mut kf = KalmanFilter::new(vec![0.0], vec![vec![10.0]]);
1512        let h = vec![vec![1.0]];
1513        let r = vec![vec![1.0]];
1514        let p_before = kf.p[0][0];
1515        kf.update(&h, &r, &[0.0]).unwrap();
1516        assert!(
1517            kf.p[0][0] < p_before,
1518            "covariance should decrease after update"
1519        );
1520    }
1521
1522    // ── LQR tests ────────────────────────────────────────────────────────────
1523
1524    #[test]
1525    fn lqr_scalar_system() {
        // Scalar system: a=0.9, b=1, q=1, r=0.1 → DARE iteration should converge
1527        let a = vec![vec![0.9]];
1528        let b = vec![vec![1.0]];
1529        let q = vec![vec![1.0]];
1530        let r = vec![vec![0.1]];
1531        let lqr = LqrController::solve(&a, &b, q, r);
1532        assert!(
1533            lqr.is_some(),
1534            "LQR should converge for stable scalar system"
1535        );
1536    }
1537
1538    #[test]
1539    fn lqr_control_output_sign() {
1540        let a = vec![vec![0.9]];
1541        let b = vec![vec![1.0]];
1542        let q = vec![vec![1.0]];
1543        let r = vec![vec![0.1]];
1544        if let Some(lqr) = LqrController::solve(&a, &b, q, r) {
1545            let u = lqr.compute(&[1.0]);
1546            // Stabilizing control should push state negative
1547            assert!(u[0] < 0.0, "LQR should apply negative feedback: u={}", u[0]);
1548        }
1549    }
1550
1551    // ── AdaptiveController tests ─────────────────────────────────────────────
1552
1553    #[test]
1554    fn adaptive_controller_produces_output() {
1555        let mut ctrl = AdaptiveController::new(1.0, 1.0, 0.1);
1556        let u = ctrl.update(1.0, 0.5, 0.01);
1557        assert!(u.is_finite(), "u={u}");
1558    }
1559
1560    #[test]
1561    fn adaptive_controller_reset() {
1562        let mut ctrl = AdaptiveController::new(1.0, 1.0, 0.1);
1563        ctrl.update(5.0, 3.0, 0.1);
1564        ctrl.reset();
1565        assert!(ctrl.ym.abs() < 1e-12);
1566        assert!((ctrl.theta - 1.0).abs() < 1e-12);
1567    }
1568
1569    // ── FuzzyController tests ────────────────────────────────────────────────
1570
1571    #[test]
1572    fn triangle_mf_peak_value() {
1573        let mf = TriangleMf::new(0.0, 1.0, 2.0);
1574        assert!((mf.eval(1.0) - 1.0).abs() < 1e-10);
1575    }
1576
1577    #[test]
1578    fn triangle_mf_zero_outside() {
1579        let mf = TriangleMf::new(0.0, 1.0, 2.0);
1580        assert!(mf.eval(-0.1).abs() < 1e-10);
1581        assert!(mf.eval(2.1).abs() < 1e-10);
1582    }
1583
1584    #[test]
1585    fn triangle_mf_half_left() {
1586        let mf = TriangleMf::new(0.0, 1.0, 2.0);
1587        assert!((mf.eval(0.5) - 0.5).abs() < 1e-10);
1588    }
1589
1590    #[test]
1591    fn fuzzy_controller_center_input() {
1592        let imfs = vec![
1593            TriangleMf::new(-2.0, -1.0, 0.0),
1594            TriangleMf::new(-1.0, 0.0, 1.0),
1595            TriangleMf::new(0.0, 1.0, 2.0),
1596        ];
1597        let omfs = vec![
1598            TriangleMf::new(-2.0, -1.0, 0.0),
1599            TriangleMf::new(-1.0, 0.0, 1.0),
1600            TriangleMf::new(0.0, 1.0, 2.0),
1601        ];
1602        let rules = vec![
1603            FuzzyRule {
1604                input_idx: 0,
1605                output_idx: 0,
1606            },
1607            FuzzyRule {
1608                input_idx: 1,
1609                output_idx: 1,
1610            },
1611            FuzzyRule {
1612                input_idx: 2,
1613                output_idx: 2,
1614            },
1615        ];
1616        let ctrl = FuzzyController::new(imfs, omfs, rules, -2.0, 2.0);
1617        let out = ctrl.compute(0.0);
1618        // With input = 0, rule 2 fires with membership 1, output should be ~0
1619        assert!(out.abs() < 0.5, "out={out}");
1620    }
1621
1622    // ── NeuralController tests ───────────────────────────────────────────────
1623
1624    #[test]
1625    fn neural_controller_forward_pass() {
1626        let nn = NeuralController::new(&[2, 4, 1], 0.01);
1627        let (out, _acts) = nn.forward(&[1.0, 0.5]);
1628        assert_eq!(out.len(), 1);
1629        assert!(out[0].is_finite());
1630    }
1631
1632    #[test]
1633    fn neural_controller_gradient_update() {
1634        let mut nn = NeuralController::new(&[2, 4, 1], 0.01);
1635        let (out_before, _) = nn.forward(&[1.0, 0.5]);
1636        nn.gradient_update(&[1.0, 0.5], &[1.0]);
1637        let (out_after, _) = nn.forward(&[1.0, 0.5]);
1638        // Loss should decrease after one step toward target=1.0
1639        let loss_before = (out_before[0] - 1.0).powi(2);
1640        let loss_after = (out_after[0] - 1.0).powi(2);
1641        assert!(loss_after <= loss_before + 1e-6, "loss should not increase");
1642    }
1643
1644    #[test]
1645    fn replay_buffer_capacity() {
1646        let mut buf = ReplayBuffer::new(3);
1647        for i in 0..5 {
1648            buf.push(vec![i as f64], vec![0.0], 0.0);
1649        }
1650        assert_eq!(buf.len(), 3, "buffer should not exceed capacity");
1651    }
1652
1653    #[test]
1654    fn replay_buffer_is_empty() {
1655        let buf = ReplayBuffer::new(10);
1656        assert!(buf.is_empty());
1657    }
1658
1659    // ── Helper function tests ────────────────────────────────────────────────
1660
1661    #[test]
1662    fn zoh_discretize_identity_zero_dt() {
1663        let a = vec![vec![1.0, 0.0], vec![0.0, -1.0]];
1664        let ad = zoh_discretize(&a, 0.0);
1665        // e^0 ≈ I
1666        assert!((ad[0][0] - 1.0).abs() < 1e-6);
1667        assert!((ad[1][1] - 1.0).abs() < 1e-6);
1668        assert!(ad[0][1].abs() < 1e-6);
1669    }
1670
1671    #[test]
1672    fn controllability_matrix_size() {
1673        let a = vec![vec![0.0, 1.0], vec![-1.0, 0.0]];
1674        let b = vec![vec![0.0], vec![1.0]];
1675        let cm = controllability_matrix(&a, &b);
1676        assert_eq!(cm.len(), 2);
        assert_eq!(cm[0].len(), 2); // n·m = 2 columns
1678    }
1679
1680    #[test]
1681    fn mat_inv_identity() {
1682        let i = eye(3);
1683        let inv = mat_inv(&i).unwrap();
1684        for r in 0..3 {
1685            for c in 0..3 {
1686                let expected = if r == c { 1.0 } else { 0.0 };
1687                assert!((inv[r][c] - expected).abs() < 1e-10);
1688            }
1689        }
1690    }
1691
1692    #[test]
1693    fn mat_mul_identity() {
1694        let a = vec![vec![2.0, 3.0], vec![1.0, 4.0]];
1695        let i = eye(2);
1696        let ai = mat_mul(&a, &i);
1697        assert!((ai[0][0] - 2.0).abs() < 1e-12);
1698        assert!((ai[1][1] - 4.0).abs() < 1e-12);
1699    }
1700
1701    #[test]
1702    fn poly_roots_linear() {
1703        // x + 2 = 0 → root = -2
1704        let roots = poly_roots_real(&[1.0, 2.0]);
1705        assert_eq!(roots.len(), 1);
1706        assert!((roots[0] + 2.0).abs() < 1e-10);
1707    }
1708
1709    #[test]
1710    fn poly_roots_quadratic() {
1711        // x^2 - 5x + 6 = (x-2)(x-3) → roots 2, 3
1712        let roots = poly_roots_real(&[1.0, -5.0, 6.0]);
1713        assert_eq!(roots.len(), 2);
1714        let mut sorted = roots.clone();
1715        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1716        assert!((sorted[0] - 2.0).abs() < 1e-6, "root0={}", sorted[0]);
1717        assert!((sorted[1] - 3.0).abs() < 1e-6, "root1={}", sorted[1]);
1718    }
1719
1720    #[test]
1721    fn mpc_compute_output_length() {
1722        let a = vec![vec![1.0, 0.1], vec![0.0, 1.0]];
1723        let b = vec![vec![0.0], vec![0.1]];
1724        let mpc = ModelPredictiveControl::new(a, b, 5, 1.0, 0.1);
1725        let u = mpc.compute(&[1.0, 0.0], &[0.0, 0.0]);
1726        assert_eq!(u.len(), 1);
1727    }
1728
1729    #[test]
1730    fn tf_poles_first_order() {
1731        // H(s) = 1 / (s + 3) → pole at -3
1732        let tf = TransferFunction::new(vec![1.0], vec![1.0, 3.0]);
1733        let poles = tf.poles();
1734        assert_eq!(poles.len(), 1);
1735        assert!((poles[0] + 3.0).abs() < 1e-6, "pole={}", poles[0]);
1736    }
1737
1738    #[test]
1739    fn bode_phase_first_order_system() {
1740        // First-order: H(s) = 1/(s+1), at ω=1: phase = -45°
1741        let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0]);
1742        let bode = BodeAnalysis::new(tf);
1743        let phase = bode.phase_deg(1.0);
1744        assert!((phase + 45.0).abs() < 1.0, "phase={phase}");
1745    }
1746
1747    #[test]
1748    fn nn_compute_shape() {
1749        let nn = NeuralController::new(&[3, 8, 2], 0.001);
1750        let out = nn.compute(&[1.0, 2.0, 3.0]);
1751        assert_eq!(out.len(), 2);
1752    }
1753
1754    #[test]
1755    fn pi_only_settles() {
1756        let mut pid = Pid::new(1.0, 0.5, 0.0);
1757        // Simulate simple integrating plant: y += u * dt
1758        let mut y = 0.0_f64;
1759        let setpoint = 1.0;
1760        let dt = 0.01;
1761        for _ in 0..2000 {
1762            let err = setpoint - y;
1763            let output = pid.compute(err, dt);
1764            y += output * dt;
1765        }
1766        assert!((y - setpoint).abs() < 0.05, "PI settled y={y}");
1767    }
1768
1769    #[test]
1770    fn state_space_zoh_discrete() {
1771        let a = vec![vec![-1.0]];
1772        let b = vec![vec![1.0]];
1773        let c = vec![vec![1.0]];
1774        let d = vec![vec![0.0]];
1775        let ss = StateSpaceModel::new(a, b, c, d);
1776        let disc = ss.discretize_zoh(0.01);
1777        assert!(disc.is_discrete);
1778        assert!((disc.a[0][0] - 1.0).abs() < 0.1); // e^(-0.01) ≈ 0.99
1779    }
1780}