// scirs2_signal/lti/analysis.rs

// System analysis functions for LTI systems
//
// This module provides comprehensive analysis capabilities for Linear Time-Invariant systems:
// - Frequency response analysis (Bode plots)
// - Controllability and observability analysis
// - Lyapunov Gramians computation
// - Kalman decomposition for minimal realizations
// - System equivalence checking
// - Matrix utilities for system analysis

use super::systems::LtiSystem;
use crate::error::{SignalError, SignalResult};
use crate::lti::systems::StateSpace;
use crate::lti::TransferFunction;
use std::f64::consts::PI;

#[allow(unused_imports)]
/// Calculate the Bode plot data (magnitude and phase) for an LTI system
///
/// Computes the frequency response at specified frequencies and converts
/// to magnitude (in dB) and phase (in degrees) format for Bode plot visualization.
///
/// # Arguments
///
/// * `system` - The LTI system to analyze
/// * `w` - Optional frequency points at which to evaluate the response.
///   If None, generates logarithmically spaced frequencies from 0.01 to 100 rad/s
///
/// # Returns
///
/// A tuple containing (frequencies, magnitude in dB, phase in degrees)
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::TransferFunction, analysis::bode};
///
/// let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0], None).unwrap();
/// let freqs = vec![0.1, 1.0, 10.0];
/// let (w, mag, phase) = bode(&tf, Some(&freqs)).unwrap();
/// ```
#[allow(dead_code)]
pub fn bode<T: LtiSystem>(
    system: &T,
    w: Option<&[f64]>,
) -> SignalResult<(Vec<f64>, Vec<f64>, Vec<f64>)> {
    // Default frequencies if none provided
    let frequencies = match w {
        Some(freq) => freq.to_vec(),
        None => {
            // Generate logarithmically spaced frequencies between 0.01 and 100 rad/s
            let n = 100;
            let mut w_out = Vec::with_capacity(n);

            let w_min = 0.01;
            let w_max = 100.0;
            let log_step = f64::powf(w_max / w_min, 1.0 / (n - 1) as f64);

            let mut w_val = w_min;
            for _ in 0..n {
                w_out.push(w_val);
                w_val *= log_step;
            }

            w_out
        }
    };

    // Calculate frequency response
    let resp = system.frequency_response(&frequencies)?;

    // Convert to magnitude (dB) and phase (degrees)
    let mut mag = Vec::with_capacity(resp.len());
    let mut phase = Vec::with_capacity(resp.len());

    for &val in &resp {
        // Magnitude in dB: 20 * log10(|H(jw)|)
        let mag_db = 20.0 * val.norm().log10();
        mag.push(mag_db);

        // Phase in degrees: arg(H(jw)) * 180/pi
        let phase_deg = val.arg() * 180.0 / PI;
        phase.push(phase_deg);
    }

    Ok((frequencies, mag, phase))
}

/// Analysis result for system controllability
///
/// Contains comprehensive information about the controllability properties
/// of a state-space system, including the controllability matrix and subspace dimensions.
#[derive(Debug, Clone)]
pub struct ControllabilityAnalysis {
    /// Whether the system is completely controllable
    pub is_controllable: bool,
    /// Rank of the controllability matrix
    pub controllability_rank: usize,
    /// Total number of states in the system
    pub state_dimension: usize,
    /// The full controllability matrix [B AB A²B ... A^(n-1)B]
    pub controllability_matrix: Vec<Vec<f64>>,
    /// Dimension of the controllable subspace
    pub controllable_subspace_dim: usize,
}

/// Analysis result for system observability
///
/// Contains comprehensive information about the observability properties
/// of a state-space system, including the observability matrix and subspace dimensions.
#[derive(Debug, Clone)]
pub struct ObservabilityAnalysis {
    /// Whether the system is completely observable
    pub is_observable: bool,
    /// Rank of the observability matrix
    pub observability_rank: usize,
    /// Total number of states in the system
    pub state_dimension: usize,
    /// The full observability matrix [C; CA; CA²; ...; CA^(n-1)]
    pub observability_matrix: Vec<Vec<f64>>,
    /// Dimension of the observable subspace
    pub observable_subspace_dim: usize,
}

/// Combined controllability and observability analysis result
///
/// Provides a complete picture of the system's structural properties,
/// including whether it's minimal and the Kalman canonical structure.
#[derive(Debug, Clone)]
pub struct ControlObservabilityAnalysis {
    /// Controllability analysis results
    pub controllability: ControllabilityAnalysis,
    /// Observability analysis results
    pub observability: ObservabilityAnalysis,
    /// Whether the system is minimal (both controllable and observable)
    pub is_minimal: bool,
    /// Kalman canonical decomposition structure
    pub kalman_structure: KalmanStructure,
}

/// Kalman canonical structure showing subspace dimensions
///
/// Decomposes the state space into four orthogonal subspaces according
/// to controllability and observability properties.
#[derive(Debug, Clone)]
pub struct KalmanStructure {
    /// Controllable and observable subspace dimension
    pub co_dimension: usize,
    /// Controllable but not observable subspace dimension
    pub c_no_dimension: usize,
    /// Not controllable but observable subspace dimension
    pub nc_o_dimension: usize,
    /// Neither controllable nor observable subspace dimension
    pub nc_no_dimension: usize,
}

/// Enhanced Kalman decomposition with transformation matrices
///
/// Provides complete Kalman decomposition including orthogonal basis vectors
/// for each subspace and the transformation matrix to canonical form.
#[derive(Debug, Clone)]
pub struct KalmanDecomposition {
    /// Controllable and observable subspace dimension
    pub co_dimension: usize,
    /// Controllable but not observable subspace dimension
    pub c_no_dimension: usize,
    /// Not controllable but observable subspace dimension
    pub nc_o_dimension: usize,
    /// Neither controllable nor observable subspace dimension
    pub nc_no_dimension: usize,
    /// Transformation matrix to Kalman canonical form
    pub transformation_matrix: Vec<Vec<f64>>,
    /// Basis vectors for controllable and observable subspace
    pub co_basis: Vec<Vec<f64>>,
    /// Basis vectors for controllable but not observable subspace
    pub c_no_basis: Vec<Vec<f64>>,
    /// Basis vectors for not controllable but observable subspace
    pub nc_o_basis: Vec<Vec<f64>>,
    /// Basis vectors for neither controllable nor observable subspace
    pub nc_no_basis: Vec<Vec<f64>>,
}

/// Analyze controllability of a state-space system
///
/// A system is controllable if the controllability matrix [B AB A²B ... A^(n-1)B]
/// has full row rank, where n is the number of states. This means all states
/// can be reached from any initial state using appropriate control inputs.
///
/// # Arguments
///
/// * `ss` - State-space system to analyze
///
/// # Returns
///
/// Controllability analysis result containing rank information and controllability matrix
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::StateSpace, analysis::analyze_controllability};
///
/// let ss = StateSpace::new(
///     vec![-1.0], vec![1.0], vec![1.0], vec![0.0], None
/// ).unwrap();
/// let analysis = analyze_controllability(&ss).unwrap();
/// assert!(analysis.is_controllable);
/// ```
#[allow(dead_code)]
pub fn analyze_controllability(ss: &StateSpace) -> SignalResult<ControllabilityAnalysis> {
    let n = ss.n_states; // Number of states
    if n == 0 {
        return Err(SignalError::ValueError("Empty state matrix".to_string()));
    }

    let m = ss.n_inputs; // Number of inputs
    if m == 0 {
        return Err(SignalError::ValueError("Empty input matrix".to_string()));
    }

    // Build controllability matrix: [B AB A²B ... A^(n-1)B]
    // Convert flattened matrices to 2D format for easier manipulation
    let a_matrix = flatten_to_2d(&ss.a, n, n)?;
    let b_matrix = flatten_to_2d(&ss.b, n, m)?;

    // Initialize controllability matrix with the right dimensions: n x (n*m)
    let mut controllability_matrix = vec![vec![0.0; n * m]; n];
    let mut current_ab = b_matrix.clone();

    // Add B columns to controllability matrix
    for row_idx in 0..n {
        for col_idx in 0..m {
            controllability_matrix[row_idx][col_idx] = current_ab[row_idx][col_idx];
        }
    }

    // Add AB, A²B, ..., A^(n-1)B columns
    for block in 1..n {
        current_ab = matrix_multiply(&a_matrix, &current_ab)?;

        for row_idx in 0..n {
            for col_idx in 0..m {
                let matrix_col_idx = block * m + col_idx;
                controllability_matrix[row_idx][matrix_col_idx] = current_ab[row_idx][col_idx];
            }
        }
    }

    // Calculate rank of controllability matrix
    let rank = matrix_rank(&controllability_matrix)?;
    let is_controllable = rank == n;

    Ok(ControllabilityAnalysis {
        is_controllable,
        controllability_rank: rank,
        state_dimension: n,
        controllability_matrix: controllability_matrix.clone(),
        controllable_subspace_dim: rank,
    })
}
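
// Illustrative sketch added during editing (not part of the original test suite):
// `analyze_controllability` on a rank-deficient example. The input only drives the
// first state, so the controllability matrix [B AB] loses rank. The `StateSpace::new`
// call pattern (flattened row-major A, then B, C, D, None) is assumed to match the
// doc examples in this module.
#[cfg(test)]
#[test]
fn controllability_rank_deficient_sketch() {
    // A = [[-1, 0], [0, -2]], B = [1, 0]^T: the second state cannot be reached from u.
    let ss = StateSpace::new(
        vec![-1.0, 0.0, 0.0, -2.0],
        vec![1.0, 0.0],
        vec![1.0, 1.0],
        vec![0.0],
        None,
    )
    .unwrap();

    let analysis = analyze_controllability(&ss).unwrap();
    assert!(!analysis.is_controllable);
    assert_eq!(analysis.controllability_rank, 1);
    assert_eq!(analysis.state_dimension, 2);
}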

/// Analyze observability of a state-space system
///
/// A system is observable if the observability matrix [C; CA; CA²; ...; CA^(n-1)]
/// has full column rank, where n is the number of states. This means the internal
/// states can be determined from the output measurements.
///
/// # Arguments
///
/// * `ss` - State-space system to analyze
///
/// # Returns
///
/// Observability analysis result containing rank information and observability matrix
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::StateSpace, analysis::analyze_observability};
///
/// let ss = StateSpace::new(
///     vec![-1.0], vec![1.0], vec![1.0], vec![0.0], None
/// ).unwrap();
/// let analysis = analyze_observability(&ss).unwrap();
/// assert!(analysis.is_observable);
/// ```
#[allow(dead_code)]
pub fn analyze_observability(ss: &StateSpace) -> SignalResult<ObservabilityAnalysis> {
    let n = ss.n_states; // Number of states
    if n == 0 {
        return Err(SignalError::ValueError("Empty state matrix".to_string()));
    }

    let p = ss.n_outputs; // Number of outputs
    if p == 0 {
        return Err(SignalError::ValueError("Empty output matrix".to_string()));
    }

    // Build observability matrix: [C; CA; CA²; ...; CA^(n-1)]
    // Convert flattened matrices to 2D format for easier manipulation
    let a_matrix = flatten_to_2d(&ss.a, n, n)?;
    let c_matrix = flatten_to_2d(&ss.c, p, n)?;

    // Initialize observability matrix with the right dimensions: (n*p) x n
    let mut observability_matrix = vec![vec![0.0; n]; n * p];
    let mut current_ca = c_matrix.clone();

    // Add C rows to observability matrix
    for row_idx in 0..p {
        for col_idx in 0..n {
            observability_matrix[row_idx][col_idx] = current_ca[row_idx][col_idx];
        }
    }

    // Add CA, CA², ..., CA^(n-1) rows
    for block in 1..n {
        current_ca = matrix_multiply(&current_ca, &a_matrix)?;

        for (row_idx, row) in current_ca.iter().enumerate().take(p) {
            let matrix_row_idx = block * p + row_idx;
            observability_matrix[matrix_row_idx][..n].copy_from_slice(&row[..n]);
        }
    }

    // Calculate rank of observability matrix
    let rank = matrix_rank(&observability_matrix)?;
    let is_observable = rank == n;

    Ok(ObservabilityAnalysis {
        is_observable,
        observability_rank: rank,
        state_dimension: n,
        observability_matrix: observability_matrix.clone(),
        observable_subspace_dim: rank,
    })
}
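
// Illustrative sketch added during editing (not part of the original test suite):
// `analyze_observability` on the dual example. With C = [1, 0] the second state never
// appears in the output, so the observability matrix [C; CA] has rank 1. The
// `StateSpace::new` call pattern is assumed to match the doc examples in this module.
#[cfg(test)]
#[test]
fn observability_rank_deficient_sketch() {
    // A = [[-1, 0], [0, -2]], C = [1, 0]: the second state is invisible at the output.
    let ss = StateSpace::new(
        vec![-1.0, 0.0, 0.0, -2.0],
        vec![1.0, 1.0],
        vec![1.0, 0.0],
        vec![0.0],
        None,
    )
    .unwrap();

    let analysis = analyze_observability(&ss).unwrap();
    assert!(!analysis.is_observable);
    assert_eq!(analysis.observability_rank, 1);
    assert_eq!(analysis.state_dimension, 2);
}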

/// Perform combined controllability and observability analysis
///
/// This function analyzes both controllability and observability properties
/// and determines if the system is minimal (both controllable and observable).
/// It also provides a simplified Kalman decomposition structure.
///
/// # Arguments
///
/// * `ss` - State-space system to analyze
///
/// # Returns
///
/// Combined analysis result including Kalman decomposition structure
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::StateSpace, analysis::analyze_control_observability};
///
/// let ss = StateSpace::new(
///     vec![-1.0], vec![1.0], vec![1.0], vec![0.0], None
/// ).unwrap();
/// let analysis = analyze_control_observability(&ss).unwrap();
/// assert!(analysis.is_minimal);
/// ```
#[allow(dead_code)]
pub fn analyze_control_observability(
    ss: &StateSpace,
) -> SignalResult<ControlObservabilityAnalysis> {
    let controllability = analyze_controllability(ss)?;
    let observability = analyze_observability(ss)?;

    let is_minimal = controllability.is_controllable && observability.is_observable;

    // Determine Kalman canonical decomposition structure
    let n = ss.n_states;
    let nc = controllability.controllable_subspace_dim;
    let no = observability.observable_subspace_dim;

    // This is a simplified analysis - full Kalman decomposition would require
    // computing the intersection of controllable and observable subspaces
    let co_dimension = if is_minimal {
        n
    } else {
        (nc + no).saturating_sub(n).min(nc.min(no))
    };
    let c_no_dimension = nc.saturating_sub(co_dimension);
    let nc_o_dimension = no.saturating_sub(co_dimension);
    let nc_no_dimension = n.saturating_sub(co_dimension + c_no_dimension + nc_o_dimension);

    let kalman_structure = KalmanStructure {
        co_dimension,
        c_no_dimension,
        nc_o_dimension,
        nc_no_dimension,
    };

    Ok(ControlObservabilityAnalysis {
        controllability,
        observability,
        is_minimal,
        kalman_structure,
    })
}

/// Type alias for Gramian matrix pair to reduce type complexity
pub type GramianPair = (Vec<Vec<f64>>, Vec<Vec<f64>>);

/// Compute controllability and observability Gramians by solving Lyapunov equations
///
/// Computes the controllability Gramian Wc and observability Gramian Wo by solving:
/// - Controllability: A*Wc + Wc*A' + B*B' = 0
/// - Observability: A'*Wo + Wo*A + C'*C = 0
///
/// These Gramians provide energy-based measures of controllability and observability.
///
/// # Arguments
/// * `ss` - State-space system
///
/// # Returns
/// * Tuple of (controllability_gramian, observability_gramian)
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::StateSpace, analysis::compute_lyapunov_gramians};
///
/// let ss = StateSpace::new(
///     vec![-1.0], vec![1.0], vec![1.0], vec![0.0], None
/// ).unwrap();
/// let (wc, wo) = compute_lyapunov_gramians(&ss).unwrap();
/// ```
#[allow(clippy::needless_range_loop)]
#[allow(dead_code)]
pub fn compute_lyapunov_gramians(ss: &StateSpace) -> SignalResult<GramianPair> {
    let n = ss.n_states;
    if n == 0 {
        return Err(SignalError::ValueError("Empty state matrix".to_string()));
    }

    // For continuous-time systems, solve Lyapunov equations:
    // A*Wc + Wc*A' + B*B' = 0  (controllability Gramian)
    // A'*Wo + Wo*A + C'*C = 0  (observability Gramian)

    // This is a simplified implementation using a fixed-point iteration.
    // In practice, a dedicated solver such as the Bartels-Stewart algorithm would be used;
    // the iteration below only approximates the solution and is not guaranteed to
    // converge for every stable system.

    let max_iterations = 1000;
    let tolerance = 1e-8;

    // Initialize Gramians
    let mut wc = vec![vec![0.0; n]; n];
    let mut wo = vec![vec![0.0; n]; n];

    // Compute B*B' for controllability
    let mut bb_t = vec![vec![0.0; n]; n];
    for i in 0..n {
        for j in 0..n {
            for k in 0..ss.n_inputs {
                bb_t[i][j] += ss.b[i * ss.n_inputs + k] * ss.b[j * ss.n_inputs + k];
            }
        }
    }

    // Compute C'*C for observability
    let mut ct_c = vec![vec![0.0; n]; n];
    for i in 0..n {
        for j in 0..n {
            for k in 0..ss.n_outputs {
                ct_c[i][j] += ss.c[k * n + i] * ss.c[k * n + j];
            }
        }
    }

    // Iterative solution (simplified fixed-point iteration)
    for _iter in 0..max_iterations {
        let mut wc_new = bb_t.clone();
        let mut wo_new = ct_c.clone();

        // Update controllability Gramian: Wc_new = -A*Wc*A' + B*B'
        for i in 0..n {
            for j in 0..n {
                let mut awc_at = 0.0;
                for k in 0..n {
                    for l in 0..n {
                        awc_at += ss.a[i * n + k] * wc[k][l] * ss.a[j * n + l];
                    }
                }
                wc_new[i][j] -= awc_at;
            }
        }

        // Update observability Gramian: Wo_new = -A'*Wo*A + C'*C
        for i in 0..n {
            for j in 0..n {
                let mut at_wo_a = 0.0;
                for k in 0..n {
                    for l in 0..n {
                        at_wo_a += ss.a[k * n + i] * wo[k][l] * ss.a[l * n + j];
                    }
                }
                wo_new[i][j] -= at_wo_a;
            }
        }

        // Check convergence
        let mut max_diff: f64 = 0.0;
        for i in 0..n {
            for j in 0..n {
                max_diff = max_diff.max((wc_new[i][j] - wc[i][j]).abs());
                max_diff = max_diff.max((wo_new[i][j] - wo[i][j]).abs());
            }
        }

        wc = wc_new;
        wo = wo_new;

        if max_diff < tolerance {
            break;
        }
    }

    Ok((wc, wo))
}
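
// Illustrative sketch added during editing (not part of the original test suite):
// basic sanity checks on `compute_lyapunov_gramians`. Because the solver above is a
// simplified fixed-point iteration, only structural properties (shape, symmetry,
// finiteness) are asserted, not exact Gramian values. The `StateSpace::new` call
// pattern is assumed to match the doc examples in this module.
#[cfg(test)]
#[test]
fn lyapunov_gramian_shape_sketch() {
    // A = -0.5 * I keeps the fixed-point iteration contractive so that it converges.
    let ss = StateSpace::new(
        vec![-0.5, 0.0, 0.0, -0.5],
        vec![1.0, 0.0],
        vec![1.0, 1.0],
        vec![0.0],
        None,
    )
    .unwrap();

    let (wc, wo) = compute_lyapunov_gramians(&ss).unwrap();

    // Both Gramians should be 2x2, symmetric, and finite.
    assert_eq!(wc.len(), 2);
    assert_eq!(wo.len(), 2);
    assert!((wc[0][1] - wc[1][0]).abs() < 1e-6);
    assert!((wo[0][1] - wo[1][0]).abs() < 1e-6);
    assert!(wc.iter().flatten().all(|v| v.is_finite()));
    assert!(wo.iter().flatten().all(|v| v.is_finite()));
}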

/// Perform complete Kalman decomposition
///
/// Decomposes the state space into four subspaces:
/// - Controllable and Observable (CO)
/// - Controllable but Not Observable (CNO)
/// - Not Controllable but Observable (NCO)
/// - Not Controllable and Not Observable (NCNO)
///
/// This decomposition is fundamental for understanding the minimal realization
/// and the structure of the system.
///
/// # Arguments
/// * `ss` - State-space system
///
/// # Returns
/// * Enhanced Kalman structure with transformation matrices
///
/// # Examples
///
/// ```
/// use scirs2_signal::lti::{systems::StateSpace, analysis::complete_kalman_decomposition};
///
/// let ss = StateSpace::new(
///     vec![-1.0, 0.0, 1.0, -2.0], vec![1.0, 0.0],
///     vec![1.0, 0.0], vec![0.0], None
/// ).unwrap();
/// let decomp = complete_kalman_decomposition(&ss).unwrap();
/// assert!(decomp.co_dimension + decomp.c_no_dimension + decomp.nc_o_dimension + decomp.nc_no_dimension == 2);
/// ```
#[allow(dead_code)]
pub fn complete_kalman_decomposition(ss: &StateSpace) -> SignalResult<KalmanDecomposition> {
    let n = ss.n_states;
    if n == 0 {
        return Err(SignalError::ValueError("Empty state matrix".to_string()));
    }

    // Get controllability and observability analyses
    let controllability = analyze_controllability(ss)?;
    let observability = analyze_observability(ss)?;

    // Compute orthogonal bases for controllable and observable subspaces
    let controllable_basis = compute_orthogonal_basis(&controllability.controllability_matrix)?;
    let observable_basis = compute_orthogonal_basis(&observability.observability_matrix)?;

    // Find intersection of controllable and observable subspaces
    let co_basis = compute_subspace_intersection(&controllable_basis, &observable_basis)?;
    let _co_dimension = co_basis.len();

    // Compute complementary subspaces
    let c_no_basis = compute_orthogonal_complement(&controllable_basis, &co_basis)?;
    let nc_o_basis = compute_orthogonal_complement(&observable_basis, &co_basis)?;

    // Compute the uncontrollable and unobservable subspace
    let mut all_basis = co_basis.clone();
    all_basis.extend(c_no_basis.clone());
    all_basis.extend(nc_o_basis.clone());

    let nc_no_basis = compute_orthogonal_complement_to_space(&all_basis, n)?;

    // Build transformation matrix T = [T_co | T_cno | T_nco | T_ncno]
    let mut transformation = vec![vec![0.0; n]; n];
    let mut col = 0;

    // Add CO basis vectors
    for vector in &co_basis {
        if col >= n {
            break; // Prevent index out of bounds
        }
        for (i, &val) in vector.iter().enumerate() {
            transformation[i][col] = val;
        }
        col += 1;
    }

    // Add CNO basis vectors
    for vector in &c_no_basis {
        if col >= n {
            break; // Prevent index out of bounds
        }
        for (i, &val) in vector.iter().enumerate() {
            transformation[i][col] = val;
        }
        col += 1;
    }

    // Add NCO basis vectors
    for vector in &nc_o_basis {
        if col >= n {
            break; // Prevent index out of bounds
        }
        for (i, &val) in vector.iter().enumerate() {
            transformation[i][col] = val;
        }
        col += 1;
    }

    // Add NCNO basis vectors
    for vector in &nc_no_basis {
        if col >= n {
            break; // Prevent index out of bounds
        }
        for (i, &val) in vector.iter().enumerate() {
            transformation[i][col] = val;
        }
        col += 1;
    }

    // Calculate actual used dimensions after bounds checking
    let mut used_col = 0;
    let used_co_dimension = std::cmp::min(co_basis.len(), n - used_col);
    used_col += used_co_dimension;

    let used_c_no_dimension = std::cmp::min(c_no_basis.len(), n - used_col);
    used_col += used_c_no_dimension;

    let used_nc_o_dimension = std::cmp::min(nc_o_basis.len(), n - used_col);
    used_col += used_nc_o_dimension;

    let used_nc_no_dimension = std::cmp::min(nc_no_basis.len(), n - used_col);

    Ok(KalmanDecomposition {
        co_dimension: used_co_dimension,
        c_no_dimension: used_c_no_dimension,
        nc_o_dimension: used_nc_o_dimension,
        nc_no_dimension: used_nc_no_dimension,
        transformation_matrix: transformation,
        co_basis,
        c_no_basis,
        nc_o_basis,
        nc_no_basis,
    })
}
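
// Illustrative sketch added during editing (not part of the original test suite):
// re-checks the dimension bookkeeping of `complete_kalman_decomposition` on the
// two-state system from the doc example above. Only properties already claimed by
// that example (dimensions summing to n) plus the n x n shape of the transformation
// matrix are asserted.
#[cfg(test)]
#[test]
fn kalman_decomposition_dimension_sketch() {
    let ss = StateSpace::new(
        vec![-1.0, 0.0, 1.0, -2.0],
        vec![1.0, 0.0],
        vec![1.0, 0.0],
        vec![0.0],
        None,
    )
    .unwrap();

    let decomp = complete_kalman_decomposition(&ss).unwrap();
    let total = decomp.co_dimension
        + decomp.c_no_dimension
        + decomp.nc_o_dimension
        + decomp.nc_no_dimension;

    assert_eq!(total, 2);
    assert_eq!(decomp.transformation_matrix.len(), 2);
    assert_eq!(decomp.transformation_matrix[0].len(), 2);
}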

/// Check if two systems are equivalent (same input-output behavior)
///
/// Two systems are equivalent if they have the same transfer function,
/// even if their state-space representations are different. This is useful
/// for comparing minimal realizations and checking system transformations.
///
/// # Arguments
///
/// * `sys1` - First system
/// * `sys2` - Second system
/// * `tolerance` - Tolerance for numerical comparison
///
/// # Returns
///
/// True if systems are equivalent within tolerance
///
/// # Examples
///
/// ```rust
/// use scirs2_signal::lti::{systems::TransferFunction, analysis::systems_equivalent};
///
/// let tf1 = TransferFunction::new(vec![1.0], vec![1.0, 1.0], None).unwrap();
/// let tf2 = TransferFunction::new(vec![2.0], vec![2.0, 2.0], None).unwrap();
/// assert!(systems_equivalent(&tf1, &tf2, 1e-6).unwrap());
/// ```
#[allow(dead_code)]
pub fn systems_equivalent(
    sys1: &dyn LtiSystem,
    sys2: &dyn LtiSystem,
    tolerance: f64,
) -> SignalResult<bool> {
    // Compare transfer functions
    let tf1 = sys1.to_tf()?;
    let tf2 = sys2.to_tf()?;

    // Check if they have the same time domain
    if tf1.dt != tf2.dt {
        return Ok(false);
    }

    // Check if numerator and denominator coefficients are proportional
    if tf1.num.len() != tf2.num.len() || tf1.den.len() != tf2.den.len() {
        return Ok(false);
    }

    // Find scaling factor from leading non-zero coefficients
    let scale_num = if !tf1.num.is_empty() && !tf2.num.is_empty() && tf1.num[0].abs() > tolerance {
        tf2.num[0] / tf1.num[0]
    } else {
        1.0
    };

    let scale_den = if !tf1.den.is_empty() && !tf2.den.is_empty() && tf1.den[0].abs() > tolerance {
        tf2.den[0] / tf1.den[0]
    } else {
        1.0
    };

    // The overall scaling should be the same for both numerator and denominator
    if (scale_num - scale_den).abs() > tolerance {
        return Ok(false);
    }

    // Check if all numerator coefficients match after scaling
    for (&a, &b) in tf1.num.iter().zip(&tf2.num) {
        if (b - scale_num * a).abs() > tolerance {
            return Ok(false);
        }
    }

    // Check if all denominator coefficients match after scaling
    for (&c, &d) in tf1.den.iter().zip(&tf2.den) {
        if (d - scale_den * c).abs() > tolerance {
            return Ok(false);
        }
    }

    Ok(true)
}

/// Calculate the condition number of a matrix
///
/// The condition number indicates how sensitive the matrix inverse is to
/// perturbations in the matrix. High condition numbers indicate near-singular matrices.
///
/// # Arguments
///
/// * `matrix` - Input matrix as a slice of row vectors
///
/// # Returns
///
/// An estimate of the condition number. The true condition number is the ratio of the
/// largest to the smallest singular value; this simplified implementation returns a
/// Frobenius-norm-based estimate instead.
#[allow(dead_code)]
pub fn matrix_condition_number(matrix: &[Vec<f64>]) -> SignalResult<f64> {
    if matrix.is_empty() || matrix[0].is_empty() {
        return Err(SignalError::ValueError("Empty matrix".to_string()));
    }

    // For this simplified implementation, we'll use the Frobenius norm
    // In practice, would compute SVD for accurate condition number

    let rows = matrix.len();
    let cols = matrix[0].len();

    // Compute Frobenius norm
    let mut norm_sum = 0.0;
    for row in matrix {
        for &val in row {
            norm_sum += val * val;
        }
    }
    let frobenius_norm = norm_sum.sqrt();

    // Simplified condition number estimate
    // This is a placeholder - real implementation would use SVD
    let min_dimension = rows.min(cols) as f64;
    let condition_estimate = frobenius_norm * min_dimension;

    Ok(condition_estimate.max(1.0))
}
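
// Illustrative sketch added during editing (not part of the original test suite):
// `matrix_condition_number` only returns a rough Frobenius-norm-based estimate, so this
// sketch asserts the documented contract (>= 1.0, error on empty input) rather than an
// exact value.
#[cfg(test)]
#[test]
fn matrix_condition_number_sketch() {
    let identity = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
    let cond = matrix_condition_number(&identity).unwrap();
    assert!(cond >= 1.0);
    assert!(cond.is_finite());

    let empty: Vec<Vec<f64>> = Vec::new();
    assert!(matrix_condition_number(&empty).is_err());
}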

// Helper functions for matrix operations and subspace computations

/// Convert a flattened matrix to 2D format
#[allow(dead_code)]
fn flatten_to_2d(flat: &[f64], rows: usize, cols: usize) -> SignalResult<Vec<Vec<f64>>> {
    if flat.len() != rows * cols {
        return Err(SignalError::ValueError(
            "Matrix dimensions don't match flattened size".to_string(),
        ));
    }

    let mut matrix = vec![vec![0.0; cols]; rows];
    for i in 0..rows {
        for j in 0..cols {
            matrix[i][j] = flat[i * cols + j];
        }
    }

    Ok(matrix)
}

/// Multiply two matrices
#[allow(dead_code)]
fn matrix_multiply(a: &[Vec<f64>], b: &[Vec<f64>]) -> SignalResult<Vec<Vec<f64>>> {
    if a.is_empty() || b.is_empty() || a[0].len() != b.len() {
        return Err(SignalError::ValueError(
            "Incompatible matrix dimensions for multiplication".to_string(),
        ));
    }

    let rows = a.len();
    let cols = b[0].len();
    let inner = a[0].len();

    let mut result = vec![vec![0.0; cols]; rows];

    for i in 0..rows {
        for j in 0..cols {
            #[allow(clippy::needless_range_loop)]
            for k in 0..inner {
                result[i][j] += a[i][k] * b[k][j];
            }
        }
    }

    Ok(result)
}

/// Compute the rank of a matrix using Gaussian elimination
#[allow(dead_code)]
fn matrix_rank(matrix: &[Vec<f64>]) -> SignalResult<usize> {
    if matrix.is_empty() || matrix[0].is_empty() {
        return Ok(0);
    }

    let mut working_matrix = matrix.to_vec();
    let rows = working_matrix.len();
    let cols = working_matrix[0].len();
    let tolerance = 1e-10;

    let mut rank = 0;

    for col in 0..cols {
        // All rows have already been used as pivots (possible when cols > rows):
        // the rank cannot increase further, and indexing row `rank` would be out of bounds.
        if rank == rows {
            break;
        }

        // Find pivot
        let mut pivot_row = rank;
        for row in (rank + 1)..rows {
            if working_matrix[row][col].abs() > working_matrix[pivot_row][col].abs() {
                pivot_row = row;
            }
        }

        // Check if pivot is significant
        if working_matrix[pivot_row][col].abs() < tolerance {
            continue;
        }

        // Swap rows if necessary
        if pivot_row != rank {
            working_matrix.swap(rank, pivot_row);
        }

        // Eliminate below pivot
        for row in (rank + 1)..rows {
            let factor = working_matrix[row][col] / working_matrix[rank][col];
            for c in col..cols {
                working_matrix[row][c] -= factor * working_matrix[rank][c];
            }
        }

        rank += 1;
    }

    Ok(rank)
}
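
// Illustrative sketch added during editing (not part of the original test suite):
// exercises `matrix_rank` on a wide (more columns than rows) full-row-rank matrix,
// the case guarded by the early `break` above.
#[cfg(test)]
#[test]
fn matrix_rank_wide_matrix_sketch() {
    let wide = vec![vec![1.0, 0.0, 0.0], vec![0.0, 1.0, 0.0]];
    assert_eq!(matrix_rank(&wide).unwrap(), 2);
}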

/// Compute an orthogonal basis for the column space of a matrix using
/// Gram-Schmidt orthogonalization (simplified QR)
#[allow(clippy::needless_range_loop)]
#[allow(dead_code)]
fn compute_orthogonal_basis(matrix: &[Vec<f64>]) -> SignalResult<Vec<Vec<f64>>> {
    if matrix.is_empty() || matrix[0].is_empty() {
        return Ok(Vec::new());
    }

    let m = matrix.len();
    let n = matrix[0].len();

    // Transpose matrix to work with column vectors
    let mut columns = vec![vec![0.0; m]; n];
    for i in 0..m {
        for j in 0..n {
            columns[j][i] = matrix[i][j];
        }
    }

    // Simplified Gram-Schmidt orthogonalization
    let mut basis: Vec<Vec<f64>> = Vec::new();
    let tolerance = 1e-10;

    for col in columns {
        let mut orthogonal_col = col.clone();

        // Orthogonalize against previous basis vectors
        for basis_vec in &basis {
            let proj_coeff =
                dot_product(&orthogonal_col, basis_vec) / dot_product(basis_vec, basis_vec);
            for i in 0..orthogonal_col.len() {
                orthogonal_col[i] -= proj_coeff * basis_vec[i];
            }
        }

        // Normalize and add if not zero vector
        let norm = vector_norm(&orthogonal_col);
        if norm > tolerance {
            for elem in &mut orthogonal_col {
                *elem /= norm;
            }
            basis.push(orthogonal_col);
        }
    }

    Ok(basis)
}
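
// Illustrative sketch added during editing (not part of the original test suite):
// `compute_orthogonal_basis` should collapse linearly dependent columns and return
// unit-norm basis vectors.
#[cfg(test)]
#[test]
fn orthogonal_basis_sketch() {
    // The two columns [1, 0] and [2, 0] are parallel, so only one basis vector survives.
    let matrix = vec![vec![1.0, 2.0], vec![0.0, 0.0]];
    let basis = compute_orthogonal_basis(&matrix).unwrap();

    assert_eq!(basis.len(), 1);
    assert!((vector_norm(&basis[0]) - 1.0).abs() < 1e-10);
}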

/// Compute intersection of two subspaces
#[allow(dead_code)]
fn compute_subspace_intersection(
    subspace1: &[Vec<f64>],
    subspace2: &[Vec<f64>],
) -> SignalResult<Vec<Vec<f64>>> {
    if subspace1.is_empty() || subspace2.is_empty() {
        return Ok(Vec::new());
    }

    // This is a simplified implementation
    // In practice would use more sophisticated algorithms
    let mut intersection = Vec::new();
    let tolerance = 1e-10;

    for vec1 in subspace1 {
        // Check if vec1 is in subspace2 by trying to express it as linear combination
        let mut in_subspace2 = false;

        // Simple check: see if vec1 is close to any vector in subspace2
        for vec2 in subspace2 {
            let mut diff_norm = 0.0;
            for i in 0..vec1.len() {
                diff_norm += (vec1[i] - vec2[i]).powi(2);
            }

            if diff_norm.sqrt() < tolerance {
                in_subspace2 = true;
                break;
            }
        }

        if in_subspace2 {
            intersection.push(vec1.clone());
        }
    }

    Ok(intersection)
}

/// Compute orthogonal complement of a subspace
#[allow(dead_code)]
fn compute_orthogonal_complement(
    original_space: &[Vec<f64>],
    subspace: &[Vec<f64>],
) -> SignalResult<Vec<Vec<f64>>> {
    let mut complement = Vec::new();

    for vec in original_space {
        let mut is_in_subspace = false;
        let tolerance = 1e-10;

        // Check if vec is in subspace
        for sub_vec in subspace {
            let mut diff_norm = 0.0;
            for i in 0..vec.len() {
                diff_norm += (vec[i] - sub_vec[i]).powi(2);
            }

            if diff_norm.sqrt() < tolerance {
                is_in_subspace = true;
                break;
            }
        }

        if !is_in_subspace {
            complement.push(vec.clone());
        }
    }

    Ok(complement)
}

/// Compute orthogonal complement to a given subspace in n-dimensional space
#[allow(dead_code)]
fn compute_orthogonal_complement_to_space(
    subspace: &[Vec<f64>],
    dimension: usize,
) -> SignalResult<Vec<Vec<f64>>> {
    if subspace.is_empty() {
        // Return standard basis
        let mut basis = Vec::new();
        for i in 0..dimension {
            let mut vec = vec![0.0; dimension];
            vec[i] = 1.0;
            basis.push(vec);
        }
        return Ok(basis);
    }

    // Create standard basis and orthogonalize against subspace
    let mut complement = Vec::new();
    let tolerance = 1e-10;

    for i in 0..dimension {
        let mut basis_vec = vec![0.0; dimension];
        basis_vec[i] = 1.0;

        // Orthogonalize against subspace
        for sub_vec in subspace {
            let proj_coeff = dot_product(&basis_vec, sub_vec);
            for j in 0..basis_vec.len() {
                basis_vec[j] -= proj_coeff * sub_vec[j];
            }
        }

        // Normalize and add if not zero
        let norm = vector_norm(&basis_vec);
        if norm > tolerance {
            for elem in &mut basis_vec {
                *elem /= norm;
            }
            complement.push(basis_vec);
        }
    }

    Ok(complement)
}
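
// Illustrative sketch added during editing (not part of the original test suite):
// the complement of span{e1} in R^2 should be spanned by e2. The subspace vector is
// given with unit norm, since the projection step above assumes normalized inputs.
#[cfg(test)]
#[test]
fn orthogonal_complement_to_space_sketch() {
    let subspace = vec![vec![1.0, 0.0]];
    let complement = compute_orthogonal_complement_to_space(&subspace, 2).unwrap();

    assert_eq!(complement.len(), 1);
    assert!(complement[0][0].abs() < 1e-10);
    assert!((complement[0][1] - 1.0).abs() < 1e-10);
}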

/// Helper function: dot product of two vectors
#[allow(dead_code)]
fn dot_product(a: &[f64], b: &[f64]) -> f64 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}

/// Helper function: compute norm of a vector
#[allow(dead_code)]
fn vector_norm(vec: &[f64]) -> f64 {
    vec.iter().map(|x| x * x).sum::<f64>().sqrt()
}
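
// Illustrative sketch added during editing (not part of the original test suite):
// the vector helpers on a 3-4-5 right triangle.
#[cfg(test)]
#[test]
fn dot_product_and_norm_sketch() {
    let v = vec![3.0, 4.0];
    assert!((dot_product(&v, &v) - 25.0).abs() < 1e-12);
    assert!((vector_norm(&v) - 5.0).abs() < 1e-12);
}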

#[cfg(test)]
mod tests {
    use super::*;
    use crate::lti::{StateSpace, TransferFunction};
    use approx::assert_relative_eq;

    #[test]
    fn test_bode_plot() {
        // Create a simple first-order system H(s) = 1 / (s + 1)
        let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0], None).unwrap();

        // Compute Bode plot at omega = 0.1, 1, 10
        let freqs = vec![0.1, 1.0, 10.0];
        let (w, mag, phase) = bode(&tf, Some(&freqs)).unwrap();

        // Check frequencies
        assert_eq!(w.len(), 3);
        assert_relative_eq!(w[0], 0.1, epsilon = 1e-6);
        assert_relative_eq!(w[1], 1.0, epsilon = 1e-6);
        assert_relative_eq!(w[2], 10.0, epsilon = 1e-6);

        // Check magnitudes (in dB)
        assert_eq!(mag.len(), 3);
        // At omega = 1, |H| = 0.707, which is -3 dB
        assert_relative_eq!(mag[1], -3.0, epsilon = 0.1);

        // Check phases (in degrees)
        assert_eq!(phase.len(), 3);
        // At omega = 1, phase is -45 degrees
        assert_relative_eq!(phase[1], -45.0, epsilon = 0.1);
    }
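
    // Illustrative sketch added during editing (not part of the original test suite):
    // exercises the default frequency grid documented for `bode` (100 logarithmically
    // spaced points starting at 0.01 rad/s when `w` is `None`).
    #[test]
    fn test_bode_default_frequencies() {
        let tf = TransferFunction::new(vec![1.0], vec![1.0, 1.0], None).unwrap();
        let (w, mag, phase) = bode(&tf, None).unwrap();

        assert_eq!(w.len(), 100);
        assert_eq!(mag.len(), 100);
        assert_eq!(phase.len(), 100);
        assert_relative_eq!(w[0], 0.01, epsilon = 1e-12);
        // The grid should be strictly increasing
        assert!(w.windows(2).all(|pair| pair[1] > pair[0]));
    }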

    #[test]
    fn test_controllability_analysis() {
        // Create a controllable system: dx/dt = -x + u, y = x
        let ss = StateSpace::new(
            vec![-1.0], // A = [-1]
            vec![1.0],  // B = [1]
            vec![1.0],  // C = [1]
            vec![0.0],  // D = [0]
            None,
        )
        .unwrap();

        let analysis = analyze_controllability(&ss).unwrap();
        assert!(analysis.is_controllable);
        assert_eq!(analysis.controllability_rank, 1);
        assert_eq!(analysis.state_dimension, 1);
    }

    #[test]
    fn test_observability_analysis() {
        // Create an observable system: dx/dt = -x + u, y = x
        let ss = StateSpace::new(
            vec![-1.0], // A = [-1]
            vec![1.0],  // B = [1]
            vec![1.0],  // C = [1]
            vec![0.0],  // D = [0]
            None,
        )
        .unwrap();

        let analysis = analyze_observability(&ss).unwrap();
        assert!(analysis.is_observable);
        assert_eq!(analysis.observability_rank, 1);
        assert_eq!(analysis.state_dimension, 1);
    }

    #[test]
    fn test_combined_analysis() {
        // Create a minimal system (both controllable and observable)
        let ss = StateSpace::new(
            vec![-1.0], // A = [-1]
            vec![1.0],  // B = [1]
            vec![1.0],  // C = [1]
            vec![0.0],  // D = [0]
            None,
        )
        .unwrap();

        let analysis = analyze_control_observability(&ss).unwrap();
        assert!(analysis.is_minimal);
        assert_eq!(analysis.kalman_structure.co_dimension, 1);
        assert_eq!(analysis.kalman_structure.c_no_dimension, 0);
        assert_eq!(analysis.kalman_structure.nc_o_dimension, 0);
        assert_eq!(analysis.kalman_structure.nc_no_dimension, 0);
    }

    #[test]
    fn test_systems_equivalence() {
        // Create two equivalent systems (same after normalization)
        let tf1 = TransferFunction::new(vec![1.0], vec![1.0, 1.0], None).unwrap();
        let tf2 = TransferFunction::new(vec![2.0], vec![2.0, 2.0], None).unwrap();

        assert!(systems_equivalent(&tf1, &tf2, 1e-6).unwrap());

        // Create two different systems
        let tf3 = TransferFunction::new(vec![1.0], vec![1.0, 2.0], None).unwrap();
        assert!(!systems_equivalent(&tf1, &tf3, 1e-6).unwrap());
    }

    #[test]
    fn test_matrix_rank() {
        // Test rank of identity matrix
        let identity = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
        assert_eq!(matrix_rank(&identity).unwrap(), 2);

        // Test rank of singular matrix
        let singular = vec![vec![1.0, 2.0], vec![2.0, 4.0]];
        assert_eq!(matrix_rank(&singular).unwrap(), 1);
    }

    #[test]
    fn test_matrix_multiply() {
        let a = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
        let b = vec![vec![5.0, 6.0], vec![7.0, 8.0]];

        let result = matrix_multiply(&a, &b).unwrap();
        assert_eq!(result.len(), 2);
        assert_eq!(result[0].len(), 2);
        assert_relative_eq!(result[0][0], 19.0);
        assert_relative_eq!(result[0][1], 22.0);
        assert_relative_eq!(result[1][0], 43.0);
        assert_relative_eq!(result[1][1], 50.0);
    }
}

#[allow(dead_code)]
fn tf(num: Vec<f64>, den: Vec<f64>) -> TransferFunction {
    TransferFunction::new(num, den, None).unwrap()
}