// oxilean_std/functional_calculus/types.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::functions::*;
6use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
7
/// Represents the numerical range (field of values) of an operator.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct NumericalRange {
    /// Boundary points sampled at regular angles.
    pub boundary_samples: Vec<(f64, f64)>,
    /// Whether the operator is normal (numerical range = convex hull of spectrum).
    pub is_normal: bool,
}
#[allow(dead_code)]
impl NumericalRange {
    /// Creates a numerical range from eigenvalues (normal operator case).
    ///
    /// With real eigenvalues the range collapses to the real segment
    /// [min, max], recorded here by its two endpoints.
    pub fn from_eigenvalues(eigenvalues: &[f64]) -> Self {
        if eigenvalues.is_empty() {
            return NumericalRange {
                boundary_samples: Vec::new(),
                is_normal: true,
            };
        }
        let mut lo = f64::INFINITY;
        let mut hi = f64::NEG_INFINITY;
        for &ev in eigenvalues {
            lo = lo.min(ev);
            hi = hi.max(ev);
        }
        NumericalRange {
            boundary_samples: vec![(lo, 0.0), (hi, 0.0)],
            is_normal: true,
        }
    }
    /// Returns the numerical radius: max{|z| : z in W(A)}.
    ///
    /// Returns 0.0 when there are no boundary samples.
    pub fn numerical_radius(&self) -> f64 {
        let mut radius = 0.0f64;
        for &(re, im) in &self.boundary_samples {
            radius = radius.max((re * re + im * im).sqrt());
        }
        radius
    }
    /// Checks if 0 is in the numerical range (relevant for invertibility).
    ///
    /// Requires at least two boundary samples; otherwise answers `false`.
    pub fn contains_zero(&self, tol: f64) -> bool {
        if self.boundary_samples.len() < 2 {
            return false;
        }
        let xs = || self.boundary_samples.iter().map(|&(x, _)| x);
        let xmin = xs().fold(f64::INFINITY, f64::min);
        let xmax = xs().fold(f64::NEG_INFINITY, f64::max);
        // Zero lies inside the (tolerance-widened) real extent of the range.
        xmin - tol <= 0.0 && xmax + tol >= 0.0
    }
}
/// Bounded perturbation data for Miyadera-Voigt theorem.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct BoundedPerturbation {
    /// Norm of the perturbation B.
    pub perturbation_norm: f64,
    /// Growth bound of the unperturbed semigroup.
    pub base_growth_bound: f64,
    /// Constant M for the unperturbed semigroup.
    pub base_constant: f64,
}
#[allow(dead_code)]
impl BoundedPerturbation {
    /// Creates perturbation data from ||B||, the growth bound ω, and the constant M.
    pub fn new(b_norm: f64, omega: f64, m: f64) -> Self {
        Self {
            perturbation_norm: b_norm,
            base_growth_bound: omega,
            base_constant: m,
        }
    }
    /// New growth bound after perturbation: ω + M * ||B||.
    pub fn perturbed_growth_bound(&self) -> f64 {
        self.base_growth_bound + self.base_constant * self.perturbation_norm
    }
    /// Checks if the perturbation preserves contractivity (||B|| small enough).
    ///
    /// Contractive iff the perturbed growth bound is non-positive.
    pub fn preserves_contractivity(&self) -> bool {
        self.perturbed_growth_bound() <= 0.0
    }
}
/// Represents a function in a function algebra, stored as a lookup table.
///
/// This models C(K) -- the algebra of continuous functions on a compact set K,
/// approximated by sampling at discrete points.
#[derive(Debug, Clone)]
pub struct FunctionAlgebraElement {
    /// Sampled function values at equally-spaced points in [0, 1].
    pub values: Vec<f64>,
}
impl FunctionAlgebraElement {
    /// Create from a Rust closure, sampling at `n` equally-spaced points in [0,1].
    ///
    /// `n == 0` yields an empty element; `n == 1` samples only at t = 0.
    pub fn from_fn<F: Fn(f64) -> f64>(f: F, n: usize) -> Self {
        let values = (0..n)
            .map(|i| {
                // The spacing 1/(n-1) is undefined for n <= 1, so sample at 0.
                let t = if n <= 1 {
                    0.0
                } else {
                    i as f64 / (n - 1) as f64
                };
                f(t)
            })
            .collect();
        FunctionAlgebraElement { values }
    }
    /// Pointwise addition: (f + g)(x) = f(x) + g(x).
    ///
    /// With mismatched sample counts the result is truncated to the shorter
    /// operand (zip semantics).
    pub fn add(&self, other: &FunctionAlgebraElement) -> FunctionAlgebraElement {
        let values = self
            .values
            .iter()
            .zip(other.values.iter())
            .map(|(a, b)| a + b)
            .collect();
        FunctionAlgebraElement { values }
    }
    /// Pointwise multiplication: (f * g)(x) = f(x) * g(x).
    ///
    /// With mismatched sample counts the result is truncated to the shorter
    /// operand (zip semantics).
    pub fn multiply(&self, other: &FunctionAlgebraElement) -> FunctionAlgebraElement {
        let values = self
            .values
            .iter()
            .zip(other.values.iter())
            .map(|(a, b)| a * b)
            .collect();
        FunctionAlgebraElement { values }
    }
    /// Scalar multiplication: (c * f)(x) = c * f(x).
    pub fn scale(&self, c: f64) -> FunctionAlgebraElement {
        FunctionAlgebraElement {
            values: self.values.iter().map(|v| v * c).collect(),
        }
    }
    /// The supremum norm: ||f||_inf = max |f(x)|; 0.0 for the empty element.
    pub fn sup_norm(&self) -> f64 {
        self.values.iter().map(|v| v.abs()).fold(0.0_f64, f64::max)
    }
    /// The L2 norm (approximated by the trapezoidal rule on the samples).
    pub fn l2_norm(&self) -> f64 {
        if self.values.len() <= 1 {
            // One sample: treat the function as constant, so ||f||_2 = |f|.
            return self.values.first().map(|v| v.abs()).unwrap_or(0.0);
        }
        let n = self.values.len();
        let h = 1.0 / (n - 1) as f64;
        let mut integral = 0.0;
        for i in 0..n - 1 {
            // Trapezoid over [t_i, t_{i+1}] applied to f^2.
            let f0 = self.values[i] * self.values[i];
            let f1 = self.values[i + 1] * self.values[i + 1];
            integral += (f0 + f1) * h / 2.0;
        }
        integral.sqrt()
    }
    /// Composition: (f o g)(x) = f(g(x)).
    ///
    /// Since we store sampled values, we interpolate g's output to find f's value.
    /// Clamps g's output to [0, 1] for lookups. The result always has one
    /// sample per sample of `g`.
    pub fn compose(&self, g: &FunctionAlgebraElement) -> FunctionAlgebraElement {
        let n = self.values.len();
        if n == 0 {
            // f has no samples: nothing can be interpolated.
            return FunctionAlgebraElement { values: Vec::new() };
        }
        if n == 1 {
            // f is constant, so f(g(x)) = f(0) at each of g's sample points.
            // (Fix: previously this returned `self.clone()`, whose length did
            // not match g's grid, inconsistent with the n > 1 branch below.)
            return FunctionAlgebraElement {
                values: vec![self.values[0]; g.values.len()],
            };
        }
        let values = g
            .values
            .iter()
            .map(|&gx| {
                // Linear interpolation of f at the clamped position gx.
                let t = gx.clamp(0.0, 1.0) * (n - 1) as f64;
                let lo = (t.floor() as usize).min(n - 2);
                let hi = lo + 1;
                let frac = t - lo as f64;
                self.values[lo] * (1.0 - frac) + self.values[hi] * frac
            })
            .collect();
        FunctionAlgebraElement { values }
    }
    /// The multiplicative identity in C(K): the constant function 1.
    pub fn one(n: usize) -> Self {
        FunctionAlgebraElement {
            values: vec![1.0; n],
        }
    }
    /// The zero element in C(K): the constant function 0.
    pub fn zero_fn(n: usize) -> Self {
        FunctionAlgebraElement {
            values: vec![0.0; n],
        }
    }
}
/// A polynomial p(z) = coeffs[0] + coeffs[1]*z + coeffs[2]*z^2 + ...
///
/// Used to represent continuous functions on the spectrum for the continuous
/// functional calculus (by density of polynomials in C(K)).
#[derive(Debug, Clone)]
pub struct Polynomial {
    /// Coefficients in ascending degree order: `coeffs[i]` is the coefficient of z^i.
    pub coeffs: Vec<f64>,
}
impl Polynomial {
    /// Create a polynomial from coefficients in ascending degree order.
    pub fn new(coeffs: Vec<f64>) -> Self {
        Polynomial { coeffs }
    }
    /// The zero polynomial.
    pub fn zero() -> Self {
        Self::new(vec![0.0])
    }
    /// A constant polynomial p(z) = c.
    pub fn constant(c: f64) -> Self {
        Self::new(vec![c])
    }
    /// The identity polynomial p(z) = z.
    pub fn identity() -> Self {
        Self::new(vec![0.0, 1.0])
    }
    /// The degree of the polynomial (0 for the zero polynomial).
    ///
    /// Coefficients of magnitude <= 1e-15 are treated as zero.
    pub fn degree(&self) -> usize {
        self.coeffs
            .iter()
            .rposition(|c| c.abs() > 1e-15)
            .unwrap_or(0)
    }
    /// Evaluate the polynomial at a scalar point x using Horner's method.
    pub fn eval(&self, x: f64) -> f64 {
        self.coeffs.iter().rev().fold(0.0, |acc, &c| acc * x + c)
    }
    /// Add two polynomials coefficient-wise.
    pub fn add(&self, other: &Polynomial) -> Polynomial {
        let mut coeffs = vec![0.0; self.coeffs.len().max(other.coeffs.len())];
        for (slot, &c) in coeffs.iter_mut().zip(&self.coeffs) {
            *slot += c;
        }
        for (slot, &c) in coeffs.iter_mut().zip(&other.coeffs) {
            *slot += c;
        }
        Polynomial { coeffs }
    }
    /// Multiply two polynomials via convolution.
    pub fn multiply(&self, other: &Polynomial) -> Polynomial {
        if self.coeffs.is_empty() || other.coeffs.is_empty() {
            return Polynomial::zero();
        }
        let mut coeffs = vec![0.0; self.coeffs.len() + other.coeffs.len() - 1];
        for (i, &a) in self.coeffs.iter().enumerate() {
            for (j, &b) in other.coeffs.iter().enumerate() {
                coeffs[i + j] += a * b;
            }
        }
        Polynomial { coeffs }
    }
    /// Scale the polynomial by a constant factor.
    pub fn scale(&self, s: f64) -> Polynomial {
        let coeffs = self.coeffs.iter().map(|&c| c * s).collect();
        Polynomial { coeffs }
    }
    /// Compose two polynomials: compute (self o other)(z) = self(other(z)).
    ///
    /// Uses Horner's scheme on the coefficient list, with polynomial
    /// multiplication in place of scalar multiplication.
    pub fn compose(&self, other: &Polynomial) -> Polynomial {
        let mut rev = self.coeffs.iter().rev();
        let leading = match rev.next() {
            Some(&c) => c,
            None => return Polynomial::zero(),
        };
        let mut acc = Polynomial::constant(leading);
        for &c in rev {
            acc = acc.multiply(other).add(&Polynomial::constant(c));
        }
        acc
    }
}
293/// Computes the spectral radius of a square matrix via the power method.
294///
295/// The power method applied to ||A^k||^{1/k} converges monotonically down
296/// to r(A) by the Gelfand formula.
297#[derive(Debug, Clone)]
298pub struct SpectralRadiusComputer {
299    /// Maximum number of iterations for the power iteration.
300    pub max_iters: u32,
301    /// Convergence tolerance: stop when successive estimates differ by less
302    /// than `tol`.
303    pub tol: f64,
304}
305impl SpectralRadiusComputer {
306    /// Create a new computer with given iteration count and tolerance.
307    pub fn new(max_iters: u32, tol: f64) -> Self {
308        SpectralRadiusComputer { max_iters, tol }
309    }
310    /// Default: 30 iterations, tolerance 1e-8.
311    pub fn default() -> Self {
312        SpectralRadiusComputer {
313            max_iters: 30,
314            tol: 1e-8,
315        }
316    }
317    /// Compute the spectral radius of `mat` using the Gelfand formula
318    /// r(A) = inf_k ||A^k||^{1/k}.
319    ///
320    /// Returns the best estimate found within `max_iters` steps.
321    pub fn compute(&self, mat: &SquareMatrix) -> f64 {
322        let mut best = f64::INFINITY;
323        let mut prev = f64::INFINITY;
324        for k in 1..=self.max_iters {
325            let nk = mat.pow(k).frobenius_norm();
326            let rk = if nk == 0.0 {
327                0.0
328            } else {
329                nk.powf(1.0 / k as f64)
330            };
331            if rk < best {
332                best = rk;
333            }
334            if (rk - prev).abs() < self.tol {
335                break;
336            }
337            prev = rk;
338        }
339        best
340    }
341    /// Use the power-vector method: iterate v_{k+1} = A v_k / ||A v_k||
342    /// and track ||A v_k|| to estimate the dominant eigenvalue magnitude.
343    pub fn power_vector_method(&self, mat: &SquareMatrix, init: &[f64]) -> f64 {
344        assert_eq!(init.len(), mat.dim, "init must have length mat.dim");
345        let n = mat.dim;
346        let mut v: Vec<f64> = init.to_vec();
347        let norm0: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
348        if norm0 > 1e-15 {
349            for x in &mut v {
350                *x /= norm0;
351            }
352        }
353        let mut lambda = 0.0;
354        for _ in 0..self.max_iters {
355            let mut w = vec![0.0; n];
356            for i in 0..n {
357                for j in 0..n {
358                    w[i] += mat.get(i, j) * v[j];
359                }
360            }
361            let dot_wv: f64 = w.iter().zip(v.iter()).map(|(a, b)| a * b).sum();
362            lambda = dot_wv.abs();
363            let nw: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
364            if nw < 1e-15 {
365                return 0.0;
366            }
367            for x in &mut w {
368                *x /= nw;
369            }
370            v = w;
371        }
372        lambda
373    }
374}
/// Computes the Fredholm index of a linear map A : R^m -> R^n represented
/// as a non-square (or square) matrix.
///
/// For a finite-dimensional matrix A in R^{n x m},
/// ind(A) = dim ker(A) - dim coker(A) = (m - rank A) - (n - rank A) = m - n.
///
/// For a square matrix ind(A) = 0 always; the interesting case is detecting
/// whether A is Fredholm (finite-dimensional kernel and cokernel).
#[derive(Debug, Clone)]
pub struct FredholmIndexCalculator {
    /// Row dimension (codomain dimension n).
    pub rows: usize,
    /// Column dimension (domain dimension m).
    pub cols: usize,
    /// Matrix entries in row-major order (rows * cols entries).
    pub entries: Vec<f64>,
}
impl FredholmIndexCalculator {
    /// Construct from row-major entries.
    ///
    /// Panics if `entries.len() != rows * cols`.
    pub fn new(rows: usize, cols: usize, entries: Vec<f64>) -> Self {
        assert_eq!(entries.len(), rows * cols, "entries must be rows*cols");
        FredholmIndexCalculator {
            rows,
            cols,
            entries,
        }
    }
    /// Row-major accessor for entry (r, c).
    fn get(&self, r: usize, c: usize) -> f64 {
        self.entries[r * self.cols + c]
    }
    /// Estimate the numerical rank via Gaussian elimination with partial pivoting.
    ///
    /// Pivot candidates smaller than `tol` in absolute value are treated as zero.
    pub fn numerical_rank(&self, tol: f64) -> usize {
        // Work on a mutable row-of-rows copy so `self` stays untouched.
        let mut mat: Vec<Vec<f64>> = (0..self.rows)
            .map(|r| (0..self.cols).map(|c| self.get(r, c)).collect())
            .collect();
        let mut rank = 0;
        let mut col = 0;
        let mut row = 0;
        while row < self.rows && col < self.cols {
            // Partial pivoting: pick the largest |entry| in this column.
            let mut max_val = 0.0;
            let mut max_row = row;
            for r in row..self.rows {
                if mat[r][col].abs() > max_val {
                    max_val = mat[r][col].abs();
                    max_row = r;
                }
            }
            if max_val < tol {
                // Column is numerically zero from `row` down; skip it.
                col += 1;
                continue;
            }
            mat.swap(row, max_row);
            // Normalise the pivot row, then eliminate the column elsewhere.
            let pivot = mat[row][col];
            for c in col..self.cols {
                mat[row][c] /= pivot;
            }
            for r in 0..self.rows {
                if r != row {
                    let factor = mat[r][col];
                    for c in col..self.cols {
                        mat[r][c] -= factor * mat[row][c];
                    }
                }
            }
            rank += 1;
            row += 1;
            col += 1;
        }
        rank
    }
    /// Dimension of the (approximate) kernel: dim ker(A) = cols - rank(A).
    pub fn kernel_dim(&self, tol: f64) -> usize {
        self.cols - self.numerical_rank(tol)
    }
    /// Dimension of the (approximate) cokernel: dim coker(A) = rows - rank(A).
    pub fn cokernel_dim(&self, tol: f64) -> usize {
        self.rows - self.numerical_rank(tol)
    }
    /// Fredholm index: ind(A) = dim ker(A) - dim coker(A).
    ///
    /// For a matrix this equals `cols - rows` (independent of rank, as long
    /// as the map is Fredholm, i.e., both dimensions are finite).
    pub fn fredholm_index(&self, tol: f64) -> i64 {
        // Run the elimination once and derive both dimensions from the same
        // rank (previously rank was recomputed by kernel_dim and cokernel_dim,
        // doubling the elimination work for an identical result).
        let rank = self.numerical_rank(tol);
        let kernel = (self.cols - rank) as i64;
        let cokernel = (self.rows - rank) as i64;
        kernel - cokernel
    }
}
/// A simplified spectral measure (discrete version for finite matrices).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct SpectralMeasure {
    /// Eigenvalues (the support of the spectral measure).
    pub eigenvalues: Vec<f64>,
    /// Spectral projections as rank-1 projectors (encoded as outer products of eigenvectors).
    pub projector_vectors: Vec<Vec<f64>>,
    /// Dimension of the space.
    pub dim: usize,
}
#[allow(dead_code)]
impl SpectralMeasure {
    /// Creates a spectral measure from eigenvalues (diagonal matrix case).
    ///
    /// The projector vectors are the standard basis vectors e_i.
    pub fn diagonal(eigenvalues: Vec<f64>) -> Self {
        let dim = eigenvalues.len();
        let projector_vectors: Vec<Vec<f64>> = (0..dim)
            .map(|i| {
                let mut basis = vec![0.0f64; dim];
                basis[i] = 1.0;
                basis
            })
            .collect();
        SpectralMeasure {
            eigenvalues,
            projector_vectors,
            dim,
        }
    }
    /// Applies a Borel function f to the operator via functional calculus.
    /// Returns the eigenvalues of f(A).
    pub fn apply_function<F: Fn(f64) -> f64>(&self, f: F) -> Vec<f64> {
        self.eigenvalues.iter().copied().map(f).collect()
    }
    /// Computes trace(f(A)) = sum f(eigenvalue_i).
    pub fn trace_of_function<F: Fn(f64) -> f64>(&self, f: F) -> f64 {
        self.eigenvalues.iter().copied().map(f).sum()
    }
    /// Computes the spectral radius (largest eigenvalue magnitude).
    pub fn spectral_radius(&self) -> f64 {
        let mut radius = 0.0f64;
        for &ev in &self.eigenvalues {
            radius = radius.max(ev.abs());
        }
        radius
    }
    /// Computes the operator norm (largest |eigenvalue|).
    pub fn operator_norm(&self) -> f64 {
        self.spectral_radius()
    }
    /// Checks if the operator is positive (all eigenvalues >= 0).
    pub fn is_positive(&self) -> bool {
        self.eigenvalues.iter().all(|&ev| ev >= 0.0)
    }
    /// Checks if the operator is positive definite (all eigenvalues > 0).
    pub fn is_positive_definite(&self) -> bool {
        self.eigenvalues.iter().all(|&ev| ev > 0.0)
    }
    /// Computes the square root of a positive operator.
    ///
    /// `None` when some eigenvalue is negative.
    pub fn sqrt_eigenvalues(&self) -> Option<Vec<f64>> {
        if self.is_positive() {
            Some(self.apply_function(f64::sqrt))
        } else {
            None
        }
    }
    /// Computes the exponential exp(A) eigenvalues.
    pub fn exp_eigenvalues(&self) -> Vec<f64> {
        self.apply_function(f64::exp)
    }
    /// Computes the logarithm log(A) for positive definite A.
    ///
    /// `None` when some eigenvalue is <= 0.
    pub fn log_eigenvalues(&self) -> Option<Vec<f64>> {
        if self.is_positive_definite() {
            Some(self.apply_function(f64::ln))
        } else {
            None
        }
    }
}
540/// An element of a finite-dimensional Banach algebra, represented as a square
541/// matrix together with a name for the algebra.
542///
543/// Provides an approximate spectrum via the characteristic polynomial roots
544/// (for small dimensions) and the Gelfand spectral radius estimate.
545#[derive(Debug, Clone)]
546pub struct BanachAlgebraElem {
547    /// The matrix representation of the element.
548    pub matrix: SquareMatrix,
549    /// Human-readable label for the algebra (e.g. "M_2(R)").
550    pub algebra_name: String,
551}
552impl BanachAlgebraElem {
553    /// Construct a new element in the algebra `algebra_name`.
554    pub fn new(matrix: SquareMatrix, algebra_name: impl Into<String>) -> Self {
555        BanachAlgebraElem {
556            matrix,
557            algebra_name: algebra_name.into(),
558        }
559    }
560    /// Operator (Frobenius) norm as a proxy for the Banach algebra norm.
561    pub fn norm(&self) -> f64 {
562        self.matrix.frobenius_norm()
563    }
564    /// Estimate the spectral radius r(a) = lim ||a^n||^{1/n}.
565    ///
566    /// Uses power iteration over the first `iters` powers.
567    pub fn spectral_radius_estimate(&self, iters: u32) -> f64 {
568        self.matrix.spectral_radius(iters)
569    }
570    /// Check whether the element is approximately invertible (det != 0 for 2x2).
571    pub fn is_invertible_2x2(&self) -> Option<bool> {
572        if self.matrix.dim != 2 {
573            return None;
574        }
575        let m = &self.matrix;
576        let det = m.get(0, 0) * m.get(1, 1) - m.get(0, 1) * m.get(1, 0);
577        Some(det.abs() > 1e-12)
578    }
579    /// For a 2x2 matrix, check membership of a scalar in the spectrum by
580    /// testing non-invertibility of (a - lambda * I).
581    pub fn is_in_spectrum_2x2(&self, lambda: f64) -> Option<bool> {
582        if self.matrix.dim != 2 {
583            return None;
584        }
585        let shifted = self.matrix.sub(&SquareMatrix::identity(2).scale(lambda));
586        let det = shifted.get(0, 0) * shifted.get(1, 1) - shifted.get(0, 1) * shifted.get(1, 0);
587        Some(det.abs() <= 1e-10)
588    }
589    /// Approximate the spectrum of a 2x2 matrix analytically.
590    ///
591    /// Returns the two eigenvalues (possibly complex — here returned as pairs
592    /// (real_part, imag_part)) via the characteristic polynomial.
593    pub fn spectrum_2x2(&self) -> Option<[(f64, f64); 2]> {
594        if self.matrix.dim != 2 {
595            return None;
596        }
597        let m = &self.matrix;
598        let tr = m.get(0, 0) + m.get(1, 1);
599        let det = m.get(0, 0) * m.get(1, 1) - m.get(0, 1) * m.get(1, 0);
600        let disc = tr * tr - 4.0 * det;
601        if disc >= 0.0 {
602            let s = disc.sqrt();
603            Some([((tr + s) / 2.0, 0.0), ((tr - s) / 2.0, 0.0)])
604        } else {
605            let s = (-disc).sqrt();
606            Some([(tr / 2.0, s / 2.0), (tr / 2.0, -s / 2.0)])
607        }
608    }
609}
610/// A discretised simulation of the C_0-semigroup {T(t)} generated by a bounded
611/// operator A via the explicit Euler approximation T(t) ≈ (I + (t/N) A)^N.
612///
613/// This is the Euler product formula (Trotter approximation) and converges to
614/// exp(tA) as N -> infty for bounded A.
615#[derive(Debug, Clone)]
616pub struct OperatorSemigroup {
617    /// The generator matrix A (bounded approximation).
618    pub generator: SquareMatrix,
619    /// Number of Euler steps used in the product approximation.
620    pub num_steps: u32,
621}
622impl OperatorSemigroup {
623    /// Construct with a given generator and number of discretisation steps.
624    pub fn new(generator: SquareMatrix, num_steps: u32) -> Self {
625        OperatorSemigroup {
626            generator,
627            num_steps,
628        }
629    }
630    /// Evaluate the semigroup T(t) ≈ (I + (t/N) A)^N.
631    ///
632    /// For small t and large N this approximates exp(tA).
633    pub fn eval(&self, t: f64) -> SquareMatrix {
634        let n = self.num_steps.max(1);
635        let dt = t / n as f64;
636        let dim = self.generator.dim;
637        let step = SquareMatrix::identity(dim).add(&self.generator.scale(dt));
638        step.pow(n)
639    }
640    /// Apply T(t) to an initial vector x_0, returning the evolved state T(t) x_0.
641    pub fn apply(&self, t: f64, x0: &[f64]) -> Vec<f64> {
642        let tt = self.eval(t);
643        assert_eq!(x0.len(), tt.dim);
644        let n = tt.dim;
645        let mut result = vec![0.0; n];
646        for i in 0..n {
647            for j in 0..n {
648                result[i] += tt.get(i, j) * x0[j];
649            }
650        }
651        result
652    }
653    /// Check the semigroup property T(s+t) ≈ T(s) T(t) by comparing Frobenius norms.
654    ///
655    /// Returns the relative error ||T(s+t) - T(s)T(t)||_F / ||T(s+t)||_F.
656    pub fn check_semigroup_property(&self, s: f64, t: f64) -> f64 {
657        let t_sum = self.eval(s + t);
658        let t_s = self.eval(s);
659        let t_t = self.eval(t);
660        let product = t_s.mul(&t_t);
661        let diff = t_sum.sub(&product);
662        let err = diff.frobenius_norm();
663        let denom = t_sum.frobenius_norm();
664        if denom < 1e-15 {
665            err
666        } else {
667            err / denom
668        }
669    }
670}
671/// Estimates the trace-class (nuclear) norm ||T||_1 = sum of singular values
672/// of a square matrix, using power iteration to find singular values.
673///
674/// For an n x n matrix the singular values are the square roots of the
675/// eigenvalues of T^T T (or T* T for complex operators).
676#[derive(Debug, Clone)]
677pub struct TraceClassNorm {
678    /// The matrix whose trace norm we estimate.
679    pub matrix: SquareMatrix,
680}
681impl TraceClassNorm {
682    /// Construct from a `SquareMatrix`.
683    pub fn new(matrix: SquareMatrix) -> Self {
684        TraceClassNorm { matrix }
685    }
686    /// Compute T^T (transpose) for a square matrix.
687    fn transpose(&self) -> SquareMatrix {
688        let n = self.matrix.dim;
689        let mut entries = vec![0.0; n * n];
690        for i in 0..n {
691            for j in 0..n {
692                entries[j * n + i] = self.matrix.get(i, j);
693            }
694        }
695        SquareMatrix { entries, dim: n }
696    }
697    /// Compute T^T T.
698    fn gram_matrix(&self) -> SquareMatrix {
699        self.transpose().mul(&self.matrix)
700    }
701    /// Estimate the largest singular value of `self.matrix` via power
702    /// iteration on the Gram matrix T^T T.
703    pub fn largest_singular_value(&self, iters: u32) -> f64 {
704        let gram = self.gram_matrix();
705        let computer = SpectralRadiusComputer::new(iters, 1e-10);
706        let init: Vec<f64> = (0..self.matrix.dim)
707            .map(|i| if i == 0 { 1.0 } else { 0.0 })
708            .collect();
709        let lambda_sq = computer.power_vector_method(&gram, &init);
710        lambda_sq.sqrt()
711    }
712    /// For a 2x2 matrix, compute both singular values exactly and return
713    /// the trace norm ||T||_1 = sigma_1 + sigma_2.
714    pub fn trace_norm_2x2(&self) -> Option<f64> {
715        if self.matrix.dim != 2 {
716            return None;
717        }
718        let gram = self.gram_matrix();
719        let tr = gram.get(0, 0) + gram.get(1, 1);
720        let det = gram.get(0, 0) * gram.get(1, 1) - gram.get(0, 1) * gram.get(1, 0);
721        let disc = (tr * tr - 4.0 * det).max(0.0);
722        let ev1 = ((tr + disc.sqrt()) / 2.0).max(0.0);
723        let ev2 = ((tr - disc.sqrt()) / 2.0).max(0.0);
724        Some(ev1.sqrt() + ev2.sqrt())
725    }
726    /// Check if the operator is (approximately) Hilbert-Schmidt:
727    /// ||T||_HS = sqrt(sum sigma_i^2) = ||T||_F (Frobenius norm).
728    pub fn hilbert_schmidt_norm(&self) -> f64 {
729        self.matrix.frobenius_norm()
730    }
731    /// Estimate the trace norm for a general n x n matrix as the Frobenius
732    /// norm (upper bound, exact when T is normal).
733    pub fn trace_norm_estimate(&self) -> f64 {
734        self.matrix.frobenius_norm()
735    }
736    /// Compute the trace of T (sum of diagonal entries).
737    pub fn trace(&self) -> f64 {
738        self.matrix.trace()
739    }
740    /// Check the Lidskii approximation: for a normal matrix, trace(T) ≈ sum eigenvalues.
741    /// Returns |tr(T) - (lambda_1 + lambda_2)| for 2x2 matrices.
742    pub fn lidskii_error_2x2(&self) -> Option<f64> {
743        if self.matrix.dim != 2 {
744            return None;
745        }
746        let elem = BanachAlgebraElem::new(self.matrix.clone(), "M_2");
747        let evs = elem.spectrum_2x2()?;
748        let sum_ev = evs[0].0 + evs[1].0;
749        Some((self.trace() - sum_ev).abs())
750    }
751}
/// Represents the resolvent data of a linear operator.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ResolventData {
    /// Spectrum (eigenvalues for finite-dimensional).
    pub spectrum: Vec<f64>,
    /// Operator norm bound.
    pub norm_bound: f64,
    /// Whether the operator is compact.
    pub is_compact: bool,
    /// Whether the operator is self-adjoint.
    pub is_self_adjoint: bool,
}
#[allow(dead_code)]
impl ResolventData {
    /// Creates resolvent data; compactness and self-adjointness default to false.
    pub fn new(spectrum: Vec<f64>, norm_bound: f64) -> Self {
        Self {
            spectrum,
            norm_bound,
            is_compact: false,
            is_self_adjoint: false,
        }
    }
    /// Returns the resolvent set (complement of spectrum) membership check.
    ///
    /// `lambda` is resolvent iff it is more than `tol` away from every
    /// spectral value.
    pub fn in_resolvent_set(&self, lambda: f64, tol: f64) -> bool {
        self.spectrum
            .iter()
            .map(|&sv| (lambda - sv).abs())
            .all(|dist| dist > tol)
    }
    /// Estimates ||(λ - A)^{-1}|| using spectral theorem for self-adjoint.
    ///
    /// `None` for non-self-adjoint operators or when `lambda` is
    /// (numerically) in the spectrum.
    pub fn resolvent_norm_estimate(&self, lambda: f64) -> Option<f64> {
        if !self.is_self_adjoint {
            return None;
        }
        // Distance from lambda to the spectrum.
        let mut min_dist = f64::INFINITY;
        for &sv in &self.spectrum {
            min_dist = min_dist.min((lambda - sv).abs());
        }
        if min_dist < 1e-14 {
            None
        } else {
            Some(1.0 / min_dist)
        }
    }
    /// Returns the spectral gap (distance between consecutive eigenvalues, min).
    ///
    /// `None` when there are fewer than two spectral values.
    pub fn spectral_gap(&self) -> Option<f64> {
        if self.spectrum.len() < 2 {
            return None;
        }
        let mut sorted = self.spectrum.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let gap = sorted
            .windows(2)
            .map(|pair| pair[1] - pair[0])
            .fold(f64::INFINITY, f64::min);
        Some(gap)
    }
    /// Checks if the operator is invertible (0 not in spectrum).
    pub fn is_invertible(&self) -> bool {
        self.in_resolvent_set(0.0, 1e-14)
    }
}
/// A square matrix operator represented as a flat row-major array.
///
/// Entry (i, j) lives at index `i * dim + j` of `entries` (see `get`/`set`).
#[derive(Debug, Clone)]
pub struct SquareMatrix {
    /// Row-major entries; `dim * dim` values expected (enforced by `new`).
    pub entries: Vec<f64>,
    /// Dimension (n x n).
    pub dim: usize,
}
822impl SquareMatrix {
823    /// Create a new square matrix from row-major entries.
824    ///
825    /// Panics if `entries.len() != dim * dim`.
826    pub fn new(entries: Vec<f64>, dim: usize) -> Self {
827        assert_eq!(
828            entries.len(),
829            dim * dim,
830            "entries must have dim*dim elements"
831        );
832        SquareMatrix { entries, dim }
833    }
834    /// The n x n identity matrix.
835    pub fn identity(dim: usize) -> Self {
836        let mut entries = vec![0.0; dim * dim];
837        for i in 0..dim {
838            entries[i * dim + i] = 1.0;
839        }
840        SquareMatrix { entries, dim }
841    }
842    /// The n x n zero matrix.
843    pub fn zero(dim: usize) -> Self {
844        SquareMatrix {
845            entries: vec![0.0; dim * dim],
846            dim,
847        }
848    }
849    /// Get entry at (row, col).
850    pub fn get(&self, row: usize, col: usize) -> f64 {
851        self.entries[row * self.dim + col]
852    }
853    /// Set entry at (row, col).
854    pub fn set(&mut self, row: usize, col: usize, val: f64) {
855        self.entries[row * self.dim + col] = val;
856    }
857    /// Matrix addition.
858    pub fn add(&self, other: &SquareMatrix) -> SquareMatrix {
859        assert_eq!(self.dim, other.dim);
860        let entries: Vec<f64> = self
861            .entries
862            .iter()
863            .zip(other.entries.iter())
864            .map(|(a, b)| a + b)
865            .collect();
866        SquareMatrix {
867            entries,
868            dim: self.dim,
869        }
870    }
871    /// Matrix subtraction.
872    pub fn sub(&self, other: &SquareMatrix) -> SquareMatrix {
873        assert_eq!(self.dim, other.dim);
874        let entries: Vec<f64> = self
875            .entries
876            .iter()
877            .zip(other.entries.iter())
878            .map(|(a, b)| a - b)
879            .collect();
880        SquareMatrix {
881            entries,
882            dim: self.dim,
883        }
884    }
885    /// Scalar multiplication.
886    pub fn scale(&self, s: f64) -> SquareMatrix {
887        SquareMatrix {
888            entries: self.entries.iter().map(|x| x * s).collect(),
889            dim: self.dim,
890        }
891    }
892    /// Matrix multiplication.
893    pub fn mul(&self, other: &SquareMatrix) -> SquareMatrix {
894        assert_eq!(self.dim, other.dim);
895        let n = self.dim;
896        let mut entries = vec![0.0; n * n];
897        for i in 0..n {
898            for j in 0..n {
899                let mut s = 0.0;
900                for k in 0..n {
901                    s += self.entries[i * n + k] * other.entries[k * n + j];
902                }
903                entries[i * n + j] = s;
904            }
905        }
906        SquareMatrix { entries, dim: n }
907    }
908    /// Compute A^k (matrix exponentiation by repeated squaring).
909    pub fn pow(&self, k: u32) -> SquareMatrix {
910        if k == 0 {
911            return SquareMatrix::identity(self.dim);
912        }
913        if k == 1 {
914            return self.clone();
915        }
916        let half = self.pow(k / 2);
917        let sq = half.mul(&half);
918        if k % 2 == 0 {
919            sq
920        } else {
921            sq.mul(self)
922        }
923    }
924    /// The operator norm (approximated by the Frobenius norm).
925    pub fn operator_norm(&self) -> f64 {
926        self.frobenius_norm()
927    }
928    /// The Frobenius norm: ||A||_F = sqrt(sum a_ij^2).
929    pub fn frobenius_norm(&self) -> f64 {
930        self.entries.iter().map(|x| x * x).sum::<f64>().sqrt()
931    }
932    /// The trace: sum of diagonal entries.
933    pub fn trace(&self) -> f64 {
934        (0..self.dim).map(|i| self.entries[i * self.dim + i]).sum()
935    }
936    /// Evaluate a polynomial at a matrix: p(A) = c_0 I + c_1 A + c_2 A^2 + ...
937    ///
938    /// Uses Horner's method for efficiency: p(A) = (... ((c_n A + c_{n-1}) A + c_{n-2}) A + ...) + c_0.
939    pub fn poly_eval(&self, poly: &Polynomial) -> SquareMatrix {
940        if poly.coeffs.is_empty() {
941            return SquareMatrix::zero(self.dim);
942        }
943        let n = poly.coeffs.len();
944        let mut result = SquareMatrix::identity(self.dim).scale(poly.coeffs[n - 1]);
945        for i in (0..n - 1).rev() {
946            result = result
947                .mul(self)
948                .add(&SquareMatrix::identity(self.dim).scale(poly.coeffs[i]));
949        }
950        result
951    }
952    /// Approximate spectral radius: r(A) = lim ||A^n||^{1/n}.
953    ///
954    /// Computed by evaluating ||A^n||^{1/n} for increasing n and returning the
955    /// value at `max_iter`.
956    pub fn spectral_radius(&self, max_iter: u32) -> f64 {
957        let mut best = f64::INFINITY;
958        for k in 1..=max_iter {
959            let norm_k = self.pow(k).operator_norm();
960            let r_k = norm_k.powf(1.0 / k as f64);
961            if r_k < best {
962                best = r_k;
963            }
964        }
965        best
966    }
967    /// Compute the resolvent (lambda I - A)^{-1} for a 2x2 matrix using
968    /// the explicit inverse formula.
969    ///
970    /// Returns `None` if the matrix is not 2x2 or the determinant is zero
971    /// (i.e., lambda is in the spectrum).
972    pub fn resolvent_2x2(&self, lambda: f64) -> Option<SquareMatrix> {
973        if self.dim != 2 {
974            return None;
975        }
976        let a = lambda - self.get(0, 0);
977        let b = -self.get(0, 1);
978        let c = -self.get(1, 0);
979        let d = lambda - self.get(1, 1);
980        let det = a * d - b * c;
981        if det.abs() < 1e-12 {
982            return None;
983        }
984        let inv_det = 1.0 / det;
985        Some(SquareMatrix::new(
986            vec![d * inv_det, -b * inv_det, -c * inv_det, a * inv_det],
987            2,
988        ))
989    }
990}
/// Represents a finite-dimensional C*-algebra as a product of matrix algebras
/// (its Artin-Wedderburn form): A = M_{n_1} × M_{n_2} × ... × M_{n_k}.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FiniteDimCStarAlgebra {
    /// Block sizes n_i of the matrix-algebra factors.
    pub blocks: Vec<usize>,
    /// Human-readable name of the algebra.
    pub name: String,
}
#[allow(dead_code)]
impl FiniteDimCStarAlgebra {
    /// Creates a C*-algebra with the given name and block sizes.
    pub fn new(name: &str, blocks: Vec<usize>) -> Self {
        FiniteDimCStarAlgebra {
            blocks,
            name: name.to_string(),
        }
    }
    /// Creates the commutative algebra C^n (n one-dimensional factors).
    pub fn commutative(n: usize) -> Self {
        FiniteDimCStarAlgebra::new(&format!("C^{n}"), vec![1; n])
    }
    /// Creates the full matrix algebra M_n(C) (a single block).
    pub fn matrix_algebra(n: usize) -> Self {
        FiniteDimCStarAlgebra::new(&format!("M_{n}(C)"), vec![n])
    }
    /// Returns the total dimension as a complex vector space: sum of n_i^2.
    pub fn dimension(&self) -> usize {
        self.blocks.iter().map(|&n| n * n).sum()
    }
    /// Returns the number of irreducible representations (one per block).
    pub fn num_irreps(&self) -> usize {
        self.blocks.len()
    }
    /// Checks if the algebra is commutative (every block is 1x1).
    /// Vacuously true for an empty block list.
    pub fn is_commutative(&self) -> bool {
        self.blocks.iter().all(|&n| n == 1)
    }
    /// Returns the rank of the K_0 group (free abelian on the irreps).
    pub fn k0_rank(&self) -> usize {
        self.blocks.len()
    }
    /// Checks if this algebra is simple (a single matrix-algebra block).
    pub fn is_simple(&self) -> bool {
        self.blocks.len() == 1
    }
    /// Trace of a block-scalar element, normalized so that tr(1) = 1 on the
    /// selected block.
    ///
    /// Returns 0.0 for an out-of-range `block_idx`, preserving the original
    /// silent-default behavior for callers that rely on it.
    pub fn normalized_trace(&self, block_idx: usize, value: f64) -> f64 {
        self.blocks
            .get(block_idx)
            .map_or(0.0, |&n| value / n as f64)
    }
}
/// Data for a strongly continuous (C_0) semigroup {T(t)}_{t>=0}.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq)]
pub struct StrongContSemigroup {
    /// Generator A, represented by its (real) spectral data.
    pub generator_spectrum: Vec<f64>,
    /// Growth bound ω in the estimate ||T(t)|| <= M e^{ωt}.
    pub growth_bound: f64,
    /// Constant M in the bound.
    pub bound_constant: f64,
    /// Whether the semigroup is analytic.
    pub is_analytic: bool,
    /// Whether the semigroup is contractive (ω <= 0, M = 1).
    pub is_contractive: bool,
}
#[allow(dead_code)]
impl StrongContSemigroup {
    /// Creates a strongly continuous semigroup with M = 1 and the given
    /// generator spectrum and growth bound.
    ///
    /// Contractivity is inferred from ω <= 0 (since M is fixed at 1);
    /// `is_analytic` defaults to `false`.
    pub fn new(generator_spectrum: Vec<f64>, growth_bound: f64) -> Self {
        let is_contractive = growth_bound <= 0.0;
        StrongContSemigroup {
            generator_spectrum,
            growth_bound,
            bound_constant: 1.0,
            is_analytic: false,
            is_contractive,
        }
    }
    /// Upper bound on the semigroup norm: ||T(t)|| <= M e^{ωt}.
    pub fn norm_bound(&self, t: f64) -> f64 {
        self.bound_constant * (self.growth_bound * t).exp()
    }
    /// Computes T(t)v for a diagonal generator (eigenvalue decomposition):
    /// component-wise v_i * e^{λ_i t}.
    ///
    /// If `initial` and the stored spectrum differ in length, the result is
    /// truncated to the shorter of the two (zip semantics).
    pub fn apply_at_time(&self, t: f64, initial: &[f64]) -> Vec<f64> {
        initial
            .iter()
            .zip(self.generator_spectrum.iter())
            .map(|(&v, &lam)| v * (lam * t).exp())
            .collect()
    }
    /// Checks the necessary spectral condition of the Hille-Yosida theorem:
    /// every λ in σ(A) satisfies λ <= ω, so (ω, ∞) lies in the resolvent set.
    /// (Resolvent norm bounds are not checked here.) A small tolerance
    /// absorbs floating-point noise.
    pub fn check_hille_yosida(&self) -> bool {
        self.generator_spectrum
            .iter()
            .all(|&lam| lam <= self.growth_bound + 1e-10)
    }
    /// Returns the spectral bound s(A) = sup{Re(λ) : λ ∈ σ(A)};
    /// `f64::NEG_INFINITY` when the spectrum is empty.
    pub fn spectral_bound(&self) -> f64 {
        self.generator_spectrum
            .iter()
            .copied()
            .fold(f64::NEG_INFINITY, f64::max)
    }
}