amari_functional/operator/matrix.rs

//! Matrix representation of linear operators.
//!
//! This module provides a matrix-based representation for linear operators
//! on finite-dimensional Hilbert spaces.
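//!
//! A minimal usage sketch follows. The `use` paths are assumed from the crate
//! layout, so the block is marked `ignore` rather than compiled as a doctest.
//!
//! ```ignore
//! use amari_core::Multivector;
//! // Assumed public paths; adjust to the crate's actual module layout.
//! use amari_functional::operator::matrix::MatrixOperator;
//! use amari_functional::operator::traits::LinearOperator;
//!
//! // Diagonal operator on Cl(1,0,0): scale the scalar coefficient by 2.0
//! // and the e1 coefficient by 3.0.
//! let op: MatrixOperator<1, 0, 0> = MatrixOperator::diagonal(&[2.0, 3.0]).unwrap();
//! let x = Multivector::<1, 0, 0>::from_slice(&[1.0, 1.0]);
//! let y = op.apply(&x).unwrap();
//! assert_eq!(y.to_vec(), vec![2.0, 3.0]);
//! ```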

use crate::error::{FunctionalError, Result};
use crate::operator::traits::{AdjointableOperator, BoundedOperator, LinearOperator, OperatorNorm};
use crate::phantom::Bounded;
use amari_core::Multivector;

/// A linear operator represented as a matrix.
///
/// The matrix is stored in row-major order.
#[derive(Debug, Clone)]
pub struct MatrixOperator<const P: usize, const Q: usize, const R: usize> {
    /// Matrix entries in row-major order.
    entries: Vec<f64>,
    /// Number of rows.
    rows: usize,
    /// Number of columns.
    cols: usize,
}

impl<const P: usize, const Q: usize, const R: usize> MatrixOperator<P, Q, R> {
    /// The dimension of the Clifford algebra.
    pub const DIM: usize = 1 << (P + Q + R);

    /// Create a new matrix operator from entries.
    ///
    /// Entries should be in row-major order.
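    ///
    /// A minimal sketch of the row-major layout (marked `ignore` because the
    /// public import path for `MatrixOperator` is assumed rather than taken
    /// from this file):
    ///
    /// ```ignore
    /// // For Cl(1,0,0) the algebra is 2-dimensional, so the matrix must be 2x2.
    /// // Row-major order: [a00, a01, a10, a11].
    /// let m: MatrixOperator<1, 0, 0> =
    ///     MatrixOperator::new(vec![1.0, 2.0, 3.0, 4.0], 2, 2).unwrap();
    /// assert_eq!(m.get(0, 1), 2.0);
    /// assert_eq!(m.get(1, 0), 3.0);
    ///
    /// // An `entries` length that disagrees with `rows * cols` is rejected.
    /// assert!(MatrixOperator::<1, 0, 0>::new(vec![1.0, 2.0], 2, 2).is_err());
    /// ```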
    pub fn new(entries: Vec<f64>, rows: usize, cols: usize) -> Result<Self> {
        if entries.len() != rows * cols {
            return Err(FunctionalError::dimension_mismatch(
                rows * cols,
                entries.len(),
            ));
        }
        if cols != Self::DIM {
            return Err(FunctionalError::dimension_mismatch(Self::DIM, cols));
        }
        if rows != Self::DIM {
            return Err(FunctionalError::dimension_mismatch(Self::DIM, rows));
        }
        Ok(Self {
            entries,
            rows,
            cols,
        })
    }

    /// Create an identity matrix.
    pub fn identity() -> Self {
        let n = Self::DIM;
        let mut entries = vec![0.0; n * n];
        for i in 0..n {
            entries[i * n + i] = 1.0;
        }
        Self {
            entries,
            rows: n,
            cols: n,
        }
    }

    /// Create a zero matrix.
    pub fn zeros() -> Self {
        let n = Self::DIM;
        Self {
            entries: vec![0.0; n * n],
            rows: n,
            cols: n,
        }
    }

    /// Create a diagonal matrix from diagonal entries.
    pub fn diagonal(diag: &[f64]) -> Result<Self> {
        let n = Self::DIM;
        if diag.len() != n {
            return Err(FunctionalError::dimension_mismatch(n, diag.len()));
        }
        let mut entries = vec![0.0; n * n];
        for i in 0..n {
            entries[i * n + i] = diag[i];
        }
        Ok(Self {
            entries,
            rows: n,
            cols: n,
        })
    }

    /// Get a matrix entry.
    ///
    /// Returns `0.0` if `row` or `col` is out of bounds.
    pub fn get(&self, row: usize, col: usize) -> f64 {
        if row < self.rows && col < self.cols {
            self.entries[row * self.cols + col]
        } else {
            0.0
        }
    }

    /// Set a matrix entry.
    ///
    /// Silently does nothing if `row` or `col` is out of bounds.
    pub fn set(&mut self, row: usize, col: usize, value: f64) {
        if row < self.rows && col < self.cols {
            self.entries[row * self.cols + col] = value;
        }
    }

    /// Get the number of rows.
    pub fn rows(&self) -> usize {
        self.rows
    }

    /// Get the number of columns.
    pub fn cols(&self) -> usize {
        self.cols
    }

    /// Compute the transpose of the matrix.
    pub fn transpose(&self) -> Self {
        let mut entries = vec![0.0; self.rows * self.cols];
        for i in 0..self.rows {
            for j in 0..self.cols {
                entries[j * self.rows + i] = self.get(i, j);
            }
        }
        Self {
            entries,
            rows: self.cols,
            cols: self.rows,
        }
    }

    /// Compute the trace of the matrix.
    pub fn trace(&self) -> f64 {
        let n = self.rows.min(self.cols);
        (0..n).map(|i| self.get(i, i)).sum()
    }

    /// Multiply this matrix by another.
    pub fn multiply(&self, other: &Self) -> Result<Self> {
        if self.cols != other.rows {
            return Err(FunctionalError::dimension_mismatch(self.cols, other.rows));
        }

        let mut entries = vec![0.0; self.rows * other.cols];
        for i in 0..self.rows {
            for j in 0..other.cols {
                let mut sum = 0.0;
                for k in 0..self.cols {
                    sum += self.get(i, k) * other.get(k, j);
                }
                entries[i * other.cols + j] = sum;
            }
        }

        Ok(Self {
            entries,
            rows: self.rows,
            cols: other.cols,
        })
    }

    /// Add another matrix.
    pub fn add(&self, other: &Self) -> Result<Self> {
        if self.rows != other.rows || self.cols != other.cols {
            return Err(FunctionalError::dimension_mismatch(
                self.rows * self.cols,
                other.rows * other.cols,
            ));
        }

        let entries: Vec<f64> = self
            .entries
            .iter()
            .zip(other.entries.iter())
            .map(|(a, b)| a + b)
            .collect();

        Ok(Self {
            entries,
            rows: self.rows,
            cols: self.cols,
        })
    }

    /// Scale the matrix by a scalar.
    pub fn scale(&self, scalar: f64) -> Self {
        let entries: Vec<f64> = self.entries.iter().map(|x| x * scalar).collect();
        Self {
            entries,
            rows: self.rows,
            cols: self.cols,
        }
    }

    /// Check whether the matrix is symmetric within the given tolerance.
    pub fn is_symmetric(&self, tolerance: f64) -> bool {
        if self.rows != self.cols {
            return false;
        }
        for i in 0..self.rows {
            for j in (i + 1)..self.cols {
                if (self.get(i, j) - self.get(j, i)).abs() > tolerance {
                    return false;
                }
            }
        }
        true
    }
}

impl<const P: usize, const Q: usize, const R: usize> LinearOperator<Multivector<P, Q, R>>
    for MatrixOperator<P, Q, R>
{
    fn apply(&self, x: &Multivector<P, Q, R>) -> Result<Multivector<P, Q, R>> {
        let x_coeffs = x.to_vec();
        if x_coeffs.len() != self.cols {
            return Err(FunctionalError::dimension_mismatch(
                self.cols,
                x_coeffs.len(),
            ));
        }

        let mut result = vec![0.0; self.rows];
        for i in 0..self.rows {
            for j in 0..self.cols {
                result[i] += self.get(i, j) * x_coeffs[j];
            }
        }

        Ok(Multivector::<P, Q, R>::from_slice(&result))
    }

    fn domain_dimension(&self) -> Option<usize> {
        Some(self.cols)
    }

    fn codomain_dimension(&self) -> Option<usize> {
        Some(self.rows)
    }
}

impl<const P: usize, const Q: usize, const R: usize> OperatorNorm for MatrixOperator<P, Q, R> {
    fn norm(&self) -> f64 {
        // Estimate the spectral norm ||A||₂ as sqrt(λ_max(AᵀA)), where the
        // dominant eigenvalue of AᵀA is approximated by power iteration.
        // The matrix is square by construction, so `multiply` cannot fail.
        let ata = self.transpose().multiply(self).unwrap();
        power_iteration_spectral_radius(&ata, 100).sqrt()
    }

    fn frobenius_norm(&self) -> Option<f64> {
        let sum_sq: f64 = self.entries.iter().map(|x| x * x).sum();
        Some(sum_sq.sqrt())
    }
}

impl<const P: usize, const Q: usize, const R: usize>
    BoundedOperator<Multivector<P, Q, R>, Multivector<P, Q, R>, Bounded>
    for MatrixOperator<P, Q, R>
{
    fn operator_norm(&self) -> f64 {
        self.norm()
    }
}

impl<const P: usize, const Q: usize, const R: usize> AdjointableOperator<Multivector<P, Q, R>>
    for MatrixOperator<P, Q, R>
{
    type Adjoint = MatrixOperator<P, Q, R>;

    fn adjoint(&self) -> Self::Adjoint {
        // For real matrices, the adjoint is the transpose
        self.transpose()
    }

    fn is_self_adjoint(&self) -> bool {
        self.is_symmetric(1e-10)
    }

    fn is_normal(&self) -> bool {
        // Check if AA* = A*A
        let a_adj = self.transpose();
        let aa_adj = self.multiply(&a_adj).unwrap();
        let a_adj_a = a_adj.multiply(self).unwrap();

        for i in 0..self.rows {
            for j in 0..self.cols {
                if (aa_adj.get(i, j) - a_adj_a.get(i, j)).abs() > 1e-10 {
                    return false;
                }
            }
        }
        true
    }
}

/// Power iteration to estimate the spectral radius (the largest eigenvalue
/// magnitude) of a square matrix. The estimate converges when the matrix has a
/// dominant eigenvalue and the starting vector has a component along it.
fn power_iteration_spectral_radius<const P: usize, const Q: usize, const R: usize>(
    matrix: &MatrixOperator<P, Q, R>,
    iterations: usize,
) -> f64 {
    let n = matrix.rows;
    if n == 0 {
        return 0.0;
    }

    // Start with a random-ish vector
    let mut v: Vec<f64> = (0..n).map(|i| 1.0 / ((i + 1) as f64)).collect();

    for _ in 0..iterations {
        // Compute Av
        let mut w = vec![0.0; n];
        for i in 0..n {
            for j in 0..n {
                w[i] += matrix.get(i, j) * v[j];
            }
        }

        // Normalize
        let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm < 1e-15 {
            return 0.0;
        }
        for x in &mut w {
            *x /= norm;
        }

        v = w;
    }

    // Compute Rayleigh quotient
    let mut av = vec![0.0; n];
    for i in 0..n {
        for j in 0..n {
            av[i] += matrix.get(i, j) * v[j];
        }
    }

    let numerator: f64 = v.iter().zip(av.iter()).map(|(a, b)| a * b).sum();
    let denominator: f64 = v.iter().map(|x| x * x).sum();

    if denominator.abs() < 1e-15 {
        0.0
    } else {
        (numerator / denominator).abs()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_identity_matrix() {
        let id: MatrixOperator<2, 0, 0> = MatrixOperator::identity();
        let x = Multivector::<2, 0, 0>::from_slice(&[1.0, 2.0, 3.0, 4.0]);
        let y = id.apply(&x).unwrap();
        assert_eq!(x.to_vec(), y.to_vec());
    }

    #[test]
    fn test_diagonal_matrix() {
        let diag: MatrixOperator<2, 0, 0> =
            MatrixOperator::diagonal(&[2.0, 3.0, 4.0, 5.0]).unwrap();
        let x = Multivector::<2, 0, 0>::from_slice(&[1.0, 1.0, 1.0, 1.0]);
        let y = diag.apply(&x).unwrap();
        assert_eq!(y.to_vec(), vec![2.0, 3.0, 4.0, 5.0]);
    }

    #[test]
    fn test_transpose() {
        let mut matrix: MatrixOperator<1, 0, 0> = MatrixOperator::zeros();
        matrix.set(0, 1, 1.0);
        let transposed = matrix.transpose();
        assert!((transposed.get(1, 0) - 1.0).abs() < 1e-10);
        assert!(transposed.get(0, 1).abs() < 1e-10);
    }

    #[test]
    fn test_trace() {
        let id: MatrixOperator<2, 0, 0> = MatrixOperator::identity();
        assert!((id.trace() - 4.0).abs() < 1e-10); // 4-dimensional space
    }

    #[test]
    fn test_matrix_multiplication() {
        let a: MatrixOperator<1, 0, 0> = MatrixOperator::identity();
        let b: MatrixOperator<1, 0, 0> = MatrixOperator::identity();
        let c = a.multiply(&b).unwrap();

        // I * I = I
        for i in 0..2 {
            for j in 0..2 {
                let expected = if i == j { 1.0 } else { 0.0 };
                assert!((c.get(i, j) - expected).abs() < 1e-10);
            }
        }
    }

    #[test]
    fn test_symmetric_check() {
        let id: MatrixOperator<2, 0, 0> = MatrixOperator::identity();
        assert!(id.is_symmetric(1e-10));
        assert!(id.is_self_adjoint());
    }
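
    // Illustrative addition (a sketch): verifies the defining adjoint property
    // <Ax, y> = <x, Aᵀy> via plain dot products on the coefficient vectors from
    // `to_vec`, and that an orthogonal (rotation) matrix is normal without being
    // symmetric. Assumes `from_slice` accepts a DIM-length slice, as in the
    // tests above.
    #[test]
    fn test_adjoint_property() {
        // A = [[1, 2], [3, 4]] on the 2-dimensional algebra Cl(1,0,0).
        let a: MatrixOperator<1, 0, 0> =
            MatrixOperator::new(vec![1.0, 2.0, 3.0, 4.0], 2, 2).unwrap();
        let x = Multivector::<1, 0, 0>::from_slice(&[1.0, 2.0]);
        let y = Multivector::<1, 0, 0>::from_slice(&[5.0, 6.0]);

        let ax = a.apply(&x).unwrap();
        let adj_y = a.adjoint().apply(&y).unwrap();

        let lhs: f64 = ax.to_vec().iter().zip(y.to_vec().iter()).map(|(p, q)| p * q).sum();
        let rhs: f64 = x.to_vec().iter().zip(adj_y.to_vec().iter()).map(|(p, q)| p * q).sum();
        assert!((lhs - rhs).abs() < 1e-10);

        // A 90-degree rotation commutes with its transpose (AAᵀ = AᵀA = I),
        // so it is normal even though it is not symmetric.
        let rot: MatrixOperator<1, 0, 0> =
            MatrixOperator::new(vec![0.0, -1.0, 1.0, 0.0], 2, 2).unwrap();
        assert!(rot.is_normal());
        assert!(!rot.is_self_adjoint());
    }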

    #[test]
    fn test_frobenius_norm() {
        let id: MatrixOperator<2, 0, 0> = MatrixOperator::identity();
        // ||I||_F = sqrt(n) for n×n identity
        let expected = 2.0; // sqrt(4) = 2
        assert!((id.frobenius_norm().unwrap() - expected).abs() < 1e-10);
    }

    #[test]
    fn test_operator_norm_identity() {
        let id: MatrixOperator<2, 0, 0> = MatrixOperator::identity();
        // ||I||₂ = 1
        assert!((id.operator_norm() - 1.0).abs() < 0.1);
    }
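
    // Illustrative addition (a sketch): for a diagonal matrix the spectral norm
    // equals the largest absolute diagonal entry, so the power-iteration
    // estimate behind `norm()` should land very close to 5.0 here.
    #[test]
    fn test_operator_norm_diagonal() {
        let diag: MatrixOperator<2, 0, 0> =
            MatrixOperator::diagonal(&[2.0, -3.0, 4.0, 5.0]).unwrap();
        assert!((diag.norm() - 5.0).abs() < 1e-6);
    }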
}