use crate::linalg::{split_two_col_slices, LinalgError};
use crate::linalg::symmetric_eigen::givens;
use crate::traits::{LinalgScalar, MatrixMut};
use crate::matrix::vector::Vector;
use crate::Matrix;
use num_traits::{Float, One, Zero};
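/// Reduces an M×N matrix (M >= N) to upper-bidiagonal form using Householder
/// reflections (Golub-Kahan bidiagonalization).
///
/// On return, `diag` holds the diagonal and `off_diag` the superdiagonal of the
/// bidiagonal matrix B with A = U * B * Vᴴ. The reflector vectors are left in
/// `a`, which is overwritten. When `compute_u` / `compute_v` are set, the
/// reflections are accumulated into `u` (M×M) / `v` (N×N).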
pub(crate) fn bidiagonalize<T: LinalgScalar>(
a: &mut impl MatrixMut<T>,
diag: &mut [T::Real],
off_diag: &mut [T::Real],
u: &mut impl MatrixMut<T>,
v: &mut impl MatrixMut<T>,
compute_u: bool,
compute_v: bool,
) {
let m = a.nrows();
let n = a.ncols();
assert!(m >= n, "bidiagonalize requires M >= N");
assert!(diag.len() >= n, "diag must hold at least N elements");
assert!(off_diag.len() + 1 >= n, "off_diag must hold at least N - 1 elements");
if compute_u {
for i in 0..m {
for j in 0..m {
*u.get_mut(i, j) = if i == j { T::one() } else { T::zero() };
}
}
}
if compute_v {
for i in 0..n {
for j in 0..n {
*v.get_mut(i, j) = if i == j { T::one() } else { T::zero() };
}
}
}
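// For each column k: a Householder reflection applied from the left zeroes
// A[k+1.., k]; for k < n - 2, a second reflection applied from the right zeroes
// A[k, k+2..]. The reflector vectors stay in A, each with an implicit leading 1.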
for k in 0..n {
let sub_col = a.col_as_slice(k, k);
let mut norm_sq = <T::Real as Zero>::zero();
for &val in sub_col {
norm_sq = norm_sq + (val * val.conj()).re();
}
if norm_sq > T::lepsilon() * T::lepsilon() {
let norm = norm_sq.lsqrt();
let akk = *a.get(k, k);
let alpha = akk.modulus();
let sigma = if alpha < T::lepsilon() {
T::from_real(norm)
} else {
T::from_real(norm) * (akk / T::from_real(alpha))
};
let v0 = akk + sigma;
*a.get_mut(k, k) = v0;
{
let sub_col = a.col_as_mut_slice(k, k + 1);
for x in sub_col.iter_mut() {
*x = *x / v0;
}
}
let tau = v0 / sigma;
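// Apply H = I - tau * v * vᴴ to the trailing columns; v lives in A[k.., k]
// with an implicit 1 in row k (A[k, k] currently holds v0).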
for j in (k + 1)..n {
let mut dot = *a.get(k, j);
let (v_slice, a_j_slice) = split_two_col_slices(a, k, j, k + 1);
for idx in 0..v_slice.len() {
dot = dot + v_slice[idx].conj() * a_j_slice[idx];
}
dot = dot * tau;
*a.get_mut(k, j) = *a.get(k, j) - dot;
let (v_slice, a_j_slice) = split_two_col_slices(a, k, j, k + 1);
crate::simd::axpy_neg_dispatch(a_j_slice, dot, v_slice);
}
if compute_u {
for row in 0..m {
let mut dot = *u.get(row, k);
for i in (k + 1)..m {
dot = dot + *u.get(row, i) * *a.get(i, k);
}
dot = dot * tau;
*u.get_mut(row, k) = *u.get(row, k) - dot;
for i in (k + 1)..m {
let vi_conj = (*a.get(i, k)).conj();
*u.get_mut(row, i) = *u.get(row, i) - dot * vi_conj;
}
}
}
diag[k] = (T::zero() - sigma).re();
} else {
diag[k] = (*a.get(k, k)).re();
}
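// Row reflection: zero A[k, k+2..] from the right, leaving only the
// superdiagonal entry A[k, k+1]; needed only while at least two entries
// remain to the right of the diagonal.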
if k + 2 < n {
let mut norm_sq = <T::Real as Zero>::zero();
for j in (k + 1)..n {
let val = *a.get(k, j);
norm_sq = norm_sq + (val * val.conj()).re();
}
if norm_sq > T::lepsilon() * T::lepsilon() {
let norm = norm_sq.lsqrt();
let ak_k1 = *a.get(k, k + 1);
let alpha = ak_k1.modulus();
let sigma = if alpha < T::lepsilon() {
T::from_real(norm)
} else {
T::from_real(norm) * (ak_k1 / T::from_real(alpha))
};
let v0 = ak_k1 + sigma;
*a.get_mut(k, k + 1) = v0;
for j in (k + 2)..n {
let val = *a.get(k, j) / v0;
*a.get_mut(k, j) = val;
}
let tau = v0 / sigma;
for i in (k + 1)..m {
let mut dot = *a.get(i, k + 1);
for j in (k + 2)..n {
dot = dot + *a.get(i, j) * *a.get(k, j);
}
dot = dot * tau;
*a.get_mut(i, k + 1) = *a.get(i, k + 1) - dot;
for j in (k + 2)..n {
let vj_conj = (*a.get(k, j)).conj();
*a.get_mut(i, j) = *a.get(i, j) - dot * vj_conj;
}
}
if compute_v {
for row in 0..n {
let mut dot = *v.get(row, k + 1);
for j in (k + 2)..n {
dot = dot + *v.get(row, j) * *a.get(k, j);
}
dot = dot * tau;
*v.get_mut(row, k + 1) = *v.get(row, k + 1) - dot;
for j in (k + 2)..n {
let vj_conj = (*a.get(k, j)).conj();
*v.get_mut(row, j) = *v.get(row, j) - dot * vj_conj;
}
}
}
off_diag[k] = (T::zero() - sigma).re();
} else {
off_diag[k] = (*a.get(k, k + 1)).re();
}
} else if k + 1 < n {
off_diag[k] = (*a.get(k, k + 1)).re();
}
}
}
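/// Diagonalizes an upper-bidiagonal matrix (diagonal in `diag`, superdiagonal in
/// `off_diag`, with `off_diag.len() == diag.len() - 1`) by implicit-shift QR
/// iteration (the Golub-Kahan SVD step): negligible off-diagonal entries are
/// deflated, a Wilkinson shift is taken from the trailing 2×2 block of BᵀB, and
/// the resulting bulge is chased out with Givens rotations, accumulated into
/// `u` and `v` when requested.
///
/// On success, `diag` holds the singular values sorted in non-increasing order.
/// Returns `LinalgError::ConvergenceFailure` if convergence is not reached
/// within `max_iter` iterations.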
pub(crate) fn bidiagonal_qr<T: LinalgScalar>(
diag: &mut [T::Real],
off_diag: &mut [T::Real],
u: &mut impl MatrixMut<T>,
v: &mut impl MatrixMut<T>,
compute_u: bool,
compute_v: bool,
max_iter: usize,
) -> Result<(), LinalgError> {
let n = diag.len();
if n <= 1 {
if n == 1 && diag[0] < <T::Real as Zero>::zero() {
diag[0] = <T::Real as Zero>::zero() - diag[0];
if compute_u {
let m = u.nrows();
for i in 0..m {
let val = *u.get(i, 0);
*u.get_mut(i, 0) = T::zero() - val;
}
}
}
return Ok(());
}
let five = <T::Real as One>::one() + <T::Real as One>::one()
+ <T::Real as One>::one() + <T::Real as One>::one() + <T::Real as One>::one();
let eps = T::lepsilon() * five;
let mut sigma_max = <T::Real as Zero>::zero();
for i in 0..n {
let d_abs = diag[i].abs();
if d_abs > sigma_max {
sigma_max = d_abs;
}
}
let threshold = eps * sigma_max;
let mut iter = 0usize;
let mut hi = n - 1;
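// Work on the active block diag[lo..=hi]: shrink `hi` past converged trailing
// values, then scan downward for the start `lo` of the unreduced block,
// stopping at the first negligible off-diagonal entry.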
while hi > 0 {
{
if off_diag[hi - 1].abs() <= threshold {
off_diag[hi - 1] = <T::Real as Zero>::zero();
hi -= 1;
continue;
}
}
let mut lo = hi - 1;
while lo > 0 {
if off_diag[lo - 1].abs() <= threshold {
off_diag[lo - 1] = <T::Real as Zero>::zero();
break;
}
lo -= 1;
}
iter += 1;
if iter > max_iter {
return Err(LinalgError::ConvergenceFailure);
}
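// A negligible diagonal entry inside the block would stall the shift strategy:
// zero it explicitly and chase the adjacent off-diagonal entry out of the block
// with Givens rotations applied from the left, accumulated into U when requested.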
{
let zero = <T::Real as Zero>::zero();
let mut found_zero = false;
for idx in lo..hi {
if diag[idx].abs() <= eps {
diag[idx] = zero;
let mut z = off_diag[idx];
off_diag[idx] = zero;
for j in (idx + 1)..=hi {
let (c, s, _r) = givens(diag[j], z);
diag[j] = c * diag[j] + s * z;
if j < hi {
z = zero - s * off_diag[j];
off_diag[j] = c * off_diag[j];
}
if compute_u {
let mu = u.nrows();
for row in 0..mu {
let uj = *u.get(row, j);
let ui = *u.get(row, idx);
*u.get_mut(row, j) =
T::from_real(c) * uj + T::from_real(s) * ui;
*u.get_mut(row, idx) =
T::from_real(c) * ui - T::from_real(s) * uj;
}
}
}
found_zero = true;
break;
}
}
if found_zero {
continue;
}
}
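// Wilkinson shift: the eigenvalue of the trailing 2×2 block of BᵀB that is
// closest to its bottom-right entry.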
let d_hi = diag[hi];
let d_hi1 = diag[hi - 1];
let e_hi1 = off_diag[hi - 1];
let e_hi2 = if hi >= 2 && hi - 2 >= lo {
off_diag[hi - 2]
} else {
<T::Real as Zero>::zero()
};
let t11 = d_hi1 * d_hi1 + e_hi2 * e_hi2;
let t12 = d_hi1 * e_hi1;
let t22 = d_hi * d_hi + e_hi1 * e_hi1;
let two = <T::Real as One>::one() + <T::Real as One>::one();
let d = (t11 - t22) / two;
let sign_d = if d >= <T::Real as Zero>::zero() {
<T::Real as One>::one()
} else {
<T::Real as Zero>::zero() - <T::Real as One>::one()
};
let mu = t22 - t12 * t12 / (d + sign_d * d.hypot(t12));
let mut x = diag[lo] * diag[lo] - mu;
let mut z = diag[lo] * off_diag[lo];
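// Bulge chase: alternate right rotations (columns k, k+1, accumulated into V)
// and left rotations (rows k, k+1, accumulated into U) to push the off-pattern
// entry down the band until it drops off the end of the block.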
for k in lo..hi {
let (c, s, _r) = givens(x, z);
if k > lo {
off_diag[k - 1] = c * x + s * z;
}
let dk = diag[k];
let ek = off_diag[k];
let dk1 = diag[k + 1];
diag[k] = c * dk + s * ek;
off_diag[k] = c * ek - s * dk;
let bulge = s * dk1;
diag[k + 1] = c * dk1;
if compute_v {
let nv = v.nrows();
for row in 0..nv {
let vk = *v.get(row, k);
let vk1 = *v.get(row, k + 1);
*v.get_mut(row, k) = T::from_real(c) * vk + T::from_real(s) * vk1;
*v.get_mut(row, k + 1) = T::from_real(c) * vk1 - T::from_real(s) * vk;
}
}
let (c2, s2, _r) = givens(diag[k], bulge);
diag[k] = c2 * diag[k] + s2 * bulge;
let old_ek = off_diag[k];
let old_dk1 = diag[k + 1];
off_diag[k] = c2 * old_ek + s2 * old_dk1;
diag[k + 1] = c2 * old_dk1 - s2 * old_ek;
if k + 1 < hi {
let old_ek1 = off_diag[k + 1];
x = off_diag[k];
z = s2 * old_ek1;
off_diag[k + 1] = c2 * old_ek1;
}
if compute_u {
let mu = u.nrows();
for row in 0..mu {
let uk = *u.get(row, k);
let uk1 = *u.get(row, k + 1);
*u.get_mut(row, k) = T::from_real(c2) * uk + T::from_real(s2) * uk1;
*u.get_mut(row, k + 1) = T::from_real(c2) * uk1 - T::from_real(s2) * uk;
}
}
}
}
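// Singular values must be non-negative: flip the sign of any negative value
// together with the corresponding column of U.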
for i in 0..n {
if diag[i] < <T::Real as Zero>::zero() {
diag[i] = <T::Real as Zero>::zero() - diag[i];
if compute_u {
let m = u.nrows();
for row in 0..m {
let val = *u.get(row, i);
*u.get_mut(row, i) = T::zero() - val;
}
}
}
}
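// Selection sort into non-increasing order, swapping the matching columns of U and V.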
for i in 0..n {
let mut max_idx = i;
for j in (i + 1)..n {
if diag[j] > diag[max_idx] {
max_idx = j;
}
}
if max_idx != i {
diag.swap(i, max_idx);
if compute_u {
let m = u.nrows();
for row in 0..m {
let tmp = *u.get(row, i);
*u.get_mut(row, i) = *u.get(row, max_idx);
*u.get_mut(row, max_idx) = tmp;
}
}
if compute_v {
let nv = v.nrows();
for row in 0..nv {
let tmp = *v.get(row, i);
*v.get_mut(row, i) = *v.get(row, max_idx);
*v.get_mut(row, max_idx) = tmp;
}
}
}
}
Ok(())
}
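/// Singular value decomposition A = U * Σ * Vᴴ of an M×N matrix with M >= N.
///
/// `u` is M×M, `vt` is N×N (the conjugate transpose of V), and the singular
/// values are stored in non-increasing order.
///
/// A minimal usage sketch, mirroring the tests below (marked `ignore` so it is
/// not compiled as a doctest):
///
/// ```ignore
/// let a = Matrix::new([[3.0_f64, 2.0], [2.0, 3.0]]);
/// let svd = a.svd().unwrap();
/// assert!((svd.singular_values()[0] - 5.0).abs() < 1e-10);
/// assert!((svd.singular_values()[1] - 1.0).abs() < 1e-10);
/// ```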
#[derive(Debug, Clone)]
pub struct SvdDecomposition<T: LinalgScalar, const M: usize, const N: usize> {
u: Matrix<T, M, M>,
singular_values: [T::Real; N],
vt: Matrix<T, N, N>,
}
impl<T: LinalgScalar, const M: usize, const N: usize> SvdDecomposition<T, M, N> {
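/// Computes the full SVD of `a` via Householder bidiagonalization followed by
/// implicit-shift QR on the bidiagonal matrix.
///
/// The input is scaled by its largest absolute entry before decomposition to
/// reduce the risk of overflow/underflow, and the singular values are rescaled
/// afterwards. Panics if M < N; transpose wide matrices first.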
pub fn new(a: &Matrix<T, M, N>) -> Result<Self, LinalgError> {
assert!(M >= N, "SVD requires M >= N; transpose first for wide matrices");
if N == 0 {
return Ok(Self {
u: Matrix::<T, M, M>::eye(),
singular_values: [<T::Real as Zero>::zero(); N],
vt: Matrix::<T, N, N>::zeros(),
});
}
let mut work = *a;
let mut amax = <T::Real as Zero>::zero();
for col in 0..N {
for row in 0..M {
let v = work[(row, col)].modulus();
if v > amax {
amax = v;
}
}
}
let has_scale = amax > <T::Real as Zero>::zero();
if has_scale {
let inv_amax = T::from_real(<T::Real as One>::one() / amax);
for col in 0..N {
for row in 0..M {
work[(row, col)] = work[(row, col)] * inv_amax;
}
}
}
let mut u = Matrix::<T, M, M>::zeros();
let mut v = Matrix::<T, N, N>::zeros();
let mut diag = [<T::Real as Zero>::zero(); N];
let mut off_diag = [<T::Real as Zero>::zero(); N];
bidiagonalize(&mut work, &mut diag, &mut off_diag, &mut u, &mut v, true, true);
bidiagonal_qr::<T>(
&mut diag,
&mut off_diag[..N.saturating_sub(1)],
&mut u,
&mut v,
true,
true,
30 * M.max(N),
)?;
if has_scale {
for s in diag.iter_mut() {
*s = *s * amax;
}
}
let mut vt = Matrix::<T, N, N>::zeros();
for i in 0..N {
for j in 0..N {
vt[(i, j)] = v[(j, i)].conj();
}
}
Ok(Self {
u,
singular_values: diag,
vt,
})
}
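/// Computes only the singular values, skipping the accumulation of U and V.
/// Panics if M < N; transpose wide matrices first.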
pub fn singular_values_only(a: &Matrix<T, M, N>) -> Result<[T::Real; N], LinalgError> {
assert!(M >= N, "SVD requires M >= N; transpose first for wide matrices");
if N == 0 {
return Ok([<T::Real as Zero>::zero(); N]);
}
let mut work = *a;
let mut amax = <T::Real as Zero>::zero();
for col in 0..N {
for row in 0..M {
let v = work[(row, col)].modulus();
if v > amax {
amax = v;
}
}
}
let has_scale = amax > <T::Real as Zero>::zero();
if has_scale {
let inv_amax = T::from_real(<T::Real as One>::one() / amax);
for col in 0..N {
for row in 0..M {
work[(row, col)] = work[(row, col)] * inv_amax;
}
}
}
let mut u = Matrix::<T, M, M>::zeros();
let mut v = Matrix::<T, N, N>::zeros();
let mut diag = [<T::Real as Zero>::zero(); N];
let mut off_diag = [<T::Real as Zero>::zero(); N];
bidiagonalize(&mut work, &mut diag, &mut off_diag, &mut u, &mut v, false, false);
bidiagonal_qr::<T>(
&mut diag,
&mut off_diag[..N.saturating_sub(1)],
&mut u,
&mut v,
false,
false,
30 * M.max(N),
)?;
if has_scale {
for s in diag.iter_mut() {
*s = *s * amax;
}
}
Ok(diag)
}
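/// The singular values, sorted in non-increasing order.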
#[inline]
pub fn singular_values(&self) -> &[T::Real; N] {
&self.singular_values
}
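/// The left singular vectors U (M×M).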
#[inline]
pub fn u(&self) -> &Matrix<T, M, M> {
&self.u
}
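/// The conjugate-transposed right singular vectors Vᴴ (N×N).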
#[inline]
pub fn vt(&self) -> &Matrix<T, N, N> {
&self.vt
}
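/// Number of singular values strictly greater than `tol`.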
pub fn rank(&self, tol: T::Real) -> usize {
self.singular_values.iter().filter(|&&s| s > tol).count()
}
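/// Least-squares / minimum-norm solution of A·x ≈ b via the pseudo-inverse.
/// Singular values below max(M, N) * ε * σ_max are treated as zero.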
pub fn solve(&self, b: &Vector<T, M>) -> Vector<T, N> {
let threshold = if N == 0 {
<T::Real as Zero>::zero()
} else {
let dim = <T::Real as num_traits::NumCast>::from(M.max(N)).unwrap();
T::Real::epsilon() * self.singular_values[0] * dim
};
let mut uhb = [T::zero(); N];
for k in 0..N {
let mut sum = T::zero();
for i in 0..M {
sum = sum + self.u[(i, k)].conj() * b[i];
}
uhb[k] = sum;
}
for k in 0..N {
if self.singular_values[k] > threshold {
uhb[k] = uhb[k] / T::from_real(self.singular_values[k]);
} else {
uhb[k] = T::zero();
}
}
let mut x = [T::zero(); N];
for i in 0..N {
let mut sum = T::zero();
for k in 0..N {
sum = sum + self.vt[(k, i)].conj() * uhb[k];
}
x[i] = sum;
}
Vector::from_array(x)
}
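/// Moore-Penrose pseudo-inverse A⁺ = V * Σ⁺ * Uᴴ, truncating singular values
/// below max(M, N) * ε * σ_max.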
pub fn pseudo_inverse(&self) -> Matrix<T, N, M> {
let threshold = if N == 0 {
<T::Real as Zero>::zero()
} else {
let dim = <T::Real as num_traits::NumCast>::from(M.max(N)).unwrap();
T::Real::epsilon() * self.singular_values[0] * dim
};
let mut result = Matrix::<T, N, M>::zeros();
for i in 0..N {
for j in 0..M {
let mut sum = T::zero();
for k in 0..N {
if self.singular_values[k] > threshold {
let v_ik = self.vt[(k, i)].conj();
let uh_kj = self.u[(j, k)].conj();
sum = sum + v_ik * uh_kj / T::from_real(self.singular_values[k]);
}
}
result[(i, j)] = sum;
}
}
result
}
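/// The 2-norm condition number σ_max / σ_min (infinity if σ_min is zero, one if N == 0).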
pub fn condition_number(&self) -> T::Real {
if N == 0 {
return <T::Real as One>::one();
}
let s_max = self.singular_values[0];
let s_min = self.singular_values[N - 1];
if s_min == <T::Real as Zero>::zero() {
T::Real::infinity()
} else {
s_max / s_min
}
}
}
impl<T: LinalgScalar, const M: usize, const N: usize> Matrix<T, M, N> {
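/// Computes the singular value decomposition of this matrix. Requires M >= N.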
pub fn svd(&self) -> Result<SvdDecomposition<T, M, N>, LinalgError> {
SvdDecomposition::new(self)
}
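/// Computes only the singular values of this matrix. Requires M >= N.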
pub fn singular_values_only(&self) -> Result<[T::Real; N], LinalgError> {
SvdDecomposition::singular_values_only(self)
}
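/// Moore-Penrose pseudo-inverse via the SVD.
///
/// A sketch mirroring the `pseudo_inverse_rectangular` test below (marked
/// `ignore` so it is not compiled as a doctest):
///
/// ```ignore
/// let a = Matrix::new([[1.0_f64, 2.0], [3.0, 4.0], [5.0, 6.0]]);
/// let pinv = a.pinv().unwrap();   // 2×3
/// let apa = a * (pinv * a);       // ≈ a
/// ```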
pub fn pinv(&self) -> Result<Matrix<T, N, M>, LinalgError> {
Ok(self.svd()?.pseudo_inverse())
}
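/// Least-squares solve via the SVD; see [`SvdDecomposition::solve`].
///
/// A sketch mirroring the `solve_overdetermined` test below (marked `ignore`
/// so it is not compiled as a doctest):
///
/// ```ignore
/// let a = Matrix::new([[1.0_f64, 0.0], [0.0, 1.0], [1.0, 1.0]]);
/// let b = Vector::from_array([1.0, 2.0, 3.0]);
/// let x = a.solve_svd(&b).unwrap();   // ≈ [1.0, 2.0]
/// ```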
pub fn solve_svd(&self, b: &Vector<T, M>) -> Result<Vector<T, N>, LinalgError> {
Ok(self.svd()?.solve(b))
}
}
#[cfg(test)]
mod tests {
use super::*;
const TOL: f64 = 1e-10;
fn assert_near(a: f64, b: f64, tol: f64, msg: &str) {
assert!(
(a - b).abs() < tol,
"{}: {} vs {} (diff {})",
msg,
a,
b,
(a - b).abs()
);
}
#[test]
fn identity_2x2() {
let a: Matrix<f64, 2, 2> = Matrix::eye();
let svd = a.svd().unwrap();
for i in 0..2 {
assert_near(svd.singular_values()[i], 1.0, TOL, &format!("σ[{}]", i));
}
let u = svd.u();
let utu = u.transpose() * *u;
for i in 0..2 {
for j in 0..2 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_near(utu[(i, j)], expected, TOL, &format!("U^TU[({},{})]", i, j));
}
}
let vt = svd.vt();
let vtv = *vt * vt.transpose();
for i in 0..2 {
for j in 0..2 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_near(vtv[(i, j)], expected, TOL, &format!("V^TV[({},{})]", i, j));
}
}
}
#[test]
fn identity_3x3() {
let a: Matrix<f64, 3, 3> = Matrix::eye();
let svd = a.svd().unwrap();
for i in 0..3 {
assert_near(svd.singular_values()[i], 1.0, TOL, &format!("σ[{}]", i));
}
}
#[test]
fn diagonal_matrix() {
let a = Matrix::new([
[5.0_f64, 0.0, 0.0],
[0.0, 3.0, 0.0],
[0.0, 0.0, 1.0],
]);
let svd = a.svd().unwrap();
assert_near(svd.singular_values()[0], 5.0, TOL, "σ[0]");
assert_near(svd.singular_values()[1], 3.0, TOL, "σ[1]");
assert_near(svd.singular_values()[2], 1.0, TOL, "σ[2]");
}
#[test]
fn diagonal_with_negative() {
let a = Matrix::new([
[-3.0_f64, 0.0],
[0.0, 2.0],
]);
let svd = a.svd().unwrap();
assert_near(svd.singular_values()[0], 3.0, TOL, "σ[0]");
assert_near(svd.singular_values()[1], 2.0, TOL, "σ[1]");
}
#[test]
fn known_2x2() {
let a = Matrix::new([
[3.0_f64, 2.0],
[2.0, 3.0],
]);
let svd = a.svd().unwrap();
assert_near(svd.singular_values()[0], 5.0, TOL, "σ[0]");
assert_near(svd.singular_values()[1], 1.0, TOL, "σ[1]");
}
#[test]
fn reconstruction_3x3() {
let a = Matrix::new([
[1.0_f64, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 0.0],
]);
let svd = a.svd().unwrap();
let u = svd.u();
let vt = svd.vt();
let sv = svd.singular_values();
for i in 0..3 {
for j in 0..3 {
let mut sum = 0.0;
for k in 0..3 {
sum += u[(i, k)] * sv[k] * vt[(k, j)];
}
assert_near(sum, a[(i, j)], 1e-9, &format!("UΣV^T[({},{})]", i, j));
}
}
}
#[test]
fn orthogonality() {
let a = Matrix::new([
[4.0_f64, 1.0, -1.0],
[1.0, 3.0, 2.0],
[-1.0, 2.0, 5.0],
]);
let svd = a.svd().unwrap();
let u = svd.u();
let utu = u.transpose() * *u;
for i in 0..3 {
for j in 0..3 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_near(utu[(i, j)], expected, 1e-9, &format!("U^TU[({},{})]", i, j));
}
}
let vt = svd.vt();
let vtv = *vt * vt.transpose();
for i in 0..3 {
for j in 0..3 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_near(vtv[(i, j)], expected, 1e-9, &format!("V^TV[({},{})]", i, j));
}
}
}
#[test]
fn sorted_descending() {
let a = Matrix::new([
[10.0_f64, 3.0, 0.0, 0.0],
[3.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 7.0, 2.0],
[0.0, 0.0, 2.0, 4.0],
]);
let svd = a.svd().unwrap();
let sv = svd.singular_values();
for i in 0..3 {
assert!(
sv[i] >= sv[i + 1] - TOL,
"not descending: σ[{}]={} < σ[{}]={}",
i, sv[i], i + 1, sv[i + 1]
);
}
}
#[test]
fn rank_deficient() {
let a = Matrix::new([
[1.0_f64, 2.0, 3.0],
[2.0, 4.0, 6.0],
[3.0, 6.0, 9.0],
]);
let svd = a.svd().unwrap();
let sv = svd.singular_values();
assert!(sv[0] > 1.0, "σ[0] should be large");
assert!(sv[1].abs() < 1e-9, "σ[1] should be ≈ 0");
assert!(sv[2].abs() < 1e-9, "σ[2] should be ≈ 0");
assert_eq!(svd.rank(1e-9), 1);
}
#[test]
fn rectangular_tall() {
let a = Matrix::new([
[1.0_f64, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[0.0, 0.0],
]);
let svd = a.svd().unwrap();
let u = svd.u();
let vt = svd.vt();
let sv = svd.singular_values();
for i in 0..4 {
for j in 0..2 {
let mut sum = 0.0;
for k in 0..2 {
sum += u[(i, k)] * sv[k] * vt[(k, j)];
}
assert_near(sum, a[(i, j)], 1e-9, &format!("UΣV^T[({},{})]", i, j));
}
}
}
#[test]
fn singular_values_only_path() {
let a = Matrix::new([
[3.0_f64, 0.0],
[0.0, 4.0],
]);
let sv = a.singular_values_only().unwrap();
assert_near(sv[0], 4.0, TOL, "σ[0]");
assert_near(sv[1], 3.0, TOL, "σ[1]");
}
#[test]
fn rank_and_condition() {
let a = Matrix::new([
[2.0_f64, 0.0],
[0.0, 0.5],
]);
let svd = a.svd().unwrap();
assert_eq!(svd.rank(1e-10), 2);
assert_near(svd.condition_number(), 4.0, TOL, "cond");
}
#[test]
fn f32_support() {
let a = Matrix::new([
[3.0_f32, 1.0],
[1.0, 3.0],
]);
let svd = a.svd().unwrap();
assert!((svd.singular_values()[0] - 4.0).abs() < 1e-5);
assert!((svd.singular_values()[1] - 2.0).abs() < 1e-5);
}
#[test]
fn size_1x1() {
let a = Matrix::new([[7.0_f64]]);
let svd = a.svd().unwrap();
assert_near(svd.singular_values()[0], 7.0, TOL, "σ[0]");
}
#[test]
fn size_1x1_negative() {
let a = Matrix::new([[-5.0_f64]]);
let svd = a.svd().unwrap();
assert_near(svd.singular_values()[0], 5.0, TOL, "σ[0]");
}
#[test]
fn reconstruction_5x3() {
let a = Matrix::new([
[1.0_f64, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 0.0],
[10.0, 11.0, 1.0],
[13.0, 14.0, 2.0],
]);
let svd = a.svd().unwrap();
let u = svd.u();
let vt = svd.vt();
let sv = svd.singular_values();
for i in 0..5 {
for j in 0..3 {
let mut sum = 0.0;
for k in 0..3 {
sum += u[(i, k)] * sv[k] * vt[(k, j)];
}
assert_near(sum, a[(i, j)], 1e-8, &format!("UΣV^T[({},{})]", i, j));
}
}
}
#[test]
fn solve_overdetermined() {
let a = Matrix::new([
[1.0_f64, 0.0],
[0.0, 1.0],
[1.0, 1.0],
]);
let b = Vector::from_array([1.0, 2.0, 3.0]);
let x = a.solve_svd(&b).unwrap();
let ax = a.vecmul(&x);
let residual = (ax[0]-1.0).powi(2) + (ax[1]-2.0).powi(2) + (ax[2]-3.0).powi(2);
assert!(residual < 1.0, "residual too large: {}", residual);
assert_near(x[0], 1.0, 1e-10, "x[0]");
assert_near(x[1], 2.0, 1e-10, "x[1]");
}
#[test]
fn solve_square_full_rank() {
let a = Matrix::new([
[2.0_f64, 1.0, -1.0],
[-3.0, -1.0, 2.0],
[-2.0, 1.0, 2.0],
]);
let b = Vector::from_array([8.0, -11.0, -3.0]);
let x_svd = a.solve_svd(&b).unwrap();
let x_lu = a.solve(&b).unwrap();
for i in 0..3 {
assert_near(x_svd[i], x_lu[i], 1e-9, &format!("x[{}]", i));
}
}
#[test]
fn pseudo_inverse_identity() {
let eye: Matrix<f64, 3, 3> = Matrix::eye();
let pinv = eye.pinv().unwrap();
for i in 0..3 {
for j in 0..3 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_near(pinv[(i, j)], expected, 1e-12, &format!("pinv[({},{})]", i, j));
}
}
}
#[test]
fn pseudo_inverse_rectangular() {
let a = Matrix::new([
[1.0_f64, 2.0],
[3.0, 4.0],
[5.0, 6.0],
]);
let svd = a.svd().unwrap();
let pinv = svd.pseudo_inverse();
let apa = a * (pinv * a);
for i in 0..3 {
for j in 0..2 {
assert_near(apa[(i, j)], a[(i, j)], 1e-9, &format!("APA[({},{})]", i, j));
}
}
}
#[test]
fn pseudo_inverse_rank_deficient() {
let a = Matrix::new([
[1.0_f64, 2.0, 3.0],
[2.0, 4.0, 6.0],
[3.0, 6.0, 9.0],
]);
let svd = a.svd().unwrap();
let pinv = svd.pseudo_inverse();
let pap = pinv * (a * pinv);
for i in 0..3 {
for j in 0..3 {
assert_near(pap[(i, j)], pinv[(i, j)], 1e-9, &format!("PAP[({},{})]", i, j));
}
}
}
}