use core::f64;
use matrix_kit::dynamic::matrix::Matrix;
/// Computes a Householder reflector for the column vector `x`
/// (Golub & Van Loan, Algorithm 5.1.1).
///
/// Returns `(v, beta)` such that `P = I - beta * v * v^T` is orthogonal
/// and `P * x` is a multiple of e_1. In the general branch `v` is
/// normalized so that `v(0) == 1`.
fn house(x: Matrix<f64>) -> (Matrix<f64>, f64) {
    let x1 = x.get(0, 0);
    // x(2:m): the part of x below its first entry.
    let x_2m = x.get_submatrix(1..x.row_count(), 0..1);
    // sigma = ||x(2:m)||^2
    let sigma = x_2m.inner_product(&x_2m);
    // v starts out as [1; x(2:m)].
    let mut v = Matrix::identity(1, 1);
    v.append_mat_bottom(x_2m);
    let beta;
    if sigma == 0.0 && x1 >= 0.0 {
        // x is already a non-negative multiple of e_1: no reflection needed.
        beta = 0.0;
    } else if sigma == 0.0 && x1 < 0.0 {
        // x is a negative multiple of e_1: reflect across e_1.
        beta = -2.0;
    } else {
        // mu = ||x||_2
        let mu = (x1.powf(2.0) + sigma).sqrt();
        if x1 <= 0.0 {
            v.set(0, 0, x1 - mu);
        } else {
            // Algebraically equal to x1 - mu, but avoids catastrophic
            // cancellation when x1 > 0 (GVL eq. 5.1.3).
            v.set(0, 0,
                -sigma / (x1 + mu)
            );
        }
        let numerator = 2.0 * v.get(0, 0).powf(2.0);
        let denominator = sigma + v.get(0, 0).powf(2.0);
        beta = numerator / denominator;
        // Normalize so the leading entry of v is 1.
        v /= v.get(0, 0);
    }
    (v, beta)
}
/// Reduces `matrix` (m x n) to upper bidiagonal form via Householder
/// reflections: returns `(U, V, B)` with `B = U^T * A * V` bidiagonal
/// (cf. Golub & Van Loan, Algorithm 5.4.2, with U and V accumulated
/// explicitly).
///
/// NOTE(review): the index arithmetic (`j < n - 2`, `0..n - 2`) underflows
/// for n < 2 — callers appear to guarantee n >= 2; confirm.
fn householder_bidiag(
    matrix: Matrix<f64>
) -> (Matrix<f64>, Matrix<f64>, Matrix<f64>) {
    let m = matrix.row_count();
    let n = matrix.col_count();
    let mut workmatrix = matrix.clone();
    // Reflections are accumulated as H_k * ... * H_1, hence the final
    // transposes in the return value.
    let mut big_u = Matrix::identity(m, m);
    let mut big_v = Matrix::identity(n, n);
    for j in 0..n {
        // Left reflection: zero out column j below the diagonal.
        let (v, _beta) = house(workmatrix.get_submatrix(j..m, j..(j + 1)));
        // Embed v into a full m-vector (entries above row j presumably
        // zero-initialized by Matrix::new — confirm).
        let mut new_v = Matrix::new(m, 1);
        new_v.set_submatrix(j..m, 0..1,
            v.clone()
        );
        // NOTE(review): beta is recomputed as 2 / ||v||^2 rather than using
        // the value returned by house(). When house() returns beta == 0
        // (column already reduced) this still applies a sign-flipping
        // reflection; the factorization stays valid but differs from the
        // textbook update — confirm this is intended.
        let new_beta = 2.0 / new_v.l2_norm_squared();
        let householder_matrix = Matrix::identity(m, m) - (new_v.clone() * new_v.transpose() * new_beta);
        workmatrix = householder_matrix.transpose() * workmatrix;
        big_u = householder_matrix * big_u;
        if j < n - 2 {
            // Right reflection: zero out row j to the right of the
            // superdiagonal (columns j+1..n).
            let (v, beta) = house(
                workmatrix.get_submatrix(j..(j + 1), (j + 1)..n).transpose()
            );
            let mut new_v = Matrix::new(n, 1);
            new_v.set_submatrix((j + 1)..n, 0..1,
                v.clone()
            );
            let householder_matrix = Matrix::identity(n, n) - (new_v.clone() * new_v.transpose() * beta);
            workmatrix = workmatrix * householder_matrix.clone();
            big_v = householder_matrix * big_v;
        }
    }
    // Flush rounding residue: force exact zeros below the diagonal...
    for j in (0..n).rev() {
        workmatrix.set_submatrix((j + 1)..m, j..(j + 1), Matrix::new(m - j - 1, 1));
    }
    // ...and above the superdiagonal.
    for j in (0..n - 2).rev() {
        workmatrix.set_submatrix(j..(j + 1), (j + 2)..n, Matrix::new(1, n - j - 2));
    }
    (big_u.transpose(), big_v.transpose(), workmatrix)
}
/// Computes Givens rotation coefficients `(c, s)` that annihilate `b`
/// when the 2x2 rotation is applied to the pair `(a, b)`:
/// `s*a + c*b == 0` with `c^2 + s^2 == 1`
/// (Golub & Van Loan, Algorithm 5.1.3).
///
/// The larger of |a|, |b| is used as the divisor, so the ratio `t` is
/// always at most 1 in magnitude and `t*t` cannot overflow.
fn determine_givens(a: f64, b: f64) -> (f64, f64) {
    if b == 0.0 {
        // Nothing to annihilate: identity rotation.
        (1.0, 0.0)
    } else if b.abs() > a.abs() {
        let t = -a / b;
        // t*t instead of powf(2.0): same math, cheaper and clearer.
        let s = 1.0 / (1.0 + t * t).sqrt();
        (s * t, s)
    } else {
        let t = -b / a;
        let c = 1.0 / (1.0 + t * t).sqrt();
        (c, c * t)
    }
}
/// Applies a Givens rotation to columns `k` and `k+1` of `matrix`
/// (right multiplication). For every row i:
///   col_k   <- c * col_k - s * col_{k+1}
///   col_k+1 <- s * col_k + c * col_{k+1}
fn givens_rightmult(matrix: &mut Matrix<f64>, k: usize, c: f64, s: f64) {
    let rows = matrix.row_count();
    for row in 0..rows {
        let left = matrix.get(row, k);
        let right = matrix.get(row, k + 1);
        matrix.set(row, k, c * left - s * right);
        matrix.set(row, k + 1, s * left + c * right);
    }
}
/// Applies a Givens rotation to rows `k` and `k+1` of `matrix`
/// (left multiplication). For every column i:
///   row_k   <- c * row_k - s * row_{k+1}
///   row_k+1 <- s * row_k + c * row_{k+1}
fn givens_leftmult(matrix: &mut Matrix<f64>, k: usize, c: f64, s: f64) {
    let cols = matrix.col_count();
    for col in 0..cols {
        let upper = matrix.get(k, col);
        let lower = matrix.get(k + 1, col);
        matrix.set(k, col, c * upper - s * lower);
        matrix.set(k + 1, col, s * upper + c * lower);
    }
}
/// Performs one implicit-shift Golub–Kahan SVD step (Golub & Van Loan,
/// Algorithm 8.6.1) on the unreduced bidiagonal block
/// `B22 = b[p..n-q, p..n-q]`, accumulating the rotations into `u` and `v`.
///
/// The shift is the Wilkinson shift: the eigenvalue of the trailing 2x2
/// submatrix of `T = B22^T * B22` closest to T's bottom-right entry.
///
/// Returns the updated diagonal and superdiagonal of `b`.
fn golub_kahan_step(
    b: &mut Matrix<f64>,
    u: &mut Matrix<f64>, v: &mut Matrix<f64>,
    q: usize, p: usize
) -> (Vec<f64>, Vec<f64>) {
    let n = b.col_count();
    let b22 = b.get_submatrix(p..(n - q), p..(n - q));
    let (diagonal, superdiagonal) = (b22.get_diagonal(), b22.get_upperdiagonal());
    let n_b22 = diagonal.len();
    let m_b22 = superdiagonal.len();
    // Wilkinson shift: solve lambda^2 + b_coef*lambda + c_coef = 0, the
    // characteristic polynomial of the trailing 2x2 block of B22^T * B22,
    // keeping the root closest to the block's bottom-right entry.
    // (Renamed from `b`, which shadowed the matrix parameter.)
    let lambda = {
        // b_coef = -(trace) of the trailing 2x2 block.
        let b_coef = if m_b22 == 1 {
            // B22 is 2x2, so T = [[d0^2, d0*e0], [d0*e0, d1^2 + e0^2]].
            -diagonal[0].powf(2.0) - superdiagonal[0].powf(2.0)
            -diagonal[1].powf(2.0)
        } else {
            -diagonal[m_b22 - 1].powf(2.0) - superdiagonal[m_b22 - 2].powf(2.0)
            -diagonal[n_b22 - 1].powf(2.0) - superdiagonal[m_b22 - 1].powf(2.0)
        };
        // c_coef = det of the trailing 2x2 block.
        let c_coef = if m_b22 == 1 {
            // det T = d0^2*(d1^2 + e0^2) - (d0*e0)^2 = d0^2*d1^2.
            // BUG FIX: the subtracted term previously read
            // superdiagonal[0]^2 * diagonal[0] * superdiagonal[0]
            // (= d0*e0^3) instead of (d0*e0)^2, producing a wrong shift
            // for 2x2 trailing blocks.
            (diagonal[0].powf(2.0) * superdiagonal[0].powf(2.0)) +
            (diagonal[1].powf(2.0) * diagonal[0].powf(2.0)) -
            (diagonal[0] * superdiagonal[0]).powf(2.0)
        } else {
            (diagonal[m_b22 - 1].powf(2.0) * diagonal[n_b22 - 1].powf(2.0)) +
            (diagonal[n_b22 - 1].powf(2.0) * superdiagonal[m_b22 - 2].powf(2.0)) +
            (superdiagonal[m_b22 - 2].powf(2.0) * superdiagonal[m_b22 - 1].powf(2.0))
        };
        // Quadratic formula; the trailing block is symmetric, so the
        // eigenvalues (roots) are real.
        let lambda_1 = (-b_coef + (b_coef.powf(2.0) - 4.0 * c_coef).sqrt()) / 2.0;
        let lambda_2 = (-b_coef - (b_coef.powf(2.0) - 4.0 * c_coef).sqrt()) / 2.0;
        // Pick the root closest to T's bottom-right entry.
        let target = diagonal[n_b22 - 1].powf(2.0) + superdiagonal[m_b22 - 1].powf(2.0);
        if (target - lambda_1).abs() < (target - lambda_2).abs() {
            lambda_1
        } else {
            lambda_2
        }
    };
    // Implicit QR sweep: chase the bulge down the bidiagonal band.
    // Start from the first column of the shifted T (top-left entry of
    // B22^T*B22 is d0^2, off-diagonal is d0*e0).
    let mut y = diagonal[0].powf(2.0) - lambda;
    let mut z = diagonal[0] * superdiagonal[0];
    for k in p..(n - q - 1) {
        // Right rotation annihilates z in (y, z); apply to B and V.
        let (mut c, mut s) = determine_givens(y, z);
        givens_rightmult(b, k, c, s);
        givens_rightmult(v, k, c, s);
        // Left rotation removes the bulge that appeared below the
        // diagonal; apply to B and accumulate into U.
        y = b.get(k, k);
        z = b.get(k + 1, k);
        (c, s) = determine_givens(y, z);
        givens_leftmult(b, k, c, s);
        givens_rightmult(u, k, c, s);
        if k < n - 2 {
            // The bulge has moved to (k, k+2); target it next iteration.
            y = b.get(k, k + 1);
            z = b.get(k, k + 2);
        }
    }
    (b.get_diagonal(), b.get_upperdiagonal())
}
/// Normalizes an SVD in place: folds signs of singular values into the
/// columns of V, takes absolute values on the diagonal of S, and reorders
/// the columns of U and V (and the diagonal of S) by descending singular
/// value. Extra columns of U beyond S's width sort last (key = -inf).
fn format_svd(u: &mut Matrix<f64>, v: &mut Matrix<f64>, s: &mut Matrix<f64>) {
    let rank = s.col_count();
    // Pair each column of U with the magnitude of its singular value.
    let mut left_pairs: Vec<(Matrix<f64>, f64)> = Vec::with_capacity(u.col_count());
    for c in 0..u.col_count() {
        let key = if c < rank { s.get(c, c).abs() } else { f64::NEG_INFINITY };
        left_pairs.push((u.get_submatrix(0..u.row_count(), c..(c + 1)), key));
    }
    // Pair each column of V with its singular value's magnitude, folding
    // the sign into the column so reported singular values are >= 0.
    let mut right_pairs: Vec<(Matrix<f64>, f64)> = Vec::with_capacity(v.col_count());
    for c in 0..v.col_count() {
        let signed_col = v.get_submatrix(0..v.row_count(), c..(c + 1)) * s.get(c, c).signum();
        right_pairs.push((signed_col, s.get(c, c).abs()));
    }
    // Stable descending sort by singular-value magnitude.
    left_pairs.sort_by(|(_, s1), (_, s2)| s2.total_cmp(s1));
    right_pairs.sort_by(|(_, s1), (_, s2)| s2.total_cmp(s1));
    *u = Matrix::from_cols(left_pairs.into_iter().map(|(col, _)| col).collect());
    *v = Matrix::from_cols(right_pairs.into_iter().map(|(col, _)| col).collect());
    // Rebuild S from the sorted magnitudes.
    let mut magnitudes: Vec<f64> = s.get_diagonal().iter().map(|x| x.abs()).collect();
    magnitudes.sort_by(|a, b| b.abs().total_cmp(&a.abs()));
    *s = Matrix::from_diagonal(magnitudes);
}
/// SVD driver for tall matrices (m >= n): bidiagonalize, then iterate
/// implicit-shift Golub–Kahan steps until the superdiagonal is
/// numerically zero (cf. Golub & Van Loan, Algorithm 8.6.2).
///
/// Returns `(U, V, S)` with `A ≈ U * S * V^T`: U is m x m, V is n x n,
/// S is m x n with non-negative, descending diagonal entries.
fn tall_svd_factorization(matrix: &Matrix<f64>) -> (Matrix<f64>, Matrix<f64>, Matrix<f64>) {
    let m = matrix.row_count();
    let n = matrix.col_count();
    debug_assert!(m >= n);
    let (mut u, mut v, mut b) = householder_bidiag(matrix.clone());
    // q = size of the trailing block of B that is already diagonal
    // (converged); loop ends once the whole matrix has converged.
    let mut q = 0;
    while q < n {
        // Flush superdiagonal entries that are negligible relative to
        // their diagonal neighbours.
        for i in 0..(n - 1) {
            if b.get(i, i + 1).abs() <= f64::EPSILON * (b.get(i, i).abs() + b.get(i + 1, i + 1).abs()) {
                b.set(i, i + 1, 0.0);
            }
        }
        // Measure the largest diagonal trailing block (scanning from the
        // bottom-right corner upward).
        q = 0; for i in 0..(n - 1) {
            if b.get(n - i - 2, n - i - 1) != 0.0 {
                break;
            } else {
                q = i + 1;
            }
        }
        // If all n-1 superdiagonal entries are zero, everything converged.
        if q == n - 1 {
            q = n;
        }
        // p = start of the largest unreduced block ending at row n - q.
        let mut p = n - q;
        for i in (1..(n - q)).rev() {
            if b.get(i - 1, i) != 0.0 {
                p = i - 1;
            } else {
                break;
            }
        }
        if q < n {
            // A zero diagonal entry inside the working block would break
            // the implicit-shift step; zero its superdiagonal neighbour
            // instead and let the decoupling logic handle it next sweep.
            let mut found_zero_diag = false;
            for i in p..(n - q) {
                if b.get(i, i) == 0.0 {
                    found_zero_diag = true;
                    if i < n - 1 {
                        b.set(i, i + 1, 0.0);
                    }
                }
            }
            if !found_zero_diag {
                golub_kahan_step(&mut b, &mut u, &mut v, q, p);
            }
        }
    }
    // Sort singular values, fix signs, reorder U/V columns to match.
    format_svd(&mut u, &mut v, &mut b);
    // format_svd leaves S as n x n; pad with zero rows back to m x n.
    b.append_mat_bottom(Matrix::new(m - n, n));
    (u, v, b)
}
/// Computes a full SVD `A = U * S * V^T` of `matrix`.
///
/// Tall (or square) inputs go straight to the tall factorization; wide
/// inputs are handled by factoring the transpose and swapping roles:
/// `A^T = U' S' V'^T  =>  A = V' S'^T U'^T`.
pub fn svd(matrix: &Matrix<f64>) -> (Matrix<f64>, Matrix<f64>, Matrix<f64>) {
    if matrix.row_count() >= matrix.col_count() {
        return tall_svd_factorization(matrix);
    }
    let flipped = matrix.transpose();
    let (u_t, v_t, s_t) = tall_svd_factorization(&flipped);
    (v_t, u_t, s_t.transpose())
}
/// Truncates an SVD to rank `r`: keeps the first `r` columns of `u` and
/// `v` and the first `r` singular values.
///
/// Assumes the factors are already sorted by descending singular value
/// (as produced by `svd`), so the first `r` triplets are the dominant
/// ones. `s` is now a slice (`&[f64]`), which is backward-compatible
/// with existing `&Vec<f64>` callers via deref coercion.
pub fn truncate(
    u: &Matrix<f64>, v: &Matrix<f64>, s: &[f64], r: usize
) -> (Matrix<f64>, Matrix<f64>, Vec<f64>) {
    let u_r = u.get_submatrix(0..u.row_count(), 0..r);
    // BUG FIX: the row range previously used v.col_count(); harmless while
    // V is square (as produced by svd), but wrong for any non-square V.
    let v_r = v.get_submatrix(0..v.row_count(), 0..r);
    let s_r = s[0..r].to_vec();
    (u_r, v_r, s_r)
}
/// Rank-`r` compressed SVD: computes the full SVD of `matrix`, then
/// keeps only the `r` dominant singular triplets.
pub fn compressed_svd(
    matrix: &Matrix<f64>, r: usize
) -> (Matrix<f64>, Matrix<f64>, Vec<f64>) {
    let (u, v, s) = svd(matrix);
    let singular_values = s.get_diagonal();
    truncate(&u, &v, &singular_values, r)
}
#[cfg(test)]
mod svd_math_tests {
    use matrix_kit::dynamic::matrix::Matrix;
    use rand::Rng;
    use super::compressed_svd;
    use super::house;
    use super::householder_bidiag;
    use super::svd;

    /// Element-wise comparison with an absolute tolerance of 1e-5;
    /// mismatched shapes compare as "not close".
    fn matrices_close(a: &Matrix<f64>, b: &Matrix<f64>) -> bool {
        if a.row_count() != b.row_count() || a.col_count() != b.col_count() {
            return false;
        }
        for r in 0..a.row_count() {
            for c in 0..a.col_count() {
                if (a.get(r, c) - b.get(r, c)).abs() > 1e-5 {
                    return false;
                }
            }
        }
        return true;
    }

    // Smoke test: prints the Householder reflector for a small vector.
    // No assertions — useful only for manual inspection.
    #[test]
    fn test_house() {
        let x = Matrix::from_flatmap(2, 1, vec![-1.0, 1.0]);
        let (v, beta) = house(x);
        println!("House: {:?}, {:?}", beta, v);
    }

    // Property test on random tall matrices: U and V must be orthogonal,
    // U^T * A * V must equal B, and U * B * V^T must recover A.
    #[test]
    fn test_bidiagonalization() {
        for _ in 1..=1000 {
            let mut rng = rand::rng();
            // Random tall shape: m >= n >= 2.
            let n = rng.random_range(2..=40);
            let m = rng.random_range(n..100);
            let mut a = Matrix::<f64>::random_normal(m, n, 0.0, 1.0);
            a.apply_to_all(&|x: f64| x.abs());
            let (u, v, b) = householder_bidiag(a.clone());
            if !matrices_close(&(u.transpose() * u.clone()), &Matrix::identity(u.row_count(), u.col_count())) &&
            !matrices_close(&(v.transpose() * v.clone()), &Matrix::identity(v.row_count(), v.col_count())){
                println!("U and V are not orthogonal.");
                println!("A:{:?}", a);
                println!("U: {:?}", u.clone());
                println!("U^T U:{:?}", u.transpose() * u.clone());
                println!("V^T V:{:?}", v.transpose() * v.clone());
                panic!("U and V are not orthogonal");
            }
            if !matrices_close(&(u.transpose() * a.clone() * v.clone()), &b) {
                println!("Improper factorization");
                println!("A:{:?}", a);
                println!("U: {:?}", u.clone());
                println!("U^T U:{:?}", u.transpose() * u.clone());
                println!("V^T V:{:?}", v.transpose() * v.clone());
                println!("U^T * A * V: {:?}", u.transpose() * a.clone() * v.clone());
                println!("U * A * V^T: {:?}", u.clone() * a.clone() * v.transpose());
                println!("B: {:?}", b);
                panic!("Wrong factorization");
            }
            if !matrices_close(&(u.clone() * b.clone() * v.transpose()), &a) {
                println!("Improper factorization");
                println!("A:{:?}", a);
                println!("U: {:?}", u.clone());
                println!("U^T U:{:?}", u.transpose() * u.clone());
                println!("V^T V:{:?}", v.transpose() * v.clone());
                println!("U^T * A * V: {:?}", u.transpose() * a.clone() * v);
                println!("B: {:?}", b);
                panic!("Wrong factorization");
            }
        }
    }

    // End-to-end SVD check: a small fixed wide matrix (printed for
    // inspection), then one large random matrix with assertions that
    // A == U S V^T and S == U^T A V.
    #[test]
    fn test_svd() {
        let a = Matrix::from_flatmap(3, 4, vec![
            0.0, 2.0, 4.0, 6.0,
            1.0, 3.0, 5.0, 7.0,
            -1.0, 2.0, -3.0, -4.0,
        ]);
        let (u, v, s) = svd(&a);
        println!("Orthogonal check!");
        println!("{:?}", u.transpose() * u.clone());
        println!("{:?}", v.transpose() * v.clone());
        println!("Singular values?");
        println!("{:?}", s);
        println!("Can we get diagonal?");
        println!("{:?}", u.transpose() * a.clone() * v.clone());
        println!("Can we recover?");
        println!("{:?}", u.clone() * s.clone() * v.transpose());
        for _ in 1..=1 {
            let mut rng = rand::rng();
            let n = 4032;
            let m = 3024;
            println!("SVD-ing [{} x {}]", m, n);
            let a = Matrix::random_normal(m, n, 0.0, 1.0);
            let (u, v, s) = svd(&a);
            let alleged_a = u.clone() * s.clone() * v.transpose();
            let alleged_s = u.transpose() * a.clone() * v.clone();
            assert!(matrices_close(&alleged_a, &a));
            assert!(matrices_close(&alleged_s, &s));
        }
    }

    // Rank-2 compression smoke test: prints factors and the rank-2
    // reconstruction for manual inspection. No assertions.
    #[test]
    fn test_svd_compression() {
        let a = Matrix::from_flatmap(4, 3, vec![
            0.0, 2.0, 4.0, 6.0,
            1.0, 3.0, 5.0, 7.0,
            -1.0, 2.0, -3.0, -4.0,
        ]);
        let (u, v, s) = compressed_svd(&a, 2);
        println!("Orthogonal check!");
        println!("{:?}", u.transpose() * u.clone());
        println!("{:?}", v.transpose() * v.clone());
        println!("Singular values?");
        println!("{:?}", s);
        println!("Can we get diagonal?");
        println!("{:?}", u.transpose() * a.clone() * v.clone());
        println!("Can we recover?");
        // Rebuild a rectangular S from the truncated singular values.
        let s_matrix = Matrix::from_index_def(u.col_count(), v.col_count(), &mut |r, c| if r == c {
            s[r]
        }else {
            0.0
        });
        println!("{:?}", u.clone() * s_matrix * v.transpose());
    }

    // Placeholder — not yet implemented.
    #[test]
    fn visualize_singular_value_distribution() {
    }
}