use super::config::LowRankConfig;
use super::error::LowRankError;
/// Result of a truncated SVD: `A ≈ U · diag(singular_values) · Vᵀ`.
///
/// All matrices are stored as flat row-major `Vec<f64>` buffers with their
/// dimensions carried alongside.
#[derive(Debug, Clone)]
pub struct SvdResult {
    /// Left singular vectors, row-major, `u_rows × u_cols`.
    pub u: Vec<f64>,
    /// Row count of `u` (the original matrix's row count).
    pub u_rows: usize,
    /// Column count of `u` (one column per retained component).
    pub u_cols: usize,
    /// Retained singular values, one per component.
    pub singular_values: Vec<f64>,
    /// Right singular vectors, transposed, row-major, `vt_rows × vt_cols`.
    pub vt: Vec<f64>,
    /// Row count of `vt` (one row per retained component).
    pub vt_rows: usize,
    /// Column count of `vt` (the original matrix's column count).
    pub vt_cols: usize,
    /// Relative Frobenius-norm error of the truncated reconstruction.
    pub frobenius_error: f64,
    /// Number of singular components actually kept.
    pub rank_used: usize,
}
impl SvdResult {
    /// Multiplies the stored factors back together, returning the rank-`rank_used`
    /// approximation as a dense row-major `u_rows × vt_cols` matrix.
    pub fn reconstruct(&self) -> Vec<f64> {
        let (rows, cols) = (self.u_rows, self.vt_cols);
        let mut approx = vec![0.0_f64; rows * cols];
        for component in 0..self.rank_used {
            let sigma = self.singular_values[component];
            for row in 0..rows {
                let u_entry = self.u[row * self.u_cols + component];
                let base = row * cols;
                for col in 0..cols {
                    let vt_entry = self.vt[component * self.vt_cols + col];
                    approx[base + col] += sigma * u_entry * vt_entry;
                }
            }
        }
        approx
    }

    /// Relative Frobenius error between `original` and the reconstruction
    /// produced by [`SvdResult::reconstruct`].
    pub fn relative_error(&self, original: &[f64]) -> f64 {
        TruncatedSvd::relative_frobenius_error(original, &self.reconstruct())
    }

    /// Fraction of the total singular-value mass captured by the retained
    /// components; returns 1.0 for an all-zero spectrum.
    ///
    /// NOTE(review): this sums the σ values directly rather than σ² — confirm
    /// that σ-mass (not Frobenius energy) is the intended definition.
    pub fn energy_fraction(&self) -> f64 {
        let total: f64 = self.singular_values.iter().sum();
        if total == 0.0 {
            1.0
        } else {
            let retained: f64 = self.singular_values[..self.rank_used].iter().sum();
            retained / total
        }
    }
}
/// Truncated singular value decomposition computed by power iteration with
/// deflation (see [`TruncatedSvd::decompose`]).
pub struct TruncatedSvd {
    // Target rank, convergence tolerance, and iteration budget.
    config: LowRankConfig,
}
impl TruncatedSvd {
    /// Creates a decomposer with the given configuration.
    pub fn new(config: LowRankConfig) -> Self {
        TruncatedSvd { config }
    }

    /// Frobenius norm: square root of the sum of squared entries.
    pub fn frobenius_norm(matrix: &[f64]) -> f64 {
        matrix.iter().map(|x| x * x).sum::<f64>().sqrt()
    }

    /// Relative Frobenius error `‖A − Â‖_F / ‖A‖_F`.
    ///
    /// Returns 0.0 when `original` is all zeros (nothing to approximate, so
    /// any reconstruction is treated as exact).
    pub fn relative_frobenius_error(original: &[f64], reconstructed: &[f64]) -> f64 {
        let orig_norm = Self::frobenius_norm(original);
        if orig_norm == 0.0 {
            return 0.0;
        }
        let diff_norm_sq: f64 = original
            .iter()
            .zip(reconstructed.iter())
            .map(|(a, b)| (a - b) * (a - b))
            .sum();
        diff_norm_sq.sqrt() / orig_norm
    }

    /// Frobenius-norm error bound for a rank-`rank` truncation: the norm of
    /// the discarded tail of singular values (Eckart–Young).
    pub fn error_bound_from_singular_values(all_singular_values: &[f64], rank: usize) -> f64 {
        let tail: f64 = all_singular_values.iter().skip(rank).map(|s| s * s).sum();
        tail.sqrt()
    }

    /// Computes a truncated SVD of a row-major `rows × cols` matrix.
    ///
    /// Each component's dominant singular triplet is extracted from the
    /// current residual by alternating power iteration, then subtracted
    /// (deflation) before the next component is sought. Extraction stops
    /// early once a component's singular value is negligible relative to
    /// `‖A‖_F` (the first component is always kept).
    ///
    /// # Errors
    ///
    /// * `InvalidInput` — `matrix.len() != rows * cols`, or a dimension is 0.
    /// * `RankExceedsDimensions` — configured rank exceeds `min(rows, cols)`.
    /// * `NumericalInstability` — a starting vector could not be normalized.
    /// * `SvdFailed` — no singular component was extracted (e.g. rank 0).
    pub fn decompose(
        &self,
        matrix: &[f64],
        rows: usize,
        cols: usize,
    ) -> Result<SvdResult, LowRankError> {
        if matrix.len() != rows * cols {
            return Err(LowRankError::InvalidInput(format!(
                "matrix length {} does not match rows={} × cols={}",
                matrix.len(),
                rows,
                cols
            )));
        }
        if rows == 0 || cols == 0 {
            return Err(LowRankError::InvalidInput(
                "matrix must have non-zero rows and cols".to_string(),
            ));
        }
        let max_rank = rows.min(cols);
        if self.config.rank > max_rank {
            return Err(LowRankError::RankExceedsDimensions {
                rank: self.config.rank,
                rows,
                cols,
            });
        }
        // The guard above already rejects rank > max_rank, so no clamping is
        // needed (the original `min(max_rank)` clamp was dead code).
        let rank = self.config.rank;
        // `a` holds the residual; each accepted component is deflated out.
        let mut a = matrix.to_vec();
        let mut u_vecs: Vec<Vec<f64>> = Vec::with_capacity(rank);
        let mut sigmas: Vec<f64> = Vec::with_capacity(rank);
        let mut v_vecs: Vec<Vec<f64>> = Vec::with_capacity(rank);
        let mut rank_used = 0;
        let a_norm = Self::frobenius_norm(matrix);
        for k in 0..rank {
            // Deterministic pseudo-random start vector so runs are reproducible.
            let mut v = vec![0.0_f64; cols];
            for (j, vj) in v.iter_mut().enumerate() {
                let angle = std::f64::consts::TAU
                    * ((k * 1_000_003 + j * 1_000_033) % 100_000) as f64
                    / 100_000.0_f64;
                *vj = angle.cos();
            }
            normalize_vec(&mut v).map_err(|_| {
                LowRankError::NumericalInstability(format!(
                    "could not normalize initial v for component {}",
                    k
                ))
            })?;
            let mut prev_sigma = f64::INFINITY;
            let mut u = vec![0.0_f64; rows];
            let mut sigma = 0.0_f64;
            for iter in 0..self.config.max_iterations {
                // One alternating power step: u ∝ A·v, then v ∝ Aᵀ·u.
                matvec(&a, rows, cols, &v, &mut u);
                if normalize_vec(&mut u).is_err() {
                    // Residual annihilated the iterate: component exhausted.
                    break;
                }
                matvec_t(&a, rows, cols, &u, &mut v);
                if normalize_vec(&mut v).is_err() {
                    break;
                }
                // Singular value estimate σ ≈ uᵀ·A·v.
                let mut av = vec![0.0_f64; rows];
                matvec(&a, rows, cols, &v, &mut av);
                sigma = dot(&u, &av);
                // Relative convergence test (1e-12 floor avoids a zero scale).
                if iter > 0
                    && (sigma - prev_sigma).abs() < self.config.tolerance * sigma.abs().max(1e-12)
                {
                    break;
                }
                prev_sigma = sigma;
            }
            sigma = sigma.abs();
            // Stop once a component contributes negligibly relative to ‖A‖_F,
            // but always keep the first component so the result is non-empty.
            let relative_contribution = if a_norm > 0.0 { sigma / a_norm } else { 0.0 };
            if relative_contribution < self.config.tolerance && k > 0 {
                break;
            }
            // Deflate first, then move (not clone) the vectors into storage.
            for i in 0..rows {
                for j in 0..cols {
                    a[i * cols + j] -= sigma * u[i] * v[j];
                }
            }
            sigmas.push(sigma);
            u_vecs.push(u);
            v_vecs.push(v);
            rank_used += 1;
        }
        if rank_used == 0 {
            // Reached when the configured rank is 0.
            return Err(LowRankError::SvdFailed {
                iterations: self.config.max_iterations,
                reason: "no singular components found".to_string(),
            });
        }
        // Interleave the component vectors into U (rows × rank_used, row-major).
        let mut u_flat = vec![0.0_f64; rows * rank_used];
        for (k, uk) in u_vecs.iter().enumerate() {
            for (i, &val) in uk.iter().enumerate() {
                u_flat[i * rank_used + k] = val;
            }
        }
        // Each v vector is exactly one contiguous row of Vᵀ.
        let mut vt_flat = vec![0.0_f64; rank_used * cols];
        for (k, vk) in v_vecs.iter().enumerate() {
            vt_flat[k * cols..(k + 1) * cols].copy_from_slice(vk);
        }
        // Build the result once and patch in the error afterwards, instead of
        // cloning every buffer into a throwaway prototype struct.
        let mut result = SvdResult {
            u: u_flat,
            u_rows: rows,
            u_cols: rank_used,
            singular_values: sigmas,
            vt: vt_flat,
            vt_rows: rank_used,
            vt_cols: cols,
            frobenius_error: 0.0,
            rank_used,
        };
        let reconstructed = result.reconstruct();
        result.frobenius_error = Self::relative_frobenius_error(matrix, &reconstructed);
        Ok(result)
    }
}
/// Dense row-major matrix–vector product: writes `A · x` into `out`.
fn matvec(a: &[f64], rows: usize, cols: usize, x: &[f64], out: &mut [f64]) {
    for (i, slot) in out.iter_mut().take(rows).enumerate() {
        *slot = (0..cols).map(|j| a[i * cols + j] * x[j]).sum();
    }
}
/// Transposed dense product: writes `Aᵀ · x` into `out` (length `cols`).
fn matvec_t(a: &[f64], rows: usize, cols: usize, x: &[f64], out: &mut [f64]) {
    out.iter_mut().take(cols).for_each(|slot| *slot = 0.0);
    for i in 0..rows {
        let xi = x[i];
        let row = &a[i * cols..(i + 1) * cols];
        for (j, aij) in row.iter().enumerate() {
            out[j] += aij * xi;
        }
    }
}
/// Inner product of two slices; pairs up to the shorter length.
fn dot(a: &[f64], b: &[f64]) -> f64 {
    let mut acc = 0.0_f64;
    for (x, y) in a.iter().zip(b.iter()) {
        acc += x * y;
    }
    acc
}
/// Scales `v` to unit Euclidean length in place.
///
/// Returns `Err(())` when the norm is below 1e-300 (effectively zero, so
/// dividing would produce garbage).
fn normalize_vec(v: &mut [f64]) -> Result<(), ()> {
    let squared_sum: f64 = v.iter().map(|x| x * x).sum();
    let norm = squared_sum.sqrt();
    if norm < 1e-300 {
        return Err(());
    }
    v.iter_mut().for_each(|component| *component /= norm);
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared configuration: tight tolerance, generous iteration budget.
    fn make_config(rank: usize) -> LowRankConfig {
        LowRankConfig::new(rank)
            .with_tolerance(1e-9)
            .with_max_iterations(500)
    }

    /// Builds the rank-1 outer product `v · vᵀ` as a dense row-major matrix.
    fn outer_product(v: &[f64]) -> Vec<f64> {
        v.iter()
            .flat_map(|&vi| v.iter().map(move |&vj| vi * vj))
            .collect()
    }

    #[test]
    fn test_svd_frobenius_norm() {
        let matrix = vec![1.0_f64, 2.0, 3.0, 4.0];
        let expected = (1.0_f64 + 4.0 + 9.0 + 16.0_f64).sqrt();
        let norm = TruncatedSvd::frobenius_norm(&matrix);
        assert!(
            (norm - expected).abs() < 1e-12,
            "norm={norm} expected={expected}"
        );
    }

    #[test]
    fn test_svd_2x2_identity() {
        let identity = vec![1.0_f64, 0.0, 0.0, 1.0];
        let result = TruncatedSvd::new(make_config(2))
            .decompose(&identity, 2, 2)
            .expect("SVD should succeed");
        assert_eq!(result.rank_used, 2);
        // Both singular values of I₂ are exactly 1.
        let mut svs = result.singular_values.clone();
        svs.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
        for sv in &svs {
            assert!(
                (*sv - 1.0).abs() < 1e-5,
                "singular value {sv} expected ~1.0"
            );
        }
    }

    #[test]
    fn test_svd_rank1_matrix() {
        let m = outer_product(&[1.0, 2.0, 3.0, 4.0]);
        let result = TruncatedSvd::new(make_config(1))
            .decompose(&m, 4, 4)
            .expect("SVD should succeed");
        assert_eq!(result.rank_used, 1);
        let rel_err = result.relative_error(&m);
        assert!(
            rel_err < 1e-5,
            "relative reconstruction error {rel_err} should be near zero"
        );
    }

    #[test]
    fn test_svd_reconstruction_error() {
        #[rustfmt::skip]
        let m: Vec<f64> = vec![
            4.0, 3.0, 2.0, 1.0, 0.5,
            2.0, 5.0, 1.0, 0.5, 3.0,
            1.0, 2.0, 6.0, 2.0, 1.0,
            0.5, 1.0, 2.0, 4.0, 2.0,
            0.25, 0.5, 1.0, 2.0, 3.0,
        ];
        let result = TruncatedSvd::new(make_config(3))
            .decompose(&m, 5, 5)
            .expect("SVD should succeed");
        assert!(result.rank_used >= 1);
        assert!(result.frobenius_error >= 0.0);
        assert!(result.frobenius_error <= 1.1);
    }

    #[test]
    fn test_svd_rank_exceeds_dimensions_error() {
        let m = vec![1.0_f64, 2.0, 3.0, 4.0];
        let err = TruncatedSvd::new(make_config(5)).decompose(&m, 2, 2);
        assert!(
            matches!(err, Err(LowRankError::RankExceedsDimensions { .. })),
            "expected RankExceedsDimensions, got {:?}",
            err
        );
    }

    #[test]
    fn test_svd_result_reconstruct() {
        let result = SvdResult {
            u: vec![1.0, 0.0],
            u_rows: 2,
            u_cols: 1,
            singular_values: vec![2.0],
            vt: vec![0.0, 1.0],
            vt_rows: 1,
            vt_cols: 2,
            frobenius_error: 0.0,
            rank_used: 1,
        };
        let rec = result.reconstruct();
        assert_eq!(rec.len(), 4);
        let expected = [0.0, 2.0, 0.0, 0.0];
        for (got, want) in rec.iter().zip(expected.iter()) {
            assert!((got - want).abs() < 1e-12);
        }
    }

    #[test]
    fn test_svd_result_energy_fraction() {
        let result = SvdResult {
            u: vec![1.0, 0.0, 0.0, 1.0],
            u_rows: 2,
            u_cols: 2,
            singular_values: vec![4.0, 2.0],
            vt: vec![1.0, 0.0, 0.0, 1.0],
            vt_rows: 2,
            vt_cols: 2,
            frobenius_error: 0.0,
            rank_used: 2,
        };
        // All components retained, so the fraction must be exactly 1.
        assert!((result.energy_fraction() - 1.0).abs() < 1e-12);
    }

    #[test]
    fn test_svd_result_relative_error() {
        let original = outer_product(&[1.0, 2.0, 3.0]);
        let result = TruncatedSvd::new(make_config(1))
            .decompose(&original, 3, 3)
            .expect("SVD ok");
        let rel_err = result.relative_error(&original);
        assert!(
            rel_err < 1e-4,
            "relative error {rel_err} should be near zero"
        );
    }
}