// File-level allow: the linear-algebra kernels below use explicit index
// loops deliberately (row/column traversal order matters for clarity).
#![allow(clippy::needless_range_loop)]
/// Columns whose remaining sub-diagonal norm falls below this are skipped
/// (no Householder reflection applied) during `Matrix::qr`.
const QR_ZERO_TOLERANCE: f64 = 1e-12;
/// Absolute floor of the diagonal singularity cutoff used by the
/// `invert_upper_triangular*` routines.
const SINGULAR_TOLERANCE: f64 = 1e-10;
/// Dense matrix of `f64` values in row-major storage.
#[derive(Clone, Debug)]
pub struct Matrix {
    /// Number of rows.
    pub rows: usize,
    /// Number of columns.
    pub cols: usize,
    /// Row-major element storage: entry (r, c) lives at `data[r * cols + c]`.
    pub data: Vec<f64>,
}
impl Matrix {
    /// Builds a matrix from row-major `data`.
    ///
    /// Panics when `data.len() != rows * cols`.
    pub fn new(rows: usize, cols: usize, data: Vec<f64>) -> Self {
        assert_eq!(data.len(), rows * cols, "Data length must match dimensions");
        Matrix { rows, cols, data }
    }

    /// A `rows x cols` matrix filled with zeros.
    pub fn zeros(rows: usize, cols: usize) -> Self {
        Matrix {
            rows,
            cols,
            data: vec![0.0; rows * cols],
        }
    }

    /// Element at (`row`, `col`); panics when the index is out of range.
    pub fn get(&self, row: usize, col: usize) -> f64 {
        self.data[row * self.cols + col]
    }

    /// Overwrites the element at (`row`, `col`); panics when out of range.
    pub fn set(&mut self, row: usize, col: usize, val: f64) {
        self.data[row * self.cols + col] = val;
    }

    /// Returns a new matrix that is the transpose of `self`.
    pub fn transpose(&self) -> Matrix {
        let mut flipped = Matrix::zeros(self.cols, self.rows);
        for r in 0..self.rows {
            for c in 0..self.cols {
                flipped.set(c, r, self.get(r, c));
            }
        }
        flipped
    }

    /// Matrix product `self * other`; panics on a dimension mismatch.
    pub fn matmul(&self, other: &Matrix) -> Matrix {
        assert_eq!(
            self.cols, other.rows,
            "Dimension mismatch for multiplication"
        );
        let mut product = Matrix::zeros(self.rows, other.cols);
        for i in 0..self.rows {
            for j in 0..other.cols {
                let entry: f64 = (0..self.cols)
                    .map(|t| self.get(i, t) * other.get(t, j))
                    .sum();
                product.set(i, j, entry);
            }
        }
        product
    }

    /// Matrix-vector product `self * vec`; panics on a length mismatch.
    pub fn mul_vec(&self, vec: &[f64]) -> Vec<f64> {
        assert_eq!(
            self.cols,
            vec.len(),
            "Dimension mismatch for matrix-vector multiplication"
        );
        (0..self.rows)
            .map(|r| (0..self.cols).map(|c| self.get(r, c) * vec[c]).sum::<f64>())
            .collect()
    }

    /// Dot product of column `col` with the vector `v`.
    pub fn col_dot(&self, col: usize, v: &[f64]) -> f64 {
        assert!(col < self.cols, "Column index out of bounds");
        assert_eq!(
            self.rows,
            v.len(),
            "Vector length must match number of rows"
        );
        (0..self.rows).map(|row| self.get(row, col) * v[row]).sum()
    }

    /// In-place AXPY against a column: `v += alpha * column(col)`.
    pub fn col_axpy_inplace(&self, col: usize, alpha: f64, v: &mut [f64]) {
        assert!(col < self.cols, "Column index out of bounds");
        assert_eq!(
            self.rows,
            v.len(),
            "Vector length must match number of rows"
        );
        for (row, slot) in v.iter_mut().enumerate() {
            *slot += alpha * self.get(row, col);
        }
    }

    /// Squared Euclidean norm of column `col` (no square root taken).
    pub fn col_norm2(&self, col: usize) -> f64 {
        assert!(col < self.cols, "Column index out of bounds");
        (0..self.rows)
            .map(|row| {
                let entry = self.get(row, col);
                entry * entry
            })
            .sum()
    }

    /// Adds `alpha` to every diagonal entry with index >= `start_index`.
    /// Panics when the matrix is not square.
    pub fn add_diagonal_in_place(&mut self, alpha: f64, start_index: usize) {
        assert_eq!(self.rows, self.cols, "Matrix must be square");
        for i in start_index..self.rows {
            let bumped = self.get(i, i) + alpha;
            self.set(i, i, bumped);
        }
    }
}
impl Matrix {
    /// Householder QR decomposition: returns `(Q, R)` with `self = Q * R`,
    /// `Q` being `m x m` orthogonal and `R` `m x n` upper triangular.
    ///
    /// Columns whose sub-diagonal part has norm below `QR_ZERO_TOLERANCE`
    /// are skipped (no reflection applied for them).
    #[allow(clippy::needless_range_loop)]
    pub fn qr(&self) -> (Matrix, Matrix) {
        let m = self.rows;
        let n = self.cols;
        let mut q = Matrix::identity(m);
        let mut r = self.clone();
        // `saturating_sub` guards the degenerate m == 0 case, which would
        // otherwise underflow `m - 1` and panic.
        for k in 0..n.min(m.saturating_sub(1)) {
            // Column k from the diagonal down.
            let mut x = vec![0.0; m - k];
            for i in k..m {
                x[i - k] = r.get(i, k);
            }
            let norm_x: f64 = x.iter().map(|&v| v * v).sum::<f64>().sqrt();
            if norm_x < QR_ZERO_TOLERANCE {
                // Nothing to annihilate below the diagonal.
                continue;
            }
            // Householder vector v = x + sign(x0) * ||x|| * e1, normalized;
            // the sign choice avoids cancellation in the leading entry.
            let sign = if x[0] >= 0.0 { 1.0 } else { -1.0 };
            let u1 = x[0] + sign * norm_x;
            let mut v = x;
            v[0] = u1;
            let norm_v: f64 = v.iter().map(|&val| val * val).sum::<f64>().sqrt();
            for val in &mut v {
                *val /= norm_v;
            }
            // R <- (I - 2 v v^T) R, applied to the trailing columns.
            for j in k..n {
                let mut dot = 0.0;
                for i in 0..m - k {
                    dot += v[i] * r.get(k + i, j);
                }
                for i in 0..m - k {
                    let val = r.get(k + i, j) - 2.0 * v[i] * dot;
                    r.set(k + i, j, val);
                }
            }
            // Q <- Q (I - 2 v v^T), accumulating the orthogonal factor.
            for i in 0..m {
                let mut dot = 0.0;
                for l in 0..m - k {
                    dot += q.get(i, k + l) * v[l];
                }
                for l in 0..m - k {
                    let val = q.get(i, k + l) - 2.0 * dot * v[l];
                    q.set(i, k + l, val);
                }
            }
        }
        (q, r)
    }

    /// The `size x size` identity matrix.
    pub fn identity(size: usize) -> Self {
        let mut data = vec![0.0; size * size];
        for i in 0..size {
            data[i * size + i] = 1.0;
        }
        Matrix::new(size, size, data)
    }

    /// Inverts an upper triangular matrix by back substitution, returning
    /// `None` when a diagonal entry is below the singularity tolerance.
    ///
    /// Delegates to [`Self::invert_upper_triangular_with_tolerance`] with a
    /// multiplier of 1.0 (the two bodies were previously duplicated;
    /// the math is identical).
    pub fn invert_upper_triangular(&self) -> Option<Matrix> {
        self.invert_upper_triangular_with_tolerance(1.0)
    }

    /// Inverts an upper triangular matrix by back substitution.
    ///
    /// The singularity cutoff is
    /// `max(SINGULAR_TOLERANCE, max_diag * 2 * EPSILON * n * tolerance_mult)`;
    /// any diagonal entry below it yields `None`.
    /// Panics when the matrix is not square.
    pub fn invert_upper_triangular_with_tolerance(&self, tolerance_mult: f64) -> Option<Matrix> {
        let n = self.rows;
        assert_eq!(n, self.cols, "Matrix must be square");
        let max_diag: f64 = (0..n)
            .map(|i| self.get(i, i).abs())
            .fold(0.0_f64, |acc, val| acc.max(val));
        let epsilon = 2.0_f64 * f64::EPSILON;
        let relative_tolerance = max_diag * epsilon * n as f64 * tolerance_mult;
        let tolerance = SINGULAR_TOLERANCE.max(relative_tolerance);
        for i in 0..n {
            if self.get(i, i).abs() < tolerance {
                return None;
            }
        }
        // Column-by-column back substitution solving R * inv = I.
        let mut inv = Matrix::zeros(n, n);
        for i in 0..n {
            inv.set(i, i, 1.0 / self.get(i, i));
            for j in (0..i).rev() {
                let mut sum = 0.0;
                for k in j + 1..=i {
                    sum += self.get(j, k) * inv.get(k, i);
                }
                inv.set(j, i, -sum / self.get(j, j));
            }
        }
        Some(inv)
    }

    /// General inverse via QR: `A^{-1} = R^{-1} Q^T`.
    ///
    /// Panics when the matrix is not square; returns `None` when `R`
    /// is numerically singular.
    pub fn invert(&self) -> Option<Matrix> {
        let n = self.rows;
        if n != self.cols {
            panic!("Matrix must be square for inversion");
        }
        let (q, r) = self.qr();
        let r_inv = r.invert_upper_triangular()?;
        // Reuse matmul (same summation order) instead of the previous
        // hand-inlined triple loop.
        Some(r_inv.matmul(&q.transpose()))
    }

    /// `(X^T X)^{-1}` from the QR decomposition of `self`, with the
    /// default tolerance multiplier of 1.0.
    pub fn chol2inv_from_qr(&self) -> Option<Matrix> {
        self.chol2inv_from_qr_with_tolerance(1.0)
    }

    /// `(X^T X)^{-1} = R1^{-1} R1^{-T}` where `R1` is the leading
    /// `p x p` upper triangular block of this matrix's QR factor `R`.
    ///
    /// NOTE(review): assumes `self.rows >= self.cols` so that `R` has at
    /// least `p` rows — confirm at call sites.
    pub fn chol2inv_from_qr_with_tolerance(&self, tolerance_mult: f64) -> Option<Matrix> {
        let p = self.cols;
        let (_, r_full) = self.qr();
        // Copy the leading p x p upper triangle. The j == i iteration
        // covers the diagonal, so the old separate diagonal re-set was
        // redundant and has been removed.
        let mut r1 = Matrix::zeros(p, p);
        for i in 0..p {
            for j in i..p {
                r1.set(i, j, r_full.get(i, j));
            }
        }
        let r1_inv = r1.invert_upper_triangular_with_tolerance(tolerance_mult)?;
        // result = R1^{-1} * (R1^{-1})^T, written out directly.
        let mut result = Matrix::zeros(p, p);
        for i in 0..p {
            for j in 0..p {
                let mut sum = 0.0;
                for k in 0..p {
                    sum += r1_inv.get(i, k) * r1_inv.get(j, k);
                }
                result.set(i, j, sum);
            }
        }
        Some(result)
    }
}
/// Arithmetic mean of `v`; defined as 0.0 for an empty slice.
pub fn vec_mean(v: &[f64]) -> f64 {
    match v.len() {
        0 => 0.0,
        n => v.iter().sum::<f64>() / n as f64,
    }
}
/// Element-wise difference `a - b`; panics when the lengths differ.
pub fn vec_sub(a: &[f64], b: &[f64]) -> Vec<f64> {
    assert_eq!(a.len(), b.len(), "vec_sub: slice lengths must match");
    a.iter().zip(b).map(|(lhs, rhs)| lhs - rhs).collect()
}
/// Dot product of `a` and `b`; panics when the lengths differ.
pub fn vec_dot(a: &[f64], b: &[f64]) -> f64 {
    assert_eq!(a.len(), b.len(), "vec_dot: slice lengths must match");
    let mut acc = 0.0;
    for (lhs, rhs) in a.iter().zip(b.iter()) {
        acc += lhs * rhs;
    }
    acc
}
/// Element-wise sum `a + b`; panics when the lengths differ.
pub fn vec_add(a: &[f64], b: &[f64]) -> Vec<f64> {
    assert_eq!(a.len(), b.len(), "vec_add: slice lengths must match");
    let mut out = Vec::with_capacity(a.len());
    for (lhs, rhs) in a.iter().zip(b.iter()) {
        out.push(lhs + rhs);
    }
    out
}
/// In-place AXPY: `dst += alpha * src`. Panics when the lengths differ.
pub fn vec_axpy_inplace(dst: &mut [f64], alpha: f64, src: &[f64]) {
    assert_eq!(
        dst.len(),
        src.len(),
        "vec_axpy_inplace: slice lengths must match"
    );
    dst.iter_mut()
        .zip(src)
        .for_each(|(target, term)| *target += alpha * term);
}
/// Scales every element of `v` by `alpha`, in place.
pub fn vec_scale_inplace(v: &mut [f64], alpha: f64) {
    v.iter_mut().for_each(|entry| *entry *= alpha);
}
/// Returns a new vector with every element of `v` scaled by `alpha`.
pub fn vec_scale(v: &[f64], alpha: f64) -> Vec<f64> {
    v.iter().map(|&entry| entry * alpha).collect()
}
/// Euclidean (L2) norm of `v`; 0.0 for an empty slice.
pub fn vec_l2_norm(v: &[f64]) -> f64 {
    v.iter().fold(0.0, |acc, &entry| acc + entry * entry).sqrt()
}
/// Largest absolute value in `v` (infinity norm); 0.0 for an empty slice.
pub fn vec_max_abs(v: &[f64]) -> f64 {
    v.iter().fold(0.0_f64, |best, &entry| best.max(entry.abs()))
}
/// Result of a LINPACK-style (`dqrdc`) pivoted QR decomposition.
#[derive(Clone, Debug)]
pub struct QRLinpack {
    /// Packed factorization: `R` in the upper triangle, scaled Householder
    /// vectors below the diagonal.
    pub qr: Matrix,
    /// Auxiliary scalars (head of each Householder vector) needed to
    /// reconstruct the reflections.
    pub qraux: Vec<f64>,
    /// Column permutation as 1-based original column indices.
    pub pivot: Vec<usize>,
    /// Numerical rank detected during the decomposition.
    pub rank: usize,
}
impl Matrix {
    /// Pivoted Householder QR in the style of LINPACK's `dqrdc`.
    ///
    /// Columns whose running norm drops below `tol` times their original
    /// norm are cycled to the back of the matrix; the count of surviving
    /// columns becomes the reported numerical rank.
    ///
    /// * `tol` — relative column-norm tolerance; defaults to `1e-07`
    ///   (presumably chosen to match R's `qr()` default — confirm).
    ///
    /// Returns a [`QRLinpack`] with the packed factorization: `R` in the
    /// upper triangle of `qr`, scaled Householder vectors below the
    /// diagonal, the reflection scalars in `qraux`, the 1-based column
    /// permutation in `pivot`, and the detected `rank`.
    pub fn qr_linpack(&self, tol: Option<f64>) -> QRLinpack {
        let n = self.rows;
        let p = self.cols;
        let lup = n.min(p);
        let tol = tol.unwrap_or(1e-07);
        // `x` is factored in place; `qraux[j]` starts as the norm of column j.
        let mut x = self.clone();
        let mut qraux = vec![0.0; p];
        // `pivot` holds 1-based column ids; `work[j]` = (running norm, original norm).
        let mut pivot: Vec<usize> = (1..=p).collect();
        let mut work = vec![(0.0, 0.0); p];
        if n > 0 {
            // Initial column norms.
            for j in 0..p {
                let mut norm = 0.0;
                for i in 0..n {
                    norm += x.get(i, j) * x.get(i, j);
                }
                norm = norm.sqrt();
                qraux[j] = norm;
                // A zero column gets original_norm = 1.0 so the pivot test
                // `qraux[l] < work[l].1 * tol` still fires for it.
                let original_norm = if norm == 0.0 { 1.0 } else { norm };
                work[j] = (norm, original_norm);
            }
        }
        // `k - 1` counts the columns not yet declared negligible; it starts
        // at p and is decremented each time a column is cycled to the back.
        let mut k = p + 1;
        for l in 0..lup {
            // Cycle negligible columns (norm collapsed below tol of the
            // original) to the end, shifting the bookkeeping to match.
            while l < k - 1 && qraux[l] < work[l].1 * tol {
                let lp1 = l + 1;
                for i in 0..n {
                    let t = x.get(i, l);
                    for j in lp1..p {
                        x.set(i, j - 1, x.get(i, j));
                    }
                    x.set(i, p - 1, t);
                }
                let saved_pivot = pivot[l];
                let saved_qraux = qraux[l];
                let saved_work = work[l];
                for j in lp1..p {
                    pivot[j - 1] = pivot[j];
                    qraux[j - 1] = qraux[j];
                    work[j - 1] = work[j];
                }
                pivot[p - 1] = saved_pivot;
                qraux[p - 1] = saved_qraux;
                work[p - 1] = saved_work;
                k -= 1;
            }
            // The last row never needs a reflection.
            if l == n - 1 {
                break;
            }
            // Householder step: annihilate column l below the diagonal.
            let mut nrmxl = 0.0;
            for i in l..n {
                let val = x.get(i, l);
                nrmxl += val * val;
            }
            nrmxl = nrmxl.sqrt();
            if nrmxl == 0.0 {
                continue;
            }
            // Match the sign of the pivot element (avoids cancellation).
            let x_ll = x.get(l, l);
            if x_ll != 0.0 {
                nrmxl = nrmxl.copysign(x_ll);
            }
            let scale = 1.0 / nrmxl;
            for i in l..n {
                x.set(i, l, x.get(i, l) * scale);
            }
            x.set(l, l, 1.0 + x.get(l, l));
            let lp1 = l + 1;
            if p > lp1 {
                // Apply the reflection to the trailing columns and update
                // their running norms.
                for j in lp1..p {
                    let mut dot = 0.0;
                    for i in l..n {
                        dot += x.get(i, l) * x.get(i, j);
                    }
                    let t = -dot / x.get(l, l);
                    for i in l..n {
                        let val = x.get(i, j) + t * x.get(i, l);
                        x.set(i, j, val);
                    }
                    if qraux[j] != 0.0 {
                        // Downdate the stored norm; recompute from scratch
                        // when cancellation makes the downdate unreliable.
                        let x_lj = x.get(l, j).abs();
                        let mut tt = 1.0 - (x_lj / qraux[j]).powi(2);
                        tt = tt.max(0.0);
                        if tt.abs() < 1e-6 {
                            let mut new_norm = 0.0;
                            for i in (l + 1)..n {
                                let val = x.get(i, j);
                                new_norm += val * val;
                            }
                            new_norm = new_norm.sqrt();
                            qraux[j] = new_norm;
                            work[j].0 = new_norm;
                        } else {
                            qraux[j] *= tt.sqrt();
                        }
                    }
                }
            }
            // Stash the reflection head and place R's diagonal entry.
            qraux[l] = x.get(l, l);
            x.set(l, l, -nrmxl);
        }
        let rank = k - 1;
        let rank = rank.min(n);
        QRLinpack {
            qr: x,
            qraux,
            pivot,
            rank,
        }
    }
    /// Solves the least-squares system `self * beta ~ y` using a
    /// factorization previously produced by [`Self::qr_linpack`].
    ///
    /// Applies the stored Householder reflections to form `Q^T y`, then
    /// back-substitutes through the leading `rank x rank` block of `R`.
    /// Coefficients for columns dropped as rank-deficient come back as
    /// `NaN`, in the original (un-pivoted) column order.
    ///
    /// Returns `None` when `y` has the wrong length, the rank is zero, or
    /// a diagonal of `R` is numerically zero during back substitution.
    #[allow(clippy::needless_range_loop)]
    pub fn qr_solve_linpack(&self, qr_result: &QRLinpack, y: &[f64]) -> Option<Vec<f64>> {
        let n = self.rows;
        let p = self.cols;
        let k = qr_result.rank;
        if y.len() != n {
            return None;
        }
        if k == 0 {
            return None;
        }
        // qty accumulates Q^T y by applying each reflection in turn.
        let mut qty = y.to_vec();
        for j in 0..k {
            let r_jj = qr_result.qr.get(j, j);
            if r_jj == 0.0 {
                continue;
            }
            // Reconstruct the Householder vector for column j: the scaled
            // tail is stored below the diagonal, its head in qraux[j].
            let mut v = vec![0.0; n - j];
            for i in j..n {
                v[i - j] = qr_result.qr.get(i, j);
            }
            let alpha = qr_result.qraux[j];
            if alpha != 0.0 {
                v[0] = alpha;
            }
            let v_norm: f64 = v.iter().map(|&x| x * x).sum::<f64>().sqrt();
            if v_norm < 1e-14 {
                continue;
            }
            // qty <- (I - 2 v v^T / ||v||^2) qty
            let mut dot = 0.0;
            for i in j..n {
                dot += v[i - j] * qty[i];
            }
            let t = 2.0 * dot / (v_norm * v_norm);
            for i in j..n {
                qty[i] -= t * v[i - j];
            }
        }
        // Back substitution through R; entries beyond the rank stay NaN.
        let mut coef_permuted = vec![f64::NAN; p];
        for row in (0..k).rev() {
            let r_diag = qr_result.qr.get(row, row);
            let max_abs = (0..k)
                .map(|i| qr_result.qr.get(i, i).abs())
                .fold(0.0_f64, f64::max);
            let tolerance = 1e-14 * max_abs.max(1.0);
            if r_diag.abs() < tolerance {
                return None;
            }
            let mut sum = qty[row];
            for col in (row + 1)..k {
                sum -= qr_result.qr.get(row, col) * coef_permuted[col];
            }
            coef_permuted[row] = sum / r_diag;
        }
        // Undo the column pivoting (pivot entries are 1-based).
        let mut result = vec![0.0; p];
        for j in 0..p {
            let original_col = qr_result.pivot[j] - 1;
            result[original_col] = coef_permuted[j];
        }
        Some(result)
    }
}
/// Ordinary least-squares fit of `y` on the columns of `x`, via the
/// pivoted LINPACK-style QR routines with their default tolerance.
/// Returns `None` when the solve fails (wrong length, zero rank, or a
/// numerically singular `R`).
pub fn fit_ols_linpack(y: &[f64], x: &Matrix) -> Option<Vec<f64>> {
    let decomposition = x.qr_linpack(None);
    x.qr_solve_linpack(&decomposition, y)
}
/// Singular value decomposition `A = U * diag(sigma) * V^T`.
#[derive(Clone, Debug)]
pub struct SVDResult {
    /// Left singular vectors, one per column.
    pub u: Matrix,
    /// Singular values, sorted in non-increasing order.
    pub sigma: Vec<f64>,
    /// Right singular vectors, transposed (rows of `v_t`).
    pub v_t: Matrix,
}
impl Matrix {
    /// Thin SVD (`self = U * diag(sigma) * V^T`) computed by running
    /// unshifted QR iteration on the Gram matrix `A^T A`.
    ///
    /// Singular values are returned in non-increasing order. Columns of
    /// `U` whose singular value is below `1e-14` are left as zero vectors.
    /// The iteration is capped at 100 sweeps, so convergence is
    /// best-effort rather than guaranteed.
    #[allow(clippy::needless_range_loop)]
    pub fn svd(&self) -> SVDResult {
        let m = self.rows;
        let n = self.cols;
        let k = m.min(n);
        // Symmetric Gram matrix A^T A (compute the upper half, mirror it).
        let mut ata = Matrix::zeros(n, n);
        for i in 0..n {
            for j in i..n {
                let mut sum = 0.0;
                for p in 0..m {
                    sum += self.get(p, i) * self.get(p, j);
                }
                ata.set(i, j, sum);
                ata.set(j, i, sum);
            }
        }
        // QR iteration: A_{next} = R * Q drives the matrix toward diagonal
        // form; V accumulates the orthogonal transforms (eigenvectors).
        let mut v = Matrix::identity(n);
        let mut lambda = Vec::with_capacity(n);
        let max_iterations = 100;
        let tolerance = 1e-14;
        let mut a_work = ata.clone();
        for _iter in 0..max_iterations {
            let mut off_diag_sum = 0.0;
            for i in 0..n {
                for j in (i + 1)..n {
                    off_diag_sum += a_work.get(i, j).abs();
                }
            }
            if off_diag_sum < tolerance {
                break;
            }
            let (q, r) = a_work.qr();
            a_work = r.matmul(&q);
            v = v.matmul(&q);
        }
        // Eigenvalue estimates are the diagonal of the (near-)diagonalized
        // working matrix.
        for i in 0..n {
            lambda.push(a_work.get(i, i));
        }
        // Selection-style sort: eigenvalues descending, with matching
        // column swaps applied to V.
        for i in 0..n {
            for j in (i + 1)..n {
                if lambda[j] > lambda[i] {
                    lambda.swap(i, j);
                    #[allow(clippy::manual_swap)]
                    for row in 0..n {
                        let temp = v.get(row, i);
                        v.set(row, i, v.get(row, j));
                        v.set(row, j, temp);
                    }
                }
            }
        }
        // sigma_i = sqrt(max(lambda_i, 0)); clamping guards round-off
        // negatives.
        let mut sigma = Vec::with_capacity(k);
        for i in 0..k {
            let s = lambda[i].max(0.0).sqrt();
            sigma.push(s);
        }
        // U column j = A v_j / sigma_j for the numerically nonzero sigmas.
        let mut u = Matrix::zeros(m, k);
        for j in 0..k {
            if sigma[j] > 1e-14 {
                for i in 0..m {
                    let mut sum = 0.0;
                    for p in 0..n {
                        sum += self.get(i, p) * v.get(p, j);
                    }
                    u.set(i, j, sum / sigma[j]);
                }
            }
        }
        let v_t = v.transpose();
        SVDResult { u, sigma, v_t }
    }
    /// Minimum-norm least-squares solve of `self * x ~ b` using a
    /// precomputed SVD: `x = V * S^+ * U^T * b`.
    ///
    /// Singular values at or below the tolerance (relative to the largest
    /// one) are treated as zero, dropping those directions.
    /// Assumes `b.len() == self.rows` — indexing panics otherwise.
    #[allow(clippy::needless_range_loop)]
    pub fn svd_solve(&self, svd_result: &SVDResult, b: &[f64]) -> Vec<f64> {
        let m = self.rows;
        let n = self.cols;
        let k = m.min(n);
        // Relative cutoff scaled from the largest singular value
        // (sigma is sorted descending, so `first()` is the maximum).
        let max_sigma = svd_result.sigma.first().copied().unwrap_or(0.0);
        let tol = if max_sigma > 0.0 {
            max_sigma * 100.0 * f64::EPSILON
        } else {
            1e-14
        };
        // ut_b = U^T b
        let mut ut_b = vec![0.0; k];
        for j in 0..k {
            let mut sum = 0.0;
            for i in 0..m {
                sum += svd_result.u.get(i, j) * b[i];
            }
            ut_b[j] = sum;
        }
        // coeffs_v = S^+ (U^T b): divide by sigma where it is above tol.
        let mut coeffs_v = vec![0.0; k];
        for j in 0..k {
            if svd_result.sigma[j] > tol {
                coeffs_v[j] = ut_b[j] / svd_result.sigma[j];
            } else {
                coeffs_v[j] = 0.0;
            }
        }
        // x = V coeffs_v
        let mut x = vec![0.0; n];
        for i in 0..n {
            let mut sum = 0.0;
            for j in 0..k {
                sum += svd_result.v_t.get(j, i) * coeffs_v[j];
            }
            x[i] = sum;
        }
        x
    }
}
/// Fits OLS via the pivoted LINPACK-style QR and returns the in-sample
/// predictions `X * beta`.
///
/// Rank-deficient fits mark dropped coefficients as `NaN`; those terms are
/// excluded from the prediction sum so the output stays finite.
/// Returns `None` when the underlying solve fails.
pub fn fit_and_predict_linpack(y: &[f64], x: &Matrix) -> Option<Vec<f64>> {
    let qr_result = x.qr_linpack(None);
    let beta = x.qr_solve_linpack(&qr_result, y)?;
    if qr_result.rank == x.cols {
        // Full rank: every coefficient is finite, so a plain
        // matrix-vector product suffices.
        return Some(x.mul_vec(&beta));
    }
    // Rank deficient: accumulate row sums while skipping NaN-marked
    // (dropped) coefficients.
    let predictions: Vec<f64> = (0..x.rows)
        .map(|row| {
            (0..x.cols)
                .filter(|&j| !beta[j].is_nan())
                .map(|j| x.get(row, j) * beta[j])
                .sum::<f64>()
        })
        .collect();
    Some(predictions)
}
impl Matrix {
    /// SVD via a Jacobi eigendecomposition of the Gram matrix `A^T A`.
    ///
    /// Wide matrices (`rows < cols`) are handled by decomposing the
    /// transpose and swapping the factors. Singular values come back in
    /// non-increasing order; `U` columns belonging to numerically zero
    /// singular values are left as zero vectors.
    pub fn svd_jacobi(&self) -> Option<SVDResult> {
        let m = self.rows;
        let n = self.cols;
        if m < n {
            // A = U S V^T  <=>  A^T = V S U^T, so decompose the transpose.
            let at = self.transpose();
            let svd_t = at.svd_jacobi()?;
            return Some(SVDResult {
                u: svd_t.v_t.transpose(),
                sigma: svd_t.sigma,
                v_t: svd_t.u.transpose(),
            });
        }
        let ata = self.transpose().matmul(self);
        let (v, s_sq) = ata.symmetric_eigen()?;
        // Clamp round-off negatives to zero BEFORE sqrt so a small negative
        // eigenvalue cannot pass through sqrt as NaN.
        let s: Vec<f64> = s_sq.iter().map(|&x| x.max(0.0).sqrt()).collect();
        // Sort singular values (and the matching V columns) descending.
        let mut indexed: Vec<(usize, f64)> = s.iter().enumerate()
            .map(|(i, &val)| (i, val))
            .collect();
        indexed.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        let mut v_sorted = Matrix::zeros(n, n);
        let mut s_sorted = vec![0.0; n];
        for (new_idx, (old_idx, val)) in indexed.iter().enumerate() {
            s_sorted[new_idx] = *val;
            for row in 0..n {
                v_sorted.set(row, new_idx, v.get(row, *old_idx));
            }
        }
        // U column j = A v_j / sigma_j for the nonzero singular values.
        let mut u = Matrix::zeros(m, n);
        for i in 0..m {
            for j in 0..n {
                if s_sorted[j] > f64::EPSILON {
                    let mut sum = 0.0;
                    for k in 0..n {
                        sum += self.get(i, k) * v_sorted.get(k, j);
                    }
                    u.set(i, j, sum / s_sorted[j]);
                }
            }
        }
        let v_t = v_sorted.transpose();
        Some(SVDResult {
            u,
            sigma: s_sorted,
            v_t,
        })
    }
    /// Classical Jacobi eigendecomposition of a symmetric matrix.
    ///
    /// Returns `(V, lambda)` with the eigenvectors in the columns of `V`.
    /// Panics when the matrix is not square. Capped at 100 rotations, so
    /// convergence is best-effort.
    fn symmetric_eigen(&self) -> Option<(Matrix, Vec<f64>)> {
        let n = self.rows;
        assert_eq!(n, self.cols, "Matrix must be square");
        let max_iterations = 100;
        let tolerance = 1e-10;
        let mut v = Matrix::identity(n);
        let mut a = self.clone();
        for _iter in 0..max_iterations {
            // Pick the largest off-diagonal element a[p][q].
            let mut max_off_diag = 0.0;
            let mut p = 0;
            let mut q = 1;
            for i in 0..n {
                for j in (i + 1)..n {
                    let val = a.get(i, j).abs();
                    if val > max_off_diag {
                        max_off_diag = val;
                        p = i;
                        q = j;
                    }
                }
            }
            if max_off_diag < tolerance {
                break; // Numerically diagonal: converged.
            }
            let app = a.get(p, p);
            let aqq = a.get(q, q);
            let apq = a.get(p, q);
            // (The old `apq.abs() < tolerance` guard here was unreachable:
            // apq IS the max off-diagonal element just checked above.)
            // Choose t = tan(theta) that annihilates a[p][q]; the branch
            // picks the smaller-magnitude root for numerical stability.
            let theta = (aqq - app) / (2.0 * apq);
            let t = if theta >= 0.0 {
                1.0 / (theta + (1.0 + theta * theta).sqrt())
            } else {
                -1.0 / (-theta + (1.0 + theta * theta).sqrt())
            };
            let c = 1.0 / (1.0 + t * t).sqrt();
            let s = t * c;
            // BUG FIX: Rutishauser's update terms require tau = s / (1 + c).
            // The previous code substituted `s` for `tau`, which applies a
            // non-orthogonal transform (scaling by c^2 instead of c) and
            // silently degrades both eigenvalues and eigenvectors.
            let tau = s / (1.0 + c);
            for i in 0..n {
                if i != p && i != q {
                    let aip = a.get(i, p);
                    let aiq = a.get(i, q);
                    a.set(i, p, aip - s * (aiq + tau * aip));
                    a.set(i, q, aiq + s * (aip - tau * aiq));
                }
            }
            a.set(p, p, app - t * apq);
            a.set(q, q, aqq + t * apq);
            a.set(p, q, 0.0);
            a.set(q, p, 0.0);
            // Accumulate the rotation into the eigenvector matrix.
            for i in 0..n {
                let vip = v.get(i, p);
                let viq = v.get(i, q);
                v.set(i, p, vip - s * (viq + tau * vip));
                v.set(i, q, viq + s * (vip - tau * viq));
            }
        }
        let eigenvalues: Vec<f64> = (0..n).map(|i| a.get(i, i)).collect();
        Some((v, eigenvalues))
    }
    /// Moore-Penrose pseudo-inverse via [`Self::svd_jacobi`]:
    /// `A^+ = V S^+ U^T`.
    ///
    /// `tolerance` defaults to `max_sigma * EPSILON * max(m, n)`; singular
    /// values at or below it are treated as zero.
    pub fn pseudo_inverse(&self, tolerance: Option<f64>) -> Option<Matrix> {
        let svd = self.svd_jacobi()?;
        let m = self.rows;
        let n = self.cols;
        // BUG FIX: the previous code sized the S^+ U^T product as n x m and
        // indexed sigma with the row index running to n, which reads out of
        // bounds whenever the matrix is wide (m < n, so sigma has only
        // min(m, n) entries). Use k = sigma.len() for that dimension;
        // for tall matrices k == n, so the tall-case behavior is unchanged.
        let k = svd.sigma.len();
        let tol = tolerance.unwrap_or_else(|| {
            let max_s = svd.sigma.iter().fold(0.0_f64, |a: f64, &b| a.max(b));
            if max_s > 0.0 {
                max_s * f64::EPSILON * (m.max(n) as f64)
            } else {
                f64::EPSILON
            }
        });
        // S^+: reciprocal of the singular values above tolerance.
        let mut s_pinv = vec![0.0; k];
        for (i, &s_val) in svd.sigma.iter().enumerate() {
            if s_val > tol {
                s_pinv[i] = 1.0 / s_val;
            }
        }
        // s_ut = S^+ U^T, built directly as a k x m matrix.
        let mut s_ut = Matrix::zeros(k, m);
        for i in 0..k {
            for j in 0..m {
                s_ut.set(i, j, s_pinv[i] * svd.u.get(j, i));
            }
        }
        let v = svd.v_t.transpose();
        Some(v.matmul(&s_ut))
    }
}
#[cfg(test)]
mod svd_tests {
    use super::*;
    // Ordering + orthogonality sanity check on the QR-iteration SVD.
    #[test]
    fn test_svd_simple_matrix() {
        let data = vec![1.0, 2.0, 3.0, 4.0];
        let m = Matrix::new(2, 2, data);
        let svd = m.svd();
        // Singular values must come back in non-increasing order.
        for i in 1..svd.sigma.len() {
            assert!(svd.sigma[i-1] >= svd.sigma[i]);
        }
        // U must have orthonormal columns: U^T U = I.
        let ut = svd.u.transpose();
        let ut_u = ut.matmul(&svd.u);
        assert!((ut_u.get(0, 0) - 1.0).abs() < 1e-10);
        assert!(ut_u.get(0, 1).abs() < 1e-10);
        assert!(ut_u.get(1, 0).abs() < 1e-10);
        assert!((ut_u.get(1, 1) - 1.0).abs() < 1e-10);
    }
    // Exact solve of a full-rank tall system where b = 2 * column 2.
    #[test]
    fn test_svd_solve_basic() {
        let data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
        ];
        let m = Matrix::new(3, 2, data);
        let svd = m.svd();
        let b = vec![2.0, 4.0, 6.0];
        let x = m.svd_solve(&svd, &b);
        assert!((x[0] - 0.0).abs() < 1e-10);
        assert!((x[1] - 2.0).abs() < 1e-10);
    }
    // The default tolerance is proportional to the largest singular value.
    #[test]
    fn test_svd_tolerance_formula() {
        let data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
        ];
        let m = Matrix::new(3, 2, data);
        let svd = m.svd();
        let max_sigma = svd.sigma[0];
        let expected_tol = max_sigma * 100.0 * f64::EPSILON;
        assert!(expected_tol > 0.0);
        assert!(expected_tol < 1e-10);
    }
    // Rank-1 matrix: second singular value ~0, but the minimum-norm
    // solution still reproduces b.
    #[test]
    fn test_svd_solve_rank_deficient() {
        let data = vec![
            1.0, 2.0,
            2.0, 4.0,
            3.0, 6.0,
        ];
        let m = Matrix::new(3, 2, data);
        let svd = m.svd();
        assert!(svd.sigma[0] > 1e-10);
        assert!(svd.sigma[1] < 1e-10);
        let b = vec![3.0, 6.0, 9.0];
        let x = m.svd_solve(&svd, &b);
        assert!(x[0].is_finite());
        assert!(x[1].is_finite());
        let pred = m.get(0, 0) * x[0] + m.get(0, 1) * x[1];
        assert!((pred - b[0]).abs() < 1e-6);
    }
    // Both SVD backends should agree on ordering and detect the rank
    // deficiency of this singular 3x3 matrix.
    #[test]
    fn test_svd_jacobi_kahan_produce_results() {
        let data = vec![
            1.0, 2.0, 3.0,
            4.0, 5.0, 6.0,
            7.0, 8.0, 9.0,
        ];
        let m = Matrix::new(3, 3, data);
        let svd_kahan = m.svd();
        let svd_jacobi = m.svd_jacobi().unwrap();
        assert_eq!(svd_kahan.sigma.len(), svd_jacobi.sigma.len());
        for i in 1..svd_kahan.sigma.len() {
            assert!(svd_kahan.sigma[i-1] >= svd_kahan.sigma[i]);
            assert!(svd_jacobi.sigma[i-1] >= svd_jacobi.sigma[i]);
        }
        assert!(svd_kahan.sigma[2] < 1e-10);
        assert!(svd_jacobi.sigma[2] < 1e-10);
    }
    // Jacobi SVD on a rank-1 tall matrix: trailing singular value ~0.
    #[test]
    fn test_svd_jacobi_rank_deficient() {
        let data = vec![
            1.0, 2.0,
            2.0, 4.0,
            3.0, 6.0,
        ];
        let m = Matrix::new(3, 2, data);
        let svd = m.svd_jacobi().unwrap();
        assert_eq!(svd.sigma.len(), 2);
        assert!(svd.sigma[1] < 1e-10);
    }
    // Pseudo-inverse of the identity is the identity.
    #[test]
    fn test_pseudo_inverse_basic() {
        let data = vec![
            1.0, 0.0,
            0.0, 1.0,
        ];
        let m = Matrix::new(2, 2, data);
        let pinv = m.pseudo_inverse(None).unwrap();
        assert!((pinv.get(0, 0) - 1.0).abs() < 1e-10);
        assert!(pinv.get(0, 1).abs() < 1e-10);
        assert!(pinv.get(1, 0).abs() < 1e-10);
        assert!((pinv.get(1, 1) - 1.0).abs() < 1e-10);
    }
    // Rank-deficient pseudo-inverse: correct shape, all entries finite.
    #[test]
    fn test_pseudo_inverse_rank_deficient() {
        let data = vec![
            1.0, 0.0,
            1.0, 0.0,
            1.0, 0.0,
        ];
        let m = Matrix::new(3, 2, data);
        let pinv = m.pseudo_inverse(None).unwrap();
        assert_eq!(pinv.rows, 2);
        assert_eq!(pinv.cols, 3);
        for i in 0..pinv.rows {
            for j in 0..pinv.cols {
                assert!(pinv.get(i, j).is_finite());
            }
        }
    }
    // Shape checks for a wide input (2x4).
    #[test]
    fn test_svd_wide_matrix() {
        let data = vec![
            1.0, 2.0, 3.0, 4.0,
            5.0, 6.0, 7.0, 8.0,
        ];
        let m = Matrix::new(2, 4, data);
        let svd = m.svd();
        assert_eq!(svd.sigma.len(), 2);
        assert_eq!(svd.u.rows, 2);
        assert_eq!(svd.u.cols, 2);
        assert_eq!(svd.v_t.rows, 4);
        assert_eq!(svd.v_t.cols, 4);
    }
    // Shape checks for a tall input (4x2).
    #[test]
    fn test_svd_tall_matrix() {
        let data = vec![
            1.0, 2.0,
            3.0, 4.0,
            5.0, 6.0,
            7.0, 8.0,
        ];
        let m = Matrix::new(4, 2, data);
        let svd = m.svd();
        assert_eq!(svd.sigma.len(), 2);
        assert_eq!(svd.u.rows, 4);
        assert_eq!(svd.u.cols, 2);
        assert_eq!(svd.v_t.rows, 2);
        assert_eq!(svd.v_t.cols, 2);
    }
    // Solving a rank-deficient system with default tolerance must still
    // yield finite coefficients.
    #[test]
    fn test_svd_solve_with_custom_tolerance() {
        let data = vec![
            1.0, 2.0,
            2.0, 4.0,
            3.0, 6.0,
        ];
        let m = Matrix::new(3, 2, data);
        let svd = m.svd();
        let b = vec![3.0, 6.0, 9.0];
        let x_default = m.svd_solve(&svd, &b);
        assert!(x_default[0].is_finite());
        assert!(x_default[1].is_finite());
    }
}