use scirs2_core::ndarray::{Array1, Array2, ArrayView2};
use crate::error::{LinalgError, LinalgResult};
/// Frobenius norm of a matrix: the square root of the sum of squared entries.
fn frob_norm(m: &Array2<f64>) -> f64 {
    let sum_sq: f64 = m.iter().fold(0.0, |acc, &v| acc + v * v);
    sum_sq.sqrt()
}
/// Frobenius inner product ⟨A, B⟩ = Σ_ij a_ij · b_ij over paired entries.
fn frob_inner(a: &Array2<f64>, b: &Array2<f64>) -> f64 {
    let mut acc = 0.0_f64;
    for (&x, &y) in a.iter().zip(b.iter()) {
        acc += x * y;
    }
    acc
}
/// Symmetrise a square matrix in place: each off-diagonal pair
/// `(a[i,j], a[j,i])` is replaced by its mean. The diagonal is untouched.
fn symmetrise(a: &mut Array2<f64>) {
    let n = a.nrows();
    for row in 0..n {
        for col in (row + 1)..n {
            // 0.5 * x is bit-identical to x / 2.0 for f64.
            let mean = 0.5 * (a[[row, col]] + a[[col, row]]);
            a[[row, col]] = mean;
            a[[col, row]] = mean;
        }
    }
}
/// Project a symmetric matrix onto the cone of positive semidefinite
/// matrices (nearest point in the Frobenius norm): eigendecompose
/// A = V Λ Vᵀ and clamp negative eigenvalues to zero, giving V Λ⁺ Vᵀ.
///
/// # Errors
/// Propagates any failure from the symmetric eigendecomposition.
fn project_psd(a: &Array2<f64>) -> LinalgResult<Array2<f64>> {
    let n = a.nrows();
    let (eigenvalues, eigenvectors) = crate::eigen::eigh(&a.view(), None)?;
    // Scale each eigenvector column by its clamped eigenvalue, then form
    // (V Λ⁺) · Vᵀ with a single matrix product. This replaces the previous
    // per-eigenvector rank-1 accumulation over indexed entries with one
    // `dot`, which is clearer and uses the optimized matmul path.
    let mut scaled = eigenvectors.clone();
    for k in 0..n {
        let lam = eigenvalues[k].max(0.0);
        for i in 0..n {
            scaled[[i, k]] *= lam;
        }
    }
    Ok(scaled.dot(&eigenvectors.t()))
}
/// Result of [`nearest_positive_definite`].
#[derive(Debug, Clone)]
pub struct NearestPdResult {
    /// The computed (approximately) positive definite matrix.
    pub matrix: Array2<f64>,
    /// Frobenius distance between the input and `matrix`.
    pub distance: f64,
    /// Number of alternating-projection iterations performed.
    pub iterations: usize,
    /// Whether the iteration met its tolerance before the iteration cap.
    pub converged: bool,
}
/// Find the nearest positive definite matrix to `a` in the Frobenius norm.
///
/// Alternates projection onto the PSD cone (eigenvalue clamping) with
/// projection onto the symmetric matrices, carrying Dykstra's correction
/// term for the non-affine cone projection so the iterates converge to
/// the nearest point rather than merely a feasible one. After the
/// iteration, `epsilon` is added to the diagonal to push the result from
/// semidefinite to strictly definite.
///
/// # Arguments
/// * `a` - square input matrix.
/// * `max_iter` - iteration cap (defaults to 200).
/// * `tol` - relative tolerance on the per-iteration change (defaults to 1e-12).
/// * `epsilon` - diagonal shift applied after the iteration (defaults to
///   1e-8; pass `Some(0.0)` to skip the shift).
///
/// # Errors
/// `ShapeError` if `a` is not square or is empty; errors from the
/// eigendecomposition inside the PSD projection are propagated.
pub fn nearest_positive_definite(
    a: &ArrayView2<f64>,
    max_iter: Option<usize>,
    tol: Option<f64>,
    epsilon: Option<f64>,
) -> LinalgResult<NearestPdResult> {
    let n = a.nrows();
    if n != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "nearest_positive_definite: matrix must be square, got {}×{}",
            n,
            a.ncols()
        )));
    }
    if n == 0 {
        return Err(LinalgError::ShapeError(
            "nearest_positive_definite: empty matrix".to_string(),
        ));
    }
    let max_iter = max_iter.unwrap_or(200);
    let tol = tol.unwrap_or(1e-12);
    let eps = epsilon.unwrap_or(1e-8);
    let a_owned = a.to_owned();
    let mut y = a_owned.clone();
    // `ds` is Dykstra's correction for the (non-affine) PSD-cone projection.
    let mut ds = Array2::<f64>::zeros((n, n));
    let mut converged = false;
    let mut iterations = 0_usize;
    for _iter in 0..max_iter {
        iterations = _iter + 1;
        // Remove the previous correction before projecting onto the cone.
        let r = &y - &ds;
        let mut x = project_psd(&r)?;
        // Update the correction: ds = P_psd(r) - r.
        for i in 0..n {
            for j in 0..n {
                ds[[i, j]] = x[[i, j]] - r[[i, j]];
            }
        }
        // Second projection: onto the symmetric matrices. This set is an
        // affine subspace, so no correction term is needed for it.
        symmetrise(&mut x);
        // Frobenius norm of the change between successive iterates.
        let change_norm = {
            let mut s = 0.0_f64;
            for i in 0..n {
                for j in 0..n {
                    let d = x[[i, j]] - y[[i, j]];
                    s += d * d;
                }
            }
            s.sqrt()
        };
        y = x;
        // Relative stopping test: change small compared to ‖y‖_F + 1.
        if change_norm < tol * (frob_norm(&y) + 1.0) {
            converged = true;
            break;
        }
    }
    // Shift the diagonal so the result is strictly positive definite
    // (a PSD projection can leave zero eigenvalues).
    if eps > 0.0 {
        for i in 0..n {
            y[[i, i]] += eps;
        }
    }
    symmetrise(&mut y);
    // Distance reported is from the original input to the final matrix,
    // including the epsilon shift.
    let distance = {
        let mut s = 0.0_f64;
        for i in 0..n {
            for j in 0..n {
                let d = a[[i, j]] - y[[i, j]];
                s += d * d;
            }
        }
        s.sqrt()
    };
    Ok(NearestPdResult {
        matrix: y,
        distance,
        iterations,
        converged,
    })
}
/// Result of [`nearest_orthogonal`].
#[derive(Debug, Clone)]
pub struct NearestOrthogonalResult {
    /// The nearest matrix with orthonormal columns.
    pub matrix: Array2<f64>,
    /// Frobenius distance between the input and `matrix`.
    pub distance: f64,
}
/// Find the nearest matrix with orthonormal columns to `a` (requires
/// m ≥ n) in the Frobenius norm, via the orthogonal Procrustes solution
/// Q = U·Vᵀ built from the SVD of `a`.
///
/// # Errors
/// `ShapeError` if m < n or the matrix is empty; SVD errors propagate.
pub fn nearest_orthogonal(a: &ArrayView2<f64>) -> LinalgResult<NearestOrthogonalResult> {
    let rows = a.nrows();
    let cols = a.ncols();
    if rows < cols {
        return Err(LinalgError::ShapeError(format!(
            "nearest_orthogonal: matrix must have m >= n, got {}×{}",
            rows, cols
        )));
    }
    if rows == 0 || cols == 0 {
        return Err(LinalgError::ShapeError(
            "nearest_orthogonal: empty matrix".to_string(),
        ));
    }
    let (u, _s, vt) = crate::decomposition::svd(a, false, None)?;
    let q = u.dot(&vt);
    // Accumulate ‖A − Q‖_F entry by entry.
    let mut acc = 0.0_f64;
    for r in 0..rows {
        for c in 0..cols {
            let diff = a[[r, c]] - q[[r, c]];
            acc += diff * diff;
        }
    }
    let distance = acc.sqrt();
    Ok(NearestOrthogonalResult { matrix: q, distance })
}
/// Return the nearest symmetric matrix, (A + Aᵀ)/2, together with the
/// Frobenius distance from the input to it.
///
/// # Errors
/// `ShapeError` if the matrix is not square.
pub fn nearest_symmetric(a: &ArrayView2<f64>) -> LinalgResult<(Array2<f64>, f64)> {
    let n = a.nrows();
    if n != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "nearest_symmetric: matrix must be square, got {}×{}",
            n,
            a.ncols()
        )));
    }
    let mut sym = a.to_owned();
    symmetrise(&mut sym);
    // ‖A − S‖_F via a paired sweep over both matrices in logical order.
    let distance = a
        .iter()
        .zip(sym.iter())
        .map(|(&orig, &s)| {
            let d = orig - s;
            d * d
        })
        .sum::<f64>()
        .sqrt();
    Ok((sym, distance))
}
/// Result of [`nearest_correlation`].
#[derive(Debug, Clone)]
pub struct NearestCorrelationResult {
    /// The computed correlation matrix (symmetric, unit diagonal, PSD up
    /// to the convergence tolerance).
    pub matrix: Array2<f64>,
    /// Frobenius distance between the input and `matrix`.
    pub distance: f64,
    /// Number of alternating-projection iterations performed.
    pub iterations: usize,
    /// Whether the iteration met its tolerance before the iteration cap.
    pub converged: bool,
}
/// Compute the nearest correlation matrix to `a` in the Frobenius norm
/// using Higham's alternating-projections method: alternate between the
/// PSD cone (with Dykstra's correction) and the affine set of matrices
/// with unit diagonal.
///
/// # Arguments
/// * `a` - square input matrix.
/// * `max_iter` - iteration cap (defaults to 1000).
/// * `tol` - absolute tolerance on the per-iteration change (defaults to 1e-12).
///
/// # Errors
/// `ShapeError` for a non-square or empty matrix; eigendecomposition
/// errors from the PSD projection are propagated.
pub fn nearest_correlation(
    a: &ArrayView2<f64>,
    max_iter: Option<usize>,
    tol: Option<f64>,
) -> LinalgResult<NearestCorrelationResult> {
    let n = a.nrows();
    if n != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "nearest_correlation: matrix must be square, got {}×{}",
            n,
            a.ncols()
        )));
    }
    if n == 0 {
        return Err(LinalgError::ShapeError(
            "nearest_correlation: empty matrix".to_string(),
        ));
    }
    let max_iter = max_iter.unwrap_or(1000);
    let tol = tol.unwrap_or(1e-12);
    // Start from the symmetric part of the input.
    let mut x = {
        let mut tmp = a.to_owned();
        symmetrise(&mut tmp);
        tmp
    };
    // `ds` is Dykstra's correction for the (non-affine) PSD projection.
    let mut ds = Array2::<f64>::zeros((n, n));
    let mut converged = false;
    let mut iterations = 0_usize;
    for _iter in 0..max_iter {
        iterations = _iter + 1;
        // Remove the previous correction before projecting onto the cone.
        let r: Array2<f64> = &x - &ds;
        let y = project_psd(&r)?;
        // Update the correction: ds = P_psd(r) - r.
        for i in 0..n {
            for j in 0..n {
                ds[[i, j]] = y[[i, j]] - r[[i, j]];
            }
        }
        // Project onto the unit-diagonal set (affine: just reset the
        // diagonal, no correction term needed).
        let mut x_new = y.clone();
        for i in 0..n {
            x_new[[i, i]] = 1.0;
        }
        // Frobenius norm of the change between successive iterates.
        let change = {
            let mut s = 0.0_f64;
            for i in 0..n {
                for j in 0..n {
                    let d = x_new[[i, j]] - x[[i, j]];
                    s += d * d;
                }
            }
            s.sqrt()
        };
        x = x_new;
        // NOTE(review): this stopping test is absolute, while
        // nearest_positive_definite uses a relative one — confirm the
        // inconsistency is intentional.
        if change < tol {
            converged = true;
            break;
        }
    }
    // Final clean-up: enforce exact symmetry and an exact unit diagonal.
    symmetrise(&mut x);
    for i in 0..n {
        x[[i, i]] = 1.0;
    }
    let distance = {
        let mut s = 0.0_f64;
        for i in 0..n {
            for j in 0..n {
                let d = a[[i, j]] - x[[i, j]];
                s += d * d;
            }
        }
        s.sqrt()
    };
    Ok(NearestCorrelationResult {
        matrix: x,
        distance,
        iterations,
        converged,
    })
}
/// Result of [`nearest_doubly_stochastic`].
#[derive(Debug, Clone)]
pub struct NearestDoublyStochasticResult {
    /// The balanced matrix: nonnegative with row and column sums ≈ 1.
    pub matrix: Array2<f64>,
    /// Frobenius distance between the input and `matrix`.
    pub distance: f64,
    /// Number of Sinkhorn balancing sweeps performed.
    pub iterations: usize,
    /// Whether the row/column sums met the tolerance before the cap.
    pub converged: bool,
}
/// Produce a doubly stochastic matrix from `a` via Sinkhorn–Knopp
/// balancing: clamp entries to be nonnegative, then alternately
/// normalize rows and columns until both sets of sums are within `tol`
/// of 1.
///
/// NOTE(review): Sinkhorn balancing yields *a* doubly stochastic matrix,
/// not in general the Frobenius-nearest one; the reported `distance` is
/// simply the distance from the input to the balanced matrix. Confirm
/// this matches the function's intended contract.
///
/// # Arguments
/// * `a` - square input matrix.
/// * `max_iter` - sweep cap (defaults to 10 000).
/// * `tol` - tolerance on row/column sum deviation from 1 (defaults to 1e-10).
///
/// # Errors
/// `ShapeError` for a non-square or empty matrix; `ComputationError` if
/// a row or column sum collapses to (near) zero during balancing.
pub fn nearest_doubly_stochastic(
    a: &ArrayView2<f64>,
    max_iter: Option<usize>,
    tol: Option<f64>,
) -> LinalgResult<NearestDoublyStochasticResult> {
    let n = a.nrows();
    if n != a.ncols() {
        return Err(LinalgError::ShapeError(format!(
            "nearest_doubly_stochastic: matrix must be square, got {}×{}",
            n,
            a.ncols()
        )));
    }
    if n == 0 {
        return Err(LinalgError::ShapeError(
            "nearest_doubly_stochastic: empty matrix".to_string(),
        ));
    }
    let max_iter = max_iter.unwrap_or(10_000);
    let tol = tol.unwrap_or(1e-10);
    // Clamp negatives: doubly stochastic matrices are nonnegative.
    let mut m: Array2<f64> = a.mapv(|v| v.max(0.0));
    let mut converged = false;
    let mut iterations = 0_usize;
    for _iter in 0..max_iter {
        iterations = _iter + 1;
        // Row-normalization pass.
        for i in 0..n {
            let row_sum: f64 = (0..n).map(|j| m[[i, j]]).sum();
            // Guard against division by a vanishing sum.
            if row_sum < 1e-300 {
                return Err(LinalgError::ComputationError(format!(
                    "nearest_doubly_stochastic: row {} became zero during iteration",
                    i
                )));
            }
            for j in 0..n {
                m[[i, j]] /= row_sum;
            }
        }
        // Column-normalization pass (leaves columns summing to exactly 1;
        // the rows then carry the residual imbalance).
        for j in 0..n {
            let col_sum: f64 = (0..n).map(|i| m[[i, j]]).sum();
            if col_sum < 1e-300 {
                return Err(LinalgError::ComputationError(format!(
                    "nearest_doubly_stochastic: column {} became zero during iteration",
                    j
                )));
            }
            for i in 0..n {
                m[[i, j]] /= col_sum;
            }
        }
        // Convergence is only checked every 50 sweeps to amortize the
        // O(n²) residual computation.
        if _iter % 50 == 0 {
            let mut max_dev = 0.0_f64;
            for i in 0..n {
                let rs: f64 = (0..n).map(|j| m[[i, j]]).sum::<f64>() - 1.0;
                max_dev = max_dev.max(rs.abs());
                let cs: f64 = (0..n).map(|j| m[[j, i]]).sum::<f64>() - 1.0;
                max_dev = max_dev.max(cs.abs());
            }
            if max_dev < tol {
                converged = true;
                break;
            }
        }
    }
    // The loop may exhaust max_iter between periodic checks; re-check the
    // residual once more. Only row sums are examined here: the final pass
    // was a column normalization, so columns sum to 1 exactly.
    if !converged {
        let mut max_dev = 0.0_f64;
        for i in 0..n {
            let rs: f64 = (0..n).map(|j| m[[i, j]]).sum::<f64>() - 1.0;
            max_dev = max_dev.max(rs.abs());
        }
        if max_dev < tol {
            converged = true;
        }
    }
    let distance = {
        let mut s = 0.0_f64;
        for i in 0..n {
            for j in 0..n {
                let d = a[[i, j]] - m[[i, j]];
                s += d * d;
            }
        }
        s.sqrt()
    };
    Ok(NearestDoublyStochasticResult {
        matrix: m,
        distance,
        iterations,
        converged,
    })
}
/// Result of [`nearest_low_rank`].
#[derive(Debug, Clone)]
pub struct NearestLowRankResult {
    /// The rank-k approximation Uₖ Σₖ Vᵀₖ, assembled as a dense matrix.
    pub matrix: Array2<f64>,
    /// Frobenius distance from the input (l2 norm of the discarded
    /// singular values).
    pub distance: f64,
    /// Left singular vectors, m×k.
    pub u: Array2<f64>,
    /// Top k singular values, in the order returned by the SVD.
    pub singular_values: Array1<f64>,
    /// Right singular vectors (transposed), k×n.
    pub vt: Array2<f64>,
}
/// Best rank-`k` approximation of `a` in the Frobenius norm
/// (Eckart–Young): truncate the SVD to its top `k` singular triplets.
///
/// # Arguments
/// * `a` - m×n input matrix.
/// * `k` - target rank, in `1..=min(m, n)`.
///
/// # Errors
/// `ShapeError` for an empty matrix, `ValueError` for an out-of-range
/// `k`; SVD errors propagate.
pub fn nearest_low_rank(a: &ArrayView2<f64>, k: usize) -> LinalgResult<NearestLowRankResult> {
    let m = a.nrows();
    let n = a.ncols();
    if m == 0 || n == 0 {
        return Err(LinalgError::ShapeError(
            "nearest_low_rank: empty matrix".to_string(),
        ));
    }
    let r = m.min(n);
    if k == 0 || k > r {
        return Err(LinalgError::ValueError(format!(
            "nearest_low_rank: k must be in 1..={}, got {}",
            r, k
        )));
    }
    let (u_full, s_full, vt_full) = crate::decomposition::svd(a, false, None)?;
    let u_k = u_full.slice(scirs2_core::ndarray::s![.., ..k]).to_owned();
    let s_k = s_full.slice(scirs2_core::ndarray::s![..k]).to_owned();
    let vt_k = vt_full.slice(scirs2_core::ndarray::s![..k, ..]).to_owned();
    // Reconstruct A_k = (U_k Σ_k) · Vᵀ_k with one matrix product — scale
    // the columns of U_k by the singular values first. This replaces the
    // previous entry-by-entry rank-1 accumulation loop with the
    // optimized matmul path.
    let mut u_scaled = u_k.clone();
    for t in 0..k {
        let sv = s_k[t];
        for i in 0..m {
            u_scaled[[i, t]] *= sv;
        }
    }
    let a_k = u_scaled.dot(&vt_k);
    // Eckart–Young: the Frobenius error equals the l2 norm of the
    // discarded singular values σ_{k+1..r}.
    let distance = s_full
        .iter()
        .skip(k)
        .map(|&sv| sv * sv)
        .sum::<f64>()
        .sqrt();
    Ok(NearestLowRankResult {
        matrix: a_k,
        distance,
        u: u_k,
        singular_values: s_k,
        vt: vt_k,
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    // (A + Aᵀ)/2 of [[1,2],[4,3]] has off-diagonals 3; the distance is
    // sqrt(1² + 1²) = √2.
    #[test]
    fn test_nearest_symmetric_basic() {
        let a = array![[1.0_f64, 2.0], [4.0, 3.0]];
        let (s, dist) = nearest_symmetric(&a.view()).expect("nearest_symmetric failed");
        assert!((s[[0, 1]] - 3.0).abs() < 1e-14, "s[0,1] = {}", s[[0, 1]]);
        assert!((s[[1, 0]] - 3.0).abs() < 1e-14, "s[1,0] = {}", s[[1, 0]]);
        assert!((dist - std::f64::consts::SQRT_2).abs() < 1e-12, "dist = {}", dist);
    }

    // A symmetric input must map to itself with zero distance.
    #[test]
    fn test_nearest_symmetric_already_symmetric() {
        let a = array![[1.0_f64, 0.5], [0.5, 2.0]];
        let (s, dist) = nearest_symmetric(&a.view()).expect("failed");
        assert!(dist < 1e-15, "dist = {}", dist);
        for i in 0..2 {
            for j in 0..2 {
                assert!((s[[i, j]] - a[[i, j]]).abs() < 1e-14);
            }
        }
    }

    // The identity is already SPD; with epsilon = 0 the distance should
    // be essentially zero.
    #[test]
    fn test_nearest_pd_already_spd() {
        let a = array![[1.0_f64, 0.0], [0.0, 1.0]];
        let res = nearest_positive_definite(&a.view(), None, None, Some(0.0))
            .expect("nearest_pd failed");
        assert!(
            res.distance < 1e-6,
            "Expected small distance for identity, got {}",
            res.distance
        );
    }

    // [[1,2],[2,1]] has eigenvalues 3 and -1 (indefinite); the result
    // must have a strictly positive smallest eigenvalue.
    #[test]
    fn test_nearest_pd_indefinite() {
        let a = array![[1.0_f64, 2.0], [2.0, 1.0]];
        let res = nearest_positive_definite(&a.view(), None, None, Some(1e-6))
            .expect("nearest_pd failed");
        let (vals, _) = crate::eigen::eigh(&res.matrix.view(), None)
            .expect("eigh failed");
        assert!(
            vals[0] > 0.0,
            "Smallest eigenvalue must be positive, got {}",
            vals[0]
        );
    }

    // Result Q must satisfy QᵀQ = I.
    #[test]
    fn test_nearest_orthogonal_basic() {
        let a = array![[2.0_f64, 0.5], [0.5, 1.5]];
        let res = nearest_orthogonal(&a.view()).expect("nearest_orthogonal failed");
        let qt_q = res.matrix.t().dot(&res.matrix);
        assert!((qt_q[[0, 0]] - 1.0).abs() < 1e-10, "qt_q[0,0] = {}", qt_q[[0, 0]]);
        assert!((qt_q[[1, 1]] - 1.0).abs() < 1e-10, "qt_q[1,1] = {}", qt_q[[1, 1]]);
        assert!(qt_q[[0, 1]].abs() < 1e-10, "qt_q[0,1] = {}", qt_q[[0, 1]]);
    }

    // A rotation matrix is already orthogonal and should be a fixed point.
    #[test]
    fn test_nearest_orthogonal_already_orthogonal() {
        let theta = std::f64::consts::PI / 4.0;
        let a = array![[theta.cos(), -theta.sin()],
            [theta.sin(), theta.cos()]];
        let res = nearest_orthogonal(&a.view()).expect("failed");
        assert!(
            res.distance < 1e-12,
            "distance = {}; rotation matrix should map to itself",
            res.distance
        );
    }

    // The nearest correlation matrix must have an exact unit diagonal.
    #[test]
    fn test_nearest_correlation_unit_diagonal() {
        let a = array![[1.02_f64, 0.8], [0.8, 0.99]];
        let res = nearest_correlation(&a.view(), None, None).expect("failed");
        assert!(
            (res.matrix[[0, 0]] - 1.0).abs() < 1e-6,
            "diag[0] = {}",
            res.matrix[[0, 0]]
        );
        assert!(
            (res.matrix[[1, 1]] - 1.0).abs() < 1e-6,
            "diag[1] = {}",
            res.matrix[[1, 1]]
        );
    }

    // Starting from an indefinite matrix, the result must be PSD (up to
    // a small numerical tolerance).
    #[test]
    fn test_nearest_correlation_psd() {
        let a = array![[1.0_f64, 2.0], [2.0, 1.0]];
        let res = nearest_correlation(&a.view(), None, None).expect("failed");
        let (vals, _) = crate::eigen::eigh(&res.matrix.view(), None).expect("eigh failed");
        assert!(vals[0] >= -1e-8, "smallest eigenvalue = {}", vals[0]);
    }

    // Every row and column of the balanced matrix must sum to 1.
    #[test]
    fn test_nearest_doubly_stochastic_row_col_sums() {
        let a = array![[3.0_f64, 1.0], [1.0, 2.0]];
        let res = nearest_doubly_stochastic(&a.view(), None, None).expect("failed");
        let m = &res.matrix;
        let tol = 1e-7;
        for i in 0..2 {
            let rs: f64 = (0..2).map(|j| m[[i, j]]).sum();
            assert!((rs - 1.0).abs() < tol, "row {} sum = {}", i, rs);
            let cs: f64 = (0..2).map(|j| m[[j, i]]).sum();
            assert!((cs - 1.0).abs() < tol, "col {} sum = {}", i, cs);
        }
    }

    // This matrix is exactly rank 1 (outer product of [1,2,3]), so its
    // rank-1 truncation reproduces it with zero error.
    #[test]
    fn test_nearest_low_rank_rank1() {
        let a = array![[1.0_f64, 2.0, 3.0],
            [2.0, 4.0, 6.0],
            [3.0, 6.0, 9.0]];
        let res = nearest_low_rank(&a.view(), 1).expect("failed");
        assert!(
            res.distance < 1e-8,
            "rank-1 distance should be ~0, got {}",
            res.distance
        );
        for i in 0..3 {
            for j in 0..3 {
                assert!(
                    (res.matrix[[i, j]] - a[[i, j]]).abs() < 1e-8,
                    "mismatch at ({},{})",
                    i,
                    j
                );
            }
        }
    }

    // diag(3, 1) has singular values {3, 1}; discarding the smaller one
    // gives a Frobenius error of exactly 1.
    #[test]
    fn test_nearest_low_rank_distance() {
        let a = array![[3.0_f64, 0.0], [0.0, 1.0]];
        let res = nearest_low_rank(&a.view(), 1).expect("failed");
        assert!(
            (res.distance - 1.0).abs() < 1e-10,
            "distance = {}",
            res.distance
        );
    }
}