use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, ScalarOperand};
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::random::prelude::*;
use std::iter::Sum;
use crate::error::{LinalgError, LinalgResult};
use crate::norm::vector_norm;
use crate::solve::solve;
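/// Result of the [`lanczos`] iteration: up to `k` Ritz eigenvalues and the
/// matching eigenvectors stored column-wise in `eigenvectors`.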
#[derive(Debug, Clone)]
pub struct LanczosResult<F> {
pub eigenvalues: Array1<F>,
pub eigenvectors: Array2<F>,
}
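/// Result of the [`arnoldi`] iteration: the `(k + 1) x k` upper Hessenberg
/// matrix `h`, the `n x (k + 1)` orthonormal Krylov basis `q`, and the number
/// of completed `steps` (fewer than `k` if the iteration broke down early).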
#[derive(Debug, Clone)]
pub struct ArnoldiResult<F> {
pub h: Array2<F>,
pub q: Array2<F>,
pub steps: usize,
}
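/// Estimates the dominant (largest-magnitude) eigenvalue of a square matrix
/// with the power method: repeatedly multiply by `a`, renormalize, and track
/// the Rayleigh quotient until it changes by less than `tol` or `n_iter`
/// iterations have run.
///
/// A minimal usage sketch (marked `ignore` since the enclosing crate path is
/// not assumed here):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[4.0_f64, 1.0], [2.0, 3.0]]; // eigenvalues 5 and 2
/// let (lam, v) = power_iteration_dense(&a.view(), 300, 1e-12)?;
/// assert!((lam - 5.0).abs() < 1e-8);
/// assert_eq!(v.len(), 2); // unit-norm eigenvector estimate
/// ```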
pub fn power_iteration_dense<F>(
a: &ArrayView2<F>,
n_iter: usize,
tol: F,
) -> LinalgResult<(F, Array1<F>)>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let (nrows, ncols) = (a.nrows(), a.ncols());
if nrows != ncols {
return Err(LinalgError::ShapeError(format!(
"power_iteration_dense: matrix must be square, got ({nrows}, {ncols})"
)));
}
if nrows == 0 {
return Err(LinalgError::ShapeError(
"power_iteration_dense: matrix is empty".to_string(),
));
}
let n = nrows;
let mut rng = scirs2_core::random::rng();
let mut b = Array1::<F>::zeros(n);
for bi in b.iter_mut() {
*bi = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm = vector_norm(&b.view(), 2)?;
if norm > F::epsilon() {
b.mapv_inplace(|x| x / norm);
} else {
b[0] = F::one();
}
let mut eigenvalue = F::zero();
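// Seed the previous estimate so the first convergence check cannot pass
// before a single Rayleigh quotient has been computed.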
let mut prev_eigenvalue = F::one() + tol + tol;
for _ in 0..n_iter {
let mut b_new = Array1::<F>::zeros(n);
for i in 0..n {
let mut s = F::zero();
for j in 0..n {
s += a[[i, j]] * b[j];
}
b_new[i] = s;
}
eigenvalue = b
.iter()
.zip(b_new.iter())
.fold(F::zero(), |acc, (&bi, &abi)| acc + bi * abi);
let norm_new = vector_norm(&b_new.view(), 2)?;
if norm_new > F::epsilon() {
b_new.mapv_inplace(|x| x / norm_new);
}
b = b_new;
if (eigenvalue - prev_eigenvalue).abs() < tol {
break;
}
prev_eigenvalue = eigenvalue;
}
Ok((eigenvalue, b))
}
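/// Estimates the eigenvalue of `a` closest to `shift` (plus its eigenvector)
/// with shifted inverse iteration: each step solves `(A - shift * I) y = x`,
/// renormalizes, and updates a Rayleigh quotient estimate until it changes by
/// less than `tol`.
///
/// Usage sketch (`ignore`d, crate path not assumed):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[3.0_f64, 1.0], [1.0, 3.0]]; // eigenvalues 2 and 4
/// let (lam, _v) = inverse_power_iteration(&a.view(), 1.8, 300, 1e-12)?;
/// assert!((lam - 2.0).abs() < 1e-7); // shift 1.8 targets the eigenvalue 2
/// ```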
pub fn inverse_power_iteration<F>(
a: &ArrayView2<F>,
shift: F,
n_iter: usize,
tol: F,
) -> LinalgResult<(F, Array1<F>)>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let (nrows, ncols) = (a.nrows(), a.ncols());
if nrows != ncols {
return Err(LinalgError::ShapeError(format!(
"inverse_power_iteration: matrix must be square, got ({nrows}, {ncols})"
)));
}
if nrows == 0 {
return Err(LinalgError::ShapeError(
"inverse_power_iteration: matrix is empty".to_string(),
));
}
let n = nrows;
let mut shifted = a.to_owned();
for i in 0..n {
shifted[[i, i]] -= shift;
}
let mut rng = scirs2_core::random::rng();
let mut x = Array1::<F>::zeros(n);
for xi in x.iter_mut() {
*xi = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm = vector_norm(&x.view(), 2)?;
if norm > F::epsilon() {
x.mapv_inplace(|v| v / norm);
} else {
x[0] = F::one();
}
let mut eigenvalue = F::zero();
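// Seed the previous estimate so the first convergence check cannot pass
// before a Rayleigh quotient has been computed.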
let mut prev_eigenvalue = F::one() + tol + tol;
for _ in 0..n_iter {
let y = solve(&shifted.view(), &x.view(), None).map_err(|e| {
LinalgError::SingularMatrixError(format!(
"inverse_power_iteration: (A − σI) is singular or nearly singular: {e}"
))
})?;
let norm_y = vector_norm(&y.view(), 2)?;
if norm_y > F::epsilon() {
x = y.mapv(|v| v / norm_y);
} else {
break;
}
// Rayleigh quotient of the updated iterate, so the returned
// (eigenvalue, eigenvector) pair stays consistent.
let ax = mat_vec_mul(a, &x.view());
eigenvalue = x
.iter()
.zip(ax.iter())
.fold(F::zero(), |acc, (&xi, &axi)| acc + xi * axi);
if (eigenvalue - prev_eigenvalue).abs() < tol {
break;
}
prev_eigenvalue = eigenvalue;
}
Ok((eigenvalue, x))
}
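/// Refines an eigenpair of `a` starting from `x0` with Rayleigh quotient
/// iteration: every step re-centers the shift at the current Rayleigh
/// quotient and solves `(A - sigma * I) y = x`. For symmetric matrices the
/// local convergence is cubic, but which eigenpair is found depends on `x0`.
///
/// Sketch (`ignore`d, crate path not assumed):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[3.0_f64, 0.0], [0.0, 5.0]];
/// let x0 = array![0.1_f64, 1.0]; // biased toward the second eigenvector
/// let (lam, _v) = rayleigh_quotient_iteration(&a.view(), &x0.view(), 50, 1e-12)?;
/// assert!((lam - 5.0).abs() < 1e-8);
/// ```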
pub fn rayleigh_quotient_iteration<F>(
a: &ArrayView2<F>,
x0: &ArrayView1<F>,
n_iter: usize,
tol: F,
) -> LinalgResult<(F, Array1<F>)>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let n = a.nrows();
if a.nrows() != a.ncols() {
return Err(LinalgError::ShapeError(format!(
"rayleigh_quotient_iteration: matrix must be square, got ({}, {})",
a.nrows(),
a.ncols()
)));
}
if x0.len() != n {
return Err(LinalgError::ShapeError(format!(
"rayleigh_quotient_iteration: x0 length {} != matrix size {n}",
x0.len()
)));
}
let mut x = x0.to_owned();
let norm = vector_norm(&x.view(), 2)?;
if norm < F::epsilon() {
return Err(LinalgError::InvalidInput(
"rayleigh_quotient_iteration: initial vector x0 is zero".to_string(),
));
}
x.mapv_inplace(|v| v / norm);
let mut sigma = rayleigh_quotient_scalar(a, &x.view());
let mut prev_sigma = sigma - tol - tol;
for _ in 0..n_iter {
if (sigma - prev_sigma).abs() < tol {
break;
}
prev_sigma = sigma;
let mut shifted = a.to_owned();
for i in 0..n {
shifted[[i, i]] -= sigma;
}
let y = match solve(&shifted.view(), &x.view(), None) {
Ok(y) => y,
Err(_) => {
break;
}
};
let norm_y = vector_norm(&y.view(), 2)?;
if norm_y < F::epsilon() {
break;
}
x = y.mapv(|v| v / norm_y);
sigma = rayleigh_quotient_scalar(a, &x.view());
}
Ok((sigma, x))
}
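/// Rayleigh quotient `x^T A x` for a unit-norm `x`; callers normalize `x`
/// first, so no division by `x^T x` is needed.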
#[inline]
fn rayleigh_quotient_scalar<F>(a: &ArrayView2<F>, x: &ArrayView1<F>) -> F
where
F: Float + NumAssign + Sum + 'static,
{
let ax = mat_vec_mul(a, x);
x.iter()
.zip(ax.iter())
.fold(F::zero(), |acc, (&xi, &axi)| acc + xi * axi)
}
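/// Approximates the `k` largest-magnitude eigenpairs of a symmetric matrix
/// with the Lanczos method: build an orthonormal Krylov basis (with full
/// reorthogonalization), project `a` onto it as a tridiagonal matrix, solve
/// that small eigenproblem, and lift the Ritz vectors back to the original
/// space. Requires `0 < k < n`; the basis size is capped at `min(3k + 10, n)`.
///
/// Sketch (`ignore`d, crate path not assumed):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[4.0_f64, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
/// let res = lanczos(&a.view(), 2, 1e-12)?;
/// assert_eq!(res.eigenvalues.len(), 2);
/// assert_eq!(res.eigenvectors.dim(), (3, 2)); // one column per Ritz pair
/// ```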
pub fn lanczos<F>(a: &ArrayView2<F>, k: usize, tol: F) -> LinalgResult<LanczosResult<F>>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let (nrows, ncols) = (a.nrows(), a.ncols());
if nrows != ncols {
return Err(LinalgError::ShapeError(format!(
"lanczos: matrix must be square, got ({nrows}, {ncols})"
)));
}
let n = nrows;
if k == 0 || k >= n {
return Err(LinalgError::ShapeError(format!(
"lanczos: k={k} must satisfy 0 < k < n={n}"
)));
}
let m = (k * 3 + 10).min(n);
let mut q_vecs: Vec<Array1<F>> = Vec::with_capacity(m + 1);
let mut rng = scirs2_core::random::rng();
let mut q0 = Array1::<F>::zeros(n);
for qi in q0.iter_mut() {
*qi = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm0 = vector_norm(&q0.view(), 2)?;
if norm0 < F::epsilon() {
q0[0] = F::one();
} else {
q0.mapv_inplace(|x| x / norm0);
}
q_vecs.push(q0);
let mut alphas: Vec<F> = Vec::with_capacity(m);
let mut betas: Vec<F> = Vec::with_capacity(m);
let mut actual_m = 0usize;
for j in 0..m {
let v = mat_vec_mul(a, &q_vecs[j].view());
let alpha_j = q_vecs[j]
.iter()
.zip(v.iter())
.fold(F::zero(), |acc, (&qi, &vi)| acc + qi * vi);
alphas.push(alpha_j);
let mut w = v;
for i in 0..n {
w[i] -= alpha_j * q_vecs[j][i];
}
if j > 0 {
let beta_prev = betas[j - 1];
for i in 0..n {
w[i] -= beta_prev * q_vecs[j - 1][i];
}
}
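// Full reorthogonalization: re-project w against every stored basis
// vector to counter the loss of orthogonality that plain Lanczos
// suffers in floating point.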
for qv in q_vecs.iter().take(j + 1) {
let proj = qv
.iter()
.zip(w.iter())
.fold(F::zero(), |acc, (&qi, &wi)| acc + qi * wi);
for i in 0..n {
w[i] -= proj * qv[i];
}
}
let beta_j = vector_norm(&w.view(), 2)?;
actual_m = j + 1;
betas.push(beta_j);
if beta_j < tol || j + 1 == m {
break;
}
let q_next = w.mapv(|wi| wi / beta_j);
q_vecs.push(q_next);
}
let (tri_eigs, tri_evecs) =
tridiagonal_symmetric_eig(&alphas[..actual_m], &betas[..actual_m.saturating_sub(1)])?;
let m_used = actual_m;
let m_evecs = tri_evecs.ncols();
let take_k = k.min(m_evecs);
let mut idx: Vec<usize> = (0..m_evecs).collect();
idx.sort_by(|&a, &b| {
tri_eigs[b]
.abs()
.partial_cmp(&tri_eigs[a].abs())
.unwrap_or(std::cmp::Ordering::Equal)
});
idx.truncate(take_k);
let mut eigenvalues = Array1::<F>::zeros(take_k);
let mut eigenvectors = Array2::<F>::zeros((n, take_k));
for (new_col, &old_col) in idx.iter().enumerate() {
eigenvalues[new_col] = tri_eigs[old_col];
let mut vec_col = Array1::<F>::zeros(n);
for j in 0..m_used.min(q_vecs.len()) {
let coeff = tri_evecs[[j, old_col]];
for i in 0..n {
vec_col[i] += coeff * q_vecs[j][i];
}
}
let norm_col = vector_norm(&vec_col.view(), 2)?;
if norm_col > F::epsilon() {
vec_col.mapv_inplace(|x| x / norm_col);
}
eigenvectors.column_mut(new_col).assign(&vec_col);
}
Ok(LanczosResult {
eigenvalues,
eigenvectors,
})
}
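/// Solves the symmetric tridiagonal eigenproblem with diagonal `alpha` and
/// off-diagonal `beta` by forming the dense matrix and delegating to `eigh`.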
fn tridiagonal_symmetric_eig<F>(alpha: &[F], beta: &[F]) -> LinalgResult<(Vec<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let n = alpha.len();
if n == 0 {
return Ok((vec![], Array2::<F>::zeros((0, 0))));
}
if n == 1 {
return Ok((vec![alpha[0]], Array2::<F>::eye(1)));
}
let mut t = Array2::<F>::zeros((n, n));
for i in 0..n {
t[[i, i]] = alpha[i];
}
for i in 0..beta.len() {
if i + 1 < n {
t[[i, i + 1]] = beta[i];
t[[i + 1, i]] = beta[i];
}
}
let (eigs, evecs) = crate::eigen::standard::eigh(&t.view(), None)?;
Ok((eigs.to_vec(), evecs))
}
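/// Runs `k` steps of the Arnoldi iteration on a general (not necessarily
/// symmetric) square matrix, producing an orthonormal Krylov basis `q` and an
/// upper Hessenberg `h` satisfying `A Q_k = Q_{k+1} H`. Requires
/// `1 <= k < n`; on breakdown the iteration stops early and `steps` records
/// how far it got.
///
/// Sketch (`ignore`d, crate path not assumed):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[1.0_f64, 2.0, 0.0], [0.0, 3.0, 1.0], [1.0, 0.0, 4.0]];
/// let res = arnoldi(&a.view(), 2)?;
/// assert_eq!(res.h.dim(), (3, 2)); // (k + 1) x k Hessenberg matrix
/// assert_eq!(res.q.dim(), (3, 3)); // n x (k + 1) orthonormal basis
/// ```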
pub fn arnoldi<F>(a: &ArrayView2<F>, k: usize) -> LinalgResult<ArnoldiResult<F>>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let (nrows, ncols) = (a.nrows(), a.ncols());
if nrows != ncols {
return Err(LinalgError::ShapeError(format!(
"arnoldi: matrix must be square, got ({nrows}, {ncols})"
)));
}
let n = nrows;
if k == 0 {
return Err(LinalgError::ShapeError(
"arnoldi: k must be >= 1".to_string(),
));
}
if k >= n {
return Err(LinalgError::ShapeError(format!(
"arnoldi: k={k} must be < n={n}"
)));
}
let mut h = Array2::<F>::zeros((k + 1, k));
let mut q = Array2::<F>::zeros((n, k + 1));
let mut rng = scirs2_core::random::rng();
let mut q0 = Array1::<F>::zeros(n);
for qi in q0.iter_mut() {
*qi = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm0 = vector_norm(&q0.view(), 2)?;
if norm0 < F::epsilon() {
q0[0] = F::one();
} else {
q0.mapv_inplace(|x| x / norm0);
}
q.column_mut(0).assign(&q0);
let mut steps = 0usize;
for j in 0..k {
let q_j = q.column(j).to_owned();
let mut w = mat_vec_mul(a, &q_j.view());
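// Modified Gram-Schmidt: orthogonalize w = A q_j against each previous
// basis vector in turn, storing the coefficients in column j of H.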
for i in 0..=j {
let qi = q.column(i).to_owned();
let h_ij = qi
.iter()
.zip(w.iter())
.fold(F::zero(), |acc, (&qi_v, &wi)| acc + qi_v * wi);
h[[i, j]] = h_ij;
for l in 0..n {
w[l] -= h_ij * qi[l];
}
}
let beta = vector_norm(&w.view(), 2)?;
h[[j + 1, j]] = beta;
steps = j + 1;
if beta < F::epsilon() {
break;
}
let q_next = w.mapv(|wi| wi / beta);
q.column_mut(j + 1).assign(&q_next);
}
Ok(ArnoldiResult { h, q, steps })
}
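/// Computes all eigenvalues and eigenvectors of a symmetric matrix with the
/// cyclic Jacobi method: sweep over each off-diagonal pair `(p, q)`, zero
/// `s[[p, q]]` with a plane rotation, and accumulate the rotations into the
/// eigenvector matrix. Sweeps stop once the off-diagonal Frobenius norm drops
/// below `tol` or `max_iter` sweeps have run. Eigenvalues are returned in
/// ascending order with matching eigenvector columns.
///
/// Sketch (`ignore`d, crate path not assumed):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let a = array![[2.0_f64, 1.0], [1.0, 3.0]];
/// let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-13, 500)?;
/// // Ascending: (5 - sqrt(5)) / 2, then (5 + sqrt(5)) / 2.
/// assert!(eigs[0] < eigs[1]);
/// assert_eq!(vecs.dim(), (2, 2));
/// ```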
pub fn jacobi_eigenvalue<F>(
a: &ArrayView2<F>,
tol: F,
max_iter: usize,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + 'static,
{
let (nrows, ncols) = (a.nrows(), a.ncols());
if nrows != ncols {
return Err(LinalgError::ShapeError(format!(
"jacobi_eigenvalue: matrix must be square, got ({nrows}, {ncols})"
)));
}
let n = nrows;
if n == 0 {
return Ok((Array1::<F>::zeros(0), Array2::<F>::zeros((0, 0))));
}
if n == 1 {
return Ok((Array1::from_elem(1, a[[0, 0]]), Array2::eye(1)));
}
let mut s = a.to_owned();
let mut v = Array2::<F>::eye(n);
let two = F::from(2.0).unwrap_or(F::one());
let half = F::from(0.5).unwrap_or(F::one());
'outer: for _sweep in 0..max_iter {
let mut off_norm_sq = F::zero();
for i in 0..n {
for j in (i + 1)..n {
off_norm_sq += s[[i, j]] * s[[i, j]] * two;
}
}
if off_norm_sq.sqrt() < tol {
break 'outer;
}
for p in 0..n {
for q in (p + 1)..n {
let s_pq = s[[p, q]];
if s_pq.abs() < F::epsilon() {
continue;
}
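// Choose the rotation angle that zeroes s[[p, q]]:
// tan(2θ) = 2 s_pq / (s_qq - s_pp), with θ = π/4 when the
// diagonal entries coincide.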
let theta = {
let diff = s[[q, q]] - s[[p, p]];
if diff.abs() < F::epsilon() {
F::from(std::f64::consts::FRAC_PI_4).unwrap_or(half)
} else {
let ratio = two * s_pq / diff;
half * ratio.atan()
}
};
let c = theta.cos();
let s_rot = theta.sin();
for r in 0..n {
if r == p || r == q {
continue;
}
let srp = s[[r, p]];
let srq = s[[r, q]];
s[[r, p]] = c * srp - s_rot * srq;
s[[p, r]] = s[[r, p]];
s[[r, q]] = s_rot * srp + c * srq;
s[[q, r]] = s[[r, q]];
}
let spp = s[[p, p]];
let sqq = s[[q, q]];
let spq = s[[p, q]];
s[[p, p]] = c * c * spp - two * c * s_rot * spq + s_rot * s_rot * sqq;
s[[q, q]] = s_rot * s_rot * spp + two * c * s_rot * spq + c * c * sqq;
s[[p, q]] = F::zero();
s[[q, p]] = F::zero();
for r in 0..n {
let vrp = v[[r, p]];
let vrq = v[[r, q]];
v[[r, p]] = c * vrp - s_rot * vrq;
v[[r, q]] = s_rot * vrp + c * vrq;
}
}
}
}
let eigenvalues: Vec<F> = (0..n).map(|i| s[[i, i]]).collect();
let mut idx: Vec<usize> = (0..n).collect();
idx.sort_by(|&a, &b| {
eigenvalues[a]
.partial_cmp(&eigenvalues[b])
.unwrap_or(std::cmp::Ordering::Equal)
});
let sorted_eigenvalues = Array1::from_iter(idx.iter().map(|&i| eigenvalues[i]));
let mut sorted_eigenvectors = Array2::<F>::zeros((n, n));
for (new_col, &old_col) in idx.iter().enumerate() {
sorted_eigenvectors
.column_mut(new_col)
.assign(&v.column(old_col));
}
Ok((sorted_eigenvalues, sorted_eigenvectors))
}
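/// Naive `O(n^2)` matrix-vector product `y = A x` used by the iterative
/// methods above.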
#[inline]
fn mat_vec_mul<F>(a: &ArrayView2<F>, x: &ArrayView1<F>) -> Array1<F>
where
F: Float + NumAssign + Sum + 'static,
{
let m = a.nrows();
let mut y = Array1::<F>::zeros(m);
for i in 0..m {
let mut s = F::zero();
for j in 0..a.ncols() {
s += a[[i, j]] * x[j];
}
y[i] = s;
}
y
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::array;
#[test]
fn test_power_iteration_dense_diagonal() {
let a = array![[5.0_f64, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 1.0]];
let (lam, v) = power_iteration_dense(&a.view(), 500, 1e-12).expect("converged");
assert_relative_eq!(lam, 5.0, epsilon = 1e-8);
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
assert_relative_eq!(norm, 1.0, epsilon = 1e-10);
}
#[test]
fn test_power_iteration_dense_2x2() {
let a = array![[4.0_f64, 1.0], [2.0, 3.0]];
let (lam, v) = power_iteration_dense(&a.view(), 300, 1e-12).expect("converged");
assert_relative_eq!(lam, 5.0, epsilon = 1e-8);
let av = a.dot(&v);
for i in 0..2 {
assert_relative_eq!(av[i], lam * v[i], epsilon = 1e-7);
}
}
#[test]
fn test_power_iteration_dense_non_square_err() {
let a = array![[1.0_f64, 2.0, 3.0], [4.0, 5.0, 6.0]];
assert!(power_iteration_dense(&a.view(), 100, 1e-10).is_err());
}
#[test]
fn test_power_iteration_dense_symmetric() {
let a = array![[3.0_f64, 1.0], [1.0, 3.0]];
let (lam, v) = power_iteration_dense(&a.view(), 500, 1e-12).expect("converged");
assert_relative_eq!(lam, 4.0, epsilon = 1e-6);
let av = a.dot(&v);
for i in 0..2 {
assert_relative_eq!(av[i], lam * v[i], epsilon = 1e-5);
}
}
#[test]
fn test_inverse_power_iteration_smallest() {
let a = array![[3.0_f64, 1.0], [1.0, 3.0]];
let (lam, v) = inverse_power_iteration(&a.view(), 1.8, 300, 1e-12).expect("converged");
assert_relative_eq!(lam, 2.0, epsilon = 1e-7);
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
assert_relative_eq!(norm, 1.0, epsilon = 1e-10);
}
#[test]
fn test_inverse_power_iteration_dominant() {
let a = array![[3.0_f64, 1.0], [1.0, 3.0]];
let (lam, _) = inverse_power_iteration(&a.view(), 3.9, 300, 1e-12).expect("converged");
assert_relative_eq!(lam, 4.0, epsilon = 1e-7);
}
#[test]
fn test_inverse_power_iteration_3x3() {
let a = array![[2.0_f64, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]];
let (lam, _) = inverse_power_iteration(&a.view(), 0.5, 500, 1e-11).expect("converged");
let expected = 2.0_f64 - std::f64::consts::SQRT_2;
assert_relative_eq!(lam, expected, epsilon = 1e-8);
}
#[test]
fn test_inverse_power_iteration_non_square_err() {
let a = array![[1.0_f64, 2.0, 3.0], [4.0, 5.0, 6.0]];
assert!(inverse_power_iteration(&a.view(), 1.0, 100, 1e-10).is_err());
}
#[test]
fn test_rayleigh_quotient_iteration_symmetric_2x2() {
let a = array![[2.0_f64, 1.0], [1.0, 3.0]];
let x0 = array![0.0_f64, 1.0];
let (lam, v) =
rayleigh_quotient_iteration(&a.view(), &x0.view(), 50, 1e-12).expect("converged");
let eig1 = (5.0_f64 + 5.0_f64.sqrt()) / 2.0;
let eig2 = (5.0_f64 - 5.0_f64.sqrt()) / 2.0;
let close = (lam - eig1).abs() < 1e-8 || (lam - eig2).abs() < 1e-8;
assert!(close, "eigenvalue {lam} not near {eig1} or {eig2}");
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
assert_relative_eq!(norm, 1.0, epsilon = 1e-10);
}
#[test]
fn test_rayleigh_quotient_iteration_diagonal() {
let a = array![[3.0_f64, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 5.0]];
let x0 = array![0.01_f64, 0.01, 1.0];
let (lam, _) =
rayleigh_quotient_iteration(&a.view(), &x0.view(), 100, 1e-12).expect("converged");
assert_relative_eq!(lam, 5.0, epsilon = 1e-8);
}
#[test]
fn test_rayleigh_quotient_iteration_wrong_dim_err() {
let a = array![[1.0_f64, 0.0], [0.0, 2.0]];
let x0 = array![1.0_f64, 0.0, 0.0];
assert!(rayleigh_quotient_iteration(&a.view(), &x0.view(), 50, 1e-10).is_err());
}
#[test]
fn test_rayleigh_quotient_residual_check() {
let a = array![[5.0_f64, 2.0, 0.0], [2.0, 4.0, 1.0], [0.0, 1.0, 3.0]];
let x0 = array![1.0_f64, 0.5, 0.2];
let (lam, v) =
rayleigh_quotient_iteration(&a.view(), &x0.view(), 100, 1e-12).expect("converged");
let av = a.dot(&v);
let lam_v = v.mapv(|x| x * lam);
for i in 0..3 {
assert_relative_eq!(av[i], lam_v[i], epsilon = 1e-7);
}
}
#[test]
fn test_lanczos_3x3() {
let a = array![[4.0_f64, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
let res = lanczos(&a.view(), 2, 1e-12).expect("lanczos");
assert_eq!(res.eigenvalues.len(), 2);
assert_eq!(res.eigenvectors.nrows(), 3);
assert_eq!(res.eigenvectors.ncols(), 2);
}
#[test]
fn test_lanczos_largest_eigenvalue() {
let a = array![[4.0_f64, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
let res = lanczos(&a.view(), 1, 1e-12).expect("lanczos");
let max_eig = res.eigenvalues[0];
assert!(
(max_eig - 4.732050807568877).abs() < 1e-4,
"expected ~4.732, got {max_eig}"
);
}
#[test]
fn test_lanczos_eigenvectors_orthonormal() {
let a = array![[4.0_f64, 2.0, 0.0], [2.0, 5.0, 1.0], [0.0, 1.0, 3.0]];
let res = lanczos(&a.view(), 2, 1e-12).expect("lanczos");
let vecs = &res.eigenvectors;
let ncols = vecs.ncols();
for j in 0..ncols {
let v = vecs.column(j);
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
assert!(
(norm - 1.0).abs() < 1e-8,
"column {j} not unit length, norm={norm}"
);
}
}
#[test]
fn test_lanczos_residual_check() {
let a = array![[4.0_f64, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]];
let res = lanczos(&a.view(), 2, 1e-12).expect("lanczos");
for j in 0..res.eigenvalues.len() {
let v = res.eigenvectors.column(j).to_owned();
let lam = res.eigenvalues[j];
let av = a.dot(&v);
let lam_v = v.mapv(|x| x * lam);
let diff: f64 = av
.iter()
.zip(lam_v.iter())
.map(|(&a, &b)| (a - b).powi(2))
.sum::<f64>()
.sqrt();
assert!(diff < 1e-5, "residual={diff} for eigenvalue {lam}");
}
}
#[test]
fn test_lanczos_invalid_k_err() {
let a = Array2::<f64>::eye(4);
assert!(lanczos(&a.view(), 0, 1e-10).is_err());
assert!(lanczos(&a.view(), 4, 1e-10).is_err());
}
#[test]
fn test_arnoldi_dimensions() {
let a = array![[1.0_f64, 2.0, 0.0], [0.0, 3.0, 1.0], [1.0, 0.0, 4.0]];
let k = 2;
let res = arnoldi(&a.view(), k).expect("arnoldi");
assert_eq!(res.h.dim(), (k + 1, k));
assert_eq!(res.q.dim(), (3, k + 1));
assert!(res.steps >= 1);
}
#[test]
fn test_arnoldi_orthonormality() {
let a = array![[2.0_f64, 1.0, 0.5], [1.0, 4.0, 0.0], [0.5, 0.0, 3.0]];
let res = arnoldi(&a.view(), 2).expect("arnoldi");
let q = &res.q;
let k_plus1 = res.steps + 1;
for i in 0..k_plus1 {
for j in i..k_plus1 {
let qi = q.column(i);
let qj = q.column(j);
let dot: f64 = qi.iter().zip(qj.iter()).map(|(&x, &y)| x * y).sum();
let expected = if i == j { 1.0 } else { 0.0 };
assert!(
(dot - expected).abs() < 1e-10,
"Q^T Q [{i},{j}] = {dot}, expected {expected}"
);
}
}
}
#[test]
fn test_arnoldi_relation_aq_qh() {
let a = array![[3.0_f64, 1.0, 0.0], [0.5, 2.0, 0.5], [0.0, 0.5, 4.0]];
let k = 2;
let res = arnoldi(&a.view(), k).expect("arnoldi");
let q = &res.q;
let h = &res.h;
let n = 3;
let mut aq = Array2::<f64>::zeros((n, k));
for j in 0..k {
let qj = q.column(j).to_owned();
let aqj = a.dot(&qj);
aq.column_mut(j).assign(&aqj);
}
let mut qh = Array2::<f64>::zeros((n, k));
for col in 0..k {
for row in 0..n {
let mut val = 0.0_f64;
for l in 0..k + 1 {
val += q[[row, l]] * h[[l, col]];
}
qh[[row, col]] = val;
}
}
for i in 0..n {
for j in 0..k {
assert!(
(aq[[i, j]] - qh[[i, j]]).abs() < 1e-10,
"AQ[{i},{j}]={} ≠ QH[{i},{j}]={}",
aq[[i, j]],
qh[[i, j]]
);
}
}
}
#[test]
fn test_arnoldi_hessenberg_structure() {
let a = array![
[2.0_f64, 3.0, 1.0, 0.5],
[0.0, 1.0, 2.0, 0.0],
[1.0, 0.0, 3.0, 1.0],
[0.5, 0.0, 1.0, 4.0]
];
let k = 3;
let res = arnoldi(&a.view(), k).expect("arnoldi");
let h = &res.h;
for i in 0..h.nrows() {
for j in 0..h.ncols() {
if i > j + 1 {
assert!(
h[[i, j]].abs() < 1e-12,
"H[{i},{j}] = {} should be 0 (below subdiagonal)",
h[[i, j]]
);
}
}
}
}
#[test]
fn test_arnoldi_invalid_args_err() {
let a = array![[1.0_f64, 0.0], [0.0, 2.0]];
assert!(arnoldi(&a.view(), 0).is_err());
assert!(arnoldi(&a.view(), 2).is_err());
let b = array![[1.0_f64, 2.0, 3.0], [4.0, 5.0, 6.0]];
assert!(arnoldi(&b.view(), 1).is_err());
}
#[test]
fn test_jacobi_2x2() {
let a = array![[2.0_f64, 1.0], [1.0, 3.0]];
let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-13, 500).expect("jacobi");
assert_eq!(eigs.len(), 2);
let e1 = (5.0_f64 - 5.0_f64.sqrt()) / 2.0;
let e2 = (5.0_f64 + 5.0_f64.sqrt()) / 2.0;
assert_relative_eq!(eigs[0], e1, epsilon = 1e-10);
assert_relative_eq!(eigs[1], e2, epsilon = 1e-10);
let dot: f64 = vecs
.column(0)
.iter()
.zip(vecs.column(1).iter())
.map(|(&x, &y)| x * y)
.sum();
assert_relative_eq!(dot, 0.0, epsilon = 1e-10);
let n0: f64 = vecs.column(0).iter().map(|x| x * x).sum::<f64>().sqrt();
let n1: f64 = vecs.column(1).iter().map(|x| x * x).sum::<f64>().sqrt();
assert_relative_eq!(n0, 1.0, epsilon = 1e-10);
assert_relative_eq!(n1, 1.0, epsilon = 1e-10);
}
#[test]
fn test_jacobi_3x3() {
let a = array![[4.0_f64, 1.0, 0.5], [1.0, 3.0, 0.75], [0.5, 0.75, 2.0]];
let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-13, 1000).expect("jacobi");
assert_eq!(eigs.len(), 3);
assert!(
eigs[0] <= eigs[1] && eigs[1] <= eigs[2],
"not sorted: {eigs:?}"
);
for j in 0..3 {
let v = vecs.column(j).to_owned();
let av = a.dot(&v);
let lam_v = v.mapv(|x| x * eigs[j]);
let diff: f64 = av
.iter()
.zip(lam_v.iter())
.map(|(&a, &b)| (a - b).powi(2))
.sum::<f64>()
.sqrt();
assert!(diff < 1e-10, "residual={diff} for eigenvalue {}", eigs[j]);
}
}
#[test]
fn test_jacobi_identity() {
let a = Array2::<f64>::eye(4);
let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-13, 500).expect("jacobi");
for &e in eigs.iter() {
assert_relative_eq!(e, 1.0, epsilon = 1e-10);
}
let vt_v = vecs.t().dot(&vecs);
for i in 0..4 {
for j in 0..4 {
let expected = if i == j { 1.0 } else { 0.0 };
assert_relative_eq!(vt_v[[i, j]], expected, epsilon = 1e-9);
}
}
}
#[test]
fn test_jacobi_diagonal_matrix() {
let a = array![[3.0_f64, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 5.0]];
let (eigs, _) = jacobi_eigenvalue(&a.view(), 1e-13, 500).expect("jacobi");
assert_relative_eq!(eigs[0], 1.0, epsilon = 1e-10);
assert_relative_eq!(eigs[1], 3.0, epsilon = 1e-10);
assert_relative_eq!(eigs[2], 5.0, epsilon = 1e-10);
}
#[test]
fn test_jacobi_non_square_err() {
let a = array![[1.0_f64, 2.0, 3.0], [4.0, 5.0, 6.0]];
assert!(jacobi_eigenvalue(&a.view(), 1e-10, 100).is_err());
}
#[test]
fn test_jacobi_4x4_symmetric() {
let a = array![
[4.0_f64, 1.0, 0.5, 0.0],
[1.0, 3.0, 1.0, 0.5],
[0.5, 1.0, 2.0, 1.0],
[0.0, 0.5, 1.0, 5.0]
];
let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-12, 1000).expect("jacobi");
for i in 0..3 {
assert!(eigs[i] <= eigs[i + 1], "not sorted at index {i}");
}
for j in 0..4 {
let v = vecs.column(j).to_owned();
let av = a.dot(&v);
let lam_v = v.mapv(|x| x * eigs[j]);
let diff: f64 = av
.iter()
.zip(lam_v.iter())
.map(|(&a, &b)| (a - b).powi(2))
.sum::<f64>()
.sqrt();
assert!(diff < 1e-9, "residual={diff} for eigenvalue {}", eigs[j]);
}
}
#[test]
fn test_jacobi_orthogonality_of_eigenvectors() {
let a = array![[5.0_f64, 2.0, 1.0], [2.0, 4.0, 0.5], [1.0, 0.5, 3.0]];
let (_, vecs) = jacobi_eigenvalue(&a.view(), 1e-12, 1000).expect("jacobi");
let n = 3;
for i in 0..n {
for j in i..n {
let vi = vecs.column(i);
let vj = vecs.column(j);
let dot: f64 = vi.iter().zip(vj.iter()).map(|(&x, &y)| x * y).sum();
let expected = if i == j { 1.0 } else { 0.0 };
assert!(
(dot - expected).abs() < 1e-8,
"Vᵀ V [{i},{j}] = {dot}, expected {expected}"
);
}
}
}
#[test]
fn test_jacobi_trace_invariance() {
let a = array![[4.0_f64, 1.0, 0.5], [1.0, 3.0, 0.75], [0.5, 0.75, 2.0]];
let trace_a: f64 = (0..3).map(|i| a[[i, i]]).sum();
let (eigs, _) = jacobi_eigenvalue(&a.view(), 1e-13, 1000).expect("jacobi");
let sum_eigs: f64 = eigs.iter().sum();
assert_relative_eq!(trace_a, sum_eigs, epsilon = 1e-10);
}
#[test]
fn test_jacobi_1x1() {
let a = array![[7.5_f64]];
let (eigs, vecs) = jacobi_eigenvalue(&a.view(), 1e-12, 100).expect("jacobi 1x1");
assert_relative_eq!(eigs[0], 7.5, epsilon = 1e-12);
assert_relative_eq!(vecs[[0, 0]], 1.0, epsilon = 1e-12);
}
}