use scirs2_core::ndarray::{s, Array1, Array2, ArrayView1, ArrayView2, ScalarOperand};
use scirs2_core::numeric::Complex;
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::random::prelude::*;
use std::iter::Sum;
use crate::error::{LinalgError, LinalgResult};
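/// Result type for the sparse eigenvalue solvers: eigenvalues and eigenvectors as complex arrays.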
pub type SparseEigenResult<F> = LinalgResult<(Array1<Complex<F>>, Array2<Complex<F>>)>;
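/// Result type for complex QR factorizations: the `(Q, R)` factor pair.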
pub type QrResult<F> = LinalgResult<(Array2<Complex<F>>, Array2<Complex<F>>)>;
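/// Minimal matrix interface required by the iterative eigensolvers in this module:
/// dimensions, a matrix-vector product, a symmetry query, and the fill ratio.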
pub trait SparseMatrix<F> {
fn nrows(&self) -> usize;
fn ncols(&self) -> usize;
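    /// Computes `y = A * x`; `y` must already have length `nrows()`.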
fn matvec(&self, x: &ArrayView1<F>, y: &mut Array1<F>) -> LinalgResult<()>;
fn is_symmetric(&self) -> bool;
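    /// Fill ratio of the matrix: number of stored entries divided by the total number of entries.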
fn sparsity(&self) -> f64;
}
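/// Computes `k` eigenpairs of a symmetric sparse matrix with the Lanczos method.
///
/// `which` selects the part of the spectrum (`"largest"`/`"LM"`, `"smallest"`/`"SM"`,
/// or `"target"`/`"nearest"` together with `target`); the iteration runs for at most
/// `max_iter` steps (capped at the matrix dimension) and uses `tol` both as the
/// breakdown threshold and in the convergence heuristic. Eigenvalues are returned as
/// complex numbers with zero imaginary part.
///
/// Illustrative call (parameter values are placeholders):
///
/// ```ignore
/// let a = CsrMatrix::from_dense(&dense.view(), 1e-12);
/// let (vals, vecs) = lanczos(&a, 3, "largest", 0.0, 200, 1e-8)?;
/// ```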
#[allow(dead_code)]
pub fn lanczos<F, M>(
matrix: &M,
k: usize,
which: &str,
target: F,
max_iter: usize,
tol: F,
) -> SparseEigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + 'static + Default,
M: SparseMatrix<F> + Sync,
{
let n = matrix.nrows();
if n != matrix.ncols() {
return Err(LinalgError::ShapeError(
"Matrix must be square for eigenvalue decomposition".to_string(),
));
}
if k >= n {
return Err(LinalgError::InvalidInputError(
"Number of eigenvalues requested must be less than matrix size".to_string(),
));
}
let max_steps = max_iter.min(n);
let mut v_basis: Vec<Array1<F>> = Vec::with_capacity(max_steps + 1);
let mut rng = scirs2_core::random::rng();
let mut v_init = Array1::<F>::zeros(n);
for i in 0..n {
v_init[i] = F::from(rng.random::<f64>()).unwrap_or(F::one());
}
let norm = v_init.iter().map(|x| (*x) * (*x)).sum::<F>().sqrt();
if norm < F::from(1e-15).unwrap_or(F::epsilon()) {
return Err(LinalgError::InvalidInputError(
"Initial Lanczos vector has near-zero norm".to_string(),
));
}
v_init.mapv_inplace(|x| x / norm);
v_basis.push(v_init);
let mut alpha = Vec::with_capacity(max_steps);
let mut beta = Vec::with_capacity(max_steps);
let mut v_next = Array1::<F>::zeros(n);
for iter in 0..max_steps {
let v_curr = &v_basis[iter];
matrix.matvec(&v_curr.view(), &mut v_next)?;
if iter > 0 {
let beta_prev = beta[iter - 1];
let v_prev = &v_basis[iter - 1];
for j in 0..n {
v_next[j] -= beta_prev * v_prev[j];
}
}
let alpha_curr = v_curr
.iter()
.zip(v_next.iter())
.map(|(v, w)| (*v) * (*w))
.sum::<F>();
alpha.push(alpha_curr);
for j in 0..n {
v_next[j] -= alpha_curr * v_curr[j];
}
for _pass in 0..2 {
for prev_v in v_basis.iter() {
let proj = prev_v
.iter()
.zip(v_next.iter())
.map(|(p, w)| (*p) * (*w))
.sum::<F>();
for j in 0..n {
v_next[j] -= proj * prev_v[j];
}
}
}
let beta_curr = v_next.iter().map(|x| (*x) * (*x)).sum::<F>().sqrt();
if beta_curr < tol {
break;
}
beta.push(beta_curr);
let v_new = v_next.mapv(|x| x / beta_curr);
v_basis.push(v_new);
v_next = Array1::<F>::zeros(n);
if iter >= k && iter % 5 == 0 && check_lanczos_convergence(&alpha, &beta, k, tol) {
break;
}
}
    // Solve the projected tridiagonal eigenproblem, then map the Ritz vectors back to
    // the original space: x = sum_j y[j] * v_j, where the v_j span the Lanczos basis.
    let (eigenvals, ritz_vectors) = solve_tridiagonal_eigenproblem(&alpha, &beta, which, target, k)?;
    let m = alpha.len();
    let mut eigenvecs = Array2::<F>::zeros((n, k));
    for col in 0..k {
        for (j, v_j) in v_basis.iter().take(m).enumerate() {
            let coeff = ritz_vectors[[j, col]];
            for row in 0..n {
                eigenvecs[[row, col]] += coeff * v_j[row];
            }
        }
    }
    let complex_eigenvals = eigenvals.mapv(|x| Complex::new(x, F::zero()));
    let complex_eigenvecs = eigenvecs.mapv(|x| Complex::new(x, F::zero()));
    Ok((complex_eigenvals, complex_eigenvecs))
}
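/// Heuristic stopping test for the Lanczos iteration: reports convergence once the `k`
/// most recent off-diagonal coefficients are all below `10 * tol`.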
#[allow(dead_code)]
fn check_lanczos_convergence<F: Float>(_alpha: &[F], beta: &[F], k: usize, tol: F) -> bool {
if beta.len() < k {
return false;
}
let recent_betas = &beta[beta.len().saturating_sub(k)..];
recent_betas
.iter()
.all(|&b| b < tol * F::from(10.0).expect("Operation failed"))
}
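/// Assembles the symmetric tridiagonal matrix defined by `alpha` (diagonal) and `beta`
/// (off-diagonal), diagonalizes it with a shifted QR iteration, and returns the `k`
/// eigenpairs selected by `which` and `target`.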
#[allow(dead_code)]
fn solve_tridiagonal_eigenproblem<F: Float + NumAssign + Sum + Send + Sync + 'static>(
alpha: &[F],
beta: &[F],
which: &str,
target: F,
k: usize,
) -> LinalgResult<(Array1<F>, Array2<F>)> {
let n = alpha.len();
if n == 0 {
return Err(LinalgError::InvalidInputError(
"Empty tridiagonal matrix".to_string(),
));
}
let mut trimatrix = Array2::<F>::zeros((n, n));
for i in 0..n {
trimatrix[[i, i]] = alpha[i];
}
for i in 0..n.saturating_sub(1) {
if i < beta.len() {
trimatrix[[i, i + 1]] = beta[i];
trimatrix[[i + 1, i]] = beta[i];
}
}
let (eigenvals, eigenvecs) = qr_algorithm_tridiagonal(&trimatrix)?;
let selected_indices = select_eigenvalues(&eigenvals, which, target, k);
let mut result_eigenvals = Array1::<F>::zeros(k);
let mut result_eigenvecs = Array2::<F>::zeros((n, k));
for (i, &idx) in selected_indices.iter().enumerate() {
result_eigenvals[i] = eigenvals[idx];
for j in 0..n {
result_eigenvecs[[j, i]] = eigenvecs[[j, idx]];
}
}
Ok((result_eigenvals, result_eigenvecs))
}
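/// Shifted QR iteration for a symmetric (tridiagonal) matrix. Returns the diagonal of the
/// converged iterate as eigenvalue estimates together with the accumulated orthogonal
/// transformation whose columns approximate the eigenvectors.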
#[allow(dead_code)]
fn qr_algorithm_tridiagonal<F: Float + NumAssign + Sum + 'static>(
matrix: &Array2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)> {
let n = matrix.nrows();
let mut a = matrix.clone();
let mut q_total = Array2::<F>::eye(n);
let max_iterations = 1000;
let tolerance = F::from(1e-12).unwrap_or(F::epsilon());
for _iter in 0..max_iterations {
let mut converged = true;
for i in 0..n - 1 {
if a[[i + 1, i]].abs() > tolerance {
converged = false;
break;
}
}
if converged {
break;
}
        // Wilkinson-style shift from the trailing 2x2 block; guard against a zero
        // denominator when that block is already (numerically) diagonal.
        let shift = if n >= 2 {
            let d = (a[[n - 2, n - 2]] - a[[n - 1, n - 1]]) / F::from(2.0).unwrap_or(F::one());
            let b_sq = a[[n - 1, n - 2]] * a[[n - 1, n - 2]];
            let denom = d.abs() + (d * d + b_sq).sqrt();
            if denom > F::zero() {
                let sign_d = if d >= F::zero() { F::one() } else { -F::one() };
                a[[n - 1, n - 1]] - sign_d * b_sq / denom
            } else {
                a[[n - 1, n - 1]]
            }
        } else {
            a[[0, 0]]
        };
for i in 0..n {
a[[i, i]] -= shift;
}
let (q, r) = qr_decomposition_tridiagonal(&a)?;
a = r.dot(&q);
for i in 0..n {
a[[i, i]] += shift;
}
q_total = q_total.dot(&q);
}
let eigenvals = (0..n).map(|i| a[[i, i]]).collect::<Array1<F>>();
Ok((eigenvals, q_total))
}
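/// QR factorization of a (tridiagonal) matrix using Givens rotations, so that `A = Q * R`
/// with `Q` orthogonal and `R` upper triangular.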
#[allow(dead_code)]
fn qr_decomposition_tridiagonal<F: Float + NumAssign + Sum>(
matrix: &Array2<F>,
) -> LinalgResult<(Array2<F>, Array2<F>)> {
let n = matrix.nrows();
    // Accumulate Q = G_1^T * G_2^T * ... directly by right-multiplying with each
    // rotation's transpose, while R is built by applying the rotations to the rows.
    let mut q = Array2::<F>::eye(n);
    let mut r = matrix.clone();
    let eps = F::from(1e-15).unwrap_or(F::epsilon());
    for i in 0..n.saturating_sub(1) {
        let a = r[[i, i]];
        let b = r[[i + 1, i]];
        if b.abs() > eps {
            let (c, s) = givens_rotation(a, b);
            // Zero the subdiagonal entry: R <- G_i * R.
            apply_givens_rotation(&mut r, i, i + 1, c, s);
            // Keep A = Q * R invariant: Q <- Q * G_i^T.
            apply_givens_rotation_transpose(&mut q, i, i + 1, c, s);
        }
    }
    Ok((q, r))
}
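/// Computes `(c, s)` such that the rotation `[[c, -s], [s, c]]` applied to `[a, b]`
/// zeroes the second component.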
#[allow(dead_code)]
fn givens_rotation<F: Float>(a: F, b: F) -> (F, F) {
if b.abs() < F::from(1e-15).expect("Operation failed") {
(F::one(), F::zero())
} else {
let r = (a * a + b * b).sqrt();
(a / r, -b / r)
}
}
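/// Applies the Givens rotation `G(i, j, c, s)` to rows `i` and `j` of `matrix`
/// (left multiplication).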
#[allow(dead_code)]
fn apply_givens_rotation<F: Float + NumAssign>(
matrix: &mut Array2<F>,
i: usize,
j: usize,
c: F,
s: F,
) {
let n = matrix.ncols();
for k in 0..n {
let temp1 = matrix[[i, k]];
let temp2 = matrix[[j, k]];
matrix[[i, k]] = c * temp1 - s * temp2;
matrix[[j, k]] = s * temp1 + c * temp2;
}
}
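/// Right-multiplies `matrix` by the transpose of the Givens rotation `G(i, j, c, s)`,
/// updating columns `i` and `j`; used to accumulate the orthogonal factor `Q`.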
#[allow(dead_code)]
fn apply_givens_rotation_transpose<F: Float + NumAssign>(
matrix: &mut Array2<F>,
i: usize,
j: usize,
c: F,
s: F,
) {
    let n = matrix.nrows();
    // Update columns i and j, i.e. right-multiply by the transpose of the rotation,
    // so that the caller can accumulate Q as Q <- Q * G^T.
    for k in 0..n {
        let temp1 = matrix[[k, i]];
        let temp2 = matrix[[k, j]];
        matrix[[k, i]] = c * temp1 - s * temp2;
        matrix[[k, j]] = s * temp1 + c * temp2;
    }
}
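/// Returns the indices of up to `k` eigenvalues ordered according to `which`:
/// `"largest"`/`"LM"` (also the default), `"smallest"`/`"SM"`, or `"target"`/`"nearest"`
/// (closest to `target`).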
#[allow(dead_code)]
fn select_eigenvalues<F: Float>(
eigenvals: &Array1<F>,
which: &str,
target: F,
k: usize,
) -> Vec<usize> {
let mut indices_and_values: Vec<(usize, F)> = eigenvals
.iter()
.enumerate()
.map(|(i, &val)| (i, val))
.collect();
    match which {
        "smallest" | "SM" => {
            indices_and_values
                .sort_by(|a, b| a.1.partial_cmp(&b.1).expect("eigenvalue comparison failed (NaN)"));
        }
        "target" | "nearest" => {
            indices_and_values.sort_by(|a, b| {
                let dist_a = (a.1 - target).abs();
                let dist_b = (b.1 - target).abs();
                dist_a.partial_cmp(&dist_b).expect("eigenvalue comparison failed (NaN)")
            });
        }
        // "largest" / "LM" and any unrecognized selector: largest eigenvalues first.
        _ => {
            indices_and_values
                .sort_by(|a, b| b.1.partial_cmp(&a.1).expect("eigenvalue comparison failed (NaN)"));
        }
    }
indices_and_values
.into_iter()
.take(k)
.map(|(idx, _)| idx)
.collect()
}
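/// Computes `k` eigenpairs of a general (possibly non-symmetric) sparse matrix with the
/// Arnoldi method, selecting the Ritz values closest to the complex `target`. The Ritz
/// vectors are mapped back to the original space through the Krylov basis.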
#[allow(dead_code)]
pub fn arnoldi<F, M>(
matrix: &M,
k: usize,
target: Complex<F>,
max_iter: usize,
tol: F,
) -> SparseEigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + 'static + Default,
M: SparseMatrix<F> + Sync,
{
let n = matrix.nrows();
if n != matrix.ncols() {
return Err(LinalgError::ShapeError(
"Matrix must be square for eigenvalue decomposition".to_string(),
));
}
if k >= n {
return Err(LinalgError::InvalidInputError(
"Number of eigenvalues requested must be less than matrix size".to_string(),
));
}
let m = (max_iter + 1).min(n);
let mut v_vectors = vec![Array1::<F>::zeros(n); m + 1];
let mut hmatrix = Array2::<F>::zeros((m + 1, m));
let mut rng = scirs2_core::random::rng();
for v_elem in &mut v_vectors[0] {
*v_elem = F::from(rng.random::<f64>()).expect("Operation failed");
}
    let norm = v_vectors[0].iter().map(|x| (*x) * (*x)).sum::<F>().sqrt();
    if norm < F::from(1e-15).unwrap_or(F::epsilon()) {
        return Err(LinalgError::InvalidInputError(
            "Initial Arnoldi vector has near-zero norm".to_string(),
        ));
    }
    for v_elem in &mut v_vectors[0] {
        *v_elem /= norm;
    }
let mut actual_m = 0;
for j in 0..m {
actual_m = j + 1;
let mut w = Array1::<F>::zeros(n);
matrix.matvec(&v_vectors[j].view(), &mut w)?;
for i in 0..=j {
let h_ij = w
.iter()
.zip(v_vectors[i].iter())
.map(|(w_val, v_val)| (*w_val) * (*v_val))
.sum::<F>();
hmatrix[[i, j]] = h_ij;
for l in 0..n {
w[l] -= h_ij * v_vectors[i][l];
}
}
let h_j1_j = w.iter().map(|x| (*x) * (*x)).sum::<F>().sqrt();
if h_j1_j < tol {
break;
}
if j + 1 < m {
hmatrix[[j + 1, j]] = h_j1_j;
for l in 0..n {
v_vectors[j + 1][l] = w[l] / h_j1_j;
}
}
if j >= k && j % 5 == 0 && check_arnoldi_convergence(&hmatrix, j + 1, k, tol) {
break;
}
}
let h_reduced = hmatrix.slice(s![..actual_m, ..actual_m]).to_owned();
let (ritz_values, ritz_vectors) = solve_hessenberg_eigenproblem(&h_reduced)?;
    // Select the Ritz values closest to the requested target (indices into `ritz_values`).
    let selected_indices = if target.im == F::zero() {
        select_closest_real_eigenvalues(&ritz_values, target.re, k)
    } else {
        select_closest_complex_eigenvalues(&ritz_values, target, k)
    };
let mut eigenvecs = Array2::<Complex<F>>::zeros((n, k));
let v_basis = v_vectors[..actual_m]
.iter()
.map(|v| v.mapv(|x| Complex::new(x, F::zero())))
.collect::<Vec<_>>();
    for (i, &ritz_idx) in selected_indices.iter().enumerate() {
for j in 0..n {
let mut eigenvec_j = Complex::new(F::zero(), F::zero());
for l in 0..actual_m {
eigenvec_j += ritz_vectors[[l, ritz_idx]] * v_basis[l][j];
}
eigenvecs[[j, i]] = eigenvec_j;
}
}
    let final_eigenvals = selected_indices
        .iter()
        .map(|&idx| ritz_values[idx])
        .collect::<Array1<_>>();
Ok((final_eigenvals, eigenvecs))
}
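/// Heuristic stopping test for the Arnoldi iteration: the trailing `k` subdiagonal entries
/// of the Hessenberg matrix must all be below `10 * tol`.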
#[allow(dead_code)]
fn check_arnoldi_convergence<F: Float>(hmatrix: &Array2<F>, m: usize, k: usize, tol: F) -> bool {
if m < k + 1 {
return false;
}
(0..k).all(|i| {
let row = m - 1 - i;
let col = m - 2 - i;
if row < hmatrix.nrows() && col < hmatrix.ncols() {
hmatrix[[row, col]].abs() < tol * F::from(10.0).expect("Operation failed")
} else {
true
}
})
}
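/// Promotes the real upper Hessenberg matrix to complex and computes its eigenpairs with
/// the complex QR iteration.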
#[allow(dead_code)]
fn solve_hessenberg_eigenproblem<F: Float + NumAssign + Sum + 'static>(
hmatrix: &Array2<F>,
) -> SparseEigenResult<F> {
let n = hmatrix.nrows();
let mut matrix_complex = Array2::<Complex<F>>::zeros((n, n));
for i in 0..n {
for j in 0..n {
matrix_complex[[i, j]] = Complex::new(hmatrix[[i, j]], F::zero());
}
}
qr_algorithm_complex(&matrix_complex)
}
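/// Unshifted QR iteration for a complex matrix. Returns the diagonal of the converged
/// iterate as eigenvalue estimates and the accumulated unitary factor; for non-normal
/// matrices the columns of that factor are Schur vectors rather than true eigenvectors.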
#[allow(dead_code)]
fn qr_algorithm_complex<F: Float + NumAssign + Sum + 'static>(
matrix: &Array2<Complex<F>>,
) -> SparseEigenResult<F> {
let n = matrix.nrows();
let mut a = matrix.clone();
let mut q_total = Array2::<Complex<F>>::eye(n);
let max_iterations = 1000;
    let tolerance = F::from(1e-12).unwrap_or(F::epsilon());
for _iter in 0..max_iterations {
let mut converged = true;
for i in 0..n - 1 {
if a[[i + 1, i]].norm() > tolerance {
converged = false;
break;
}
}
if converged {
break;
}
let (q, r) = householder_qr_complex(&a)?;
a = r.dot(&q);
q_total = q_total.dot(&q);
}
let eigenvals = (0..n).map(|i| a[[i, i]]).collect::<Array1<_>>();
Ok((eigenvals, q_total))
}
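/// Householder QR factorization `A = Q * R` of a complex matrix, with `Q` unitary and
/// `R` upper triangular.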
#[allow(dead_code)]
fn householder_qr_complex<F: Float + NumAssign + Sum>(matrix: &Array2<Complex<F>>) -> QrResult<F> {
let (m, n) = matrix.dim();
let mut q = Array2::<Complex<F>>::eye(m);
let mut r = matrix.clone();
let min_dim = m.min(n);
for k in 0..min_dim {
let x = r.slice(s![k.., k]).to_owned();
let (house_vec, tau) = householder_vector_complex(&x);
apply_householder_left_complex(&mut r, &house_vec, tau, k);
apply_householder_right_complex(&mut q, &house_vec, tau.conj(), k);
}
Ok((q, r))
}
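/// Builds a unit-norm Householder vector `v` and coefficient `tau = 2` so that
/// `I - tau * v * v^H` reflects `x` onto a multiple of the first basis vector. The sign is
/// chosen from `Re(x[0])`; exact annihilation of the trailing entries assumes `x[0]` is
/// (near) real, as is the case for the real-valued Hessenberg matrices produced by `arnoldi`.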
#[allow(dead_code)]
fn householder_vector_complex<F: Float + NumAssign + Sum>(
x: &Array1<Complex<F>>,
) -> (Array1<Complex<F>>, Complex<F>) {
let n = x.len();
if n == 0 {
return (Array1::zeros(0), Complex::new(F::zero(), F::zero()));
}
let norm_x = x.iter().map(|z| z.norm_sqr()).sum::<F>().sqrt();
if norm_x == F::zero() {
return (Array1::zeros(n), Complex::new(F::zero(), F::zero()));
}
let mut v = x.clone();
let sign = if x[0].re >= F::zero() {
F::one()
} else {
-F::one()
};
v[0] += Complex::new(sign * norm_x, F::zero());
let norm_v = v.iter().map(|z| z.norm_sqr()).sum::<F>().sqrt();
if norm_v > F::zero() {
v.mapv_inplace(|z| z / norm_v);
}
let tau = Complex::new(F::from(2.0).expect("Operation failed"), F::zero());
(v, tau)
}
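/// Left-multiplies the trailing rows `k..` of `matrix` by the reflector `I - tau * v * v^H`.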
#[allow(dead_code)]
fn apply_householder_left_complex<F: Float + NumAssign>(
matrix: &mut Array2<Complex<F>>,
house_vec: &Array1<Complex<F>>,
tau: Complex<F>,
k: usize,
) {
let (m, n) = matrix.dim();
let house_len = house_vec.len();
for j in k..n {
let mut sum = Complex::new(F::zero(), F::zero());
for i in 0..house_len {
if k + i < m {
sum += house_vec[i].conj() * matrix[[k + i, j]];
}
}
for i in 0..house_len {
if k + i < m {
matrix[[k + i, j]] -= tau * house_vec[i] * sum;
}
}
}
}
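/// Right-multiplies the trailing columns `k..` of `matrix` by `I - conj(tau) * v * v^H`;
/// used to accumulate the unitary factor `Q`.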
#[allow(dead_code)]
fn apply_householder_right_complex<F: Float + NumAssign>(
matrix: &mut Array2<Complex<F>>,
house_vec: &Array1<Complex<F>>,
tau: Complex<F>,
k: usize,
) {
let (m, _n) = matrix.dim();
let house_len = house_vec.len();
for i in 0..m {
let mut sum = Complex::new(F::zero(), F::zero());
for j in 0..house_len {
if k + j < matrix.ncols() {
sum += matrix[[i, k + j]] * house_vec[j];
}
}
for j in 0..house_len {
if k + j < matrix.ncols() {
matrix[[i, k + j]] -= sum * tau.conj() * house_vec[j].conj();
}
}
}
}
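/// Indices of up to `k` (numerically) real eigenvalues closest to the real `target`;
/// eigenvalues with a non-negligible imaginary part are skipped.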
#[allow(dead_code)]
fn select_closest_real_eigenvalues<F: Float>(
eigenvals: &Array1<Complex<F>>,
target: F,
k: usize,
) -> Vec<usize> {
let mut real_eigenvals: Vec<(usize, F)> = eigenvals
.iter()
.enumerate()
.filter(|(_, z)| z.im.abs() < F::from(1e-10).expect("Operation failed"))
.map(|(i, z)| (i, z.re))
.collect();
real_eigenvals.sort_by(|a, b| {
let dist_a = (a.1 - target).abs();
let dist_b = (b.1 - target).abs();
dist_a.partial_cmp(&dist_b).expect("Operation failed")
});
real_eigenvals
.into_iter()
.take(k)
.map(|(idx, _)| idx)
.collect()
}
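/// Indices of the `k` eigenvalues closest to `target` in the complex plane.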
#[allow(dead_code)]
fn select_closest_complex_eigenvalues<F: Float>(
eigenvals: &Array1<Complex<F>>,
target: Complex<F>,
k: usize,
) -> Vec<usize> {
let mut eigenvals_with_dist: Vec<(usize, F)> = eigenvals
.iter()
.enumerate()
.map(|(i, z)| {
let diff = *z - target;
(i, diff.norm())
})
.collect();
eigenvals_with_dist.sort_by(|a, b| a.1.partial_cmp(&b.1).expect("Operation failed"));
eigenvals_with_dist
.into_iter()
.take(k)
.map(|(idx, _)| idx)
.collect()
}
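/// Generalized sparse eigenvalue solver for `A x = lambda B x`. Currently returns
/// `NotImplementedError`.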
#[allow(dead_code)]
pub fn eigs_gen<F, M1, M2>(
_a: &M1,
_b: &M2,
_k: usize,
_which: &str,
_target: F,
_max_iter: usize,
_tol: F,
) -> SparseEigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
M1: SparseMatrix<F>,
M2: SparseMatrix<F>,
{
Err(LinalgError::NotImplementedError(
"Sparse generalized eigenvalue solver not yet implemented".to_string(),
))
}
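/// Truncated SVD of a sparse matrix. Currently returns `NotImplementedError`.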
#[allow(dead_code)]
pub fn svds<F, M>(
    _matrix: &M,
_k: usize,
_which: &str,
_max_iter: usize,
_tol: F,
) -> LinalgResult<(Array1<F>, Array2<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
M: SparseMatrix<F>,
{
Err(LinalgError::NotImplementedError(
"Sparse SVD solver not yet implemented".to_string(),
))
}
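/// Sparse matrix in Compressed Sparse Row (CSR) format: `data` and `indices` hold the
/// stored values and their column indices, and `indptr[i]..indptr[i + 1]` delimits row `i`.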
pub struct CsrMatrix<F> {
nrows: usize,
ncols: usize,
data: Vec<F>,
indices: Vec<usize>,
indptr: Vec<usize>,
}
impl<F> CsrMatrix<F>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
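    /// Creates a CSR matrix from raw `data`, column `indices`, and row pointer `indptr`
    /// arrays; the arrays are taken as-is and are not validated.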
pub fn new(
nrows: usize,
ncols: usize,
data: Vec<F>,
indices: Vec<usize>,
indptr: Vec<usize>,
) -> Self {
Self {
nrows,
ncols,
data,
indices,
indptr,
}
}
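    /// Number of explicitly stored (nonzero) entries.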
pub fn nnz(&self) -> usize {
self.data.len()
}
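    /// Builds a CSR matrix from a dense view, keeping entries whose absolute value is at
    /// least `threshold`.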
pub fn from_dense(dense: &ArrayView2<F>, threshold: F) -> Self {
let (m, n) = dense.dim();
let mut data = Vec::new();
let mut indices = Vec::new();
let mut indptr = Vec::with_capacity(m + 1);
indptr.push(0);
for i in 0..m {
for j in 0..n {
let val = dense[[i, j]];
if val.abs() >= threshold {
data.push(val);
indices.push(j);
}
}
indptr.push(data.len());
}
Self {
nrows: m,
ncols: n,
data,
indices,
indptr,
}
}
}
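/// Converts a dense matrix into a boxed `SparseMatrix` in CSR form usable with the solvers
/// in this module, dropping entries whose absolute value is below `threshold`.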
#[allow(dead_code)]
pub fn dense_to_sparse<F>(
densematrix: &ArrayView2<F>,
threshold: F,
) -> LinalgResult<Box<dyn SparseMatrix<F>>>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
Ok(Box::new(CsrMatrix::from_dense(densematrix, threshold)))
}
impl<F> SparseMatrix<F> for CsrMatrix<F>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.ncols
}
fn matvec(&self, x: &ArrayView1<F>, y: &mut Array1<F>) -> LinalgResult<()> {
if x.len() != self.ncols {
return Err(LinalgError::DimensionError(format!(
"CsrMatrix::matvec: x has {} elements but matrix has {} columns",
x.len(),
self.ncols
)));
}
if y.len() != self.nrows {
return Err(LinalgError::DimensionError(format!(
"CsrMatrix::matvec: y has {} elements but matrix has {} rows",
y.len(),
self.nrows
)));
}
for i in 0..self.nrows {
let row_start = if i < self.indptr.len() {
self.indptr[i]
} else {
self.data.len()
};
let row_end = if i + 1 < self.indptr.len() {
self.indptr[i + 1]
} else {
self.data.len()
};
let mut acc = F::zero();
for k in row_start..row_end {
if k < self.data.len() && k < self.indices.len() {
let col = self.indices[k];
if col < x.len() {
acc += self.data[k] * x[col];
}
}
}
y[i] = acc;
}
Ok(())
}
fn is_symmetric(&self) -> bool {
if self.nrows != self.ncols {
return false;
}
let n = self.nrows;
for i in 0..n {
let row_start = if i < self.indptr.len() {
self.indptr[i]
} else {
return false;
};
let row_end = if i + 1 < self.indptr.len() {
self.indptr[i + 1]
} else {
self.data.len()
};
for k in row_start..row_end {
if k >= self.data.len() || k >= self.indices.len() {
continue;
}
let j = self.indices[k];
if j >= n {
return false;
}
let val = self.data[k];
let ji_start = if j < self.indptr.len() {
self.indptr[j]
} else {
return false;
};
let ji_end = if j + 1 < self.indptr.len() {
self.indptr[j + 1]
} else {
self.data.len()
};
let mut found = false;
for kk in ji_start..ji_end {
if kk < self.indices.len()
&& self.indices[kk] == i
&& kk < self.data.len()
&& (self.data[kk] - val).abs() < F::from(1e-14).unwrap_or(F::epsilon())
{
found = true;
break;
}
}
if !found {
return false;
}
}
}
true
}
fn sparsity(&self) -> f64 {
let total = (self.nrows as f64) * (self.ncols as f64);
if total == 0.0 {
return 0.0;
}
self.data.len() as f64 / total
}
}
#[cfg(test)]
mod tests {
use super::*;
fn tridiag_csr_5x5() -> CsrMatrix<f64> {
let n = 5;
let mut data = Vec::new();
let mut indices = Vec::new();
let mut indptr = vec![0usize];
for i in 0..n {
if i > 0 {
data.push(-1.0);
indices.push(i - 1);
}
data.push(2.0);
indices.push(i);
if i < n - 1 {
data.push(-1.0);
indices.push(i + 1);
}
indptr.push(data.len());
}
CsrMatrix::new(n, n, data, indices, indptr)
}
#[test]
fn test_csr_matvec() {
let csr = tridiag_csr_5x5();
let x = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
let mut y = Array1::zeros(5);
csr.matvec(&x.view(), &mut y).expect("matvec failed");
assert!((y[0] - 0.0).abs() < 1e-12);
assert!((y[1] - 0.0).abs() < 1e-12);
assert!((y[2] - 0.0).abs() < 1e-12);
assert!((y[3] - 0.0).abs() < 1e-12);
assert!((y[4] - 6.0).abs() < 1e-12);
}
#[test]
fn test_csr_is_symmetric() {
let csr = tridiag_csr_5x5();
assert!(
csr.is_symmetric(),
"Tridiagonal Laplacian should be symmetric"
);
}
#[test]
fn test_csr_sparsity() {
let csr = tridiag_csr_5x5();
let sp = csr.sparsity();
assert!(
(sp - 0.52).abs() < 0.01,
"Sparsity should be ~0.52, got {sp}"
);
}
#[test]
fn test_csrmatrix_empty() {
let csr = CsrMatrix::<f64>::new(5, 5, vec![], vec![], vec![0, 0, 0, 0, 0, 0]);
assert_eq!(csr.nrows(), 5);
assert_eq!(csr.ncols(), 5);
assert_eq!(csr.nnz(), 0);
assert_eq!(csr.sparsity(), 0.0);
}
#[test]
fn test_csr_from_dense() {
let dense = Array2::from_shape_fn((3, 3), |(i, j)| {
if i == j {
2.0_f64
} else if (i as isize - j as isize).abs() == 1 {
-1.0
} else {
0.0
}
});
let csr = CsrMatrix::from_dense(&dense.view(), 1e-14);
assert_eq!(csr.nrows(), 3);
assert_eq!(csr.ncols(), 3);
        assert_eq!(csr.nnz(), 7);
        assert!(csr.is_symmetric());
}
#[test]
fn test_dense_to_sparse_fn() {
let dense = Array2::<f64>::eye(4);
let sparse = dense_to_sparse(&dense.view(), 1e-12).expect("dense_to_sparse failed");
assert_eq!(sparse.nrows(), 4);
assert_eq!(sparse.ncols(), 4);
let x = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
let mut y = Array1::zeros(4);
sparse.matvec(&x.view(), &mut y).expect("matvec failed");
for i in 0..4 {
assert!((y[i] - x[i]).abs() < 1e-12, "Identity matvec failed at {i}");
}
}
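    #[test]
    fn test_qr_decomposition_tridiagonal() {
        // The Givens-based factorization must satisfy A = Q * R with Q orthogonal and
        // R upper triangular; check this on a small symmetric tridiagonal matrix.
        let a = Array2::from_shape_vec(
            (3, 3),
            vec![2.0, -1.0, 0.0, -1.0, 2.0, -1.0, 0.0, -1.0, 2.0],
        )
        .expect("valid shape");
        let (q, r) = qr_decomposition_tridiagonal(&a).expect("QR decomposition failed");
        let qr = q.dot(&r);
        let qtq = q.t().dot(&q);
        for i in 0..3 {
            for j in 0..3 {
                assert!((qr[[i, j]] - a[[i, j]]).abs() < 1e-10, "A != Q*R at ({i}, {j})");
                let id = if i == j { 1.0 } else { 0.0 };
                assert!((qtq[[i, j]] - id).abs() < 1e-10, "Q not orthogonal at ({i}, {j})");
                if i > j {
                    assert!(r[[i, j]].abs() < 1e-10, "R not upper triangular at ({i}, {j})");
                }
            }
        }
    }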
#[test]
#[ignore = "flaky: random initial vector causes rare convergence failure in parallel test runs"]
fn test_lanczos_with_csr() {
let csr = tridiag_csr_5x5();
let result = lanczos(&csr, 2, "largest", 0.0_f64, 100, 1e-6);
assert!(
result.is_ok(),
"Lanczos should succeed on tridiagonal: {:?}",
result.as_ref().err()
);
let (eigenvals, _) = result.expect("already checked");
assert_eq!(eigenvals.len(), 2);
let max_eig = eigenvals
.iter()
.map(|z| z.re)
.fold(f64::NEG_INFINITY, f64::max);
assert!(
max_eig > 0.0 && max_eig < 5.0,
"Eigenvalue should be in range (0, 5), got {max_eig}"
);
}
}