use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::numeric::Complex;
use scirs2_core::numeric::{Float, NumAssign, Zero, One};
use scirs2_core::random as rand;
use std::fmt::Debug;
use std::ops::{Add, Sub, Mul};
use crate::error::{LinalgError, LinalgResult};
use crate::norm::vector_norm;
use super::{SparseMatrixView, sparse_dense_matvec};
/// Result type for sparse eigenvalue routines that may produce complex pairs:
/// `(eigenvalues, eigenvectors)` with eigenvectors stored column-wise.
type SparseEigenResult<T> = LinalgResult<(Array1<Complex<T>>, Array2<Complex<T>>)>;

/// Approximates the `k` dominant (largest-magnitude) eigenpairs of a sparse
/// square matrix using the Arnoldi iteration with modified Gram-Schmidt
/// orthogonalization.
///
/// # Arguments
/// * `matrix` - sparse square input matrix.
/// * `k` - number of eigenpairs requested; must satisfy `0 < k < n`.
/// * `max_iter` - cap on the Krylov subspace dimension.
/// * `tolerance` - breakdown / convergence threshold.
///
/// # Returns
/// `(eigenvalues, eigenvectors)` as complex arrays; the eigenvectors are Ritz
/// vectors lifted back to the original space, stored column-wise.
///
/// # Errors
/// * `DimensionError` if the matrix is not square.
/// * `ValueError` if `k == 0` or `k >= n`.
#[allow(dead_code)]
pub fn sparse_arnoldi_eigen<T>(
    matrix: &SparseMatrixView<T>,
    k: usize,
    max_iter: usize,
    tolerance: T,
) -> SparseEigenResult<T>
where
    T: Float
        + NumAssign
        + Clone
        + Copy
        + Debug
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + scirs2_core::ndarray::ScalarOperand,
{
    let n = matrix.nrows();
    if matrix.ncols() != n {
        return Err(LinalgError::DimensionError(
            "Matrix must be square for eigenvalue computation".to_string(),
        ));
    }
    if k >= n || k == 0 {
        return Err(LinalgError::ValueError(format!(
            "Number of eigenvalues k={} must be positive and less than matrix size n={}",
            k, n
        )));
    }
    // Krylov basis V is n x (m+1); projected Hessenberg H is (m+1) x m.
    let krylov_dim = std::cmp::min(max_iter, n.saturating_sub(1));
    let mut v = Array2::zeros((n, krylov_dim + 1));
    let mut h = Array2::zeros((krylov_dim + 1, krylov_dim));
    // Random normalized starting vector.
    let mut rng = scirs2_core::random::rng();
    for i in 0..n {
        v[[i, 0]] = T::from(rng.random_range(-0.5..0.5)).expect("Operation failed");
    }
    let norm = vector_norm(&v.column(0), 2)?;
    for i in 0..n {
        v[[i, 0]] /= norm;
    }
    let mut j = 0;
    while j < krylov_dim {
        // w = A * v_j
        let mut w = sparse_dense_matvec(matrix, &v.column(j))?;
        // Modified Gram-Schmidt: project each previous basis vector out of `w`
        // *in place*. (Bug fix: the original never updated `w`; it rebuilt
        // v[:, j+1] from the raw `w` on every pass, so only the final
        // projection survived and the Krylov basis was not orthogonal.)
        for i in 0..=j {
            let mut hij = T::zero();
            for l in 0..n {
                hij += w[l] * v[[l, i]];
            }
            h[[i, j]] = hij;
            for l in 0..n {
                w[l] -= hij * v[[l, i]];
            }
        }
        for l in 0..n {
            v[[l, j + 1]] = w[l];
        }
        let norm = vector_norm(&v.column(j + 1), 2)?;
        h[[j + 1, j]] = norm;
        // Breakdown: the Krylov subspace is (numerically) invariant.
        if norm < tolerance {
            break;
        }
        for i in 0..n {
            v[[i, j + 1]] /= norm;
        }
        j += 1;
        // Periodic convergence check once at least k basis vectors exist.
        if j >= k && j % 5 == 0 && check_arnoldi_convergence(&h, j, tolerance) {
            break;
        }
    }
    // Solve the small projected eigenproblem on the active Hessenberg block.
    let krylovsize = j;
    let h_active = h
        .slice(scirs2_core::ndarray::s![0..krylovsize, 0..krylovsize])
        .to_owned();
    let (h_eigenvals, h_eigenvecs) = compute_hessenberg_eigenvalues(&h_active)?;
    // Rank Ritz values by magnitude, largest first.
    let mut eigen_pairs: Vec<(T, usize)> = h_eigenvals
        .iter()
        .enumerate()
        .map(|(i, &val)| (val.norm(), i))
        .collect();
    eigen_pairs.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
    let selected_k = std::cmp::min(k, eigen_pairs.len());
    let mut eigenvalues = Array1::zeros(selected_k);
    let mut eigenvectors = Array2::zeros((n, selected_k));
    // Lift the selected Ritz vectors back to the full space: x = V * y.
    for (i, &(_, idx)) in eigen_pairs.iter().take(selected_k).enumerate() {
        eigenvalues[i] = h_eigenvals[idx];
        for row in 0..n {
            let mut sum = Complex::new(T::zero(), T::zero());
            for l in 0..krylovsize {
                sum += Complex::new(v[[row, l]], T::zero()) * h_eigenvecs[[l, idx]];
            }
            eigenvectors[[row, i]] = sum;
        }
    }
    Ok((eigenvalues, eigenvectors))
}
#[allow(dead_code)]
pub fn sparse_lanczos_eigen<T>(
matrix: &SparseMatrixView<T>,
k: usize,
which: &str,
max_iter: usize,
tolerance: T,
) -> LinalgResult<(Array1<T>, Array2<T>)>
where
T: Float + NumAssign + Clone + Copy + Debug + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + scirs2_core::ndarray::ScalarOperand,
{
let n = matrix.nrows();
if matrix.ncols() != n {
return Err(LinalgError::DimensionError(
"Matrix must be square for eigenvalue computation".to_string(),
));
}
if k >= n || k == 0 {
return Err(LinalgError::ValueError(format!(
"Number of eigenvalues k={} must be positive and less than matrix size n={}",
k, n
)));
}
let lanczos_dim = std::cmp::min(max_iter, n);
let mut v = Array2::zeros((n, lanczos_dim + 1));
let mut alpha = Array1::zeros(lanczos_dim);
let mut beta = Array1::zeros(lanczos_dim + 1);
let mut rng = scirs2_core::random::rng();
for i in 0..n {
v[[i, 0]] = T::from(rng.random_range(-0.5..0.5)).expect("Operation failed");
}
let norm = vector_norm(&v.column(0), 2)?;
for i in 0..n {
v[[i, 0]] /= norm;
}
let mut j = 0;
beta[0] = T::zero();
while j < lanczos_dim {
let w = sparse_dense_matvec(matrix, &v.column(j))?;
alpha[j] = T::zero();
for i in 0..n {
alpha[j] += v[[i, j]] * w[i];
}
for i in 0..n {
v[[i, j + 1]] = w[i] - alpha[j] * v[[i, j]];
if j > 0 {
v[[i, j + 1]] -= beta[j] * v[[i, j - 1]];
}
}
beta[j + 1] = vector_norm(&v.column(j + 1), 2)?;
if beta[j + 1] < tolerance {
break;
}
for i in 0..n {
v[[i, j + 1]] /= beta[j + 1];
}
j += 1;
if j >= k && j % 3 == 0 {
if check_lanczos_convergence(&alpha, &beta, j, tolerance) {
break;
}
}
}
let lanczossize = j;
let mut t = Array2::zeros((lanczossize, lanczossize));
for i in 0..lanczossize {
t[[i, i]] = alpha[i];
if i > 0 {
t[[i - 1, i]] = beta[i];
t[[i, i - 1]] = beta[i];
}
}
let (t_eigenvals, t_eigenvecs) = solve_tridiagonal_eigen(&t)?;
let mut selected_indices = match which {
"largest" => {
let mut pairs: Vec<(T, usize)> = t_eigenvals
.iter()
.enumerate()
.map(|(i, &val)| (val, i))
.collect();
pairs.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
pairs.into_iter().take(k).map(|(_, i)| i).collect()
},
"smallest" => {
let mut pairs: Vec<(T, usize)> = t_eigenvals
.iter()
.enumerate()
.map(|(i, &val)| (val, i))
.collect();
pairs.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
pairs.into_iter().take(k).map(|(_, i)| i).collect()
},
"both" => {
let half_k = k / 2;
let remaining = k - half_k;
let mut pairs: Vec<(T, usize)> = t_eigenvals
.iter()
.enumerate()
.map(|(i, &val)| (val, i))
.collect();
pairs.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
let mut selected = Vec::new();
selected.extend(pairs.iter().take(half_k).map(|(_, i)| *i));
selected.extend(pairs.iter().rev().take(remaining).map(|(_, i)| *i));
selected
_ => return Err(LinalgError::ValueError(format!(
"Invalid which parameter: {}. Must be 'largest', 'smallest', or 'both'",
which
))),
};
selected_indices.truncate(k);
let final_k = selected_indices.len();
let mut eigenvalues = Array1::zeros(final_k);
let mut eigenvectors = Array2::zeros((n, final_k));
for (i, &idx) in selected_indices.iter().enumerate() {
eigenvalues[i] = t_eigenvals[idx];
for j in 0..n {
let mut sum = T::zero();
for l in 0..lanczossize {
sum += v[[j, l]] * t_eigenvecs[[l, idx]];
}
eigenvectors[[j, i]] = sum;
}
}
Ok((eigenvalues, eigenvectors))
}
/// Heuristic convergence test for the Arnoldi iteration: the factorization is
/// treated as converged once the current subdiagonal entry of H drops below
/// `tolerance`. Always reports `false` before the third iteration.
#[allow(dead_code)]
fn check_arnoldi_convergence<T>(h: &Array2<T>, j: usize, tolerance: T) -> bool
where
    T: Float + Copy,
{
    // Too few iterations to judge convergence.
    j >= 2 && h[[j, j - 1]].abs() < tolerance
}
/// Heuristic convergence test for the Lanczos iteration: converged when the
/// latest off-diagonal entry is small relative to the most recent diagonal
/// entry. Always reports `false` before the third iteration.
#[allow(dead_code)]
fn check_lanczos_convergence<T>(alpha: &Array1<T>, beta: &Array1<T>, j: usize, tolerance: T) -> bool
where
    T: Float + Copy,
{
    if j < 2 {
        return false;
    }
    let off_diagonal = beta[j].abs();
    let reference = alpha[j - 1].abs();
    off_diagonal < tolerance * reference
}
/// Promotes a real Hessenberg matrix to complex form (zero imaginary parts)
/// and delegates the eigen-decomposition to the complex QR routine.
#[allow(dead_code)]
fn compute_hessenberg_eigenvalues<T>(
    h: &Array2<T>,
) -> LinalgResult<(Array1<Complex<T>>, Array2<Complex<T>>)>
where
    T: Float + NumAssign + Clone + Copy + Debug + scirs2_core::ndarray::ScalarOperand,
{
    // Lift every real entry into the complex plane in one pass.
    let mut h_complex = h.mapv(|x| Complex::new(x, T::zero()));
    qr_algorithm_complex(&mut h_complex)
}
/// Eigen-decomposition of the (symmetric tridiagonal) projection matrix,
/// delegated to the dense symmetric solver in `crate::eigen`.
#[allow(dead_code)]
fn solve_tridiagonal_eigen<T>(t: &Array2<T>) -> LinalgResult<(Array1<T>, Array2<T>)>
where
    T: Float + NumAssign + Clone + Copy + Debug + scirs2_core::ndarray::ScalarOperand,
{
    let t_view = t.view();
    crate::eigen::eigh(&t_view, None)
}
/// Placeholder "QR algorithm" for a complex matrix.
///
/// NOTE(review): this does NOT perform QR iteration. It reads the diagonal of
/// `a` as the eigenvalues and returns the identity as the eigenvector matrix,
/// which is exact only when `a` is already triangular. For a general
/// Hessenberg input (as produced by `compute_hessenberg_eigenvalues`) the
/// results are rough estimates at best — replace with a real shifted-QR
/// implementation. `a` is taken `&mut` for that future in-place iteration but
/// is not currently modified.
#[allow(dead_code)]
fn qr_algorithm_complex<T>(a: &mut Array2<Complex<T>>) -> LinalgResult<(Array1<Complex<T>>, Array2<Complex<T>>)>
where
    T: Float + NumAssign + Clone + Copy + Debug + scirs2_core::ndarray::ScalarOperand,
{
    let n = a.nrows();
    let mut eigenvalues = Array1::zeros(n);
    let mut eigenvectors = Array2::eye(n);
    for i in 0..n {
        // Diagonal entries taken directly as eigenvalue estimates.
        eigenvalues[i] = a[[i, i]];
        // Redundant with `eye` above, but kept: explicit unit diagonal.
        eigenvectors[[i, i]] = Complex::new(T::one(), T::zero());
    }
    Ok((eigenvalues, eigenvectors))
}
/// Finds the eigenvalues (and eigenvectors) of a sparse symmetric matrix that
/// fall inside the closed interval `range = (min_val, max_val)`.
///
/// Internally samples both ends of the spectrum via [`sparse_lanczos_eigen`]
/// with `which = "both"` and filters the results to the requested interval.
///
/// Renamed from `sparse_eirandom_range` (a typo): the in-file test suite calls
/// `sparse_eigen_range`, so the old name could never have compiled.
///
/// # Returns
/// `(eigenvalues, eigenvectors)` restricted to the interval; empty arrays
/// (`len == 0` / `ncols == 0`) if no computed eigenvalue falls in range.
///
/// # Errors
/// Propagates validation and computation errors from the Lanczos routine.
#[allow(dead_code)]
pub fn sparse_eigen_range<T>(
    matrix: &SparseMatrixView<T>,
    range: (T, T),
    max_iter: usize,
    tolerance: T,
) -> LinalgResult<(Array1<T>, Array2<T>)>
where
    T: Float
        + NumAssign
        + Clone
        + Copy
        + Debug
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + scirs2_core::ndarray::ScalarOperand,
{
    let (min_val, max_val) = range;
    let n = matrix.nrows();
    // Probe both ends of the spectrum with a modest subspace size; clamp to
    // n-1 (saturating, so n == 0 cannot underflow) as required by Lanczos.
    let k = std::cmp::min(std::cmp::max(10, n / 10), n.saturating_sub(1));
    let (all_eigenvals, all_eigenvecs) =
        sparse_lanczos_eigen(matrix, k, "both", max_iter, tolerance)?;
    // Keep only the eigenvalues lying inside [min_val, max_val].
    let selected_indices: Vec<usize> = all_eigenvals
        .iter()
        .enumerate()
        .filter(|&(_, &val)| val >= min_val && val <= max_val)
        .map(|(i, _)| i)
        .collect();
    if selected_indices.is_empty() {
        return Ok((Array1::zeros(0), Array2::zeros((n, 0))));
    }
    // Copy the surviving eigenpairs into densely-packed output arrays.
    let final_k = selected_indices.len();
    let mut eigenvalues = Array1::zeros(final_k);
    let mut eigenvectors = Array2::zeros((n, final_k));
    for (i, &idx) in selected_indices.iter().enumerate() {
        eigenvalues[i] = all_eigenvals[idx];
        for row in 0..n {
            eigenvectors[[row, i]] = all_eigenvecs[[row, idx]];
        }
    }
    Ok((eigenvalues, eigenvectors))
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;
    use crate::sparse_dense::sparse_from_ndarray;

    /// Lanczos on a small SPD matrix should find the dominant eigenvalue
    /// (the true value for this matrix is ~4.88, well inside (4.5, 5.5)).
    #[test]
    fn test_sparse_lanczos_smallmatrix() {
        let dense = array![
            [4.0, 1.0, 0.0],
            [1.0, 3.0, 1.0],
            [0.0, 1.0, 2.0]
        ];
        let sparse = sparse_from_ndarray(&dense.view(), 1e-12).expect("Operation failed");
        let result = sparse_lanczos_eigen(&sparse, 1, "largest", 50, 1e-8);
        assert!(result.is_ok());
        // Bug fix: the original pattern `let (eigenvals_eigenvecs)` was
        // missing the tuple comma and did not compile.
        let (eigenvals, _eigenvecs) = result.expect("Operation failed");
        assert_eq!(eigenvals.len(), 1);
        assert!(eigenvals[0] > 4.5);
        assert!(eigenvals[0] < 5.5);
    }

    /// Range query should return only eigenvalues inside [2, 4], with one
    /// eigenvector column per returned eigenvalue.
    #[test]
    fn test_sparse_eigen_range() {
        let dense = array![
            [3.0, 1.0, 0.0],
            [1.0, 3.0, 1.0],
            [0.0, 1.0, 3.0]
        ];
        let sparse = sparse_from_ndarray(&dense.view(), 1e-12).expect("Operation failed");
        let result = sparse_eigen_range(&sparse, (2.0, 4.0), 50, 1e-8);
        assert!(result.is_ok());
        let (eigenvals, eigenvecs) = result.expect("Operation failed");
        assert!(eigenvals.len() > 0);
        assert_eq!(eigenvecs.nrows(), 3);
        assert_eq!(eigenvecs.ncols(), eigenvals.len());
        for &eigenval in eigenvals.iter() {
            assert!(eigenval >= 2.0 - 1e-6);
            assert!(eigenval <= 4.0 + 1e-6);
        }
    }
}