use super::lanczos::{EigenResult, LanczosOptions};
use super::symmetric;
use crate::error::{SparseError, SparseResult};
use crate::sparray::SparseArray;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::numeric::Float;
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Sub};
#[allow(dead_code)]
/// Compute `k` eigenvalues/eigenvectors of a square sparse matrix.
///
/// Dispatches to the symmetric Lanczos path (`symmetric::eigsh`) when the
/// matrix looks symmetric (sampled check — see `is_approximately_symmetric`),
/// otherwise falls back to a general Arnoldi iteration.
///
/// * `k` — number of eigenvalues to compute (default 6).
/// * `which` — selection criterion, e.g. "LM"/"SM"/"LR"/"SR" (default "LM").
/// * `options` — Lanczos/Arnoldi options (default `LanczosOptions::default()`).
///
/// Returns an error if the matrix is not square.
pub fn eigs<T, S>(
    matrix: &S,
    k: Option<usize>,
    which: Option<&str>,
    options: Option<LanczosOptions>,
) -> SparseResult<EigenResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + std::iter::Sum
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync
        + 'static
        + scirs2_core::ndarray::ScalarOperand,
    S: SparseArray<T>,
{
    let opts = options.unwrap_or_default();
    let num_requested = k.unwrap_or(6);
    let selection = which.unwrap_or("LM");

    let (rows, cols) = matrix.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for eigenvalue computation".to_string(),
        ));
    }

    if is_approximately_symmetric(matrix)? {
        // Symmetric path: convert to the dedicated symmetric storage and
        // use the (cheaper, more accurate) symmetric solver.
        let sym_matrix = convert_to_symmetric(matrix)?;
        symmetric::eigsh(&sym_matrix, Some(num_requested), Some(selection), Some(opts))
    } else {
        general_arnoldi_iteration(matrix, num_requested, selection, &opts)
    }
}
/// Cheap symmetry probe: compares `A[i][j]` against `A[j][i]` on (at most) the
/// leading 10x10 submatrix only. Non-square matrices are never symmetric.
///
/// NOTE: because only a leading block is sampled, a matrix that is symmetric
/// in that block but not elsewhere will be misclassified as symmetric.
fn is_approximately_symmetric<T, S>(matrix: &S) -> SparseResult<bool>
where
    T: Float + SparseElement + Debug + Copy + 'static,
    S: SparseArray<T>,
{
    let (rows, cols) = matrix.shape();
    if rows != cols {
        return Ok(false);
    }
    // Fall back to machine epsilon if 1e-12 is not representable in T.
    let tolerance = T::from(1e-12).unwrap_or(T::epsilon());
    let symmetric = (0..rows.min(10)).all(|i| {
        (0..cols.min(10))
            // Written as !(diff > tol) to keep the original's NaN behavior
            // (a NaN difference does not trip the asymmetry test).
            .all(|j| !((matrix.get(i, j) - matrix.get(j, i)).abs() > tolerance))
    });
    Ok(symmetric)
}
/// Build a `SymCsrMatrix` from the lower triangle (columns `0..=row`) of the
/// input, dropping explicit zeros. Only the lower triangle is read; the upper
/// triangle is assumed redundant (caller has established approximate symmetry).
fn convert_to_symmetric<T, S>(matrix: &S) -> SparseResult<crate::sym_csr::SymCsrMatrix<T>>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + Div<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (n, _) = matrix.shape();
    let mut values = Vec::new();
    let mut col_indices = Vec::new();
    // CSR row-pointer array: starts at 0, one cumulative entry per row.
    let mut row_ptr = vec![0];
    for row in 0..n {
        let mut nnz_in_row = 0;
        for col in 0..=row {
            let value = matrix.get(row, col);
            if !SparseElement::is_zero(&value) {
                values.push(value);
                col_indices.push(col);
                nnz_in_row += 1;
            }
        }
        let prev = *row_ptr.last().expect("row_ptr starts non-empty");
        row_ptr.push(prev + nnz_in_row);
    }
    crate::sym_csr::SymCsrMatrix::new(values, col_indices, row_ptr, (n, n))
}
/// Arnoldi iteration for a general (possibly non-symmetric) sparse matrix.
///
/// Builds an orthonormal Krylov basis `v_vectors` and its upper Hessenberg
/// projection `H`, extracts Ritz values/vectors from `H`, and reports the
/// 2-norm residual `||A x - lambda x||` for each Ritz pair.
///
/// NOTE(review): the projected eigenproblem is solved by a diagonal
/// approximation (see `solve_hessenberg_eigenproblem`), which is exact only
/// when `H` ends up triangular — confirm accuracy requirements before use.
fn general_arnoldi_iteration<T, S>(
    matrix: &S,
    k: usize,
    which: &str,
    options: &LanczosOptions,
) -> SparseResult<EigenResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + std::iter::Sum
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync
        + 'static
        + scirs2_core::ndarray::ScalarOperand,
    S: SparseArray<T>,
{
    let (n, _) = matrix.shape();
    let subspace_size = options.max_subspace_size.min(n);
    let num_eigenvalues = k.min(subspace_size);

    // Deterministic starting vector: the first canonical basis vector e_1,
    // normalized (normalization is a no-op here since ||e_1|| = 1).
    let mut v = Array1::zeros(n);
    v[0] = T::sparse_one();
    let norm = (v.iter().map(|&x| x * x).sum::<T>()).sqrt();
    if !SparseElement::is_zero(&norm) {
        v = v / norm;
    }
    let mut v_vectors = Vec::with_capacity(subspace_size);
    v_vectors.push(v.clone());

    // H is (m+1) x m; row j+1 holds the subdiagonal norm for column j.
    let mut h_matrix = Array2::zeros((subspace_size + 1, subspace_size));
    let mut converged = false;
    // Number of COMPLETED Arnoldi columns — i.e. valid columns of H.
    let mut iter = 0;
    for j in 0..subspace_size.min(options.max_iter) {
        // Expand the Krylov subspace: w = A * v_j.
        let w = matrix_vector_multiply(matrix, &v_vectors[j])?;

        // Modified Gram-Schmidt: orthogonalize w against all previous vectors,
        // recording the projection coefficients in column j of H.
        let mut w_orth = w;
        for i in 0..=j {
            let h_ij = v_vectors[i]
                .iter()
                .zip(w_orth.iter())
                .map(|(&vi, &wi)| vi * wi)
                .sum::<T>();
            h_matrix[[i, j]] = h_ij;
            // `row` (not `k`) to avoid shadowing the `k` parameter.
            for row in 0..n {
                w_orth[row] = w_orth[row] - h_ij * v_vectors[i][row];
            }
        }

        let norm_w = (w_orth.iter().map(|&x| x * x).sum::<T>()).sqrt();
        h_matrix[[j + 1, j]] = norm_w;

        if norm_w < T::from(options.tol).expect("Operation failed") {
            // Lucky breakdown: the Krylov subspace is invariant. Column j of H
            // is fully computed, so count it as a completed iteration.
            // (Previously `iter` was left at j, which sliced away the last
            // column of H and silently dropped the final Ritz value.)
            iter = j + 1;
            converged = true;
            break;
        }
        if j + 1 < subspace_size {
            // Safe: norm_w >= tol here, so division is well-defined for tol > 0.
            let v_next = w_orth / norm_w;
            v_vectors.push(v_next);
        }
        iter += 1;

        // Heuristic early exit every 5 steps once enough columns exist.
        // NOTE(review): this sets `converged = true` without a residual check.
        if (j + 1) >= num_eigenvalues && (j + 1) % 5 == 0 {
            converged = true;
            break;
        }
    }

    // Solve the projected eigenproblem on the leading (iter+1) x iter block.
    let (eigenvalues, eigenvectors) = solve_hessenberg_eigenproblem(
        &h_matrix
            .slice(scirs2_core::ndarray::s![..iter + 1, ..iter])
            .to_owned(),
        num_eigenvalues,
        which,
    )?;

    // Lift eigenvectors of H back to Ritz vectors: x = sum_j y[j] * v_j.
    let mut ritz_vectors = Array2::zeros((n, eigenvalues.len()));
    for (col, eigvec) in eigenvectors.iter().enumerate() {
        for i in 0..n {
            let mut sum = T::sparse_zero();
            for j in 0..eigvec.len().min(v_vectors.len()) {
                sum = sum + eigvec[j] * v_vectors[j][i];
            }
            ritz_vectors[[i, col]] = sum;
        }
    }

    // Residual ||A x - lambda x||_2 per Ritz pair. A failed multiply is
    // reported as residual 1.0 (best effort) rather than aborting the result.
    let mut residuals = Array1::zeros(eigenvalues.len());
    for idx in 0..eigenvalues.len() {
        let x_k = ritz_vectors.column(idx).to_owned();
        match matrix_vector_multiply(matrix, &x_k) {
            Ok(ax) => {
                let lambda_k = eigenvalues[idx];
                let mut res_norm_sq = T::sparse_zero();
                for i in 0..n {
                    let diff = ax[i] - lambda_k * x_k[i];
                    res_norm_sq = res_norm_sq + diff * diff;
                }
                residuals[idx] = res_norm_sq.sqrt();
            }
            Err(_) => {
                residuals[idx] = T::from(1.0).expect("conv");
            }
        }
    }
    Ok(EigenResult {
        eigenvalues: Array1::from_vec(eigenvalues),
        eigenvectors: Some(ritz_vectors),
        iterations: iter,
        residuals,
        converged,
    })
}
/// Dense-style matrix-vector product `A * x` computed via element-wise
/// `matrix.get(i, j)` lookups (O(n*m) regardless of sparsity).
///
/// Returns `DimensionMismatch` when the vector length does not equal the
/// matrix column count.
fn matrix_vector_multiply<T, S>(matrix: &S, vector: &Array1<T>) -> SparseResult<Array1<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Mul<Output = T>
        + std::iter::Sum
        + 'static,
    S: SparseArray<T>,
{
    let (rows, cols) = matrix.shape();
    if cols != vector.len() {
        return Err(SparseError::DimensionMismatch {
            expected: cols,
            found: vector.len(),
        });
    }
    let mut product = Array1::zeros(rows);
    for (i, slot) in product.iter_mut().enumerate() {
        // Left-fold from sparse_zero, same accumulation order as a plain loop.
        *slot = (0..cols).fold(T::sparse_zero(), |acc, j| {
            acc + matrix.get(i, j) * vector[j]
        });
    }
    Ok(product)
}
/// Approximate eigen-decomposition of an upper Hessenberg matrix.
///
/// This is a placeholder: it takes the leading `num_eigenvalues` DIAGONAL
/// entries as eigenvalue estimates and the matching canonical basis vectors as
/// eigenvectors, then sorts them by the `which` criterion ("LM"/"SM" by
/// magnitude, "LR"/"SR" by value; anything else keeps the natural order).
/// Diagonal entries are exact eigenvalues only when the matrix is triangular.
fn solve_hessenberg_eigenproblem<T>(
    h_matrix: &Array2<T>,
    num_eigenvalues: usize,
    which: &str,
) -> SparseResult<(Vec<T>, Vec<Vec<T>>)>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>,
{
    let dim = h_matrix.nrows().min(h_matrix.ncols());
    if dim == 0 {
        return Ok((Vec::new(), Vec::new()));
    }

    let take = dim.min(num_eigenvalues);
    // Eigenvalue estimates: leading diagonal entries of H.
    let eigenvalues: Vec<T> = (0..take).map(|i| h_matrix[[i, i]]).collect();
    // Placeholder eigenvectors: canonical basis vectors e_i of length `dim`.
    let eigenvectors: Vec<Vec<T>> = (0..take)
        .map(|i| {
            let mut unit = vec![T::sparse_zero(); dim];
            unit[i] = T::sparse_one();
            unit
        })
        .collect();

    // Sort permutation according to the selection criterion. Incomparable
    // values (NaN) compare as Equal, matching the original behavior.
    let mut order: Vec<usize> = (0..eigenvalues.len()).collect();
    let by_magnitude = |a: usize, b: usize| {
        eigenvalues[a]
            .abs()
            .partial_cmp(&eigenvalues[b].abs())
            .unwrap_or(std::cmp::Ordering::Equal)
    };
    let by_value = |a: usize, b: usize| {
        eigenvalues[a]
            .partial_cmp(&eigenvalues[b])
            .unwrap_or(std::cmp::Ordering::Equal)
    };
    match which {
        "LM" => order.sort_by(|&i, &j| by_magnitude(j, i)), // largest magnitude first
        "SM" => order.sort_by(|&i, &j| by_magnitude(i, j)), // smallest magnitude first
        "LR" => order.sort_by(|&i, &j| by_value(j, i)),     // largest value first
        "SR" => order.sort_by(|&i, &j| by_value(i, j)),     // smallest value first
        _ => {}                                             // unknown: keep natural order
    }

    let sorted_eigenvalues: Vec<T> = order.iter().map(|&i| eigenvalues[i]).collect();
    let sorted_eigenvectors: Vec<Vec<T>> =
        order.iter().map(|&i| eigenvectors[i].clone()).collect();
    Ok((sorted_eigenvalues, sorted_eigenvectors))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr_array::CsrArray;

    #[test]
    fn test_eigs_basic() {
        // Upper-triangular 2x2 matrix [[2, 1], [0, 1]] in CSR form.
        let data = vec![2.0, 1.0, 1.0];
        let indices = vec![0, 1, 1];
        let indptr = vec![0, 2, 3];
        let matrix = CsrArray::new(data.into(), indices.into(), indptr.into(), (2, 2))
            .expect("Operation failed");
        let result = eigs(&matrix, Some(1), Some("LM"), None);
        // NOTE(review): this assertion is a tautology — it only checks that the
        // call returns without panicking. Consider pinning Ok/Err explicitly.
        assert!(result.is_ok() || result.is_err());
    }

    #[test]
    fn test_matrix_vector_multiply() {
        // Dense 2x2 matrix [[1, 2], [3, 4]] stored as CSR.
        let data = vec![1.0, 2.0, 3.0, 4.0];
        let indices = vec![0, 1, 0, 1];
        let indptr = vec![0, 2, 4];
        let matrix = CsrArray::new(data.into(), indices.into(), indptr.into(), (2, 2))
            .expect("Operation failed");
        let vector = Array1::from_vec(vec![1.0, 2.0]);
        // A * [1, 2]^T = [5, 11]^T.
        let result = matrix_vector_multiply(&matrix, &vector).expect("Operation failed");
        assert_eq!(result.len(), 2);
        assert!((result[0] - 5.0).abs() < 1e-10);
        assert!((result[1] - 11.0).abs() < 1e-10);
    }

    #[test]
    fn test_is_approximately_symmetric() {
        // Symmetric 2x2 matrix [[2, 1], [1, 2]].
        let data = vec![2.0, 1.0, 1.0, 2.0];
        let indices = vec![0, 1, 0, 1];
        let indptr = vec![0, 2, 4];
        let matrix = CsrArray::new(data.into(), indices.into(), indptr.into(), (2, 2))
            .expect("Operation failed");
        let is_sym = is_approximately_symmetric(&matrix).expect("Operation failed");
        assert!(is_sym);
    }

    #[test]
    fn test_solve_hessenberg_simple() {
        // Upper-triangular input, so the diagonal approximation is exact here.
        let h = Array2::from_shape_vec((2, 2), vec![3.0, 1.0, 0.0, 2.0]).expect("Operation failed");
        let (eigenvals, eigenvecs) =
            solve_hessenberg_eigenproblem(&h, 2, "LM").expect("Operation failed");
        assert_eq!(eigenvals.len(), 2);
        assert_eq!(eigenvecs.len(), 2);
        assert!(eigenvals.contains(&3.0));
        assert!(eigenvals.contains(&2.0));
    }
}