use scirs2_core::ndarray::{Array1, Array2, ArrayView2, ScalarOperand};
use scirs2_core::numeric::Complex;
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::random::prelude::*;
use std::iter::Sum;
use crate::decomposition::qr;
use crate::error::{LinalgError, LinalgResult};
use crate::norm::vector_norm;
use crate::validation::validate_decomposition;
/// Result of a general eigendecomposition: complex eigenvalues paired with a
/// matrix whose columns are the corresponding complex eigenvectors.
pub type EigenResult<F> = LinalgResult<(Array1<Complex<F>>, Array2<Complex<F>>)>;
#[allow(dead_code)]
pub fn eig<F>(a: &ArrayView2<F>, workers: Option<usize>) -> EigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_decomposition(a, "Eigenvalue computation", true)?;
let n = a.nrows();
if n == 1 {
let eigenvalue = Complex::new(a[[0, 0]], F::zero());
let eigenvector = Array2::eye(1).mapv(|x| Complex::new(x, F::zero()));
return Ok((Array1::from_elem(1, eigenvalue), eigenvector));
} else if n == 2 {
return solve_2x2_eigenvalue_problem(a);
}
solve_qr_algorithm(a)
}
#[allow(dead_code)]
pub fn eigvals<F>(a: &ArrayView2<F>, workers: Option<usize>) -> LinalgResult<Array1<Complex<F>>>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let (eigenvalues, _) = eig(a, workers)?;
Ok(eigenvalues)
}
#[allow(dead_code)]
pub fn power_iteration<F>(
a: &ArrayView2<F>,
max_iter: usize,
tol: F,
) -> LinalgResult<(F, Array1<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
if a.nrows() != a.ncols() {
return Err(LinalgError::ShapeError(format!(
"Expected square matrix, got shape {:?}",
a.shape()
)));
}
let n = a.nrows();
let mut rng = scirs2_core::random::rng();
let mut b = Array1::zeros(n);
for i in 0..n {
b[i] = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm_b = vector_norm(&b.view(), 2)?;
b.mapv_inplace(|x| x / norm_b);
let mut eigenvalue = F::zero();
let mut prev_eigenvalue = F::zero();
for _ in 0..max_iter {
let mut b_new = Array1::zeros(n);
for i in 0..n {
let mut sum = F::zero();
for j in 0..n {
sum += a[[i, j]] * b[j];
}
b_new[i] = sum;
}
eigenvalue = F::zero();
for i in 0..n {
eigenvalue += b[i] * b_new[i];
}
let norm_b_new = vector_norm(&b_new.view(), 2)?;
for i in 0..n {
b[i] = b_new[i] / norm_b_new;
}
if (eigenvalue - prev_eigenvalue).abs() < tol {
return Ok((eigenvalue, b));
}
prev_eigenvalue = eigenvalue;
}
Ok((eigenvalue, b))
}
#[allow(dead_code)]
pub fn eigh<F>(a: &ArrayView2<F>, workers: Option<usize>) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
use crate::parallel;
parallel::configure_workers(workers);
if a.nrows() != a.ncols() {
return Err(LinalgError::ShapeError(format!(
"Expected square matrix, got shape {:?}",
a.shape()
)));
}
for i in 0..a.nrows() {
for j in (i + 1)..a.ncols() {
if (a[[i, j]] - a[[j, i]]).abs() > F::epsilon() {
return Err(LinalgError::ShapeError(
"Matrix must be symmetric for Hermitian eigenvalue computation".to_string(),
));
}
}
}
let n = a.nrows();
if n == 1 {
let eigenvalue = a[[0, 0]];
let eigenvector = Array2::eye(1);
return Ok((Array1::from_elem(1, eigenvalue), eigenvector));
} else if n == 2 {
return solve_2x2_symmetric_eigenvalue_problem(a);
} else if n == 3 {
return solve_3x3_symmetric_eigenvalue_problem(a);
} else if n == 4 {
return solve_4x4_symmetric_eigenvalue_problem(a);
}
let use_work_stealing = if let Some(num_workers) = workers {
num_workers > 1 && n > 100
} else {
false
};
if use_work_stealing {
if let Some(num_workers) = workers {
return crate::parallel::parallel_eigvalsh_work_stealing(a, num_workers);
}
}
solve_symmetric_with_power_iteration(a)
}
#[allow(dead_code)]
pub fn advanced_precision_eig<F>(
a: &ArrayView2<F>,
workers: Option<usize>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
use crate::parallel;
parallel::configure_workers(workers);
if a.nrows() != a.ncols() {
return Err(LinalgError::ShapeError(format!(
"Expected square matrix, got shape {:?}",
a.shape()
)));
}
let n = a.nrows();
let condition_number = estimate_condition_number(a)?;
let precision_target =
if condition_number > F::from(1e12).expect("Failed to convert constant to float") {
F::from(1e-8).expect("Failed to convert constant to float") } else if condition_number > F::from(1e8).expect("Failed to convert constant to float") {
F::from(1e-9).expect("Failed to convert constant to float") } else {
F::from(1e-10).expect("Failed to convert constant to float") };
if n <= 4 {
return advanced_precise_smallmatrix_eig(a, precision_target);
}
advanced_precise_iterative_eig(a, precision_target, workers)
}
#[allow(dead_code)]
fn estimate_condition_number<F>(a: &ArrayView2<F>) -> LinalgResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let n = a.nrows();
if n == 1 {
return Ok(F::one());
}
let (lambda_max_, _) = power_iteration(
a,
100,
F::from(1e-8).expect("Failed to convert constant to float"),
)?;
let _trace = (0..n).map(|i| a[[i, i]]).fold(F::zero(), |acc, x| acc + x);
let det_estimate = if n == 2 {
a[[0, 0]] * a[[1, 1]] - a[[0, 1]] * a[[1, 0]]
} else {
let product = (0..n)
.map(|i| a[[i, i]].abs())
.fold(F::one(), |acc, x| acc * x);
if product > F::zero() {
product.powf(F::one() / F::from(n).expect("Failed to convert to float"))
} else {
F::from(1e-15).expect("Failed to convert constant to float") }
};
let lambda_min =
if det_estimate.abs() > F::from(1e-15).expect("Failed to convert constant to float") {
det_estimate / lambda_max_.powf(F::from(n - 1).expect("Failed to convert to float"))
} else {
F::from(1e-15).expect("Failed to convert constant to float")
};
let condition = lambda_max_.abs()
/ lambda_min
.abs()
.max(F::from(1e-15).expect("Failed to convert constant to float"));
Ok(condition)
}
#[allow(dead_code)]
fn advanced_precise_smallmatrix_eig<F>(
a: &ArrayView2<F>,
precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let n = a.nrows();
match n {
1 => {
let eigenvalue = a[[0, 0]];
let eigenvector = Array2::eye(1);
Ok((Array1::from_elem(1, eigenvalue), eigenvector))
}
2 => advanced_precise_2x2_eig(a, precision_target),
3 => advanced_precise_3x3_eig(a, precision_target),
4 => advanced_precise_4x4_eig(a, precision_target),
_ => Err(LinalgError::InvalidInput(
"Matrix size not supported for advanced-precise small matrix solver".to_string(),
)),
}
}
#[allow(dead_code)]
fn advanced_precise_2x2_eig<F>(
a: &ArrayView2<F>,
_precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let a11 = a[[0, 0]];
let a12 = a[[0, 1]];
let a21 = a[[1, 0]];
let a22 = a[[1, 1]];
let trace = kahan_sum(&[a11, a22]);
let det = kahan_sum(&[a11 * a22, -(a12 * a21)]);
let trace_squared = trace * trace;
let four_det = F::from(4.0).expect("Failed to convert constant to float") * det;
let discriminant = kahan_sum(&[trace_squared, -four_det]);
let half = F::from(0.5).expect("Failed to convert constant to float");
let sqrt_discriminant = discriminant.abs().sqrt();
let lambda1 = if discriminant >= F::zero() {
(trace + sqrt_discriminant) * half
} else {
trace * half
};
let lambda2 = if discriminant >= F::zero() {
(trace - sqrt_discriminant) * half
} else {
trace * half
};
let mut eigenvalues = Array1::zeros(2);
eigenvalues[0] = lambda1;
eigenvalues[1] = lambda2;
let eigenvectors = compute_advanced_precise_eigenvectors_2x2(a, &eigenvalues)?;
Ok((eigenvalues, eigenvectors))
}
#[allow(dead_code)]
fn advanced_precise_3x3_eig<F>(
a: &ArrayView2<F>,
precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let characteristic_poly = compute_characteristic_polynomial_3x3(a)?;
let eigenvalues =
solve_cubic_equation_advanced_precise(&characteristic_poly, precision_target)?;
let eigenvectors =
compute_advanced_precise_eigenvectors_3x3(a, &eigenvalues, precision_target)?;
Ok((eigenvalues, eigenvectors))
}
#[allow(dead_code)]
fn advanced_precise_4x4_eig<F>(
a: &ArrayView2<F>,
precision_target: F,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
advanced_precise_qr_iteration(a, precision_target, 1000)
}
#[allow(dead_code)]
fn kahan_sum<F>(values: &[F]) -> F
where
F: Float + NumAssign,
{
let mut sum = F::zero();
let mut c = F::zero();
for &value in values {
let y = value - c;
let t = sum + y;
c = (t - sum) - y;
sum = t;
}
sum
}
#[allow(dead_code)]
fn compute_characteristic_polynomial_3x3<F>(a: &ArrayView2<F>) -> LinalgResult<[F; 4]>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let a00 = a[[0, 0]];
let a01 = a[[0, 1]];
let a02 = a[[0, 2]];
let a10 = a[[1, 0]];
let a11 = a[[1, 1]];
let a12 = a[[1, 2]];
let a20 = a[[2, 0]];
let a21 = a[[2, 1]];
let a22 = a[[2, 2]];
let c3 = -F::one();
let c2 = kahan_sum(&[-a00, -a11, -a22]);
let minor_00_11 = a00 * a11 - a01 * a10;
let minor_00_22 = a00 * a22 - a02 * a20;
let minor_11_22 = a11 * a22 - a12 * a21;
let c1 = kahan_sum(&[minor_00_11, minor_00_22, minor_11_22]);
let det_part1 = a00 * (a11 * a22 - a12 * a21);
let det_part2 = -a01 * (a10 * a22 - a12 * a20);
let det_part3 = a02 * (a10 * a21 - a11 * a20);
let c0 = -kahan_sum(&[det_part1, det_part2, det_part3]);
Ok([c0, c1, c2, c3])
}
#[allow(dead_code)]
fn solve_cubic_equation_advanced_precise<F>(
coeffs: &[F; 4],
precision_target: F,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let [c0, c1, c2, c3] = *coeffs;
let a = c2 / c3;
let b = c1 / c3;
let c = c0 / c3;
let third = F::one() / F::from(3.0).expect("Failed to convert constant to float");
let p = b - a * a * third;
let q_part1 = F::from(2.0).expect("Failed to convert constant to float") * a * a * a
/ F::from(27.0).expect("Failed to convert constant to float");
let q_part2 = -a * b * third;
let q = kahan_sum(&[q_part1, q_part2, c]);
let discriminant_part1 =
(q / F::from(2.0).expect("Failed to convert constant to float")).powi(2);
let discriminant_part2 =
(p / F::from(3.0).expect("Failed to convert constant to float")).powi(3);
let discriminant = discriminant_part1 + discriminant_part2;
let mut roots = Array1::zeros(3);
if discriminant > precision_target {
let sqrt_disc = discriminant.sqrt();
let half_q = q / F::from(2.0).expect("Failed to convert constant to float");
let u = if half_q + sqrt_disc >= F::zero() {
(half_q + sqrt_disc).powf(third)
} else {
-(-half_q - sqrt_disc).powf(third)
};
let v = if u.abs() > precision_target {
-p / (F::from(3.0).expect("Failed to convert constant to float") * u)
} else {
F::zero()
};
let root = u + v - a * third;
let refined_root = newton_method_cubic_refinement(coeffs, root, precision_target, 20)?;
roots[0] = refined_root;
roots[1] = refined_root; roots[2] = refined_root;
} else {
let m = F::from(2.0).expect("Failed to convert constant to float")
* (-p / F::from(3.0).expect("Failed to convert constant to float")).sqrt();
let theta = (F::from(3.0).expect("Failed to convert constant to float") * q / (p * m))
.acos()
/ F::from(3.0).expect("Failed to convert constant to float");
let two_pi_third = F::from(2.0).expect("Failed to convert constant to float")
* F::from(std::f64::consts::PI).expect("Failed to convert to float")
/ F::from(3.0).expect("Failed to convert constant to float");
let a_third = a * third;
roots[0] = m * theta.cos() - a_third;
roots[1] = m * (theta - two_pi_third).cos() - a_third;
roots[2] = m
* (theta - F::from(2.0).expect("Failed to convert constant to float") * two_pi_third)
.cos()
- a_third;
for i in 0..3 {
roots[i] = newton_method_cubic_refinement(coeffs, roots[i], precision_target, 20)?;
}
}
let mut root_vec: Vec<F> = roots.to_vec();
root_vec.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
for (i, &root) in root_vec.iter().enumerate() {
roots[i] = root;
}
Ok(roots)
}
#[allow(dead_code)]
fn newton_method_cubic_refinement<F>(
coeffs: &[F; 4],
initial_guess: F,
tolerance: F,
max_iter: usize,
) -> LinalgResult<F>
where
F: Float + NumAssign,
{
let [c0, c1, c2, c3] = *coeffs;
let mut x = initial_guess;
for _ in 0..max_iter {
let f_x = ((c3 * x + c2) * x + c1) * x + c0;
let df_x = (F::from(3.0).expect("Failed to convert constant to float") * c3 * x
+ F::from(2.0).expect("Failed to convert constant to float") * c2)
* x
+ c1;
if df_x.abs() < tolerance {
break; }
let x_new = x - f_x / df_x;
if (x_new - x).abs() < tolerance {
return Ok(x_new);
}
x = x_new;
}
Ok(x)
}
#[allow(dead_code)]
fn compute_advanced_precise_eigenvectors_2x2<F>(
a: &ArrayView2<F>,
eigenvalues: &Array1<F>,
) -> LinalgResult<Array2<F>>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let mut eigenvectors = Array2::zeros((2, 2));
for (i, &lambda) in eigenvalues.iter().enumerate() {
let mut v = Array1::zeros(2);
let a11_lambda = a[[0, 0]] - lambda;
let a22_lambda = a[[1, 1]] - lambda;
if a[[0, 1]].abs() > a[[1, 0]].abs() {
if a[[0, 1]].abs() > F::epsilon() {
v[0] = F::one();
v[1] = -a11_lambda / a[[0, 1]];
} else {
v[0] = F::one();
v[1] = F::zero();
}
} else if a[[1, 0]].abs() > F::epsilon() {
v[0] = -a22_lambda / a[[1, 0]];
v[1] = F::one();
} else {
if a11_lambda.abs() < a22_lambda.abs() {
v[0] = F::one();
v[1] = F::zero();
} else {
v[0] = F::zero();
v[1] = F::one();
}
}
let norm = vector_norm(&v.view(), 2)?;
if norm > F::epsilon() {
v.mapv_inplace(|x| x / norm);
}
eigenvectors.column_mut(i).assign(&v);
}
gram_schmidt_orthogonalization(&mut eigenvectors)?;
Ok(eigenvectors)
}
#[allow(dead_code)]
fn compute_advanced_precise_eigenvectors_3x3<F>(
a: &ArrayView2<F>,
eigenvalues: &Array1<F>,
precision_target: F,
) -> LinalgResult<Array2<F>>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let mut eigenvectors = Array2::zeros((3, 3));
for (i, &lambda) in eigenvalues.iter().enumerate() {
let mut matrix = a.to_owned();
for j in 0..3 {
matrix[[j, j]] -= lambda;
}
let eigenvector = enhanced_null_space_computation(&matrix.view(), precision_target)?;
eigenvectors.column_mut(i).assign(&eigenvector);
}
for _ in 0..3 {
gram_schmidt_orthogonalization(&mut eigenvectors)?;
}
Ok(eigenvectors)
}
#[allow(dead_code)]
fn enhanced_null_space_computation<F>(
matrix: &ArrayView2<F>,
precision_target: F,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let n = matrix.nrows();
let mut v = Array1::zeros(n);
let mut best_v = v.clone();
let mut best_residual = F::infinity();
for _trial in 0..5 {
let mut rng = scirs2_core::random::rng();
for i in 0..n {
v[i] = F::from(rng.random_range(-1.0..=1.0)).unwrap_or(F::zero());
}
let norm = vector_norm(&v.view(), 2)?;
if norm > F::epsilon() {
v.mapv_inplace(|x| x / norm);
}
for _iter in 0..50 {
let mut av = Array1::zeros(n);
for i in 0..n {
for j in 0..n {
av[i] += matrix[[i, j]] * v[j];
}
}
let residual = vector_norm(&av.view(), 2)?;
if residual < best_residual {
best_residual = residual;
best_v.assign(&v);
}
if residual < precision_target {
break;
}
let norm_v = vector_norm(&v.view(), 2)?;
if norm_v > F::epsilon() {
v.mapv_inplace(|x| x / norm_v);
}
}
}
Ok(best_v)
}
#[allow(dead_code)]
fn gram_schmidt_orthogonalization<F>(matrix: &mut Array2<F>) -> LinalgResult<()>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let n = matrix.nrows();
let m = matrix.ncols();
for i in 0..m {
let mut col_i = matrix.column(i).to_owned();
let norm_i = vector_norm(&col_i.view(), 2)?;
if norm_i > F::epsilon() {
col_i.mapv_inplace(|x| x / norm_i);
matrix.column_mut(i).assign(&col_i);
}
for j in (i + 1)..m {
let mut col_j = matrix.column(j).to_owned();
let mut dot_product = F::zero();
let mut c = F::zero();
for k in 0..n {
let y = col_i[k] * col_j[k] - c;
let t = dot_product + y;
c = (t - dot_product) - y;
dot_product = t;
}
for k in 0..n {
col_j[k] -= dot_product * col_i[k];
}
matrix.column_mut(j).assign(&col_j);
}
}
Ok(())
}
#[allow(dead_code)]
fn advanced_precise_iterative_eig<F>(
a: &ArrayView2<F>,
precision_target: F,
_workers: Option<usize>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
advanced_precise_qr_iteration(a, precision_target, 2000)
}
#[allow(dead_code)]
fn advanced_precise_qr_iteration<F>(
a: &ArrayView2<F>,
precision_target: F,
max_iter: usize,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let n = a.nrows();
let mut h = a.to_owned(); let mut q_total = Array2::eye(n);
let mut iter_count = 0;
while iter_count < max_iter {
let mut converged = true;
for i in 1..n {
for j in 0..i {
if h[[i, j]].abs() > precision_target {
converged = false;
break;
}
}
if !converged {
break;
}
}
if converged {
break;
}
let (q, r) = enhanced_qr_decomposition(&h.view())?;
h = r.dot(&q);
q_total = q_total.dot(&q);
iter_count += 1;
}
let mut eigenvalues = Array1::zeros(n);
for i in 0..n {
eigenvalues[i] = h[[i, i]];
}
let mut indices: Vec<usize> = (0..n).collect();
indices.sort_by(|&a, &b| {
eigenvalues[a]
.partial_cmp(&eigenvalues[b])
.unwrap_or(std::cmp::Ordering::Equal)
});
let mut sorted_eigenvalues = Array1::zeros(n);
let mut sorted_eigenvectors = Array2::zeros((n, n));
for (new_idx, &old_idx) in indices.iter().enumerate() {
sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
sorted_eigenvectors
.column_mut(new_idx)
.assign(&q_total.column(old_idx));
}
Ok((sorted_eigenvalues, sorted_eigenvectors))
}
#[allow(dead_code)]
fn enhanced_qr_decomposition<F>(a: &ArrayView2<F>) -> LinalgResult<(Array2<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
let (m, n) = a.dim();
let mut q = Array2::eye(m);
let mut r = a.to_owned();
for k in 0..n.min(m - 1) {
let mut x = Array1::zeros(m - k);
for i in k..m {
x[i - k] = r[[i, k]];
}
let norm_x = vector_norm(&x.view(), 2)?;
if norm_x < F::epsilon() {
continue;
}
let mut v = x.clone();
v[0] += if x[0] >= F::zero() { norm_x } else { -norm_x };
let norm_v = vector_norm(&v.view(), 2)?;
if norm_v > F::epsilon() {
v.mapv_inplace(|val| val / norm_v);
}
for j in k..n {
let mut col = Array1::zeros(m - k);
for i in k..m {
col[i - k] = r[[i, j]];
}
let dot_product = v.dot(&col);
for i in k..m {
r[[i, j]] -= F::from(2.0).expect("Failed to convert constant to float")
* dot_product
* v[i - k];
}
}
for j in 0..m {
let mut col = Array1::zeros(m - k);
for i in k..m {
col[i - k] = q[[i, j]];
}
let dot_product = v.dot(&col);
for i in k..m {
q[[i, j]] -= F::from(2.0).expect("Failed to convert constant to float")
* dot_product
* v[i - k];
}
}
}
Ok((q.t().to_owned(), r))
}
#[allow(dead_code)]
fn solve_2x2_eigenvalue_problem<F>(a: &ArrayView2<F>) -> EigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
let a11 = a[[0, 0]];
let a12 = a[[0, 1]];
let a21 = a[[1, 0]];
let a22 = a[[1, 1]];
let trace = a11 + a22;
let det = a11 * a22 - a12 * a21;
let discriminant =
trace * trace - F::from(4.0).expect("Failed to convert constant to float") * det;
let mut eigenvalues = Array1::zeros(2);
let mut eigenvectors = Array2::zeros((2, 2));
if discriminant >= F::zero() {
let sqrt_discriminant = discriminant.sqrt();
let lambda1 = (trace + sqrt_discriminant)
/ F::from(2.0).expect("Failed to convert constant to float");
let lambda2 = (trace - sqrt_discriminant)
/ F::from(2.0).expect("Failed to convert constant to float");
eigenvalues[0] = Complex::new(lambda1, F::zero());
eigenvalues[1] = Complex::new(lambda2, F::zero());
for (i, &lambda) in [lambda1, lambda2].iter().enumerate() {
let mut eigenvector = Array1::zeros(2);
if a12 != F::zero() {
eigenvector[0] = a12;
eigenvector[1] = lambda - a11;
} else if a21 != F::zero() {
eigenvector[0] = lambda - a22;
eigenvector[1] = a21;
} else {
eigenvector[0] = if (a11 - lambda).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
eigenvector[1] = if (a22 - lambda).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
}
let norm = vector_norm(&eigenvector.view(), 2)?;
if norm > F::epsilon() {
eigenvector.mapv_inplace(|x| x / norm);
}
eigenvectors.column_mut(i).assign(&eigenvector);
}
let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));
Ok((eigenvalues, complex_eigenvectors))
} else {
let real_part = trace / F::from(2.0).expect("Failed to convert constant to float");
let imag_part =
(-discriminant).sqrt() / F::from(2.0).expect("Failed to convert constant to float");
eigenvalues[0] = Complex::new(real_part, imag_part);
eigenvalues[1] = Complex::new(real_part, -imag_part);
let mut complex_eigenvectors = Array2::zeros((2, 2));
if a12 != F::zero() {
complex_eigenvectors[[0, 0]] = Complex::new(a12, F::zero());
complex_eigenvectors[[1, 0]] = Complex::new(eigenvalues[0].re - a11, eigenvalues[0].im);
complex_eigenvectors[[0, 1]] = Complex::new(a12, F::zero());
complex_eigenvectors[[1, 1]] = Complex::new(eigenvalues[1].re - a11, eigenvalues[1].im);
} else if a21 != F::zero() {
complex_eigenvectors[[0, 0]] = Complex::new(eigenvalues[0].re - a22, eigenvalues[0].im);
complex_eigenvectors[[1, 0]] = Complex::new(a21, F::zero());
complex_eigenvectors[[0, 1]] = Complex::new(eigenvalues[1].re - a22, eigenvalues[1].im);
complex_eigenvectors[[1, 1]] = Complex::new(a21, F::zero());
}
for i in 0..2 {
let mut norm_sq = Complex::new(F::zero(), F::zero());
for j in 0..2 {
norm_sq += complex_eigenvectors[[j, i]] * complex_eigenvectors[[j, i]].conj();
}
let norm = norm_sq.re.sqrt();
if norm > F::epsilon() {
for j in 0..2 {
complex_eigenvectors[[j, i]] /= Complex::new(norm, F::zero());
}
}
}
Ok((eigenvalues, complex_eigenvectors))
}
}
#[allow(dead_code)]
fn solve_2x2_symmetric_eigenvalue_problem<F>(
a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
let a11 = a[[0, 0]];
let a12 = a[[0, 1]];
let a22 = a[[1, 1]];
let trace = a11 + a22;
let det = a11 * a22 - a12 * a12;
let discriminant =
trace * trace - F::from(4.0).expect("Failed to convert constant to float") * det;
let sqrt_discriminant = discriminant.sqrt();
let lambda1 =
(trace + sqrt_discriminant) / F::from(2.0).expect("Failed to convert constant to float");
let lambda2 =
(trace - sqrt_discriminant) / F::from(2.0).expect("Failed to convert constant to float");
let (lambda_small, lambda_large) = if lambda1 <= lambda2 {
(lambda1, lambda2)
} else {
(lambda2, lambda1)
};
let mut eigenvalues = Array1::zeros(2);
eigenvalues[0] = lambda_small;
eigenvalues[1] = lambda_large;
let mut eigenvectors = Array2::zeros((2, 2));
if a12 != F::zero() {
eigenvectors[[0, 0]] = a12;
eigenvectors[[1, 0]] = lambda_small - a11;
} else {
eigenvectors[[0, 0]] = if (a11 - lambda_small).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
eigenvectors[[1, 0]] = if (a22 - lambda_small).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
}
let norm1 = (eigenvectors[[0, 0]] * eigenvectors[[0, 0]]
+ eigenvectors[[1, 0]] * eigenvectors[[1, 0]])
.sqrt();
if norm1 > F::epsilon() {
eigenvectors[[0, 0]] /= norm1;
eigenvectors[[1, 0]] /= norm1;
}
if a12 != F::zero() {
eigenvectors[[0, 1]] = a12;
eigenvectors[[1, 1]] = lambda_large - a11;
} else {
eigenvectors[[0, 1]] = if (a11 - lambda_large).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
eigenvectors[[1, 1]] = if (a22 - lambda_large).abs() < F::epsilon() {
F::one()
} else {
F::zero()
};
}
let norm2 = (eigenvectors[[0, 1]] * eigenvectors[[0, 1]]
+ eigenvectors[[1, 1]] * eigenvectors[[1, 1]])
.sqrt();
if norm2 > F::epsilon() {
eigenvectors[[0, 1]] /= norm2;
eigenvectors[[1, 1]] /= norm2;
}
Ok((eigenvalues, eigenvectors))
}
#[allow(dead_code)]
fn solve_3x3_symmetric_eigenvalue_problem<F>(
a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
let mut workmatrix = a.to_owned();
let mut q_total = Array2::eye(3);
let max_iter = 50;
let tol = F::from(1e-12).expect("Failed to convert constant to float");
for _ in 0..max_iter {
let off_diag =
workmatrix[[0, 1]].abs() + workmatrix[[0, 2]].abs() + workmatrix[[1, 2]].abs();
if off_diag < tol {
break;
}
let (q, r) = qr(&workmatrix.view(), None)?;
workmatrix = r.dot(&q);
q_total = q_total.dot(&q);
}
let mut eigenvalues = Array1::zeros(3);
for i in 0..3 {
eigenvalues[i] = workmatrix[[i, i]];
}
let mut indices = [0, 1, 2];
indices.sort_by(|&i, &j| {
eigenvalues[i]
.partial_cmp(&eigenvalues[j])
.expect("Operation failed")
});
let mut sorted_eigenvalues = Array1::zeros(3);
let mut sorted_eigenvectors = Array2::zeros((3, 3));
for (new_idx, &old_idx) in indices.iter().enumerate() {
sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
for i in 0..3 {
sorted_eigenvectors[[i, new_idx]] = q_total[[i, old_idx]];
}
}
Ok((sorted_eigenvalues, sorted_eigenvectors))
}
#[allow(dead_code)]
fn solve_4x4_symmetric_eigenvalue_problem<F>(
a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
let mut workmatrix = a.to_owned();
let mut q_total = Array2::eye(4);
let max_iter = 100;
let tol = F::from(1e-12).expect("Failed to convert constant to float");
for _ in 0..max_iter {
let mut off_diag = F::zero();
for i in 0..4 {
for j in (i + 1)..4 {
off_diag += workmatrix[[i, j]].abs();
}
}
if off_diag < tol {
break;
}
let (q, r) = qr(&workmatrix.view(), None)?;
workmatrix = r.dot(&q);
q_total = q_total.dot(&q);
}
let mut eigenvalues = Array1::zeros(4);
for i in 0..4 {
eigenvalues[i] = workmatrix[[i, i]];
}
let mut indices = [0, 1, 2, 3];
indices.sort_by(|&i, &j| {
eigenvalues[i]
.partial_cmp(&eigenvalues[j])
.expect("Operation failed")
});
let mut sorted_eigenvalues = Array1::zeros(4);
let mut sorted_eigenvectors = Array2::zeros((4, 4));
for (new_idx, &old_idx) in indices.iter().enumerate() {
sorted_eigenvalues[new_idx] = eigenvalues[old_idx];
for i in 0..4 {
sorted_eigenvectors[[i, new_idx]] = q_total[[i, old_idx]];
}
}
Ok((sorted_eigenvalues, sorted_eigenvectors))
}
#[allow(dead_code)]
fn solve_qr_algorithm<F>(a: &ArrayView2<F>) -> EigenResult<F>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
let mut a_k = a.to_owned();
let n = a.nrows();
let max_iter = 100;
let tol = F::epsilon() * F::from(100.0).expect("Failed to convert constant to float");
let mut eigenvalues = Array1::zeros(n);
let mut eigenvectors = Array2::eye(n);
for _iter in 0..max_iter {
let (q, r) = qr(&a_k.view(), None)?;
let a_next = r.dot(&q);
eigenvectors = eigenvectors.dot(&q);
let mut converged = true;
for i in 1..n {
if a_next[[i, i - 1]].abs() > tol {
converged = false;
break;
}
}
if converged {
for i in 0..n {
eigenvalues[i] = Complex::new(a_next[[i, i]], F::zero());
}
let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));
return Ok((eigenvalues, complex_eigenvectors));
}
a_k = a_next;
}
let mut eigenvals = Array1::zeros(n);
for i in 0..n {
eigenvals[i] = Complex::new(a_k[[i, i]], F::zero());
}
let complex_eigenvectors = eigenvectors.mapv(|x| Complex::new(x, F::zero()));
Ok((eigenvals, complex_eigenvectors))
}
#[allow(dead_code)]
fn solve_symmetric_with_power_iteration<F>(
a: &ArrayView2<F>,
) -> LinalgResult<(Array1<F>, Array2<F>)>
where
F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
use crate::decomposition::qr;
use crate::norm::vector_norm;
let n = a.nrows();
let max_iter = 1000;
let tolerance =
F::from(1e-10).unwrap_or_else(|| F::epsilon() * F::from(100.0).unwrap_or(F::one()));
let mut workmatrix = a.to_owned();
let mut q_accumulated = Array2::eye(n);
for iter in 0..max_iter {
let mut max_off_diag = F::zero();
for i in 0..n {
for j in 0..n {
if i != j {
let abs_val = workmatrix[[i, j]].abs();
if abs_val > max_off_diag {
max_off_diag = abs_val;
}
}
}
}
if max_off_diag < tolerance {
break;
}
let shift = if n >= 2 {
let a_nn = workmatrix[[n - 1, n - 1]];
let a_n1n1 = workmatrix[[n - 2, n - 2]];
let a_nn1 = workmatrix[[n - 1, n - 2]];
let b = a_nn + a_n1n1;
let c = a_nn * a_n1n1 - a_nn1 * a_nn1;
let discriminant = b * b - F::from(4.0).unwrap_or(F::one()) * c;
if discriminant >= F::zero() {
let sqrt_disc = discriminant.sqrt();
let lambda1 = (b + sqrt_disc) / F::from(2.0).unwrap_or(F::one());
let lambda2 = (b - sqrt_disc) / F::from(2.0).unwrap_or(F::one());
if (lambda1 - a_nn).abs() < (lambda2 - a_nn).abs() {
lambda1
} else {
lambda2
}
} else {
a_nn
}
} else {
workmatrix[[n - 1, n - 1]]
};
for i in 0..n {
workmatrix[[i, i]] -= shift;
}
let (q, r) = qr(&workmatrix.view(), None).map_err(|_| {
LinalgError::ConvergenceError(format!(
"QR decomposition failed in symmetric eigenvalue computation at iteration {iter}"
))
})?;
workmatrix = r.dot(&q);
for i in 0..n {
workmatrix[[i, i]] += shift;
}
q_accumulated = q_accumulated.dot(&q);
if iter > 10 && max_off_diag < tolerance * F::from(10.0).unwrap_or(F::one()) {
break;
}
}
let mut eigenvalues = Array1::zeros(n);
for i in 0..n {
eigenvalues[i] = workmatrix[[i, i]];
}
let mut indices: Vec<usize> = (0..n).collect();
indices.sort_by(|&i, &j| {
eigenvalues[j]
.partial_cmp(&eigenvalues[i])
.unwrap_or(std::cmp::Ordering::Equal)
});
let sorted_eigenvalues = Array1::from_iter(indices.iter().map(|&i| eigenvalues[i]));
let mut sorted_eigenvectors = Array2::zeros((n, n));
for (new_idx, &old_idx) in indices.iter().enumerate() {
for i in 0..n {
sorted_eigenvectors[[i, new_idx]] = q_accumulated[[i, old_idx]];
}
}
for j in 0..n {
let mut col = sorted_eigenvectors.column_mut(j);
let norm = vector_norm(&col.to_owned().view(), 2)?;
if norm > F::epsilon() {
col.mapv_inplace(|x| x / norm);
}
let mut max_idx = 0;
let mut max_abs = F::zero();
for i in 0..n {
let abs_val = col[i].abs();
if abs_val > max_abs {
max_abs = abs_val;
max_idx = i;
}
}
if col[max_idx] < F::zero() {
col.mapv_inplace(|x| x * (-F::one()));
}
}
Ok((sorted_eigenvalues, sorted_eigenvectors))
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_1x1matrix() {
        // A 1x1 matrix is its own eigenvalue with eigenvector [1].
        let a = array![[3.0_f64]];
        let (eigenvalues, eigenvectors) = eig(&a.view(), None).expect("Operation failed");
        assert_relative_eq!(eigenvalues[0].re, 3.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvalues[0].im, 0.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvectors[[0, 0]].re, 1.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvectors[[0, 0]].im, 0.0, epsilon = 1e-10);
    }

    #[test]
    fn test_2x2_diagonalmatrix() {
        let a = array![[3.0_f64, 0.0], [0.0, 4.0]];
        // General solver: eigenvalues of a real diagonal matrix are real.
        let (eigenvalues, _eigenvectors) = eig(&a.view(), None).expect("Operation failed");
        assert_relative_eq!(eigenvalues[0].im, 0.0, epsilon = 1e-10);
        assert_relative_eq!(eigenvalues[1].im, 0.0, epsilon = 1e-10);
        // Symmetric solver: values {3, 4} in either order.
        let (eigenvalues, _eigenvectors) = eigh(&a.view(), None).expect("Operation failed");
        assert!(
            (eigenvalues[0] - 3.0).abs() < 1e-10 && (eigenvalues[1] - 4.0).abs() < 1e-10
                || (eigenvalues[1] - 3.0).abs() < 1e-10 && (eigenvalues[0] - 4.0).abs() < 1e-10
        );
    }

    #[test]
    fn test_2x2_symmetricmatrix() {
        // Rank-1 matrix with eigenvalues {5, 0}.
        let a = array![[1.0, 2.0], [2.0, 4.0]];
        let (eigenvalues, eigenvectors) = eigh(&a.view(), None).expect("Operation failed");
        assert!(
            (eigenvalues[0] - 5.0).abs() < 1e-10 && eigenvalues[1].abs() < 1e-10
                || (eigenvalues[1] - 5.0).abs() < 1e-10 && eigenvalues[0].abs() < 1e-10
        );
        // Columns must be mutually orthogonal...
        let dot_product = eigenvectors[[0, 0]] * eigenvectors[[0, 1]]
            + eigenvectors[[1, 0]] * eigenvectors[[1, 1]];
        assert!(
            (dot_product).abs() < 1e-10,
            "Eigenvectors should be orthogonal"
        );
        // ...and unit length.
        let norm1 = (eigenvectors[[0, 0]] * eigenvectors[[0, 0]]
            + eigenvectors[[1, 0]] * eigenvectors[[1, 0]])
            .sqrt();
        let norm2 = (eigenvectors[[0, 1]] * eigenvectors[[0, 1]]
            + eigenvectors[[1, 1]] * eigenvectors[[1, 1]])
            .sqrt();
        assert!(
            (norm1 - 1.0).abs() < 1e-10,
            "First eigenvector should be normalized"
        );
        assert!(
            (norm2 - 1.0).abs() < 1e-10,
            "Second eigenvector should be normalized"
        );
    }

    #[test]
    fn test_power_iteration() {
        // Dominant eigenpair of [[3,1],[1,3]] is (4, (1,1)/√2).
        let a = array![[3.0, 1.0], [1.0, 3.0]];
        let (eigenvalue, eigenvector) =
            power_iteration(&a.view(), 100, 1e-10).expect("Operation failed");
        assert_relative_eq!(eigenvalue, 4.0, epsilon = 1e-8);
        let norm = (eigenvector[0] * eigenvector[0] + eigenvector[1] * eigenvector[1]).sqrt();
        assert_relative_eq!(norm, 1.0, epsilon = 1e-10);
        // Residual check: A·v ≈ λ·v componentwise.
        let av = a.dot(&eigenvector);
        let lambda_v = &eigenvector * eigenvalue;
        let mut max_diff = 0.0;
        for i in 0..eigenvector.len() {
            let diff = (av[i] - lambda_v[i]).abs();
            if diff > max_diff {
                max_diff = diff;
            }
        }
        assert!(
            max_diff < 1e-5,
            "A*v should approximately equal lambda*v, max diff: {}",
            max_diff
        );
    }
}
}