use crate::csr::CsrMatrix;
use crate::error::{SparseError, SparseResult};
use scirs2_core::ndarray::{Array1, ScalarOperand};
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::fmt::Debug;
use std::iter::Sum;
use std::ops::{AddAssign, MulAssign};
/// Shared configuration for the iterative solvers in this module.
#[derive(Debug, Clone)]
pub struct IterativeSolverConfig {
    /// Maximum number of iterations before giving up.
    pub max_iter: usize,
    /// Relative convergence tolerance: iteration stops when the residual
    /// 2-norm drops below `tol * ||b||`.
    pub tol: f64,
    /// When true, solvers may emit per-iteration progress diagnostics.
    pub verbose: bool,
}
impl Default for IterativeSolverConfig {
fn default() -> Self {
Self {
max_iter: 1000,
tol: 1e-10,
verbose: false,
}
}
}
/// Outcome of an iterative solve.
#[derive(Debug, Clone)]
pub struct SolverResult<F> {
    /// Final iterate (approximate solution of `A x = b`).
    pub solution: Array1<F>,
    /// Number of iterations actually performed.
    pub n_iter: usize,
    /// 2-norm of the final residual `b - A x`.
    pub residual_norm: F,
    /// True if the residual dropped below the configured tolerance.
    pub converged: bool,
}
/// A left preconditioner `M`: `apply` returns (an approximation of)
/// `M^{-1} r` for a residual vector `r`.
pub trait Preconditioner<F: Float> {
    /// Applies the preconditioner to `r`, returning the preconditioned vector.
    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>>;
}
/// Diagonal (Jacobi) preconditioner: `M = diag(A)`.
pub struct JacobiPreconditioner<F> {
    // Precomputed reciprocals of the matrix diagonal, so `apply` is a
    // single elementwise multiply.
    diagonal_inv: Array1<F>,
}
impl<F: Float + SparseElement + Debug> JacobiPreconditioner<F> {
    /// Builds a Jacobi preconditioner from the diagonal of a square matrix.
    ///
    /// # Errors
    /// Fails when `matrix` is not square or any diagonal entry is
    /// (near-)zero.
    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
        let dim = matrix.rows();
        if dim != matrix.cols() {
            return Err(SparseError::ValueError(
                "Matrix must be square for Jacobi preconditioner".to_string(),
            ));
        }
        let mut inverse = Array1::zeros(dim);
        for i in 0..dim {
            let pivot = matrix.get(i, i);
            if pivot.abs() < F::epsilon() {
                return Err(SparseError::ValueError(format!(
                    "Zero diagonal element at row {i} prevents Jacobi preconditioner"
                )));
            }
            inverse[i] = F::sparse_one() / pivot;
        }
        Ok(Self {
            diagonal_inv: inverse,
        })
    }
    /// Builds a Jacobi preconditioner directly from a diagonal vector.
    ///
    /// # Errors
    /// Fails when any entry of `diagonal` is (near-)zero.
    pub fn from_diagonal(diagonal: Array1<F>) -> SparseResult<Self> {
        let dim = diagonal.len();
        let mut inverse = Array1::zeros(dim);
        for i in 0..dim {
            let entry = diagonal[i];
            if entry.abs() < F::epsilon() {
                return Err(SparseError::ValueError(format!(
                    "Zero diagonal element at position {i}"
                )));
            }
            inverse[i] = F::sparse_one() / entry;
        }
        Ok(Self {
            diagonal_inv: inverse,
        })
    }
}
impl<F: Float + SparseElement> Preconditioner<F> for JacobiPreconditioner<F> {
    /// Scales each residual entry by the stored inverse diagonal.
    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
        let expected = self.diagonal_inv.len();
        if r.len() == expected {
            Ok(r * &self.diagonal_inv)
        } else {
            Err(SparseError::DimensionMismatch {
                expected,
                found: r.len(),
            })
        }
    }
}
/// Incomplete LU preconditioner with zero fill-in (ILU(0)).
///
/// Stores the L and U factors as separate CSR-like triples. L holds the
/// strictly lower triangle (its unit diagonal is implicit); U holds the
/// diagonal and strictly upper triangle.
pub struct ILU0Preconditioner<F> {
    // CSR values of the strictly lower factor L (unit diagonal implied).
    l_data: Vec<F>,
    // CSR values of the upper factor U (includes the diagonal).
    u_data: Vec<F>,
    // Column indices for L, parallel to `l_data`.
    l_indices: Vec<usize>,
    // Column indices for U, parallel to `u_data`.
    u_indices: Vec<usize>,
    // Row pointers for L (length n + 1).
    l_indptr: Vec<usize>,
    // Row pointers for U (length n + 1).
    u_indptr: Vec<usize>,
    // Matrix dimension.
    n: usize,
}
impl<F: Float + NumAssign + Sum + Debug + SparseElement + 'static> ILU0Preconditioner<F> {
    /// Computes the ILU(0) factorization of a square CSR matrix.
    ///
    /// The elimination is done in-place on a copy of the CSR data, so the
    /// factors inherit exactly the sparsity pattern of `matrix` (no fill-in).
    /// Updates that would fall outside the pattern are silently dropped,
    /// which is what makes the factorization "incomplete".
    ///
    /// # Errors
    /// Fails for a non-square matrix, a structurally missing diagonal entry,
    /// or a (near-)zero pivot.
    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "Matrix must be square for ILU(0) preconditioner".to_string(),
            ));
        }
        // Work on a copy: `data` is overwritten with the L/U values while
        // the sparsity structure (indices/indptr) stays fixed.
        let mut data = matrix.data.clone();
        let indices = matrix.indices.clone();
        let indptr = matrix.indptr.clone();
        for k in 0..n {
            let k_diag_idx = find_csr_diag_index(&indices, &indptr, k)?;
            let k_diag = data[k_diag_idx];
            if k_diag.abs() < F::epsilon() {
                return Err(SparseError::ValueError(format!(
                    "Zero pivot at row {k} in ILU(0) factorization"
                )));
            }
            // Eliminate column k from every later row that has an entry there.
            for i in (k + 1)..n {
                let row_start = indptr[i];
                let row_end = indptr[i + 1];
                // Look for A[i, k] within row i.
                // NOTE(review): the early `> k` exit assumes column indices
                // are sorted ascending within each row — confirm this is a
                // CsrMatrix invariant.
                let mut k_pos = None;
                for pos in row_start..row_end {
                    if indices[pos] == k {
                        k_pos = Some(pos);
                        break;
                    }
                    if indices[pos] > k {
                        break;
                    }
                }
                if let Some(ki_idx) = k_pos {
                    // L[i, k] = A[i, k] / U[k, k], stored in place.
                    let mult = data[ki_idx] / k_diag;
                    data[ki_idx] = mult;
                    let k_row_start = indptr[k];
                    let k_row_end = indptr[k + 1];
                    // Update A[i, j] -= L[i, k] * U[k, j] only where A[i, j]
                    // already exists in the pattern (zero fill-in).
                    for kj_idx in k_row_start..k_row_end {
                        let j = indices[kj_idx];
                        if j <= k {
                            continue;
                        }
                        for ij_idx in row_start..row_end {
                            if indices[ij_idx] == j {
                                let kj_val = data[kj_idx];
                                data[ij_idx] -= mult * kj_val;
                                break;
                            }
                        }
                    }
                }
            }
        }
        // Split the factored values into separate CSR triples:
        // strictly-lower entries go to L, diagonal and upper go to U.
        let mut l_data = Vec::new();
        let mut u_data = Vec::new();
        let mut l_indices = Vec::new();
        let mut u_indices = Vec::new();
        let mut l_indptr = vec![0usize];
        let mut u_indptr = vec![0usize];
        for i in 0..n {
            let row_start = indptr[i];
            let row_end = indptr[i + 1];
            for pos in row_start..row_end {
                let col = indices[pos];
                let val = data[pos];
                if col < i {
                    l_indices.push(col);
                    l_data.push(val);
                } else {
                    u_indices.push(col);
                    u_data.push(val);
                }
            }
            l_indptr.push(l_indices.len());
            u_indptr.push(u_indices.len());
        }
        Ok(Self {
            l_data,
            u_data,
            l_indices,
            u_indices,
            l_indptr,
            u_indptr,
            n,
        })
    }
}
impl<F: Float + NumAssign + Sum + Debug + SparseElement + 'static> Preconditioner<F>
    for ILU0Preconditioner<F>
{
    /// Applies `(L U)^{-1}` to `r` via two triangular solves:
    /// forward substitution `L y = r` (L has an implicit unit diagonal),
    /// then backward substitution `U z = y`.
    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
        if r.len() != self.n {
            return Err(SparseError::DimensionMismatch {
                expected: self.n,
                found: r.len(),
            });
        }
        // Forward solve: y[i] = r[i] - sum_{j<i} L[i,j] * y[j].
        let mut y = Array1::zeros(self.n);
        for i in 0..self.n {
            y[i] = r[i];
            let start = self.l_indptr[i];
            let end = self.l_indptr[i + 1];
            for pos in start..end {
                let col = self.l_indices[pos];
                y[i] = y[i] - self.l_data[pos] * y[col];
            }
        }
        // Backward solve: z[i] = (y[i] - sum_{j>i} U[i,j] * z[j]) / U[i,i].
        let mut z = Array1::zeros(self.n);
        for i in (0..self.n).rev() {
            z[i] = y[i];
            let start = self.u_indptr[i];
            let end = self.u_indptr[i + 1];
            // Defaults to 1 if the diagonal entry is somehow absent from U.
            let mut diag_val = F::sparse_one();
            for pos in start..end {
                let col = self.u_indices[pos];
                if col == i {
                    diag_val = self.u_data[pos];
                } else if col > i {
                    z[i] = z[i] - self.u_data[pos] * z[col];
                }
            }
            z[i] /= diag_val;
        }
        Ok(z)
    }
}
/// Symmetric Successive Over-Relaxation (SSOR) preconditioner.
pub struct SSORPreconditioner<F> {
    // Relaxation parameter, required to lie in the open interval (0, 2).
    omega: F,
    // Owned copy of the system matrix; `apply` sweeps its rows directly.
    matrix: CsrMatrix<F>,
    // Cached matrix diagonal (validated nonzero at construction).
    diagonal: Vec<F>,
}
impl<F: Float + NumAssign + Sum + Debug + SparseElement + 'static> SSORPreconditioner<F> {
    /// Builds an SSOR preconditioner for a square matrix with relaxation
    /// parameter `omega`.
    ///
    /// # Errors
    /// Fails when `omega` is outside (0, 2), the matrix is not square, or
    /// any diagonal entry is (near-)zero.
    pub fn new(matrix: CsrMatrix<F>, omega: F) -> SparseResult<Self> {
        let two = F::from(2.0).ok_or_else(|| {
            SparseError::ValueError("Failed to convert 2.0 to float type".to_string())
        })?;
        if omega <= F::sparse_zero() || omega >= two {
            return Err(SparseError::ValueError(
                "SSOR omega must be in the open interval (0, 2)".to_string(),
            ));
        }
        let dim = matrix.rows();
        if dim != matrix.cols() {
            return Err(SparseError::ValueError(
                "Matrix must be square for SSOR preconditioner".to_string(),
            ));
        }
        let mut diag = vec![F::sparse_zero(); dim];
        for i in 0..dim {
            let pivot = matrix.get(i, i);
            if pivot.abs() < F::epsilon() {
                return Err(SparseError::ValueError(format!(
                    "Zero diagonal element at row {i} prevents SSOR preconditioner"
                )));
            }
            diag[i] = pivot;
        }
        Ok(Self {
            omega,
            matrix,
            diagonal: diag,
        })
    }
}
impl<F: Float + NumAssign + Sum + Debug + SparseElement + 'static> Preconditioner<F>
    for SSORPreconditioner<F>
{
    /// Applies the inverse SSOR operator
    /// `M^{-1} = omega (2 - omega) (D + omega U)^{-1} D (D + omega L)^{-1}`
    /// as a forward sweep, a diagonal scaling, and a backward sweep.
    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
        let n = self.matrix.rows();
        if r.len() != n {
            return Err(SparseError::DimensionMismatch {
                expected: n,
                found: r.len(),
            });
        }
        let two = F::from(2.0)
            .ok_or_else(|| SparseError::ValueError("Failed to convert 2.0 to float".to_string()))?;
        // Scalar factor omega * (2 - omega), folded into the forward sweep.
        let scale = self.omega * (two - self.omega);
        // Forward sweep: solve (D + omega L) y = scale * r.
        let mut y = Array1::zeros(n);
        for i in 0..n {
            let mut sum = r[i] * scale;
            let range = self.matrix.row_range(i);
            let row_indices = &self.matrix.indices[range.clone()];
            let row_data = &self.matrix.data[range];
            for (idx, &col) in row_indices.iter().enumerate() {
                if col < i {
                    sum -= self.omega * row_data[idx] * y[col];
                }
            }
            y[i] = sum / self.diagonal[i];
        }
        // Middle scaling: z = D y.
        let mut z = Array1::zeros(n);
        for i in 0..n {
            z[i] = y[i] * self.diagonal[i];
        }
        // Backward sweep: solve (D + omega U) w = z.
        let mut w = Array1::zeros(n);
        for i in (0..n).rev() {
            let mut sum = z[i];
            let range = self.matrix.row_range(i);
            let row_indices = &self.matrix.indices[range.clone()];
            let row_data = &self.matrix.data[range];
            for (idx, &col) in row_indices.iter().enumerate() {
                if col > i {
                    sum -= self.omega * row_data[idx] * w[col];
                }
            }
            w[i] = sum / self.diagonal[i];
        }
        Ok(w)
    }
}
/// Sparse matrix-vector product `y = A x` for a CSR matrix.
///
/// # Errors
/// Returns a dimension mismatch when `x.len()` differs from the column
/// count of `a`.
fn spmv<F: Float + NumAssign + Sum + SparseElement + 'static>(
    a: &CsrMatrix<F>,
    x: &Array1<F>,
) -> SparseResult<Array1<F>> {
    let (rows, cols) = a.shape();
    if x.len() != cols {
        return Err(SparseError::DimensionMismatch {
            expected: cols,
            found: x.len(),
        });
    }
    let mut y = Array1::zeros(rows);
    for row in 0..rows {
        let range = a.row_range(row);
        // Dot product of the sparse row with the dense vector.
        y[row] = a.indices[range.clone()]
            .iter()
            .zip(a.data[range].iter())
            .map(|(&col, &val)| val * x[col])
            .fold(F::sparse_zero(), |acc, term| acc + term);
    }
    Ok(y)
}
/// Dot product of two dense vectors (caller guarantees equal lengths).
#[inline]
fn dot_arr<F: Float + Sum>(a: &Array1<F>, b: &Array1<F>) -> F {
    a.iter().zip(b.iter()).map(|(&x, &y)| x * y).sum()
}
/// Euclidean (2-) norm of a dense vector.
#[inline]
fn norm2_arr<F: Float + Sum>(v: &Array1<F>) -> F {
    v.iter().map(|&x| x * x).sum::<F>().sqrt()
}
/// In-place update `y += alpha * x` (caller guarantees equal lengths).
#[inline]
fn axpy<F: Float>(y: &mut Array1<F>, alpha: F, x: &Array1<F>) {
    y.iter_mut()
        .zip(x.iter())
        .for_each(|(dst, &src)| *dst = *dst + alpha * src);
}
/// Solves `A x = b` with the (optionally preconditioned) Conjugate Gradient
/// method, starting from `x = 0`.
///
/// `a` should be symmetric positive definite for the method's convergence
/// theory to apply; a non-positive curvature `p^T A p` is treated as a
/// breakdown. Convergence is declared when `||r|| <= config.tol * ||b||`.
///
/// # Errors
/// Returns an error for a non-square matrix, a length mismatch between `a`
/// and `b`, or a tolerance not representable in `F`.
pub fn cg<F>(
    a: &CsrMatrix<F>,
    b: &Array1<F>,
    config: &IterativeSolverConfig,
    precond: Option<&dyn Preconditioner<F>>,
) -> SparseResult<SolverResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let (m, n) = a.shape();
    if m != n {
        return Err(SparseError::ValueError(
            "CG requires a square matrix".to_string(),
        ));
    }
    if b.len() != n {
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: b.len(),
        });
    }
    let tol = F::from(config.tol).ok_or_else(|| {
        SparseError::ValueError("Failed to convert tolerance to float type".to_string())
    })?;
    let mut x = Array1::zeros(n);
    let mut r = b.clone();
    let bnorm = norm2_arr(b);
    // A (near-)zero right-hand side is solved exactly by x = 0.
    if bnorm <= F::epsilon() {
        return Ok(SolverResult {
            solution: x,
            n_iter: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
        });
    }
    // Relative stopping criterion.
    let tolerance = tol * bnorm;
    let mut z = match precond {
        Some(pc) => pc.apply(&r)?,
        None => r.clone(),
    };
    let mut p = z.clone();
    let mut rz = dot_arr(&r, &z);
    for k in 0..config.max_iter {
        let ap = spmv(a, &p)?;
        let pap = dot_arr(&p, &ap);
        // Non-positive curvature indicates a non-SPD matrix or breakdown.
        if pap <= F::sparse_zero() {
            return Ok(SolverResult {
                solution: x,
                n_iter: k,
                residual_norm: norm2_arr(&r),
                converged: false,
            });
        }
        let alpha = rz / pap;
        axpy(&mut x, alpha, &p);
        axpy(&mut r, -alpha, &ap);
        let rnorm = norm2_arr(&r);
        // Previously this block was empty, so the `verbose` flag had no
        // effect; emit per-iteration progress as the flag promises.
        if config.verbose {
            eprintln!("CG iteration {}: residual norm = {:?}", k + 1, rnorm);
        }
        if rnorm <= tolerance {
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: rnorm,
                converged: true,
            });
        }
        z = match precond {
            Some(pc) => pc.apply(&r)?,
            None => r.clone(),
        };
        let rz_new = dot_arr(&r, &z);
        // Standard direction update: p = z + beta * p.
        let beta = rz_new / rz;
        for (pi, &zi) in p.iter_mut().zip(z.iter()) {
            *pi = zi + beta * *pi;
        }
        rz = rz_new;
    }
    // Iteration budget exhausted; report whatever residual we reached.
    let rnorm = norm2_arr(&r);
    Ok(SolverResult {
        solution: x,
        n_iter: config.max_iter,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
    })
}
/// Solves `A x = b` with the (optionally preconditioned) BiCGSTAB method,
/// starting from `x = 0`. Unlike CG this does not require symmetry.
///
/// Convergence is declared when `||r|| <= config.tol * ||b||`. Several
/// breakdown conditions (vanishing `rho`, `r_hat . v`, `||t||`, or `omega`)
/// end the iteration early with `converged: false`.
///
/// Improvement: `config.verbose` was previously ignored by this solver;
/// it now emits per-iteration residuals, consistent with `cg`.
///
/// # Errors
/// Returns an error for a non-square matrix, a length mismatch between `a`
/// and `b`, or a tolerance not representable in `F`.
pub fn bicgstab<F>(
    a: &CsrMatrix<F>,
    b: &Array1<F>,
    config: &IterativeSolverConfig,
    precond: Option<&dyn Preconditioner<F>>,
) -> SparseResult<SolverResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let (m, n) = a.shape();
    if m != n {
        return Err(SparseError::ValueError(
            "BiCGSTAB requires a square matrix".to_string(),
        ));
    }
    if b.len() != n {
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: b.len(),
        });
    }
    let tol = F::from(config.tol).ok_or_else(|| {
        SparseError::ValueError("Failed to convert tolerance to float type".to_string())
    })?;
    let mut x = Array1::zeros(n);
    let mut r = b.clone();
    let bnorm = norm2_arr(b);
    // A (near-)zero right-hand side is solved exactly by x = 0.
    if bnorm <= F::epsilon() {
        return Ok(SolverResult {
            solution: x,
            n_iter: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
        });
    }
    let tolerance = tol * bnorm;
    // Fixed shadow residual r_hat = r0 for the bi-orthogonalization.
    let r_hat = r.clone();
    let mut rho = F::sparse_one();
    let mut alpha = F::sparse_one();
    let mut omega = F::sparse_one();
    let mut v = Array1::zeros(n);
    let mut p = Array1::zeros(n);
    // Breakdown threshold: a small multiple of machine epsilon.
    let ten_eps = F::epsilon()
        * F::from(10.0).ok_or_else(|| {
            SparseError::ValueError("Failed to convert 10.0 to float".to_string())
        })?;
    for k in 0..config.max_iter {
        let rho_new = dot_arr(&r_hat, &r);
        // rho ~ 0: the shadow residual has become orthogonal to r — breakdown.
        if rho_new.abs() < ten_eps {
            return Ok(SolverResult {
                solution: x,
                n_iter: k,
                residual_norm: norm2_arr(&r),
                converged: false,
            });
        }
        let beta = (rho_new / rho) * (alpha / omega);
        for i in 0..n {
            p[i] = r[i] + beta * (p[i] - omega * v[i]);
        }
        let p_hat = match precond {
            Some(pc) => pc.apply(&p)?,
            None => p.clone(),
        };
        v = spmv(a, &p_hat)?;
        let den = dot_arr(&r_hat, &v);
        // den ~ 0 would make alpha blow up — breakdown.
        if den.abs() < ten_eps {
            return Ok(SolverResult {
                solution: x,
                n_iter: k,
                residual_norm: norm2_arr(&r),
                converged: false,
            });
        }
        alpha = rho_new / den;
        // First half-step residual: s = r - alpha * v.
        let mut s = r.clone();
        axpy(&mut s, -alpha, &v);
        let snorm = norm2_arr(&s);
        if snorm <= tolerance {
            // Converged after the half step; apply it and stop.
            axpy(&mut x, alpha, &p_hat);
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: snorm,
                converged: true,
            });
        }
        let s_hat = match precond {
            Some(pc) => pc.apply(&s)?,
            None => s.clone(),
        };
        let t = spmv(a, &s_hat)?;
        let tt = dot_arr(&t, &t);
        // ||t||^2 ~ 0: cannot form the stabilization step — stop with the
        // half-step update applied.
        if tt < ten_eps {
            axpy(&mut x, alpha, &p_hat);
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: snorm,
                converged: false,
            });
        }
        // Stabilization parameter minimizing ||s - omega t||.
        omega = dot_arr(&t, &s) / tt;
        axpy(&mut x, alpha, &p_hat);
        axpy(&mut x, omega, &s_hat);
        r = s;
        axpy(&mut r, -omega, &t);
        let rnorm = norm2_arr(&r);
        if config.verbose {
            eprintln!("BiCGSTAB iteration {}: residual norm = {:?}", k + 1, rnorm);
        }
        if rnorm <= tolerance {
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: rnorm,
                converged: true,
            });
        }
        // omega ~ 0 would poison the next beta — breakdown.
        if omega.abs() < ten_eps {
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: rnorm,
                converged: false,
            });
        }
        rho = rho_new;
    }
    // Iteration budget exhausted; report whatever residual we reached.
    let rnorm = norm2_arr(&r);
    Ok(SolverResult {
        solution: x,
        n_iter: config.max_iter,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
    })
}
/// Solves `A x = b` with restarted GMRES(`restart`), starting from `x = 0`.
///
/// Builds a Krylov basis with Arnoldi (modified Gram-Schmidt) and keeps the
/// Hessenberg least-squares problem triangular with Givens rotations, so the
/// residual norm is available each inner step without forming the solution.
/// When `precond` is given it is applied on the left, so the residual test
/// inside the restart loop is on the *preconditioned* residual.
///
/// NOTE(review): `config.verbose` is not used by this solver — confirm
/// whether progress output should be added for consistency with `cg`.
///
/// # Errors
/// Returns an error for a non-square matrix, a length mismatch between `a`
/// and `b`, or a tolerance not representable in `F`.
pub fn gmres<F>(
    a: &CsrMatrix<F>,
    b: &Array1<F>,
    config: &IterativeSolverConfig,
    restart: usize,
    precond: Option<&dyn Preconditioner<F>>,
) -> SparseResult<SolverResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + ScalarOperand + Debug + 'static,
{
    let (m_rows, n) = a.shape();
    if m_rows != n {
        return Err(SparseError::ValueError(
            "GMRES requires a square matrix".to_string(),
        ));
    }
    if b.len() != n {
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: b.len(),
        });
    }
    let tol = F::from(config.tol).ok_or_else(|| {
        SparseError::ValueError("Failed to convert tolerance to float type".to_string())
    })?;
    // Clamp the restart length to [1, n].
    let restart_dim = restart.min(n).max(1);
    let mut x = Array1::zeros(n);
    let bnorm = norm2_arr(b);
    // A (near-)zero right-hand side is solved exactly by x = 0.
    if bnorm <= F::epsilon() {
        return Ok(SolverResult {
            solution: x,
            n_iter: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
        });
    }
    let tolerance = tol * bnorm;
    // Breakdown threshold for the Arnoldi subdiagonal.
    let ten_eps = F::epsilon()
        * F::from(10.0).ok_or_else(|| {
            SparseError::ValueError("Failed to convert 10.0 to float".to_string())
        })?;
    let mut total_iter = 0usize;
    // Outer restart loop: recompute the true residual and rebuild the basis.
    while total_iter < config.max_iter {
        let ax = spmv(a, &x)?;
        let mut r = b.clone();
        axpy(&mut r, -F::sparse_one(), &ax);
        r = match precond {
            Some(pc) => pc.apply(&r)?,
            None => r,
        };
        let mut rnorm = norm2_arr(&r);
        if rnorm <= tolerance {
            return Ok(SolverResult {
                solution: x,
                n_iter: total_iter,
                residual_norm: rnorm,
                converged: true,
            });
        }
        // Krylov basis, Hessenberg matrix, Givens parameters, and the
        // rotated right-hand side g of the least-squares problem.
        let mut v_basis: Vec<Array1<F>> = Vec::with_capacity(restart_dim + 1);
        v_basis.push(&r / rnorm);
        let mut h = vec![vec![F::sparse_zero(); restart_dim]; restart_dim + 1];
        let mut cs = vec![F::sparse_zero(); restart_dim];
        let mut sn = vec![F::sparse_zero(); restart_dim];
        let mut g = vec![F::sparse_zero(); restart_dim + 1];
        g[0] = rnorm;
        let mut inner_iter = 0usize;
        // Inner Arnoldi loop, bounded by both the restart length and the
        // global iteration budget.
        while inner_iter < restart_dim && total_iter + inner_iter < config.max_iter {
            let j = inner_iter;
            let mut w = spmv(a, &v_basis[j])?;
            w = match precond {
                Some(pc) => pc.apply(&w)?,
                None => w,
            };
            // Modified Gram-Schmidt orthogonalization against the basis.
            for i in 0..=j {
                h[i][j] = dot_arr(&v_basis[i], &w);
                axpy(&mut w, -h[i][j], &v_basis[i]);
            }
            h[j + 1][j] = norm2_arr(&w);
            // (Near-)zero subdiagonal: lucky breakdown — the Krylov space
            // is invariant; stop expanding the basis.
            if h[j + 1][j] < ten_eps {
                inner_iter += 1;
                break;
            }
            v_basis.push(&w / h[j + 1][j]);
            // Apply all previous Givens rotations to the new column of H.
            for i in 0..j {
                let temp = cs[i] * h[i][j] + sn[i] * h[i + 1][j];
                h[i + 1][j] = -sn[i] * h[i][j] + cs[i] * h[i + 1][j];
                h[i][j] = temp;
            }
            // New rotation annihilating the subdiagonal entry.
            let (c_val, s_val, r_val) = givens_rotation(h[j][j], h[j + 1][j]);
            cs[j] = c_val;
            sn[j] = s_val;
            h[j][j] = r_val;
            h[j + 1][j] = F::sparse_zero();
            // Rotate g as well; |g[j+1]| is the current residual norm.
            let temp = cs[j] * g[j] + sn[j] * g[j + 1];
            g[j + 1] = -sn[j] * g[j] + cs[j] * g[j + 1];
            g[j] = temp;
            inner_iter += 1;
            rnorm = g[j + 1].abs();
            if rnorm <= tolerance {
                break;
            }
        }
        // Back-substitution on the triangularized system H y = g.
        let m_dim = inner_iter;
        let mut y_vec = vec![F::sparse_zero(); m_dim];
        for i in (0..m_dim).rev() {
            y_vec[i] = g[i];
            for jj in (i + 1)..m_dim {
                y_vec[i] = y_vec[i] - h[i][jj] * y_vec[jj];
            }
            // Guard against a (near-)singular diagonal entry.
            if h[i][i].abs() < ten_eps {
                y_vec[i] = F::sparse_zero();
            } else {
                y_vec[i] /= h[i][i];
            }
        }
        // Update the iterate: x += V_m y.
        for (i, &yi) in y_vec.iter().enumerate() {
            axpy(&mut x, yi, &v_basis[i]);
        }
        total_iter += inner_iter;
        if rnorm <= tolerance {
            return Ok(SolverResult {
                solution: x,
                n_iter: total_iter,
                residual_norm: rnorm,
                converged: true,
            });
        }
    }
    // Budget exhausted: report the true (unpreconditioned) residual.
    let ax = spmv(a, &x)?;
    let mut r_final = b.clone();
    axpy(&mut r_final, -F::sparse_one(), &ax);
    let rnorm = norm2_arr(&r_final);
    Ok(SolverResult {
        solution: x,
        n_iter: total_iter,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
    })
}
/// Computes a Givens rotation `(c, s, r)` such that
/// `[c s; -s c] * [a; b] = [r; 0]`, using the numerically stable
/// ratio-of-smaller-over-larger formulation.
fn givens_rotation<F: Float + SparseElement>(a: F, b: F) -> (F, F, F) {
    let zero = F::sparse_zero();
    let one = F::sparse_one();
    let sign = |v: F| if v >= zero { one } else { -one };
    // Degenerate cases: one component already zero.
    if b == zero {
        return (sign(a), zero, a.abs());
    }
    if a == zero {
        return (zero, sign(b), b.abs());
    }
    if a.abs() >= b.abs() {
        // |a| dominates: form tau = b/a so tau^2 cannot overflow first.
        let tau = b / a;
        let c = sign(a) / (one + tau * tau).sqrt();
        let s = c * tau;
        (c, s, a / c)
    } else {
        // |b| dominates: mirror of the branch above.
        let tau = a / b;
        let s = sign(b) / (one + tau * tau).sqrt();
        let c = s * tau;
        (c, s, b / s)
    }
}
/// Solves `A x = b` with Chebyshev iteration, starting from `x = 0`.
///
/// Requires a priori bounds `0 < lambda_min < lambda_max` on the spectrum
/// of `A`; unlike CG no inner products are needed inside the loop. The
/// recurrence uses the half-sum `d` and half-difference `c` of the bounds.
///
/// NOTE(review): `config.verbose` is not used by this solver — confirm
/// whether progress output should be added for consistency with `cg`.
///
/// # Errors
/// Returns an error for a non-square matrix, a length mismatch, invalid
/// eigenvalue bounds, or a tolerance not representable in `F`.
pub fn chebyshev<F>(
    a: &CsrMatrix<F>,
    b: &Array1<F>,
    config: &IterativeSolverConfig,
    lambda_min: F,
    lambda_max: F,
) -> SparseResult<SolverResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let (m, n) = a.shape();
    if m != n {
        return Err(SparseError::ValueError(
            "Chebyshev iteration requires a square matrix".to_string(),
        ));
    }
    if b.len() != n {
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: b.len(),
        });
    }
    if lambda_min <= F::sparse_zero() || lambda_max <= F::sparse_zero() {
        return Err(SparseError::ValueError(
            "Eigenvalue bounds must be positive for Chebyshev iteration".to_string(),
        ));
    }
    if lambda_min >= lambda_max {
        return Err(SparseError::ValueError(
            "lambda_min must be strictly less than lambda_max".to_string(),
        ));
    }
    let tol = F::from(config.tol).ok_or_else(|| {
        SparseError::ValueError("Failed to convert tolerance to float type".to_string())
    })?;
    let two = F::from(2.0)
        .ok_or_else(|| SparseError::ValueError("Failed to convert 2.0 to float".to_string()))?;
    let bnorm = norm2_arr(b);
    // A (near-)zero right-hand side is solved exactly by x = 0.
    if bnorm <= F::epsilon() {
        return Ok(SolverResult {
            solution: Array1::zeros(n),
            n_iter: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
        });
    }
    let tolerance = tol * bnorm;
    // Center and half-width of the eigenvalue interval.
    let d = (lambda_max + lambda_min) / two;
    let c = (lambda_max - lambda_min) / two;
    let mut x = Array1::zeros(n);
    let mut r = b.clone();
    let mut rnorm = norm2_arr(&r);
    if rnorm <= tolerance {
        return Ok(SolverResult {
            solution: x,
            n_iter: 0,
            residual_norm: rnorm,
            converged: true,
        });
    }
    // First step: p = r / d, x += p, recompute the residual.
    let inv_d = F::sparse_one() / d;
    let mut p = Array1::zeros(n);
    for i in 0..n {
        p[i] = inv_d * r[i];
    }
    axpy(&mut x, F::sparse_one(), &p);
    let ax = spmv(a, &x)?;
    for i in 0..n {
        r[i] = b[i] - ax[i];
    }
    rnorm = norm2_arr(&r);
    if rnorm <= tolerance {
        return Ok(SolverResult {
            solution: x,
            n_iter: 1,
            residual_norm: rnorm,
            converged: true,
        });
    }
    // Chebyshev recurrence coefficients for subsequent steps.
    let mut alpha;
    let mut beta;
    let half = F::sparse_one() / two;
    let mut alpha_prev = inv_d;
    let c_half = c * half;
    for k in 1..config.max_iter {
        // beta_k = (c * alpha_{k-1} / 2)^2, alpha_k = 1 / (d - beta_k / alpha_{k-1}).
        beta = (c_half * alpha_prev) * (c_half * alpha_prev);
        let denom = d - beta / alpha_prev;
        // Guard against division by a vanishing denominator.
        if denom.abs() < F::epsilon() {
            break;
        }
        alpha = F::sparse_one() / denom;
        for i in 0..n {
            p[i] = alpha * r[i] + beta * p[i];
        }
        axpy(&mut x, F::sparse_one(), &p);
        let ax_k = spmv(a, &x)?;
        for i in 0..n {
            r[i] = b[i] - ax_k[i];
        }
        rnorm = norm2_arr(&r);
        if rnorm <= tolerance {
            return Ok(SolverResult {
                solution: x,
                n_iter: k + 1,
                residual_norm: rnorm,
                converged: true,
            });
        }
        alpha_prev = alpha;
    }
    Ok(SolverResult {
        solution: x,
        n_iter: config.max_iter,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
    })
}
/// Matrix norm selector for [`sparse_norm`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NormType {
    /// Frobenius norm: sqrt of the sum of squared entries.
    Frobenius,
    /// Infinity norm: maximum absolute row sum.
    Inf,
    /// 1-norm: maximum absolute column sum.
    One,
}
/// Estimates the spectral radius of a square matrix via power iteration.
///
/// Runs `n_iter` iterations (or 50 when `n_iter == 0`) starting from the
/// normalized all-ones vector, and returns the absolute value of the final
/// Rayleigh quotient `v . A v`. This converges to the dominant eigenvalue's
/// magnitude only when one exists and the start vector is not orthogonal to
/// its eigenvector — treat the result as an estimate, not an exact value.
///
/// # Errors
/// Fails for a non-square matrix or if the dimension cannot be converted
/// to `F`.
pub fn estimate_spectral_radius<F>(a: &CsrMatrix<F>, n_iter: usize) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + ScalarOperand + Debug + 'static,
{
    let (m, n) = a.shape();
    if m != n {
        return Err(SparseError::ValueError(
            "Matrix must be square to estimate spectral radius".to_string(),
        ));
    }
    if n == 0 {
        return Ok(F::sparse_zero());
    }
    // Start from the unit vector (1, ..., 1) / sqrt(n).
    let inv_sqrt_n = F::sparse_one()
        / F::from(n)
            .ok_or_else(|| {
                SparseError::ValueError("Failed to convert matrix size to float".to_string())
            })?
            .sqrt();
    let mut v = Array1::from_elem(n, inv_sqrt_n);
    let mut lambda = F::sparse_zero();
    // Default to 50 iterations when the caller passes 0.
    let iters = if n_iter == 0 { 50 } else { n_iter };
    for _ in 0..iters {
        let w = spmv(a, &v)?;
        // Rayleigh quotient with the normalized previous iterate.
        lambda = dot_arr(&v, &w);
        let wnorm = norm2_arr(&w);
        // A is (numerically) nilpotent on v: the radius estimate is zero.
        if wnorm < F::epsilon() {
            return Ok(F::sparse_zero());
        }
        v = &w / wnorm;
    }
    Ok(lambda.abs())
}
/// Extracts the main diagonal of `a` as a dense vector of length
/// `min(rows, cols)` (rectangular matrices are supported).
pub fn sparse_diagonal<F>(a: &CsrMatrix<F>) -> Array1<F>
where
    F: Float + SparseElement,
{
    let (m, n) = a.shape();
    Array1::from_shape_fn(m.min(n), |i| a.get(i, i))
}
/// Sum of the main-diagonal entries of `a`, over the first
/// `min(rows, cols)` positions.
pub fn sparse_trace<F>(a: &CsrMatrix<F>) -> F
where
    F: Float + SparseElement,
{
    let (m, n) = a.shape();
    (0..m.min(n)).fold(F::sparse_zero(), |acc, i| acc + a.get(i, i))
}
/// Computes a matrix norm of a CSR matrix.
///
/// - `Frobenius`: sqrt of the sum of squared stored entries.
/// - `Inf`: maximum absolute row sum.
/// - `One`: maximum absolute column sum.
pub fn sparse_norm<F>(a: &CsrMatrix<F>, norm_type: NormType) -> F
where
    F: Float + NumAssign + SparseElement + AddAssign + MulAssign + 'static,
{
    match norm_type {
        NormType::Frobenius => a
            .data
            .iter()
            .fold(F::sparse_zero(), |acc, &v| acc + v * v)
            .sqrt(),
        NormType::Inf => {
            let mut best = F::sparse_zero();
            for i in 0..a.rows() {
                // Absolute sum of the stored entries in row i.
                let row_sum = a.data[a.row_range(i)]
                    .iter()
                    .fold(F::sparse_zero(), |acc, &v| acc + v.abs());
                if row_sum > best {
                    best = row_sum;
                }
            }
            best
        }
        NormType::One => {
            let n = a.cols();
            // Accumulate absolute column sums in one pass over the entries.
            let mut col_sums = vec![F::sparse_zero(); n];
            for (&col, &val) in a.indices.iter().zip(a.data.iter()) {
                if col < n {
                    col_sums[col] = col_sums[col] + val.abs();
                }
            }
            col_sums
                .into_iter()
                .fold(F::sparse_zero(), |best, s| if s > best { s } else { best })
        }
    }
}
/// Locates the storage index of the diagonal entry `(row, row)` within a
/// CSR structure given by `indices` and `indptr`.
///
/// # Errors
/// Fails when the diagonal entry is not present in the sparsity pattern.
fn find_csr_diag_index(indices: &[usize], indptr: &[usize], row: usize) -> SparseResult<usize> {
    let (start, end) = (indptr[row], indptr[row + 1]);
    indices[start..end]
        .iter()
        .position(|&col| col == row)
        .map(|offset| start + offset)
        .ok_or_else(|| {
            SparseError::ValueError(format!("Missing diagonal element at row {row}"))
        })
}
// Unit tests: small dense-ish CSR fixtures exercising every solver,
// preconditioner, and utility in this module.
#[cfg(test)]
mod tests {
    use super::*;
    // 3x3 symmetric positive definite, diagonally dominant fixture.
    fn spd_3x3() -> CsrMatrix<f64> {
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -1.0, -1.0, 4.0, -1.0, -1.0, -1.0, 4.0];
        CsrMatrix::new(data, rows, cols, (3, 3)).expect("failed to build test matrix")
    }
    // 3x3 nonsymmetric fixture (for BiCGSTAB / GMRES).
    fn nonsym_3x3() -> CsrMatrix<f64> {
        let rows = vec![0, 0, 1, 1, 1, 2, 2];
        let cols = vec![0, 1, 0, 1, 2, 1, 2];
        let data = vec![5.0, -1.0, -2.0, 5.0, -1.0, -2.0, 5.0];
        CsrMatrix::new(data, rows, cols, (3, 3)).expect("failed to build test matrix")
    }
    // 5x5 SPD tridiagonal fixture (1-D Laplacian-like stencil).
    fn spd_5x5() -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut data = Vec::new();
        for i in 0..5 {
            rows.push(i);
            cols.push(i);
            data.push(4.0);
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                data.push(-1.0);
            }
            if i < 4 {
                rows.push(i);
                cols.push(i + 1);
                data.push(-1.0);
            }
        }
        CsrMatrix::new(data, rows, cols, (5, 5)).expect("failed to build test matrix")
    }
    fn rhs_3() -> Array1<f64> {
        Array1::from_vec(vec![1.0, 2.0, 3.0])
    }
    fn rhs_5() -> Array1<f64> {
        Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0])
    }
    // Asserts A x ~= b componentwise within `tol`.
    fn verify_solution(a: &CsrMatrix<f64>, x: &Array1<f64>, b: &Array1<f64>, tol: f64) {
        let ax = spmv(a, x).expect("spmv failed in verification");
        for (i, (&axi, &bi)) in ax.iter().zip(b.iter()).enumerate() {
            assert!(
                (axi - bi).abs() < tol,
                "Mismatch at index {i}: Ax[{i}]={axi}, b[{i}]={bi}"
            );
        }
    }
    #[test]
    fn test_cg_spd_3x3() {
        let a = spd_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, None).expect("CG failed");
        assert!(res.converged, "CG did not converge");
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_cg_spd_5x5() {
        let a = spd_5x5();
        let b = rhs_5();
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, None).expect("CG failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_cg_with_jacobi_precond() {
        let a = spd_3x3();
        let b = rhs_3();
        let pc = JacobiPreconditioner::new(&a).expect("Jacobi failed");
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, Some(&pc)).expect("CG + Jacobi failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_cg_zero_rhs() {
        let a = spd_3x3();
        let b = Array1::zeros(3);
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, None).expect("CG failed");
        assert!(res.converged);
        assert!(res.residual_norm <= 1e-14);
    }
    #[test]
    fn test_cg_dimension_mismatch() {
        let a = spd_3x3();
        let b = Array1::from_vec(vec![1.0, 2.0]);
        let cfg = IterativeSolverConfig::default();
        assert!(cg(&a, &b, &cfg, None).is_err());
    }
    #[test]
    fn test_bicgstab_nonsym_3x3() {
        let a = nonsym_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig::default();
        let res = bicgstab(&a, &b, &cfg, None).expect("BiCGSTAB failed");
        assert!(res.converged, "BiCGSTAB did not converge");
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_bicgstab_spd_3x3() {
        let a = spd_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig::default();
        let res = bicgstab(&a, &b, &cfg, None).expect("BiCGSTAB failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_bicgstab_with_jacobi() {
        let a = nonsym_3x3();
        let b = rhs_3();
        let pc = JacobiPreconditioner::new(&a).expect("Jacobi failed");
        let cfg = IterativeSolverConfig::default();
        let res = bicgstab(&a, &b, &cfg, Some(&pc)).expect("BiCGSTAB + Jacobi failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_bicgstab_zero_rhs() {
        let a = nonsym_3x3();
        let b = Array1::zeros(3);
        let cfg = IterativeSolverConfig::default();
        let res = bicgstab(&a, &b, &cfg, None).expect("BiCGSTAB failed");
        assert!(res.converged);
    }
    #[test]
    fn test_gmres_nonsym_3x3() {
        let a = nonsym_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig::default();
        let res = gmres(&a, &b, &cfg, 30, None).expect("GMRES failed");
        assert!(res.converged, "GMRES did not converge");
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_gmres_spd_5x5() {
        let a = spd_5x5();
        let b = rhs_5();
        let cfg = IterativeSolverConfig::default();
        let res = gmres(&a, &b, &cfg, 10, None).expect("GMRES failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_gmres_with_jacobi() {
        let a = nonsym_3x3();
        let b = rhs_3();
        let pc = JacobiPreconditioner::new(&a).expect("Jacobi failed");
        let cfg = IterativeSolverConfig::default();
        let res = gmres(&a, &b, &cfg, 30, Some(&pc)).expect("GMRES + Jacobi failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    // Restart length smaller than the problem size forces multiple restarts.
    #[test]
    fn test_gmres_restart_small() {
        let a = spd_5x5();
        let b = rhs_5();
        let cfg = IterativeSolverConfig {
            max_iter: 200,
            tol: 1e-8,
            verbose: false,
        };
        let res = gmres(&a, &b, &cfg, 2, None).expect("GMRES failed");
        assert!(res.converged, "GMRES(2) did not converge");
        verify_solution(&a, &res.solution, &b, 1e-6);
    }
    // Eigenvalue bounds bracket the spectrum of spd_3x3 (roughly [2, 5]).
    #[test]
    fn test_chebyshev_spd_3x3() {
        let a = spd_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig {
            max_iter: 200,
            tol: 1e-8,
            verbose: false,
        };
        let res = chebyshev(&a, &b, &cfg, 1.5, 5.5).expect("Chebyshev failed");
        assert!(res.converged, "Chebyshev did not converge");
        verify_solution(&a, &res.solution, &b, 1e-6);
    }
    #[test]
    fn test_chebyshev_spd_5x5() {
        let a = spd_5x5();
        let b = rhs_5();
        let cfg = IterativeSolverConfig {
            max_iter: 300,
            tol: 1e-8,
            verbose: false,
        };
        let res = chebyshev(&a, &b, &cfg, 2.0, 6.0).expect("Chebyshev failed");
        assert!(res.converged, "Chebyshev did not converge");
        verify_solution(&a, &res.solution, &b, 1e-6);
    }
    #[test]
    fn test_chebyshev_invalid_bounds() {
        let a = spd_3x3();
        let b = rhs_3();
        let cfg = IterativeSolverConfig::default();
        assert!(chebyshev(&a, &b, &cfg, 5.0, 3.0).is_err());
        assert!(chebyshev(&a, &b, &cfg, -1.0, 5.0).is_err());
    }
    #[test]
    fn test_jacobi_from_matrix() {
        let a = spd_3x3();
        let pc = JacobiPreconditioner::new(&a).expect("Jacobi creation failed");
        let r = Array1::from_vec(vec![4.0, 8.0, 12.0]);
        let z = pc.apply(&r).expect("Jacobi apply failed");
        assert!((z[0] - 1.0).abs() < 1e-12);
        assert!((z[1] - 2.0).abs() < 1e-12);
        assert!((z[2] - 3.0).abs() < 1e-12);
    }
    // On a fully dense pattern, ILU(0) equals the exact LU factorization,
    // so applying it should invert A exactly.
    #[test]
    fn test_ilu0_preconditioner() {
        let a = spd_3x3();
        let pc = ILU0Preconditioner::new(&a).expect("ILU0 creation failed");
        let r = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let z = pc.apply(&r).expect("ILU0 apply failed");
        let a_inv_r = spmv(&a, &z).expect("spmv failed");
        for i in 0..3 {
            assert!(
                (a_inv_r[i] - r[i]).abs() < 1e-10,
                "ILU0 did not produce exact inverse on dense matrix at index {i}"
            );
        }
    }
    #[test]
    fn test_ssor_preconditioner() {
        let a = spd_3x3();
        let pc = SSORPreconditioner::new(a.clone(), 1.0).expect("SSOR creation failed");
        let r = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let z = pc.apply(&r).expect("SSOR apply failed");
        assert_eq!(z.len(), 3);
        for &val in z.iter() {
            assert!(val.is_finite(), "SSOR produced non-finite value");
        }
    }
    #[test]
    fn test_ssor_invalid_omega() {
        let a = spd_3x3();
        assert!(SSORPreconditioner::new(a.clone(), 0.0).is_err());
        assert!(SSORPreconditioner::new(a.clone(), 2.0).is_err());
        assert!(SSORPreconditioner::new(a.clone(), -0.5).is_err());
    }
    #[test]
    fn test_cg_with_ilu0() {
        let a = spd_3x3();
        let b = rhs_3();
        let pc = ILU0Preconditioner::new(&a).expect("ILU0 creation failed");
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, Some(&pc)).expect("CG + ILU0 failed");
        assert!(res.converged, "CG + ILU0 did not converge");
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_sparse_diagonal() {
        let a = spd_3x3();
        let d = sparse_diagonal(&a);
        assert_eq!(d.len(), 3);
        assert!((d[0] - 4.0).abs() < 1e-12);
        assert!((d[1] - 4.0).abs() < 1e-12);
        assert!((d[2] - 4.0).abs() < 1e-12);
    }
    #[test]
    fn test_sparse_trace() {
        let a = spd_3x3();
        let tr = sparse_trace(&a);
        assert!((tr - 12.0).abs() < 1e-12);
    }
    // 9 entries: 3 of value 4 and 6 of value -1 => sum of squares = 54.
    #[test]
    fn test_sparse_norm_frobenius() {
        let a = spd_3x3();
        let nf = sparse_norm(&a, NormType::Frobenius);
        assert!((nf - 54.0_f64.sqrt()).abs() < 1e-10);
    }
    #[test]
    fn test_sparse_norm_inf() {
        let a = spd_3x3();
        let ni = sparse_norm(&a, NormType::Inf);
        assert!((ni - 6.0).abs() < 1e-12);
    }
    #[test]
    fn test_sparse_norm_one() {
        let a = spd_3x3();
        let n1 = sparse_norm(&a, NormType::One);
        assert!((n1 - 6.0).abs() < 1e-12);
    }
    // spd_3x3 has dominant eigenvalue 5 (row-sum structure 4 + 1).
    #[test]
    fn test_estimate_spectral_radius() {
        let a = spd_3x3();
        let rho = estimate_spectral_radius(&a, 100).expect("spectral radius estimation failed");
        assert!(
            (rho - 5.0).abs() < 0.5,
            "Expected spectral radius near 5.0, got {rho}"
        );
    }
    #[test]
    fn test_sparse_diagonal_rectangular() {
        let rows = vec![0, 1, 2, 3];
        let cols = vec![0, 1, 2, 0];
        let data = vec![10.0, 20.0, 30.0, 99.0];
        let a = CsrMatrix::new(data, rows, cols, (4, 3)).expect("failed to build matrix");
        let d = sparse_diagonal(&a);
        assert_eq!(d.len(), 3);
        assert!((d[0] - 10.0).abs() < 1e-12);
        assert!((d[1] - 20.0).abs() < 1e-12);
        assert!((d[2] - 30.0).abs() < 1e-12);
    }
    #[test]
    fn test_solver_config_default() {
        let cfg = IterativeSolverConfig::default();
        assert_eq!(cfg.max_iter, 1000);
        assert!((cfg.tol - 1e-10).abs() < 1e-15);
        assert!(!cfg.verbose);
    }
    #[test]
    fn test_gmres_dimension_mismatch() {
        let a = spd_3x3();
        let b = Array1::from_vec(vec![1.0, 2.0]);
        let cfg = IterativeSolverConfig::default();
        assert!(gmres(&a, &b, &cfg, 10, None).is_err());
    }
    #[test]
    fn test_bicgstab_5x5() {
        let a = spd_5x5();
        let b = rhs_5();
        let cfg = IterativeSolverConfig::default();
        let res = bicgstab(&a, &b, &cfg, None).expect("BiCGSTAB failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_cg_with_ssor_precond() {
        let a = spd_5x5();
        let b = rhs_5();
        let pc = SSORPreconditioner::new(a.clone(), 1.2).expect("SSOR creation failed");
        let cfg = IterativeSolverConfig::default();
        let res = cg(&a, &b, &cfg, Some(&pc)).expect("CG + SSOR failed");
        assert!(res.converged);
        verify_solution(&a, &res.solution, &b, 1e-8);
    }
    #[test]
    fn test_nonsquare_matrix_error() {
        let rows = vec![0, 1, 2];
        let cols = vec![0, 0, 1];
        let data = vec![1.0, 2.0, 3.0];
        let a = CsrMatrix::new(data, rows, cols, (3, 2)).expect("failed to build matrix");
        let b = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let cfg = IterativeSolverConfig::default();
        assert!(cg(&a, &b, &cfg, None).is_err());
        assert!(bicgstab(&a, &b, &cfg, None).is_err());
        assert!(gmres(&a, &b, &cfg, 10, None).is_err());
    }
}