#![allow(unused_variables)]
#![allow(unused_assignments)]
#![allow(unused_mut)]
use crate::error::{SparseError, SparseResult};
use crate::linalg::interface::LinearOperator;
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::iter::Sum;
/// Outcome of a run of one of the iterative solvers in this module.
#[derive(Debug, Clone)]
pub struct IterationResult<F> {
    /// Final solution estimate.
    pub x: Vec<F>,
    /// Number of iterations performed before termination.
    pub iterations: usize,
    /// Residual norm at termination (for some solvers this is a
    /// method-specific estimate rather than `||b - A x||`).
    pub residual_norm: F,
    /// Whether the convergence tolerance was met.
    pub converged: bool,
    /// Human-readable termination reason.
    pub message: String,
}
/// Options for the Conjugate Gradient solver ([`cg`]).
pub struct CGOptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional preconditioner applied as `z = M r`.
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for CGOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
preconditioner: None,
}
}
}
/// Conjugate Gradient solver for symmetric positive-definite systems `A x = b`.
///
/// Supports an optional preconditioner applied as `z = M r`; without one,
/// plain CG is performed.
///
/// # Errors
/// * `SparseError::ValueError` if `a` is not square.
/// * `SparseError::DimensionMismatch` if `b` or `options.x0` has the wrong length.
#[allow(dead_code)]
pub fn cg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: CGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for CG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }
    let n = rows;
    // Starting point: caller-supplied x0 (validated) or the zero vector.
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };
    // Initial residual r = b - A x.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    // Fix: return immediately when the initial guess already satisfies the
    // tolerance (consistent with `bicgstab`); previously CG always performed
    // at least one iteration even for an exact starting point.
    let rnorm0 = norm2(&r);
    if rnorm0 <= tolerance {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: rnorm0,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }
    // Preconditioned residual z = M r (z = r when no preconditioner).
    let mut z = if let Some(m) = &options.preconditioner {
        m.matvec(&r)?
    } else {
        r.clone()
    };
    let mut p = z.clone();
    let mut rz_old = dot(&r, &z);
    let mut iterations = 0;
    while iterations < options.max_iter {
        let ap = a.matvec(&p)?;
        let pap = dot(&p, &ap);
        // For an SPD matrix p^T A p > 0; a non-positive value signals that
        // the matrix (or preconditioner) violates the CG assumptions.
        if pap <= F::sparse_zero() {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "Matrix not positive definite (p^T*A*p <= 0)".to_string(),
            });
        }
        let alpha = rz_old / pap;
        // x += alpha p ; r -= alpha A p
        for i in 0..n {
            x[i] += alpha * p[i];
        }
        for i in 0..n {
            r[i] -= alpha * ap[i];
        }
        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
        // Refresh the preconditioned residual and the search direction.
        z = if let Some(m) = &options.preconditioner {
            m.matvec(&r)?
        } else {
            r.clone()
        };
        let rz_new = dot(&r, &z);
        let beta = rz_new / rz_old;
        for i in 0..n {
            p[i] = z[i] + beta * p[i];
        }
        rz_old = rz_new;
        iterations += 1;
    }
    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
/// Options for the BiCG solver ([`bicg`]).
pub struct BiCGOptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional left preconditioner.
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    /// Optional right preconditioner.
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for BiCGOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
left_preconditioner: None,
right_preconditioner: None,
}
}
}
/// BiCG (biconjugate gradient) solver for general square systems `A x = b`.
///
/// Maintains a shadow residual `r_star` updated via `A^T` (`rmatvec`), so
/// non-symmetric matrices are supported.
///
/// NOTE(review): the preconditioners are applied asymmetrically — the "left"
/// one maps `r -> z` and the "right" one maps `r_star -> z_star` and the
/// direction updates — which does not match the conventional split
/// preconditioned BiCG formulation; confirm against a reference before
/// relying on preconditioned runs. The unpreconditioned path is standard.
///
/// # Errors
/// * `SparseError::ValueError` if `a` is not square.
/// * `SparseError::DimensionMismatch` if `b` or `options.x0` has the wrong length.
#[allow(dead_code)]
pub fn bicg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }
    let n = rows;
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };
    // Initial residual r = b - A x.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    // Fix: return immediately when the initial guess already satisfies the
    // tolerance (consistent with `bicgstab`); previously BiCG always ran at
    // least one full iteration.
    let rnorm0 = norm2(&r);
    if rnorm0 <= tolerance {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: rnorm0,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }
    // Shadow residual for the transpose recurrence.
    let mut r_star = r.clone();
    let mut z = if let Some(m1) = &options.left_preconditioner {
        m1.matvec(&r)?
    } else {
        r.clone()
    };
    let mut z_star = if let Some(m2) = &options.right_preconditioner {
        m2.matvec(&r_star)?
    } else {
        r_star.clone()
    };
    let mut p = z.clone();
    let mut p_star = z_star.clone();
    let mut rho_old = dot(&r_star, &z);
    let mut iterations = 0;
    while iterations < options.max_iter {
        // q = A p, q_star = A^T p_star (each optionally preconditioned).
        let mut q = a.matvec(&p)?;
        if let Some(m2) = &options.right_preconditioner {
            q = m2.matvec(&q)?;
        }
        let mut q_star = a.rmatvec(&p_star)?;
        if let Some(m1) = &options.left_preconditioner {
            q_star = m1.matvec(&q_star)?;
        }
        let alpha_den = dot(&p_star, &q);
        // Serious breakdown: the biorthogonality pairing has collapsed.
        if alpha_den.abs()
            < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
        {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "BiCG breakdown: (p_star, q) ≈ 0".to_string(),
            });
        }
        let alpha = rho_old / alpha_den;
        for i in 0..n {
            x[i] += alpha * p[i];
            r[i] -= alpha * q[i];
            r_star[i] -= alpha * q_star[i];
        }
        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
        z = if let Some(m1) = &options.left_preconditioner {
            m1.matvec(&r)?
        } else {
            r.clone()
        };
        z_star = if let Some(m2) = &options.right_preconditioner {
            m2.matvec(&r_star)?
        } else {
            r_star.clone()
        };
        let rho = dot(&r_star, &z);
        if rho.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: false,
                message: "BiCG breakdown: rho ≈ 0".to_string(),
            });
        }
        let beta = rho / rho_old;
        for i in 0..n {
            p[i] = z[i] + beta * p[i];
            p_star[i] = z_star[i] + beta * p_star[i];
        }
        rho_old = rho;
        iterations += 1;
    }
    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
/// Options for the BiCGSTAB solver ([`bicgstab`]).
pub struct BiCGSTABOptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional left preconditioner.
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    /// Optional right preconditioner.
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for BiCGSTABOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
left_preconditioner: None,
right_preconditioner: None,
}
}
}
/// Result type returned by [`bicgstab`]; identical to [`IterationResult`].
pub type BiCGSTABResult<F> = IterationResult<F>;
/// BiCGSTAB (biconjugate gradient stabilized) solver for general square
/// systems `A x = b`; does not require `A` to be symmetric and avoids the
/// transpose products that plain BiCG needs.
///
/// NOTE(review): the right-preconditioner handling looks non-standard —
/// `x` itself is passed through `m2` on every iteration (and again in the
/// early-exit path), rather than being back-transformed once at the end.
/// Verify preconditioned use against a reference implementation; the
/// unpreconditioned path follows the textbook algorithm.
///
/// # Errors
/// * `SparseError::ValueError` if `a` is not square.
/// * `SparseError::DimensionMismatch` if `b` or `options.x0` has the wrong length.
#[allow(dead_code)]
pub fn bicgstab<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGSTABOptions<F>,
) -> SparseResult<BiCGSTABResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCGSTAB solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }
    let n = rows;
    // Starting point: caller-supplied x0 (validated) or the zero vector.
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };
    // Initial residual r = b - A x.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    // Early exit when the initial guess already satisfies the tolerance.
    if rnorm <= tolerance {
        return Ok(BiCGSTABResult {
            x,
            iterations: 0,
            residual_norm: rnorm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }
    // r_hat is the fixed shadow residual; v, p are the recurrence vectors.
    let r_hat = r.clone();
    let mut v = vec![F::sparse_zero(); n];
    let mut p = vec![F::sparse_zero(); n];
    let mut y = vec![F::sparse_zero(); n];
    let mut s = vec![F::sparse_zero(); n];
    let mut t: Vec<F>;
    let mut rho_old = F::sparse_one();
    let mut alpha = F::sparse_zero();
    let mut omega = F::sparse_one();
    let mut iterations = 0;
    while iterations < options.max_iter {
        let rho = dot(&r_hat, &r);
        // Breakdown: the shadow residual has become orthogonal to r.
        if rho.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: rho ≈ 0".to_string(),
            });
        }
        let beta = (rho / rho_old) * (alpha / omega);
        for i in 0..n {
            p[i] = r[i] + beta * (p[i] - omega * v[i]);
        }
        let p_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&p)?,
            None => p.clone(),
        };
        v = a.matvec(&p_tilde)?;
        if let Some(m2) = &options.right_preconditioner {
            v = m2.matvec(&v)?;
        }
        let den = dot(&r_hat, &v);
        if den.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (r_hat, v) ≈ 0".to_string(),
            });
        }
        alpha = rho / den;
        if !alpha.is_finite() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: alpha is not finite".to_string(),
            });
        }
        // Half-step: y = x + alpha p~, s = r - alpha v.
        for i in 0..n {
            y[i] = x[i] + alpha * p_tilde[i];
            s[i] = r[i] - alpha * v[i];
        }
        let snorm = norm2(&s);
        // If the half-step residual s already converged, accept y as x.
        if snorm <= tolerance {
            x = y;
            // NOTE(review): see the function-level note about this m2
            // application to x.
            if let Some(m2) = &options.right_preconditioner {
                x = m2.matvec(&x)?;
            }
            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: snorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
        let s_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&s)?,
            None => s.clone(),
        };
        t = a.matvec(&s_tilde)?;
        if let Some(m2) = &options.right_preconditioner {
            t = m2.matvec(&t)?;
        }
        // Stabilization parameter omega = (t, s) / (t, t).
        let ts = dot(&t, &s);
        let tt = dot(&t, &t);
        if tt < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (t, t) ≈ 0".to_string(),
            });
        }
        omega = ts / tt;
        if !omega.is_finite()
            || omega.abs()
                < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
        {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: omega is not finite or too small".to_string(),
            });
        }
        // Full step: x = y + omega s~, r = s - omega t.
        for i in 0..n {
            x[i] = y[i] + omega * s_tilde[i];
            r[i] = s[i] - omega * t[i];
        }
        // NOTE(review): see the function-level note — x is re-mapped through
        // m2 every iteration here.
        if let Some(m2) = &options.right_preconditioner {
            x = m2.matvec(&x)?;
        }
        rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
        rho_old = rho;
        iterations += 1;
    }
    Ok(BiCGSTABResult {
        x,
        iterations,
        residual_norm: rnorm,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
/// Options for the restarted GMRES solver ([`gmres`]).
pub struct GMRESOptions<F> {
    /// Maximum total number of inner iterations.
    pub max_iter: usize,
    /// Krylov subspace dimension per restart cycle.
    pub restart: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional (left) preconditioner.
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for GMRESOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
restart: 30,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
preconditioner: None,
}
}
}
/// Restarted GMRES(m) solver for general square systems `A x = b`.
///
/// Builds a Krylov subspace of dimension at most `options.restart` with the
/// Arnoldi process, solves the projected least-squares problem via Givens
/// rotations, then restarts from the updated iterate until convergence or
/// `options.max_iter` total inner iterations.
///
/// # Errors
/// * `SparseError::ValueError` if `a` is not square.
/// * `SparseError::DimensionMismatch` if `b` or `options.x0` has the wrong length.
#[allow(dead_code)]
pub fn gmres<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: GMRESOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for GMRES solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }
    let n = rows;
    // The Krylov dimension can never usefully exceed n.
    let restart = options.restart.min(n);
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };
    // (Left-)preconditioned initial residual.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
    if let Some(m) = &options.preconditioner {
        r = m.matvec(&r)?;
    }
    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    let mut outer_iterations = 0;
    while outer_iterations < options.max_iter && rnorm > tolerance {
        // Arnoldi basis, Hessenberg matrix, Givens rotation coefficients,
        // and the rotated right-hand side for this restart cycle.
        let mut v = vec![vec![F::sparse_zero(); n]; restart + 1];
        let mut h = vec![vec![F::sparse_zero(); restart]; restart + 1];
        let mut cs = vec![F::sparse_zero(); restart];
        let mut sn = vec![F::sparse_zero(); restart];
        let mut s = vec![F::sparse_zero(); restart + 1];
        v[0] = r.iter().map(|&ri| ri / rnorm).collect();
        s[0] = rnorm;
        let mut inner_iter = 0;
        while inner_iter < restart && inner_iter + outer_iterations < options.max_iter {
            // Arnoldi step: w = (M) A v_k, orthogonalized against v_0..v_k.
            let mut w = a.matvec(&v[inner_iter])?;
            if let Some(m) = &options.preconditioner {
                w = m.matvec(&w)?;
            }
            for i in 0..=inner_iter {
                h[i][inner_iter] = dot(&v[i], &w);
                for (k, w_elem) in w.iter_mut().enumerate().take(n) {
                    *w_elem -= h[i][inner_iter] * v[i][k];
                }
            }
            h[inner_iter + 1][inner_iter] = norm2(&w);
            // Breakdown: the Krylov subspace has become invariant.
            if h[inner_iter + 1][inner_iter]
                < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
            {
                break;
            }
            v[inner_iter + 1] = w
                .iter()
                .map(|&wi| wi / h[inner_iter + 1][inner_iter])
                .collect();
            // Apply previously accumulated Givens rotations to the new column.
            for i in 0..inner_iter {
                let temp = cs[i] * h[i][inner_iter] + sn[i] * h[i + 1][inner_iter];
                h[i + 1][inner_iter] = -sn[i] * h[i][inner_iter] + cs[i] * h[i + 1][inner_iter];
                h[i][inner_iter] = temp;
            }
            // Form a new rotation eliminating the subdiagonal entry.
            let rho = (h[inner_iter][inner_iter] * h[inner_iter][inner_iter]
                + h[inner_iter + 1][inner_iter] * h[inner_iter + 1][inner_iter])
                .sqrt();
            cs[inner_iter] = h[inner_iter][inner_iter] / rho;
            sn[inner_iter] = h[inner_iter + 1][inner_iter] / rho;
            h[inner_iter][inner_iter] = rho;
            h[inner_iter + 1][inner_iter] = F::sparse_zero();
            let temp = cs[inner_iter] * s[inner_iter] + sn[inner_iter] * s[inner_iter + 1];
            s[inner_iter + 1] =
                -sn[inner_iter] * s[inner_iter] + cs[inner_iter] * s[inner_iter + 1];
            s[inner_iter] = temp;
            inner_iter += 1;
            // |s[k]| is the residual norm of the projected problem.
            let residual = s[inner_iter].abs();
            if residual <= tolerance {
                break;
            }
        }
        // Fix: if Arnoldi broke down before completing a single inner step,
        // no progress is possible this cycle — x and r stay unchanged and
        // `outer_iterations` would not advance, so the outer loop previously
        // spun forever. Bail out instead.
        if inner_iter == 0 {
            break;
        }
        // Back-substitution for y in the upper-triangular system H y = s.
        let mut y = vec![F::sparse_zero(); inner_iter];
        for i in (0..inner_iter).rev() {
            y[i] = s[i];
            for j in i + 1..inner_iter {
                let y_j = y[j];
                y[i] -= h[i][j] * y_j;
            }
            y[i] /= h[i][i];
        }
        // Apply the Krylov correction: x += V y.
        for i in 0..inner_iter {
            for (j, x_val) in x.iter_mut().enumerate().take(n) {
                *x_val += y[i] * v[i][j];
            }
        }
        // Recompute the true (preconditioned) residual for the restart test.
        let ax = a.matvec(&x)?;
        r = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
        if let Some(m) = &options.preconditioner {
            r = m.matvec(&r)?;
        }
        rnorm = norm2(&r);
        outer_iterations += inner_iter;
        if rnorm <= tolerance {
            break;
        }
    }
    Ok(IterationResult {
        x,
        iterations: outer_iterations,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
        message: if rnorm <= tolerance {
            "Converged".to_string()
        } else {
            "Maximum iterations reached".to_string()
        },
    })
}
/// Common interface for iterative linear solvers.
pub trait IterativeSolver<F: Float> {
    /// Solve `a * x = b` and report the iteration outcome.
    fn solve(&self, a: &dyn LinearOperator<F>, b: &[F]) -> SparseResult<IterationResult<F>>;
}
/// Euclidean inner product of two slices; pairs are truncated to the
/// shorter length by `zip`.
pub(crate) fn dot<F: Float + Sum>(x: &[F], y: &[F]) -> F {
    let products = x.iter().zip(y.iter()).map(|(&a, &b)| a * b);
    products.sum()
}
pub(crate) fn norm2<F: Float + Sum>(x: &[F]) -> F {
dot(x, x).sqrt()
}
/// Options for the LSQR least-squares solver ([`lsqr`]).
pub struct LSQROptions<F> {
    /// Maximum number of iterations.
    #[allow(dead_code)]
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    #[allow(dead_code)]
    pub rtol: F,
    /// Absolute tolerance floor.
    #[allow(dead_code)]
    pub atol: F,
    // NOTE(review): `btol` is not read by the current `lsqr` implementation.
    #[allow(dead_code)]
    pub btol: F,
    /// Optional initial guess; zero vector when `None`.
    #[allow(dead_code)]
    pub x0: Option<Vec<F>>,
}
impl<F: Float> Default for LSQROptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
btol: F::from(1e-8).expect("Failed to convert constant to float"),
x0: None,
}
}
}
/// Options for the LSMR least-squares solver ([`lsmr`]).
#[allow(dead_code)]
pub struct LSMROptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    // NOTE(review): `btol` and `damp` are not read by the current `lsmr`
    // implementation.
    pub btol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Damping parameter (currently unused by `lsmr`).
    pub damp: F,
}
impl<F: Float + SparseElement> Default for LSMROptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
btol: F::from(1e-8).expect("Failed to convert constant to float"),
x0: None,
damp: F::sparse_zero(),
}
}
}
/// Options for a GCROT(m,k) solver (no implementation in this module yet).
#[allow(dead_code)]
pub struct GCROTOptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Inner subspace dimension per cycle.
    pub restart: usize,
    /// Number of vectors retained across restarts.
    pub truncate: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional preconditioner.
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for GCROTOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
restart: 30,
truncate: 2,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
preconditioner: None,
}
}
}
/// Options for the TFQMR solver ([`tfqmr`]).
#[allow(dead_code)]
pub struct TFQMROptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative tolerance (scaled by `||b||`).
    pub rtol: F,
    /// Absolute tolerance floor.
    pub atol: F,
    /// Optional initial guess; zero vector when `None`.
    pub x0: Option<Vec<F>>,
    /// Optional preconditioner.
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}
impl<F: Float> Default for TFQMROptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("Failed to convert constant to float"),
atol: F::from(1e-12).expect("Failed to convert constant to float"),
x0: None,
preconditioner: None,
}
}
}
/// Numerically stable Givens-rotation factors `(c, s, r)` for the pair
/// `(a, b)`, with `r = c*a + s*b` and `-s*a + c*b = 0`. The dominant
/// component is used as the divisor so the intermediate ratio stays <= 1.
fn sym_ortho<F: Float + SparseElement>(a: F, b: F) -> (F, F, F) {
    use scirs2_core::numeric::One;
    let zero = F::sparse_zero();
    let one = <F as One>::one();
    // Sign helper: +1 for non-negative input, -1 otherwise.
    let sign = |v: F| if v >= zero { one } else { -one };
    if b == zero {
        // Rotation is trivial; r carries |a|.
        (sign(a), zero, a.abs())
    } else if a == zero {
        (zero, sign(b), b.abs())
    } else if b.abs() > a.abs() {
        // |b| dominates: divide through by b.
        let tau = a / b;
        let s = sign(b) / (one + tau * tau).sqrt();
        (s * tau, s, b / s)
    } else {
        // |a| dominates: divide through by a.
        let tau = b / a;
        let c = sign(a) / (one + tau * tau).sqrt();
        (c, c * tau, a / c)
    }
}
/// LSQR solver for (possibly rectangular) least-squares problems
/// `min ||A x - b||_2`, built on Golub–Kahan bidiagonalization with plane
/// rotations via [`sym_ortho`] — the structure follows the Paige–Saunders
/// LSQR recurrences.
///
/// NOTE(review): `options.btol` is not used; the stopping test compares the
/// LSQR residual estimate `|phibar|` against `max(atol, rtol * ||b||)`.
///
/// # Errors
/// Returns `SparseError::DimensionMismatch` if `b` or `options.x0` has the
/// wrong length for the operator's shape.
#[allow(dead_code)]
pub fn lsqr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSQROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };
    // u starts as the residual b - A x (skip the matvec for a zero x0).
    let mut u: Vec<F> = b.to_vec();
    let bnorm = norm2(b);
    if x.iter().any(|&xi| xi != F::sparse_zero()) {
        let ax = a.matvec(&x)?;
        for i in 0..u.len() {
            u[i] -= ax[i];
        }
    }
    let mut beta = norm2(&u);
    if beta == F::sparse_zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }
    // Normalize u; then v = A^T u normalized — the first bidiagonalization pair.
    for elem in &mut u {
        *elem /= beta;
    }
    let mut v = a.rmatvec(&u)?;
    let mut alfa = norm2(&v);
    if alfa > F::sparse_zero() {
        for elem in &mut v {
            *elem /= alfa;
        }
    }
    // w is the search-direction vector of the LSQR update recurrence.
    let mut w = v.clone();
    let mut rhobar = alfa;
    let mut phibar = beta;
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    for iterations in 0..options.max_iter {
        // Continue the bidiagonalization: u = A v - alfa u, normalized;
        // then v = A^T u - beta v, normalized.
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alfa * u[i];
        }
        beta = norm2(&u);
        if beta > F::sparse_zero() {
            for elem in &mut u {
                *elem /= beta;
            }
            let atu = a.rmatvec(&u)?;
            for i in 0..v.len() {
                v[i] = atu[i] - beta * v[i];
            }
            alfa = norm2(&v);
            if alfa > F::sparse_zero() {
                for elem in &mut v {
                    *elem /= alfa;
                }
            }
        }
        // Plane rotation eliminating beta from the lower-bidiagonal factor.
        let (cs, sn, rho) = sym_ortho(rhobar, beta);
        let theta = sn * alfa;
        rhobar = -cs * alfa;
        let phi = cs * phibar;
        phibar = sn * phibar;
        // Update the solution and the direction vector.
        let t1 = phi / rho;
        let t2 = -theta / rho;
        for i in 0..x.len() {
            x[i] += t1 * w[i];
        }
        for i in 0..w.len() {
            w[i] = v[i] + t2 * w[i];
        }
        // |phibar| is the LSQR estimate of the current residual norm.
        let residual_norm = phibar.abs();
        if residual_norm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }
    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: phibar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
/// LSMR solver for (possibly rectangular) least-squares problems
/// `min ||A x - b||_2`, using Golub–Kahan bidiagonalization with a second
/// level of plane rotations so that `||A^T r||` decreases monotonically.
///
/// NOTE(review): `options.btol` and `options.damp` are not read by this
/// implementation; the stopping test compares `|zetabar|` (an estimate of
/// `||A^T r||`) against `max(atol, rtol * ||b||)`.
///
/// # Errors
/// Returns `SparseError::DimensionMismatch` if `b` or `options.x0` has the
/// wrong length for the operator's shape.
#[allow(dead_code)]
pub fn lsmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    use scirs2_core::numeric::One;
    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }
    let zero = F::sparse_zero();
    let one = <F as One>::one();
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![zero; n],
    };
    // u starts as the residual b - A x (skip the matvec for a zero x0).
    let mut u: Vec<F> = b.to_vec();
    let normb = norm2(b);
    if x.iter().any(|&xi| xi != zero) {
        let ax = a.matvec(&x)?;
        for i in 0..u.len() {
            u[i] -= ax[i];
        }
    }
    let mut beta = norm2(&u);
    if beta == zero {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: zero,
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }
    // First bidiagonalization pair: normalized u, then v = A^T u normalized.
    for elem in &mut u {
        *elem /= beta;
    }
    let mut v = a.rmatvec(&u)?;
    let mut alpha = norm2(&v);
    if alpha > zero {
        for elem in &mut v {
            *elem /= alpha;
        }
    }
    // Rotation state for the two cascaded QR factorizations.
    let mut alphabar = alpha;
    let mut zetabar = alpha * beta;
    let mut rho = one;
    let mut rhobar = one;
    let mut cbar = one;
    let mut sbar = zero;
    // h / hbar are the LSMR direction vectors.
    let mut h = v.clone();
    let mut hbar: Vec<F> = vec![zero; n];
    let tolerance = F::max(options.atol, options.rtol * normb);
    for itn in 0..options.max_iter {
        // Continue the bidiagonalization (same recurrence as in `lsqr`).
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alpha * u[i];
        }
        beta = norm2(&u);
        if beta > zero {
            for elem in &mut u {
                *elem /= beta;
            }
            let atu = a.rmatvec(&u)?;
            for i in 0..v.len() {
                v[i] = atu[i] - beta * v[i];
            }
            alpha = norm2(&v);
            if alpha > zero {
                for elem in &mut v {
                    *elem /= alpha;
                }
            }
        }
        // First rotation: eliminate beta from the bidiagonal factor.
        let rhoold = rho;
        let (c, s, rho_new) = sym_ortho(alphabar, beta);
        rho = rho_new;
        let thetanew = s * alpha;
        alphabar = c * alpha;
        // Second rotation: keep the normal-equations factor triangular.
        let rhobarold = rhobar;
        let zetaold = zetabar / (rhoold * rhobarold);
        let thetabar = sbar * rho;
        let rhotemp = cbar * rho;
        let (cbar_new, sbar_new, rhobar_new) = sym_ortho(rhotemp, thetanew);
        cbar = cbar_new;
        sbar = sbar_new;
        rhobar = rhobar_new;
        let zeta = cbar * zetabar;
        zetabar = -sbar * zetabar;
        // Update the direction vectors and the solution.
        let factor = thetabar * rho / (rhoold * rhobarold);
        for i in 0..n {
            hbar[i] = h[i] - factor * hbar[i];
        }
        let update_factor = zeta / (rho * rhobar);
        for i in 0..n {
            x[i] += update_factor * hbar[i];
        }
        let h_factor = thetanew / rho;
        for i in 0..n {
            h[i] = v[i] - h_factor * h[i];
        }
        // |zetabar| estimates ||A^T r|| — the LSMR optimality measure.
        let normar = zetabar.abs();
        if normar <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: itn + 1,
                residual_norm: normar,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }
    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: zetabar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
/// TFQMR (transpose-free quasi-minimal residual) solver for general square
/// systems `A x = b`; unlike BiCG it needs only `matvec`, no `rmatvec`.
///
/// The reported `residual_norm` is the quasi-minimal-residual estimate
/// `tau`, not the true `||b - A x||`.
///
/// NOTE(review): this alternates even/odd half-steps, but on odd steps
/// `alpha_used` is recomputed as `rho / dot(rstar, v)` with `v` unchanged
/// since the even step, and `u_next` is only filled on even steps — verify
/// the odd-step recurrences against a reference TFQMR (Freund 1993) before
/// modifying.
///
/// # Errors
/// * `SparseError::ValueError` if `a` is not square.
/// * `SparseError::DimensionMismatch` if `b` or `options.x0` has the wrong length.
#[allow(dead_code)]
pub fn tfqmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: TFQMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for TFQMR solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }
    let n = rows;
    let one = F::sparse_one();
    let zero = F::sparse_zero();
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![zero; n],
    };
    // Initial residual and early exit when x0 is already good enough.
    let ax = a.matvec(&x)?;
    let r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();
    let r0norm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);
    if r0norm <= tolerance || r0norm == zero {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: r0norm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }
    // rstar is the fixed shadow vector; u, w, v, d are the TFQMR recurrences.
    let mut u = r.clone();
    let mut w = r.clone();
    let rstar = r.clone();
    let ar = a.matvec(&r)?;
    let mut v = match &options.preconditioner {
        Some(m) => m.matvec(&ar)?,
        None => ar,
    };
    let mut uhat = v.clone();
    let mut d: Vec<F> = vec![zero; n];
    let mut theta = zero;
    let mut eta = zero;
    let mut rho = dot(&rstar, &r);
    let mut rho_last = rho;
    // tau tracks the quasi-minimal residual estimate.
    let mut tau = r0norm;
    for iter in 0..options.max_iter {
        let even = iter % 2 == 0;
        let mut alpha = zero;
        let mut u_next: Vec<F> = vec![zero; n];
        if even {
            // Even half-step: compute alpha and the next u vector.
            let vtrstar = dot(&rstar, &v);
            if vtrstar == zero {
                return Ok(IterationResult {
                    x,
                    iterations: iter,
                    residual_norm: tau,
                    converged: false,
                    message: "TFQMR breakdown: v'*rstar = 0".to_string(),
                });
            }
            alpha = rho / vtrstar;
            for i in 0..n {
                u_next[i] = u[i] - alpha * v[i];
            }
        }
        // On odd steps v is unchanged from the even step, so this recomputes
        // the same alpha (see NOTE above).
        let alpha_used = if even {
            alpha
        } else {
            rho / dot(&rstar, &v)
        };
        for i in 0..n {
            w[i] -= alpha_used * uhat[i];
        }
        // Guard the theta^2/alpha term against division by a tiny alpha.
        let theta_sq_over_alpha =
            if alpha_used.abs() > F::from(1e-300).expect("Failed to convert constant to float") {
                theta * theta / alpha_used
            } else {
                zero
            };
        for i in 0..n {
            d[i] = u[i] + theta_sq_over_alpha * eta * d[i];
        }
        // QMR smoothing update of theta, c, tau, eta.
        theta = norm2(&w) / tau;
        let c = one / (one + theta * theta).sqrt();
        tau = tau * theta * c;
        eta = c * c * alpha_used;
        let z = match &options.preconditioner {
            Some(m) => m.matvec(&d)?,
            None => d.clone(),
        };
        for i in 0..n {
            x[i] += eta * z[i];
        }
        // Standard TFQMR bound: ||r|| <= tau * sqrt(iter + 1).
        let iter_f = F::from(iter + 1).expect("Failed to convert to float");
        if tau * iter_f.sqrt() < tolerance {
            return Ok(IterationResult {
                x,
                iterations: iter + 1,
                residual_norm: tau,
                converged: true,
                message: "Converged".to_string(),
            });
        }
        if !even {
            // Odd step: refresh rho, beta and rebuild u, v, uhat.
            rho = dot(&rstar, &w);
            if rho == zero {
                return Ok(IterationResult {
                    x,
                    iterations: iter + 1,
                    residual_norm: tau,
                    converged: false,
                    message: "TFQMR breakdown: rho = 0".to_string(),
                });
            }
            let beta = rho / rho_last;
            for i in 0..n {
                u[i] = w[i] + beta * u[i];
            }
            for i in 0..n {
                v[i] = beta * uhat[i] + beta * beta * v[i];
            }
            let au = a.matvec(&u)?;
            uhat = match &options.preconditioner {
                Some(m) => m.matvec(&au)?,
                None => au,
            };
            for i in 0..n {
                v[i] += uhat[i];
            }
        } else {
            // Even step: advance u to u_next and refresh uhat = (M) A u_next.
            let au_next = a.matvec(&u_next)?;
            uhat = match &options.preconditioner {
                Some(m) => m.matvec(&au_next)?,
                None => au_next,
            };
            u = u_next;
            rho_last = rho;
        }
    }
    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: tau,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
#[cfg(test)]
mod tests {
    // Solver smoke tests against identity, diagonal, and small CSR operators.
    use super::*;
    use crate::csr::CsrMatrix;
    use crate::linalg::interface::AsLinearOperator;
    #[test]
    fn test_cg_identity() {
        // With A = I, the solution is b itself.
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(&identity, &b, options).expect("Operation failed");
        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }
    #[test]
    fn test_cg_diagonal() {
        // Diagonal SPD system: x_i = b_i / d_i.
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = CGOptions::default();
        let result = cg(&diag, &b, options).expect("Operation failed");
        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }
    #[test]
    fn test_cg_sparse_matrix() {
        // Symmetric, diagonally dominant 3x3 CSR matrix; check A x ≈ b.
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -1.0, -1.0, 4.0, -1.0, -1.0, -1.0, 4.0];
        let shape = (3, 3);
        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();
        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(op.as_ref(), &b, options).expect("Operation failed");
        assert!(result.converged);
        let ax = op.matvec(&result.x).expect("Operation failed");
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }
    #[test]
    fn test_bicgstab_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&identity, &b, options).expect("Operation failed");
        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }
    #[test]
    fn test_bicgstab_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&diag, &b, options).expect("Operation failed");
        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }
    #[test]
    fn test_bicgstab_non_symmetric() {
        // Non-symmetric system — exercises the BiCGSTAB advantage over CG.
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -2.0, -1.0, 4.0, -1.0, 0.0, -1.0, 3.0];
        let shape = (3, 3);
        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();
        let b = vec![1.0, 2.0, 1.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(op.as_ref(), &b, options).expect("Operation failed");
        assert!(result.converged);
        let ax = op.matvec(&result.x).expect("Operation failed");
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }
    #[test]
    fn test_lsqr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSQROptions::default();
        let result = lsqr(&identity, &b, options).expect("Operation failed");
        println!("Identity test - Expected: {:?}, Got: {:?}", b, result.x);
        println!(
            "Converged: {}, Iterations: {}",
            result.converged, result.iterations
        );
        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }
    #[test]
    fn test_lsqr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSQROptions::default();
        let result = lsqr(&diag, &b, options).expect("Operation failed");
        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }
    #[test]
    fn test_lsmr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSMROptions::default();
        let result = lsmr(&identity, &b, options).expect("Operation failed");
        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }
    #[test]
    fn test_lsmr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSMROptions::default();
        let result = lsmr(&diag, &b, options).expect("Operation failed");
        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }
    #[test]
    fn test_tfqmr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&identity, &b, options).expect("Operation failed");
        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }
    #[test]
    fn test_tfqmr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&diag, &b, options).expect("Operation failed");
        assert!(result.converged, "TFQMR did not converge");
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }
    #[test]
    fn test_lsqr_least_squares() {
        // Overdetermined 4x2 system; LSQR should reach a least-squares solution.
        let rows = vec![0, 0, 1, 1, 2, 2, 3, 3];
        let cols = vec![0, 1, 0, 1, 0, 1, 0, 1];
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let shape = (4, 2);
        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();
        let b = vec![1.0, 2.0, 3.0, 4.0];
        let options = LSQROptions::default();
        let result = lsqr(op.as_ref(), &b, options).expect("Operation failed");
        assert!(result.converged || result.residual_norm < 1e-6);
    }
    #[test]
    fn test_solver_options_defaults() {
        // Sanity-check the Default impls for the option structs.
        let lsqr_opts = LSQROptions::<f64>::default();
        assert_eq!(lsqr_opts.max_iter, 1000);
        assert!(lsqr_opts.x0.is_none());
        let lsmr_opts = LSMROptions::<f64>::default();
        assert_eq!(lsmr_opts.max_iter, 1000);
        assert_eq!(lsmr_opts.damp, 0.0);
        assert!(lsmr_opts.x0.is_none());
        let tfqmr_opts = TFQMROptions::<f64>::default();
        assert_eq!(tfqmr_opts.max_iter, 1000);
        assert!(tfqmr_opts.x0.is_none());
        assert!(tfqmr_opts.preconditioner.is_none());
    }
}