use crate::error::{SparseError, SparseResult};
use crate::linalg::interface::LinearOperator;
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::iter::Sum;
/// Outcome of a Krylov-subspace iterative solve (see [`symmlq`]).
#[derive(Clone)]
pub struct KrylovResult<F> {
/// Final approximate solution vector.
pub x: Vec<F>,
/// Number of iterations performed (0 if the initial guess already converged).
pub iterations: usize,
/// Residual norm at termination (true residual, recomputed from `A x - b`
/// on the solver's exit paths).
pub residual: F,
/// Whether the stopping tolerance was met.
pub converged: bool,
/// Per-iteration residual bounds; populated only when history tracking
/// is enabled in the solver options.
pub residual_history: Vec<F>,
/// Human-readable termination reason.
pub message: String,
}
impl<F: Float> std::fmt::Debug for KrylovResult<F> {
    /// Compact debug view. `x` and `residual` are omitted because the bare
    /// `F: Float` bound does not guarantee `Debug`; the residual history is
    /// summarised by its length instead of being dumped element by element.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("KrylovResult");
        builder.field("iterations", &self.iterations);
        builder.field("converged", &self.converged);
        builder.field("history_len", &self.residual_history.len());
        builder.field("message", &self.message);
        builder.finish()
    }
}
impl<F: Float> KrylovResult<F> {
    /// Build a result flagged as converged, with the standard message.
    pub fn converged(x: Vec<F>, iters: usize, res: F, hist: Vec<F>) -> Self {
        Self {
            x,
            iterations: iters,
            residual: res,
            converged: true,
            residual_history: hist,
            message: String::from("Converged"),
        }
    }

    /// Build a result flagged as NOT converged (iteration budget exhausted).
    pub fn not_converged(x: Vec<F>, iters: usize, res: F, hist: Vec<F>) -> Self {
        Self {
            x,
            iterations: iters,
            residual: res,
            converged: false,
            residual_history: hist,
            message: String::from("Maximum iterations reached without convergence"),
        }
    }
}
/// Configuration for [`symmlq`].
pub struct SymmlqOptions<F> {
/// Maximum number of iterations (default: 1000).
pub max_iter: usize,
/// Relative tolerance; scaled by `||b||` to form the stopping threshold
/// (default: 1e-8).
pub rtol: F,
/// Absolute tolerance floor added to the relative term (default: 1e-12).
pub atol: F,
/// Optional initial guess; the zero vector is used when `None`.
pub x0: Option<Vec<F>>,
/// Optional preconditioner applied via `matvec` (i.e. it represents
/// `M^{-1}`). The solver errors on a negative preconditioned inner
/// product, so M is expected to be symmetric positive definite.
pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
/// When true, record the per-iteration residual bound in
/// `KrylovResult::residual_history`.
pub track_history: bool,
}
impl<F: Float> Default for SymmlqOptions<F> {
fn default() -> Self {
Self {
max_iter: 1000,
rtol: F::from(1e-8).expect("constant"),
atol: F::from(1e-12).expect("constant"),
x0: None,
preconditioner: None,
track_history: false,
}
}
}
/// Solve the symmetric system `A x = b` with the SYMMLQ/MINRES-style
/// Lanczos iteration implemented in `run_symmlq`.
///
/// # Arguments
/// * `a` — symmetric `n x n` linear operator
/// * `b` — right-hand side of length `n`
/// * `options` — tolerances, iteration cap, optional initial guess and
///   optional (SPD) preconditioner
///
/// # Errors
/// * `SparseError::DimensionMismatch` when `a` is not `n x n`
/// * `SparseError::ComputationError` when the preconditioned inner product
///   `r0^T M^{-1} r0` is negative, which indicates a non-symmetric or
///   indefinite preconditioner (consistent with the in-loop check in
///   `run_symmlq`)
pub fn symmlq<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: SymmlqOptions<F>,
) -> SparseResult<KrylovResult<F>>
where
    F: Float + SparseElement + NumAssign + Sum + 'static,
{
    let n = b.len();
    let (m, k) = a.shape();
    if m != n || k != n {
        // Report the dimension that actually disagrees: rows if they are
        // wrong, otherwise columns (the original always reported rows).
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: if m != n { m } else { k },
        });
    }
    let zero = F::sparse_zero();
    let bnorm = norm2(b);
    // Mixed absolute/relative stopping threshold.
    let tol = options.atol + options.rtol * bnorm;
    let x0: Vec<F> = options.x0.unwrap_or_else(|| vec![zero; n]);
    let ax0 = a.matvec(&x0)?;
    let r0: Vec<F> = b.iter().zip(ax0.iter()).map(|(&bi, &axi)| bi - axi).collect();
    let y0 = precond_apply(options.preconditioner.as_deref(), &r0)?;
    // Preconditioned inner product r0^T M^{-1} r0; must be non-negative
    // when M is symmetric positive definite.
    let bsq = dot(&r0, &y0);
    if bsq < zero {
        // A negative inner product can only come from a bad preconditioner;
        // fail loudly instead of returning a misleading "zero residual".
        return Err(SparseError::ComputationError(
            "Non-symmetric or indefinite M".to_string(),
        ));
    }
    if bsq <= zero {
        // Genuinely zero (preconditioned) initial residual: nothing to do.
        let res = norm2(&r0);
        return Ok(KrylovResult {
            x: x0,
            iterations: 0,
            residual: res,
            converged: res <= tol,
            residual_history: vec![],
            message: "Initial residual is zero".to_string(),
        });
    }
    let beta1: F = bsq.sqrt();
    // The initial guess already satisfies the tolerance.
    if beta1 <= tol {
        return Ok(KrylovResult::converged(x0, 0, beta1, vec![]));
    }
    run_symmlq(
        a,
        b,
        x0,
        y0,
        beta1,
        options.preconditioner.as_deref(),
        options.max_iter,
        tol,
        options.track_history,
    )
}
#[allow(clippy::too_many_arguments)]
/// Core Lanczos iteration behind [`symmlq`].
///
/// Runs a preconditioned Lanczos process with a Givens-rotation QR update
/// whose recurrences (`delta`/`gbar`/`epsln`/`dbar`, `cs`/`sn`, `phi`/
/// `phibar`) match the classic MINRES formulation; `x_mr` is the
/// minimum-residual iterate and `xl2 = x_mr - phibar * w` is the point
/// actually returned. NOTE(review): confirm the `xl2` transfer against the
/// Paige–Saunders SYMMLQ derivation — the SYMMLQ-specific state declared
/// below (`cl`, `sl`, `zl2`, `zbar`, `gamma_l`, `wl`, `wl2`) is never
/// updated and is only consumed by the `let _ = ...` at the end.
///
/// Invariants relied on by the statement ordering:
/// * `beta` holds beta_k and `oldb` beta_{k-1}; `oldb` is assigned BEFORE
///   `beta` is recomputed each iteration.
/// * `w1`/`w2`/`w` rotate so that `w1 = w_{k-2}`, `w2 = w_{k-1}`.
///
/// Returns a `KrylovResult` whose `residual` is the TRUE residual norm
/// `||b - A xl2||`, recomputed on every exit path.
fn run_symmlq<F>(
a: &dyn LinearOperator<F>,
b: &[F],
x_init: Vec<F>,
y0: Vec<F>, beta1: F,
precond: Option<&dyn LinearOperator<F>>,
max_iter: usize,
tol: F,
track_hist: bool,
) -> SparseResult<KrylovResult<F>>
where
F: Float + SparseElement + NumAssign + Sum + 'static,
{
let n = b.len();
let z = F::sparse_zero();
let one = F::sparse_one();
let eps = F::epsilon();
let mut oldb = z;
let mut beta = beta1;
// r2 starts as the initial residual b - A*x_init. NOTE(review): this
// repeats the matvec the caller already performed to compute `beta1`,
// and the zip with `x_init` is ignored by the closure — candidate for
// cleanup once the private call contract can change.
let mut r1: Vec<F> = vec![z; n]; let mut r2: Vec<F> = b.iter().zip(x_init.iter()).zip(
a.matvec(&x_init)?.iter()
).map(|((&bi,_),&axi)| bi - axi).collect();
let mut y_vec: Vec<F> = y0;
// QR state. `cs = -1` is the conventional MINRES initialisation of the
// Givens cosine before the first rotation.
let mut dbar = z; let mut epsln = z; let mut phibar = beta1; let mut cs = F::from(-1.0).expect("constant"); let mut sn = z;
// Search-direction history: w = w_k, w2 = w_{k-1} (w1 = w_{k-2} is
// materialised inside the loop).
let mut w : Vec<F> = vec![z; n]; let mut w2: Vec<F> = vec![z; n];
// Minimum-residual iterate and the returned (transferred) iterate.
let mut x_mr: Vec<F> = x_init.clone();
let mut xl2: Vec<F> = x_init;
// SYMMLQ-specific state — declared but never updated (see fn docs).
let mut cl: F = one; let mut sl: F = z; let mut zl2: F = z; let mut zbar = beta1; let mut gamma_l = beta1;
let mut wl : Vec<F> = vec![z; n];
let mut wl2: Vec<F> = vec![z; n];
let mut history: Vec<F> = Vec::new();
let mut iters = 0_usize;
for itn in 0..max_iter {
iters = itn + 1;
// Normalise the preconditioned residual into the next Lanczos vector v.
let s = one / beta;
let mut v: Vec<F> = vec![z; n];
for i in 0..n {
v[i] = s * y_vec[i];
}
let mut y_new = a.matvec(&v)?;
// Three-term Lanczos recurrence: subtract the beta_{k-1} term (skipped
// on the first pass, where there is no r1 yet).
if itn >= 1 {
for i in 0..n {
y_new[i] -= (beta / oldb) * r1[i];
}
}
let alfa: F = dot(&v, &y_new);
// ... then the alpha_k term (beta here is still beta_k; it is only
// advanced further down).
for i in 0..n {
y_new[i] -= (alfa / beta) * r2[i];
}
r1 = r2;
r2 = y_new;
y_vec = precond_apply(precond, &r2)?;
oldb = beta;
// beta_{k+1}^2 = r2^T M^{-1} r2; negative means M is not SPD.
let bsq: F = dot(&r2, &y_vec);
if bsq < z {
return Err(SparseError::ComputationError("Non-symmetric or indefinite M".to_string()));
}
beta = if bsq > z { bsq.sqrt() } else { z };
// Apply the previous Givens rotation to the new tridiagonal column.
let oldeps = epsln;
let delta = cs * dbar + sn * alfa; let gbar = sn * dbar - cs * alfa; epsln = sn * beta; dbar = -cs * beta;
// Form the next rotation; clamp gamma away from zero for safe division.
let gamma = (gbar * gbar + beta * beta).sqrt();
let gamma_c = if gamma > eps { gamma } else { eps };
cs = gbar / gamma_c;
sn = beta / gamma_c;
let phi = cs * phibar;
phibar = sn * phibar;
let denom = one / gamma_c;
// Rotate the direction history and build the new direction w_k.
let w1 = w2; w2 = w; let mut w_new: Vec<F> = vec![z; n];
for i in 0..n {
w_new[i] = (v[i] - oldeps * w1[i] - delta * w2[i]) * denom;
}
// Advance the minimum-residual iterate.
for i in 0..n {
x_mr[i] += phi * w_new[i];
}
w = w_new;
// Transfer to the returned iterate (see fn docs).
for i in 0..n {
xl2[i] = x_mr[i] - phibar * w[i];
}
// phibar bounds the (preconditioned) residual norm of x_mr.
let res_bound = phibar.abs();
if track_hist { history.push(res_bound); }
if res_bound < tol {
// Verify with the true residual before declaring success. NOTE(review):
// `converged` is reported even if true_res exceeds tol here; the bound
// and the true residual of xl2 can differ — confirm intended.
let ax = a.matvec(&xl2)?;
let r: Vec<F> = b.iter().zip(ax.iter()).map(|(&bi,&axi)| bi-axi).collect();
let true_res = norm2(&r);
return Ok(KrylovResult::converged(xl2, iters, true_res, history));
}
// beta ~ 0 means the Krylov space is exhausted (lucky breakdown).
if beta <= eps {
break;
}
}
// Budget exhausted or breakdown: report the true residual of xl2.
let ax_f = a.matvec(&xl2)?;
let r_f: Vec<F> = b.iter().zip(ax_f.iter()).map(|(&bi,&axi)| bi-axi).collect();
let res_f = norm2(&r_f);
// Silence unused-variable warnings for the never-updated SYMMLQ state.
let _ = (cl, sl, zl2, zbar, gamma_l, wl, wl2);
if res_f < tol {
Ok(KrylovResult::converged(xl2, iters, res_f, history))
} else {
Ok(KrylovResult::not_converged(xl2, iters, res_f, history))
}
}
/// Apply the optional preconditioner to `v`; with no preconditioner the
/// identity is used (the input is simply copied).
fn precond_apply<F: Float>(
    m: Option<&dyn LinearOperator<F>>,
    v: &[F],
) -> SparseResult<Vec<F>> {
    if let Some(op) = m {
        op.matvec(v)
    } else {
        Ok(v.to_vec())
    }
}
#[inline]
/// Dot product of two slices; `zip` truncates to the shorter of the two.
fn dot<F: Float + Sum>(a: &[F], b: &[F]) -> F {
    let products = a.iter().zip(b).map(|(&x, &y)| x * y);
    products.sum()
}
#[inline]
/// Euclidean (l2) norm of a slice.
fn norm2<F: Float + Sum>(v: &[F]) -> F {
    let sum_of_squares: F = v.iter().map(|&x| x * x).sum();
    sum_of_squares.sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr::CsrMatrix;
    use crate::linalg::interface::{AsLinearOperator, DiagonalOperator, IdentityOperator};

    /// Relative residual ||Ax - b|| / ||b||, guarding against a zero ||b||.
    fn rel_res(op: &dyn LinearOperator<f64>, x: &[f64], b: &[f64]) -> f64 {
        let ax = op.matvec(x).expect("mv");
        let rn: f64 = ax
            .iter()
            .zip(b)
            .map(|(ai, bi)| (ai - bi).powi(2))
            .sum::<f64>()
            .sqrt();
        let bn: f64 = b.iter().map(|bi| bi * bi).sum::<f64>().sqrt();
        rn / bn.max(1e-300)
    }

    /// Exercises the `KrylovResult` constructors and their messages.
    #[test]
    fn test_krylov_result_api() {
        let r: KrylovResult<f64> = KrylovResult::converged(
            vec![1.0, 2.0],
            5,
            1e-12,
            vec![1.0, 0.1, 0.01, 0.001, 1e-12],
        );
        assert!(r.converged);
        assert_eq!(r.iterations, 5);
        assert_eq!(r.residual_history.len(), 5);
        assert!(r.residual < 1e-10);
        let r2: KrylovResult<f64> = KrylovResult::not_converged(vec![0.0], 100, 1.0, vec![1.0]);
        assert!(!r2.converged);
        assert!(r2.message.contains("convergence"));
    }

    /// Identity system: solution is `b` itself; should converge immediately.
    #[test]
    fn test_symmlq_identity() {
        let id = IdentityOperator::<f64>::new(4);
        let b = vec![1.0, 2.0, 3.0, 4.0];
        let r = symmlq(&id, &b, SymmlqOptions::default()).expect("identity");
        let rr = rel_res(&id, &r.x, &b);
        assert!(rr < 1e-7, "rel_res={} converged={}", rr, r.converged);
    }

    /// Diagonal SPD system with a known simple solution.
    #[test]
    fn test_symmlq_diagonal_spd() {
        let d = DiagonalOperator::new(vec![1.0_f64, 2.0, 3.0]);
        let b = vec![1.0, 4.0, 9.0];
        let r = symmlq(&d, &b, SymmlqOptions::default()).expect("diag");
        let rr = rel_res(&d, &r.x, &b);
        assert!(rr < 1e-7, "rel_res={}", rr);
    }

    /// Tridiagonal 1-D Poisson matrix (classic SPD test problem).
    #[test]
    fn test_symmlq_1d_poisson() {
        let (mat, b) = build_1d_poisson(5);
        let op = mat.as_linear_operator();
        let opts = SymmlqOptions { rtol: 1e-10, ..Default::default() };
        let r = symmlq(op.as_ref(), &b, opts).expect("poisson");
        let rr = rel_res(op.as_ref(), &r.x, &b);
        assert!(rr < 1e-7, "rel_res={} converged={}", rr, r.converged);
    }

    /// Small dense-ish SPD CSR matrix solved through the operator interface.
    #[test]
    fn test_symmlq_spd_3x3() {
        let rows = vec![0, 0, 1, 1, 1, 2, 2];
        let cols = vec![0, 1, 0, 1, 2, 1, 2];
        let data = vec![4.0_f64, -1.0, -1.0, 4.0, -1.0, -1.0, 4.0];
        let mat = CsrMatrix::new(data, rows, cols, (3, 3)).expect("csr");
        let op = mat.as_linear_operator();
        let b = vec![1.0, 0.0, 1.0];
        let opts = SymmlqOptions { rtol: 1e-9, ..Default::default() };
        let r = symmlq(op.as_ref(), &b, opts).expect("3x3");
        let rr = rel_res(op.as_ref(), &r.x, &b);
        assert!(rr < 1e-7, "rel_res={}", rr);
    }

    /// 5-point 2-D Laplacian on a 3x3 grid.
    #[test]
    fn test_symmlq_5pt_laplacian_3x3() {
        let (mat, b) = build_2d_laplacian(3);
        let op = mat.as_linear_operator();
        let opts = SymmlqOptions { max_iter: 500, rtol: 1e-10, ..Default::default() };
        let r = symmlq(op.as_ref(), &b, opts).expect("laplacian");
        let rr = rel_res(op.as_ref(), &r.x, &b);
        assert!(rr < 1e-7, "rel_res={} converged={}", rr, r.converged);
    }

    /// A near-exact initial guess should still yield an accurate solution.
    #[test]
    fn test_symmlq_initial_guess() {
        let id = IdentityOperator::<f64>::new(3);
        let b = vec![2.0, 3.0, 5.0];
        let x0 = vec![1.9, 2.9, 4.9];
        let opts = SymmlqOptions { x0: Some(x0), ..Default::default() };
        let r = symmlq(&id, &b, opts).expect("x0");
        let rr = rel_res(&id, &r.x, &b);
        assert!(rr < 1e-7, "rel_res={}", rr);
    }

    /// `track_history: true` must actually record per-iteration residuals.
    #[test]
    fn test_symmlq_history() {
        let id = IdentityOperator::<f64>::new(5);
        let b = vec![1.0; 5];
        let opts = SymmlqOptions { track_history: true, ..Default::default() };
        let r = symmlq(&id, &b, opts).expect("hist");
        assert!(r.converged);
        // Previously missing: the test's whole point is that the history
        // was recorded, so assert it is non-empty.
        assert!(!r.residual_history.is_empty());
    }

    /// Build the n x n tridiagonal [-1, 2, -1] 1-D Poisson matrix and rhs 1.
    fn build_1d_poisson(n: usize) -> (CsrMatrix<f64>, Vec<f64>) {
        let mut r = vec![];
        let mut c = vec![];
        let mut d = vec![];
        for i in 0..n {
            r.push(i);
            c.push(i);
            d.push(2.0);
            if i > 0 {
                r.push(i);
                c.push(i - 1);
                d.push(-1.0);
            }
            if i < n - 1 {
                r.push(i);
                c.push(i + 1);
                d.push(-1.0);
            }
        }
        (CsrMatrix::new(d, r, c, (n, n)).expect("p"), vec![1.0; n])
    }

    /// Build the 5-point Laplacian on an m x m grid with rhs 1..=m*m.
    fn build_2d_laplacian(m: usize) -> (CsrMatrix<f64>, Vec<f64>) {
        let n = m * m;
        let mut rows = vec![];
        let mut cols = vec![];
        let mut data = vec![];
        for iy in 0..m {
            for ix in 0..m {
                let i = iy * m + ix;
                rows.push(i);
                cols.push(i);
                data.push(4.0);
                if ix > 0 {
                    rows.push(i);
                    cols.push(i - 1);
                    data.push(-1.0);
                }
                if ix < m - 1 {
                    rows.push(i);
                    cols.push(i + 1);
                    data.push(-1.0);
                }
                if iy > 0 {
                    rows.push(i);
                    cols.push(i - m);
                    data.push(-1.0);
                }
                if iy < m - 1 {
                    rows.push(i);
                    cols.push(i + m);
                    data.push(-1.0);
                }
            }
        }
        let a = CsrMatrix::new(data, rows, cols, (n, n)).expect("l");
        let b = (1..=n).map(|i| i as f64).collect();
        (a, b)
    }
}