use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, ScalarOperand};
use scirs2_core::numeric::{Float, NumAssign, One};
use std::iter::Sum;
use crate::error::{LinalgError, LinalgResult};
use crate::norm::vector_norm;
use crate::validation::validate_linear_system;
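/// Solves the symmetric positive definite system `A x = b` with the
/// (unpreconditioned) Conjugate Gradient method.
///
/// Starting from `x_0 = 0`, each iteration computes
/// `alpha = (r^T r) / (p^T A p)`, updates `x += alpha * p` and
/// `r -= alpha * A p`, then forms `beta = (r_new^T r_new) / (r^T r)` and
/// the next search direction `p = r + beta * p`, stopping once
/// `||r|| / ||b|| < tol`. Symmetry is checked explicitly; positive
/// definiteness is assumed.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[4.0, 1.0], [1.0, 3.0]];
/// let b = array![1.0, 2.0];
/// let x = conjugate_gradient(&a.view(), &b.view(), 10, 1e-10, None)?;
/// ```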
#[allow(dead_code)]
pub fn conjugate_gradient<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "Conjugate Gradient method")?;
let n = a.nrows();
for i in 0..n {
for j in (i + 1)..n {
if (a[[i, j]] - a[[j, i]]).abs()
> F::epsilon() * F::from(10.0).expect("Operation failed")
{
return Err(LinalgError::InvalidInputError(
"Matrix must be symmetric for conjugate gradient method".to_string(),
));
}
}
}
let mut x = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
// r_0 = b - A x_0; with x_0 = 0 this leaves r = b, but keep the general form.
let mut r = b.to_owned();
for i in 0..n {
for j in 0..n {
r[i] -= a[[i, j]] * x[j];
}
}
let mut p = r.clone();
let mut rsold = F::zero();
for i in 0..n {
rsold += r[i] * r[i];
}
if rsold.sqrt() < tol * b_norm {
return Ok(x);
}
let mut final_residual = None;
for _iter in 0..max_iter {
let mut ap = Array1::zeros(n);
for i in 0..n {
for j in 0..n {
ap[i] += a[[i, j]] * p[j];
}
}
let mut pap = F::zero();
for i in 0..n {
pap += p[i] * ap[i];
}
// Guard against breakdown: for an SPD matrix, p^T A p stays positive away from convergence.
if pap.abs() < F::epsilon() {
return Err(LinalgError::ComputationError(
"Conjugate gradient breakdown: p^T A p became too small".to_string(),
));
}
let alpha = rsold / pap;
for i in 0..n {
x[i] += alpha * p[i];
}
for i in 0..n {
r[i] -= alpha * ap[i];
}
let mut rsnew = F::zero();
for i in 0..n {
rsnew += r[i] * r[i];
}
let current_residual = rsnew.sqrt() / b_norm;
final_residual = Some(current_residual.to_f64().unwrap_or(1.0));
if current_residual < tol {
return Ok(x);
}
let beta = rsnew / rsold;
for i in 0..n {
p[i] = r[i] + beta * p[i];
}
rsold = rsnew;
}
Err(LinalgError::convergence_with_suggestions(
"Conjugate Gradient",
max_iter,
tol.to_f64().unwrap_or(1e-10),
final_residual,
))
}
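/// Solves `A x = b` with the Jacobi iteration.
///
/// Each sweep computes `x_new[i] = (b[i] - sum_{j != i} a[i][j] * x[j]) / a[i][i]`
/// from the previous iterate only, so all components are independent within
/// a sweep. The diagonal must be nonzero (checked), and convergence is
/// guaranteed for strictly diagonally dominant matrices.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[3.0, -1.0], [-1.0, 2.0]];
/// let b = array![5.0, 1.0];
/// let x = jacobi_method(&a.view(), &b.view(), 100, 1e-10, None)?;
/// ```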
#[allow(dead_code)]
pub fn jacobi_method<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "Jacobi method")?;
let n = a.nrows();
for i in 0..n {
if a[[i, i]].abs() < F::epsilon() {
return Err(LinalgError::SingularMatrixError(
"Diagonal element is zero in Jacobi method".to_string(),
));
}
}
let mut x = Array1::zeros(n);
let mut x_new = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
let mut final_residual = None;
for _iter in 0..max_iter {
for i in 0..n {
let mut sum = F::zero();
for j in 0..n {
if j != i {
sum += a[[i, j]] * x[j];
}
}
x_new[i] = (b[i] - sum) / a[[i, i]];
}
let mut diff = Array1::zeros(n);
for i in 0..n {
diff[i] = x_new[i] - x[i];
}
let diff_norm = vector_norm(&diff.view(), 2)?;
// Convergence is measured by the update size ||x_new - x|| / ||b||, not the true residual.
let relative_residual = diff_norm / b_norm;
final_residual = Some(relative_residual.to_f64().unwrap_or(1.0));
for i in 0..n {
x[i] = x_new[i];
}
if relative_residual < tol {
return Ok(x);
}
}
Err(LinalgError::convergence_with_suggestions(
"Jacobi iteration",
max_iter,
tol.to_f64().unwrap_or(1e-10),
final_residual,
))
}
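/// Solves `A x = b` with the Gauss-Seidel iteration.
///
/// Like Jacobi, but each sweep immediately reuses the freshly updated
/// components: `x[i] = (b[i] - sum_{j<i} a[i][j] x[j]
/// - sum_{j>i} a[i][j] x_prev[j]) / a[i][i]`. This typically converges
/// faster than Jacobi on diagonally dominant systems.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[3.0, -1.0], [-1.0, 2.0]];
/// let b = array![5.0, 1.0];
/// let x = gauss_seidel(&a.view(), &b.view(), 100, 1e-10, None)?;
/// ```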
#[allow(dead_code)]
pub fn gauss_seidel<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "Gauss-Seidel method")?;
let n = a.nrows();
for i in 0..n {
if a[[i, i]].abs() < F::epsilon() {
return Err(LinalgError::SingularMatrixError(
"Diagonal element is zero in Gauss-Seidel method".to_string(),
));
}
}
let mut x = Array1::zeros(n);
let mut x_prev = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
let mut final_residual = None;
for _iter in 0..max_iter {
for i in 0..n {
x_prev[i] = x[i];
}
for i in 0..n {
let mut sum1 = F::zero();
let mut sum2 = F::zero();
for j in 0..i {
sum1 += a[[i, j]] * x[j];
}
for j in (i + 1)..n {
sum2 += a[[i, j]] * x_prev[j];
}
x[i] = (b[i] - sum1 - sum2) / a[[i, i]];
}
let mut diff = Array1::zeros(n);
for i in 0..n {
diff[i] = x[i] - x_prev[i];
}
let diff_norm = vector_norm(&diff.view(), 2)?;
// As in the Jacobi solver, convergence is measured by the update size, not the true residual.
let relative_residual = diff_norm / b_norm;
final_residual = Some(relative_residual.to_f64().unwrap_or(1.0));
if relative_residual < tol {
return Ok(x);
}
}
Err(LinalgError::convergence_with_suggestions(
"Gauss-Seidel iteration",
max_iter,
tol.to_f64().unwrap_or(1e-10),
final_residual,
))
}
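/// Solves `A x = b` with Successive Over-Relaxation (SOR).
///
/// Each component blends the previous iterate with the Gauss-Seidel update:
/// `x[i] = (1 - omega) * x_prev[i] + omega * x_gs[i]`. The relaxation
/// factor `omega` must lie in `(0, 2)`; `omega = 1` reduces to
/// Gauss-Seidel, while `omega > 1` over-relaxes and can accelerate
/// convergence.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[3.0, -1.0], [-1.0, 2.0]];
/// let b = array![5.0, 1.0];
/// let x = successive_over_relaxation(&a.view(), &b.view(), 1.5, 100, 1e-10, None)?;
/// ```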
#[allow(dead_code)]
pub fn successive_over_relaxation<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
omega: F,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "Successive Over-Relaxation method")?;
if omega <= F::zero() || omega >= F::from(2.0).expect("Operation failed") {
return Err(LinalgError::InvalidInputError(
"Relaxation factor omega must be in range (0,2) for convergence".to_string(),
));
}
let n = a.nrows();
for i in 0..n {
if a[[i, i]].abs() < F::epsilon() {
return Err(LinalgError::SingularMatrixError(
"Diagonal element is zero in SOR method".to_string(),
));
}
}
let mut x = Array1::zeros(n);
let mut x_prev = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
for _ in 0..max_iter {
for i in 0..n {
x_prev[i] = x[i];
}
for i in 0..n {
let mut sum1 = F::zero();
let mut sum2 = F::zero();
for j in 0..i {
sum1 += a[[i, j]] * x[j];
}
for j in (i + 1)..n {
sum2 += a[[i, j]] * x_prev[j];
}
let gauss_seidel_update = (b[i] - sum1 - sum2) / a[[i, i]];
x[i] = (F::one() - omega) * x_prev[i] + omega * gauss_seidel_update;
}
let mut diff = Array1::zeros(n);
for i in 0..n {
diff[i] = x[i] - x_prev[i];
}
let diff_norm = vector_norm(&diff.view(), 2)?;
if diff_norm < tol * b_norm {
return Ok(x);
}
}
// Unlike the other solvers here, SOR returns the last iterate even when the
// tolerance was not reached within max_iter.
Ok(x)
}
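/// Solves `A x = b` with a geometric multigrid V-cycle scheme.
///
/// A grid hierarchy is built by roughly halving the problem size at each
/// level, using full-weighting restriction, linear-interpolation
/// prolongation, and Galerkin coarse-grid operators `A_c = R * A * P`.
/// Each of the `v_cycles` outer iterations runs one V-cycle with
/// `pre_smooth` and `post_smooth` Gauss-Seidel sweeps per level; problems
/// too small to coarsen fall back to plain Gauss-Seidel.
///
/// A minimal usage sketch (mirroring the tests below, where `a` is an
/// 8x8 tridiagonal system):
///
/// ```ignore
/// // 2 levels, 5 V-cycles, 2 pre- and 2 post-smoothing sweeps
/// let x = geometric_multigrid(&a.view(), &b.view(), 2, 5, 2, 2, 1e-6, None)?;
/// ```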
#[allow(dead_code)]
pub fn geometric_multigrid<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
levels: usize,
v_cycles: usize,
pre_smooth: usize,
post_smooth: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "Geometric Multigrid method")?;
let n = a.nrows();
let minsize: usize = 2;
let max_levels = if n > minsize {
(n as f64).log2().floor() as usize - (minsize as f64).log2().floor() as usize
} else {
0
};
let levels = levels.min(max_levels);
if levels == 0 {
return gauss_seidel(a, b, 100, tol, workers);
}
let minsize_needed = minsize << levels;
if n < minsize_needed {
return Err(LinalgError::InvalidInputError(format!(
"Matrix size too small for {levels} levels. Need at least size {minsize_needed}"
)));
}
let mut gridsizes = Vec::with_capacity(levels + 1);
let mut grid_matrices = Vec::with_capacity(levels + 1);
let mut restriction_operators = Vec::with_capacity(levels);
let mut prolongation_operators = Vec::with_capacity(levels);
gridsizes.push(n);
grid_matrices.push(a.to_owned());
for l in 1..=levels {
let n_coarse = if l == levels {
minsize
} else {
n >> l
};
gridsizes.push(n_coarse);
// Full-weighting restriction: coarse point i averages fine points 2i-1, 2i, 2i+1
// with weights 1/4, 1/2, 1/4.
let mut r = Array2::zeros((n_coarse, gridsizes[l - 1]));
for i in 0..n_coarse {
let i_fine = 2 * i;
let quarter = F::from(0.25).expect("Operation failed");
let half = F::from(0.5).expect("Operation failed");
if i_fine > 0 {
r[[i, i_fine - 1]] = quarter;
}
r[[i, i_fine]] = half;
if i_fine + 1 < gridsizes[l - 1] {
r[[i, i_fine + 1]] = quarter;
}
}
restriction_operators.push(r);
// Linear-interpolation prolongation: fine point 2i copies coarse point i,
// fine point 2i+1 averages coarse points i and i+1.
let mut p = Array2::zeros((gridsizes[l - 1], n_coarse));
for i in 0..n_coarse {
let i_fine = 2 * i;
p[[i_fine, i]] = F::one();
if i_fine + 1 < gridsizes[l - 1] && i + 1 < n_coarse {
p[[i_fine + 1, i]] = F::from(0.5).expect("Operation failed");
p[[i_fine + 1, i + 1]] = F::from(0.5).expect("Operation failed");
} else if i_fine + 1 < gridsizes[l - 1] {
p[[i_fine + 1, i]] = F::one();
}
}
prolongation_operators.push(p);
// Galerkin coarse-grid operator: A_coarse = R * A_fine * P.
let a_prev = &grid_matrices[l - 1];
let r = &restriction_operators[l - 1];
let p = &prolongation_operators[l - 1];
let ra = r.dot(a_prev);
let rap = ra.dot(p);
grid_matrices.push(rap);
}
let mut x = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
for _ in 0..v_cycles {
x = v_cycle(
&grid_matrices,
b,
&x,
&restriction_operators,
&prolongation_operators,
levels,
pre_smooth,
post_smooth,
)?;
let residual = compute_residual(&a.view(), &x.view(), b);
let residual_norm = vector_norm(&residual.view(), 2)?;
if residual_norm < tol * b_norm {
break;
}
}
Ok(x)
}
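/// One multigrid V-cycle: pre-smooth and restrict residuals down the grid
/// hierarchy, solve approximately on the coarsest level, then prolongate
/// the corrections back up with post-smoothing at each level.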
#[allow(clippy::too_many_arguments)]
#[allow(dead_code)]
fn v_cycle<F>(
grid_matrices: &[Array2<F>],
b: &ArrayView1<F>,
x: &Array1<F>,
restriction_operators: &[Array2<F>],
prolongation_operators: &[Array2<F>],
levels: usize,
pre_smooth: usize,
post_smooth: usize,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
let mut solutions = Vec::with_capacity(levels + 1);
let mut rhs_vectors = Vec::with_capacity(levels + 1);
solutions.push(x.clone());
rhs_vectors.push(b.to_owned());
// Downward sweep: pre-smooth, compute the residual, and restrict it to the coarser grid.
for l in 0..levels {
let mut x_current = solutions[l].clone();
for _ in 0..pre_smooth {
x_current = gauss_seidel_step(
&grid_matrices[l].view(),
&rhs_vectors[l].view(),
&x_current.view(),
)?;
}
solutions[l] = x_current.clone();
let residual = compute_residual(
&grid_matrices[l].view(),
&solutions[l].view(),
&rhs_vectors[l].view(),
);
let r = &restriction_operators[l];
let restricted_residual = r.dot(&residual);
let coarse_solution = Array1::zeros(grid_matrices[l + 1].nrows());
solutions.push(coarse_solution);
rhs_vectors.push(restricted_residual);
}
// Approximately solve on the coarsest grid with a fixed number of smoothing sweeps.
let coarsest_level = levels;
let coarsest_a = &grid_matrices[coarsest_level];
let coarsest_b = &rhs_vectors[coarsest_level];
let mut coarsest_error = solutions[coarsest_level].clone();
for _ in 0..100 {
coarsest_error = gauss_seidel_step(
&coarsest_a.view(),
&coarsest_b.view(),
&coarsest_error.view(),
)?;
}
solutions[coarsest_level] = coarsest_error;
// Upward sweep: prolongate the coarse-grid correction, apply it, and post-smooth.
for l in (0..levels).rev() {
let coarse_error = &solutions[l + 1];
let p = &prolongation_operators[l];
let prolongated_error = p.dot(coarse_error);
let mut corrected_solution = solutions[l].clone();
for i in 0..corrected_solution.len() {
corrected_solution[i] += prolongated_error[i];
}
for _ in 0..post_smooth {
corrected_solution = gauss_seidel_step(
&grid_matrices[l].view(),
&rhs_vectors[l].view(),
&corrected_solution.view(),
)?;
}
solutions[l] = corrected_solution;
}
Ok(solutions[0].clone())
}
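/// Solves the (possibly nonsymmetric) system `A x = b` with the
/// BiConjugate Gradient Stabilized method (BiCGSTAB).
///
/// Each iteration performs two matrix-vector products (`v = A p` and
/// `t = A s`) and two solution updates (a step `alpha` along `p` and a
/// stabilizing step `omega` along `s`), monitoring `rho = r_hat^T r` and
/// related quantities for breakdown. No symmetry is required.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[4.0, 1.0], [2.0, 3.0]]; // nonsymmetric
/// let b = array![1.0, 2.0];
/// let x = bicgstab(&a.view(), &b.view(), 100, 1e-10, None)?;
/// ```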
#[allow(dead_code)]
pub fn bicgstab<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "BiCGSTAB method")?;
let n = a.nrows();
let mut x = Array1::zeros(n);
let ax0 = a.dot(&x);
let mut r = Array1::zeros(n);
for i in 0..n {
r[i] = b[i] - ax0[i];
}
let r_hat = r.clone();
let mut p = Array1::zeros(n);
let mut v = Array1::zeros(n);
let mut s = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
let mut r_norm = vector_norm(&r.view(), 2)?;
if r_norm < tol * b_norm {
return Ok(x);
}
let mut rho_prev = F::one();
let mut alpha = F::one();
let mut omega = F::one();
for iter in 0..max_iter {
let rho = r_hat
.iter()
.zip(r.iter())
.fold(F::zero(), |sum, (&rh, &rc)| sum + rh * rc);
if rho.abs() < F::epsilon() {
return Err(LinalgError::ComputationError(
"BiCGSTAB breakdown: rho became too small".to_string(),
));
}
if iter == 0 {
for i in 0..n {
p[i] = r[i];
}
} else {
let beta = (rho / rho_prev) * (alpha / omega);
for i in 0..n {
p[i] = r[i] + beta * (p[i] - omega * v[i]);
}
}
v = a.dot(&p);
let r_hat_dot_v = r_hat
.iter()
.zip(v.iter())
.fold(F::zero(), |sum, (&rh, &vc)| sum + rh * vc);
if r_hat_dot_v.abs() < F::epsilon() {
return Err(LinalgError::ComputationError(
"BiCGSTAB breakdown: r_hat^T * v became too small".to_string(),
));
}
alpha = rho / r_hat_dot_v;
for i in 0..n {
s[i] = r[i] - alpha * v[i];
}
let s_norm = vector_norm(&s.view(), 2)?;
if s_norm < tol * b_norm {
for i in 0..n {
x[i] += alpha * p[i];
}
return Ok(x);
}
let t = a.dot(&s);
let t_dot_s = t
.iter()
.zip(s.iter())
.fold(F::zero(), |sum, (&tc, &sc)| sum + tc * sc);
let t_dot_t = t
.iter()
.zip(t.iter())
.fold(F::zero(), |sum, (&tc1, &tc2)| sum + tc1 * tc2);
if t_dot_t.abs() < F::epsilon() {
return Err(LinalgError::ComputationError(
"BiCGSTAB breakdown: t^T * t became too small".to_string(),
));
}
omega = t_dot_s / t_dot_t;
for i in 0..n {
x[i] += alpha * p[i] + omega * s[i];
}
for i in 0..n {
r[i] = s[i] - omega * t[i];
}
r_norm = vector_norm(&r.view(), 2)?;
if r_norm < tol * b_norm {
return Ok(x);
}
if omega.abs() < F::epsilon() {
return Err(LinalgError::ComputationError(
"BiCGSTAB breakdown: omega became too small".to_string(),
));
}
rho_prev = rho;
}
// If max_iter is exhausted, return the last iterate rather than a convergence error.
Ok(x)
}
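/// Solves the symmetric system `A x = b` with a MINRES-style iteration
/// built on the Lanczos process and Givens rotations.
///
/// Symmetry is checked up front. Very small systems (`n <= 4`) are
/// delegated to [`conjugate_gradient`], and if the iterate produced here
/// misses the tolerance, the routine falls back to CG as a safeguard.
///
/// A minimal usage sketch (mirroring the tests below):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
/// let a = array![[4.0, 1.0], [1.0, 3.0]];
/// let b = array![1.0, 2.0];
/// let x = minres(&a.view(), &b.view(), 100, 1e-10, None)?;
/// ```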
#[allow(dead_code)]
pub fn minres<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
workers: Option<usize>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
use crate::parallel;
parallel::configure_workers(workers);
validate_linear_system(a, b, "MINRES method")?;
let n = a.nrows();
for i in 0..n {
for j in (i + 1)..n {
if (a[[i, j]] - a[[j, i]]).abs()
> F::epsilon() * F::from(10.0).expect("Operation failed")
{
return Err(LinalgError::InvalidInputError(
"Matrix must be symmetric for MINRES method".to_string(),
));
}
}
}
// Very small systems: delegate to conjugate gradient (symmetry already verified).
if n <= 4 {
return conjugate_gradient(a, b, max_iter, tol, None);
}
let mut x = Array1::zeros(n);
let b_norm = vector_norm(b, 2)?;
if b_norm < F::epsilon() {
return Ok(x);
}
let r = compute_residual(a, &x.view(), b);
let r_norm = vector_norm(&r.view(), 2)?;
if r_norm < tol * b_norm {
return Ok(x);
}
// Lanczos vectors: v[0] is the current basis vector, v[1] the previous one.
let mut v = [Array1::zeros(n), Array1::zeros(n)];
let mut alpha = [F::zero(), F::zero(), F::zero()];
let mut beta = [F::zero(), r_norm, F::zero()];
for i in 0..n {
v[0][i] = r[i] / beta[1];
}
// Givens rotation state for the QR factorization of the tridiagonal Lanczos matrix.
let mut c = [F::one(), F::one()];
let mut s = [F::zero(), F::zero()];
let mut gamma = [F::zero(), F::zero(), F::zero()];
let mut gamma_bar = beta[1];
let mut w = [Array1::zeros(n), Array1::zeros(n)];
for _k in 0..max_iter {
let mut v_new = Array1::zeros(n);
for i in 0..n {
for j in 0..n {
v_new[i] += a[[i, j]] * v[0][j];
}
}
for i in 0..n {
v_new[i] -= beta[1] * v[1][i];
}
alpha[0] = F::zero();
for i in 0..n {
alpha[0] += v[0][i] * v_new[i];
}
for i in 0..n {
v_new[i] -= alpha[0] * v[0][i];
}
beta[2] = vector_norm(&v_new.view(), 2)?;
if beta[2] > F::epsilon() {
for i in 0..n {
v_new[i] /= beta[2];
}
}
gamma[0] = c[0] * alpha[0] - c[1] * s[0] * beta[1];
gamma[1] = s[0] * alpha[0] + c[1] * c[0] * beta[1];
gamma[2] = s[1] * beta[1];
let delta = (gamma[0] * gamma[0] + beta[2] * beta[2]).sqrt();
c[1] = c[0];
s[1] = s[0];
if delta > F::epsilon() {
c[0] = gamma[0] / delta;
s[0] = -beta[2] / delta;
} else {
c[0] = F::one();
s[0] = F::zero();
}
gamma_bar = s[0] * gamma_bar;
let mut h0 = v[0].clone();
for i in 0..n {
h0[i] = (h0[i] - gamma[2] * w[1][i] - gamma[1] * w[0][i]) / delta;
}
w[1] = w[0].clone();
w[0] = h0;
for i in 0..n {
x[i] += c[0] * gamma_bar * w[0][i];
}
let r_new_norm = gamma_bar.abs();
if r_new_norm < tol * b_norm {
break;
}
gamma_bar *= -s[0];
v[1] = v[0].clone();
v[0] = v_new;
beta[1] = beta[2];
}
// Safeguard: if the MINRES iterate missed the tolerance, fall back to CG.
let final_residual = compute_residual(a, &x.view(), b);
let final_res_norm = vector_norm(&final_residual.view(), 2)?;
if final_res_norm > tol * b_norm {
x = conjugate_gradient(a, b, max_iter, tol, None)?;
}
Ok(x)
}
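/// Computes the residual `r = b - A x`.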
#[allow(dead_code)]
fn compute_residual<F>(a: &ArrayView2<F>, x: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
let ax = a.dot(x);
let mut r = Array1::zeros(b.len());
for i in 0..b.len() {
r[i] = b[i] - ax[i];
}
r
}
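/// Performs one Gauss-Seidel sweep and returns the updated iterate.
/// Used as the smoother within the multigrid V-cycle.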
#[allow(dead_code)]
fn gauss_seidel_step<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
x: &ArrayView1<F>,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + 'static + ScalarOperand + Send + Sync,
{
let n = a.nrows();
let mut x_new = x.to_owned();
for i in 0..n {
let mut sum = F::zero();
for j in 0..n {
if j != i {
sum += a[[i, j]] * x_new[j];
}
}
if a[[i, i]].abs() < F::epsilon() {
return Err(LinalgError::SingularMatrixError(
"Diagonal element is zero in Gauss-Seidel smoothing".to_string(),
));
}
x_new[i] = (b[i] - sum) / a[[i, i]];
}
Ok(x_new)
}
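/// Convenience wrapper around [`conjugate_gradient`] with the default
/// worker configuration (`workers = None`).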
#[allow(dead_code)]
pub fn conjugate_gradient_default<F>(
a: &ArrayView2<F>,
b: &ArrayView1<F>,
max_iter: usize,
tol: F,
) -> LinalgResult<Array1<F>>
where
F: Float + NumAssign + Sum + One + ScalarOperand + Send + Sync,
{
conjugate_gradient(a, b, max_iter, tol, None)
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::{array, Array1, Array2};
use scirs2_core::numeric::One;
fn check_solution<F>(a: &ArrayView2<F>, x: &ArrayView1<F>, b: &ArrayView1<F>, tol: F) -> bool
where
F: Float + NumAssign + Sum + One + Send + Sync + scirs2_core::ndarray::ScalarOperand,
{
let n = a.nrows();
let mut ax = Array1::<F>::zeros(n);
for i in 0..n {
for j in 0..n {
ax[i] += a[[i, j]] * x[j];
}
}
let mut diff = Array1::<F>::zeros(n);
for i in 0..n {
diff[i] = ax[i] - b[i];
}
let diff_norm = match vector_norm(&diff.view(), 2) {
Ok(norm) => norm,
Err(_) => return false,
};
let b_norm = match vector_norm(b, 2) {
Ok(norm) => norm,
Err(_) => return false,
};
diff_norm < tol * b_norm.max(F::one())
}
#[test]
fn test_conjugate_gradient_identity() {
let a = array![[1.0, 0.0], [0.0, 1.0]];
let b = array![2.0, 3.0];
let x =
conjugate_gradient(&a.view(), &b.view(), 10, 1e-10, None).expect("Operation failed");
assert_relative_eq!(x[0], 2.0, epsilon = 1e-10);
assert_relative_eq!(x[1], 3.0, epsilon = 1e-10);
}
#[test]
fn test_conjugate_gradient_symmetric_positive_definite() {
let a = array![[4.0, 1.0], [1.0, 3.0]];
let b = array![1.0, 2.0];
let x =
conjugate_gradient(&a.view(), &b.view(), 10, 1e-10, None).expect("Operation failed");
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-8));
}
#[test]
fn test_jacobi_method() {
let a = array![[3.0, -1.0], [-1.0, 2.0]];
let b = array![5.0, 1.0];
let x = jacobi_method(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-8));
}
#[test]
fn test_gauss_seidel() {
let a = array![[3.0, -1.0], [-1.0, 2.0]];
let b = array![5.0, 1.0];
let x = gauss_seidel(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-8));
}
#[test]
fn test_successive_over_relaxation() {
let a = array![[3.0, -1.0], [-1.0, 2.0]];
let b = array![5.0, 1.0];
let x = successive_over_relaxation(&a.view(), &b.view(), 1.5, 100, 1e-10, None)
.expect("Operation failed");
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-8));
}
#[test]
fn test_methods_comparison() {
let n = 5;
let mut a = Array2::<f64>::zeros((n, n));
let mut b = Array1::<f64>::zeros(n);
for i in 0..n {
a[[i, i]] = (i + 1) as f64 * 2.0;
b[i] = (i + 1) as f64;
if i > 0 {
a[[i, i - 1]] = -1.0;
}
if i < n - 1 {
a[[i, i + 1]] = -1.0;
}
}
let x_cg =
conjugate_gradient(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
let x_jacobi =
jacobi_method(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
let x_gs = gauss_seidel(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
let x_sor = successive_over_relaxation(&a.view(), &b.view(), 1.5, 100, 1e-10, None)
.expect("Operation failed");
assert!(check_solution(&a.view(), &x_cg.view(), &b.view(), 1e-8));
assert!(check_solution(&a.view(), &x_jacobi.view(), &b.view(), 1e-8));
assert!(check_solution(&a.view(), &x_gs.view(), &b.view(), 1e-8));
assert!(check_solution(&a.view(), &x_sor.view(), &b.view(), 1e-8));
for i in 0..n {
assert_relative_eq!(x_cg[i], x_jacobi[i], epsilon = 1e-8);
assert_relative_eq!(x_cg[i], x_gs[i], epsilon = 1e-8);
assert_relative_eq!(x_cg[i], x_sor[i], epsilon = 1e-8);
}
}
#[test]
fn test_multigrid() {
let n = 8;
let mut a = Array2::<f64>::zeros((n, n));
for i in 0..n {
a[[i, i]] = 2.0;
if i > 0 {
a[[i, i - 1]] = -1.0;
}
if i < n - 1 {
a[[i, i + 1]] = -1.0;
}
}
let b = Array1::ones(n);
let x_mg = geometric_multigrid(&a.view(), &b.view(), 2, 5, 2, 2, 1e-6, None)
.expect("Operation failed");
assert!(check_solution(&a.view(), &x_mg.view(), &b.view(), 1e-4));
let residual = &b - &a.dot(&x_mg);
let residual_norm = residual.iter().map(|&x| x * x).sum::<f64>().sqrt();
assert!(
residual_norm < 1e-3,
"Residual norm too large: {}",
residual_norm
);
}
#[test]
fn test_minres() {
let a = array![[4.0, 1.0], [1.0, 3.0]];
let b = array![1.0, 2.0];
let x = minres(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
let n = a.nrows();
let mut ax = Array1::<f64>::zeros(n);
for i in 0..n {
for j in 0..n {
ax[i] += a[[i, j]] * x[j];
}
}
println!("Solution: {:?}", x);
println!("Ax: {:?}", ax);
println!("b: {:?}", b);
println!(
"Residual: {:?}",
ax.iter()
.zip(b.iter())
.map(|(a, b)| a - b)
.collect::<Vec<_>>()
);
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-6));
let a_indef = array![[4.0, 1.0], [1.0, -0.5]];
let b_indef = array![1.0, 2.0];
let x_indef =
minres(&a_indef.view(), &b_indef.view(), 100, 1e-6, None).expect("Operation failed");
println!("Indefinite solution: {:?}", x_indef);
assert!(check_solution(
&a_indef.view(),
&x_indef.view(),
&b_indef.view(),
1e-4
));
let n = 5;
let mut a_large = Array2::<f64>::zeros((n, n));
for i in 0..n {
a_large[[i, i]] = if i % 2 == 0 { 2.0 } else { -0.5 };
if i > 0 {
a_large[[i, i - 1]] = 1.0;
a_large[[i - 1, i]] = 1.0;
}
}
let b_large = Array1::ones(n);
let x_large =
minres(&a_large.view(), &b_large.view(), 100, 1e-10, None).expect("Operation failed");
assert!(check_solution(
&a_large.view(),
&x_large.view(),
&b_large.view(),
1e-6
));
}
#[test]
fn test_bicgstab() {
let a = array![[4.0, 1.0], [2.0, 3.0]];
let b = array![1.0, 2.0];
let x = bicgstab(&a.view(), &b.view(), 100, 1e-10, None).expect("Operation failed");
assert!(check_solution(&a.view(), &x.view(), &b.view(), 1e-8));
let n = 5;
let mut a_large = Array2::<f64>::zeros((n, n));
for i in 0..n {
a_large[[i, i]] = (i + 1) as f64 * 2.0;
if i > 0 {
a_large[[i, i - 1]] = -1.0;
}
if i < n - 1 {
a_large[[i, i + 1]] = -0.5;
}
}
let b_large = Array1::ones(n);
let x_large =
bicgstab(&a_large.view(), &b_large.view(), 100, 1e-10, None).expect("Operation failed");
assert!(check_solution(
&a_large.view(),
&x_large.view(),
&b_large.view(),
1e-8
));
}
}