//! Hybrid two-grid multigrid solver: a learned smoother with a damped-Jacobi
//! fallback, 1D full-weighting/linear-interpolation transfer operators, and a
//! direct coarse-grid solve. All matrices are stored in CSR form.
use crate::error::{SparseError, SparseResult};
use crate::learned_smoother::types::{LearnedSmootherConfig, Smoother, SmootherMetrics};
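/// Output of [`build_coarsening`]: the restriction and prolongation operators
/// in CSR form plus the coarse dimension, ordered as
/// `(r_values, r_row_ptr, r_col_idx, p_values, p_row_ptr, p_col_idx, n_c)`.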
type CoarseningResult = (
Vec<f64>,
Vec<usize>,
Vec<usize>,
Vec<f64>,
Vec<usize>,
Vec<usize>,
usize,
);
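/// Computes `y = A * x` for a CSR matrix given as (values, row_ptr, col_idx).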
fn csr_matvec(a_values: &[f64], a_row_ptr: &[usize], a_col_idx: &[usize], x: &[f64]) -> Vec<f64> {
let n = a_row_ptr.len().saturating_sub(1);
let mut y = vec![0.0; n];
for i in 0..n {
let start = a_row_ptr[i];
let end = a_row_ptr[i + 1];
let mut sum = 0.0;
for pos in start..end {
sum += a_values[pos] * x[a_col_idx[pos]];
}
y[i] = sum;
}
y
}
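/// Computes the residual `r = b - A * x`.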
fn compute_residual(
a_values: &[f64],
a_row_ptr: &[usize],
a_col_idx: &[usize],
x: &[f64],
b: &[f64],
) -> Vec<f64> {
let ax = csr_matvec(a_values, a_row_ptr, a_col_idx, x);
b.iter()
.zip(ax.iter())
.map(|(&bi, &axi)| bi - axi)
.collect()
}
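/// Euclidean (L2) norm of a vector.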
fn vec_norm(v: &[f64]) -> f64 {
v.iter().map(|x| x * x).sum::<f64>().sqrt()
}
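/// Extracts the main diagonal of a CSR matrix; entries absent from the
/// sparsity pattern are left at zero.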
fn extract_diagonal(
a_values: &[f64],
a_row_ptr: &[usize],
a_col_idx: &[usize],
n: usize,
) -> Vec<f64> {
let mut diag = vec![0.0; n];
for i in 0..n {
let start = a_row_ptr[i];
let end = a_row_ptr[i + 1];
for pos in start..end {
if a_col_idx[pos] == i {
diag[i] = a_values[pos];
break;
}
}
}
diag
}
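/// Applies `n_sweeps` of weighted Jacobi relaxation:
/// `x <- x + omega * D^{-1} * (b - A * x)`.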
fn jacobi_smooth(
a_values: &[f64],
a_row_ptr: &[usize],
a_col_idx: &[usize],
x: &mut [f64],
b: &[f64],
diag_inv: &[f64],
omega: f64,
n_sweeps: usize,
) {
let n = x.len();
for _ in 0..n_sweeps {
let r = compute_residual(a_values, a_row_ptr, a_col_idx, x, b);
for i in 0..n {
x[i] += omega * diag_inv[i] * r[i];
}
}
}
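/// Builds 1D two-grid transfer operators in CSR form: coarse points sit at
/// even fine indices, restriction `R` (n_c x n) uses full weighting
/// (1/4, 1/2, 1/4), and prolongation `P` (n x n_c) uses linear interpolation.
/// Boundary stencils are truncated rather than rescaled.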
fn build_coarsening(n: usize) -> CoarseningResult {
let n_c = (n + 1) / 2;
let mut r_values = Vec::new();
let mut r_col_idx = Vec::new();
let mut r_row_ptr = vec![0usize];
for c in 0..n_c {
let fine = 2 * c;
if fine > 0 {
r_values.push(0.25);
r_col_idx.push(fine - 1);
}
r_values.push(0.5);
r_col_idx.push(fine);
if fine + 1 < n {
r_values.push(0.25);
r_col_idx.push(fine + 1);
}
r_row_ptr.push(r_values.len());
}
let mut p_values = Vec::new();
let mut p_col_idx = Vec::new();
let mut p_row_ptr = vec![0usize];
for i in 0..n {
if i % 2 == 0 {
let c = i / 2;
p_values.push(1.0);
p_col_idx.push(c);
} else {
let c_left = i / 2;
let c_right = c_left + 1;
p_values.push(0.5);
p_col_idx.push(c_left);
if c_right < n_c {
p_values.push(0.5);
p_col_idx.push(c_right);
}
}
p_row_ptr.push(p_values.len());
}
(
r_values, r_row_ptr, r_col_idx, p_values, p_row_ptr, p_col_idx, n_c,
)
}
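/// Forms the Galerkin coarse operator `A_c = R * A * P` column by column by
/// applying the triple product to coarse unit vectors, then compresses the
/// dense result back to CSR, dropping entries with magnitude below 1e-14.
/// Costs O(n_c) sparse matvecs plus an O(n_c^2) dense buffer, which is
/// acceptable for the small coarse grids of a single coarsening level.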
fn compute_coarse_operator(
a_values: &[f64],
a_row_ptr: &[usize],
a_col_idx: &[usize],
r_values: &[f64],
r_row_ptr: &[usize],
r_col_idx: &[usize],
p_values: &[f64],
p_row_ptr: &[usize],
p_col_idx: &[usize],
n_c: usize,
) -> (Vec<f64>, Vec<usize>, Vec<usize>) {
let mut ac_dense = vec![vec![0.0; n_c]; n_c];
for ic in 0..n_c {
let mut e_c = vec![0.0; n_c];
e_c[ic] = 1.0;
let p_e = csr_matvec(p_values, p_row_ptr, p_col_idx, &e_c);
let a_p_e = csr_matvec(a_values, a_row_ptr, a_col_idx, &p_e);
let r_a_p_e = csr_matvec(r_values, r_row_ptr, r_col_idx, &a_p_e);
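// r_a_p_e is column ic of R * A * P; scatter it into the dense buffer.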
for jc in 0..n_c {
ac_dense[jc][ic] = r_a_p_e[jc];
}
}
let mut ac_values = Vec::new();
let mut ac_col_idx = Vec::new();
let mut ac_row_ptr = vec![0usize];
for i in 0..n_c {
for j in 0..n_c {
if ac_dense[i][j].abs() > 1e-14 {
ac_values.push(ac_dense[i][j]);
ac_col_idx.push(j);
}
}
ac_row_ptr.push(ac_values.len());
}
(ac_values, ac_row_ptr, ac_col_idx)
}
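/// Solves the linear system directly: densifies the CSR input and runs
/// Gaussian elimination with partial pivoting. Intended for the small
/// coarse-grid system only; returns `SparseError::SingularMatrix` if a pivot
/// falls below 1e-14.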
fn direct_solve(
a_values: &[f64],
a_row_ptr: &[usize],
a_col_idx: &[usize],
b: &[f64],
n: usize,
) -> SparseResult<Vec<f64>> {
if n == 0 {
return Ok(Vec::new());
}
let mut dense = vec![vec![0.0; n]; n];
for i in 0..n {
let start = a_row_ptr[i];
let end = a_row_ptr[i + 1];
for pos in start..end {
dense[i][a_col_idx[pos]] = a_values[pos];
}
}
let mut aug: Vec<Vec<f64>> = dense
.iter()
.enumerate()
.map(|(i, row)| {
let mut r = row.clone();
r.push(b[i]);
r
})
.collect();
for col in 0..n {
let mut max_val = aug[col][col].abs();
let mut max_row = col;
for row in (col + 1)..n {
if aug[row][col].abs() > max_val {
max_val = aug[row][col].abs();
max_row = row;
}
}
if max_val < 1e-14 {
return Err(SparseError::SingularMatrix(
"Coarse grid operator is singular".to_string(),
));
}
aug.swap(col, max_row);
let pivot = aug[col][col];
for row in (col + 1)..n {
let factor = aug[row][col] / pivot;
for j in col..=n {
let val = aug[col][j];
aug[row][j] -= factor * val;
}
}
}
let mut x = vec![0.0; n];
for i in (0..n).rev() {
let mut sum = aug[i][n];
for j in (i + 1)..n {
sum -= aug[i][j] * x[j];
}
x[i] = sum / aug[i][i];
}
Ok(x)
}
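/// Two-grid multigrid solver that smooths with a pluggable learned
/// [`Smoother`] and falls back to damped Jacobi whenever the learned cycle
/// fails or inflates the residual. The fine operator, Galerkin coarse
/// operator, restriction, and prolongation are all stored in CSR form.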
pub struct HybridMultigridSolver {
a_values: Vec<f64>,
a_row_ptr: Vec<usize>,
a_col_idx: Vec<usize>,
n: usize,
ac_values: Vec<f64>,
ac_row_ptr: Vec<usize>,
ac_col_idx: Vec<usize>,
n_c: usize,
r_values: Vec<f64>,
r_row_ptr: Vec<usize>,
r_col_idx: Vec<usize>,
p_values: Vec<f64>,
p_row_ptr: Vec<usize>,
p_col_idx: Vec<usize>,
smoother: Box<dyn Smoother>,
diag_inv: Vec<f64>,
config: LearnedSmootherConfig,
}
impl HybridMultigridSolver {
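/// Builds the solver from a CSR matrix: constructs the 1D coarsening, the
/// Galerkin coarse operator `R * A * P`, and the inverse diagonal used by
/// the Jacobi fallback. Fails if the matrix dimension is zero.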
pub fn new(
a_values: Vec<f64>,
a_row_ptr: Vec<usize>,
a_col_idx: Vec<usize>,
smoother: Box<dyn Smoother>,
config: LearnedSmootherConfig,
) -> SparseResult<Self> {
let n = a_row_ptr.len().saturating_sub(1);
if n == 0 {
return Err(SparseError::ValueError(
"Matrix dimension must be positive".to_string(),
));
}
let (r_values, r_row_ptr, r_col_idx, p_values, p_row_ptr, p_col_idx, n_c) =
build_coarsening(n);
let (ac_values, ac_row_ptr, ac_col_idx) = compute_coarse_operator(
&a_values, &a_row_ptr, &a_col_idx, &r_values, &r_row_ptr, &r_col_idx, &p_values,
&p_row_ptr, &p_col_idx, n_c,
);
let diag = extract_diagonal(&a_values, &a_row_ptr, &a_col_idx, n);
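// Invert the diagonal for the Jacobi fallback, guarding against zero (or
// missing) diagonal entries.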
let diag_inv: Vec<f64> = diag
.iter()
.map(|&d| if d.abs() > f64::EPSILON { 1.0 / d } else { 1.0 })
.collect();
Ok(Self {
a_values,
a_row_ptr,
a_col_idx,
n,
ac_values,
ac_row_ptr,
ac_col_idx,
n_c,
r_values,
r_row_ptr,
r_col_idx,
p_values,
p_row_ptr,
p_col_idx,
smoother,
diag_inv,
config,
})
}
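/// One two-grid V-cycle with the learned smoother: pre-smooth, restrict the
/// residual, solve the coarse system directly, prolongate and add the
/// correction, then post-smooth.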
fn v_cycle(&self, x: &mut [f64], b: &[f64]) -> SparseResult<()> {
self.smoother.smooth(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
x,
b,
self.config.pre_sweeps,
)?;
let r_fine = compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, x, b);
let r_coarse = csr_matvec(&self.r_values, &self.r_row_ptr, &self.r_col_idx, &r_fine);
let e_coarse = direct_solve(
&self.ac_values,
&self.ac_row_ptr,
&self.ac_col_idx,
&r_coarse,
self.n_c,
)?;
let e_fine = csr_matvec(&self.p_values, &self.p_row_ptr, &self.p_col_idx, &e_coarse);
for i in 0..self.n {
x[i] += e_fine[i];
}
self.smoother.smooth(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
x,
b,
self.config.post_sweeps,
)?;
Ok(())
}
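/// The same two-grid V-cycle as [`Self::v_cycle`], but with damped Jacobi
/// smoothing; this is the fallback path.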
fn v_cycle_jacobi(&self, x: &mut [f64], b: &[f64]) -> SparseResult<()> {
let omega = self.config.omega;
let pre = self.config.pre_sweeps;
let post = self.config.post_sweeps;
jacobi_smooth(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
x,
b,
&self.diag_inv,
omega,
pre,
);
let r_fine = compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, x, b);
let r_coarse = csr_matvec(&self.r_values, &self.r_row_ptr, &self.r_col_idx, &r_fine);
let e_coarse = direct_solve(
&self.ac_values,
&self.ac_row_ptr,
&self.ac_col_idx,
&r_coarse,
self.n_c,
)?;
let e_fine = csr_matvec(&self.p_values, &self.p_row_ptr, &self.p_col_idx, &e_coarse);
for i in 0..self.n {
x[i] += e_fine[i];
}
jacobi_smooth(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
x,
b,
&self.diag_inv,
omega,
post,
);
Ok(())
}
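/// Solves `A x = b` from a zero initial guess by repeated V-cycles, stopping
/// once the residual norm falls below `convergence_tol` relative to the
/// initial residual (or immediately if the initial residual is already below
/// `convergence_tol` in absolute terms). Cycles start with the learned
/// smoother and switch permanently to the Jacobi fallback if a learned cycle
/// errors or more than doubles the residual norm. The iteration cap reuses
/// `config.max_training_steps`.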
pub fn solve(&self, b: &[f64]) -> SparseResult<Vec<f64>> {
if b.len() != self.n {
return Err(SparseError::DimensionMismatch {
expected: self.n,
found: b.len(),
});
}
let mut x = vec![0.0; self.n];
let tol = self.config.convergence_tol;
let max_iter = self.config.max_training_steps;
let r0 = compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, &x, b);
let norm0 = vec_norm(&r0);
if norm0 < tol {
return Ok(x);
}
let mut use_learned = true;
let mut prev_norm = norm0;
for _iter in 0..max_iter {
if use_learned {
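// Snapshot x so the learned cycle can be rolled back if it errors or
// diverges.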
let x_backup: Vec<f64> = x.clone();
let result = self.v_cycle(&mut x, b);
let r = compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, &x, b);
let norm_r = vec_norm(&r);
if result.is_err() || norm_r > prev_norm * 2.0 {
x.copy_from_slice(&x_backup);
use_learned = false;
self.v_cycle_jacobi(&mut x, b)?;
let r2 =
compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, &x, b);
prev_norm = vec_norm(&r2);
} else {
prev_norm = norm_r;
}
} else {
self.v_cycle_jacobi(&mut x, b)?;
let r = compute_residual(&self.a_values, &self.a_row_ptr, &self.a_col_idx, &x, b);
prev_norm = vec_norm(&r);
}
if prev_norm < tol * norm0 {
break;
}
}
Ok(x)
}
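/// Runs `n_iterations` V-cycles with the learned smoother and, separately,
/// with the Jacobi fallback, both from a zero initial guess, and reports
/// residual-based metrics. `spectral_radius_reduction` is the ratio of final
/// residual norms (learned / Jacobi), a cheap proxy rather than a true
/// spectral estimate; `training_loss_history` holds the learned residual
/// norms per iteration.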
pub fn compare_with_jacobi(
&self,
b: &[f64],
n_iterations: usize,
) -> SparseResult<SmootherMetrics> {
if b.len() != self.n {
return Err(SparseError::DimensionMismatch {
expected: self.n,
found: b.len(),
});
}
let mut x_learned = vec![0.0; self.n];
let r0 = compute_residual(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
&x_learned,
b,
);
let norm0 = vec_norm(&r0);
let mut learned_history = Vec::with_capacity(n_iterations);
for _ in 0..n_iterations {
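// Errors from the learned cycle are deliberately ignored so the
// comparison still completes.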
let _ = self.v_cycle(&mut x_learned, b);
let r = compute_residual(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
&x_learned,
b,
);
learned_history.push(vec_norm(&r));
}
let mut x_jacobi = vec![0.0; self.n];
let mut jacobi_final_norm = norm0;
for _ in 0..n_iterations {
self.v_cycle_jacobi(&mut x_jacobi, b)?;
let r = compute_residual(
&self.a_values,
&self.a_row_ptr,
&self.a_col_idx,
&x_jacobi,
b,
);
jacobi_final_norm = vec_norm(&r);
}
let learned_final = learned_history.last().copied().unwrap_or(norm0);
let convergence_factor = if n_iterations > 0 && norm0 > f64::EPSILON {
(learned_final / norm0).powf(1.0 / n_iterations as f64)
} else {
1.0
};
let residual_reduction = if norm0 > f64::EPSILON {
learned_final / norm0
} else {
0.0
};
let spectral_radius_reduction = if jacobi_final_norm > f64::EPSILON {
learned_final / jacobi_final_norm
} else {
1.0
};
Ok(SmootherMetrics {
spectral_radius_reduction,
convergence_factor,
residual_reduction,
training_loss_history: learned_history,
})
}
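/// Fine-grid dimension.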
pub fn dim(&self) -> usize {
self.n
}
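/// Coarse-grid dimension.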
pub fn coarse_dim(&self) -> usize {
self.n_c
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::learned_smoother::linear_smoother::LinearSmoother;
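/// Builds the standard 1D Poisson matrix `tridiag(-1, 2, -1)` in CSR form.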
fn poisson_1d(n: usize) -> (Vec<f64>, Vec<usize>, Vec<usize>) {
let mut values = Vec::new();
let mut col_idx = Vec::new();
let mut row_ptr = vec![0usize];
for i in 0..n {
if i > 0 {
values.push(-1.0);
col_idx.push(i - 1);
}
values.push(2.0);
col_idx.push(i);
if i + 1 < n {
values.push(-1.0);
col_idx.push(i + 1);
}
row_ptr.push(values.len());
}
(values, row_ptr, col_idx)
}
#[test]
fn test_coarsening_dimensions() {
let (_, _, _, _, _, _, n_c) = build_coarsening(7);
assert_eq!(n_c, 4);
let (_, _, _, _, _, _, n_c2) = build_coarsening(8);
assert_eq!(n_c2, 4);
}
#[test]
fn test_hybrid_solver_solves_poisson() {
let n = 7;
let (vals, rp, ci) = poisson_1d(n);
let smoother = LinearSmoother::from_csr(&vals, &rp, &ci, 2.0 / 3.0);
let config = LearnedSmootherConfig {
max_training_steps: 200,
convergence_tol: 1e-8,
..LearnedSmootherConfig::default()
};
let solver = HybridMultigridSolver::new(
vals.clone(),
rp.clone(),
ci.clone(),
Box::new(smoother),
config,
)
.expect("solver creation");
let mut b = vec![0.0; n];
b[0] = 1.0;
b[n - 1] = 1.0;
let x = solver.solve(&b).expect("solve");
let r = compute_residual(&vals, &rp, &ci, &x, &b);
let rel_res = vec_norm(&r) / vec_norm(&b);
assert!(
rel_res < 1e-4,
"Relative residual should be small, got {rel_res}"
);
}
#[test]
fn test_compare_with_jacobi() {
let n = 7;
let (vals, rp, ci) = poisson_1d(n);
let smoother = LinearSmoother::from_csr(&vals, &rp, &ci, 2.0 / 3.0);
let config = LearnedSmootherConfig::default();
let solver = HybridMultigridSolver::new(
vals.clone(),
rp.clone(),
ci.clone(),
Box::new(smoother),
config,
)
.expect("solver creation");
let mut b = vec![1.0; n];
b[0] = 2.0;
let metrics = solver.compare_with_jacobi(&b, 10).expect("compare");
assert!(
metrics.convergence_factor < 1.0,
"Convergence factor should be < 1, got {}",
metrics.convergence_factor
);
assert_eq!(metrics.training_loss_history.len(), 10);
}
#[test]
fn test_solver_dimension_mismatch() {
let n = 5;
let (vals, rp, ci) = poisson_1d(n);
let smoother = LinearSmoother::from_csr(&vals, &rp, &ci, 2.0 / 3.0);
let config = LearnedSmootherConfig::default();
let solver = HybridMultigridSolver::new(vals, rp, ci, Box::new(smoother), config)
.expect("solver creation");
let b = vec![1.0; 3];
assert!(solver.solve(&b).is_err());
}
#[test]
fn test_solver_coarse_dim() {
let n = 9;
let (vals, rp, ci) = poisson_1d(n);
let smoother = LinearSmoother::from_csr(&vals, &rp, &ci, 2.0 / 3.0);
let config = LearnedSmootherConfig::default();
let solver = HybridMultigridSolver::new(vals, rp, ci, Box::new(smoother), config)
.expect("solver creation");
assert_eq!(solver.dim(), 9);
assert_eq!(solver.coarse_dim(), 5);
}
#[test]
fn test_direct_solve_small_system() {
let vals = vec![2.0, 1.0, 1.0, 3.0];
let rp = vec![0, 2, 4];
let ci = vec![0, 1, 0, 1];
let b = vec![5.0, 7.0];
let x = direct_solve(&vals, &rp, &ci, &b, 2).expect("direct solve");
assert!((x[0] - 1.6).abs() < 1e-10);
assert!((x[1] - 1.8).abs() < 1e-10);
}
}