use crate::{CooTensor, CsrTensor, SparseTensor, TorshResult};
use std::collections::HashMap;
use torsh_core::{Shape, TorshError};
use torsh_tensor::{
creation::{ones, zeros},
Tensor,
};
/// Incomplete LU (ILU) preconditioner built from a sparse matrix.
///
/// Stores a unit-lower-triangular factor `L` and an upper-triangular factor
/// `U` as CSR matrices. NOTE(review): `new` builds these by threshold-
/// splitting the input rather than by Gaussian elimination — see
/// `IncompleteLU::new` for details.
pub struct IncompleteLU {
    // Lower-triangular factor; carries an explicit unit diagonal.
    l_matrix: CsrTensor,
    // Upper-triangular factor (includes the diagonal of the input matrix).
    u_matrix: CsrTensor,
    // Shape of the (square) source matrix.
    shape: Shape,
}
impl IncompleteLU {
    /// Builds a threshold-based incomplete LU preconditioner.
    ///
    /// NOTE(review): this routine does not perform Gaussian elimination; it
    /// splits the input into a unit-lower-triangular `L` and an upper
    /// triangular `U`, dropping off-diagonal entries whose magnitude is at
    /// most `fill_factor`. The result is a cheap preconditioner, not an
    /// exact factorization — confirm this matches the intended semantics.
    ///
    /// Fix vs. previous version: diagonal entries of `U` are now always
    /// kept, even when `|a_ii| <= fill_factor`. Dropping a small diagonal
    /// guaranteed a "singular matrix" error later in `solve_upper`.
    ///
    /// # Errors
    /// `InvalidArgument` if `matrix` is not 2D or not square.
    pub fn new(matrix: &dyn SparseTensor, fill_factor: f32) -> TorshResult<Self> {
        if matrix.shape().ndim() != 2 {
            return Err(TorshError::InvalidArgument(
                "Matrix must be 2D for LU decomposition".to_string(),
            ));
        }
        let shape = matrix.shape().clone();
        let n = shape.dims()[0];
        if shape.dims()[0] != shape.dims()[1] {
            return Err(TorshError::InvalidArgument(
                "Matrix must be square for LU decomposition".to_string(),
            ));
        }
        let csr = matrix.to_csr()?;
        let mut l_rows = Vec::new();
        let mut l_cols = Vec::new();
        let mut l_vals = Vec::new();
        let mut u_rows = Vec::new();
        let mut u_cols = Vec::new();
        let mut u_vals = Vec::new();
        for i in 0..n {
            // L always carries an explicit unit diagonal.
            l_rows.push(i);
            l_cols.push(i);
            l_vals.push(1.0);
            let (cols, vals) = csr.get_row(i)?;
            for (j, &col) in cols.iter().enumerate() {
                let val = vals[j];
                if col < i {
                    // Strictly-lower entry: keep only above the drop tolerance.
                    if val.abs() > fill_factor {
                        l_rows.push(i);
                        l_cols.push(col);
                        l_vals.push(val);
                    }
                } else if col == i || val.abs() > fill_factor {
                    // Upper entry. The diagonal (col == i) is always retained
                    // so U stays nonsingular whenever the input diagonal is
                    // nonzero.
                    u_rows.push(i);
                    u_cols.push(col);
                    u_vals.push(val);
                }
            }
        }
        let l_coo = CooTensor::new(l_rows, l_cols, l_vals, shape.clone())?;
        let u_coo = CooTensor::new(u_rows, u_cols, u_vals, shape.clone())?;
        let l_matrix = CsrTensor::from_coo(&l_coo)?;
        let u_matrix = CsrTensor::from_coo(&u_coo)?;
        Ok(IncompleteLU {
            l_matrix,
            u_matrix,
            shape,
        })
    }

    /// Forward substitution `L y = b`, with `L` unit lower triangular.
    ///
    /// # Errors
    /// `InvalidArgument` if `b` is not a vector of matching length.
    pub fn solve_lower(&self, b: &Tensor) -> TorshResult<Tensor> {
        if b.shape().ndim() != 1 {
            return Err(TorshError::InvalidArgument(
                "Right-hand side must be a vector".to_string(),
            ));
        }
        let n = self.shape.dims()[0];
        if b.shape().dims()[0] != n {
            return Err(TorshError::InvalidArgument(
                "Vector size doesn't match matrix dimensions".to_string(),
            ));
        }
        let y = zeros::<f32>(&[n])?;
        for i in 0..n {
            let mut sum = 0.0;
            let (cols, vals) = self.l_matrix.get_row(i)?;
            for (k, &col) in cols.iter().enumerate() {
                if col < i {
                    sum += vals[k] * y.get(&[col])?;
                } else if col == i {
                    // CSR columns come back sorted, so nothing strictly-lower
                    // remains once the diagonal is reached.
                    break;
                }
            }
            // The diagonal of L is 1 by construction; no division needed.
            y.set(&[i], b.get(&[i])? - sum)?;
        }
        Ok(y)
    }

    /// Backward substitution `U x = y`.
    ///
    /// # Errors
    /// `InvalidArgument` if `y` is not a matching vector, or a diagonal entry
    /// of `U` is numerically zero (singular matrix).
    pub fn solve_upper(&self, y: &Tensor) -> TorshResult<Tensor> {
        if y.shape().ndim() != 1 {
            return Err(TorshError::InvalidArgument(
                "Right-hand side must be a vector".to_string(),
            ));
        }
        let n = self.shape.dims()[0];
        if y.shape().dims()[0] != n {
            return Err(TorshError::InvalidArgument(
                "Vector size doesn't match matrix dimensions".to_string(),
            ));
        }
        let x = zeros::<f32>(&[n])?;
        for i in (0..n).rev() {
            let mut sum = 0.0;
            let mut diag_val = 1.0;
            let (cols, vals) = self.u_matrix.get_row(i)?;
            for (k, &col) in cols.iter().enumerate() {
                if col > i {
                    sum += vals[k] * x.get(&[col])?;
                } else if col == i {
                    diag_val = vals[k];
                }
            }
            if diag_val.abs() < f32::EPSILON {
                return Err(TorshError::InvalidArgument(
                    "Matrix is singular (zero diagonal element)".to_string(),
                ));
            }
            x.set(&[i], (y.get(&[i])? - sum) / diag_val)?;
        }
        Ok(x)
    }

    /// Applies the preconditioner: solves `L (U x) = b` via both
    /// triangular solves.
    pub fn solve(&self, b: &Tensor) -> TorshResult<Tensor> {
        let y = self.solve_lower(b)?;
        self.solve_upper(&y)
    }
}
/// Solves `A x = b` with the conjugate gradient method.
///
/// `matrix` must be square and, for convergence guarantees, symmetric
/// positive definite; a non-positive curvature `p^T A p` is reported as an
/// error. Returns `(solution, iterations_used, final_residual_norm)`.
///
/// # Errors
/// `InvalidArgument` for shape mismatches (including a bad `x0`) or when the
/// matrix is detected to be non-positive-definite.
pub fn conjugate_gradient(
    matrix: &dyn SparseTensor,
    b: &Tensor,
    x0: Option<&Tensor>,
    tol: f32,
    max_iter: usize,
) -> TorshResult<(Tensor, usize, f32)> {
    if matrix.shape().ndim() != 2 {
        return Err(TorshError::InvalidArgument("Matrix must be 2D".to_string()));
    }
    let n = matrix.shape().dims()[0];
    if matrix.shape().dims()[0] != matrix.shape().dims()[1] {
        return Err(TorshError::InvalidArgument(
            "Matrix must be square".to_string(),
        ));
    }
    if b.shape().ndim() != 1 || b.shape().dims()[0] != n {
        return Err(TorshError::InvalidArgument(
            "Vector b must match matrix dimensions".to_string(),
        ));
    }
    let csr_matrix = matrix.to_csr()?;
    // Fix: validate the caller-supplied initial guess up front instead of
    // failing obscurely later inside matvec / the update loops.
    let x = match x0 {
        Some(x0_val) => {
            if x0_val.shape().ndim() != 1 || x0_val.shape().dims()[0] != n {
                return Err(TorshError::InvalidArgument(
                    "Initial guess x0 must match matrix dimensions".to_string(),
                ));
            }
            x0_val.clone()
        }
        None => zeros::<f32>(&[n])?,
    };
    // r = b - A x (initial residual); p starts as a copy of r.
    let ax = csr_matrix.matvec(&x)?;
    let r = b.clone();
    for i in 0..n {
        r.set(&[i], b.get(&[i])? - ax.get(&[i])?)?;
    }
    let p = r.clone();
    let mut rsold = dot_product(&r, &r)?;
    for iter in 0..max_iter {
        if rsold.sqrt() < tol {
            return Ok((x, iter, rsold.sqrt()));
        }
        let ap = csr_matrix.matvec(&p)?;
        let pap = dot_product(&p, &ap)?;
        // For an SPD matrix p^T A p > 0 for any nonzero p.
        if pap <= 0.0 {
            return Err(TorshError::InvalidArgument(
                "Matrix is not positive definite".to_string(),
            ));
        }
        let alpha = rsold / pap;
        // x <- x + alpha p ; r <- r - alpha A p
        for i in 0..n {
            x.set(&[i], x.get(&[i])? + alpha * p.get(&[i])?)?;
        }
        for i in 0..n {
            r.set(&[i], r.get(&[i])? - alpha * ap.get(&[i])?)?;
        }
        let rsnew = dot_product(&r, &r)?;
        if rsnew.sqrt() < tol {
            return Ok((x, iter + 1, rsnew.sqrt()));
        }
        // p <- r + beta p (new conjugate search direction).
        let beta = rsnew / rsold;
        for i in 0..n {
            p.set(&[i], r.get(&[i])? + beta * p.get(&[i])?)?;
        }
        rsold = rsnew;
    }
    Ok((x, max_iter, rsold.sqrt()))
}
/// Solves `A x = b` with the BiCGSTAB method (suitable for general
/// nonsymmetric systems, unlike CG).
///
/// Returns `(solution, iterations_used, final_residual_norm)`.
///
/// # Errors
/// `InvalidArgument` for shape mismatches (including a bad `x0`) or on an
/// algorithmic breakdown (`rho`, `<r_hat, v>`, or `omega` numerically zero).
pub fn bicgstab(
    matrix: &dyn SparseTensor,
    b: &Tensor,
    x0: Option<&Tensor>,
    tol: f32,
    max_iter: usize,
) -> TorshResult<(Tensor, usize, f32)> {
    if matrix.shape().ndim() != 2 {
        return Err(TorshError::InvalidArgument("Matrix must be 2D".to_string()));
    }
    let n = matrix.shape().dims()[0];
    if matrix.shape().dims()[0] != matrix.shape().dims()[1] {
        return Err(TorshError::InvalidArgument(
            "Matrix must be square".to_string(),
        ));
    }
    if b.shape().ndim() != 1 || b.shape().dims()[0] != n {
        return Err(TorshError::InvalidArgument(
            "Vector b must match matrix dimensions".to_string(),
        ));
    }
    let csr_matrix = matrix.to_csr()?;
    // Fix: validate the caller-supplied initial guess up front.
    let x = match x0 {
        Some(x0_val) => {
            if x0_val.shape().ndim() != 1 || x0_val.shape().dims()[0] != n {
                return Err(TorshError::InvalidArgument(
                    "Initial guess x0 must match matrix dimensions".to_string(),
                ));
            }
            x0_val.clone()
        }
        None => zeros::<f32>(&[n])?,
    };
    // r = b - A x; r_hat is the fixed shadow residual.
    let ax = csr_matrix.matvec(&x)?;
    let r = b.clone();
    for i in 0..n {
        r.set(&[i], b.get(&[i])? - ax.get(&[i])?)?;
    }
    let r_hat = r.clone();
    let mut rho = 1.0;
    let mut alpha = 1.0;
    let mut omega = 1.0;
    let mut v = zeros::<f32>(&[n])?;
    let p = zeros::<f32>(&[n])?;
    for iter in 0..max_iter {
        let rho_new = dot_product(&r_hat, &r)?;
        if rho_new.abs() < f32::EPSILON {
            return Err(TorshError::InvalidArgument(
                "BiCGSTAB breakdown: rho = 0".to_string(),
            ));
        }
        // p <- r + beta (p - omega v)
        let beta = (rho_new / rho) * (alpha / omega);
        for i in 0..n {
            p.set(
                &[i],
                r.get(&[i])? + beta * (p.get(&[i])? - omega * v.get(&[i])?),
            )?;
        }
        v = csr_matrix.matvec(&p)?;
        let v_dot_rhat = dot_product(&r_hat, &v)?;
        if v_dot_rhat.abs() < f32::EPSILON {
            return Err(TorshError::InvalidArgument(
                "BiCGSTAB breakdown: <r_hat, v> = 0".to_string(),
            ));
        }
        alpha = rho_new / v_dot_rhat;
        // s = r - alpha v (intermediate half-step residual).
        let s = zeros::<f32>(&[n])?;
        for i in 0..n {
            s.set(&[i], r.get(&[i])? - alpha * v.get(&[i])?)?;
        }
        let s_norm = vector_norm(&s)?;
        if s_norm < tol {
            // The half-step already converged; apply it and stop.
            for i in 0..n {
                x.set(&[i], x.get(&[i])? + alpha * p.get(&[i])?)?;
            }
            return Ok((x, iter + 1, s_norm));
        }
        // Stabilization step: omega minimizes ||s - omega t||.
        let t = csr_matrix.matvec(&s)?;
        let t_dot_t = dot_product(&t, &t)?;
        if t_dot_t.abs() < f32::EPSILON {
            omega = 0.0;
        } else {
            omega = dot_product(&t, &s)? / t_dot_t;
        }
        for i in 0..n {
            x.set(
                &[i],
                x.get(&[i])? + alpha * p.get(&[i])? + omega * s.get(&[i])?,
            )?;
        }
        for i in 0..n {
            r.set(&[i], s.get(&[i])? - omega * t.get(&[i])?)?;
        }
        let r_norm = vector_norm(&r)?;
        if r_norm < tol {
            return Ok((x, iter + 1, r_norm));
        }
        // omega == 0 would make the next beta undefined.
        if omega.abs() < f32::EPSILON {
            return Err(TorshError::InvalidArgument(
                "BiCGSTAB breakdown: omega = 0".to_string(),
            ));
        }
        rho = rho_new;
    }
    Ok((x, max_iter, vector_norm(&r)?))
}
/// Estimates the dominant eigenvalue and its eigenvector via power
/// iteration, using the Rayleigh-quotient style estimate `<v, A v>` on a
/// unit-length iterate.
///
/// Returns `(eigenvalue, unit_eigenvector)` once successive eigenvalue
/// estimates differ by less than `tol`, or the latest estimate after
/// `max_iter` iterations.
pub fn power_iteration(
    matrix: &dyn SparseTensor,
    max_iter: usize,
    tol: f32,
) -> TorshResult<(f32, Tensor)> {
    if matrix.shape().ndim() != 2 {
        return Err(TorshError::InvalidArgument("Matrix must be 2D".to_string()));
    }
    let dims = matrix.shape().dims();
    let n = dims[0];
    if dims[0] != dims[1] {
        return Err(TorshError::InvalidArgument(
            "Matrix must be square".to_string(),
        ));
    }
    let csr_matrix = matrix.to_csr()?;
    // Start from the normalized all-ones vector.
    let v = ones::<f32>(&[n])?;
    let initial_norm = vector_norm(&v)?;
    for idx in 0..n {
        v.set(&[idx], v.get(&[idx])? / initial_norm)?;
    }
    let mut lambda = 0.0;
    for _ in 0..max_iter {
        let av = csr_matrix.matvec(&v)?;
        // Eigenvalue estimate <v, A v>; v is unit length at this point.
        let lambda_next = dot_product(&v, &av)?;
        let av_norm = vector_norm(&av)?;
        if av_norm < f32::EPSILON {
            return Err(TorshError::InvalidArgument(
                "Power iteration failed: norm became zero".to_string(),
            ));
        }
        // Renormalize the iterate for the next round.
        for idx in 0..n {
            v.set(&[idx], av.get(&[idx])? / av_norm)?;
        }
        if (lambda_next - lambda).abs() < tol {
            return Ok((lambda_next, v));
        }
        lambda = lambda_next;
    }
    Ok((lambda, v))
}
/// Inner product `<a, b>` of two equally-shaped 1-D tensors, accumulated
/// in `f32`. Errors propagate from element access or on a shape mismatch.
fn dot_product(a: &Tensor, b: &Tensor) -> TorshResult<f32> {
    if a.shape() != b.shape() {
        return Err(TorshError::InvalidArgument(
            "Vectors must have the same shape".to_string(),
        ));
    }
    let len = a.shape().dims()[0];
    // Fold element products into the accumulator, short-circuiting on the
    // first access error.
    (0..len).try_fold(0.0_f32, |acc, idx| Ok(acc + a.get(&[idx])? * b.get(&[idx])?))
}
/// Euclidean (L2) norm of a vector: `sqrt(<v, v>)`.
fn vector_norm(v: &Tensor) -> TorshResult<f32> {
    let squared = dot_product(v, v)?;
    Ok(squared.sqrt())
}
/// Incomplete Cholesky factorization `A ≈ L·Lᵀ` for symmetric
/// positive-definite sparse matrices, with threshold-based fill dropping.
pub struct SparseCholesky {
    // Lower-triangular Cholesky factor in CSR form.
    l_matrix: CsrTensor,
    // Shape of the (square) source matrix.
    shape: Shape,
}
impl SparseCholesky {
    /// Computes an incomplete Cholesky factorization `A ≈ L·Lᵀ`.
    ///
    /// Every stored entry is mirrored into the lower triangle, so the input
    /// is assumed symmetric (either triangle, or both, may be stored).
    /// Off-diagonal entries of `L` with magnitude at most `fill_factor` are
    /// dropped; diagonal entries are always kept.
    ///
    /// # Errors
    /// `InvalidArgument` if the matrix is not 2D/square, or if a pivot is
    /// non-positive (matrix not positive definite).
    pub fn new(matrix: &dyn SparseTensor, fill_factor: f32) -> TorshResult<Self> {
        if matrix.shape().ndim() != 2 {
            return Err(TorshError::InvalidArgument(
                "Matrix must be 2D for Cholesky decomposition".to_string(),
            ));
        }
        let shape = matrix.shape().clone();
        let n = shape.dims()[0];
        if shape.dims()[0] != shape.dims()[1] {
            return Err(TorshError::InvalidArgument(
                "Matrix must be square for Cholesky decomposition".to_string(),
            ));
        }
        let csr = matrix.to_csr()?;
        // Collect the lower triangle; upper-triangle entries are mirrored
        // (assumes symmetry).
        let mut elements: HashMap<(usize, usize), f32> = HashMap::new();
        for i in 0..n {
            let (cols, vals) = csr.get_row(i)?;
            for (idx, &col) in cols.iter().enumerate() {
                if col <= i {
                    elements.insert((i, col), vals[idx]);
                } else {
                    elements.insert((col, i), vals[idx]);
                }
            }
        }
        // Standard (row-by-row) Cholesky with threshold dropping.
        let mut l_elements: HashMap<(usize, usize), f32> = HashMap::new();
        for i in 0..n {
            // Pivot: l_ii = sqrt(a_ii - sum_k l_ik^2).
            let mut sum = elements.get(&(i, i)).copied().unwrap_or(0.0);
            for k in 0..i {
                if let Some(&l_ik) = l_elements.get(&(i, k)) {
                    sum -= l_ik * l_ik;
                }
            }
            if sum <= 0.0 {
                return Err(TorshError::InvalidArgument(
                    "Matrix is not positive definite".to_string(),
                ));
            }
            let l_ii = sum.sqrt();
            // The diagonal is always kept, regardless of fill_factor.
            l_elements.insert((i, i), l_ii);
            // Column i below the diagonal: l_ji = (a_ji - sum_k l_ik l_jk) / l_ii.
            for j in (i + 1)..n {
                let mut sum = elements.get(&(j, i)).copied().unwrap_or(0.0);
                for k in 0..i {
                    let l_ik = l_elements.get(&(i, k)).copied().unwrap_or(0.0);
                    let l_jk = l_elements.get(&(j, k)).copied().unwrap_or(0.0);
                    sum -= l_ik * l_jk;
                }
                let l_ji = sum / l_ii;
                // Drop small fill-in to keep the factor sparse.
                if l_ji.abs() > fill_factor {
                    l_elements.insert((j, i), l_ji);
                }
            }
        }
        let mut triplets = Vec::new();
        for ((row, col), val) in l_elements {
            triplets.push((row, col, val));
        }
        let l_matrix = CooTensor::from_triplets(triplets, (n, n))?.to_csr()?;
        Ok(Self { l_matrix, shape })
    }

    /// Forward substitution `L x = b`.
    ///
    /// # Errors
    /// `InvalidArgument` if `b` is not a matching vector, or if a diagonal
    /// entry is missing (previously x[i] was silently left at 0 in that
    /// case; `new` always stores the diagonal, so this is defensive).
    pub fn solve_lower(&self, b: &Tensor) -> TorshResult<Tensor> {
        if b.shape().ndim() != 1 {
            return Err(TorshError::InvalidArgument(
                "Right-hand side must be a vector".to_string(),
            ));
        }
        let n = self.shape.dims()[0];
        if b.shape().dims()[0] != n {
            return Err(TorshError::InvalidArgument(
                "Vector size doesn't match matrix dimensions".to_string(),
            ));
        }
        let x = zeros::<f32>(&[n])?;
        for i in 0..n {
            let mut sum = 0.0;
            let mut solved = false;
            let (cols, vals) = self.l_matrix.get_row(i)?;
            for (k, &col) in cols.iter().enumerate() {
                if col < i {
                    sum += vals[k] * x.get(&[col])?;
                } else if col == i {
                    x.set(&[i], (b.get(&[i])? - sum) / vals[k])?;
                    solved = true;
                    break;
                }
            }
            if !solved {
                return Err(TorshError::InvalidArgument(
                    "Matrix is singular (missing diagonal element)".to_string(),
                ));
            }
        }
        Ok(x)
    }

    /// Backward substitution `Lᵀ x = y`.
    ///
    /// Performance fix: uses a column-oriented sweep over the rows of `L`
    /// (each row of `L` is a column of `Lᵀ`), touching every stored entry
    /// exactly once — O(nnz). The previous version rescanned all rows
    /// `j > i` for every unknown `i`, which was O(n·nnz).
    ///
    /// # Errors
    /// `InvalidArgument` if `y` is not a matching vector or a diagonal
    /// entry is numerically zero (previously a missing diagonal silently
    /// defaulted to 1.0).
    pub fn solve_upper(&self, y: &Tensor) -> TorshResult<Tensor> {
        if y.shape().ndim() != 1 {
            return Err(TorshError::InvalidArgument(
                "Right-hand side must be a vector".to_string(),
            ));
        }
        let n = self.shape.dims()[0];
        if y.shape().dims()[0] != n {
            return Err(TorshError::InvalidArgument(
                "Vector size doesn't match matrix dimensions".to_string(),
            ));
        }
        // x starts as a copy of y; each solved unknown's contribution is
        // then subtracted from the earlier entries.
        let x = zeros::<f32>(&[n])?;
        for i in 0..n {
            x.set(&[i], y.get(&[i])?)?;
        }
        for j in (0..n).rev() {
            let (cols, vals) = self.l_matrix.get_row(j)?;
            // Locate the diagonal L[j][j].
            let mut diag_val = 0.0;
            for (k, &col) in cols.iter().enumerate() {
                if col == j {
                    diag_val = vals[k];
                    break;
                }
            }
            if diag_val.abs() < f32::EPSILON {
                return Err(TorshError::InvalidArgument(
                    "Matrix is singular (zero diagonal element)".to_string(),
                ));
            }
            // At this point x[j] = y[j] - sum_{m>j} L[m][j] x[m].
            let xj = x.get(&[j])? / diag_val;
            x.set(&[j], xj)?;
            // Propagate x[j] backwards: Lᵀ[k][j] = L[j][k] for k < j.
            for (k, &col) in cols.iter().enumerate() {
                if col < j {
                    x.set(&[col], x.get(&[col])? - vals[k] * xj)?;
                }
            }
        }
        Ok(x)
    }

    /// Solves `A x ≈ b` via `L (Lᵀ x) = b` using both triangular solves.
    pub fn solve(&self, b: &Tensor) -> TorshResult<Tensor> {
        let y = self.solve_lower(b)?;
        self.solve_upper(&y)
    }

    /// Returns the lower-triangular Cholesky factor.
    pub fn get_l(&self) -> &CsrTensor {
        &self.l_matrix
    }
}
/// Restarted GMRES for general (possibly nonsymmetric) square systems.
///
/// Runs up to `max_iter` outer cycles of at most `restart` Arnoldi steps
/// each; returns `(solution, total_inner_iterations, relative_residual)`.
/// If `b` is (numerically) zero the current iterate is returned unchanged
/// with a residual of 0.
///
/// # Errors
/// `InvalidArgument` for shape mismatches (including a bad `x0`) or
/// `restart == 0`.
pub fn gmres(
    a: &dyn SparseTensor,
    b: &Tensor,
    x0: Option<&Tensor>,
    restart: usize,
    max_iter: usize,
    tol: f64,
) -> TorshResult<(Tensor, usize, f64)> {
    if a.shape().ndim() != 2 {
        return Err(TorshError::InvalidArgument("Matrix must be 2D".to_string()));
    }
    let n = a.shape().dims()[0];
    if a.shape().dims()[0] != a.shape().dims()[1] {
        return Err(TorshError::InvalidArgument(
            "Matrix must be square".to_string(),
        ));
    }
    // Fix: also reject non-1D right-hand sides (previously only the first
    // dimension was checked).
    if b.shape().ndim() != 1 || b.shape().dims()[0] != n {
        return Err(TorshError::InvalidArgument(
            "Vector size doesn't match matrix dimensions".to_string(),
        ));
    }
    // Fix: restart == 0 previously spun through max_iter cycles without
    // making any progress; reject it up front.
    if restart == 0 {
        return Err(TorshError::InvalidArgument(
            "restart must be at least 1".to_string(),
        ));
    }
    // Fix: validate the caller-supplied initial guess.
    let x = if let Some(x_init) = x0 {
        if x_init.shape().ndim() != 1 || x_init.shape().dims()[0] != n {
            return Err(TorshError::InvalidArgument(
                "Initial guess x0 must match matrix dimensions".to_string(),
            ));
        }
        x_init.clone()
    } else {
        zeros::<f32>(&[n])?
    };
    // r = b - A x. spmm expects a 2D right-hand side, hence the reshaping.
    let x_2d = zeros::<f32>(&[n, 1])?;
    for i in 0..n {
        x_2d.set(&[i, 0], x.get(&[i])?)?;
    }
    let ax_2d = crate::ops::spmm(a, &x_2d)?;
    let mut r = b.clone();
    for i in 0..n {
        let val = r.get(&[i])? - ax_2d.get(&[i, 0])?;
        r.set(&[i], val)?;
    }
    let b_norm = vector_norm(b)?;
    if b_norm < f64::EPSILON as f32 {
        return Ok((x, 0, 0.0));
    }
    let mut total_iterations = 0;
    for _restart_iter in 0..max_iter {
        let r_norm = vector_norm(&r)?;
        let rel_residual = (r_norm as f64) / (b_norm as f64);
        if rel_residual < tol {
            return Ok((x, total_iterations, rel_residual));
        }
        // Arnoldi process: build an orthonormal Krylov basis `v` and the
        // (restart+1) x restart upper-Hessenberg matrix `h`.
        let mut v: Vec<Tensor> = Vec::with_capacity(restart + 1);
        let v0 = zeros::<f32>(&[n])?;
        for i in 0..n {
            v0.set(&[i], r.get(&[i])? / r_norm)?;
        }
        v.push(v0);
        let mut h: Vec<Vec<f32>> = vec![vec![0.0; restart]; restart + 1];
        let mut m = 0;
        for j in 0..restart {
            // w = A v_j
            let v_2d = zeros::<f32>(&[n, 1])?;
            for i in 0..n {
                v_2d.set(&[i, 0], v[j].get(&[i])?)?;
            }
            let w_2d = crate::ops::spmm(a, &v_2d)?;
            let w = zeros::<f32>(&[n])?;
            for i in 0..n {
                w.set(&[i], w_2d.get(&[i, 0])?)?;
            }
            // Modified Gram-Schmidt against the existing basis vectors.
            // (w is orthogonalized in place; the redundant clone of w that
            // the previous version made has been removed.)
            for i in 0..=j {
                let mut projection = 0.0;
                for k in 0..n {
                    projection += w.get(&[k])? * v[i].get(&[k])?;
                }
                h[i][j] = projection;
                for k in 0..n {
                    let val = w.get(&[k])? - h[i][j] * v[i].get(&[k])?;
                    w.set(&[k], val)?;
                }
            }
            h[j + 1][j] = vector_norm(&w)?;
            if h[j + 1][j] < (f64::EPSILON as f32) {
                // Happy breakdown: the Krylov subspace is exhausted.
                m = j + 1;
                break;
            }
            let v_next = zeros::<f32>(&[n])?;
            for k in 0..n {
                v_next.set(&[k], w.get(&[k])? / h[j + 1][j])?;
            }
            v.push(v_next);
            m = j + 1;
        }
        // Solve the small least-squares problem and update the iterate:
        // x <- x + V_m y.
        let mut rhs = vec![0.0; m + 1];
        rhs[0] = r_norm;
        let y = solve_least_squares(&h, &rhs, m)?;
        for i in 0..m {
            for k in 0..n {
                let val = x.get(&[k])? + y[i] * v[i].get(&[k])?;
                x.set(&[k], val)?;
            }
        }
        total_iterations += m;
        // Recompute the true residual for the next restart cycle.
        let x_2d = zeros::<f32>(&[n, 1])?;
        for i in 0..n {
            x_2d.set(&[i, 0], x.get(&[i])?)?;
        }
        let ax_2d = crate::ops::spmm(a, &x_2d)?;
        r = b.clone();
        for i in 0..n {
            let val = r.get(&[i])? - ax_2d.get(&[i, 0])?;
            r.set(&[i], val)?;
        }
    }
    let r_norm = vector_norm(&r)?;
    let rel_residual = (r_norm as f64) / (b_norm as f64);
    Ok((x, total_iterations, rel_residual))
}
/// Solves the small `(m+1) x m` upper-Hessenberg least-squares problem
/// arising in GMRES: Givens rotations reduce the Hessenberg matrix to
/// upper-triangular form, then back-substitution recovers `y`.
fn solve_least_squares(h: &[Vec<f32>], b: &[f32], m: usize) -> TorshResult<Vec<f32>> {
    let mut hess = h.to_vec();
    let mut rhs = b.to_vec();
    // Eliminate each subdiagonal entry hess[i+1][i] with a Givens rotation.
    for i in 0..m {
        let diag = hess[i][i];
        let sub = hess[i + 1][i];
        let radius = (diag * diag + sub * sub).sqrt();
        if radius < f32::EPSILON {
            // Both pivot entries are numerically zero; nothing to rotate.
            continue;
        }
        let (c, s) = (diag / radius, -sub / radius);
        hess[i][i] = radius;
        hess[i + 1][i] = 0.0;
        // Apply the rotation to the remaining columns...
        for j in (i + 1)..m {
            let (top, bottom) = (hess[i][j], hess[i + 1][j]);
            hess[i][j] = c * top - s * bottom;
            hess[i + 1][j] = s * top + c * bottom;
        }
        // ...and to the right-hand side.
        let (top, bottom) = (rhs[i], rhs[i + 1]);
        rhs[i] = c * top - s * bottom;
        rhs[i + 1] = s * top + c * bottom;
    }
    // Back-substitution on the now upper-triangular system.
    let mut y = vec![0.0; m];
    for i in (0..m).rev() {
        let mut acc = rhs[i];
        for j in (i + 1)..m {
            acc -= hess[i][j] * y[j];
        }
        if hess[i][i].abs() < f32::EPSILON {
            return Err(TorshError::ComputeError(
                "Singular Hessenberg matrix in GMRES".to_string(),
            ));
        }
        y[i] = acc / hess[i][i];
    }
    Ok(y)
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    // CG on a small symmetric positive-definite tridiagonal system; must
    // converge within the iteration budget and to the requested tolerance.
    #[test]
    fn test_conjugate_gradient() {
        let rows = vec![0, 0, 1, 1, 1, 2, 2];
        let cols = vec![0, 1, 0, 1, 2, 1, 2];
        let vals = vec![4.0, 1.0, 1.0, 4.0, 1.0, 1.0, 4.0];
        let matrix = CooTensor::new(rows, cols, vals, Shape::new(vec![3, 3])).unwrap();
        let b = zeros::<f32>(&[3]).unwrap();
        b.set(&[0], 1.0).unwrap();
        b.set(&[1], 1.0).unwrap();
        b.set(&[2], 1.0).unwrap();
        let (solution, iterations, residual) = conjugate_gradient(
            &matrix as &dyn SparseTensor,
            &b,
            None,
            1e-4,
            50,
        )
        .unwrap();
        assert!(iterations < 50);
        assert!(residual < 1e-4);
        assert_eq!(solution.shape().dims(), &[3]);
    }

    // Smoke test: ILU construction plus both triangular solves produce a
    // vector of the expected shape. No accuracy assertion, since the ILU
    // here is only an approximate factorization.
    #[test]
    fn test_incomplete_lu() {
        let rows = vec![0, 0, 1, 1, 2, 2];
        let cols = vec![0, 1, 1, 2, 0, 2];
        let vals = vec![4.0, 1.0, 3.0, 2.0, 1.0, 5.0];
        let matrix = CooTensor::new(rows, cols, vals, Shape::new(vec![3, 3])).unwrap();
        let ilu = IncompleteLU::new(&matrix as &dyn SparseTensor, 0.01).unwrap();
        let b = ones::<f32>(&[3]).unwrap();
        let solution = ilu.solve(&b).unwrap();
        assert_eq!(solution.shape().dims(), &[3]);
    }

    // Power iteration on a small matrix: the dominant eigenvalue estimate
    // should be positive and the returned eigenvector unit-length.
    #[test]
    fn test_power_iteration() {
        let rows = vec![0, 0, 1, 1, 2, 2];
        let cols = vec![0, 1, 0, 1, 1, 2];
        let vals = vec![3.0, 1.0, 1.0, 2.0, 1.0, 1.0];
        let matrix = CooTensor::new(rows, cols, vals, Shape::new(vec![3, 3])).unwrap();
        let (eigenvalue, eigenvector) =
            power_iteration(&matrix as &dyn SparseTensor, 100, 1e-6).unwrap();
        assert!(eigenvalue > 0.0);
        assert_eq!(eigenvector.shape().dims(), &[3]);
        let norm = vector_norm(&eigenvector).unwrap();
        assert_relative_eq!(norm, 1.0, epsilon = 1e-6);
    }

    // NOTE(review): this test is #[ignore]d with no recorded reason —
    // possibly flaky or dependent on spmm behavior; confirm before
    // re-enabling.
    #[test]
    #[ignore]
    fn test_gmres() {
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 2];
        let vals = vec![4.0, 1.0, 2.0, 1.0, 3.0, 1.0, 2.0, 5.0];
        let matrix = CooTensor::new(rows, cols, vals, Shape::new(vec![3, 3])).unwrap();
        let b = zeros::<f32>(&[3]).unwrap();
        b.set(&[0], 1.0).unwrap();
        b.set(&[1], 2.0).unwrap();
        b.set(&[2], 3.0).unwrap();
        let (solution, iterations, residual) =
            gmres(&matrix as &dyn SparseTensor, &b, None, 5, 20, 1e-3).unwrap();
        assert!(iterations <= 100);
        assert!(residual < 1e-2);
        assert_eq!(solution.shape().dims(), &[3]);
        // Verify the residual directly: compute A * solution and compare
        // against b element-wise.
        let solution_2d = zeros::<f32>(&[3, 1]).unwrap();
        for i in 0..3 {
            solution_2d
                .set(&[i, 0], solution.get(&[i]).unwrap())
                .unwrap();
        }
        let ax = crate::ops::spmm(&matrix as &dyn SparseTensor, &solution_2d).unwrap();
        let mut max_error: f32 = 0.0;
        for i in 0..3 {
            let diff = (ax.get(&[i, 0]).unwrap() - b.get(&[i]).unwrap()).abs();
            // Relative error with a floor of 1.0 to avoid tiny denominators.
            let b_val = b.get(&[i]).unwrap().abs().max(1.0);
            let rel_error = diff / b_val;
            max_error = max_error.max(rel_error);
        }
        assert!(
            max_error < 0.1,
            "Solution relative error too large: {max_error}"
        );
    }
}