use crate::csr::CsrMatrix;
use crate::error::{SparseError, SparseResult};
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::collections::BTreeSet;
use std::fmt::Debug;
use std::iter::Sum;
/// Common interface for direct sparse solvers: factorize once, solve many.
pub trait SparseSolver<F: Float> {
    /// Compute and cache a factorization of `matrix` for later solves.
    fn factorize(&mut self, matrix: &CsrMatrix<F>) -> SparseResult<()>;
    /// Solve `A x = b` for one right-hand side using the cached factorization.
    fn solve(&self, b: &[F]) -> SparseResult<Vec<F>>;
    /// Solve against several right-hand-side columns, reusing the
    /// factorization; short-circuits on the first failing column.
    fn solve_multi(&self, b_columns: &[Vec<F>]) -> SparseResult<Vec<Vec<F>>> {
        b_columns.iter().map(|column| self.solve(column)).collect()
    }
}
/// Output of a symbolic factorization pass.
///
/// Holds the fill-reducing permutation, the elimination tree and CSC-style
/// structural arrays describing the triangular factors.
#[derive(Debug, Clone)]
pub struct SymbolicAnalysis {
    // Fill-reducing permutation: perm[k] is the original index eliminated at step k.
    pub perm: Vec<usize>,
    // Inverse of `perm` (original index -> permuted position).
    pub perm_inv: Vec<usize>,
    // Elimination tree: etree[j] is the parent column of j; usize::MAX marks a root.
    pub etree: Vec<usize>,
    // Column pointers for the L factor structure (length n + 1).
    pub l_colptr: Vec<usize>,
    // Row indices for the L factor structure.
    // NOTE(review): `symbolic_cholesky` allocates this zero-filled without
    // populating actual indices — confirm before relying on its contents.
    pub l_rowind: Vec<usize>,
    // Column pointers for the U factor structure (left empty by the Cholesky path).
    pub u_colptr: Vec<usize>,
    // Row indices for the U factor structure (left empty by the Cholesky path).
    pub u_rowind: Vec<usize>,
    // Matrix dimension.
    pub n: usize,
}
/// Compute a minimum-degree style fill-reducing ordering for a square matrix.
///
/// Builds the symmetrised adjacency graph of the matrix (diagonal ignored),
/// then repeatedly eliminates the remaining vertex of minimum degree,
/// connecting its surviving neighbours into a clique before updating their
/// degrees. Returns the elimination order as a permutation of `0..n`.
///
/// # Errors
/// Returns `SparseError::ValueError` when the matrix is not square.
pub fn amd_ordering<F>(matrix: &CsrMatrix<F>) -> SparseResult<Vec<usize>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let n = matrix.rows();
    if n != matrix.cols() {
        return Err(SparseError::ValueError(
            "AMD ordering requires a square matrix".to_string(),
        ));
    }
    if n == 0 {
        return Ok(Vec::new());
    }
    // Symmetrised adjacency sets, ignoring self-loops.
    let mut adj: Vec<BTreeSet<usize>> = vec![BTreeSet::new(); n];
    for i in 0..n {
        for idx in i_row_range(matrix, i) {
            let j = matrix.indices[idx];
            if i != j {
                adj[i].insert(j);
                adj[j].insert(i);
            }
        }
    }
    let mut degree: Vec<usize> = adj.iter().map(|s| s.len()).collect();
    let mut eliminated = vec![false; n];
    let mut perm = Vec::with_capacity(n);
    for _ in 0..n {
        // Pick the uneliminated vertex of minimum current degree.
        // Fix: the original destructured the degree into a garbled
        // non-identifier token (`°`), which does not compile; `&deg` is the
        // binding the loop body uses.
        let mut min_deg = usize::MAX;
        let mut pivot = 0;
        for (node, &deg) in degree.iter().enumerate() {
            if !eliminated[node] && deg < min_deg {
                min_deg = deg;
                pivot = node;
            }
        }
        eliminated[pivot] = true;
        perm.push(pivot);
        // Eliminate the pivot: its surviving neighbours become a clique
        // (quotient-graph update), then their degrees are refreshed counting
        // only still-active vertices.
        let neighbours: Vec<usize> = adj[pivot]
            .iter()
            .copied()
            .filter(|&nb| !eliminated[nb])
            .collect();
        for i in 0..neighbours.len() {
            let u = neighbours[i];
            adj[u].remove(&pivot);
            for &v in &neighbours[(i + 1)..] {
                adj[u].insert(v);
                adj[v].insert(u);
            }
            degree[u] = adj[u].iter().filter(|&&nb| !eliminated[nb]).count();
        }
    }
    Ok(perm)
}
/// Invert a permutation: if `perm[i] == p` then the result maps `p` back to `i`.
pub fn inverse_perm(perm: &[usize]) -> Vec<usize> {
    perm.iter()
        .enumerate()
        .fold(vec![0usize; perm.len()], |mut inv, (pos, &target)| {
            inv[target] = pos;
            inv
        })
}
/// Build a fill-reducing ordering by recursive graph bisection.
///
/// The adjacency graph of the matrix is split recursively; vertices of each
/// separator are numbered after both halves, which tends to confine fill-in.
///
/// # Errors
/// Returns `SparseError::ValueError` when the matrix is not square.
pub fn nested_dissection_ordering<F>(matrix: &CsrMatrix<F>) -> SparseResult<Vec<usize>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let n = matrix.rows();
    if n != matrix.cols() {
        return Err(SparseError::ValueError(
            "Nested dissection requires a square matrix".to_string(),
        ));
    }
    if n == 0 {
        return Ok(Vec::new());
    }
    // Symmetrised adjacency lists without self-loops or duplicate edges.
    let mut adj: Vec<Vec<usize>> = vec![Vec::new(); n];
    for row in 0..n {
        for idx in i_row_range(matrix, row) {
            let col = matrix.indices[idx];
            if row == col {
                continue;
            }
            if !adj[row].contains(&col) {
                adj[row].push(col);
            }
            if !adj[col].contains(&row) {
                adj[col].push(row);
            }
        }
    }
    let all_nodes: Vec<usize> = (0..n).collect();
    let mut perm = Vec::with_capacity(n);
    nd_recurse(&adj, &all_nodes, &mut perm);
    // Defensive completion: append any vertex the recursion failed to place,
    // so the result is always a full permutation of 0..n.
    if perm.len() != n {
        let placed: BTreeSet<usize> = perm.iter().copied().collect();
        perm.extend((0..n).filter(|v| !placed.contains(v)));
    }
    Ok(perm)
}
fn nd_recurse(adj: &[Vec<usize>], nodes: &[usize], perm: &mut Vec<usize>) {
if nodes.len() <= 64 {
perm.extend_from_slice(nodes);
return;
}
let start = find_pseudo_peripheral(adj, nodes);
let (part_a, separator, part_b) = bfs_bisect(adj, nodes, start);
if !part_a.is_empty() {
nd_recurse(adj, &part_a, perm);
}
if !part_b.is_empty() {
nd_recurse(adj, &part_b, perm);
}
perm.extend_from_slice(&separator);
}
/// Heuristic choice of a bisection start vertex: run two BFS sweeps, each time
/// jumping to the first vertex of the deepest level reached.
fn find_pseudo_peripheral(adj: &[Vec<usize>], nodes: &[usize]) -> usize {
    if nodes.is_empty() {
        return 0;
    }
    let allowed: BTreeSet<usize> = nodes.iter().copied().collect();
    let mut candidate = nodes[0];
    for _ in 0..2 {
        let levels = bfs_levels(adj, candidate, &allowed);
        if let Some(&far) = levels.last().and_then(|level| level.first()) {
            candidate = far;
        }
    }
    candidate
}
/// Group the vertices reachable from `start` (restricted to `allowed`) by BFS
/// distance. Level 0 always contains `start` itself, even when `start` is not
/// in `allowed`.
fn bfs_levels(adj: &[Vec<usize>], start: usize, allowed: &BTreeSet<usize>) -> Vec<Vec<usize>> {
    let mut seen = BTreeSet::new();
    seen.insert(start);
    let mut frontier = vec![start];
    let mut levels = vec![frontier.clone()];
    loop {
        let mut next = Vec::new();
        for &node in &frontier {
            for &nb in &adj[node] {
                // `insert` returns true only on first insertion, so each
                // vertex lands in exactly one level.
                if allowed.contains(&nb) && seen.insert(nb) {
                    next.push(nb);
                }
            }
        }
        if next.is_empty() {
            return levels;
        }
        levels.push(next.clone());
        frontier = next;
    }
}
/// Partition `nodes` into (part A, separator, part B) using BFS levels from
/// `start`: levels before the cut go to A, the cut level becomes the
/// separator, later levels — plus any vertices unreachable from `start` —
/// go to B.
fn bfs_bisect(
    adj: &[Vec<usize>],
    nodes: &[usize],
    start: usize,
) -> (Vec<usize>, Vec<usize>, Vec<usize>) {
    let allowed: BTreeSet<usize> = nodes.iter().copied().collect();
    let levels = bfs_levels(adj, start, &allowed);
    let half = nodes.len() / 2;

    // First level at which at least half of the vertices are covered.
    let mut cut_level = 0;
    let mut covered = 0;
    for (depth, level) in levels.iter().enumerate() {
        covered += level.len();
        if covered >= half {
            cut_level = depth;
            break;
        }
    }

    let mut part_a = Vec::new();
    let mut separator = Vec::new();
    let mut part_b = Vec::new();
    for (depth, level) in levels.iter().enumerate() {
        let bucket = if depth < cut_level {
            &mut part_a
        } else if depth == cut_level {
            &mut separator
        } else {
            &mut part_b
        };
        bucket.extend_from_slice(level);
    }

    // Vertices disconnected from `start` never appear in any BFS level; put
    // them in part B so the partition still covers every input vertex.
    let reached: BTreeSet<usize> = levels.iter().flatten().copied().collect();
    part_b.extend(nodes.iter().copied().filter(|node| !reached.contains(node)));
    (part_a, separator, part_b)
}
/// Compute the elimination tree of the matrix under the permutation `perm`.
///
/// For each permuted column `k`, walks from every structural entry `j < k`
/// toward the current root of `j`'s subtree, compressing paths through
/// `ancestor` and recording the smallest `k` reaching each node as its
/// parent. Roots keep the sentinel `usize::MAX`.
///
/// NOTE(review): only the CSR row pattern is scanned, so a structurally
/// symmetric matrix appears to be assumed — confirm with callers.
pub fn elimination_tree<F>(matrix: &CsrMatrix<F>, perm: &[usize]) -> Vec<usize>
where
    F: Float + SparseElement + Debug + 'static,
{
    let n = matrix.rows();
    let perm_inv = inverse_perm(perm);
    // parent[j] == usize::MAX means "no parent assigned yet" (root).
    let mut parent = vec![usize::MAX; n];
    // ancestor[] implements path compression while walking toward the root.
    let mut ancestor = vec![0usize; n];
    for k in 0..n {
        ancestor[k] = k;
        let orig_row = perm[k];
        let range = i_row_range(matrix, orig_row);
        for idx in range {
            let orig_col = matrix.indices[idx];
            let j = perm_inv[orig_col];
            if j < k {
                // Walk from j up the partially built tree, re-pointing each
                // visited node's ancestor at k (path compression).
                let mut node = j;
                loop {
                    let next = ancestor[node];
                    if next == k {
                        // Already merged into k's subtree during this step.
                        break;
                    }
                    ancestor[node] = k;
                    // The smallest k that reaches `node` becomes its parent.
                    if parent[node] == usize::MAX || parent[node] > k {
                        parent[node] = k;
                    }
                    if next == node {
                        // Reached the subtree root (self-ancestor).
                        break;
                    }
                    node = next;
                }
            }
        }
    }
    parent
}
/// Symbolic phase of a sparse Cholesky factorization.
///
/// Computes the elimination tree and per-column nonzero counts of L for the
/// permuted matrix without touching numerical values. Only `l_colptr` carries
/// structural information; `l_rowind` is allocated to the right length but
/// left zero-filled, and the U arrays stay empty.
///
/// # Errors
/// Returns `SparseError::ValueError` when the matrix is not square.
pub fn symbolic_cholesky<F>(matrix: &CsrMatrix<F>, perm: &[usize]) -> SparseResult<SymbolicAnalysis>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let n = matrix.rows();
    if n != matrix.cols() {
        return Err(SparseError::ValueError(
            "Symbolic Cholesky requires a square matrix".to_string(),
        ));
    }
    let perm_inv = inverse_perm(perm);
    let etree = elimination_tree(matrix, perm);

    // Per-column nonzero counts of L. Every column holds at least its
    // diagonal; each structural entry (j, k) with j < k contributes one entry
    // to the columns on the elimination-tree path from j upward, stopping at
    // nodes already marked during step k.
    let mut col_nnz = vec![1usize; n];
    let mut mark = vec![usize::MAX; n];
    for k in 0..n {
        mark[k] = k;
        for idx in i_row_range(matrix, perm[k]) {
            let j = perm_inv[matrix.indices[idx]];
            if j >= k {
                continue;
            }
            let mut node = j;
            while mark[node] != k {
                mark[node] = k;
                col_nnz[node] += 1;
                let parent = etree[node];
                if parent == usize::MAX || parent >= n {
                    break;
                }
                node = parent;
            }
        }
    }

    // Prefix-sum the counts into a CSC-style column pointer array.
    let mut l_colptr = Vec::with_capacity(n + 1);
    l_colptr.push(0usize);
    for &count in &col_nnz {
        let last = *l_colptr.last().expect("colptr starts non-empty");
        l_colptr.push(last + count);
    }
    let l_rowind = vec![0usize; l_colptr[n]];

    Ok(SymbolicAnalysis {
        perm: perm.to_vec(),
        perm_inv,
        etree,
        l_colptr,
        l_rowind,
        u_colptr: Vec::new(),
        u_rowind: Vec::new(),
        n,
    })
}
/// Numerical result of a Cholesky factorization of the permuted matrix.
///
/// The L factor is stored densely (row-major), together with the
/// fill-reducing permutation that was applied.
#[derive(Debug, Clone)]
pub struct SparseCholResult<F> {
    // Lower-triangular factor L, stored as dense rows.
    pub l_dense: Vec<Vec<F>>,
    // Fill-reducing permutation (permuted position -> original index).
    pub perm: Vec<usize>,
    // Inverse permutation (original index -> permuted position).
    pub perm_inv: Vec<usize>,
    // Matrix dimension.
    pub n: usize,
}
/// Direct solver based on a dense-backed Cholesky factorization with an AMD
/// fill-reducing ordering; used through the `SparseSolver` trait.
pub struct SparseCholeskySolver<F> {
    // Cached factorization; `None` until `factorize` succeeds.
    result: Option<SparseCholResult<F>>,
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> SparseCholeskySolver<F> {
    /// Create a solver with no factorization computed yet.
    pub fn new() -> Self {
        Self { result: None }
    }
    /// Access the cached factorization, if `factorize` has run successfully.
    pub fn factorization(&self) -> Option<&SparseCholResult<F>> {
        self.result.as_ref()
    }
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> Default
    for SparseCholeskySolver<F>
{
    /// Equivalent to `SparseCholeskySolver::new`.
    fn default() -> Self {
        Self::new()
    }
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> SparseSolver<F>
    for SparseCholeskySolver<F>
{
    /// Factorize `P A P^T = L L^T`, where `P` is the AMD ordering, storing L
    /// as a dense lower triangle.
    ///
    /// # Errors
    /// `ValueError` for non-square or non-positive-definite input;
    /// `SingularMatrix` when a pivot underflows.
    fn factorize(&mut self, matrix: &CsrMatrix<F>) -> SparseResult<()> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "Cholesky requires a square matrix".to_string(),
            ));
        }
        if n == 0 {
            // An empty system factors trivially.
            self.result = Some(SparseCholResult {
                l_dense: Vec::new(),
                perm: Vec::new(),
                perm_inv: Vec::new(),
                n: 0,
            });
            return Ok(());
        }

        // Fill-reducing permutation, then scatter the CSR entries into a
        // dense array in permuted order (duplicate entries accumulate).
        let perm = amd_ordering(matrix)?;
        let perm_inv = inverse_perm(&perm);
        let mut dense = vec![vec![F::sparse_zero(); n]; n];
        for (pi, &orig_row) in perm.iter().enumerate() {
            for idx in i_row_range(matrix, orig_row) {
                let pj = perm_inv[matrix.indices[idx]];
                dense[pi][pj] += matrix.data[idx];
            }
        }

        // Classic row-by-row Cholesky on the dense copy.
        let mut l = vec![vec![F::sparse_zero(); n]; n];
        for i in 0..n {
            for j in 0..=i {
                let mut acc = dense[i][j];
                for k in 0..j {
                    acc -= l[i][k] * l[j][k];
                }
                if i == j {
                    // An SPD matrix must yield a strictly positive pivot here.
                    if acc <= F::sparse_zero() {
                        return Err(SparseError::ValueError(format!(
                            "Matrix is not positive definite: non-positive diagonal at row {i}"
                        )));
                    }
                    l[i][j] = acc.sqrt();
                } else {
                    let pivot = l[j][j];
                    if pivot.abs() < F::epsilon() {
                        return Err(SparseError::SingularMatrix(format!(
                            "Zero diagonal in L at row {j}"
                        )));
                    }
                    l[i][j] = acc / pivot;
                }
            }
        }
        self.result = Some(SparseCholResult {
            l_dense: l,
            perm,
            perm_inv,
            n,
        });
        Ok(())
    }

    /// Solve `A x = b` via permuted forward/backward substitution with L.
    fn solve(&self, b: &[F]) -> SparseResult<Vec<F>> {
        let fact = self.result.as_ref().ok_or_else(|| {
            SparseError::ValueError("Cholesky factorization not computed".to_string())
        })?;
        let n = fact.n;
        if b.len() != n {
            return Err(SparseError::DimensionMismatch {
                expected: n,
                found: b.len(),
            });
        }
        if n == 0 {
            return Ok(Vec::new());
        }

        // Apply the permutation: y = P b.
        let mut y: Vec<F> = (0..n).map(|i| b[fact.perm[i]]).collect();

        // Forward substitution with L.
        for i in 0..n {
            for j in 0..i {
                let lij = fact.l_dense[i][j];
                y[i] = y[i] - lij * y[j];
            }
            let diag = fact.l_dense[i][i];
            if diag.abs() < F::epsilon() {
                return Err(SparseError::SingularMatrix(
                    "Zero diagonal in L during solve".to_string(),
                ));
            }
            y[i] /= diag;
        }

        // Backward substitution with L^T.
        for i in (0..n).rev() {
            for j in (i + 1)..n {
                let lji = fact.l_dense[j][i];
                y[i] = y[i] - lji * y[j];
            }
            let diag = fact.l_dense[i][i];
            if diag.abs() < F::epsilon() {
                return Err(SparseError::SingularMatrix(
                    "Zero diagonal in L^T during solve".to_string(),
                ));
            }
            y[i] /= diag;
        }

        // Undo the permutation: x = P^T y.
        let mut x = vec![F::sparse_zero(); n];
        for (i, &orig) in fact.perm.iter().enumerate() {
            x[orig] = y[i];
        }
        Ok(x)
    }
}
/// Numerical result of an LU factorization with partial pivoting.
///
/// L (unit diagonal, below the diagonal) and U (on and above the diagonal)
/// share one dense array.
#[derive(Debug, Clone)]
pub struct SparseLuResult<F> {
    // Combined L/U factors, stored as dense rows.
    pub lu_dense: Vec<Vec<F>>,
    // Row permutation accumulated by partial pivoting.
    pub row_perm: Vec<usize>,
    // Column permutation from the fill-reducing ordering.
    pub col_perm: Vec<usize>,
    // Matrix dimension.
    pub n: usize,
}
/// Direct solver based on a dense-backed LU factorization with partial
/// pivoting and an AMD column ordering; used through the `SparseSolver` trait.
pub struct SparseLuSolver<F> {
    // Cached factorization; `None` until `factorize` succeeds.
    result: Option<SparseLuResult<F>>,
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> SparseLuSolver<F> {
    /// Create a solver with no factorization computed yet.
    pub fn new() -> Self {
        Self { result: None }
    }
    /// Access the cached factorization, if `factorize` has run successfully.
    pub fn factorization(&self) -> Option<&SparseLuResult<F>> {
        self.result.as_ref()
    }
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> Default for SparseLuSolver<F> {
    /// Equivalent to `SparseLuSolver::new`.
    fn default() -> Self {
        Self::new()
    }
}
impl<F: Float + NumAssign + Sum + SparseElement + Debug + 'static> SparseSolver<F>
    for SparseLuSolver<F>
{
    /// Factorize the column-permuted matrix with Gaussian elimination and
    /// partial (row) pivoting; L and U share one dense working array.
    ///
    /// # Errors
    /// `ValueError` for non-square input. A numerically zero pivot is not an
    /// error here — elimination of that column is skipped and the singularity
    /// is reported by `solve`.
    fn factorize(&mut self, matrix: &CsrMatrix<F>) -> SparseResult<()> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "LU requires a square matrix".to_string(),
            ));
        }
        if n == 0 {
            // An empty system factors trivially.
            self.result = Some(SparseLuResult {
                lu_dense: Vec::new(),
                row_perm: Vec::new(),
                col_perm: Vec::new(),
                n: 0,
            });
            return Ok(());
        }

        // AMD column ordering for fill reduction, then scatter the CSR
        // entries into a dense working array (duplicate entries accumulate).
        let col_perm = amd_ordering(matrix)?;
        let col_perm_inv = inverse_perm(&col_perm);
        let mut work = vec![vec![F::sparse_zero(); n]; n];
        for row in 0..n {
            for idx in i_row_range(matrix, row) {
                let pj = col_perm_inv[matrix.indices[idx]];
                work[row][pj] += matrix.data[idx];
            }
        }

        // Gaussian elimination; `row_perm` records the accumulated row swaps.
        let mut row_perm: Vec<usize> = (0..n).collect();
        for k in 0..n {
            // Partial pivoting: largest-magnitude entry in column k at or
            // below the diagonal.
            let mut pivot_row = k;
            let mut best = F::sparse_zero();
            for i in k..n {
                let mag = work[i][k].abs();
                if mag > best {
                    best = mag;
                    pivot_row = i;
                }
            }
            if pivot_row != k {
                work.swap(k, pivot_row);
                row_perm.swap(k, pivot_row);
            }
            let pivot = work[k][k];
            if pivot.abs() < F::epsilon() {
                // Numerically zero column: skip elimination for this step.
                continue;
            }
            for i in (k + 1)..n {
                let factor = work[i][k] / pivot;
                // Store the multiplier in the strictly lower triangle.
                work[i][k] = factor;
                for j in (k + 1)..n {
                    let u_kj = work[k][j];
                    work[i][j] -= factor * u_kj;
                }
            }
        }
        self.result = Some(SparseLuResult {
            lu_dense: work,
            row_perm,
            col_perm,
            n,
        });
        Ok(())
    }

    /// Solve `A x = b` using the stored LU factors and permutations.
    fn solve(&self, b: &[F]) -> SparseResult<Vec<F>> {
        let fact = self
            .result
            .as_ref()
            .ok_or_else(|| SparseError::ValueError("LU factorization not computed".to_string()))?;
        let n = fact.n;
        if b.len() != n {
            return Err(SparseError::DimensionMismatch {
                expected: n,
                found: b.len(),
            });
        }
        if n == 0 {
            return Ok(Vec::new());
        }

        // Apply the row permutation recorded during pivoting.
        let mut z: Vec<F> = (0..n).map(|i| b[fact.row_perm[i]]).collect();

        // Forward substitution with the implicit unit-diagonal L.
        for i in 0..n {
            for j in 0..i {
                let lij = fact.lu_dense[i][j];
                z[i] = z[i] - lij * z[j];
            }
        }

        // Backward substitution with U; a zero diagonal means the matrix was
        // numerically singular during factorization.
        for i in (0..n).rev() {
            for j in (i + 1)..n {
                let uij = fact.lu_dense[i][j];
                z[i] = z[i] - uij * z[j];
            }
            let diag = fact.lu_dense[i][i];
            if diag.abs() < F::epsilon() {
                return Err(SparseError::SingularMatrix(format!(
                    "Zero diagonal in U at row {i}"
                )));
            }
            z[i] /= diag;
        }

        // Undo the column permutation to express x in the original ordering.
        let mut x = vec![F::sparse_zero(); n];
        for (j, &orig) in fact.col_perm.iter().enumerate() {
            x[orig] = z[j];
        }
        Ok(x)
    }
}
/// One-shot convenience wrapper: factorize `matrix` with `SparseLuSolver` and
/// solve for a single right-hand side `b`.
pub fn sparse_lu_solve<F>(matrix: &CsrMatrix<F>, b: &[F]) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let mut lu = SparseLuSolver::new();
    lu.factorize(matrix)?;
    lu.solve(b)
}
/// One-shot convenience wrapper: factorize `matrix` with
/// `SparseCholeskySolver` and solve for a single right-hand side `b`.
pub fn sparse_cholesky_solve<F>(matrix: &CsrMatrix<F>, b: &[F]) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    let mut chol = SparseCholeskySolver::new();
    chol.factorize(matrix)?;
    chol.solve(b)
}
/// Half-open range of positions in `indices`/`data` covering `row`.
/// Out-of-range rows yield an empty range instead of panicking.
fn i_row_range<F: SparseElement + Clone + Copy + scirs2_core::numeric::Zero + PartialEq>(
    matrix: &CsrMatrix<F>,
    row: usize,
) -> std::ops::Range<usize> {
    if row < matrix.rows() {
        matrix.indptr[row]..matrix.indptr[row + 1]
    } else {
        0..0
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // 3x3 symmetric positive definite matrix for the Cholesky tests.
    fn create_spd_3x3() -> CsrMatrix<f64> {
        let rows = vec![0, 0, 1, 1, 1, 2, 2];
        let cols = vec![0, 1, 0, 1, 2, 1, 2];
        let data = vec![4.0, 1.0, 1.0, 5.0, 2.0, 2.0, 6.0];
        CsrMatrix::new(data, rows, cols, (3, 3)).expect("Failed to create SPD matrix")
    }

    // Fully dense, nonsymmetric 3x3 matrix for the LU tests.
    fn create_general_3x3() -> CsrMatrix<f64> {
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![3.0, 1.0, 2.0, 1.0, 4.0, 1.0, 0.0, 1.0, 5.0];
        CsrMatrix::new(data, rows, cols, (3, 3)).expect("Failed to create matrix")
    }

    // Tridiagonal SPD 4x4 matrix.
    fn create_spd_4x4() -> CsrMatrix<f64> {
        let rows = vec![0, 0, 1, 1, 1, 2, 2, 2, 3, 3];
        let cols = vec![0, 1, 0, 1, 2, 1, 2, 3, 2, 3];
        let data = vec![4.0, 1.0, 1.0, 4.0, 1.0, 1.0, 4.0, 1.0, 1.0, 4.0];
        CsrMatrix::new(data, rows, cols, (4, 4)).expect("Failed to create SPD 4x4")
    }

    // Assert that mat * x ~= b componentwise within `tol`.
    fn verify_solve(mat: &CsrMatrix<f64>, x: &[f64], b: &[f64], tol: f64) {
        let dense = mat.to_dense();
        let n = b.len();
        for i in 0..n {
            let mut row_sum = 0.0;
            for j in 0..n {
                row_sum += dense[i][j] * x[j];
            }
            assert!(
                (row_sum - b[i]).abs() < tol,
                "Row {i}: residual {}",
                (row_sum - b[i]).abs()
            );
        }
    }

    #[test]
    fn test_amd_ordering_basic() {
        let mat = create_spd_3x3();
        let perm = amd_ordering(&mat).expect("AMD failed");
        assert_eq!(perm.len(), 3);
        // The ordering must be a permutation of 0..n.
        let mut sorted = perm.clone();
        sorted.sort();
        assert_eq!(sorted, vec![0, 1, 2]);
    }

    #[test]
    fn test_amd_ordering_empty() {
        let mat =
            CsrMatrix::<f64>::new(vec![], vec![], vec![], (0, 0)).expect("Failed to create empty");
        let perm = amd_ordering(&mat).expect("AMD failed on empty");
        assert!(perm.is_empty());
    }

    #[test]
    fn test_inverse_perm() {
        let perm = vec![2, 0, 1];
        let inv = inverse_perm(&perm);
        assert_eq!(inv, vec![1, 2, 0]);
        // perm and inv must compose to the identity.
        for i in 0..3 {
            assert_eq!(perm[inv[i]], i);
        }
    }

    #[test]
    fn test_nested_dissection_basic() {
        let mat = create_spd_4x4();
        let perm = nested_dissection_ordering(&mat).expect("ND failed");
        assert_eq!(perm.len(), 4);
        // The ordering must be a permutation of 0..n.
        let mut sorted = perm.clone();
        sorted.sort();
        assert_eq!(sorted, vec![0, 1, 2, 3]);
    }

    #[test]
    fn test_elimination_tree() {
        let mat = create_spd_3x3();
        let perm: Vec<usize> = (0..3).collect();
        let etree = elimination_tree(&mat, &perm);
        assert_eq!(etree.len(), 3);
    }

    #[test]
    fn test_cholesky_solve_3x3() {
        let mat = create_spd_3x3();
        // Right-hand side chosen so the exact solution is all ones.
        let b = vec![5.0, 8.0, 8.0];
        let x = sparse_cholesky_solve(&mat, &b).expect("Cholesky solve failed");
        assert_eq!(x.len(), 3);
        for (i, &xi) in x.iter().enumerate() {
            assert!((xi - 1.0).abs() < 1e-10, "x[{i}] = {xi}, expected 1.0");
        }
    }

    #[test]
    fn test_cholesky_solve_4x4() {
        let mat = create_spd_4x4();
        let b = vec![5.0, 6.0, 6.0, 5.0];
        let x = sparse_cholesky_solve(&mat, &b).expect("Cholesky solve 4x4 failed");
        verify_solve(&mat, &x, &b, 1e-10);
    }

    #[test]
    fn test_cholesky_non_spd() {
        // Diagonal matrix with a negative entry is not positive definite.
        let rows = vec![0, 1, 2];
        let cols = vec![0, 1, 2];
        let data = vec![-1.0, 1.0, 1.0];
        let mat = CsrMatrix::new(data, rows, cols, (3, 3)).expect("Failed to create matrix");
        let result = sparse_cholesky_solve(&mat, &[1.0, 1.0, 1.0]);
        assert!(result.is_err());
    }

    #[test]
    fn test_lu_solve_3x3() {
        let mat = create_general_3x3();
        let b = vec![6.0, 6.0, 6.0];
        let x = sparse_lu_solve(&mat, &b).expect("LU solve failed");
        verify_solve(&mat, &x, &b, 1e-9);
    }

    #[test]
    fn test_lu_solve_identity() {
        // Solving against the identity must return b unchanged.
        let rows = vec![0, 1, 2, 3];
        let cols = vec![0, 1, 2, 3];
        let data = vec![1.0, 1.0, 1.0, 1.0];
        let mat = CsrMatrix::new(data, rows, cols, (4, 4)).expect("Failed to create identity");
        let b = vec![1.0, 2.0, 3.0, 4.0];
        let x = sparse_lu_solve(&mat, &b).expect("LU solve on identity failed");
        for i in 0..4 {
            assert!(
                (x[i] - b[i]).abs() < 1e-12,
                "x[{i}] = {}, expected {}",
                x[i],
                b[i]
            );
        }
    }

    #[test]
    fn test_lu_solve_multi() {
        // One factorization reused for two right-hand sides.
        let mat = create_general_3x3();
        let mut solver = SparseLuSolver::new();
        solver.factorize(&mat).expect("LU factorize failed");
        let b1 = vec![6.0, 6.0, 6.0];
        let b2 = vec![3.0, 1.0, 2.0];
        let results = solver
            .solve_multi(&[b1.clone(), b2.clone()])
            .expect("Solve multi failed");
        verify_solve(&mat, &results[0], &b1, 1e-9);
        verify_solve(&mat, &results[1], &b2, 1e-9);
    }

    #[test]
    fn test_cholesky_solver_trait() {
        let mat = create_spd_3x3();
        let mut solver = SparseCholeskySolver::new();
        solver.factorize(&mat).expect("Factorize failed");
        assert!(solver.factorization().is_some());
        let b = vec![5.0, 8.0, 8.0];
        let x = solver.solve(&b).expect("Solve failed");
        for (i, xi) in x.iter().enumerate() {
            assert!((xi - 1.0).abs() < 1e-10, "x[{i}] = {xi}");
        }
    }

    #[test]
    fn test_lu_empty_matrix() {
        let mat =
            CsrMatrix::<f64>::new(vec![], vec![], vec![], (0, 0)).expect("Failed to create empty");
        let mut solver = SparseLuSolver::new();
        solver
            .factorize(&mat)
            .expect("LU factorize on empty failed");
        let x = solver.solve(&[]).expect("LU solve on empty failed");
        assert!(x.is_empty());
    }

    #[test]
    fn test_cholesky_dimension_mismatch() {
        let mat = create_spd_3x3();
        let mut solver = SparseCholeskySolver::new();
        solver.factorize(&mat).expect("Factorize failed");
        // 2-element RHS against a 3x3 factorization must be rejected.
        let result = solver.solve(&[1.0, 2.0]);
        assert!(result.is_err());
    }

    #[test]
    fn test_lu_solve_5x5_diag_dominant() {
        // Diagonally dominant tridiagonal 5x5 system.
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut data = Vec::new();
        for i in 0..5 {
            for j in 0..5 {
                if i == j {
                    rows.push(i);
                    cols.push(j);
                    data.push(10.0);
                } else if (i as isize - j as isize).unsigned_abs() <= 1 {
                    rows.push(i);
                    cols.push(j);
                    data.push(1.0);
                }
            }
        }
        let mat = CsrMatrix::new(data, rows, cols, (5, 5)).expect("Failed to create 5x5");
        let b = vec![12.0, 12.0, 12.0, 12.0, 12.0];
        let x = sparse_lu_solve(&mat, &b).expect("LU 5x5 failed");
        verify_solve(&mat, &x, &b, 1e-8);
    }

    #[test]
    fn test_symbolic_cholesky() {
        let mat = create_spd_3x3();
        let perm: Vec<usize> = (0..3).collect();
        let analysis = symbolic_cholesky(&mat, &perm).expect("Symbolic Cholesky failed");
        assert_eq!(analysis.n, 3);
        assert_eq!(analysis.l_colptr.len(), 4);
        // L must contain at least the diagonal of every column.
        assert!(analysis.l_colptr[3] >= 3);
    }

    #[test]
    fn test_lu_non_square_error() {
        let rows = vec![0, 1];
        let cols = vec![0, 0];
        let data = vec![1.0, 2.0];
        let mat = CsrMatrix::new(data, rows, cols, (2, 3)).expect("Failed to create non-square");
        let result = sparse_lu_solve(&mat, &[1.0, 2.0]);
        assert!(result.is_err());
    }

    #[test]
    fn test_cholesky_non_square_error() {
        let rows = vec![0, 1, 2];
        let cols = vec![0, 0, 0];
        let data = vec![1.0, 2.0, 3.0];
        let mat = CsrMatrix::new(data, rows, cols, (3, 4)).expect("Failed to create non-square");
        let result = sparse_cholesky_solve(&mat, &[1.0, 2.0, 3.0]);
        assert!(result.is_err());
    }

    #[test]
    fn test_lu_solve_with_zeros() {
        // Matrix with structural zeros off the main pattern.
        let rows = vec![0, 0, 1, 2, 2];
        let cols = vec![0, 2, 1, 0, 2];
        let data = vec![2.0, 1.0, 3.0, 1.0, 4.0];
        let mat = CsrMatrix::new(data, rows, cols, (3, 3)).expect("Failed");
        let b = vec![3.0, 3.0, 5.0];
        let x = sparse_lu_solve(&mat, &b).expect("LU solve sparse matrix failed");
        verify_solve(&mat, &x, &b, 1e-9);
    }

    #[test]
    fn test_amd_non_square_error() {
        let rows = vec![0];
        let cols = vec![0];
        let data = vec![1.0];
        let mat = CsrMatrix::new(data, rows, cols, (2, 3)).expect("Failed");
        let result = amd_ordering(&mat);
        assert!(result.is_err());
    }
}