use super::{Inertia, KktMatrix, LinearSolver, SolverError};
use faer::Index;
use faer::sparse::linalg::cholesky::{
factorize_symbolic_cholesky, LdltRef, SymbolicCholesky,
};
use faer::linalg::cholesky::ldlt_diagonal::compute::LdltRegularization;
use faer::Side;
use faer::dyn_stack::{PodStack, GlobalPodBuffer};
use faer::Parallelism;
use faer::Mat;
use faer::Conj;
/// Sparse LDLᵀ linear solver backed by `faer`'s sparse Cholesky (LDLT)
/// routines. The symbolic factorization, CSC structure, COO→CSC scatter map,
/// and scratch buffers are cached across calls to `factor`, so refactorizing
/// a matrix with an unchanged sparsity pattern only redoes the numeric work.
pub struct SparseLdl {
    /// Dimension of the (square, symmetric) system.
    n: usize,
    /// Symbolic factorization, computed on the first `factor` call and reused.
    symbolic: Option<SymbolicCholesky<usize>>,
    /// Numeric values of the L and D factors, laid out per `symbolic`.
    l_values: Vec<f64>,
    /// True once a numeric factorization has been performed.
    factored: bool,
    /// Pivots with magnitude at or below this are counted as zero eigenvalues.
    zero_pivot_tol: f64,
    /// Test-only switch forcing the supernodal code path regardless of size.
    force_supernodal: bool,
    /// Cached CSC form of the matrix (structure fixed after the first call).
    csc: Option<faer::sparse::SparseColMat<usize, f64>>,
    /// Maps each COO triplet index to its slot in the CSC values array.
    coo_to_csc: Vec<usize>,
    /// Preallocated scratch memory for numeric factorization.
    factor_buf: Option<GlobalPodBuffer>,
    /// Preallocated scratch memory for triangular solves.
    solve_buf: Option<GlobalPodBuffer>,
}
impl Default for SparseLdl {
    /// Equivalent to [`SparseLdl::new`]: an empty solver with no cached state.
    fn default() -> Self {
        Self::new()
    }
}
impl SparseLdl {
    /// Creates a solver with no cached factorization; all caches are built
    /// lazily on the first call to `factor`.
    pub fn new() -> Self {
        Self {
            n: 0,
            symbolic: None,
            l_values: Vec::new(),
            factored: false,
            // Pivots with magnitude below this count as zero eigenvalues.
            zero_pivot_tol: 1e-12,
            force_supernodal: false,
            csc: None,
            coo_to_csc: Vec::new(),
            factor_buf: None,
            solve_buf: None,
        }
    }

    /// Counts the signs of the diagonal pivots of D in the LDLᵀ factorization,
    /// yielding the matrix inertia (positive / negative / near-zero pivot
    /// counts; by Sylvester's law of inertia these match the eigenvalue signs).
    ///
    /// # Panics
    /// Panics if called before a symbolic factorization exists.
    fn compute_inertia(&self) -> Inertia {
        let symbolic = self
            .symbolic
            .as_ref()
            .expect("compute_inertia requires a symbolic factorization");
        let tol = self.zero_pivot_tol;
        let (mut positive, mut negative, mut zero) = (0, 0, 0);
        // Shared pivot classification for both storage layouts below.
        let mut classify = |d: f64| {
            if d > tol {
                positive += 1;
            } else if d < -tol {
                negative += 1;
            } else {
                zero += 1;
            }
        };
        match symbolic.raw() {
            faer::sparse::linalg::cholesky::SymbolicCholeskyRaw::Simplicial(ref s) => {
                // Simplicial storage: the diagonal entry of column j is the
                // first stored value of that column.
                let col_ptrs = s.col_ptrs();
                for j in 0..self.n {
                    classify(self.l_values[col_ptrs[j]]);
                }
            }
            faer::sparse::linalg::cholesky::SymbolicCholeskyRaw::Supernodal(ref s) => {
                let n_supernodes = s.n_supernodes();
                let sn_begin = s.supernode_begin();
                let sn_end_slice = s.supernode_end();
                let col_ptrs_ri = s.col_ptrs_for_row_indices();
                let col_ptrs_val = s.col_ptrs_for_values();
                for sn in 0..n_supernodes {
                    let sn_start = sn_begin[sn].zx();
                    let sn_end = sn_end_slice[sn].zx();
                    let sn_ncols = sn_end - sn_start;
                    // Pattern rows below the diagonal block, plus the block's
                    // own columns, give the block's leading dimension.
                    let pattern_len = col_ptrs_ri[sn + 1].zx() - col_ptrs_ri[sn].zx();
                    let sn_nrows = pattern_len + sn_ncols;
                    let val_start = col_ptrs_val[sn].zx();
                    // Diagonal entries sit at stride `sn_nrows + 1` from the
                    // block's first value.
                    for j in 0..sn_ncols {
                        classify(self.l_values[val_start + j * sn_nrows + j]);
                    }
                }
            }
        }
        Inertia {
            positive,
            negative,
            zero,
        }
    }

    /// For each COO triplet `(row, col)`, locates that entry in the CSC
    /// structure and records its position in the values array, so later
    /// numeric updates can scatter triplet values without rebuilding the CSC.
    ///
    /// # Panics
    /// Panics if a triplet refers to an entry absent from the CSC pattern.
    fn build_coo_to_csc_mapping(
        triplet_rows: &[usize],
        triplet_cols: &[usize],
        csc: &faer::sparse::SparseColMat<usize, f64>,
    ) -> Vec<usize> {
        let sym = csc.symbolic();
        let col_ptrs = sym.col_ptrs();
        let row_indices = sym.row_indices();
        triplet_rows
            .iter()
            .zip(triplet_cols)
            .map(|(&row, &col)| {
                let col_start = col_ptrs[col];
                let col_end = col_ptrs[col + 1];
                // Row indices within a CSC column are sorted, so a binary
                // search finds the entry.
                let pos = row_indices[col_start..col_end]
                    .binary_search(&row)
                    .unwrap_or_else(|_| {
                        panic!("COO entry ({}, {}) not found in CSC structure", row, col)
                    });
                col_start + pos
            })
            .collect()
    }

    /// Zeroes the CSC values, then accumulates each COO triplet value into its
    /// mapped CSC slot. Accumulation (`+=`) means duplicate COO entries sum,
    /// matching `faer`'s triplet-based construction.
    ///
    /// # Panics
    /// Panics if the mapping has fewer entries than `triplet_vals` (the
    /// mapping must be extended before new triplets are scattered).
    fn scatter_coo_to_csc(
        coo_to_csc: &[usize],
        triplet_vals: &[f64],
        csc_values: &mut [f64],
    ) {
        assert!(
            coo_to_csc.len() >= triplet_vals.len(),
            "COO->CSC mapping is missing entries"
        );
        csc_values.fill(0.0);
        for (&pos, &val) in coo_to_csc.iter().zip(triplet_vals) {
            csc_values[pos] += val;
        }
    }
}
impl SparseLdl {
    /// Test-only hook: forces the supernodal factorization path even for
    /// matrices below the size threshold, so both layouts get coverage.
    #[cfg(test)]
    fn force_supernodal(&mut self) {
        self.force_supernodal = true;
    }
}
impl LinearSolver for SparseLdl {
    /// Factors the sparse KKT matrix via LDLᵀ and returns its inertia.
    ///
    /// The first call builds the CSC structure, the COO→CSC scatter map, the
    /// symbolic factorization, and scratch buffers. Subsequent calls assume
    /// the sparsity pattern is unchanged (new triplets may only be appended)
    /// and reuse all cached structures, performing only the numeric
    /// refactorization.
    ///
    /// # Errors
    /// Returns `SolverError::NumericalFailure` if the matrix is dense, the
    /// CSC conversion fails, or the symbolic factorization fails.
    fn factor(&mut self, matrix: &KktMatrix) -> Result<Option<Inertia>, SolverError> {
        let sparse = match matrix {
            KktMatrix::Sparse(s) => s,
            KktMatrix::Dense(_) => {
                return Err(SolverError::NumericalFailure(
                    "SparseLdl requires KktMatrix::Sparse".into(),
                ))
            }
        };
        self.n = sparse.n;
        if self.n == 0 {
            // Empty system: trivially factored, with empty inertia.
            self.factored = true;
            return Ok(Some(Inertia {
                positive: 0,
                negative: 0,
                zero: 0,
            }));
        }
        let first_call = self.symbolic.is_none();
        if first_call {
            // Build the CSC matrix once; its pattern is cached for all later
            // refactorizations.
            let triplets: Vec<(usize, usize, f64)> = sparse
                .triplet_rows
                .iter()
                .zip(sparse.triplet_cols.iter())
                .zip(sparse.triplet_vals.iter())
                .map(|((&r, &c), &v)| (r, c, v))
                .collect();
            let csc = faer::sparse::SparseColMat::<usize, f64>::try_new_from_triplets(
                self.n, self.n, &triplets,
            )
            .map_err(|e| SolverError::NumericalFailure(format!("CSC conversion: {:?}", e)))?;
            self.coo_to_csc = Self::build_coo_to_csc_mapping(
                &sparse.triplet_rows,
                &sparse.triplet_cols,
                &csc,
            );
            use faer::sparse::linalg::cholesky::CholeskySymbolicParams;
            use faer::sparse::linalg::SupernodalThreshold;
            // Large problems go straight to the supernodal (blocked)
            // factorization; small ones use faer's flop-ratio heuristic.
            // Tests can force the supernodal path via `force_supernodal`.
            let threshold = if self.force_supernodal || self.n >= 500 {
                SupernodalThreshold::FORCE_SUPERNODAL
            } else {
                SupernodalThreshold::AUTO
            };
            let symbolic = factorize_symbolic_cholesky(
                csc.symbolic(),
                Side::Upper,
                Default::default(),
                CholeskySymbolicParams {
                    supernodal_flop_ratio_threshold: threshold,
                    ..Default::default()
                },
            )
            .map_err(|e| {
                SolverError::NumericalFailure(format!("symbolic factorization: {:?}", e))
            })?;
            self.l_values = vec![0.0; symbolic.len_values()];
            // Preallocate scratch memory so repeated factor/solve calls do
            // not allocate.
            let par = Parallelism::Rayon(0);
            let factor_req = symbolic
                .factorize_numeric_ldlt_req::<f64>(false, par)
                .map_err(|e| SolverError::NumericalFailure(format!("stack req: {:?}", e)))?;
            self.factor_buf = Some(GlobalPodBuffer::new(factor_req));
            let solve_req = symbolic
                .solve_in_place_req::<f64>(1)
                .map_err(|e| {
                    SolverError::NumericalFailure(format!("solve stack req: {:?}", e))
                })?;
            self.solve_buf = Some(GlobalPodBuffer::new(solve_req));
            self.symbolic = Some(symbolic);
            self.csc = Some(csc);
        } else {
            // Pattern is assumed unchanged; only values are rewritten. If new
            // triplets were appended since the first call, extend the scatter
            // map for the tail before scattering.
            let new_nnz = sparse.triplet_rows.len();
            if new_nnz > self.coo_to_csc.len() {
                let extra_mapping = Self::build_coo_to_csc_mapping(
                    &sparse.triplet_rows[self.coo_to_csc.len()..],
                    &sparse.triplet_cols[self.coo_to_csc.len()..],
                    self.csc.as_ref().unwrap(),
                );
                self.coo_to_csc.extend(extra_mapping);
            }
            let csc = self.csc.as_mut().unwrap();
            let csc_values = csc.values_mut();
            Self::scatter_coo_to_csc(&self.coo_to_csc, &sparse.triplet_vals, csc_values);
        }
        let csc = self.csc.as_ref().unwrap();
        let symbolic = self.symbolic.as_ref().unwrap();
        let par = Parallelism::Rayon(0);
        // Dynamic regularization disabled so the computed inertia reflects
        // the actual matrix, not a perturbed one.
        let reg = LdltRegularization {
            dynamic_regularization_signs: None,
            dynamic_regularization_delta: 0.0,
            dynamic_regularization_epsilon: 0.0,
        };
        let stack = PodStack::new(self.factor_buf.as_mut().unwrap());
        // NOTE(review): the returned factorization handle is discarded;
        // numeric trouble (e.g. zero pivots) is assumed to surface via the
        // pivot signs inspected in `compute_inertia` -- confirm faer's LDLT
        // cannot fail in a way that needs explicit handling here.
        let _ldlt = symbolic.factorize_numeric_ldlt::<f64>(
            &mut self.l_values,
            csc.as_ref(),
            Side::Upper,
            reg,
            par,
            stack,
        );
        self.factored = true;
        let inertia = self.compute_inertia();
        Ok(Some(inertia))
    }

    /// Solves `A x = b` using the cached factorization, writing `x` into
    /// `solution`.
    ///
    /// # Errors
    /// - `NumericalFailure` if `factor` has not been called successfully.
    /// - `DimensionMismatch` if `rhs` or `solution` is not length `n`.
    fn solve(&mut self, rhs: &[f64], solution: &mut [f64]) -> Result<(), SolverError> {
        if !self.factored {
            return Err(SolverError::NumericalFailure(
                "matrix not factored".to_string(),
            ));
        }
        // Check the two slices separately so the error reports the length of
        // whichever one actually mismatches (previously `rhs.len()` was
        // reported even when `solution` was the wrong size).
        if rhs.len() != self.n {
            return Err(SolverError::DimensionMismatch {
                expected: self.n,
                got: rhs.len(),
            });
        }
        if solution.len() != self.n {
            return Err(SolverError::DimensionMismatch {
                expected: self.n,
                got: solution.len(),
            });
        }
        if self.n == 0 {
            return Ok(());
        }
        let symbolic = self.symbolic.as_ref().unwrap();
        let ldlt = LdltRef::new(symbolic, self.l_values.as_slice());
        // faer solves in place on a column matrix: copy rhs in, solve, copy
        // the result out.
        let mut rhs_mat = Mat::<f64>::from_fn(self.n, 1, |i, _| rhs[i]);
        let stack = PodStack::new(self.solve_buf.as_mut().unwrap());
        ldlt.solve_in_place_with_conj(Conj::No, rhs_mat.as_mut(), Parallelism::Rayon(0), stack);
        for i in 0..self.n {
            solution[i] = rhs_mat[(i, 0)];
        }
        Ok(())
    }

    /// This solver derives inertia from the LDLᵀ pivot signs.
    fn provides_inertia(&self) -> bool {
        true
    }

    /// Returns the smallest diagonal pivot of D from the current
    /// factorization, or `None` if nothing has been factored or `n == 0`.
    ///
    /// Traverses the same two storage layouts as `compute_inertia`.
    fn min_diagonal(&self) -> Option<f64> {
        if !self.factored || self.n == 0 {
            return None;
        }
        let symbolic = self.symbolic.as_ref()?;
        let mut min_d = f64::INFINITY;
        match symbolic.raw() {
            faer::sparse::linalg::cholesky::SymbolicCholeskyRaw::Simplicial(ref s) => {
                // Diagonal of column j is the first stored value of the column.
                let col_ptrs = s.col_ptrs();
                for j in 0..self.n {
                    min_d = min_d.min(self.l_values[col_ptrs[j]]);
                }
            }
            faer::sparse::linalg::cholesky::SymbolicCholeskyRaw::Supernodal(ref s) => {
                // Diagonal entries sit at stride `sn_nrows + 1` within each
                // supernode's value block (same indexing as `compute_inertia`).
                let n_supernodes = s.n_supernodes();
                let sn_begin = s.supernode_begin();
                let sn_end_slice = s.supernode_end();
                let col_ptrs_ri = s.col_ptrs_for_row_indices();
                let col_ptrs_val = s.col_ptrs_for_values();
                for sn in 0..n_supernodes {
                    let sn_start = sn_begin[sn].zx();
                    let sn_end = sn_end_slice[sn].zx();
                    let sn_ncols = sn_end - sn_start;
                    let pattern_len = col_ptrs_ri[sn + 1].zx() - col_ptrs_ri[sn].zx();
                    let sn_nrows = pattern_len + sn_ncols;
                    let val_start = col_ptrs_val[sn].zx();
                    for j in 0..sn_ncols {
                        min_d = min_d.min(self.l_values[val_start + j * sn_nrows + j]);
                    }
                }
            }
        }
        Some(min_d)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linear_solver::SparseSymmetricMatrix;

    /// A small symmetric positive-definite matrix must report all-positive
    /// inertia, and the solve must satisfy A*x = b to tight tolerance.
    #[test]
    fn test_sparse_ldl_positive_definite() {
        // Lower triangle of a 3x3 SPD matrix.
        let mut s = SparseSymmetricMatrix::zeros(3);
        s.add(0, 0, 4.0);
        s.add(1, 0, 2.0);
        s.add(1, 1, 5.0);
        s.add(2, 0, 1.0);
        s.add(2, 1, 3.0);
        s.add(2, 2, 6.0);
        let matrix = KktMatrix::Sparse(s);
        let mut solver = SparseLdl::new();
        let inertia = solver.factor(&matrix).unwrap().unwrap();
        assert_eq!(inertia.positive, 3, "Expected 3 positive, got {:?}", inertia);
        assert_eq!(inertia.negative, 0);
        assert_eq!(inertia.zero, 0);
        // Validate the solution through the residual A*x - b.
        let rhs = [1.0, 2.0, 3.0];
        let mut sol = [0.0; 3];
        solver.solve(&rhs, &mut sol).unwrap();
        let mut ax = [0.0; 3];
        matrix.matvec(&sol, &mut ax);
        for i in 0..3 {
            assert!(
                (ax[i] - rhs[i]).abs() < 1e-10,
                "Row {}: Ax={}, b={}",
                i, ax[i], rhs[i]
            );
        }
    }

    /// An indefinite saddle-point (KKT-style) matrix: inertia must be
    /// (2 positive, 1 negative, 0 zero), and the solve must still be accurate.
    #[test]
    fn test_sparse_ldl_kkt_matrix() {
        let mut s = SparseSymmetricMatrix::zeros(3);
        s.add(0, 0, 2.0);
        s.add(1, 1, 2.0);
        // Off-diagonal constraint row; the (2,2) diagonal is left at zero,
        // which makes the matrix indefinite.
        s.add(2, 0, 1.0);
        s.add(2, 1, 1.0);
        let matrix = KktMatrix::Sparse(s);
        let mut solver = SparseLdl::new();
        let inertia = solver.factor(&matrix).unwrap().unwrap();
        assert_eq!(inertia.positive, 2, "Expected 2 positive, got {:?}", inertia);
        assert_eq!(inertia.negative, 1, "Expected 1 negative, got {:?}", inertia);
        assert_eq!(inertia.zero, 0);
        let rhs = [1.0, 2.0, 3.0];
        let mut sol = [0.0; 3];
        solver.solve(&rhs, &mut sol).unwrap();
        let mut ax = [0.0; 3];
        matrix.matvec(&sol, &mut ax);
        for i in 0..3 {
            assert!(
                (ax[i] - rhs[i]).abs() < 1e-10,
                "Row {}: Ax={}, b={}",
                i, ax[i], rhs[i]
            );
        }
    }

    /// Factoring the identity is a degenerate case: all pivots positive and
    /// the solution equals the right-hand side.
    #[test]
    fn test_sparse_ldl_identity() {
        let mut s = SparseSymmetricMatrix::zeros(4);
        for i in 0..4 {
            s.add(i, i, 1.0);
        }
        let matrix = KktMatrix::Sparse(s);
        let mut solver = SparseLdl::new();
        let inertia = solver.factor(&matrix).unwrap().unwrap();
        assert_eq!(inertia.positive, 4);
        let rhs = [1.0, 2.0, 3.0, 4.0];
        let mut sol = [0.0; 4];
        solver.solve(&rhs, &mut sol).unwrap();
        for i in 0..4 {
            assert!((sol[i] - rhs[i]).abs() < 1e-12);
        }
    }

    /// The sparse and dense LDL backends must agree on the inertia of the
    /// same indefinite matrix.
    #[test]
    fn test_sparse_dense_inertia_match() {
        use crate::linear_solver::dense::DenseLdl;
        use crate::linear_solver::SymmetricMatrix;
        let mut dense = SymmetricMatrix::zeros(3);
        let mut sparse = SparseSymmetricMatrix::zeros(3);
        // Same indefinite matrix fed to both backends.
        let entries = [(0, 0, 2.0), (1, 1, 2.0), (2, 0, 1.0), (2, 1, 1.0)];
        for &(i, j, v) in &entries {
            dense.set(i, j, v);
            sparse.add(i, j, v);
        }
        let mut dense_solver = DenseLdl::new();
        let dense_inertia = dense_solver
            .factor(&KktMatrix::Dense(dense))
            .unwrap()
            .unwrap();
        let mut sparse_solver = SparseLdl::new();
        let sparse_inertia = sparse_solver
            .factor(&KktMatrix::Sparse(sparse))
            .unwrap()
            .unwrap();
        assert_eq!(
            dense_inertia, sparse_inertia,
            "Dense inertia {:?} != Sparse inertia {:?}",
            dense_inertia, sparse_inertia
        );
    }

    /// The supernodal path must agree with the simplicial path on a KKT-style
    /// saddle-point system, both in inertia and in solve accuracy.
    #[test]
    fn test_supernodal_kkt_matrix() {
        // n primal variables, m constraints.
        let n = 5;
        let m = 3;
        let dim = n + m;
        let mut s = SparseSymmetricMatrix::zeros(dim);
        // Well-conditioned positive-definite primal block.
        for i in 0..n {
            s.add(i, i, 10.0 + i as f64);
        }
        s.add(1, 0, 1.0);
        s.add(2, 0, 0.5);
        s.add(2, 1, 0.5);
        // Constraint Jacobian entries coupling primal and dual blocks.
        s.add(n, 0, 1.0);
        s.add(n, 1, 2.0);
        s.add(n + 1, 1, 1.0);
        s.add(n + 1, 2, 3.0);
        s.add(n + 2, 3, 1.0);
        s.add(n + 2, 4, 1.0);
        // Tiny negative regularization on the dual block keeps the
        // factorization stable while preserving m negative pivots.
        for i in n..dim {
            s.add(i, i, -1e-8);
        }
        let matrix = KktMatrix::Sparse(s.clone());
        // Reference result from the default (simplicial-eligible) path.
        let mut simp_solver = SparseLdl::new();
        let simp_inertia = simp_solver.factor(&KktMatrix::Sparse(s)).unwrap().unwrap();
        let mut solver = SparseLdl::new();
        solver.force_supernodal();
        let inertia = solver.factor(&matrix).unwrap().unwrap();
        assert_eq!(inertia, simp_inertia,
            "Supernodal {:?} != Simplicial {:?}", inertia, simp_inertia);
        let rhs: Vec<f64> = (1..=dim).map(|i| i as f64).collect();
        let mut sol = vec![0.0; dim];
        solver.solve(&rhs, &mut sol).unwrap();
        let mut ax = vec![0.0; dim];
        matrix.matvec(&sol, &mut ax);
        // Looser tolerance: the -1e-8 regularization limits attainable
        // residual accuracy.
        for i in 0..dim {
            assert!(
                (ax[i] - rhs[i]).abs() < 1e-4,
                "Row {}: Ax={}, b={}, err={}",
                i, ax[i], rhs[i], (ax[i] - rhs[i]).abs()
            );
        }
    }

    /// Supernodal factorization of a banded SPD matrix: all-positive inertia
    /// and an accurate solve.
    #[test]
    fn test_supernodal_positive_definite() {
        let n = 20;
        let mut s = SparseSymmetricMatrix::zeros(n);
        // Diagonally dominant pentadiagonal matrix (hence SPD).
        for i in 0..n {
            s.add(i, i, 4.0);
            if i > 0 { s.add(i, i - 1, -1.0); }
            if i > 1 { s.add(i, i - 2, -0.5); }
        }
        let matrix = KktMatrix::Sparse(s);
        let mut solver = SparseLdl::new();
        solver.force_supernodal();
        let inertia = solver.factor(&matrix).unwrap().unwrap();
        assert_eq!(inertia.positive, n);
        assert_eq!(inertia.negative, 0);
        assert_eq!(inertia.zero, 0);
        let rhs: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();
        let mut sol = vec![0.0; n];
        solver.solve(&rhs, &mut sol).unwrap();
        let mut ax = vec![0.0; n];
        matrix.matvec(&sol, &mut ax);
        for i in 0..n {
            assert!(
                (ax[i] - rhs[i]).abs() < 1e-8,
                "Row {}: Ax={}, b={}",
                i, ax[i], rhs[i]
            );
        }
    }

    /// Refactoring with new values but the same sparsity pattern must reuse
    /// the cached symbolic structure and still produce a correct (different)
    /// solution.
    #[test]
    fn test_cached_refactorization() {
        let mut s = SparseSymmetricMatrix::zeros(3);
        s.add(0, 0, 4.0);
        s.add(1, 0, 2.0);
        s.add(1, 1, 5.0);
        s.add(2, 0, 1.0);
        s.add(2, 1, 3.0);
        s.add(2, 2, 6.0);
        let matrix1 = KktMatrix::Sparse(s);
        let mut solver = SparseLdl::new();
        solver.factor(&matrix1).unwrap();
        let rhs = [1.0, 2.0, 3.0];
        let mut sol1 = [0.0; 3];
        solver.solve(&rhs, &mut sol1).unwrap();
        // Second matrix: same pattern, different values.
        let mut s2 = SparseSymmetricMatrix::zeros(3);
        s2.add(0, 0, 10.0);
        s2.add(1, 0, 1.0);
        s2.add(1, 1, 10.0);
        s2.add(2, 0, 0.5);
        s2.add(2, 1, 1.0);
        s2.add(2, 2, 10.0);
        let matrix2 = KktMatrix::Sparse(s2.clone());
        solver.factor(&matrix2).unwrap();
        let mut sol2 = [0.0; 3];
        solver.solve(&rhs, &mut sol2).unwrap();
        let mut ax = [0.0; 3];
        matrix2.matvec(&sol2, &mut ax);
        for i in 0..3 {
            assert!(
                (ax[i] - rhs[i]).abs() < 1e-10,
                "Row {}: Ax={}, b={}",
                i, ax[i], rhs[i]
            );
        }
        // If the stale factorization had been reused, sol2 would equal sol1.
        assert!((sol1[0] - sol2[0]).abs() > 1e-6, "Solutions should differ");
    }
}