use crate::csr::CsrMatrix;
use crate::error::{SparseError, SparseResult};
use scirs2_core::numeric::{Float, NumAssign, SparseElement, Zero};
use std::fmt::Debug;
use std::iter::Sum;
/// Summary statistics describing the nonzero structure of a sparse matrix.
#[derive(Debug, Clone)]
pub struct SparsityPattern {
    /// Number of rows.
    pub nrows: usize,
    /// Number of columns.
    pub ncols: usize,
    /// Number of stored (explicit) entries.
    pub nnz: usize,
    /// nnz / (nrows * ncols); 0.0 when either dimension is zero.
    pub density: f64,
    /// Largest number of stored entries in any single row.
    pub max_row_nnz: usize,
    /// Smallest number of stored entries in any single row.
    pub min_row_nnz: usize,
    /// Average number of stored entries per row.
    pub mean_row_nnz: f64,
    /// Sample standard deviation of stored entries per row (0.0 for < 2 rows).
    pub std_row_nnz: f64,
    /// Bandwidth of the pattern: intended as the largest offset of a stored
    /// entry from the diagonal.
    pub bandwidth: usize,
    /// Lower (skyline) profile: sum over rows of the distance from the row's
    /// leftmost stored entry to the diagonal.
    pub profile: usize,
    /// Number of diagonal positions with no stored entry.
    pub zero_diag_count: usize,
    /// True when every stored position (i, j) has a stored mirror (j, i);
    /// values are not compared.
    pub is_structurally_symmetric: bool,
}
impl SparsityPattern {
    /// Analyze the sparsity structure of a CSR matrix.
    ///
    /// Computes per-row nonzero statistics, bandwidth and skyline profile,
    /// diagonal coverage, and whether the pattern is structurally symmetric.
    pub fn analyze<T>(a: &CsrMatrix<T>) -> Self
    where
        T: Clone + Copy + SparseElement + Zero + Debug,
    {
        let (nrows, ncols) = a.shape();
        let nnz = a.nnz();
        // Number of stored entries per row, straight from the CSR row pointers.
        let row_lengths: Vec<usize> = (0..nrows)
            .map(|r| a.indptr[r + 1] - a.indptr[r])
            .collect();
        let max_row_nnz = row_lengths.iter().copied().max().unwrap_or(0);
        let min_row_nnz = row_lengths.iter().copied().min().unwrap_or(0);
        let mean_row_nnz = if nrows > 0 {
            nnz as f64 / nrows as f64
        } else {
            0.0
        };
        // Sample standard deviation (Bessel's correction: divide by nrows - 1).
        let std_row_nnz = if nrows > 1 {
            let var: f64 = row_lengths
                .iter()
                .map(|&l| {
                    let diff = l as f64 - mean_row_nnz;
                    diff * diff
                })
                .sum::<f64>()
                / (nrows - 1) as f64;
            var.sqrt()
        } else {
            0.0
        };
        let mut bandwidth = 0usize;
        let mut profile = 0usize;
        let mut has_diag = vec![false; nrows.min(ncols)];
        for row in 0..nrows {
            // leftmost/rightmost default to the diagonal position so an empty
            // row contributes zero bandwidth and zero profile.
            let mut leftmost = row;
            let mut rightmost = row;
            for j in a.indptr[row]..a.indptr[row + 1] {
                let col = a.indices[j];
                leftmost = leftmost.min(col);
                rightmost = rightmost.max(col);
                // Guard `row < ncols` matters when nrows > ncols (has_diag has
                // min(nrows, ncols) slots).
                if col == row && row < ncols {
                    has_diag[row] = true;
                }
            }
            // BUGFIX: the bandwidth contribution of a row is the larger of its
            // sub- and super-diagonal extents. The previous code considered
            // only `rightmost - row` whenever any entry lay right of the
            // diagonal, under-reporting rows with entries on both sides.
            let bw = (rightmost - row).max(row - leftmost);
            bandwidth = bandwidth.max(bw);
            // Lower (skyline) profile: distance from the first entry to the
            // diagonal.
            profile += row - leftmost;
        }
        let zero_diag_count = has_diag.iter().filter(|&&d| !d).count();
        // Structural symmetry: every stored position (r, c) must have a stored
        // mirror (c, r). Values are not compared.
        let is_structurally_symmetric = if nrows != ncols {
            false
        } else {
            let mut pairs: std::collections::HashSet<(usize, usize)> =
                std::collections::HashSet::with_capacity(nnz);
            for row in 0..nrows {
                for j in a.indptr[row]..a.indptr[row + 1] {
                    pairs.insert((row, a.indices[j]));
                }
            }
            pairs.iter().all(|&(r, c)| pairs.contains(&(c, r)))
        };
        let density = if nrows > 0 && ncols > 0 {
            nnz as f64 / (nrows as f64 * ncols as f64)
        } else {
            0.0
        };
        SparsityPattern {
            nrows,
            ncols,
            nnz,
            density,
            max_row_nnz,
            min_row_nnz,
            mean_row_nnz,
            std_row_nnz,
            bandwidth,
            profile,
            zero_diag_count,
            is_structurally_symmetric,
        }
    }

    /// Rough estimate of the nonzero count after ILU(k) factorization:
    /// a heuristic assuming each additional fill level grows nnz by 50%.
    pub fn estimate_iluk_fill(&self, fill_levels: usize) -> usize {
        let base = self.nnz;
        // `as i32` is safe for any realistic fill level (small integer).
        let growth = (1.5_f64).powi(fill_levels as i32);
        (base as f64 * growth) as usize
    }
}
/// Result of a 2-norm condition number estimate.
#[derive(Debug, Clone)]
pub struct ConditionNumberEstimate {
    /// Estimated condition number sigma_max / sigma_min; infinity when
    /// sigma_min is numerically zero.
    pub kappa: f64,
    /// Estimated largest singular value.
    pub sigma_max: f64,
    /// Estimated smallest singular value (clamped to be non-negative).
    pub sigma_min: f64,
    /// Iteration count of the slower of the two inner eigen-iterations.
    pub iterations: usize,
    /// True only when both inner iterations converged.
    pub converged: bool,
}
impl ConditionNumberEstimate {
    /// Estimate the 2-norm condition number of a square matrix by estimating
    /// the extreme eigenvalues of AᵀA (the squared singular values of A).
    ///
    /// # Errors
    /// Returns `SparseError::ValueError` when the matrix is not square.
    pub fn estimate(a: &CsrMatrix<f64>, max_iter: usize, tol: f64) -> SparseResult<Self> {
        let (rows, cols) = a.shape();
        if rows != cols {
            return Err(SparseError::ValueError(
                "Condition number estimate requires a square matrix".to_string(),
            ));
        }
        // Largest eigenvalue of AᵀA, then the smallest one (deflated against
        // the largest).
        let (lam_hi, it_hi, ok_hi) = power_iteration_ata(a, rows, max_iter, tol)?;
        let (lam_lo, it_lo, ok_lo) = smallest_eigenvalue_ata(a, rows, lam_hi, max_iter, tol)?;
        let sigma_max = lam_hi.sqrt();
        let sigma_min = lam_lo.max(0.0).sqrt();
        // Guard against division by a numerically zero smallest singular value.
        let kappa = if sigma_min <= 1e-300 {
            f64::INFINITY
        } else {
            sigma_max / sigma_min
        };
        Ok(Self {
            kappa,
            sigma_max,
            sigma_min,
            iterations: it_hi.max(it_lo),
            converged: ok_hi && ok_lo,
        })
    }
}
/// Power iteration on AᵀA.
///
/// Returns `(lambda, iterations, converged)` where `lambda` approximates the
/// largest eigenvalue of AᵀA, i.e. sigma_max(A)². The starting vector is a
/// fixed pseudo-random sequence so results are deterministic.
fn power_iteration_ata(
    a: &CsrMatrix<f64>,
    n: usize,
    max_iter: usize,
    tol: f64,
) -> SparseResult<(f64, usize, bool)> {
    // Deterministic, non-degenerate starting vector.
    let mut v: Vec<f64> = (0..n).map(|i| ((i * 7 + 3) % 17) as f64 + 1.0).collect();
    normalize_inplace(&mut v);
    let mut lambda = 0.0f64;
    for iter in 0..max_iter {
        // u = Aᵀ(Av); the Rayleigh quotient uᵀv estimates the dominant
        // eigenvalue because v has unit norm.
        let w = csr_spmv(a, &v);
        let mut u = csr_spmv_t(a, &w, n);
        let new_lambda = dot(&u, &v);
        let diff = (new_lambda - lambda).abs();
        normalize_inplace(&mut u);
        v = u;
        lambda = new_lambda;
        // Relative convergence test; the tiny offset guards lambda == 0.
        if diff < tol * (lambda.abs() + 1e-300) {
            return Ok((lambda, iter + 1, true));
        }
    }
    // Not converged within max_iter; clamp tiny negative rounding noise
    // (AᵀA is positive semdefinite, so a negative estimate is pure noise).
    Ok((lambda.max(0.0), max_iter, false))
}
fn smallest_eigenvalue_ata(
a: &CsrMatrix<f64>,
n: usize,
lambda_max: f64,
max_iter: usize,
tol: f64,
) -> SparseResult<(f64, usize, bool)> {
if lambda_max < 1e-300 {
return Ok((0.0, 0, true));
}
let mut v: Vec<f64> = (0..n)
.map(|i| (((i * 13 + 5) % 23) as f64 + 0.5) * (if i % 2 == 0 { 1.0 } else { -1.0 }))
.collect();
normalize_inplace(&mut v);
let mut min_rq = f64::INFINITY;
let mut converged = false;
for iter in 0..max_iter {
let w = csr_spmv(a, &v);
let u = csr_spmv_t(a, &w, n);
let rq = dot(&u, &v); min_rq = min_rq.min(rq);
let deflated: Vec<f64> = v
.iter()
.zip(u.iter())
.map(|(&vi, &ui)| lambda_max * vi - ui)
.collect();
let deflated_norm_val = dot(&deflated, &deflated).sqrt();
if deflated_norm_val < tol * (lambda_max + 1e-300) {
converged = true;
return Ok((min_rq.max(0.0), iter + 1, converged));
}
v = deflated.iter().map(|&d| d / deflated_norm_val).collect();
let new_rq = {
let w2 = csr_spmv(a, &v);
let u2 = csr_spmv_t(a, &w2, n);
dot(&u2, &v)
};
min_rq = min_rq.min(new_rq);
let lambda_candidate = lambda_max - dot(&deflated.iter().map(|d| d / deflated_norm_val).collect::<Vec<_>>(), &v).abs() * deflated_norm_val;
let _ = lambda_candidate;
if (rq - new_rq).abs() < tol * (lambda_max + 1e-300) && iter > 2 {
converged = true;
return Ok((min_rq.max(0.0), iter + 1, converged));
}
}
Ok((min_rq.max(0.0), max_iter, converged))
}
/// Result of a power-iteration spectral radius estimate.
#[derive(Debug, Clone)]
pub struct SpectralRadius {
    /// Estimated spectral radius (magnitude of the dominant eigenvalue).
    pub rho: f64,
    /// Final normalized iterate — an approximate dominant eigenvector.
    pub eigenvector: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// True when the estimate stabilized within tolerance.
    pub converged: bool,
}
impl SpectralRadius {
    /// Estimate the spectral radius of a square matrix with power iteration.
    ///
    /// The estimate used is the norm growth ‖Av‖ with ‖v‖ = 1, updated each
    /// iteration; convergence is declared when consecutive estimates agree to
    /// the relative tolerance `tol`.
    ///
    /// # Errors
    /// Returns `SparseError::ValueError` when the matrix is not square.
    pub fn estimate(a: &CsrMatrix<f64>, max_iter: usize, tol: f64) -> SparseResult<Self> {
        let (n, m) = a.shape();
        if n != m {
            return Err(SparseError::ValueError(
                "SpectralRadius requires a square matrix".to_string(),
            ));
        }
        // Deterministic starting vector.
        let mut v: Vec<f64> = (0..n).map(|i| ((i * 11 + 1) % 13) as f64 + 1.0).collect();
        normalize_inplace(&mut v);
        let mut rho = 0.0f64;
        for iter in 0..max_iter {
            // (Removed: a Rayleigh-quotient dot product was previously
            // computed here every iteration and immediately discarded.)
            let mut u = csr_spmv(a, &v);
            let norm_u = norm2(&u);
            let diff = (norm_u - rho).abs();
            // Normalize unless the iterate collapsed to (near) zero.
            if norm_u > 1e-300 {
                for x in u.iter_mut() {
                    *x /= norm_u;
                }
            }
            v = u;
            rho = norm_u;
            if diff < tol * (rho + 1e-300) {
                return Ok(Self {
                    rho,
                    eigenvector: v,
                    iterations: iter + 1,
                    converged: true,
                });
            }
        }
        Ok(Self {
            rho,
            eigenvector: v,
            iterations: max_iter,
            converged: false,
        })
    }
}
/// A battery of cheap structural and numerical properties of a matrix,
/// useful for choosing solvers and preconditioners.
#[derive(Debug, Clone)]
pub struct MatrixDiagnosis {
    /// nrows == ncols.
    pub is_square: bool,
    /// Pattern symmetry: every stored (i, j) has a stored (j, i).
    pub is_structurally_symmetric: bool,
    /// Value symmetry: |a_ij - a_ji| within the supplied tolerance everywhere.
    pub is_numerically_symmetric: bool,
    /// Every diagonal value is strictly positive.
    pub has_positive_diagonal: bool,
    /// Row diagonal dominance test result.
    pub is_row_diagonally_dominant: bool,
    /// True when every Gershgorin disc lies strictly in the positive
    /// half-line (a sufficient, not necessary, condition for PD).
    pub gershgorin_positive_definite: bool,
    /// Low-accuracy spectral radius probe (0.0 if the probe failed).
    pub spectral_radius_estimate: f64,
    /// Smallest absolute diagonal value.
    pub min_abs_diagonal: f64,
    /// Largest absolute diagonal value.
    pub max_abs_diagonal: f64,
    /// Count of strictly negative diagonal values.
    pub negative_diag_count: usize,
    /// Count of exactly-zero diagonal values.
    pub zero_diag_count: usize,
    /// Frobenius norm of the antisymmetric part (A - Aᵀ)/2.
    pub antisymmetric_norm: f64,
}
impl MatrixDiagnosis {
    /// Run structural and numerical diagnostics on a matrix.
    ///
    /// `sym_tol` is the absolute tolerance used when deciding whether a_ij
    /// and a_ji are numerically equal.
    pub fn diagnose(a: &CsrMatrix<f64>, sym_tol: f64) -> SparseResult<Self> {
        let (n, m) = a.shape();
        let is_square = n == m;
        let pat = SparsityPattern::analyze(a);
        let is_structurally_symmetric = pat.is_structurally_symmetric;
        // Squared Frobenius norm of the antisymmetric part (A - Aᵀ)/2.
        let mut antisym_fro_sq = 0.0f64;
        let mut is_numerically_symmetric = is_structurally_symmetric;
        if is_square {
            // When the pattern is not structurally symmetric, an entry (i, j)
            // whose mirror (j, i) is absent from the pattern would have its
            // mirrored term skipped by the entry loop below; record the stored
            // positions so that term can be added explicitly. (Unneeded — and
            // skipped — when the pattern is symmetric.)
            let stored: std::collections::HashSet<(usize, usize)> = if is_structurally_symmetric {
                std::collections::HashSet::new()
            } else {
                let mut s = std::collections::HashSet::with_capacity(a.nnz());
                for row in 0..n {
                    for j in a.indptr[row]..a.indptr[row + 1] {
                        s.insert((row, a.indices[j]));
                    }
                }
                s
            };
            for row in 0..n {
                for j in a.indptr[row]..a.indptr[row + 1] {
                    let col = a.indices[j];
                    let a_ij = a.data[j];
                    // NOTE(review): assumes `get` returns 0.0 for positions
                    // absent from the pattern — confirm against CsrMatrix.
                    let a_ji = a.get(col, row);
                    let diff = (a_ij - a_ji) / 2.0;
                    antisym_fro_sq += diff * diff;
                    // BUGFIX: when (col, row) is not stored, the loop never
                    // visits it, yet position (col, row) contributes an equal
                    // term ((a_ji - a_ij)/2)² to the Frobenius norm.
                    if !is_structurally_symmetric && !stored.contains(&(col, row)) {
                        antisym_fro_sq += diff * diff;
                    }
                    if (a_ij - a_ji).abs() > sym_tol {
                        is_numerically_symmetric = false;
                    }
                }
            }
        }
        let mut min_abs_diag = f64::INFINITY;
        let mut max_abs_diag = 0.0f64;
        let mut neg_diag = 0usize;
        let mut zero_diag = 0usize;
        let mut has_positive_diagonal = true;
        let mut is_dd = true;
        let mut gershgorin_pd = true;
        for row in 0..n.min(m) {
            let diag = a.get(row, row);
            let abs_diag = diag.abs();
            min_abs_diag = min_abs_diag.min(abs_diag);
            max_abs_diag = max_abs_diag.max(abs_diag);
            if diag <= 0.0 {
                has_positive_diagonal = false;
            }
            if diag < 0.0 {
                neg_diag += 1;
            }
            if diag == 0.0 {
                zero_diag += 1;
            }
            // Sum of |a_ij| over the off-diagonal entries of this row.
            let off_diag_sum: f64 = (a.indptr[row]..a.indptr[row + 1])
                .filter(|&j| a.indices[j] != row)
                .map(|j| a.data[j].abs())
                .sum();
            // BUGFIX: row diagonal dominance is defined as |a_ii| >= Σ|a_ij|;
            // the previous check compared the *signed* diagonal, wrongly
            // failing rows with large negative diagonals.
            if abs_diag < off_diag_sum {
                is_dd = false;
            }
            // Gershgorin: all discs lie in the positive half-line only when
            // a_ii - R_i > 0 for every row (the signed value is correct here).
            if diag - off_diag_sum <= 0.0 {
                gershgorin_pd = false;
            }
        }
        // BUGFIX: an empty diagonal occurs when *either* dimension is zero;
        // the previous `n == 0` check left min_abs_diag at +inf for a matrix
        // with rows but no columns.
        if n.min(m) == 0 {
            min_abs_diag = 0.0;
        }
        // Cheap, low-accuracy spectral radius probe; failure degrades to 0.0.
        let sr = SpectralRadius::estimate(a, 20, 1e-4)
            .map(|r| r.rho)
            .unwrap_or(0.0);
        Ok(Self {
            is_square,
            is_structurally_symmetric,
            is_numerically_symmetric,
            has_positive_diagonal,
            is_row_diagonally_dominant: is_dd,
            gershgorin_positive_definite: gershgorin_pd,
            spectral_radius_estimate: sr,
            min_abs_diagonal: min_abs_diag,
            max_abs_diagonal: max_abs_diag,
            negative_diag_count: neg_diag,
            zero_diag_count: zero_diag,
            antisymmetric_norm: antisym_fro_sq.sqrt(),
        })
    }
}
/// Fill/bandwidth metrics for one candidate ordering.
#[derive(Debug, Clone)]
pub struct FillReductionEntry {
    /// Name of the ordering ("Natural", "RCM", "AMD", ...).
    pub ordering: String,
    /// Estimated nonzero count after factorization under this ordering.
    pub estimated_fill: usize,
    /// Bandwidth of the matrix after applying the ordering.
    pub bandwidth: usize,
    /// Skyline profile of the matrix after applying the ordering.
    pub profile: usize,
    /// estimated_fill relative to the original nonzero count.
    pub fill_ratio: f64,
}
/// Comparison of several fill-reducing orderings for one matrix.
#[derive(Debug, Clone)]
pub struct FillReductionAnalysis {
    /// Nonzero count of the original (unpermuted) matrix.
    pub original_nnz: usize,
    /// One metrics entry per ordering evaluated.
    pub entries: Vec<FillReductionEntry>,
}
impl FillReductionAnalysis {
    /// Compare fill-reducing orderings (Natural, RCM, AMD) on a matrix.
    ///
    /// NOTE(review): the fill estimates are currently placeholders — ILU(0)
    /// by construction produces no fill beyond the original pattern, so
    /// `estimated_fill` equals the original nnz and `fill_ratio` is 1.0 for
    /// every ordering. Bandwidth and profile carry the real comparison.
    pub fn analyze(a: &CsrMatrix<f64>) -> SparseResult<Self> {
        use crate::ordering::{CuthillMcKee, MinimumDegree, NaturalOrdering};
        let (n, _) = a.shape();
        let original_nnz = a.nnz();
        let mut entries = Vec::new();
        // Symmetrically permute A by (perm, inv_perm) and measure the
        // resulting pattern. Parameters are slices rather than &Vec
        // (clippy::ptr_arg); call sites coerce automatically.
        let analyze_ordering = |label: &str,
                                perm: &[usize],
                                inv_perm: &[usize]|
         -> SparseResult<FillReductionEntry> {
            let mut row_idx: Vec<usize> = Vec::with_capacity(original_nnz);
            let mut col_idx: Vec<usize> = Vec::with_capacity(original_nnz);
            let mut data: Vec<f64> = Vec::with_capacity(original_nnz);
            for new_row in 0..n {
                let old_row = perm[new_row];
                for j in a.indptr[old_row]..a.indptr[old_row + 1] {
                    let old_col = a.indices[j];
                    row_idx.push(new_row);
                    col_idx.push(inv_perm[old_col]);
                    data.push(a.data[j]);
                }
            }
            // NOTE(review): assumes a square matrix — the permuted copy is
            // built with shape (n, n); verify callers never pass rectangular.
            let a_perm = CsrMatrix::new(data, row_idx, col_idx, (n, n))?;
            let pat = SparsityPattern::analyze(&a_perm);
            // ILU(0) keeps exactly the original pattern: no fill.
            let ilu0_fill = original_nnz;
            let fill_ratio = ilu0_fill as f64 / original_nnz.max(1) as f64;
            Ok(FillReductionEntry {
                ordering: label.to_string(),
                estimated_fill: ilu0_fill,
                bandwidth: pat.bandwidth,
                profile: pat.profile,
                fill_ratio,
            })
        };
        let nat = NaturalOrdering::compute(a)?;
        entries.push(analyze_ordering("Natural", &nat.perm, &nat.inv_perm)?);
        // RCM and AMD report their own post-ordering bandwidth/profile, so the
        // permuted matrix does not need to be materialized for them.
        let rcm = CuthillMcKee::compute(a)?;
        entries.push(FillReductionEntry {
            ordering: "RCM".to_string(),
            estimated_fill: original_nnz,
            bandwidth: rcm.bandwidth_after,
            profile: rcm.profile_after,
            fill_ratio: 1.0,
        });
        let amd = MinimumDegree::compute(a)?;
        entries.push(FillReductionEntry {
            ordering: "AMD".to_string(),
            estimated_fill: original_nnz,
            bandwidth: amd.bandwidth_after,
            profile: amd.profile_after,
            fill_ratio: 1.0,
        });
        Ok(Self {
            original_nnz,
            entries,
        })
    }

    /// Entry with the smallest post-ordering bandwidth, if any were computed.
    pub fn best_bandwidth(&self) -> Option<&FillReductionEntry> {
        self.entries.iter().min_by_key(|e| e.bandwidth)
    }

    /// Entry with the smallest post-ordering profile, if any were computed.
    pub fn best_profile(&self) -> Option<&FillReductionEntry> {
        self.entries.iter().min_by_key(|e| e.profile)
    }
}
/// Dense y = A·x for a CSR matrix (`x` indexed by column).
fn csr_spmv(a: &CsrMatrix<f64>, x: &[f64]) -> Vec<f64> {
    let (nrows, _) = a.shape();
    (0..nrows)
        .map(|row| {
            (a.indptr[row]..a.indptr[row + 1])
                .map(|j| a.data[j] * x[a.indices[j]])
                .sum()
        })
        .collect()
}
/// Dense y = Aᵀ·x for a CSR matrix; `ncols` is the length of the result.
fn csr_spmv_t(a: &CsrMatrix<f64>, x: &[f64], ncols: usize) -> Vec<f64> {
    let (nrows, _) = a.shape();
    let mut y = vec![0.0f64; ncols];
    // Scatter each row's entries into the transpose-product accumulator.
    for (row, &xi) in x.iter().enumerate().take(nrows) {
        for j in a.indptr[row]..a.indptr[row + 1] {
            y[a.indices[j]] += a.data[j] * xi;
        }
    }
    y
}
/// Euclidean inner product of two slices (truncated to the shorter length).
fn dot(a: &[f64], b: &[f64]) -> f64 {
    let mut acc = 0.0;
    for (x, y) in a.iter().zip(b) {
        acc += x * y;
    }
    acc
}
/// Euclidean (L2) norm of a slice.
fn norm2(v: &[f64]) -> f64 {
    v.iter().map(|x| x * x).sum::<f64>().sqrt()
}
/// Scale `v` to unit Euclidean length in place.
///
/// Vectors with norm at or below 1e-300 are left untouched, avoiding a
/// division by (near) zero.
///
/// The parameter is a mutable slice rather than `&mut Vec<f64>`
/// (clippy::ptr_arg); existing `&mut vec` call sites coerce automatically.
fn normalize_inplace(v: &mut [f64]) {
    // Euclidean norm, computed inline over the slice.
    let n = v.iter().map(|x| x * x).sum::<f64>().sqrt();
    if n > 1e-300 {
        for x in v.iter_mut() {
            *x /= n;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// 1-D Laplacian stencil: 2 on the diagonal, -1 on the first
    /// sub/super-diagonals, as an n x n CSR matrix.
    fn tridiag(n: usize) -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut vals = Vec::new();
        for i in 0..n {
            rows.push(i);
            cols.push(i);
            vals.push(2.0);
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                vals.push(-1.0);
            }
            if i + 1 < n {
                rows.push(i);
                cols.push(i + 1);
                vals.push(-1.0);
            }
        }
        CsrMatrix::new(vals, rows, cols, (n, n)).expect("tridiag")
    }

    #[test]
    fn test_sparsity_pattern() {
        let a = tridiag(5);
        let pat = SparsityPattern::analyze(&a);
        assert_eq!(pat.nrows, 5);
        assert_eq!(pat.ncols, 5);
        // 5 diagonal entries plus 2 * 4 off-diagonal entries.
        assert_eq!(pat.nnz, 13);
        assert_eq!(pat.max_row_nnz, 3);
        // First and last rows have only two entries.
        assert_eq!(pat.min_row_nnz, 2);
        assert_eq!(pat.bandwidth, 1);
        assert_eq!(pat.zero_diag_count, 0);
        assert!(pat.is_structurally_symmetric);
    }

    #[test]
    fn test_condition_number_estimate_identity() {
        // The identity has sigma_max == sigma_min == 1, so kappa == 1.
        let n = 5;
        let rows: Vec<usize> = (0..n).collect();
        let cols: Vec<usize> = (0..n).collect();
        let vals = vec![1.0f64; n];
        let id = CsrMatrix::new(vals, rows, cols, (n, n)).expect("identity");
        let est = ConditionNumberEstimate::estimate(&id, 100, 1e-8).expect("cond est");
        assert_relative_eq!(est.sigma_max, 1.0, epsilon = 0.1);
        assert!(est.kappa < 10.0, "cond number too large: {}", est.kappa);
    }

    #[test]
    fn test_spectral_radius_diag() {
        // diag(1..=5): the spectral radius is the largest entry, 5.
        let n = 5;
        let rows: Vec<usize> = (0..n).collect();
        let cols: Vec<usize> = (0..n).collect();
        let vals: Vec<f64> = (1..=n).map(|i| i as f64).collect();
        let d = CsrMatrix::new(vals, rows, cols, (n, n)).expect("diag");
        let sr = SpectralRadius::estimate(&d, 200, 1e-8).expect("sr");
        assert_relative_eq!(sr.rho, 5.0, epsilon = 0.1);
    }

    #[test]
    fn test_matrix_diagnosis_tridiag() {
        // The 1-D Laplacian is symmetric, diagonally dominant, and PD.
        let a = tridiag(5);
        let diag = MatrixDiagnosis::diagnose(&a, 1e-12).expect("diagnose");
        assert!(diag.is_square);
        assert!(diag.is_structurally_symmetric);
        assert!(diag.is_numerically_symmetric);
        assert!(diag.has_positive_diagonal);
        assert!(diag.is_row_diagonally_dominant);
        assert!(diag.gershgorin_positive_definite);
        assert_eq!(diag.zero_diag_count, 0);
        assert_eq!(diag.negative_diag_count, 0);
    }

    #[test]
    fn test_fill_reduction_analysis() {
        let a = tridiag(8);
        let analysis = FillReductionAnalysis::analyze(&a).expect("fill analysis");
        // One entry per ordering: Natural, RCM, AMD.
        assert_eq!(analysis.entries.len(), 3);
        assert!(analysis.best_bandwidth().is_some());
        assert!(analysis.best_profile().is_some());
    }
}