// oxiphysics_core/sparse/functions.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5#![allow(clippy::needless_range_loop)]
6use super::types::{CsrMatrix, SparseLu};
7
8/// Return the n×n identity matrix in CSR format.
9pub fn identity_csr(n: usize) -> CsrMatrix {
10    let rows: Vec<usize> = (0..n).collect();
11    let cols: Vec<usize> = (0..n).collect();
12    let vals: Vec<f64> = vec![1.0; n];
13    CsrMatrix::from_triplets(n, n, &rows, &cols, &vals)
14}
15/// 1-D finite-difference Laplacian on `n` interior nodes.
16///
17/// Stencil: `(-1, 2, -1) / dx²`
18pub fn laplacian_1d(n: usize, dx: f64) -> CsrMatrix {
19    let inv_dx2 = 1.0 / (dx * dx);
20    let mut rows = Vec::new();
21    let mut cols = Vec::new();
22    let mut vals = Vec::new();
23    for i in 0..n {
24        rows.push(i);
25        cols.push(i);
26        vals.push(2.0 * inv_dx2);
27        if i > 0 {
28            rows.push(i);
29            cols.push(i - 1);
30            vals.push(-inv_dx2);
31        }
32        if i + 1 < n {
33            rows.push(i);
34            cols.push(i + 1);
35            vals.push(-inv_dx2);
36        }
37    }
38    CsrMatrix::from_triplets(n, n, &rows, &cols, &vals)
39}
40/// 2-D finite-difference Laplacian on an `nx × ny` grid (5-point stencil).
41///
42/// Grid ordering: node `(i, j)` maps to global index `i * ny + j`.
43pub fn laplacian_2d(nx: usize, ny: usize, dx: f64) -> CsrMatrix {
44    let n = nx * ny;
45    let inv_dx2 = 1.0 / (dx * dx);
46    let mut rows = Vec::new();
47    let mut cols = Vec::new();
48    let mut vals = Vec::new();
49    for i in 0..nx {
50        for j in 0..ny {
51            let node = i * ny + j;
52            rows.push(node);
53            cols.push(node);
54            vals.push(4.0 * inv_dx2);
55            if j > 0 {
56                rows.push(node);
57                cols.push(node - 1);
58                vals.push(-inv_dx2);
59            }
60            if j + 1 < ny {
61                rows.push(node);
62                cols.push(node + 1);
63                vals.push(-inv_dx2);
64            }
65            if i > 0 {
66                rows.push(node);
67                cols.push(node - ny);
68                vals.push(-inv_dx2);
69            }
70            if i + 1 < nx {
71                rows.push(node);
72                cols.push(node + ny);
73                vals.push(-inv_dx2);
74            }
75        }
76    }
77    CsrMatrix::from_triplets(n, n, &rows, &cols, &vals)
78}
79/// Conjugate Gradient solver for symmetric positive-definite systems `A x = b`.
80///
81/// Returns `(x, n_iter, residual_norm)`.
82pub fn cg_solve(
83    a: &CsrMatrix,
84    b: &[f64],
85    x0: &[f64],
86    tol: f64,
87    max_iter: usize,
88) -> (Vec<f64>, u32, f64) {
89    let n = b.len();
90    assert_eq!(a.nrows, n);
91    assert_eq!(a.ncols, n);
92    let mut x = x0.to_vec();
93    let ax0 = a.matvec(&x);
94    let mut r: Vec<f64> = b.iter().zip(ax0.iter()).map(|(bi, ai)| bi - ai).collect();
95    let mut p = r.clone();
96    let mut rr: f64 = r.iter().map(|ri| ri * ri).sum();
97    for iter in 0..max_iter {
98        let res_norm = rr.sqrt();
99        if res_norm < tol {
100            return (x, iter as u32, res_norm);
101        }
102        let ap = a.matvec(&p);
103        let pap: f64 = p.iter().zip(ap.iter()).map(|(pi, api)| pi * api).sum();
104        if pap.abs() < f64::EPSILON {
105            return (x, iter as u32, res_norm);
106        }
107        let alpha = rr / pap;
108        for i in 0..n {
109            x[i] += alpha * p[i];
110            r[i] -= alpha * ap[i];
111        }
112        let rr_new: f64 = r.iter().map(|ri| ri * ri).sum();
113        let beta = rr_new / rr;
114        for i in 0..n {
115            p[i] = r[i] + beta * p[i];
116        }
117        rr = rr_new;
118    }
119    let res_norm = rr.sqrt();
120    (x, max_iter as u32, res_norm)
121}
/// Preconditioned Conjugate Gradient solver.
///
/// `precond[i]` = `1 / a_ii` (diagonal preconditioner); more generally any
/// vector `m` such that `z = m ⊙ r` approximates `M^{-1} r`.
/// Always starts from the zero vector (no `x0` parameter).
/// Convergence is tested on the *unpreconditioned* residual norm `‖r‖`.
/// Returns `(x, n_iter, residual_norm)`.
pub fn pcg_solve(
    a: &CsrMatrix,
    b: &[f64],
    precond: &[f64],
    tol: f64,
    max_iter: usize,
) -> (Vec<f64>, u32, f64) {
    let n = b.len();
    assert_eq!(a.nrows, n);
    assert_eq!(a.ncols, n);
    let mut x = vec![0.0f64; n];
    // r = b - A·0 = b, but computed explicitly for symmetry with cg_solve.
    let ax0 = a.matvec(&x);
    let mut r: Vec<f64> = b.iter().zip(ax0.iter()).map(|(bi, ai)| bi - ai).collect();
    // z = M^{-1} r via elementwise multiply with the preconditioner vector.
    let mut z: Vec<f64> = r
        .iter()
        .zip(precond.iter())
        .map(|(ri, mi)| ri * mi)
        .collect();
    let mut p = z.clone();
    // rz = rᵀ z drives the alpha/beta recurrences instead of rᵀ r.
    let mut rz: f64 = r.iter().zip(z.iter()).map(|(ri, zi)| ri * zi).sum();
    for iter in 0..max_iter {
        let res_norm: f64 = r.iter().map(|ri| ri * ri).sum::<f64>().sqrt();
        if res_norm < tol {
            return (x, iter as u32, res_norm);
        }
        let ap = a.matvec(&p);
        let pap: f64 = p.iter().zip(ap.iter()).map(|(pi, api)| pi * api).sum();
        // Breakdown guard: pᵀAp ≈ 0 would make alpha blow up.
        if pap.abs() < f64::EPSILON {
            return (x, iter as u32, res_norm);
        }
        let alpha = rz / pap;
        for i in 0..n {
            x[i] += alpha * p[i];
            r[i] -= alpha * ap[i];
        }
        // Re-apply the preconditioner to the updated residual.
        z = r
            .iter()
            .zip(precond.iter())
            .map(|(ri, mi)| ri * mi)
            .collect();
        let rz_new: f64 = r.iter().zip(z.iter()).map(|(ri, zi)| ri * zi).sum();
        let beta = rz_new / rz;
        for i in 0..n {
            p[i] = z[i] + beta * p[i];
        }
        rz = rz_new;
    }
    // Iteration budget exhausted: report the current residual norm.
    let res_norm: f64 = r.iter().map(|ri| ri * ri).sum::<f64>().sqrt();
    (x, max_iter as u32, res_norm)
}
176/// Successive Over-Relaxation (SOR) solver. Use `omega = 1` for Gauss-Seidel.
177///
178/// Returns `(x, n_iter)`.
179pub fn sor_solve(
180    a: &CsrMatrix,
181    b: &[f64],
182    omega: f64,
183    tol: f64,
184    max_iter: usize,
185) -> (Vec<f64>, u32) {
186    let n = b.len();
187    assert_eq!(a.nrows, n);
188    assert_eq!(a.ncols, n);
189    let mut x = vec![0.0f64; n];
190    for iter in 0..max_iter {
191        let mut max_change = 0.0f64;
192        for i in 0..n {
193            let aii = a.get(i, i);
194            if aii.abs() < f64::EPSILON {
195                continue;
196            }
197            let mut sigma = 0.0;
198            for k in a.row_ptr[i]..a.row_ptr[i + 1] {
199                let j = a.col_idx[k];
200                if j != i {
201                    sigma += a.values[k] * x[j];
202                }
203            }
204            let x_new = (b[i] - sigma) / aii;
205            let delta = omega * (x_new - x[i]);
206            max_change = max_change.max(delta.abs());
207            x[i] += delta;
208        }
209        if max_change < tol {
210            return (x, iter as u32 + 1);
211        }
212    }
213    (x, max_iter as u32)
214}
215/// Jacobi iterative solver.
216///
217/// Returns `(x, n_iter)`.
218pub fn jacobi_solve(a: &CsrMatrix, b: &[f64], tol: f64, max_iter: usize) -> (Vec<f64>, u32) {
219    let n = b.len();
220    assert_eq!(a.nrows, n);
221    assert_eq!(a.ncols, n);
222    let mut x = vec![0.0f64; n];
223    for iter in 0..max_iter {
224        let mut x_new = vec![0.0f64; n];
225        let mut max_change = 0.0f64;
226        for i in 0..n {
227            let aii = a.get(i, i);
228            if aii.abs() < f64::EPSILON {
229                x_new[i] = x[i];
230                continue;
231            }
232            let mut sigma = 0.0;
233            for k in a.row_ptr[i]..a.row_ptr[i + 1] {
234                let j = a.col_idx[k];
235                if j != i {
236                    sigma += a.values[k] * x[j];
237                }
238            }
239            x_new[i] = (b[i] - sigma) / aii;
240            max_change = max_change.max((x_new[i] - x[i]).abs());
241        }
242        x = x_new;
243        if max_change < tol {
244            return (x, iter as u32 + 1);
245        }
246    }
247    (x, max_iter as u32)
248}
249/// Build a diagonal (Jacobi) preconditioner: returns `1 / a_ii` for each `i`.
250///
251/// Returns 1.0 where the diagonal is zero (to avoid division by zero).
252pub fn diagonal_preconditioner(a: &CsrMatrix) -> Vec<f64> {
253    let n = a.nrows.min(a.ncols);
254    let mut m = vec![1.0f64; a.nrows];
255    for i in 0..n {
256        let d = a.get(i, i);
257        if d.abs() > f64::EPSILON {
258            m[i] = 1.0 / d;
259        }
260    }
261    m
262}
263/// Compute the dominant eigenvalue and eigenvector of a sparse matrix
264/// using the power method.
265///
266/// Returns `(eigenvalue, eigenvector)` or `None` if not converged.
267pub fn sparse_eig_power(a: &CsrMatrix, tol: f64, max_iter: usize) -> Option<(f64, Vec<f64>)> {
268    let n = a.nrows;
269    assert_eq!(n, a.ncols, "Matrix must be square");
270    if n == 0 {
271        return None;
272    }
273    let mut v: Vec<f64> = vec![1.0 / (n as f64).sqrt(); n];
274    let mut eigenvalue = 0.0f64;
275    for _ in 0..max_iter {
276        let w = a.matvec(&v);
277        let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
278        if norm < 1e-30 {
279            return None;
280        }
281        let lambda_new: f64 = v.iter().zip(w.iter()).map(|(vi, wi)| vi * wi).sum();
282        if (lambda_new - eigenvalue).abs() < tol {
283            return Some((lambda_new, v));
284        }
285        eigenvalue = lambda_new;
286        v = w.iter().map(|x| x / norm).collect();
287    }
288    Some((eigenvalue, v))
289}
/// Compute the smallest eigenvalue of a sparse SPD matrix using inverse
/// iteration (shift-and-invert with the ILU preconditioner).
///
/// Returns `(eigenvalue, eigenvector)`; `None` only when the matrix is
/// empty or the iterate collapses to (numerically) zero. If `max_iter`
/// is exhausted the last estimate is returned.
pub fn sparse_eig_smallest(a: &CsrMatrix, tol: f64, max_iter: usize) -> Option<(f64, Vec<f64>)> {
    let n = a.nrows;
    assert_eq!(n, a.ncols, "Matrix must be square");
    if n == 0 {
        return None;
    }
    // ILU(0) factors stand in for A^{-1}, so this is an *approximate*
    // inverse iteration — accuracy depends on how well ILU(0) captures A.
    // NOTE(review): assumes `SparseLu::solve` applies both triangular
    // factors; confirm against super::types.
    let lu = SparseLu::ilu0(a);
    // Uniform unit-norm starting vector.
    let mut v: Vec<f64> = vec![1.0 / (n as f64).sqrt(); n];
    let mut eigenvalue = 0.0f64;
    for _ in 0..max_iter {
        // w ≈ A^{-1} v via the ILU factors.
        let w = lu.solve(&v);
        let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm < 1e-30 {
            return None;
        }
        // v·w estimates 1/λ (Rayleigh quotient of the inverse operator).
        let rq_inv: f64 = v.iter().zip(w.iter()).map(|(vi, wi)| vi * wi).sum();
        let lambda_new = if rq_inv.abs() < 1e-30 {
            0.0
        } else {
            1.0 / rq_inv
        };
        if (lambda_new - eigenvalue).abs() < tol {
            return Some((lambda_new, v));
        }
        eigenvalue = lambda_new;
        v = w.iter().map(|x| x / norm).collect();
    }
    // Not converged within max_iter: return the last estimate anyway.
    Some((eigenvalue, v))
}
/// Estimate the spectral radius (largest singular value approximation) via
/// the power method applied to A^T A.
///
/// Returns the square root of the dominant eigenvalue of A^T A; if the
/// iteration breaks down or does not converge, the square root of the
/// most recent estimate (possibly 0.0) is returned.
pub fn spectral_radius_estimate(a: &CsrMatrix, tol: f64, max_iter: usize) -> f64 {
    // Materialise A^T once; each iteration then applies v ↦ A^T (A v).
    let at = a.transpose();
    let ata_matvec = |v: &[f64]| -> Vec<f64> {
        let w = a.matvec(v);
        at.matvec(&w)
    };
    let n = a.ncols;
    // Uniform unit-norm starting vector of length ncols (domain of A).
    let mut v: Vec<f64> = vec![1.0 / (n as f64).sqrt(); n];
    let mut eigenvalue = 0.0f64;
    for _ in 0..max_iter {
        let w = ata_matvec(&v);
        let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        // A (numerically) zero image: keep the last estimate.
        if norm < 1e-30 {
            break;
        }
        // Rayleigh quotient vᵀ(AᵀA)v of the unit-norm iterate; always ≥ 0
        // in exact arithmetic since AᵀA is positive semi-definite.
        let lambda_new: f64 = v.iter().zip(w.iter()).map(|(vi, wi)| vi * wi).sum();
        if (lambda_new - eigenvalue).abs() < tol {
            return lambda_new.sqrt();
        }
        eigenvalue = lambda_new;
        v = w.iter().map(|x| x / norm).collect();
    }
    eigenvalue.sqrt()
}
351/// Sparse-sparse matrix multiply: C = A * B.
352///
353/// A is `m × k`, B is `k × n`, result C is `m × n`.
354/// Uses a dense accumulator row for each output row.
355pub fn spgemm(a: &CsrMatrix, b: &CsrMatrix) -> CsrMatrix {
356    assert_eq!(a.ncols, b.nrows, "spgemm: inner dimension mismatch");
357    let m = a.nrows;
358    let n = b.ncols;
359    let mut c_row_ptr = vec![0usize; m + 1];
360    let mut c_col_idx: Vec<usize> = Vec::new();
361    let mut c_values: Vec<f64> = Vec::new();
362    let mut accum = vec![0.0f64; n];
363    let mut marker = vec![usize::MAX; n];
364    for i in 0..m {
365        let mut touched: Vec<usize> = Vec::new();
366        for ka in a.row_ptr[i]..a.row_ptr[i + 1] {
367            let p = a.col_idx[ka];
368            let a_ip = a.values[ka];
369            for kb in b.row_ptr[p]..b.row_ptr[p + 1] {
370                let j = b.col_idx[kb];
371                accum[j] += a_ip * b.values[kb];
372                if marker[j] != i {
373                    marker[j] = i;
374                    touched.push(j);
375                }
376            }
377        }
378        touched.sort_unstable();
379        for j in &touched {
380            if accum[*j].abs() > 0.0 {
381                c_col_idx.push(*j);
382                c_values.push(accum[*j]);
383            }
384            accum[*j] = 0.0;
385        }
386        c_row_ptr[i + 1] = c_col_idx.len();
387    }
388    CsrMatrix {
389        nrows: m,
390        ncols: n,
391        row_ptr: c_row_ptr,
392        col_idx: c_col_idx,
393        values: c_values,
394    }
395}
/// Convert COO (coordinate format) triplets directly to CSR.
///
/// Handles unsorted input; duplicate (row, col) entries are summed.
/// This is a standalone function complementing `CsrMatrix::from_triplets`.
pub fn coo_to_csr(
    nrows: usize,
    ncols: usize,
    rows: &[usize],
    cols: &[usize],
    vals: &[f64],
) -> CsrMatrix {
    // Thin wrapper: sorting and duplicate handling happen in `from_triplets`.
    // NOTE(review): the doc claims above mirror `from_triplets`' contract —
    // confirm against its implementation in super::types.
    CsrMatrix::from_triplets(nrows, ncols, rows, cols, vals)
}
/// Arnoldi iteration: build an orthonormal Krylov basis for `A`.
///
/// Starting from initial vector `b0`, computes up to `k` basis vectors
/// `Q = [q_0, q_1, ..., q_{k-1}]` (each of length `n`) and the
/// upper Hessenberg matrix `H` (stored row-major as `Vec<Vec<f64>>`
/// of size `(k+1) × k`; trailing rows are truncated on early breakdown).
///
/// The Arnoldi relation holds: A Q_k = Q_{k+1} H_{k+1,k}.
///
/// # Arguments
/// * `a`  – input sparse matrix (n × n)
/// * `b0` – starting vector (length n)
/// * `k`  – number of Arnoldi steps to perform
///
/// # Returns
/// `(Q, H)` where `Q[i]` is the i-th Krylov basis vector and
/// `H[i][j]` is the Hessenberg coefficient.
///
/// # Panics
/// If `b0.len() != n` or `b0` is (numerically) the zero vector.
pub fn arnoldi(a: &CsrMatrix, b0: &[f64], k: usize) -> (Vec<Vec<f64>>, Vec<Vec<f64>>) {
    let n = a.nrows;
    assert_eq!(
        b0.len(),
        n,
        "arnoldi: b0 length must equal matrix dimension"
    );
    let norm0: f64 = b0.iter().map(|x| x * x).sum::<f64>().sqrt();
    assert!(norm0 > 1e-30, "arnoldi: starting vector is zero");
    let mut q: Vec<Vec<f64>> = Vec::with_capacity(k + 1);
    let mut h: Vec<Vec<f64>> = vec![vec![0.0; k]; k + 1];
    // q_0 = b0 / ‖b0‖.
    q.push(b0.iter().map(|x| x / norm0).collect());
    for j in 0..k {
        if j >= q.len() {
            break;
        }
        let mut w = a.matvec(&q[j]);
        // Modified Gram-Schmidt: each projection is subtracted from w
        // immediately, so the order of these updates matters numerically.
        for i in 0..=j {
            let hij: f64 = w.iter().zip(q[i].iter()).map(|(wi, qi)| wi * qi).sum();
            h[i][j] = hij;
            for (wl, qi) in w.iter_mut().zip(q[i].iter()) {
                *wl -= hij * qi;
            }
        }
        let beta: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        h[j + 1][j] = beta;
        if beta > 1e-14 {
            q.push(w.iter().map(|x| x / beta).collect());
        } else {
            // Happy breakdown: the Krylov subspace is A-invariant.
            break;
        }
    }
    // Drop Hessenberg rows beyond the basis actually built.
    let q_len = q.len();
    h.truncate(q_len);
    (q, h)
}
462/// Diagonal (Jacobi) scaling: return D^{-1} A where D = diag(A).
463///
464/// Rows with zero diagonal are left unchanged (scale factor = 1).
465/// This is the simplest preconditioner and is used to equilibrate the
466/// system before iterative solves.
467pub fn jacobi_scale(a: &CsrMatrix) -> CsrMatrix {
468    let diag = a.diagonal();
469    let mut scaled = CsrMatrix {
470        nrows: a.nrows,
471        ncols: a.ncols,
472        row_ptr: a.row_ptr.clone(),
473        col_idx: a.col_idx.clone(),
474        values: a.values.clone(),
475    };
476    for i in 0..a.nrows {
477        let d = diag[i];
478        let scale = if d.abs() > 1e-30 { 1.0 / d } else { 1.0 };
479        for k in a.row_ptr[i]..a.row_ptr[i + 1] {
480            scaled.values[k] *= scale;
481        }
482    }
483    scaled
484}
/// Biconjugate Gradient Stabilised (BICGStab) solver for non-symmetric systems A x = b.
///
/// Convergence is tested on the relative residual `‖r‖ / ‖b‖`.
/// Returns `(solution, converged, iterations)`.
#[allow(dead_code)]
pub fn bicgstab_solve(
    a: &CsrMatrix,
    b: &[f64],
    x0: Option<&[f64]>,
    tol: f64,
    max_iter: usize,
) -> (Vec<f64>, bool, usize) {
    let n = a.nrows;
    assert_eq!(b.len(), n);
    // Initial guess: caller-supplied x0, otherwise the zero vector.
    let mut x: Vec<f64> = match x0 {
        Some(v) => v.to_vec(),
        None => vec![0.0; n],
    };
    let ax = a.matvec(&x);
    // r = b - A x (initial residual); r_hat is the fixed shadow residual.
    let mut r: Vec<f64> = b.iter().zip(ax.iter()).map(|(bi, ai)| bi - ai).collect();
    let r_hat = r.clone();
    let mut rho_prev = 1.0f64;
    let mut alpha = 1.0f64;
    let mut omega = 1.0f64;
    let mut p = vec![0.0f64; n];
    let mut v = vec![0.0f64; n];
    // Clamp ‖b‖ away from zero so the relative tests never divide by 0.
    let b_norm: f64 = b.iter().map(|x| x * x).sum::<f64>().sqrt().max(1e-30);
    for iter in 0..max_iter {
        let rho = r_hat.iter().zip(r.iter()).map(|(a, b)| a * b).sum::<f64>();
        // Breakdown: shadow residual became orthogonal to r.
        if rho.abs() < 1e-300 {
            break;
        }
        if iter == 0 {
            p = r.clone();
        } else {
            let beta = (rho / rho_prev) * (alpha / omega);
            for i in 0..n {
                p[i] = r[i] + beta * (p[i] - omega * v[i]);
            }
        }
        v = a.matvec(&p);
        let r_hat_v: f64 = r_hat.iter().zip(v.iter()).map(|(a, b)| a * b).sum();
        if r_hat_v.abs() < 1e-300 {
            break;
        }
        alpha = rho / r_hat_v;
        // Half-step residual s = r - alpha v.
        let s: Vec<f64> = r
            .iter()
            .zip(v.iter())
            .map(|(ri, vi)| ri - alpha * vi)
            .collect();
        let s_norm: f64 = s.iter().map(|x| x * x).sum::<f64>().sqrt();
        // Early exit: the half step alone already meets the tolerance.
        if s_norm / b_norm < tol {
            for i in 0..n {
                x[i] += alpha * p[i];
            }
            return (x, true, iter + 1);
        }
        let t = a.matvec(&s);
        let t_dot_s: f64 = t.iter().zip(s.iter()).map(|(a, b)| a * b).sum();
        let t_dot_t: f64 = t.iter().map(|x| x * x).sum();
        // Stabilisation parameter: least-squares minimiser of ‖s - omega t‖.
        omega = if t_dot_t < 1e-300 {
            0.0
        } else {
            t_dot_s / t_dot_t
        };
        for i in 0..n {
            x[i] += alpha * p[i] + omega * s[i];
        }
        for i in 0..n {
            r[i] = s[i] - omega * t[i];
        }
        let r_norm: f64 = r.iter().map(|x| x * x).sum::<f64>().sqrt();
        if r_norm / b_norm < tol {
            return (x, true, iter + 1);
        }
        // omega ≈ 0 would blow up the next beta.
        if omega.abs() < 1e-300 {
            break;
        }
        rho_prev = rho;
    }
    // Fell out of the loop: judge convergence by the true residual.
    let r_final = {
        let ax2 = a.matvec(&x);
        let res: Vec<f64> = b.iter().zip(ax2.iter()).map(|(bi, ai)| bi - ai).collect();
        res.iter().map(|x| x * x).sum::<f64>().sqrt()
    };
    (x, r_final / b_norm < tol, max_iter)
}
572/// Minimum Residual (MINRES) method for symmetric (possibly indefinite) systems A x = b.
573///
574/// Returns `(solution, converged, iterations)`.
575#[allow(dead_code)]
576#[allow(unused_assignments)]
577pub fn minres_solve(
578    a: &CsrMatrix,
579    b: &[f64],
580    tol: f64,
581    max_iter: usize,
582) -> (Vec<f64>, bool, usize) {
583    let n = a.nrows;
584    assert_eq!(b.len(), n);
585    let mut x = vec![0.0f64; n];
586    let mut r = b.to_vec();
587    let b_norm: f64 = b.iter().map(|x| x * x).sum::<f64>().sqrt().max(1e-30);
588    let mut v_old = vec![0.0f64; n];
589    let mut beta_old = 0.0f64;
590    let mut beta: f64 = r.iter().map(|x| x * x).sum::<f64>().sqrt();
591    if beta < 1e-30 {
592        return (x, true, 0);
593    }
594    let mut v = r.iter().map(|x| x / beta).collect::<Vec<f64>>();
595    let mut d = vec![0.0f64; n];
596    let mut d_old = vec![0.0f64; n];
597    let mut phi_bar = beta;
598    let mut c_old = 1.0f64;
599    let mut s_old = 0.0f64;
600    let mut c = 1.0f64;
601    let mut s = 0.0f64;
602    for iter in 0..max_iter {
603        let av = a.matvec(&v);
604        let alpha: f64 = v.iter().zip(av.iter()).map(|(vi, avi)| vi * avi).sum();
605        let mut v_new: Vec<f64> = av
606            .iter()
607            .enumerate()
608            .map(|(i, &avi)| avi - alpha * v[i] - beta * v_old[i])
609            .collect();
610        let beta_new: f64 = v_new.iter().map(|x| x * x).sum::<f64>().sqrt();
611        if beta_new > 1e-30 {
612            for vi in &mut v_new {
613                *vi /= beta_new;
614            }
615        }
616        let eps = s_old * beta;
617        let delta = c_old * c * beta + s * alpha;
618        let gamma_bar = -s_old * s * beta + c * alpha - c_old * delta * s_old;
619        let rhs = phi_bar * c;
620        let phi = phi_bar * s;
621        let _ = eps;
622        let _ = delta;
623        let _ = gamma_bar;
624        let _ = rhs;
625        let _ = phi;
626        let gamma: f64 = (c * alpha + s * beta_new).hypot(beta_new);
627        if gamma < 1e-30 {
628            break;
629        }
630        let c_new = (c * alpha + s * beta_new) / gamma;
631        let s_new = beta_new / gamma;
632        let d_new: Vec<f64> = v
633            .iter()
634            .enumerate()
635            .map(|(i, &vi)| (vi - c * d[i] - s * d_old[i]) / gamma)
636            .collect();
637        let eta = c_new * phi_bar;
638        for i in 0..n {
639            x[i] += eta * d_new[i];
640        }
641        phi_bar *= s_new;
642        if phi_bar / b_norm < tol {
643            return (x, true, iter + 1);
644        }
645        let _ = c_old;
646        let _ = s_old;
647        c_old = c;
648        s_old = s;
649        c = c_new;
650        s = s_new;
651        d_old = d;
652        d = d_new;
653        v_old = v;
654        v = v_new;
655        beta_old = beta;
656        beta = beta_new;
657        let _ = beta_old;
658        r = b
659            .iter()
660            .zip(a.matvec(&x).iter())
661            .map(|(bi, ai)| bi - ai)
662            .collect();
663        let r_norm: f64 = r.iter().map(|x| x * x).sum::<f64>().sqrt();
664        if r_norm / b_norm < tol {
665            return (x, true, iter + 1);
666        }
667    }
668    (x, false, max_iter)
669}
670/// Gauss-Seidel iterative solver for A x = b.
671///
672/// Requires A to have non-zero diagonal entries.
673/// Returns `(solution, iterations_performed)`.
674#[allow(dead_code)]
675pub fn gauss_seidel_solve(
676    a: &CsrMatrix,
677    b: &[f64],
678    tol: f64,
679    max_iter: usize,
680) -> (Vec<f64>, usize) {
681    let n = a.nrows;
682    assert_eq!(b.len(), n);
683    let mut x = vec![0.0f64; n];
684    for iter in 0..max_iter {
685        let mut max_change = 0.0f64;
686        for i in 0..n {
687            let diag = a.get(i, i);
688            assert!(diag.abs() > 1e-30, "Gauss-Seidel: zero diagonal at row {i}");
689            let row_sum: f64 = (a.row_ptr[i]..a.row_ptr[i + 1])
690                .map(|k| {
691                    let j = a.col_idx[k];
692                    if j != i { a.values[k] * x[j] } else { 0.0 }
693                })
694                .sum();
695            let x_new = (b[i] - row_sum) / diag;
696            max_change = max_change.max((x_new - x[i]).abs());
697            x[i] = x_new;
698        }
699        if max_change < tol {
700            return (x, iter + 1);
701        }
702    }
703    (x, max_iter)
704}
705/// Forward substitution: solve L x = b where L is lower triangular (CSR).
706#[allow(dead_code)]
707pub fn forward_substitution(l: &CsrMatrix, b: &[f64]) -> Vec<f64> {
708    let n = l.nrows;
709    assert_eq!(b.len(), n);
710    let mut x = vec![0.0f64; n];
711    for i in 0..n {
712        let mut sum = b[i];
713        let diag = l.get(i, i);
714        for k in l.row_ptr[i]..l.row_ptr[i + 1] {
715            let j = l.col_idx[k];
716            if j < i {
717                sum -= l.values[k] * x[j];
718            }
719        }
720        assert!(
721            diag.abs() > 1e-30,
722            "forward_substitution: zero diagonal at row {i}"
723        );
724        x[i] = sum / diag;
725    }
726    x
727}
728/// Backward substitution: solve U x = b where U is upper triangular (CSR).
729#[allow(dead_code)]
730pub fn backward_substitution(u: &CsrMatrix, b: &[f64]) -> Vec<f64> {
731    let n = u.nrows;
732    assert_eq!(b.len(), n);
733    let mut x = vec![0.0f64; n];
734    for i in (0..n).rev() {
735        let mut sum = b[i];
736        let diag = u.get(i, i);
737        for k in u.row_ptr[i]..u.row_ptr[i + 1] {
738            let j = u.col_idx[k];
739            if j > i {
740                sum -= u.values[k] * x[j];
741            }
742        }
743        assert!(
744            diag.abs() > 1e-30,
745            "backward_substitution: zero diagonal at row {i}"
746        );
747        x[i] = sum / diag;
748    }
749    x
750}
751/// Build a CSR matrix from a banded stencil.
752///
753/// `diagonals[k]` is the value at offset `offsets[k]` (0 = main diagonal,
754/// positive = superdiagonal, negative = subdiagonal).
755#[allow(dead_code)]
756pub fn banded_matrix(n: usize, offsets: &[i64], diagonals: &[f64]) -> CsrMatrix {
757    assert_eq!(offsets.len(), diagonals.len());
758    let mut rows = Vec::new();
759    let mut cols = Vec::new();
760    let mut vals = Vec::new();
761    for (k, &off) in offsets.iter().enumerate() {
762        for i in 0..n {
763            let j = i as i64 + off;
764            if j >= 0 && (j as usize) < n {
765                rows.push(i);
766                cols.push(j as usize);
767                vals.push(diagonals[k]);
768            }
769        }
770    }
771    CsrMatrix::from_triplets(n, n, &rows, &cols, &vals)
772}
773/// Row-scale a CSR matrix: D_r A where D_r = diag(scales).
774#[allow(dead_code)]
775pub fn row_scale(a: &CsrMatrix, scales: &[f64]) -> CsrMatrix {
776    assert_eq!(scales.len(), a.nrows);
777    let values: Vec<f64> = (0..a.nrows)
778        .flat_map(|i| {
779            let s = scales[i];
780            (a.row_ptr[i]..a.row_ptr[i + 1]).map(move |k| a.values[k] * s)
781        })
782        .collect();
783    CsrMatrix {
784        nrows: a.nrows,
785        ncols: a.ncols,
786        row_ptr: a.row_ptr.clone(),
787        col_idx: a.col_idx.clone(),
788        values,
789    }
790}
791/// Column-scale a CSR matrix: A D_c where D_c = diag(scales).
792#[allow(dead_code)]
793pub fn col_scale(a: &CsrMatrix, scales: &[f64]) -> CsrMatrix {
794    assert_eq!(scales.len(), a.ncols);
795    let values: Vec<f64> = a
796        .col_idx
797        .iter()
798        .zip(a.values.iter())
799        .map(|(&j, &v)| v * scales[j])
800        .collect();
801    CsrMatrix {
802        nrows: a.nrows,
803        ncols: a.ncols,
804        row_ptr: a.row_ptr.clone(),
805        col_idx: a.col_idx.clone(),
806        values,
807    }
808}
809/// Two-sided equilibration scaling: compute row/col scale vectors so that
810/// the scaled matrix has unit diagonal (i.e., sqrt(|A_ii|) scaling).
811#[allow(dead_code)]
812pub fn equilibration_scales(a: &CsrMatrix) -> (Vec<f64>, Vec<f64>) {
813    let n = a.nrows.max(a.ncols);
814    let mut row_scales = vec![1.0f64; a.nrows];
815    let mut col_scales = vec![1.0f64; a.ncols];
816    for i in 0..a.nrows {
817        let d = a.get(i, i).abs();
818        if d > 1e-30 {
819            row_scales[i] = 1.0 / d.sqrt();
820        }
821    }
822    for j in 0..a.ncols {
823        let d = a.get(j.min(a.nrows - 1), j).abs();
824        if d > 1e-30 {
825            col_scales[j] = 1.0 / d.sqrt();
826        }
827    }
828    let _ = n;
829    (row_scales, col_scales)
830}
831/// Solve A X = B for multiple right-hand sides simultaneously using CG.
832///
833/// Each column of `b_cols` is a separate RHS; returns a `Vec<Vec`f64`>` of solutions.
834#[allow(dead_code)]
835pub fn multi_rhs_cg(
836    a: &CsrMatrix,
837    b_cols: &[Vec<f64>],
838    tol: f64,
839    max_iter: usize,
840) -> Vec<Vec<f64>> {
841    b_cols
842        .iter()
843        .map(|b| {
844            let x0 = vec![0.0f64; b.len()];
845            let (x, _iters, _res) = cg_solve(a, b, &x0, tol, max_iter);
846            x
847        })
848        .collect()
849}
/// Solve the saddle-point system:
///   \[ A   B^T \] \[ x \]   \[ f \]
///   \[ B   0   \] \[ p \] = \[ g \]
///
/// Schur complement approach: the Schur complement `S = B A^{-1} B^T` is
/// approximated by its *diagonal* (each entry computed with an inner CG
/// solve against a unit vector), and the pressure update divides by that
/// diagonal. This costs `m + 2` CG solves in total, so it is only
/// suitable for modest `m`.
///
/// # Arguments
/// * `a`        – (n×n) SPD matrix
/// * `bt`       – (n×m) matrix B^T
/// * `b`        – (m×n) matrix B
/// * `f`        – RHS vector of length n
/// * `g`        – RHS vector of length m
/// * `tol`      – solver tolerance
/// * `max_iter` – max iterations
#[allow(dead_code)]
#[allow(clippy::too_many_arguments)]
pub fn saddle_point_solve(
    a: &CsrMatrix,
    bt: &CsrMatrix,
    b: &CsrMatrix,
    f: &[f64],
    g: &[f64],
    tol: f64,
    max_iter: usize,
) -> (Vec<f64>, Vec<f64>) {
    let n = a.nrows;
    let m = b.nrows;
    assert_eq!(f.len(), n);
    assert_eq!(g.len(), m);
    // Step 1: tentative solve A x0 = f (ignoring the constraint).
    let x0_init = vec![0.0f64; n];
    let (x0, _iters0, _res0) = cg_solve(a, f, &x0_init, tol, max_iter);
    // Step 2: Schur RHS = g - B x0 (constraint violation of x0).
    let b_x0 = b.matvec(&x0);
    let schur_rhs: Vec<f64> = g
        .iter()
        .zip(b_x0.iter())
        .map(|(gi, bxi)| gi - bxi)
        .collect();
    // Step 3: diagonal of S = B A^{-1} B^T — one unit vector and one inner
    // CG solve (at a tightened tolerance) per pressure unknown.
    let s_diag: Vec<f64> = (0..m)
        .map(|i| {
            let mut e = vec![0.0f64; m];
            e[i] = 1.0;
            let bt_e = bt.matvec(&e);
            let x0_schur = vec![0.0f64; n];
            let (a_inv_bt_e, _, _) = cg_solve(a, &bt_e, &x0_schur, tol * 0.1, max_iter);
            let b_a_inv = b.matvec(&a_inv_bt_e);
            b_a_inv[i]
        })
        .collect();
    // Step 4: approximate pressure p ≈ diag(S)^{-1} · schur_rhs;
    // zero diagonal entries yield p_i = 0 rather than dividing by zero.
    let p: Vec<f64> = schur_rhs
        .iter()
        .zip(s_diag.iter())
        .map(|(r, s)| if s.abs() > 1e-30 { r / s } else { 0.0 })
        .collect();
    // Step 5: corrected solve A x = f - B^T p.
    let bt_p = bt.matvec(&p);
    let f_corr: Vec<f64> = f.iter().zip(bt_p.iter()).map(|(fi, bi)| fi - bi).collect();
    let x0_final = vec![0.0f64; n];
    let (x, _, _) = cg_solve(a, &f_corr, &x0_final, tol, max_iter);
    (x, p)
}
/// Lanczos iteration: build a tridiagonal approximation of a symmetric matrix A.
///
/// Returns `(alphas, betas, Q)` where:
/// - `alphas[j]` is the j-th diagonal of the tridiagonal matrix,
/// - `betas[j]` is the j-th subdiagonal; note the last entry is the
///   residual norm of the final step, so `betas.len() == alphas.len()`,
/// - `Q[j]` is the j-th Lanczos vector (orthonormal in exact arithmetic;
///   no reorthogonalisation is performed).
///
/// # Panics
/// If `b0.len() != n` or `b0` is (numerically) the zero vector.
#[allow(dead_code)]
pub fn lanczos(a: &CsrMatrix, b0: &[f64], k: usize) -> (Vec<f64>, Vec<f64>, Vec<Vec<f64>>) {
    let n = a.nrows;
    assert_eq!(b0.len(), n);
    let norm0: f64 = b0.iter().map(|x| x * x).sum::<f64>().sqrt();
    assert!(norm0 > 1e-30, "lanczos: starting vector is zero");
    let mut q: Vec<Vec<f64>> = Vec::with_capacity(k + 1);
    // q_0 = b0 / ‖b0‖.
    q.push(b0.iter().map(|x| x / norm0).collect());
    let mut alphas = Vec::with_capacity(k);
    let mut betas = Vec::with_capacity(k);
    let mut q_prev = vec![0.0f64; n];
    for j in 0..k {
        let mut w = a.matvec(&q[j]);
        // alpha_j = q_jᵀ A q_j.
        let alpha: f64 = q[j].iter().zip(w.iter()).map(|(qi, wi)| qi * wi).sum();
        alphas.push(alpha);
        // Three-term recurrence: w = A q_j - alpha_j q_j - beta_{j-1} q_{j-1}.
        for i in 0..n {
            w[i] -= alpha * q[j][i];
            if j > 0 {
                w[i] -= betas[j - 1] * q_prev[i];
            }
        }
        let beta: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
        betas.push(beta);
        // Stop on breakdown (invariant subspace found) or after k steps.
        if beta < 1e-30 || j + 1 >= k {
            break;
        }
        q_prev = q[j].clone();
        q.push(w.iter().map(|x| x / beta).collect());
    }
    (alphas, betas, q)
}
/// Compute Ritz values from a Lanczos tridiagonal matrix via shifted QR iteration.
///
/// Given `alphas` (diagonal) and `betas` (subdiagonal), returns approximate
/// eigenvalues of the symmetric tridiagonal matrix. Extra trailing entries in
/// `betas` are ignored; missing entries are treated as zero. The returned
/// values are not sorted.
///
/// Each sweep performs one explicit QR step `T - sigma*I = Q*R`, then
/// `T' = R*Q + sigma*I` (an orthogonal similarity, so eigenvalues are
/// preserved), using a Wilkinson shift and deflating converged eigenvalues
/// off the bottom of the active block.
#[allow(dead_code)]
pub fn ritz_values(alphas: &[f64], betas: &[f64]) -> Vec<f64> {
    let m = alphas.len();
    if m == 0 {
        return Vec::new();
    }
    let mut d = alphas.to_vec();
    // Off-diagonal, zero-padded so `e.len() == m - 1` regardless of input length.
    let mut e = vec![0.0f64; m - 1];
    let ncopy = e.len().min(betas.len());
    e[..ncopy].copy_from_slice(&betas[..ncopy]);
    // The active leading block is d[..active]; it shrinks as eigenvalues converge.
    let mut active = m;
    for _ in 0..500 {
        // Deflate: drop trailing rows whose off-diagonal coupling is negligible.
        while active > 1 {
            let scale = d[active - 2].abs() + d[active - 1].abs();
            if e[active - 2].abs() <= 1e-12 * scale + 1e-300 {
                active -= 1;
            } else {
                break;
            }
        }
        if active <= 1 {
            break;
        }
        let n = active;
        // Wilkinson shift: the eigenvalue of the trailing 2x2 closest to d[n-1].
        let delta = (d[n - 2] - d[n - 1]) / 2.0;
        let esq = e[n - 2] * e[n - 2];
        let denom = delta.abs() + (delta * delta + esq).sqrt();
        let shift = if denom > 0.0 {
            d[n - 1] - delta.signum() * esq / denom
        } else {
            d[n - 1]
        };
        for di in d[..n].iter_mut() {
            *di -= shift;
        }
        // Left pass: QR-factorize the shifted tridiagonal with Givens rotations.
        // rd = diagonal of R, ru = first superdiagonal of R. `x` carries the
        // current (i,i) entry after the previous rotation; `b` is the current
        // row's superdiagonal entry, scaled by the previous rotation's cosine.
        let mut cs = vec![1.0f64; n - 1];
        let mut sn = vec![0.0f64; n - 1];
        let mut rd = vec![0.0f64; n];
        let mut ru = vec![0.0f64; n - 1];
        let mut x = d[0];
        let mut c_last = 1.0f64;
        for i in 0..n - 1 {
            let b = c_last * e[i];
            let r = x.hypot(e[i]);
            if r > 1e-300 {
                cs[i] = x / r;
                sn[i] = e[i] / r;
            } else {
                cs[i] = 1.0;
                sn[i] = 0.0;
            }
            rd[i] = r;
            ru[i] = cs[i] * b + sn[i] * d[i + 1];
            x = cs[i] * d[i + 1] - sn[i] * b;
            c_last = cs[i];
        }
        rd[n - 1] = x;
        // Right pass: form T' = R*Q (tridiagonal again) and undo the shift.
        let mut c_prev = 1.0f64;
        for i in 0..n - 1 {
            d[i] = cs[i] * c_prev * rd[i] + sn[i] * ru[i] + shift;
            e[i] = sn[i] * rd[i + 1];
            c_prev = cs[i];
        }
        d[n - 1] = c_prev * rd[n - 1] + shift;
    }
    d
}
992/// Invert a small dense matrix (size × size) stored row-major using Gauss-Jordan.
993/// Returns `None` if singular.
994#[allow(dead_code)]
995pub(super) fn invert_small_dense(m: &[f64], size: usize) -> Option<Vec<f64>> {
996    let mut a = m.to_vec();
997    let mut inv = vec![0.0f64; size * size];
998    for i in 0..size {
999        inv[i * size + i] = 1.0;
1000    }
1001    for col in 0..size {
1002        let mut pivot_row = col;
1003        let mut max_val = a[col * size + col].abs();
1004        for row in (col + 1)..size {
1005            if a[row * size + col].abs() > max_val {
1006                max_val = a[row * size + col].abs();
1007                pivot_row = row;
1008            }
1009        }
1010        if max_val < 1e-14 {
1011            return None;
1012        }
1013        for j in 0..size {
1014            a.swap(col * size + j, pivot_row * size + j);
1015            inv.swap(col * size + j, pivot_row * size + j);
1016        }
1017        let piv = a[col * size + col];
1018        for j in 0..size {
1019            a[col * size + j] /= piv;
1020            inv[col * size + j] /= piv;
1021        }
1022        for row in 0..size {
1023            if row == col {
1024                continue;
1025            }
1026            let factor = a[row * size + col];
1027            for j in 0..size {
1028                let av = a[col * size + j];
1029                a[row * size + j] -= factor * av;
1030                let iv = inv[col * size + j];
1031                inv[row * size + j] -= factor * iv;
1032            }
1033        }
1034    }
1035    Some(inv)
1036}
1037/// Extract the lower triangular part of a sparse matrix (including diagonal).
1038#[allow(dead_code)]
1039pub fn lower_triangular(a: &CsrMatrix) -> CsrMatrix {
1040    let mut rows = Vec::new();
1041    let mut cols = Vec::new();
1042    let mut vals = Vec::new();
1043    for i in 0..a.nrows {
1044        for k in a.row_ptr[i]..a.row_ptr[i + 1] {
1045            let j = a.col_idx[k];
1046            if j <= i {
1047                rows.push(i);
1048                cols.push(j);
1049                vals.push(a.values[k]);
1050            }
1051        }
1052    }
1053    CsrMatrix::from_triplets(a.nrows, a.ncols, &rows, &cols, &vals)
1054}
1055#[cfg(test)]
1056mod tests {
1057    use super::*;
1058    use crate::sparse::CscMatrix;
1059    use crate::sparse::IncompleteCholesky;
1060    use crate::sparse::SparseQr;
1061    use crate::sparse::SsorPreconditioner;
1062    pub(super) const TOL: f64 = 1e-10;
1063    #[test]
1064    fn test_new_empty() {
1065        let m = CsrMatrix::new(3, 4);
1066        assert_eq!(m.nrows, 3);
1067        assert_eq!(m.ncols, 4);
1068        assert_eq!(m.nnz(), 0);
1069        assert_eq!(m.row_ptr.len(), 4);
1070    }
1071    #[test]
1072    fn test_from_triplets_basic() {
1073        let rows = [0, 1, 1, 2];
1074        let cols = [0, 0, 2, 2];
1075        let vals = [1.0, 2.0, 3.0, 4.0];
1076        let m = CsrMatrix::from_triplets(3, 3, &rows, &cols, &vals);
1077        assert_eq!(m.nnz(), 4);
1078        assert!((m.get(0, 0) - 1.0).abs() < TOL);
1079        assert!((m.get(1, 0) - 2.0).abs() < TOL);
1080        assert!((m.get(1, 2) - 3.0).abs() < TOL);
1081        assert!((m.get(2, 2) - 4.0).abs() < TOL);
1082        assert!((m.get(0, 1)).abs() < TOL);
1083    }
1084    #[test]
1085    fn test_from_triplets_duplicate_sum() {
1086        let rows = [0, 0];
1087        let cols = [0, 0];
1088        let vals = [1.5, 2.5];
1089        let m = CsrMatrix::from_triplets(2, 2, &rows, &cols, &vals);
1090        assert_eq!(m.nnz(), 1);
1091        assert!((m.get(0, 0) - 4.0).abs() < TOL);
1092    }
1093    #[test]
1094    fn test_get_zero_for_missing() {
1095        let m = CsrMatrix::new(4, 4);
1096        assert_eq!(m.get(2, 3), 0.0);
1097    }
1098    #[test]
1099    fn test_matvec_identity() {
1100        let id = identity_csr(4);
1101        let x = vec![1.0, 2.0, 3.0, 4.0];
1102        let y = id.matvec(&x);
1103        for (yi, xi) in y.iter().zip(x.iter()) {
1104            assert!((yi - xi).abs() < TOL);
1105        }
1106    }
1107    #[test]
1108    fn test_matvec_simple() {
1109        let m = CsrMatrix::from_triplets(2, 2, &[0, 0, 1], &[0, 1, 1], &[2.0, 1.0, 3.0]);
1110        let y = m.matvec(&[1.0, 2.0]);
1111        assert!((y[0] - 4.0).abs() < TOL);
1112        assert!((y[1] - 6.0).abs() < TOL);
1113    }
1114    #[test]
1115    fn test_add_matrices() {
1116        let a = identity_csr(3);
1117        let b = identity_csr(3);
1118        let c = a.add(&b);
1119        for i in 0..3 {
1120            assert!((c.get(i, i) - 2.0).abs() < TOL);
1121        }
1122    }
1123    #[test]
1124    fn test_add_overlap() {
1125        let r = [0usize, 0];
1126        let c = [0usize, 0];
1127        let v1 = [1.0, 0.0];
1128        let v2 = [0.0, 1.0];
1129        let a = CsrMatrix::from_triplets(2, 2, &r[..1], &c[..1], &v1[..1]);
1130        let b = CsrMatrix::from_triplets(2, 2, &r[..1], &c[..1], &v2[..1]);
1131        let s = a.add(&b);
1132        assert!((s.get(0, 0) - 1.0).abs() < TOL);
1133    }
1134    #[test]
1135    fn test_scale() {
1136        let id = identity_csr(3);
1137        let scaled = id.scale(5.0);
1138        for i in 0..3 {
1139            assert!((scaled.get(i, i) - 5.0).abs() < TOL);
1140        }
1141    }
1142    #[test]
1143    fn test_scale_zero() {
1144        let id = identity_csr(3);
1145        let z = id.scale(0.0);
1146        for i in 0..3 {
1147            assert!(z.get(i, i).abs() < TOL);
1148        }
1149    }
1150    #[test]
1151    fn test_transpose_identity() {
1152        let id = identity_csr(4);
1153        let t = id.transpose();
1154        let d = t.to_dense();
1155        let orig = id.to_dense();
1156        assert_eq!(d, orig);
1157    }
1158    #[test]
1159    fn test_transpose_rectangular() {
1160        let m = CsrMatrix::from_triplets(2, 3, &[0, 0, 1], &[0, 2, 1], &[1.0, 2.0, 3.0]);
1161        let t = m.transpose();
1162        assert_eq!(t.nrows, 3);
1163        assert_eq!(t.ncols, 2);
1164        assert!((t.get(0, 0) - 1.0).abs() < TOL);
1165        assert!((t.get(2, 0) - 2.0).abs() < TOL);
1166        assert!((t.get(1, 1) - 3.0).abs() < TOL);
1167    }
1168    #[test]
1169    fn test_diagonal_identity() {
1170        let id = identity_csr(5);
1171        let d = id.diagonal();
1172        assert_eq!(d.len(), 5);
1173        for v in d {
1174            assert!((v - 1.0).abs() < TOL);
1175        }
1176    }
1177    #[test]
1178    fn test_diagonal_general() {
1179        let m = CsrMatrix::from_triplets(3, 3, &[0, 1, 2, 0], &[0, 1, 2, 2], &[7.0, 8.0, 9.0, 1.0]);
1180        let d = m.diagonal();
1181        assert!((d[0] - 7.0).abs() < TOL);
1182        assert!((d[1] - 8.0).abs() < TOL);
1183        assert!((d[2] - 9.0).abs() < TOL);
1184    }
1185    #[test]
1186    fn test_to_dense_identity() {
1187        let id = identity_csr(3);
1188        let d = id.to_dense();
1189        for i in 0..3 {
1190            for j in 0..3 {
1191                let expected = if i == j { 1.0 } else { 0.0 };
1192                assert!((d[i][j] - expected).abs() < TOL);
1193            }
1194        }
1195    }
1196    #[test]
1197    fn test_from_dense_round_trip() {
1198        let dense = vec![
1199            vec![1.0, 0.0, 2.0],
1200            vec![0.0, 3.0, 0.0],
1201            vec![4.0, 0.0, 5.0],
1202        ];
1203        let m = CsrMatrix::from_dense(&dense);
1204        let back = m.to_dense();
1205        for i in 0..3 {
1206            for j in 0..3 {
1207                assert!((back[i][j] - dense[i][j]).abs() < TOL);
1208            }
1209        }
1210    }
1211    #[test]
1212    fn test_is_symmetric_identity() {
1213        assert!(identity_csr(5).is_symmetric(TOL));
1214    }
1215    #[test]
1216    fn test_is_symmetric_laplacian() {
1217        let lap = laplacian_1d(10, 0.1);
1218        assert!(lap.is_symmetric(TOL));
1219    }
1220    #[test]
1221    fn test_is_not_symmetric() {
1222        let m = CsrMatrix::from_triplets(2, 2, &[0], &[1], &[1.0]);
1223        assert!(!m.is_symmetric(TOL));
1224    }
1225    #[test]
1226    fn test_identity_csr() {
1227        let id = identity_csr(6);
1228        assert_eq!(id.nnz(), 6);
1229        let y = id.matvec(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
1230        for (i, yi) in y.iter().enumerate() {
1231            assert!((yi - (i + 1) as f64).abs() < TOL);
1232        }
1233    }
1234    #[test]
1235    fn test_laplacian_1d_size() {
1236        let n = 8;
1237        let lap = laplacian_1d(n, 0.1);
1238        assert_eq!(lap.nrows, n);
1239        assert_eq!(lap.ncols, n);
1240        assert!(lap.is_symmetric(TOL));
1241    }
1242    #[test]
1243    fn test_laplacian_1d_stencil() {
1244        let dx = 0.5;
1245        let lap = laplacian_1d(3, dx);
1246        let inv = 1.0 / (dx * dx);
1247        assert!((lap.get(0, 0) - 2.0 * inv).abs() < TOL);
1248        assert!((lap.get(0, 1) + inv).abs() < TOL);
1249        assert!((lap.get(1, 0) + inv).abs() < TOL);
1250        assert!((lap.get(1, 1) - 2.0 * inv).abs() < TOL);
1251    }
1252    #[test]
1253    fn test_laplacian_2d_size() {
1254        let (nx, ny) = (4, 5);
1255        let lap = laplacian_2d(nx, ny, 0.1);
1256        assert_eq!(lap.nrows, nx * ny);
1257        assert_eq!(lap.ncols, nx * ny);
1258        assert!(lap.is_symmetric(TOL));
1259    }
1260    #[test]
1261    fn test_laplacian_2d_diagonal() {
1262        let lap = laplacian_2d(3, 3, 1.0);
1263        let d = lap.diagonal();
1264        for v in d {
1265            assert!((v - 4.0).abs() < TOL);
1266        }
1267    }
1268    #[test]
1269    fn test_cg_solve_simple() {
1270        let a = CsrMatrix::from_triplets(2, 2, &[0, 0, 1, 1], &[0, 1, 0, 1], &[4.0, 1.0, 1.0, 3.0]);
1271        let b = [1.0, 2.0];
1272        let x0 = [0.0, 0.0];
1273        let (x, _, res) = cg_solve(&a, &b, &x0, 1e-10, 100);
1274        let ax = a.matvec(&x);
1275        assert!((ax[0] - b[0]).abs() < 1e-8);
1276        assert!((ax[1] - b[1]).abs() < 1e-8);
1277        assert!(res < 1e-8);
1278    }
1279    #[test]
1280    fn test_cg_solve_identity() {
1281        let n = 5;
1282        let id = identity_csr(n);
1283        let b: Vec<f64> = (1..=n).map(|i| i as f64).collect();
1284        let x0 = vec![0.0; n];
1285        let (x, _, _) = cg_solve(&id, &b, &x0, 1e-12, 100);
1286        for (xi, bi) in x.iter().zip(b.iter()) {
1287            assert!((xi - bi).abs() < 1e-10);
1288        }
1289    }
1290    #[test]
1291    fn test_cg_solve_laplacian() {
1292        let n = 10;
1293        let lap = laplacian_1d(n, 1.0 / (n + 1) as f64);
1294        let b = vec![1.0; n];
1295        let x0 = vec![0.0; n];
1296        let (x, _, res) = cg_solve(&lap, &b, &x0, 1e-10, 1000);
1297        let ax = lap.matvec(&x);
1298        let err: f64 = ax
1299            .iter()
1300            .zip(b.iter())
1301            .map(|(ai, bi)| (ai - bi).powi(2))
1302            .sum::<f64>()
1303            .sqrt();
1304        assert!(err < 1e-8, "residual={res}, err={err}");
1305    }
1306    #[test]
1307    fn test_jacobi_solve_diagonal() {
1308        let a = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[2.0, 4.0, 8.0]);
1309        let b = [4.0, 8.0, 16.0];
1310        let (x, _) = jacobi_solve(&a, &b, 1e-12, 100);
1311        assert!((x[0] - 2.0).abs() < TOL);
1312        assert!((x[1] - 2.0).abs() < TOL);
1313        assert!((x[2] - 2.0).abs() < TOL);
1314    }
1315    #[test]
1316    fn test_jacobi_solve_convergence() {
1317        let a =
1318            CsrMatrix::from_triplets(2, 2, &[0, 0, 1, 1], &[0, 1, 0, 1], &[10.0, 1.0, 1.0, 10.0]);
1319        let b = [11.0, 11.0];
1320        let (x, _) = jacobi_solve(&a, &b, 1e-12, 1000);
1321        assert!((x[0] - 1.0).abs() < 1e-9);
1322        assert!((x[1] - 1.0).abs() < 1e-9);
1323    }
1324    #[test]
1325    fn test_sor_gauss_seidel() {
1326        let a = CsrMatrix::from_triplets(2, 2, &[0, 0, 1, 1], &[0, 1, 0, 1], &[4.0, 1.0, 1.0, 3.0]);
1327        let b = [1.0, 2.0];
1328        let (x, _) = sor_solve(&a, &b, 1.0, 1e-12, 10000);
1329        let ax = a.matvec(&x);
1330        assert!((ax[0] - b[0]).abs() < 1e-9);
1331        assert!((ax[1] - b[1]).abs() < 1e-9);
1332    }
1333    #[test]
1334    fn test_sor_omega() {
1335        let a = CsrMatrix::from_triplets(
1336            3,
1337            3,
1338            &[0, 1, 2, 0, 1, 1, 2],
1339            &[0, 1, 2, 1, 0, 2, 1],
1340            &[4.0, 4.0, 4.0, -1.0, -1.0, -1.0, -1.0],
1341        );
1342        let b = [3.0, 2.0, 3.0];
1343        let (x, _) = sor_solve(&a, &b, 1.2, 1e-10, 10000);
1344        let ax = a.matvec(&x);
1345        for i in 0..3 {
1346            assert!(
1347                (ax[i] - b[i]).abs() < 1e-8,
1348                "ax[{i}]={} b[{i}]={}",
1349                ax[i],
1350                b[i]
1351            );
1352        }
1353    }
1354    #[test]
1355    fn test_diagonal_preconditioner() {
1356        let a = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[2.0, 4.0, 8.0]);
1357        let m = diagonal_preconditioner(&a);
1358        assert!((m[0] - 0.5).abs() < TOL);
1359        assert!((m[1] - 0.25).abs() < TOL);
1360        assert!((m[2] - 0.125).abs() < TOL);
1361    }
1362    #[test]
1363    fn test_diagonal_preconditioner_zero_diag() {
1364        let a = CsrMatrix::new(3, 3);
1365        let m = diagonal_preconditioner(&a);
1366        for v in m {
1367            assert!((v - 1.0).abs() < TOL);
1368        }
1369    }
1370    #[test]
1371    fn test_pcg_solve() {
1372        let lap = laplacian_1d(10, 1.0 / 11.0);
1373        let b = vec![1.0; 10];
1374        let precond = diagonal_preconditioner(&lap);
1375        let (x, _, res) = pcg_solve(&lap, &b, &precond, 1e-10, 500);
1376        let ax = lap.matvec(&x);
1377        let err: f64 = ax
1378            .iter()
1379            .zip(b.iter())
1380            .map(|(ai, bi)| (ai - bi).powi(2))
1381            .sum::<f64>()
1382            .sqrt();
1383        assert!(err < 1e-8, "residual={res}, err={err}");
1384    }
1385    #[test]
1386    fn test_nnz() {
1387        let m = laplacian_1d(5, 0.2);
1388        assert_eq!(m.nnz(), 5 + 2 * 4);
1389    }
1390    #[test]
1391    fn test_ilu0_solves_diagonal_system() {
1392        let a = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[2.0, 4.0, 8.0]);
1393        let lu = SparseLu::ilu0(&a);
1394        let b = [4.0, 8.0, 16.0];
1395        let x = lu.solve(&b);
1396        let ax = a.matvec(&x);
1397        for i in 0..3 {
1398            assert!(
1399                (ax[i] - b[i]).abs() < 1e-8,
1400                "ILU0 solve: ax[{i}]={} b[{i}]={}",
1401                ax[i],
1402                b[i]
1403            );
1404        }
1405    }
1406    #[test]
1407    fn test_ilu0_spd_small() {
1408        let a = CsrMatrix::from_triplets(2, 2, &[0, 0, 1, 1], &[0, 1, 0, 1], &[4.0, 1.0, 1.0, 3.0]);
1409        let lu = SparseLu::ilu0(&a);
1410        let b = [1.0, 2.0];
1411        let x = lu.solve(&b);
1412        let ax = a.matvec(&x);
1413        for i in 0..2 {
1414            assert!(
1415                (ax[i] - b[i]).abs() < 1e-8,
1416                "ax[{i}]={} b[{i}]={}",
1417                ax[i],
1418                b[i]
1419            );
1420        }
1421    }
1422    #[test]
1423    fn test_ilu0_l_unit_diagonal() {
1424        let a = identity_csr(4);
1425        let lu = SparseLu::ilu0(&a);
1426        for i in 0..4 {
1427            assert!(
1428                (lu.l.get(i, i) - 1.0).abs() < 1e-12,
1429                "L diagonal should be 1"
1430            );
1431        }
1432    }
1433    #[test]
1434    fn test_ic0_spd_factor() {
1435        let a = CsrMatrix::from_triplets(
1436            3,
1437            3,
1438            &[0, 0, 1, 1, 1, 2, 2],
1439            &[0, 1, 0, 1, 2, 1, 2],
1440            &[4.0, -1.0, -1.0, 4.0, -1.0, -1.0, 4.0],
1441        );
1442        let ic = IncompleteCholesky::factor(&a).expect("IC should succeed for SPD matrix");
1443        for i in 0..3 {
1444            assert!(ic.l.get(i, i) > 0.0, "IC diagonal should be positive");
1445        }
1446    }
1447    #[test]
1448    fn test_ic0_apply_returns_vector() {
1449        let lap = laplacian_1d(5, 1.0 / 6.0);
1450        let ic = IncompleteCholesky::factor(&lap).expect("IC should succeed");
1451        let b = vec![1.0, 0.0, 0.0, 0.0, 0.0];
1452        let x = ic.apply(&b);
1453        assert_eq!(x.len(), 5);
1454        let nrm: f64 = x.iter().map(|v| v * v).sum::<f64>().sqrt();
1455        assert!(nrm > 0.0, "IC apply should return non-zero vector");
1456    }
1457    #[test]
1458    fn test_sparse_qr_least_squares_identity() {
1459        let id = identity_csr(3);
1460        let qr = SparseQr::factor(&id);
1461        let b = vec![1.0, 2.0, 3.0];
1462        let x = qr.solve_least_squares(&b);
1463        for i in 0..3 {
1464            assert!(
1465                (x[i] - b[i]).abs() < 1e-8,
1466                "QR solve: x[{i}]={} b[{i}]={}",
1467                x[i],
1468                b[i]
1469            );
1470        }
1471    }
1472    #[test]
1473    fn test_sparse_qr_overdetermined() {
1474        let a = CsrMatrix::from_triplets(3, 2, &[0, 1, 2, 2], &[0, 1, 0, 1], &[1.0, 1.0, 1.0, 1.0]);
1475        let qr = SparseQr::factor(&a);
1476        let b = vec![1.0, 1.0, 2.0];
1477        let x = qr.solve_least_squares(&b);
1478        assert_eq!(x.len(), 2);
1479        let ax = a.matvec(&x);
1480        let res: f64 = ax
1481            .iter()
1482            .zip(b.iter())
1483            .map(|(ai, bi)| (ai - bi).powi(2))
1484            .sum::<f64>();
1485        assert!(res < 1.0, "QR least-squares residual={res} should be small");
1486    }
1487    #[test]
1488    fn test_sparse_qr_q_orthogonal() {
1489        let a = laplacian_1d(3, 1.0);
1490        let qr = SparseQr::factor(&a);
1491        let m = qr.nrows;
1492        for i in 0..m {
1493            for j in 0..m {
1494                let dot: f64 = (0..m).map(|k| qr.q[i][k] * qr.q[j][k]).sum();
1495                let expected = if i == j { 1.0 } else { 0.0 };
1496                assert!(
1497                    (dot - expected).abs() < 1e-8,
1498                    "Q orthogonality failed at ({i},{j}): {dot}"
1499                );
1500            }
1501        }
1502    }
1503    #[test]
1504    fn test_ssor_apply_returns_correct_size() {
1505        let a = laplacian_1d(5, 1.0 / 6.0);
1506        let ssor = SsorPreconditioner::new(a, 1.0);
1507        let b = vec![1.0; 5];
1508        let x = ssor.apply(&b);
1509        assert_eq!(x.len(), 5, "SSOR apply should return vector of same size");
1510    }
1511    #[test]
1512    fn test_ssor_with_identity_omega1() {
1513        let a = identity_csr(4);
1514        let ssor = SsorPreconditioner::new(a, 1.0);
1515        let b = vec![2.0, 3.0, 4.0, 5.0];
1516        let x = ssor.apply(&b);
1517        assert_eq!(x.len(), 4);
1518        let nrm: f64 = x.iter().map(|v| v * v).sum::<f64>().sqrt();
1519        assert!(nrm > 0.0, "SSOR apply should return non-zero");
1520    }
1521    #[test]
1522    fn test_sparse_eig_power_identity() {
1523        let a = identity_csr(5);
1524        let result = sparse_eig_power(&a, 1e-10, 1000);
1525        let (lam, v) = result.expect("power iteration should converge");
1526        assert!(
1527            (lam - 1.0).abs() < 1e-6,
1528            "dominant eigenvalue of I should be 1, got {lam}"
1529        );
1530        let nrm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
1531        assert!(
1532            (nrm - 1.0).abs() < 1e-6,
1533            "eigenvector should be unit length"
1534        );
1535    }
1536    #[test]
1537    fn test_sparse_eig_power_diagonal() {
1538        let a = CsrMatrix::from_triplets(
1539            5,
1540            5,
1541            &[0, 1, 2, 3, 4],
1542            &[0, 1, 2, 3, 4],
1543            &[1.0, 2.0, 3.0, 4.0, 5.0],
1544        );
1545        let (lam, _) = sparse_eig_power(&a, 1e-10, 1000).expect("should converge");
1546        assert!(
1547            (lam - 5.0).abs() < 1e-6,
1548            "dominant eigenvalue should be 5, got {lam}"
1549        );
1550    }
1551    #[test]
1552    fn test_spectral_radius_estimate_identity() {
1553        let a = identity_csr(4);
1554        let sr = spectral_radius_estimate(&a, 1e-10, 1000);
1555        assert!(
1556            (sr - 1.0).abs() < 1e-4,
1557            "spectral radius of I should be 1, got {sr}"
1558        );
1559    }
1560    #[test]
1561    fn test_sparse_eig_smallest_laplacian() {
1562        let lap = laplacian_1d(5, 1.0 / 6.0);
1563        let result = sparse_eig_smallest(&lap, 1e-6, 500);
1564        if let Some((lam, _)) = result {
1565            assert!(
1566                lam > 0.0,
1567                "smallest eigenvalue of Laplacian should be positive, got {lam}"
1568            );
1569        }
1570    }
1571    #[test]
1572    fn test_csc_from_csr_identity() {
1573        let id = identity_csr(3);
1574        let csc = CscMatrix::from_csr(&id);
1575        assert_eq!(csc.nrows, 3);
1576        assert_eq!(csc.ncols, 3);
1577        for j in 0..3 {
1578            let col = csc.get_column(j);
1579            assert_eq!(col.len(), 1, "identity column {j} should have 1 entry");
1580            assert_eq!(col[0].0, j, "identity column {j} entry row should be {j}");
1581            assert!(
1582                (col[0].1 - 1.0).abs() < 1e-12,
1583                "identity column {j} value should be 1"
1584            );
1585        }
1586    }
1587    #[test]
1588    fn test_csc_column_extraction_laplacian() {
1589        let lap = laplacian_1d(5, 1.0);
1590        let csc = CscMatrix::from_csr(&lap);
1591        let col2 = csc.get_column(2);
1592        assert_eq!(
1593            col2.len(),
1594            3,
1595            "interior column of 1D Laplacian should have 3 entries"
1596        );
1597    }
1598    #[test]
1599    fn test_csc_column_extraction_values() {
1600        let a = CsrMatrix::from_triplets(3, 3, &[0, 0, 1, 2], &[0, 1, 1, 2], &[1.0, 2.0, 3.0, 4.0]);
1601        let csc = CscMatrix::from_csr(&a);
1602        let col1 = csc.get_column(1);
1603        assert_eq!(col1.len(), 2);
1604        let found_row0 = col1.iter().any(|&(r, v)| r == 0 && (v - 2.0).abs() < 1e-12);
1605        let found_row1 = col1.iter().any(|&(r, v)| r == 1 && (v - 3.0).abs() < 1e-12);
1606        assert!(
1607            found_row0,
1608            "column 1 should have row 0 entry with value 2.0"
1609        );
1610        assert!(
1611            found_row1,
1612            "column 1 should have row 1 entry with value 3.0"
1613        );
1614    }
1615    #[test]
1616    fn test_csc_matvec() {
1617        let id = identity_csr(4);
1618        let csc = CscMatrix::from_csr(&id);
1619        let x = vec![1.0, 2.0, 3.0, 4.0];
1620        let y = csc.matvec(&x);
1621        for i in 0..4 {
1622            assert!(
1623                (y[i] - x[i]).abs() < 1e-12,
1624                "CSC matvec identity: y[{i}]={} expected {}",
1625                y[i],
1626                x[i]
1627            );
1628        }
1629    }
1630    #[test]
1631    fn test_spgemm_identity_times_identity() {
1632        let id = identity_csr(3);
1633        let result = spgemm(&id, &id);
1634        let dense = result.to_dense();
1635        for i in 0..3 {
1636            for j in 0..3 {
1637                let expected = if i == j { 1.0 } else { 0.0 };
1638                assert!(
1639                    (dense[i][j] - expected).abs() < 1e-12,
1640                    "I*I[{i}][{j}] = {} expected {}",
1641                    dense[i][j],
1642                    expected
1643                );
1644            }
1645        }
1646    }
1647    #[test]
1648    fn test_spgemm_diagonal_times_diagonal() {
1649        let a = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[2.0, 3.0, 4.0]);
1650        let b = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[5.0, 6.0, 7.0]);
1651        let c = spgemm(&a, &b);
1652        let dense = c.to_dense();
1653        assert!(
1654            (dense[0][0] - 10.0).abs() < 1e-12,
1655            "C[0][0] = {} expected 10",
1656            dense[0][0]
1657        );
1658        assert!(
1659            (dense[1][1] - 18.0).abs() < 1e-12,
1660            "C[1][1] = {} expected 18",
1661            dense[1][1]
1662        );
1663        assert!(
1664            (dense[2][2] - 28.0).abs() < 1e-12,
1665            "C[2][2] = {} expected 28",
1666            dense[2][2]
1667        );
1668    }
1669    #[test]
1670    fn test_spgemm_rectangular() {
1671        let a = CsrMatrix::from_triplets(
1672            2,
1673            3,
1674            &[0, 0, 0, 1, 1, 1],
1675            &[0, 1, 2, 0, 1, 2],
1676            &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
1677        );
1678        let b = CsrMatrix::from_triplets(
1679            3,
1680            2,
1681            &[0, 0, 1, 1, 2, 2],
1682            &[0, 1, 0, 1, 0, 1],
1683            &[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
1684        );
1685        let c = spgemm(&a, &b);
1686        assert_eq!(c.nrows, 2);
1687        assert_eq!(c.ncols, 2);
1688        let dense = c.to_dense();
1689        assert!(
1690            (dense[0][0] - 58.0).abs() < 1e-10,
1691            "C[0][0] = {} expected 58",
1692            dense[0][0]
1693        );
1694        assert!(
1695            (dense[0][1] - 64.0).abs() < 1e-10,
1696            "C[0][1] = {} expected 64",
1697            dense[0][1]
1698        );
1699        assert!(
1700            (dense[1][0] - 139.0).abs() < 1e-10,
1701            "C[1][0] = {} expected 139",
1702            dense[1][0]
1703        );
1704        assert!(
1705            (dense[1][1] - 154.0).abs() < 1e-10,
1706            "C[1][1] = {} expected 154",
1707            dense[1][1]
1708        );
1709    }
1710    #[test]
1711    fn test_spgemm_matches_matvec() {
1712        let lap = laplacian_1d(4, 1.0);
1713        let a2 = spgemm(&lap, &lap);
1714        let x = vec![1.0, 0.0, 0.0, 0.0];
1715        let direct = a2.matvec(&x);
1716        let two_step = lap.matvec(&lap.matvec(&x));
1717        for i in 0..4 {
1718            assert!(
1719                (direct[i] - two_step[i]).abs() < 1e-10,
1720                "SpGEMM result differs from sequential matvec at [{i}]"
1721            );
1722        }
1723    }
1724    #[test]
1725    fn test_coo_to_csr_basic() {
1726        let rows = vec![0usize, 1, 2];
1727        let cols = vec![0usize, 1, 2];
1728        let vals = vec![1.0, 2.0, 3.0];
1729        let csr = coo_to_csr(3, 3, &rows, &cols, &vals);
1730        assert_eq!(csr.nrows, 3);
1731        assert_eq!(csr.ncols, 3);
1732        assert!((csr.get(0, 0) - 1.0).abs() < 1e-12);
1733        assert!((csr.get(1, 1) - 2.0).abs() < 1e-12);
1734        assert!((csr.get(2, 2) - 3.0).abs() < 1e-12);
1735    }
1736    #[test]
1737    fn test_coo_to_csr_duplicate_sum() {
1738        let rows = vec![0usize, 0];
1739        let cols = vec![0usize, 0];
1740        let vals = vec![1.0, 2.0];
1741        let csr = coo_to_csr(2, 2, &rows, &cols, &vals);
1742        assert!(
1743            (csr.get(0, 0) - 3.0).abs() < 1e-12,
1744            "Duplicate entries should be summed"
1745        );
1746    }
1747    #[test]
1748    fn test_coo_to_csr_unsorted() {
1749        let rows = vec![2usize, 0, 1];
1750        let cols = vec![2usize, 0, 1];
1751        let vals = vec![3.0, 1.0, 2.0];
1752        let csr = coo_to_csr(3, 3, &rows, &cols, &vals);
1753        assert!(
1754            (csr.get(0, 0) - 1.0).abs() < 1e-12,
1755            "COO to CSR from unsorted: [0,0]"
1756        );
1757        assert!(
1758            (csr.get(1, 1) - 2.0).abs() < 1e-12,
1759            "COO to CSR from unsorted: [1,1]"
1760        );
1761        assert!(
1762            (csr.get(2, 2) - 3.0).abs() < 1e-12,
1763            "COO to CSR from unsorted: [2,2]"
1764        );
1765    }
1766    #[test]
1767    fn test_arnoldi_basis_orthonormal() {
1768        let a = laplacian_1d(6, 1.0);
1769        let b0 = vec![1.0, 0.0, 0.0, 0.0, 0.0, 0.0];
1770        let (q, _h) = arnoldi(&a, &b0, 4);
1771        let k = q.len();
1772        for i in 0..k {
1773            for j in 0..k {
1774                let dot: f64 = q[i].iter().zip(q[j].iter()).map(|(a, b)| a * b).sum();
1775                let expected = if i == j { 1.0 } else { 0.0 };
1776                assert!(
1777                    (dot - expected).abs() < 1e-8,
1778                    "Arnoldi basis not orthonormal at ({i},{j}): dot = {dot}"
1779                );
1780            }
1781        }
1782    }
1783    #[test]
1784    fn test_arnoldi_krylov_relation() {
1785        let a = laplacian_1d(5, 1.0);
1786        let b0 = vec![1.0, 0.0, 0.0, 0.0, 0.0];
1787        let (q, h) = arnoldi(&a, &b0, 3);
1788        for j in 0..q.len().saturating_sub(1) {
1789            let aqj = a.matvec(&q[j]);
1790            let mut residual = aqj.clone();
1791            for i in 0..h.len() {
1792                if i < q.len() && j < h[i].len() {
1793                    for (r, qr) in residual.iter_mut().zip(q[i].iter()) {
1794                        *r -= h[i][j] * qr;
1795                    }
1796                }
1797            }
1798            let res_norm: f64 = residual.iter().map(|x| x * x).sum::<f64>().sqrt();
1799            assert!(
1800                res_norm < 1e-8,
1801                "Arnoldi relation violated at j={j}: residual norm = {res_norm}"
1802            );
1803        }
1804    }
1805    #[test]
1806    fn test_arnoldi_single_step_is_normalized_input() {
1807        let a = identity_csr(4);
1808        let b0 = vec![3.0, 4.0, 0.0, 0.0];
1809        let (q, _h) = arnoldi(&a, &b0, 1);
1810        assert_eq!(q.len(), 1);
1811        let expected = [0.6, 0.8, 0.0, 0.0];
1812        for i in 0..4 {
1813            assert!(
1814                (q[0][i] - expected[i]).abs() < 1e-10,
1815                "First Arnoldi vector: q[0][{i}] = {} expected {}",
1816                q[0][i],
1817                expected[i]
1818            );
1819        }
1820    }
1821    #[test]
1822    fn test_jacobi_scale_identity() {
1823        let id = identity_csr(4);
1824        let scaled = jacobi_scale(&id);
1825        for i in 0..4 {
1826            assert!(
1827                (scaled.get(i, i) - 1.0).abs() < 1e-12,
1828                "Jacobi-scaled identity diagonal[{i}] = {} expected 1",
1829                scaled.get(i, i)
1830            );
1831        }
1832    }
1833    #[test]
1834    fn test_jacobi_scale_diagonal_matrix() {
1835        let d = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[2.0, 4.0, 8.0]);
1836        let scaled = jacobi_scale(&d);
1837        assert!((scaled.get(0, 0) - 1.0).abs() < 1e-12);
1838        assert!((scaled.get(1, 1) - 1.0).abs() < 1e-12);
1839        assert!((scaled.get(2, 2) - 1.0).abs() < 1e-12);
1840    }
1841    #[test]
1842    fn test_jacobi_scale_preserves_sparsity() {
1843        let lap = laplacian_1d(5, 1.0);
1844        let nnz_before = lap.nnz();
1845        let scaled = jacobi_scale(&lap);
1846        assert_eq!(
1847            scaled.nnz(),
1848            nnz_before,
1849            "Jacobi scaling should not change sparsity pattern"
1850        );
1851    }
1852    #[test]
1853    fn test_kronecker_identity_by_identity() {
1854        let i2 = identity_csr(2);
1855        let kron = i2.kronecker_product(&i2);
1856        assert_eq!(kron.nrows, 4);
1857        assert_eq!(kron.ncols, 4);
1858        for i in 0..4 {
1859            assert!(
1860                (kron.get(i, i) - 1.0).abs() < 1e-12,
1861                "diagonal mismatch at {}",
1862                i
1863            );
1864            for j in 0..4 {
1865                if i != j {
1866                    assert!(
1867                        kron.get(i, j).abs() < 1e-12,
1868                        "off-diagonal non-zero at ({},{})",
1869                        i,
1870                        j
1871                    );
1872                }
1873            }
1874        }
1875    }
1876    #[test]
1877    fn test_kronecker_product_dimensions() {
1878        let a = CsrMatrix::from_triplets(2, 3, &[0, 1], &[0, 2], &[1.0, 1.0]);
1879        let b = CsrMatrix::from_triplets(4, 5, &[0, 3], &[0, 4], &[1.0, 1.0]);
1880        let kron = a.kronecker_product(&b);
1881        assert_eq!(kron.nrows, 8);
1882        assert_eq!(kron.ncols, 15);
1883    }
1884    #[test]
1885    fn test_kronecker_scalar() {
1886        let a = CsrMatrix::from_triplets(1, 1, &[0], &[0], &[3.0]);
1887        let b = CsrMatrix::from_triplets(1, 1, &[0], &[0], &[4.0]);
1888        let kron = a.kronecker_product(&b);
1889        assert_eq!(kron.nrows, 1);
1890        assert_eq!(kron.ncols, 1);
1891        assert!((kron.get(0, 0) - 12.0).abs() < 1e-12);
1892    }
1893    #[test]
1894    fn test_ic0_identity_gives_identity() {
1895        let id = identity_csr(4);
1896        let l = id.incomplete_cholesky();
1897        for i in 0..4 {
1898            assert!(
1899                (l.get(i, i) - 1.0).abs() < 1e-10,
1900                "L[{},{}]={}",
1901                i,
1902                i,
1903                l.get(i, i)
1904            );
1905        }
1906    }
1907    #[test]
1908    fn test_ic0_diagonal_matrix() {
1909        let d = CsrMatrix::from_triplets(3, 3, &[0, 1, 2], &[0, 1, 2], &[4.0, 9.0, 16.0]);
1910        let l = d.incomplete_cholesky();
1911        assert!((l.get(0, 0) - 2.0).abs() < 1e-10);
1912        assert!((l.get(1, 1) - 3.0).abs() < 1e-10);
1913        assert!((l.get(2, 2) - 4.0).abs() < 1e-10);
1914    }
1915    #[test]
1916    fn test_ic0_lower_triangular() {
1917        let id = identity_csr(5);
1918        let l = id.incomplete_cholesky();
1919        for r in 0..5 {
1920            for c in (r + 1)..5 {
1921                assert!(
1922                    l.get(r, c).abs() < 1e-12,
1923                    "upper triangle non-zero L[{},{}]={}",
1924                    r,
1925                    c,
1926                    l.get(r, c)
1927                );
1928            }
1929        }
1930    }
1931    #[test]
1932    fn test_power_iteration_identity() {
1933        let id = identity_csr(4);
1934        let (lam, _v) = id.power_iteration(100, 1e-10);
1935        assert!((lam - 1.0).abs() < 1e-6, "lambda={}", lam);
1936    }
1937    #[test]
1938    fn test_power_iteration_diagonal() {
1939        let d =
1940            CsrMatrix::from_triplets(4, 4, &[0, 1, 2, 3], &[0, 1, 2, 3], &[1.0, 2.0, 3.0, 10.0]);
1941        let (lam, _v) = d.power_iteration(200, 1e-10);
1942        assert!((lam - 10.0).abs() < 1e-4, "expected lambda≈10, got {}", lam);
1943    }
1944    #[test]
1945    fn test_power_iteration_eigenvector_unit() {
1946        let id = identity_csr(3);
1947        let (_lam, v) = id.power_iteration(50, 1e-10);
1948        let norm: f64 = v.iter().map(|&x| x * x).sum::<f64>().sqrt();
1949        assert!(
1950            (norm - 1.0).abs() < 1e-9,
1951            "eigenvector not unit: norm={}",
1952            norm
1953        );
1954    }
1955}