// scirs2_sparse/formats/poly_precond.rs
1//! Polynomial preconditioners with automatic eigenvalue bound estimation
2//!
3//! This module provides polynomial preconditioners for iterative solvers:
4//!
5//! - **Chebyshev polynomial preconditioner**: Uses Chebyshev polynomials shifted
6//!   to the estimated eigenvalue interval `[lambda_min, lambda_max]` to construct
7//!   `p(A) ~ A^{-1}`. Eigenvalue bounds are estimated via a few Lanczos iterations.
8//!
9//! - **Neumann series preconditioner**: Truncated Neumann series
10//!   `M^{-1} ~ sum_{k=0}^{d} (I - A)^k` (for `||I - A|| < 1`).
11//!
12//! Both preconditioners are applied matrix-free using only sparse matrix-vector
13//! products (no explicit matrix construction).
14//!
15//! # References
16//!
17//! - Saad, Y. (2003). *Iterative Methods for Sparse Linear Systems*, 2nd ed. SIAM.
18//! - Golub, G. H. & Van Loan, C. F. (2013). *Matrix Computations*, 4th ed.
19
20use crate::csr::CsrMatrix;
21use crate::error::{SparseError, SparseResult};
22
23/// Configuration for the polynomial preconditioner.
/// Configuration for the polynomial preconditioner.
///
/// All fields are plain scalars, so the config derives `Copy` and can be
/// passed by value and compared with `PartialEq` in tests.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct PolyPrecondConfig {
    /// Polynomial degree (number of Chebyshev terms or Neumann terms).
    /// Default: 10.
    pub degree: usize,
    /// Number of Lanczos iterations for eigenvalue estimation.
    /// Default: 20.
    pub lanczos_steps: usize,
    /// Safety margin for eigenvalue bounds (multiplicative factor).
    /// `lambda_min` is divided by this, `lambda_max` is multiplied by this.
    /// Default: 1.1.
    pub eigenvalue_margin: f64,
}
37
38impl Default for PolyPrecondConfig {
39    fn default() -> Self {
40        Self {
41            degree: 10,
42            lanczos_steps: 20,
43            eigenvalue_margin: 1.1,
44        }
45    }
46}
47
/// Chebyshev polynomial preconditioner with automatic eigenvalue estimation.
///
/// Constructs `p(A) ~ A^{-1}` where `p` is a degree-d Chebyshev polynomial
/// optimally scaled to the spectrum `[lambda_min, lambda_max]`.
///
/// Application proceeds via the three-term Chebyshev recurrence; for
/// `degree >= 2` this costs `degree - 1` sparse matrix-vector products per
/// application (degrees 0 and 1 only scale the input by `1/theta`, the
/// spectrum center).
///
/// The CSR arrays are stored as owned copies, so the preconditioner is
/// independent of the source matrix after construction.
pub struct ChebyshevPreconditioner {
    /// Polynomial degree.
    degree: usize,
    /// Estimated (or user-supplied) smallest eigenvalue; must be > 0 (SPD).
    lambda_min: f64,
    /// Estimated (or user-supplied) largest eigenvalue.
    lambda_max: f64,
    /// CSR row pointers (length `n + 1`).
    indptr: Vec<usize>,
    /// CSR column indices, aligned with `data`.
    indices: Vec<usize>,
    /// CSR nonzero values.
    data: Vec<f64>,
    /// Matrix dimension (rows == cols).
    n: usize,
}
71
impl ChebyshevPreconditioner {
    /// Build a Chebyshev preconditioner from a CSR matrix.
    ///
    /// Eigenvalue bounds are estimated automatically using a few Lanczos steps.
    ///
    /// # Arguments
    ///
    /// * `csr` - The SPD sparse matrix in CSR format.
    /// * `config` - Configuration parameters.
    ///
    /// # Errors
    ///
    /// Returns `SparseError::ValueError` if the matrix is not square, is
    /// empty, or the estimated `lambda_min` is non-positive (which suggests
    /// the matrix is not SPD).
    pub fn from_csr(csr: &CsrMatrix<f64>, config: &PolyPrecondConfig) -> SparseResult<Self> {
        let (nrows, ncols) = csr.shape();
        if nrows != ncols {
            return Err(SparseError::ValueError(
                "ChebyshevPreconditioner requires a square matrix".to_string(),
            ));
        }
        if nrows == 0 {
            return Err(SparseError::ValueError(
                "ChebyshevPreconditioner requires a non-empty matrix".to_string(),
            ));
        }

        let n = nrows;

        // Estimate eigenvalue bounds via Lanczos (safety margins are applied
        // inside the helper).
        let (lambda_min, lambda_max) = lanczos_eigenvalue_bounds(
            &csr.indptr,
            &csr.indices,
            &csr.data,
            n,
            config.lanczos_steps,
            config.eigenvalue_margin,
        )?;

        if lambda_min <= 0.0 {
            return Err(SparseError::ValueError(format!(
                "Estimated lambda_min={:.6e} <= 0; matrix may not be SPD",
                lambda_min
            )));
        }

        // Store owned copies of the CSR arrays so the preconditioner does
        // not borrow from the input matrix.
        Ok(Self {
            degree: config.degree,
            lambda_min,
            lambda_max,
            indptr: csr.indptr.clone(),
            indices: csr.indices.clone(),
            data: csr.data.clone(),
            n,
        })
    }

    /// Build a Chebyshev preconditioner with explicit eigenvalue bounds.
    ///
    /// # Arguments
    ///
    /// * `csr` - The SPD sparse matrix in CSR format.
    /// * `degree` - Polynomial degree (5-20 recommended).
    /// * `lambda_min` - Lower bound on the eigenvalue spectrum.
    /// * `lambda_max` - Upper bound on the eigenvalue spectrum.
    ///
    /// # Errors
    ///
    /// Returns `SparseError::ValueError` if the matrix is not square, if
    /// `lambda_min >= lambda_max`, or if `lambda_min <= 0`.
    pub fn with_bounds(
        csr: &CsrMatrix<f64>,
        degree: usize,
        lambda_min: f64,
        lambda_max: f64,
    ) -> SparseResult<Self> {
        let (nrows, ncols) = csr.shape();
        if nrows != ncols {
            return Err(SparseError::ValueError(
                "ChebyshevPreconditioner requires a square matrix".to_string(),
            ));
        }
        if lambda_min >= lambda_max {
            return Err(SparseError::ValueError(format!(
                "lambda_min ({}) must be < lambda_max ({})",
                lambda_min, lambda_max
            )));
        }
        if lambda_min <= 0.0 {
            return Err(SparseError::ValueError(format!(
                "lambda_min ({}) must be > 0 for SPD systems",
                lambda_min
            )));
        }

        Ok(Self {
            degree,
            lambda_min,
            lambda_max,
            indptr: csr.indptr.clone(),
            indices: csr.indices.clone(),
            data: csr.data.clone(),
            n: nrows,
        })
    }

    /// Apply the Chebyshev polynomial preconditioner: `z = p(A) * r`.
    ///
    /// Uses the Chebyshev iteration three-term recurrence. This computes
    /// an approximation to `A^{-1} r` without constructing any matrix.
    /// Costs `degree - 1` sparse matrix-vector products for `degree >= 2`;
    /// degrees 0 and 1 both return `r / theta` with no matvec.
    ///
    /// # Errors
    ///
    /// Returns `SparseError::DimensionMismatch` if `r.len() != n`.
    pub fn apply(&self, r: &[f64]) -> SparseResult<Vec<f64>> {
        if r.len() != self.n {
            return Err(SparseError::DimensionMismatch {
                expected: self.n,
                found: r.len(),
            });
        }

        let n = self.n;
        let theta = 0.5 * (self.lambda_max + self.lambda_min); // center
        let delta = 0.5 * (self.lambda_max - self.lambda_min); // half-width
        // sigma = theta/delta maps the spectrum interval onto [-1, 1]; the
        // clamp guards the degenerate case lambda_min ~= lambda_max.
        // NOTE(review): `coeff_r` below divides by the *unclamped* delta;
        // both constructors currently guarantee delta > 0, except possibly
        // from_csr with eigenvalue_margin == 1.0 on a degenerate spectrum -
        // confirm whether that case must be supported.
        let sigma = theta / delta.max(1e-300);

        // degree 0: z = r / theta
        if self.degree == 0 {
            return Ok(r.iter().map(|&v| v / theta).collect());
        }

        // Chebyshev iteration (Gutknecht & Rollin formulation)
        // z_0 = (1/theta) * r
        let mut z: Vec<f64> = r.iter().map(|&v| v / theta).collect();

        if self.degree == 1 {
            return Ok(z);
        }

        let mut z_prev = vec![0.0f64; n];
        let mut rho = 1.0 / sigma;

        for _ in 1..self.degree {
            // residual: res = r - A * z
            let az = csr_matvec(&self.indptr, &self.indices, &self.data, &z, n);
            let res: Vec<f64> = (0..n).map(|i| r[i] - az[i]).collect();

            // Recurrence coefficients for the shifted Chebyshev polynomials
            // on [lambda_min, lambda_max].
            let rho_new = 1.0 / (2.0 * sigma - rho);
            let coeff_r = 2.0 * rho_new / delta;
            let coeff_y = rho_new * rho;

            // z_{k+1} = z_k + coeff_r * res + coeff_y * (z_k - z_{k-1})
            let z_next: Vec<f64> = (0..n)
                .map(|i| z[i] + coeff_r * res[i] + coeff_y * (z[i] - z_prev[i]))
                .collect();

            z_prev = z;
            z = z_next;
            rho = rho_new;
        }

        Ok(z)
    }

    /// Return the estimated eigenvalue bounds `(lambda_min, lambda_max)`.
    pub fn eigenvalue_bounds(&self) -> (f64, f64) {
        (self.lambda_min, self.lambda_max)
    }

    /// Return the polynomial degree.
    pub fn degree(&self) -> usize {
        self.degree
    }

    /// Return the matrix dimension.
    pub fn size(&self) -> usize {
        self.n
    }
}
237
/// Neumann series preconditioner.
///
/// Approximates `A^{-1}` by the truncated Neumann series:
///   `M^{-1} = alpha * sum_{k=0}^{d} (I - alpha * A)^k`
///
/// Convergent when `||I - alpha * A|| < 1`. The scaling `alpha` is chosen
/// automatically as `1 / ||A||_inf` (infinity norm) unless supplied by the
/// caller.
pub struct NeumannPreconditioner {
    /// Truncation degree `d` of the series (the sum runs over `k = 0..=d`).
    degree: usize,
    /// Scaling factor alpha (always > 0).
    alpha: f64,
    /// CSR row pointers (length `n + 1`).
    indptr: Vec<usize>,
    /// CSR column indices, aligned with `data`.
    indices: Vec<usize>,
    /// CSR nonzero values.
    data: Vec<f64>,
    /// Matrix dimension (rows == cols).
    n: usize,
}
259
260impl NeumannPreconditioner {
261    /// Build a Neumann series preconditioner from a CSR matrix.
262    ///
263    /// # Arguments
264    ///
265    /// * `csr` - The sparse matrix in CSR format (should be well-conditioned).
266    /// * `degree` - Number of terms in the Neumann series (1-10 typical).
267    /// * `alpha` - Scaling factor. Pass `None` for automatic estimation.
268    pub fn from_csr(csr: &CsrMatrix<f64>, degree: usize, alpha: Option<f64>) -> SparseResult<Self> {
269        let (nrows, ncols) = csr.shape();
270        if nrows != ncols {
271            return Err(SparseError::ValueError(
272                "NeumannPreconditioner requires a square matrix".to_string(),
273            ));
274        }
275        if nrows == 0 {
276            return Err(SparseError::ValueError(
277                "NeumannPreconditioner requires a non-empty matrix".to_string(),
278            ));
279        }
280
281        let n = nrows;
282
283        let alpha = match alpha {
284            Some(a) => {
285                if a <= 0.0 {
286                    return Err(SparseError::ValueError(
287                        "alpha must be positive".to_string(),
288                    ));
289                }
290                a
291            }
292            None => {
293                // Estimate alpha = 1 / ||A||_inf
294                let inf_norm: f64 = (0..n)
295                    .map(|i| {
296                        csr.data[csr.indptr[i]..csr.indptr[i + 1]]
297                            .iter()
298                            .map(|v| v.abs())
299                            .sum::<f64>()
300                    })
301                    .fold(0.0f64, f64::max);
302                if inf_norm > 1e-300 {
303                    1.0 / inf_norm
304                } else {
305                    1.0
306                }
307            }
308        };
309
310        Ok(Self {
311            degree,
312            alpha,
313            indptr: csr.indptr.clone(),
314            indices: csr.indices.clone(),
315            data: csr.data.clone(),
316            n,
317        })
318    }
319
320    /// Apply the Neumann preconditioner: `z = M^{-1} * r`.
321    ///
322    /// Evaluates `z = alpha * sum_{k=0}^{d} (I - alpha*A)^k * r`
323    /// using Horner's scheme in `degree` SpMV operations.
324    pub fn apply(&self, r: &[f64]) -> SparseResult<Vec<f64>> {
325        if r.len() != self.n {
326            return Err(SparseError::DimensionMismatch {
327                expected: self.n,
328                found: r.len(),
329            });
330        }
331
332        let n = self.n;
333        let alpha = self.alpha;
334
335        // B*v = v - alpha * A*v  (i.e., (I - alpha*A)*v)
336        let b_apply = |v: &[f64]| -> Vec<f64> {
337            let av = csr_matvec(&self.indptr, &self.indices, &self.data, v, n);
338            let mut bv = v.to_vec();
339            for (bi, ai) in bv.iter_mut().zip(av.iter()) {
340                *bi -= alpha * ai;
341            }
342            bv
343        };
344
345        // Horner: sum = r; for k in 0..degree: sum = r + B*sum
346        let mut sum = r.to_vec();
347        for _ in 0..self.degree {
348            let b_sum = b_apply(&sum);
349            for (si, (&ri, &bi)) in sum.iter_mut().zip(r.iter().zip(b_sum.iter())) {
350                *si = ri + bi;
351            }
352        }
353
354        // Multiply by alpha
355        for v in sum.iter_mut() {
356            *v *= alpha;
357        }
358
359        Ok(sum)
360    }
361
362    /// Return the scaling factor alpha.
363    pub fn alpha(&self) -> f64 {
364        self.alpha
365    }
366
367    /// Return the polynomial degree.
368    pub fn degree(&self) -> usize {
369        self.degree
370    }
371
372    /// Return the matrix dimension.
373    pub fn size(&self) -> usize {
374        self.n
375    }
376}
377
378// ---------------------------------------------------------------------------
379// Internal helpers
380// ---------------------------------------------------------------------------
381
/// Sparse matrix-vector product (CSR): `y = A * x` for the `n x ?` matrix
/// described by the raw arrays `(indptr, indices, data)`.
fn csr_matvec(indptr: &[usize], indices: &[usize], data: &[f64], x: &[f64], n: usize) -> Vec<f64> {
    (0..n)
        .map(|row| {
            let (lo, hi) = (indptr[row], indptr[row + 1]);
            indices[lo..hi]
                .iter()
                .zip(&data[lo..hi])
                .map(|(&col, &val)| val * x[col])
                .sum::<f64>()
        })
        .collect()
}
394
/// Estimate eigenvalue bounds of a symmetric matrix using a few Lanczos steps.
///
/// Runs at most `min(num_steps, n)` Lanczos iterations from a deterministic
/// uniform start vector, then returns the extreme eigenvalues of the
/// resulting tridiagonal matrix with safety `margin` applied
/// (`lambda_min / margin`, `lambda_max * margin`).
///
/// # Errors
///
/// Returns `SparseError::ValueError` if `n == 0` or if no Lanczos iteration
/// could be performed (e.g. `num_steps == 0`).
fn lanczos_eigenvalue_bounds(
    indptr: &[usize],
    indices: &[usize],
    data: &[f64],
    n: usize,
    num_steps: usize,
    margin: f64,
) -> SparseResult<(f64, f64)> {
    if n == 0 {
        return Err(SparseError::ValueError(
            "Cannot estimate eigenvalues of empty matrix".to_string(),
        ));
    }

    let steps = num_steps.min(n);

    // Starting vector: uniform, unit 2-norm (deterministic, so results are
    // reproducible across runs).
    let inv_sqrt_n = 1.0 / (n as f64).sqrt();
    let mut q = vec![inv_sqrt_n; n];

    // Lanczos tridiagonal matrix coefficients (alpha = diagonal,
    // beta = off-diagonal).
    let mut alpha_vec: Vec<f64> = Vec::with_capacity(steps);
    let mut beta_vec: Vec<f64> = Vec::with_capacity(steps);

    let mut q_prev = vec![0.0f64; n];

    for j in 0..steps {
        // w = A * q
        let mut w = csr_matvec(indptr, indices, data, &q, n);

        // alpha_j = q^T * w
        let alpha_j: f64 = q.iter().zip(w.iter()).map(|(&qi, &wi)| qi * wi).sum();
        alpha_vec.push(alpha_j);

        // w = w - alpha_j * q - beta_{j-1} * q_prev
        let beta_prev = if j > 0 { beta_vec[j - 1] } else { 0.0 };
        for i in 0..n {
            w[i] -= alpha_j * q[i] + beta_prev * q_prev[i];
        }

        // NOTE(review): no re-orthogonalization is performed here. For the
        // small step counts used for rough bound estimation this is usually
        // acceptable, but Lanczos vectors lose orthogonality in floating
        // point; consider full re-orthogonalization if tighter bounds are
        // ever required.

        // beta_j = ||w||
        let beta_j: f64 = w.iter().map(|&v| v * v).sum::<f64>().sqrt();

        if beta_j < 1e-14 {
            // Invariant subspace found
            break;
        }
        beta_vec.push(beta_j);

        // q_{j+1} = w / beta_j
        q_prev = q;
        q = w.iter().map(|&v| v / beta_j).collect();
    }

    // Compute eigenvalues of the tridiagonal matrix using the QR algorithm
    let k = alpha_vec.len();
    if k == 0 {
        // Reachable only when steps == 0 (num_steps == 0).
        return Err(SparseError::ValueError(
            "Lanczos produced no iterations".to_string(),
        ));
    }

    let eigenvalues = tridiagonal_eigenvalues(&alpha_vec, &beta_vec)?;

    let mut lambda_min = f64::MAX;
    let mut lambda_max = f64::MIN;
    for &ev in &eigenvalues {
        if ev < lambda_min {
            lambda_min = ev;
        }
        if ev > lambda_max {
            lambda_max = ev;
        }
    }

    // Apply safety margins
    let lambda_min_safe = lambda_min / margin;
    let lambda_max_safe = lambda_max * margin;

    // Ensure lambda_min > 0 for SPD matrices; fall back to a small positive
    // floor when the estimate is non-positive or vanishing.
    let lambda_min_final = if lambda_min_safe > 1e-15 {
        lambda_min_safe
    } else {
        lambda_min.abs().max(1e-10) / margin
    };

    Ok((lambda_min_final, lambda_max_safe))
}
492
/// Compute eigenvalues of a symmetric tridiagonal matrix.
///
/// `alpha` is the diagonal, `beta` is the sub/super-diagonal.
/// Uses the Sturm sequence bisection method for robustness.
///
/// Thin delegation kept as a seam so the eigensolver backend can be swapped
/// without touching callers.
fn tridiagonal_eigenvalues(alpha: &[f64], beta: &[f64]) -> SparseResult<Vec<f64>> {
    bisection_tridiag_eigenvalues(alpha, beta)
}
500
501/// Compute all eigenvalues of a symmetric tridiagonal matrix using bisection.
502///
503/// Uses the Sturm sequence property of the characteristic polynomial.
504fn bisection_tridiag_eigenvalues(alpha: &[f64], beta: &[f64]) -> SparseResult<Vec<f64>> {
505    let n = alpha.len();
506    if n == 0 {
507        return Ok(Vec::new());
508    }
509    if n == 1 {
510        return Ok(vec![alpha[0]]);
511    }
512
513    // Compute Gershgorin bounds for the eigenvalue interval
514    let mut lower = f64::MAX;
515    let mut upper = f64::MIN;
516    for i in 0..n {
517        let off_diag = if i > 0 && i - 1 < beta.len() {
518            beta[i - 1].abs()
519        } else {
520            0.0
521        } + if i < beta.len() { beta[i].abs() } else { 0.0 };
522
523        let lo = alpha[i] - off_diag;
524        let hi = alpha[i] + off_diag;
525        if lo < lower {
526            lower = lo;
527        }
528        if hi > upper {
529            upper = hi;
530        }
531    }
532
533    // Expand slightly
534    let range = (upper - lower).max(1e-10);
535    lower -= 0.01 * range;
536    upper += 0.01 * range;
537
538    // Count eigenvalues <= x using Sturm sequence
539    let count_less_eq = |x: f64| -> usize {
540        let mut count = 0usize;
541        let mut d = alpha[0] - x;
542        if d <= 0.0 {
543            count += 1;
544        }
545        for i in 1..n {
546            let b = if i - 1 < beta.len() { beta[i - 1] } else { 0.0 };
547            if d.abs() < 1e-300 {
548                d = 1e-300; // avoid division by zero
549            }
550            d = alpha[i] - x - b * b / d;
551            if d <= 0.0 {
552                count += 1;
553            }
554        }
555        count
556    };
557
558    // Find each eigenvalue by bisection
559    let mut eigenvalues = Vec::with_capacity(n);
560    let tol = 1e-12 * range;
561
562    for k in 0..n {
563        // Find the (k+1)-th eigenvalue (0-indexed: eigenvalue with index k)
564        let mut lo = lower;
565        let mut hi = upper;
566
567        for _ in 0..200 {
568            let mid = 0.5 * (lo + hi);
569            if (hi - lo) < tol {
570                eigenvalues.push(mid);
571                break;
572            }
573            let count = count_less_eq(mid);
574            if count > k {
575                hi = mid;
576            } else {
577                lo = mid;
578            }
579        }
580        if eigenvalues.len() <= k {
581            // Bisection didn't converge — use midpoint
582            eigenvalues.push(0.5 * (lo + hi));
583        }
584    }
585
586    eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
587    Ok(eigenvalues)
588}
589
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// Build the tridiagonal SPD test matrix `tridiag(-0.5, 2, -0.5)` of
    /// size `n` (diagonally dominant, eigenvalues in roughly [1, 3]).
    fn make_spd_csr(n: usize) -> CsrMatrix<f64> {
        // Diagonally dominant tridiagonal SPD matrix: 2 on diagonal, -0.5 on off-diags
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut vals = Vec::new();
        for i in 0..n {
            rows.push(i);
            cols.push(i);
            vals.push(2.0);
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                vals.push(-0.5);
            }
            if i + 1 < n {
                rows.push(i);
                cols.push(i + 1);
                vals.push(-0.5);
            }
        }
        CsrMatrix::new(vals, rows, cols, (n, n)).expect("csr")
    }

    /// Minimal (preconditioned) conjugate-gradient solver, used only to
    /// compare iteration counts with and without a preconditioner.
    /// Returns `(solution, iterations_performed)`.
    #[allow(clippy::type_complexity)]
    fn cg_solve(
        csr: &CsrMatrix<f64>,
        b: &[f64],
        precond: Option<&dyn Fn(&[f64]) -> Vec<f64>>,
        max_iter: usize,
    ) -> (Vec<f64>, usize) {
        let n = b.len();
        let mut x = vec![0.0f64; n];
        let ax = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &x, n);
        let mut r: Vec<f64> = (0..n).map(|i| b[i] - ax[i]).collect();
        let mut z = match precond {
            Some(p) => p(&r),
            None => r.clone(),
        };
        let mut p = z.clone();
        let mut rz: f64 = r.iter().zip(z.iter()).map(|(&ri, &zi)| ri * zi).sum();

        let tol = 1e-10;
        let b_norm: f64 = b.iter().map(|v| v * v).sum::<f64>().sqrt();
        let tol_abs = tol * b_norm.max(1e-15);

        for iter in 0..max_iter {
            let ap = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &p, n);
            let pap: f64 = p.iter().zip(ap.iter()).map(|(&pi, &ai)| pi * ai).sum();
            if pap.abs() < 1e-300 {
                // Breakdown (p^T A p ~ 0): return the current iterate.
                return (x, iter);
            }
            let alpha = rz / pap;
            for i in 0..n {
                x[i] += alpha * p[i];
                r[i] -= alpha * ap[i];
            }
            let r_norm: f64 = r.iter().map(|v| v * v).sum::<f64>().sqrt();
            if r_norm < tol_abs {
                return (x, iter + 1);
            }
            z = match precond {
                Some(pc) => pc(&r),
                None => r.clone(),
            };
            let rz_new: f64 = r.iter().zip(z.iter()).map(|(&ri, &zi)| ri * zi).sum();
            let beta = rz_new / rz.max(1e-300);
            for i in 0..n {
                p[i] = z[i] + beta * p[i];
            }
            rz = rz_new;
        }
        (x, max_iter)
    }

    #[test]
    fn test_poly_precond_chebyshev_reduces_cg_iterations() {
        let n = 20;
        let csr = make_spd_csr(n);
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();

        // CG without preconditioner
        let (_, iters_no_precond) = cg_solve(&csr, &b, None, 200);

        // CG with Chebyshev preconditioner
        let config = PolyPrecondConfig {
            degree: 5,
            lanczos_steps: 15,
            eigenvalue_margin: 1.2,
        };
        let precond = ChebyshevPreconditioner::from_csr(&csr, &config).expect("cheby");
        let precond_fn =
            |r: &[f64]| -> Vec<f64> { precond.apply(r).unwrap_or_else(|_| r.to_vec()) };
        let (_, iters_precond) = cg_solve(&csr, &b, Some(&precond_fn), 200);

        // Preconditioned CG should converge faster (or at least not slower)
        assert!(
            iters_precond <= iters_no_precond + 2,
            "Chebyshev precond: {} iters vs {} without (should be fewer)",
            iters_precond,
            iters_no_precond
        );
    }

    #[test]
    fn test_poly_precond_chebyshev_recurrence_stability() {
        let n = 10;
        let csr = make_spd_csr(n);
        let r: Vec<f64> = vec![1.0; n];

        // Higher degrees stress the three-term recurrence numerically.
        for degree in [5, 10, 15, 20] {
            let precond =
                ChebyshevPreconditioner::with_bounds(&csr, degree, 1.0, 3.0).expect("cheby");
            let z = precond.apply(&r).expect("apply");
            // Should be finite
            assert!(
                z.iter().all(|v| v.is_finite()),
                "Chebyshev degree={} produced non-finite values",
                degree
            );
            // Should be non-trivial
            let norm: f64 = z.iter().map(|v| v * v).sum::<f64>().sqrt();
            assert!(
                norm > 0.0,
                "Chebyshev degree={} produced zero output",
                degree
            );
        }
    }

    #[test]
    fn test_poly_precond_neumann() {
        let n = 10;
        let csr = make_spd_csr(n);
        let r: Vec<f64> = vec![1.0; n];

        let neumann = NeumannPreconditioner::from_csr(&csr, 3, None).expect("neumann");
        let z = neumann.apply(&r).expect("apply");
        assert!(z.iter().all(|v| v.is_finite()));
        let norm: f64 = z.iter().map(|v| v * v).sum::<f64>().sqrt();
        assert!(norm > 0.0);
    }

    #[test]
    fn test_poly_precond_neumann_reduces_residual() {
        let n = 10;
        let csr = make_spd_csr(n);
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();

        let neumann = NeumannPreconditioner::from_csr(&csr, 4, None).expect("neumann");
        let z = neumann.apply(&b).expect("apply");

        // Az should be closer to b than b itself (i.e., z ~ A^{-1}b)
        let az = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &z, n);
        let res_norm: f64 = (0..n).map(|i| (b[i] - az[i]).powi(2)).sum::<f64>().sqrt();
        let b_norm: f64 = b.iter().map(|v| v * v).sum::<f64>().sqrt();

        // The residual should be less than the original b norm
        assert!(
            res_norm < b_norm,
            "Neumann precond residual {} >= b_norm {}",
            res_norm,
            b_norm
        );
    }

    #[test]
    fn test_poly_precond_lanczos_eigenvalue_bounds() {
        let n = 10;
        let csr = make_spd_csr(n);

        let (lmin, lmax) =
            lanczos_eigenvalue_bounds(&csr.indptr, &csr.indices, &csr.data, n, 15, 1.1)
                .expect("lanczos");

        // For tridiag(2, -0.5), eigenvalues are in [1, 3] approximately
        assert!(lmin > 0.0, "lambda_min should be positive: {}", lmin);
        assert!(lmax > lmin, "lambda_max ({}) > lambda_min ({})", lmax, lmin);
        assert!(lmin < 2.0, "lambda_min ({}) should be < 2", lmin);
        assert!(lmax > 1.5, "lambda_max ({}) should be > 1.5", lmax);
    }

    #[test]
    fn test_poly_precond_chebyshev_with_explicit_bounds() {
        let n = 8;
        let csr = make_spd_csr(n);
        let r = vec![1.0; n];

        let precond = ChebyshevPreconditioner::with_bounds(&csr, 5, 1.0, 3.0).expect("cheby");
        assert_eq!(precond.degree(), 5);
        assert_eq!(precond.size(), n);
        let (lmin, lmax) = precond.eigenvalue_bounds();
        assert_relative_eq!(lmin, 1.0, epsilon = 1e-12);
        assert_relative_eq!(lmax, 3.0, epsilon = 1e-12);

        let z = precond.apply(&r).expect("apply");
        assert_eq!(z.len(), n);
    }

    #[test]
    fn test_poly_precond_neumann_explicit_alpha() {
        let n = 5;
        let csr = make_spd_csr(n);
        let r = vec![1.0; n];

        let neumann = NeumannPreconditioner::from_csr(&csr, 2, Some(0.3)).expect("neumann");
        assert_relative_eq!(neumann.alpha(), 0.3, epsilon = 1e-12);
        assert_eq!(neumann.degree(), 2);
        assert_eq!(neumann.size(), n);

        let z = neumann.apply(&r).expect("apply");
        assert!(z.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_poly_precond_error_cases() {
        let csr = make_spd_csr(5);
        // Invalid bounds
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, 3.0, 1.0).is_err());
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, -1.0, 3.0).is_err());
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, 0.0, 3.0).is_err());

        // Invalid alpha
        assert!(NeumannPreconditioner::from_csr(&csr, 2, Some(-1.0)).is_err());
        assert!(NeumannPreconditioner::from_csr(&csr, 2, Some(0.0)).is_err());

        // Non-square matrix
        let rect = CsrMatrix::new(vec![1.0, 2.0], vec![0, 0], vec![0, 1], (1, 3)).expect("rect");
        assert!(ChebyshevPreconditioner::with_bounds(&rect, 5, 1.0, 3.0).is_err());
        assert!(NeumannPreconditioner::from_csr(&rect, 2, None).is_err());
    }
}
825}