1use crate::csr::CsrMatrix;
21use crate::error::{SparseError, SparseResult};
22
/// Configuration used when building a polynomial preconditioner from a matrix.
#[derive(Debug, Clone)]
pub struct PolyPrecondConfig {
    /// Polynomial degree used by the preconditioner's `apply`.
    pub degree: usize,
    /// Number of Lanczos iterations used to estimate eigenvalue bounds.
    pub lanczos_steps: usize,
    /// Safety factor used to widen the estimated eigenvalue interval
    /// (lambda_min is divided by it, lambda_max multiplied by it).
    pub eigenvalue_margin: f64,
}
37
38impl Default for PolyPrecondConfig {
39 fn default() -> Self {
40 Self {
41 degree: 10,
42 lanczos_steps: 20,
43 eigenvalue_margin: 1.1,
44 }
45 }
46}
47
/// Chebyshev polynomial preconditioner built on an eigenvalue interval
/// `[lambda_min, lambda_max]`.
///
/// Holds a private copy of the matrix in CSR form; `apply` runs a
/// Chebyshev semi-iteration against it.
pub struct ChebyshevPreconditioner {
    // Polynomial degree applied by `apply`.
    degree: usize,
    // Lower bound of the eigenvalue interval.
    lambda_min: f64,
    // Upper bound of the eigenvalue interval.
    lambda_max: f64,
    // CSR row pointers (length n + 1).
    indptr: Vec<usize>,
    // CSR column indices.
    indices: Vec<usize>,
    // CSR nonzero values.
    data: Vec<f64>,
    // Matrix dimension (square, n x n).
    n: usize,
}
71
72impl ChebyshevPreconditioner {
73 pub fn from_csr(csr: &CsrMatrix<f64>, config: &PolyPrecondConfig) -> SparseResult<Self> {
82 let (nrows, ncols) = csr.shape();
83 if nrows != ncols {
84 return Err(SparseError::ValueError(
85 "ChebyshevPreconditioner requires a square matrix".to_string(),
86 ));
87 }
88 if nrows == 0 {
89 return Err(SparseError::ValueError(
90 "ChebyshevPreconditioner requires a non-empty matrix".to_string(),
91 ));
92 }
93
94 let n = nrows;
95
96 let (lambda_min, lambda_max) = lanczos_eigenvalue_bounds(
98 &csr.indptr,
99 &csr.indices,
100 &csr.data,
101 n,
102 config.lanczos_steps,
103 config.eigenvalue_margin,
104 )?;
105
106 if lambda_min <= 0.0 {
107 return Err(SparseError::ValueError(format!(
108 "Estimated lambda_min={:.6e} <= 0; matrix may not be SPD",
109 lambda_min
110 )));
111 }
112
113 Ok(Self {
114 degree: config.degree,
115 lambda_min,
116 lambda_max,
117 indptr: csr.indptr.clone(),
118 indices: csr.indices.clone(),
119 data: csr.data.clone(),
120 n,
121 })
122 }
123
124 pub fn with_bounds(
133 csr: &CsrMatrix<f64>,
134 degree: usize,
135 lambda_min: f64,
136 lambda_max: f64,
137 ) -> SparseResult<Self> {
138 let (nrows, ncols) = csr.shape();
139 if nrows != ncols {
140 return Err(SparseError::ValueError(
141 "ChebyshevPreconditioner requires a square matrix".to_string(),
142 ));
143 }
144 if lambda_min >= lambda_max {
145 return Err(SparseError::ValueError(format!(
146 "lambda_min ({}) must be < lambda_max ({})",
147 lambda_min, lambda_max
148 )));
149 }
150 if lambda_min <= 0.0 {
151 return Err(SparseError::ValueError(format!(
152 "lambda_min ({}) must be > 0 for SPD systems",
153 lambda_min
154 )));
155 }
156
157 Ok(Self {
158 degree,
159 lambda_min,
160 lambda_max,
161 indptr: csr.indptr.clone(),
162 indices: csr.indices.clone(),
163 data: csr.data.clone(),
164 n: nrows,
165 })
166 }
167
168 pub fn apply(&self, r: &[f64]) -> SparseResult<Vec<f64>> {
173 if r.len() != self.n {
174 return Err(SparseError::DimensionMismatch {
175 expected: self.n,
176 found: r.len(),
177 });
178 }
179
180 let n = self.n;
181 let theta = 0.5 * (self.lambda_max + self.lambda_min); let delta = 0.5 * (self.lambda_max - self.lambda_min); let sigma = theta / delta.max(1e-300);
184
185 if self.degree == 0 {
187 return Ok(r.iter().map(|&v| v / theta).collect());
188 }
189
190 let mut z: Vec<f64> = r.iter().map(|&v| v / theta).collect();
193
194 if self.degree == 1 {
195 return Ok(z);
196 }
197
198 let mut z_prev = vec![0.0f64; n];
199 let mut rho = 1.0 / sigma;
200
201 for _ in 1..self.degree {
202 let az = csr_matvec(&self.indptr, &self.indices, &self.data, &z, n);
204 let res: Vec<f64> = (0..n).map(|i| r[i] - az[i]).collect();
205
206 let rho_new = 1.0 / (2.0 * sigma - rho);
207 let coeff_r = 2.0 * rho_new / delta;
208 let coeff_y = rho_new * rho;
209
210 let z_next: Vec<f64> = (0..n)
211 .map(|i| z[i] + coeff_r * res[i] + coeff_y * (z[i] - z_prev[i]))
212 .collect();
213
214 z_prev = z;
215 z = z_next;
216 rho = rho_new;
217 }
218
219 Ok(z)
220 }
221
222 pub fn eigenvalue_bounds(&self) -> (f64, f64) {
224 (self.lambda_min, self.lambda_max)
225 }
226
227 pub fn degree(&self) -> usize {
229 self.degree
230 }
231
232 pub fn size(&self) -> usize {
234 self.n
235 }
236}
237
/// Truncated Neumann-series preconditioner:
/// M^{-1} ~= alpha * sum_{k=0}^{degree} (I - alpha*A)^k.
///
/// Holds a private copy of the matrix in CSR form.
pub struct NeumannPreconditioner {
    // Number of series terms beyond the zeroth (Horner passes in `apply`).
    degree: usize,
    // Scaling factor for the matrix (validated or derived in `from_csr`).
    alpha: f64,
    // CSR row pointers (length n + 1).
    indptr: Vec<usize>,
    // CSR column indices.
    indices: Vec<usize>,
    // CSR nonzero values.
    data: Vec<f64>,
    // Matrix dimension (square, n x n).
    n: usize,
}
259
260impl NeumannPreconditioner {
261 pub fn from_csr(csr: &CsrMatrix<f64>, degree: usize, alpha: Option<f64>) -> SparseResult<Self> {
269 let (nrows, ncols) = csr.shape();
270 if nrows != ncols {
271 return Err(SparseError::ValueError(
272 "NeumannPreconditioner requires a square matrix".to_string(),
273 ));
274 }
275 if nrows == 0 {
276 return Err(SparseError::ValueError(
277 "NeumannPreconditioner requires a non-empty matrix".to_string(),
278 ));
279 }
280
281 let n = nrows;
282
283 let alpha = match alpha {
284 Some(a) => {
285 if a <= 0.0 {
286 return Err(SparseError::ValueError(
287 "alpha must be positive".to_string(),
288 ));
289 }
290 a
291 }
292 None => {
293 let inf_norm: f64 = (0..n)
295 .map(|i| {
296 csr.data[csr.indptr[i]..csr.indptr[i + 1]]
297 .iter()
298 .map(|v| v.abs())
299 .sum::<f64>()
300 })
301 .fold(0.0f64, f64::max);
302 if inf_norm > 1e-300 {
303 1.0 / inf_norm
304 } else {
305 1.0
306 }
307 }
308 };
309
310 Ok(Self {
311 degree,
312 alpha,
313 indptr: csr.indptr.clone(),
314 indices: csr.indices.clone(),
315 data: csr.data.clone(),
316 n,
317 })
318 }
319
320 pub fn apply(&self, r: &[f64]) -> SparseResult<Vec<f64>> {
325 if r.len() != self.n {
326 return Err(SparseError::DimensionMismatch {
327 expected: self.n,
328 found: r.len(),
329 });
330 }
331
332 let n = self.n;
333 let alpha = self.alpha;
334
335 let b_apply = |v: &[f64]| -> Vec<f64> {
337 let av = csr_matvec(&self.indptr, &self.indices, &self.data, v, n);
338 let mut bv = v.to_vec();
339 for (bi, ai) in bv.iter_mut().zip(av.iter()) {
340 *bi -= alpha * ai;
341 }
342 bv
343 };
344
345 let mut sum = r.to_vec();
347 for _ in 0..self.degree {
348 let b_sum = b_apply(&sum);
349 for (si, (&ri, &bi)) in sum.iter_mut().zip(r.iter().zip(b_sum.iter())) {
350 *si = ri + bi;
351 }
352 }
353
354 for v in sum.iter_mut() {
356 *v *= alpha;
357 }
358
359 Ok(sum)
360 }
361
362 pub fn alpha(&self) -> f64 {
364 self.alpha
365 }
366
367 pub fn degree(&self) -> usize {
369 self.degree
370 }
371
372 pub fn size(&self) -> usize {
374 self.n
375 }
376}
377
/// Sparse matrix-vector product y = A x for an n-row CSR matrix described
/// by (`indptr`, `indices`, `data`).
fn csr_matvec(indptr: &[usize], indices: &[usize], data: &[f64], x: &[f64], n: usize) -> Vec<f64> {
    (0..n)
        .map(|row| {
            // Nonzeros of this row live in data[indptr[row]..indptr[row+1]].
            let span = indptr[row]..indptr[row + 1];
            indices[span.clone()]
                .iter()
                .zip(&data[span])
                .map(|(&col, &coeff)| coeff * x[col])
                .sum()
        })
        .collect()
}
394
/// Estimates the extreme eigenvalues of a CSR matrix with a short Lanczos
/// iteration (no reorthogonalization), then widens the resulting interval
/// by `margin` (lambda_min divided by it, lambda_max multiplied by it).
///
/// Returns `(lambda_min, lambda_max)` suitable for seeding a Chebyshev
/// preconditioner; the lower bound is floored away from zero so a tiny or
/// negative estimate never propagates as-is.
///
/// NOTE(review): the tridiagonal projection is only meaningful for a
/// symmetric matrix — confirm callers pass symmetric input.
///
/// # Errors
/// Fails when `n == 0` or when `num_steps == 0` (no iterations produced).
fn lanczos_eigenvalue_bounds(
    indptr: &[usize],
    indices: &[usize],
    data: &[f64],
    n: usize,
    num_steps: usize,
    margin: f64,
) -> SparseResult<(f64, f64)> {
    if n == 0 {
        return Err(SparseError::ValueError(
            "Cannot estimate eigenvalues of empty matrix".to_string(),
        ));
    }

    // Never run more steps than the dimension allows.
    let steps = num_steps.min(n);

    // Deterministic unit-norm start vector (uniform entries).
    let inv_sqrt_n = 1.0 / (n as f64).sqrt();
    let mut q = vec![inv_sqrt_n; n];

    // Tridiagonal coefficients: alpha = diagonal, beta = off-diagonal.
    let mut alpha_vec: Vec<f64> = Vec::with_capacity(steps);
    let mut beta_vec: Vec<f64> = Vec::with_capacity(steps);

    let mut q_prev = vec![0.0f64; n];

    for j in 0..steps {
        // w = A q
        let mut w = csr_matvec(indptr, indices, data, &q, n);

        // alpha_j = q^T A q (Rayleigh quotient of the current vector).
        let alpha_j: f64 = q.iter().zip(w.iter()).map(|(&qi, &wi)| qi * wi).sum();
        alpha_vec.push(alpha_j);

        // Three-term recurrence: w <- w - alpha_j q - beta_{j-1} q_prev.
        let beta_prev = if j > 0 { beta_vec[j - 1] } else { 0.0 };
        for i in 0..n {
            w[i] -= alpha_j * q[i] + beta_prev * q_prev[i];
        }

        let beta_j: f64 = w.iter().map(|&v| v * v).sum::<f64>().sqrt();

        // Breakdown: Krylov space exhausted (an invariant subspace was hit);
        // the tridiagonal built so far already contains the Ritz values.
        if beta_j < 1e-14 {
            break;
        }
        beta_vec.push(beta_j);
        // NOTE(review): when the loop runs to completion, beta_vec ends up
        // one entry longer than the tridiagonal strictly needs; the
        // eigensolver only uses the extra trailing beta to (harmlessly)
        // widen its initial Gershgorin bracket.

        q_prev = q;
        q = w.iter().map(|&v| v / beta_j).collect();
    }

    let k = alpha_vec.len();
    if k == 0 {
        return Err(SparseError::ValueError(
            "Lanczos produced no iterations".to_string(),
        ));
    }

    let eigenvalues = tridiagonal_eigenvalues(&alpha_vec, &beta_vec)?;

    // Extreme Ritz values of the projected tridiagonal.
    let mut lambda_min = f64::MAX;
    let mut lambda_max = f64::MIN;
    for &ev in &eigenvalues {
        if ev < lambda_min {
            lambda_min = ev;
        }
        if ev > lambda_max {
            lambda_max = ev;
        }
    }

    // Widen the interval so the true spectrum is (likely) contained.
    let lambda_min_safe = lambda_min / margin;
    let lambda_max_safe = lambda_max * margin;

    // Keep the lower bound strictly positive even if the estimate collapses
    // to zero or goes negative from round-off.
    let lambda_min_final = if lambda_min_safe > 1e-15 {
        lambda_min_safe
    } else {
        lambda_min.abs().max(1e-10) / margin
    };

    Ok((lambda_min_final, lambda_max_safe))
}
492
/// Computes all eigenvalues of the symmetric tridiagonal matrix with
/// diagonal `alpha` and off-diagonal `beta`, sorted ascending.
///
/// Currently delegates to the Sturm-sequence bisection implementation.
fn tridiagonal_eigenvalues(alpha: &[f64], beta: &[f64]) -> SparseResult<Vec<f64>> {
    bisection_tridiag_eigenvalues(alpha, beta)
}
500
501fn bisection_tridiag_eigenvalues(alpha: &[f64], beta: &[f64]) -> SparseResult<Vec<f64>> {
505 let n = alpha.len();
506 if n == 0 {
507 return Ok(Vec::new());
508 }
509 if n == 1 {
510 return Ok(vec![alpha[0]]);
511 }
512
513 let mut lower = f64::MAX;
515 let mut upper = f64::MIN;
516 for i in 0..n {
517 let off_diag = if i > 0 && i - 1 < beta.len() {
518 beta[i - 1].abs()
519 } else {
520 0.0
521 } + if i < beta.len() { beta[i].abs() } else { 0.0 };
522
523 let lo = alpha[i] - off_diag;
524 let hi = alpha[i] + off_diag;
525 if lo < lower {
526 lower = lo;
527 }
528 if hi > upper {
529 upper = hi;
530 }
531 }
532
533 let range = (upper - lower).max(1e-10);
535 lower -= 0.01 * range;
536 upper += 0.01 * range;
537
538 let count_less_eq = |x: f64| -> usize {
540 let mut count = 0usize;
541 let mut d = alpha[0] - x;
542 if d <= 0.0 {
543 count += 1;
544 }
545 for i in 1..n {
546 let b = if i - 1 < beta.len() { beta[i - 1] } else { 0.0 };
547 if d.abs() < 1e-300 {
548 d = 1e-300; }
550 d = alpha[i] - x - b * b / d;
551 if d <= 0.0 {
552 count += 1;
553 }
554 }
555 count
556 };
557
558 let mut eigenvalues = Vec::with_capacity(n);
560 let tol = 1e-12 * range;
561
562 for k in 0..n {
563 let mut lo = lower;
565 let mut hi = upper;
566
567 for _ in 0..200 {
568 let mid = 0.5 * (lo + hi);
569 if (hi - lo) < tol {
570 eigenvalues.push(mid);
571 break;
572 }
573 let count = count_less_eq(mid);
574 if count > k {
575 hi = mid;
576 } else {
577 lo = mid;
578 }
579 }
580 if eigenvalues.len() <= k {
581 eigenvalues.push(0.5 * (lo + hi));
583 }
584 }
585
586 eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
587 Ok(eigenvalues)
588}
589
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// Builds a small SPD tridiagonal test matrix: 2.0 on the diagonal and
    /// -0.5 on both off-diagonals (strictly diagonally dominant).
    fn make_spd_csr(n: usize) -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut vals = Vec::new();
        for i in 0..n {
            rows.push(i);
            cols.push(i);
            vals.push(2.0);
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                vals.push(-0.5);
            }
            if i + 1 < n {
                rows.push(i);
                cols.push(i + 1);
                vals.push(-0.5);
            }
        }
        CsrMatrix::new(vals, rows, cols, (n, n)).expect("csr")
    }

    /// Minimal (optionally preconditioned) conjugate-gradient solver used
    /// to compare iteration counts with and without a preconditioner.
    /// Returns the solution and the number of iterations performed.
    #[allow(clippy::type_complexity)]
    fn cg_solve(
        csr: &CsrMatrix<f64>,
        b: &[f64],
        precond: Option<&dyn Fn(&[f64]) -> Vec<f64>>,
        max_iter: usize,
    ) -> (Vec<f64>, usize) {
        let n = b.len();
        let mut x = vec![0.0f64; n];
        let ax = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &x, n);
        let mut r: Vec<f64> = (0..n).map(|i| b[i] - ax[i]).collect();
        // z = M^{-1} r when a preconditioner is supplied, else plain CG.
        let mut z = match precond {
            Some(p) => p(&r),
            None => r.clone(),
        };
        let mut p = z.clone();
        let mut rz: f64 = r.iter().zip(z.iter()).map(|(&ri, &zi)| ri * zi).sum();

        // Relative tolerance on the residual norm.
        let tol = 1e-10;
        let b_norm: f64 = b.iter().map(|v| v * v).sum::<f64>().sqrt();
        let tol_abs = tol * b_norm.max(1e-15);

        for iter in 0..max_iter {
            let ap = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &p, n);
            let pap: f64 = p.iter().zip(ap.iter()).map(|(&pi, &ai)| pi * ai).sum();
            // Breakdown guard: direction has (numerically) zero energy norm.
            if pap.abs() < 1e-300 {
                return (x, iter);
            }
            let alpha = rz / pap;
            for i in 0..n {
                x[i] += alpha * p[i];
                r[i] -= alpha * ap[i];
            }
            let r_norm: f64 = r.iter().map(|v| v * v).sum::<f64>().sqrt();
            if r_norm < tol_abs {
                return (x, iter + 1);
            }
            z = match precond {
                Some(pc) => pc(&r),
                None => r.clone(),
            };
            let rz_new: f64 = r.iter().zip(z.iter()).map(|(&ri, &zi)| ri * zi).sum();
            let beta = rz_new / rz.max(1e-300);
            for i in 0..n {
                p[i] = z[i] + beta * p[i];
            }
            rz = rz_new;
        }
        (x, max_iter)
    }

    #[test]
    fn test_poly_precond_chebyshev_reduces_cg_iterations() {
        let n = 20;
        let csr = make_spd_csr(n);
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();

        let (_, iters_no_precond) = cg_solve(&csr, &b, None, 200);

        let config = PolyPrecondConfig {
            degree: 5,
            lanczos_steps: 15,
            eigenvalue_margin: 1.2,
        };
        let precond = ChebyshevPreconditioner::from_csr(&csr, &config).expect("cheby");
        let precond_fn =
            |r: &[f64]| -> Vec<f64> { precond.apply(r).unwrap_or_else(|_| r.to_vec()) };
        let (_, iters_precond) = cg_solve(&csr, &b, Some(&precond_fn), 200);

        // Allows a small slack (+2) since the fixture is already
        // well-conditioned and the preconditioner may not strictly win.
        assert!(
            iters_precond <= iters_no_precond + 2,
            "Chebyshev precond: {} iters vs {} without (should be fewer)",
            iters_precond,
            iters_no_precond
        );
    }

    #[test]
    fn test_poly_precond_chebyshev_recurrence_stability() {
        let n = 10;
        let csr = make_spd_csr(n);
        let r: Vec<f64> = vec![1.0; n];

        // Higher degrees stress the three-term recurrence; the output must
        // stay finite and non-trivial for all of them.
        for degree in [5, 10, 15, 20] {
            let precond =
                ChebyshevPreconditioner::with_bounds(&csr, degree, 1.0, 3.0).expect("cheby");
            let z = precond.apply(&r).expect("apply");
            assert!(
                z.iter().all(|v| v.is_finite()),
                "Chebyshev degree={} produced non-finite values",
                degree
            );
            let norm: f64 = z.iter().map(|v| v * v).sum::<f64>().sqrt();
            assert!(
                norm > 0.0,
                "Chebyshev degree={} produced zero output",
                degree
            );
        }
    }

    #[test]
    fn test_poly_precond_neumann() {
        let n = 10;
        let csr = make_spd_csr(n);
        let r: Vec<f64> = vec![1.0; n];

        let neumann = NeumannPreconditioner::from_csr(&csr, 3, None).expect("neumann");
        let z = neumann.apply(&r).expect("apply");
        assert!(z.iter().all(|v| v.is_finite()));
        let norm: f64 = z.iter().map(|v| v * v).sum::<f64>().sqrt();
        assert!(norm > 0.0);
    }

    #[test]
    fn test_poly_precond_neumann_reduces_residual() {
        let n = 10;
        let csr = make_spd_csr(n);
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();

        let neumann = NeumannPreconditioner::from_csr(&csr, 4, None).expect("neumann");
        let z = neumann.apply(&b).expect("apply");

        // z should approximate A^{-1} b well enough that the residual of
        // A z = b is smaller than ||b||.
        let az = csr_matvec(&csr.indptr, &csr.indices, &csr.data, &z, n);
        let res_norm: f64 = (0..n).map(|i| (b[i] - az[i]).powi(2)).sum::<f64>().sqrt();
        let b_norm: f64 = b.iter().map(|v| v * v).sum::<f64>().sqrt();

        assert!(
            res_norm < b_norm,
            "Neumann precond residual {} >= b_norm {}",
            res_norm,
            b_norm
        );
    }

    #[test]
    fn test_poly_precond_lanczos_eigenvalue_bounds() {
        let n = 10;
        let csr = make_spd_csr(n);

        let (lmin, lmax) =
            lanczos_eigenvalue_bounds(&csr.indptr, &csr.indices, &csr.data, n, 15, 1.1)
                .expect("lanczos");

        // The fixture's spectrum lies in (1, 3); check loose brackets only.
        assert!(lmin > 0.0, "lambda_min should be positive: {}", lmin);
        assert!(lmax > lmin, "lambda_max ({}) > lambda_min ({})", lmax, lmin);
        assert!(lmin < 2.0, "lambda_min ({}) should be < 2", lmin);
        assert!(lmax > 1.5, "lambda_max ({}) should be > 1.5", lmax);
    }

    #[test]
    fn test_poly_precond_chebyshev_with_explicit_bounds() {
        let n = 8;
        let csr = make_spd_csr(n);
        let r = vec![1.0; n];

        let precond = ChebyshevPreconditioner::with_bounds(&csr, 5, 1.0, 3.0).expect("cheby");
        assert_eq!(precond.degree(), 5);
        assert_eq!(precond.size(), n);
        let (lmin, lmax) = precond.eigenvalue_bounds();
        assert_relative_eq!(lmin, 1.0, epsilon = 1e-12);
        assert_relative_eq!(lmax, 3.0, epsilon = 1e-12);

        let z = precond.apply(&r).expect("apply");
        assert_eq!(z.len(), n);
    }

    #[test]
    fn test_poly_precond_neumann_explicit_alpha() {
        let n = 5;
        let csr = make_spd_csr(n);
        let r = vec![1.0; n];

        let neumann = NeumannPreconditioner::from_csr(&csr, 2, Some(0.3)).expect("neumann");
        assert_relative_eq!(neumann.alpha(), 0.3, epsilon = 1e-12);
        assert_eq!(neumann.degree(), 2);
        assert_eq!(neumann.size(), n);

        let z = neumann.apply(&r).expect("apply");
        assert!(z.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_poly_precond_error_cases() {
        let csr = make_spd_csr(5);
        // Invalid eigenvalue bounds.
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, 3.0, 1.0).is_err());
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, -1.0, 3.0).is_err());
        assert!(ChebyshevPreconditioner::with_bounds(&csr, 5, 0.0, 3.0).is_err());

        // Invalid alpha.
        assert!(NeumannPreconditioner::from_csr(&csr, 2, Some(-1.0)).is_err());
        assert!(NeumannPreconditioner::from_csr(&csr, 2, Some(0.0)).is_err());

        // Non-square matrices are rejected by both preconditioners.
        let rect = CsrMatrix::new(vec![1.0, 2.0], vec![0, 0], vec![0, 1], (1, 3)).expect("rect");
        assert!(ChebyshevPreconditioner::with_bounds(&rect, 5, 1.0, 3.0).is_err());
        assert!(NeumannPreconditioner::from_csr(&rect, 2, None).is_err());
    }
}