#![allow(unused_variables)]
#![allow(unused_assignments)]
#![allow(unused_mut)]
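
//! Iterative solvers for sparse linear systems built on the
//! [`LinearOperator`] abstraction: CG, BiCG, BiCGSTAB, restarted GMRES,
//! LSQR, LSMR, and TFQMR, plus the option structs that configure them.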

use crate::error::{SparseError, SparseResult};
use crate::linalg::interface::LinearOperator;
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::iter::Sum;

/// Outcome of an iterative solve: the approximate solution together with the
/// iteration count, final residual norm, and a human-readable status message.
#[derive(Debug, Clone)]
pub struct IterationResult<F> {
    /// Approximate solution vector.
    pub x: Vec<F>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Norm of the final residual.
    pub residual_norm: F,
    /// Whether the requested tolerance was reached.
    pub converged: bool,
    /// Status or breakdown message.
    pub message: String,
}

/// Options controlling the Conjugate Gradient solver.
pub struct CGOptions<F> {
    /// Maximum number of iterations.
    pub max_iter: usize,
    /// Relative convergence tolerance (scaled by the norm of `b`).
    pub rtol: F,
    /// Absolute convergence tolerance.
    pub atol: F,
    /// Optional initial guess; defaults to the zero vector.
    pub x0: Option<Vec<F>>,
    /// Optional preconditioner applied as `M * r`.
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for CGOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            preconditioner: None,
        }
    }
}
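
/// Conjugate Gradient (CG) solver for square, symmetric positive definite
/// systems `A x = b`, with optional preconditioning.
///
/// Iteration stops once the residual norm falls below
/// `max(atol, rtol * ||b||)` or `max_iter` is reached; breakdown
/// (non-positive curvature `p^T A p`) is reported via the result message.
///
/// A minimal usage sketch, mirroring the unit tests at the bottom of this
/// module (`IdentityOperator` is assumed to come from
/// `crate::linalg::interface`):
///
/// ```ignore
/// let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
/// let b = vec![1.0, 2.0, 3.0];
/// let result = cg(&identity, &b, CGOptions::default())?;
/// assert!(result.converged);
/// ```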
#[allow(dead_code)]
pub fn cg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: CGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for CG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    // Initial guess: user-provided x0 or the zero vector.
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };

    // Initial residual r = b - A*x.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    // Preconditioned residual z = M*r (or z = r when no preconditioner is set).
    let mut z = if let Some(m) = &options.preconditioner {
        m.matvec(&r)?
    } else {
        r.clone()
    };

    let mut p = z.clone();
    let mut rz_old = dot(&r, &z);

    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    let mut iterations = 0;
    while iterations < options.max_iter {
        let ap = a.matvec(&p)?;

        // Curvature along the search direction; must be positive for an SPD matrix.
        let pap = dot(&p, &ap);
        if pap <= F::sparse_zero() {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "Matrix not positive definite (p^T*A*p <= 0)".to_string(),
            });
        }
        let alpha = rz_old / pap;

        // Update the solution and the residual.
        for i in 0..n {
            x[i] += alpha * p[i];
        }

        for i in 0..n {
            r[i] -= alpha * ap[i];
        }

        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Re-apply the preconditioner and build the next search direction.
        z = if let Some(m) = &options.preconditioner {
            m.matvec(&r)?
        } else {
            r.clone()
        };

        let rz_new = dot(&r, &z);
        let beta = rz_new / rz_old;

        for i in 0..n {
            p[i] = z[i] + beta * p[i];
        }

        rz_old = rz_new;
        iterations += 1;
    }

    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options controlling the BiConjugate Gradient (BiCG) solver.
pub struct BiCGOptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for BiCGOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            left_preconditioner: None,
            right_preconditioner: None,
        }
    }
}
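
/// BiConjugate Gradient (BiCG) solver for general square systems `A x = b`.
///
/// Unlike CG, BiCG also iterates a shadow residual driven by the adjoint
/// product (via `rmatvec`), so the operator must support it. Optional left
/// and right preconditioners are applied as in the code below.
///
/// A minimal usage sketch (assumes the same test operators used elsewhere in
/// this module):
///
/// ```ignore
/// let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
/// let b = vec![1.0, 2.0, 3.0];
/// let result = bicg(&identity, &b, BiCGOptions::default())?;
/// assert!(result.converged);
/// ```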
#[allow(dead_code)]
pub fn bicg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };

    // Initial residual r = b - A*x and shadow residual r_star.
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let mut r_star = r.clone();

    let mut z = if let Some(m1) = &options.left_preconditioner {
        m1.matvec(&r)?
    } else {
        r.clone()
    };

    let mut z_star = if let Some(m2) = &options.right_preconditioner {
        m2.matvec(&r_star)?
    } else {
        r_star.clone()
    };

    let mut p = z.clone();
    let mut p_star = z_star.clone();

    let mut rho_old = dot(&r_star, &z);

    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    let mut iterations = 0;
    while iterations < options.max_iter {
        let mut q = a.matvec(&p)?;
        if let Some(m2) = &options.right_preconditioner {
            q = m2.matvec(&q)?;
        }

        // The dual direction uses the adjoint operator via rmatvec.
        let mut q_star = a.rmatvec(&p_star)?;
        if let Some(m1) = &options.left_preconditioner {
            q_star = m1.matvec(&q_star)?;
        }

        let alpha_den = dot(&p_star, &q);
        if alpha_den.abs()
            < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
        {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "BiCG breakdown: (p_star, q) ≈ 0".to_string(),
            });
        }
        let alpha = rho_old / alpha_den;

        for i in 0..n {
            x[i] += alpha * p[i];
            r[i] -= alpha * q[i];
            r_star[i] -= alpha * q_star[i];
        }

        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        z = if let Some(m1) = &options.left_preconditioner {
            m1.matvec(&r)?
        } else {
            r.clone()
        };

        z_star = if let Some(m2) = &options.right_preconditioner {
            m2.matvec(&r_star)?
        } else {
            r_star.clone()
        };

        let rho = dot(&r_star, &z);
        if rho.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: false,
                message: "BiCG breakdown: rho ≈ 0".to_string(),
            });
        }

        let beta = rho / rho_old;

        for i in 0..n {
            p[i] = z[i] + beta * p[i];
            p_star[i] = z_star[i] + beta * p_star[i];
        }

        rho_old = rho;
        iterations += 1;
    }

    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options controlling the BiCGSTAB solver.
pub struct BiCGSTABOptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for BiCGSTABOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            left_preconditioner: None,
            right_preconditioner: None,
        }
    }
}

/// Result type returned by [`bicgstab`]; identical to [`IterationResult`].
pub type BiCGSTABResult<F> = IterationResult<F>;
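
/// BiCGSTAB (stabilized BiConjugate Gradient) solver for general square
/// systems `A x = b`; it avoids the transpose products required by BiCG.
///
/// A minimal usage sketch, mirroring `test_bicgstab_non_symmetric` below
/// (`CsrMatrix` and `AsLinearOperator` are assumed to be available from this
/// crate; `data`, `rows`, `cols`, and `b` stand for the caller's inputs):
///
/// ```ignore
/// let matrix = CsrMatrix::new(data, rows, cols, (3, 3))?;
/// let op = matrix.as_linear_operator();
/// let result = bicgstab(op.as_ref(), &b, BiCGSTABOptions::default())?;
/// assert!(result.converged);
/// ```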
#[allow(dead_code)]
pub fn bicgstab<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGSTABOptions<F>,
) -> SparseResult<BiCGSTABResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCGSTAB solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };

    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    if rnorm <= tolerance {
        return Ok(BiCGSTABResult {
            x,
            iterations: 0,
            residual_norm: rnorm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }

    // Fixed shadow residual used in the bi-orthogonality conditions.
    let r_hat = r.clone();

    let mut v = vec![F::sparse_zero(); n];
    let mut p = vec![F::sparse_zero(); n];
    let mut y = vec![F::sparse_zero(); n];
    let mut s = vec![F::sparse_zero(); n];
    let mut t: Vec<F>;

    let mut rho_old = F::sparse_one();
    let mut alpha = F::sparse_zero();
    let mut omega = F::sparse_one();

    let mut iterations = 0;
    while iterations < options.max_iter {
        let rho = dot(&r_hat, &r);

        if rho.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: rho ≈ 0".to_string(),
            });
        }

        let beta = (rho / rho_old) * (alpha / omega);

        for i in 0..n {
            p[i] = r[i] + beta * (p[i] - omega * v[i]);
        }

        let p_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&p)?,
            None => p.clone(),
        };

        v = a.matvec(&p_tilde)?;

        if let Some(m2) = &options.right_preconditioner {
            v = m2.matvec(&v)?;
        }

        let den = dot(&r_hat, &v);
        if den.abs() < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (r_hat, v) ≈ 0".to_string(),
            });
        }
        alpha = rho / den;

        if !alpha.is_finite() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: alpha is not finite".to_string(),
            });
        }

        // First half-step: y = x + alpha*p_tilde, s = r - alpha*v.
        for i in 0..n {
            y[i] = x[i] + alpha * p_tilde[i];
            s[i] = r[i] - alpha * v[i];
        }

        let snorm = norm2(&s);
        if snorm <= tolerance {
            x = y;

            if let Some(m2) = &options.right_preconditioner {
                x = m2.matvec(&x)?;
            }

            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: snorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        let s_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&s)?,
            None => s.clone(),
        };

        t = a.matvec(&s_tilde)?;

        if let Some(m2) = &options.right_preconditioner {
            t = m2.matvec(&t)?;
        }

        let ts = dot(&t, &s);
        let tt = dot(&t, &t);

        if tt < F::epsilon() * F::from(10).expect("Failed to convert constant to float") {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (t, t) ≈ 0".to_string(),
            });
        }

        omega = ts / tt;

        if !omega.is_finite()
            || omega.abs()
                < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
        {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: omega is not finite or too small".to_string(),
            });
        }

        // Second (stabilization) half-step: x = y + omega*s_tilde, r = s - omega*t.
        for i in 0..n {
            x[i] = y[i] + omega * s_tilde[i];
            r[i] = s[i] - omega * t[i];
        }

        if let Some(m2) = &options.right_preconditioner {
            x = m2.matvec(&x)?;
        }

        rnorm = norm2(&r);

        if rnorm <= tolerance {
            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        rho_old = rho;
        iterations += 1;
    }

    Ok(BiCGSTABResult {
        x,
        iterations,
        residual_norm: rnorm,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options controlling the restarted GMRES solver.
pub struct GMRESOptions<F> {
    pub max_iter: usize,
    /// Krylov subspace dimension before a restart (capped at the problem size).
    pub restart: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for GMRESOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            restart: 30,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            preconditioner: None,
        }
    }
}
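
/// Restarted GMRES(m) solver for general square systems `A x = b`.
///
/// The Krylov basis is rebuilt every `restart` inner iterations; the small
/// Hessenberg least-squares problem is solved with Givens rotations, and the
/// preconditioner (when present) is applied on the left.
///
/// A minimal usage sketch (assumes the same test operators used elsewhere in
/// this module):
///
/// ```ignore
/// let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let result = gmres(&diag, &b, GMRESOptions::default())?;
/// assert!(result.converged);
/// ```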
#[allow(dead_code)]
pub fn gmres<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: GMRESOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for GMRES solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;
    let restart = options.restart.min(n);

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };

    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    if let Some(m) = &options.preconditioner {
        r = m.matvec(&r)?;
    }

    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    let mut outer_iterations = 0;

    while outer_iterations < options.max_iter && rnorm > tolerance {
        // Arnoldi basis, Hessenberg matrix, and Givens rotation bookkeeping.
        let mut v = vec![vec![F::sparse_zero(); n]; restart + 1];
        let mut h = vec![vec![F::sparse_zero(); restart]; restart + 1];
        let mut cs = vec![F::sparse_zero(); restart];
        let mut sn = vec![F::sparse_zero(); restart];
        let mut s = vec![F::sparse_zero(); restart + 1];

        v[0] = r.iter().map(|&ri| ri / rnorm).collect();
        s[0] = rnorm;

        let mut inner_iter = 0;
        while inner_iter < restart && inner_iter + outer_iterations < options.max_iter {
            let mut w = a.matvec(&v[inner_iter])?;

            if let Some(m) = &options.preconditioner {
                w = m.matvec(&w)?;
            }

            // Modified Gram-Schmidt orthogonalization against the current basis.
            for i in 0..=inner_iter {
                h[i][inner_iter] = dot(&v[i], &w);
                for (k, w_elem) in w.iter_mut().enumerate().take(n) {
                    *w_elem -= h[i][inner_iter] * v[i][k];
                }
            }

            h[inner_iter + 1][inner_iter] = norm2(&w);

            if h[inner_iter + 1][inner_iter]
                < F::epsilon() * F::from(10).expect("Failed to convert constant to float")
            {
                break;
            }

            v[inner_iter + 1] = w
                .iter()
                .map(|&wi| wi / h[inner_iter + 1][inner_iter])
                .collect();

            // Apply the previously accumulated Givens rotations to the new column.
            for i in 0..inner_iter {
                let temp = cs[i] * h[i][inner_iter] + sn[i] * h[i + 1][inner_iter];
                h[i + 1][inner_iter] = -sn[i] * h[i][inner_iter] + cs[i] * h[i + 1][inner_iter];
                h[i][inner_iter] = temp;
            }

            // Compute a new rotation that eliminates the subdiagonal entry.
            let rho = (h[inner_iter][inner_iter] * h[inner_iter][inner_iter]
                + h[inner_iter + 1][inner_iter] * h[inner_iter + 1][inner_iter])
                .sqrt();
            cs[inner_iter] = h[inner_iter][inner_iter] / rho;
            sn[inner_iter] = h[inner_iter + 1][inner_iter] / rho;

            h[inner_iter][inner_iter] = rho;
            h[inner_iter + 1][inner_iter] = F::sparse_zero();

            let temp = cs[inner_iter] * s[inner_iter] + sn[inner_iter] * s[inner_iter + 1];
            s[inner_iter + 1] =
                -sn[inner_iter] * s[inner_iter] + cs[inner_iter] * s[inner_iter + 1];
            s[inner_iter] = temp;

            inner_iter += 1;

            let residual = s[inner_iter].abs();
            if residual <= tolerance {
                break;
            }
        }

        // A breakdown on the very first Arnoldi step leaves x unchanged and
        // would otherwise loop forever; bail out instead.
        if inner_iter == 0 {
            break;
        }

        // Solve the small triangular system H*y = s by back substitution.
        let mut y = vec![F::sparse_zero(); inner_iter];
        for i in (0..inner_iter).rev() {
            y[i] = s[i];
            for j in i + 1..inner_iter {
                let y_j = y[j];
                y[i] -= h[i][j] * y_j;
            }
            y[i] /= h[i][i];
        }

        // Update the solution with the Krylov basis combination.
        for i in 0..inner_iter {
            for (j, x_val) in x.iter_mut().enumerate().take(n) {
                *x_val += y[i] * v[i][j];
            }
        }

        let ax = a.matvec(&x)?;
        r = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

        if let Some(m) = &options.preconditioner {
            r = m.matvec(&r)?;
        }

        rnorm = norm2(&r);
        outer_iterations += inner_iter;

        if rnorm <= tolerance {
            break;
        }
    }

    Ok(IterationResult {
        x,
        iterations: outer_iterations,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
        message: if rnorm <= tolerance {
            "Converged".to_string()
        } else {
            "Maximum iterations reached".to_string()
        },
    })
}

/// Common interface for iterative solvers that operate on a [`LinearOperator`].
pub trait IterativeSolver<F: Float> {
    /// Solve `A x = b` and return the iteration result.
    fn solve(&self, a: &dyn LinearOperator<F>, b: &[F]) -> SparseResult<IterationResult<F>>;
}

/// Dot product of two equal-length slices.
pub(crate) fn dot<F: Float + Sum>(x: &[F], y: &[F]) -> F {
    x.iter().zip(y).map(|(&xi, &yi)| xi * yi).sum()
}

/// Euclidean (L2) norm of a slice.
pub(crate) fn norm2<F: Float + Sum>(x: &[F]) -> F {
    dot(x, x).sqrt()
}

/// Options controlling the LSQR least-squares solver.
pub struct LSQROptions<F> {
    #[allow(dead_code)]
    pub max_iter: usize,
    #[allow(dead_code)]
    pub rtol: F,
    #[allow(dead_code)]
    pub atol: F,
    #[allow(dead_code)]
    pub btol: F,
    #[allow(dead_code)]
    pub x0: Option<Vec<F>>,
}

impl<F: Float> Default for LSQROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            btol: F::from(1e-8).expect("Failed to convert constant to float"),
            x0: None,
        }
    }
}

/// Options controlling the LSMR least-squares solver.
#[allow(dead_code)]
pub struct LSMROptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub btol: F,
    pub x0: Option<Vec<F>>,
    /// Damping parameter for the regularized problem; zero disables damping.
    pub damp: F,
}

impl<F: Float + SparseElement> Default for LSMROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            btol: F::from(1e-8).expect("Failed to convert constant to float"),
            x0: None,
            damp: F::sparse_zero(),
        }
    }
}

/// Options for a GCROT-type solver (restarted with truncation).
#[allow(dead_code)]
pub struct GCROTOptions<F> {
    pub max_iter: usize,
    pub restart: usize,
    pub truncate: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for GCROTOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            restart: 30,
            truncate: 2,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            preconditioner: None,
        }
    }
}

/// Options controlling the TFQMR solver.
#[allow(dead_code)]
pub struct TFQMROptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for TFQMROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).expect("Failed to convert constant to float"),
            atol: F::from(1e-12).expect("Failed to convert constant to float"),
            x0: None,
            preconditioner: None,
        }
    }
}
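
/// Stable Givens rotation ("SymOrtho" in the LSQR/LSMR literature): returns
/// `(c, s, r)` with `c*a + s*b = r` and `-s*a + c*b = 0`, computed by dividing
/// through by the larger of `|a|` and `|b|` to avoid overflow.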
fn sym_ortho<F: Float + SparseElement>(a: F, b: F) -> (F, F, F) {
    use scirs2_core::numeric::One;

    let zero = F::sparse_zero();
    let one = <F as One>::one();

    if b == zero {
        return (if a >= zero { one } else { -one }, zero, a.abs());
    } else if a == zero {
        return (zero, if b >= zero { one } else { -one }, b.abs());
    } else if b.abs() > a.abs() {
        let tau = a / b;
        let s_sign = if b >= zero { one } else { -one };
        let s = s_sign / (one + tau * tau).sqrt();
        let c = s * tau;
        let r = b / s;
        (c, s, r)
    } else {
        let tau = b / a;
        let c_sign = if a >= zero { one } else { -one };
        let c = c_sign / (one + tau * tau).sqrt();
        let s = c * tau;
        let r = a / c;
        (c, s, r)
    }
}
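
/// LSQR solver for least-squares problems `min ||A x - b||_2` (also usable on
/// square systems), based on Golub-Kahan bidiagonalization; it only needs
/// `matvec` and `rmatvec` products, so `A` may be rectangular.
///
/// A minimal usage sketch, mirroring `test_lsqr_diagonal` below:
///
/// ```ignore
/// let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let result = lsqr(&diag, &b, LSQROptions::default())?;
/// assert!(result.converged);
/// ```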
#[allow(dead_code)]
pub fn lsqr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSQROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::sparse_zero(); n],
    };

    // Initialize the Golub-Kahan bidiagonalization: u = b - A*x, normalized.
    let mut u: Vec<F> = b.to_vec();
    let bnorm = norm2(b);

    if x.iter().any(|&xi| xi != F::sparse_zero()) {
        let ax = a.matvec(&x)?;
        for i in 0..u.len() {
            u[i] -= ax[i];
        }
    }

    let mut beta = norm2(&u);
    if beta == F::sparse_zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: F::sparse_zero(),
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }

    for elem in &mut u {
        *elem /= beta;
    }

    let mut v = a.rmatvec(&u)?;
    let mut alfa = norm2(&v);

    if alfa > F::sparse_zero() {
        for elem in &mut v {
            *elem /= alfa;
        }
    }

    let mut w = v.clone();

    let mut rhobar = alfa;
    let mut phibar = beta;

    let tolerance = F::max(options.atol, options.rtol * bnorm);

    for iterations in 0..options.max_iter {
        // Continue the bidiagonalization: u = A*v - alfa*u, then renormalize.
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alfa * u[i];
        }
        beta = norm2(&u);

        if beta > F::sparse_zero() {
            for elem in &mut u {
                *elem /= beta;
            }

            // v = A^T*u - beta*v, then renormalize.
            let atu = a.rmatvec(&u)?;
            for i in 0..v.len() {
                v[i] = atu[i] - beta * v[i];
            }
            alfa = norm2(&v);

            if alfa > F::sparse_zero() {
                for elem in &mut v {
                    *elem /= alfa;
                }
            }
        }

        // Apply the next plane rotation to the bidiagonal system.
        let (cs, sn, rho) = sym_ortho(rhobar, beta);

        let theta = sn * alfa;
        rhobar = -cs * alfa;
        let phi = cs * phibar;
        phibar = sn * phibar;

        // Update the solution x and the search direction w.
        let t1 = phi / rho;
        let t2 = -theta / rho;

        for i in 0..x.len() {
            x[i] += t1 * w[i];
        }
        for i in 0..w.len() {
            w[i] = v[i] + t2 * w[i];
        }

        let residual_norm = phibar.abs();
        if residual_norm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: phibar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
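
/// LSMR solver for least-squares problems `min ||A x - b||_2`; like LSQR it
/// uses Golub-Kahan bidiagonalization, but it monitors an estimate of
/// `||A^T r||`, which tends to decrease more smoothly.
///
/// Note: `options.damp` is accepted for a regularized variant but is not
/// applied in the recurrence below.
///
/// A minimal usage sketch, mirroring `test_lsmr_identity` below:
///
/// ```ignore
/// let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
/// let b = vec![1.0, 2.0, 3.0];
/// let result = lsmr(&identity, &b, LSMROptions::default())?;
/// assert!(result.converged);
/// ```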
#[allow(dead_code)]
pub fn lsmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    use scirs2_core::numeric::One;

    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }

    let zero = F::sparse_zero();
    let one = <F as One>::one();

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![zero; n],
    };

    // Initialize the Golub-Kahan bidiagonalization: u = b - A*x, normalized.
    let mut u: Vec<F> = b.to_vec();
    let normb = norm2(b);

    if x.iter().any(|&xi| xi != zero) {
        let ax = a.matvec(&x)?;
        for i in 0..u.len() {
            u[i] -= ax[i];
        }
    }

    let mut beta = norm2(&u);
    if beta == zero {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: zero,
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }

    for elem in &mut u {
        *elem /= beta;
    }

    let mut v = a.rmatvec(&u)?;
    let mut alpha = norm2(&v);

    if alpha > zero {
        for elem in &mut v {
            *elem /= alpha;
        }
    }

    // Rotation state for the two nested least-squares recurrences.
    let mut alphabar = alpha;
    let mut zetabar = alpha * beta;
    let mut rho = one;
    let mut rhobar = one;
    let mut cbar = one;
    let mut sbar = zero;

    let mut h = v.clone();
    let mut hbar: Vec<F> = vec![zero; n];

    let tolerance = F::max(options.atol, options.rtol * normb);

    for itn in 0..options.max_iter {
        // Continue the bidiagonalization.
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alpha * u[i];
        }
        beta = norm2(&u);

        if beta > zero {
            for elem in &mut u {
                *elem /= beta;
            }

            let atu = a.rmatvec(&u)?;
            for i in 0..v.len() {
                v[i] = atu[i] - beta * v[i];
            }
            alpha = norm2(&v);

            if alpha > zero {
                for elem in &mut v {
                    *elem /= alpha;
                }
            }
        }

        // First rotation: eliminate beta from the bidiagonal matrix.
        let rhoold = rho;
        let (c, s, rho_new) = sym_ortho(alphabar, beta);
        rho = rho_new;
        let thetanew = s * alpha;
        alphabar = c * alpha;

        // Second rotation: update the quantities of the least-squares subproblem.
        let rhobarold = rhobar;
        let zetaold = zetabar / (rhoold * rhobarold);
        let thetabar = sbar * rho;
        let rhotemp = cbar * rho;
        let (cbar_new, sbar_new, rhobar_new) = sym_ortho(rhotemp, thetanew);
        cbar = cbar_new;
        sbar = sbar_new;
        rhobar = rhobar_new;
        let zeta = cbar * zetabar;
        zetabar = -sbar * zetabar;

        // Update h, hbar, and the solution x.
        let factor = thetabar * rho / (rhoold * rhobarold);
        for i in 0..n {
            hbar[i] = h[i] - factor * hbar[i];
        }
        let update_factor = zeta / (rho * rhobar);
        for i in 0..n {
            x[i] += update_factor * hbar[i];
        }
        let h_factor = thetanew / rho;
        for i in 0..n {
            h[i] = v[i] - h_factor * h[i];
        }

        // |zetabar| estimates ||A^T r|| and drives the stopping test.
        let normar = zetabar.abs();
        if normar <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: itn + 1,
                residual_norm: normar,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: zetabar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}
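
/// TFQMR (Transpose-Free Quasi-Minimal Residual) solver for general square
/// systems `A x = b`; it needs only products with `A` (no `A^T`). Convergence
/// is tested against the quasi-minimal bound `tau * sqrt(iter + 1)`, and the
/// returned `residual_norm` is `tau` rather than the true residual norm.
///
/// A minimal usage sketch, mirroring `test_tfqmr_diagonal` below:
///
/// ```ignore
/// let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let result = tfqmr(&diag, &b, TFQMROptions::default())?;
/// assert!(result.converged);
/// ```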
#[allow(dead_code)]
pub fn tfqmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: TFQMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for TFQMR solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;
    let one = F::sparse_one();
    let zero = F::sparse_zero();

    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![zero; n],
    };

    let ax = a.matvec(&x)?;
    let r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let r0norm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    if r0norm <= tolerance || r0norm == zero {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: r0norm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }

    let mut u = r.clone();
    let mut w = r.clone();
    // Fixed shadow residual.
    let rstar = r.clone();

    let ar = a.matvec(&r)?;
    let mut v = match &options.preconditioner {
        Some(m) => m.matvec(&ar)?,
        None => ar,
    };
    let mut uhat = v.clone();

    let mut d: Vec<F> = vec![zero; n];
    let mut theta = zero;
    let mut eta = zero;

    let mut rho = dot(&rstar, &r);
    let mut rho_last = rho;
    let mut tau = r0norm;

    for iter in 0..options.max_iter {
        let even = iter % 2 == 0;

        let mut alpha = zero;
        let mut u_next: Vec<F> = vec![zero; n];

        if even {
            let vtrstar = dot(&rstar, &v);
            if vtrstar == zero {
                return Ok(IterationResult {
                    x,
                    iterations: iter,
                    residual_norm: tau,
                    converged: false,
                    message: "TFQMR breakdown: v'*rstar = 0".to_string(),
                });
            }
            alpha = rho / vtrstar;

            for i in 0..n {
                u_next[i] = u[i] - alpha * v[i];
            }
        }

        let alpha_used = if even {
            alpha
        } else {
            rho / dot(&rstar, &v)
        };

        for i in 0..n {
            w[i] -= alpha_used * uhat[i];
        }

        // Update the correction direction d.
        let theta_sq_over_alpha =
            if alpha_used.abs() > F::from(1e-300).expect("Failed to convert constant to float") {
                theta * theta / alpha_used
            } else {
                zero
            };
        for i in 0..n {
            d[i] = u[i] + theta_sq_over_alpha * eta * d[i];
        }

        // Quasi-minimization parameters.
        theta = norm2(&w) / tau;

        let c = one / (one + theta * theta).sqrt();

        tau = tau * theta * c;

        eta = c * c * alpha_used;

        let z = match &options.preconditioner {
            Some(m) => m.matvec(&d)?,
            None => d.clone(),
        };

        for i in 0..n {
            x[i] += eta * z[i];
        }

        // Quasi-minimal residual bound tau * sqrt(iter + 1) used as the stopping test.
        let iter_f = F::from(iter + 1).expect("Failed to convert to float");
        if tau * iter_f.sqrt() < tolerance {
            return Ok(IterationResult {
                x,
                iterations: iter + 1,
                residual_norm: tau,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        if !even {
            rho = dot(&rstar, &w);

            if rho == zero {
                return Ok(IterationResult {
                    x,
                    iterations: iter + 1,
                    residual_norm: tau,
                    converged: false,
                    message: "TFQMR breakdown: rho = 0".to_string(),
                });
            }

            let beta = rho / rho_last;

            for i in 0..n {
                u[i] = w[i] + beta * u[i];
            }

            for i in 0..n {
                v[i] = beta * uhat[i] + beta * beta * v[i];
            }

            let au = a.matvec(&u)?;
            uhat = match &options.preconditioner {
                Some(m) => m.matvec(&au)?,
                None => au,
            };

            for i in 0..n {
                v[i] += uhat[i];
            }
        } else {
            let au_next = a.matvec(&u_next)?;
            uhat = match &options.preconditioner {
                Some(m) => m.matvec(&au_next)?,
                None => au_next,
            };

            u = u_next;

            rho_last = rho;
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: tau,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr::CsrMatrix;
    use crate::linalg::interface::AsLinearOperator;

    #[test]
    fn test_cg_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(&identity, &b, options).expect("Operation failed");

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_cg_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = CGOptions::default();
        let result = cg(&diag, &b, options).expect("Operation failed");

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_cg_sparse_matrix() {
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -1.0, -1.0, 4.0, -1.0, -1.0, -1.0, 4.0];
        let shape = (3, 3);

        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(op.as_ref(), &b, options).expect("Operation failed");

        assert!(result.converged);
        let ax = op.matvec(&result.x).expect("Operation failed");
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }

    #[test]
    fn test_bicgstab_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&identity, &b, options).expect("Operation failed");

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_bicgstab_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&diag, &b, options).expect("Operation failed");

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_bicgstab_non_symmetric() {
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -2.0, -1.0, 4.0, -1.0, 0.0, -1.0, 3.0];
        let shape = (3, 3);

        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 1.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(op.as_ref(), &b, options).expect("Operation failed");

        assert!(result.converged);
        let ax = op.matvec(&result.x).expect("Operation failed");
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }

    #[test]
    fn test_lsqr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSQROptions::default();
        let result = lsqr(&identity, &b, options).expect("Operation failed");

        println!("Identity test - Expected: {:?}, Got: {:?}", b, result.x);
        println!(
            "Converged: {}, Iterations: {}",
            result.converged, result.iterations
        );

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsqr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSQROptions::default();
        let result = lsqr(&diag, &b, options).expect("Operation failed");

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsmr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSMROptions::default();
        let result = lsmr(&identity, &b, options).expect("Operation failed");

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsmr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSMROptions::default();
        let result = lsmr(&diag, &b, options).expect("Operation failed");

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_tfqmr_identity() {
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&identity, &b, options).expect("Operation failed");

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_tfqmr_diagonal() {
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&diag, &b, options).expect("Operation failed");

        assert!(result.converged, "TFQMR did not converge");
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsqr_least_squares() {
        let rows = vec![0, 0, 1, 1, 2, 2, 3, 3];
        let cols = vec![0, 1, 0, 1, 0, 1, 0, 1];
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let shape = (4, 2);

        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 3.0, 4.0];
        let options = LSQROptions::default();
        let result = lsqr(op.as_ref(), &b, options).expect("Operation failed");

        assert!(result.converged || result.residual_norm < 1e-6);
    }

    #[test]
    fn test_solver_options_defaults() {
        let lsqr_opts = LSQROptions::<f64>::default();
        assert_eq!(lsqr_opts.max_iter, 1000);
        assert!(lsqr_opts.x0.is_none());

        let lsmr_opts = LSMROptions::<f64>::default();
        assert_eq!(lsmr_opts.max_iter, 1000);
        assert_eq!(lsmr_opts.damp, 0.0);
        assert!(lsmr_opts.x0.is_none());

        let tfqmr_opts = TFQMROptions::<f64>::default();
        assert_eq!(tfqmr_opts.max_iter, 1000);
        assert!(tfqmr_opts.x0.is_none());
        assert!(tfqmr_opts.preconditioner.is_none());
    }
}