scirs2_sparse/linalg/iterative.rs

#![allow(unused_variables)]
#![allow(unused_assignments)]
#![allow(unused_mut)]

use crate::error::{SparseError, SparseResult};
use crate::linalg::interface::LinearOperator;
use scirs2_core::numeric::{Float, NumAssign};
use std::iter::Sum;

/// Result of an iterative solver
#[derive(Debug, Clone)]
pub struct IterationResult<F> {
    pub x: Vec<F>,
    pub iterations: usize,
    pub residual_norm: F,
    pub converged: bool,
    pub message: String,
}

/// Options for conjugate gradient solver
pub struct CGOptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for CGOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            preconditioner: None,
        }
    }
}

/// Conjugate gradient solver for symmetric positive definite systems
///
/// Solves Ax = b where A is symmetric positive definite
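///
/// # Example
///
/// A minimal sketch mirroring the identity-operator test below; marked
/// `ignore` because the exact crate paths are assumed:
///
/// ```ignore
/// use scirs2_sparse::linalg::interface::IdentityOperator;
/// use scirs2_sparse::linalg::iterative::{cg, CGOptions};
///
/// let a = IdentityOperator::<f64>::new(3);
/// let b = vec![1.0, 2.0, 3.0];
/// let result = cg(&a, &b, CGOptions::default()).unwrap();
/// assert!(result.converged);
/// assert!(result.x.iter().zip(&b).all(|(xi, bi)| (xi - bi).abs() < 1e-6));
/// ```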
#[allow(dead_code)]
pub fn cg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: CGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for CG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Compute initial residual: r = b - A*x
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    // Apply preconditioner if provided
    let mut z = if let Some(m) = &options.preconditioner {
        m.matvec(&r)?
    } else {
        r.clone()
    };

    let mut p = z.clone();
    let mut rz_old = dot(&r, &z);

    // Convergence threshold: ||r|| <= max(atol, rtol * ||b||)
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    // Return early if the initial guess already satisfies the tolerance;
    // this also avoids a spurious breakdown report when r = 0.
    let rnorm0 = norm2(&r);
    if rnorm0 <= tolerance {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: rnorm0,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }

    let mut iterations = 0;
    while iterations < options.max_iter {
        // Compute Ap
        let ap = a.matvec(&p)?;

        // Compute alpha = (r,z) / (p,Ap)
        let pap = dot(&p, &ap);
        if pap <= F::zero() {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "Matrix not positive definite (p^T*A*p <= 0)".to_string(),
            });
        }
        let alpha = rz_old / pap;

        // Update solution: x = x + alpha*p
        for i in 0..n {
            x[i] += alpha * p[i];
        }

        // Update residual: r = r - alpha*Ap
        for i in 0..n {
            r[i] -= alpha * ap[i];
        }

        // Check for convergence
        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Apply preconditioner
        z = if let Some(m) = &options.preconditioner {
            m.matvec(&r)?
        } else {
            r.clone()
        };

        // Compute beta = (r_{i+1},z_{i+1}) / (r_i,z_i)
        let rz_new = dot(&r, &z);
        let beta = rz_new / rz_old;

        // Update direction: p = z + beta*p
        for i in 0..n {
            p[i] = z[i] + beta * p[i];
        }

        rz_old = rz_new;
        iterations += 1;
    }

    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options for BiCG solver
pub struct BiCGOptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for BiCGOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            left_preconditioner: None,
            right_preconditioner: None,
        }
    }
}

/// Biconjugate Gradient solver
///
/// Solves Ax = b for general square systems, including non-symmetric A.
/// Requires the transpose product A^T*y via the operator's `rmatvec`.
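///
/// # Example
///
/// A minimal sketch on a diagonal system, as in the tests below; marked
/// `ignore` because the crate paths are assumed:
///
/// ```ignore
/// use scirs2_sparse::linalg::interface::DiagonalOperator;
/// use scirs2_sparse::linalg::iterative::{bicg, BiCGOptions};
///
/// let a = DiagonalOperator::new(vec![2.0_f64, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let result = bicg(&a, &b, BiCGOptions::default()).unwrap();
/// assert!(result.converged); // x ≈ [1.0, 2.0, 3.0]
/// ```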
#[allow(dead_code)]
pub fn bicg<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCG solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Compute initial residual: r = b - A*x
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    // Initialize r_star (shadow residual) = r
    let mut r_star = r.clone();

    // Apply preconditioners to initial residuals
    let mut z = if let Some(m1) = &options.left_preconditioner {
        m1.matvec(&r)?
    } else {
        r.clone()
    };

    let mut z_star = if let Some(m2) = &options.right_preconditioner {
        m2.matvec(&r_star)?
    } else {
        r_star.clone()
    };

    let mut p = z.clone();
    let mut p_star = z_star.clone();

    let mut rho_old = dot(&r_star, &z);

    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    let mut iterations = 0;
    while iterations < options.max_iter {
        // Compute q = A*p and q_star = A^T*p_star
        let mut q = a.matvec(&p)?;
        if let Some(m2) = &options.right_preconditioner {
            q = m2.matvec(&q)?;
        }

        let mut q_star = a.rmatvec(&p_star)?;
        if let Some(m1) = &options.left_preconditioner {
            q_star = m1.matvec(&q_star)?;
        }

        // Compute alpha = rho_old / (p_star, q)
        let alpha_den = dot(&p_star, &q);
        if alpha_den.abs() < F::epsilon() * F::from(10).unwrap() {
            return Ok(IterationResult {
                x,
                iterations,
                residual_norm: norm2(&r),
                converged: false,
                message: "BiCG breakdown: (p_star, q) ≈ 0".to_string(),
            });
        }
        let alpha = rho_old / alpha_den;

        // Update solution and residuals
        for i in 0..n {
            x[i] += alpha * p[i];
            r[i] -= alpha * q[i];
            r_star[i] -= alpha * q_star[i];
        }

        // Check for convergence before setting up the next iteration
        let rnorm = norm2(&r);
        if rnorm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Apply preconditioners
        z = if let Some(m1) = &options.left_preconditioner {
            m1.matvec(&r)?
        } else {
            r.clone()
        };

        z_star = if let Some(m2) = &options.right_preconditioner {
            m2.matvec(&r_star)?
        } else {
            r_star.clone()
        };

        // Compute new rho
        let rho = dot(&r_star, &z);
        if rho.abs() < F::epsilon() * F::from(10).unwrap() {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: false,
                message: "BiCG breakdown: rho ≈ 0".to_string(),
            });
        }

        // Compute beta = rho / rho_old
        let beta = rho / rho_old;

        // Update search directions
        for i in 0..n {
            p[i] = z[i] + beta * p[i];
            p_star[i] = z_star[i] + beta * p_star[i];
        }

        rho_old = rho;
        iterations += 1;
    }

    Ok(IterationResult {
        x,
        iterations,
        residual_norm: norm2(&r),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options for BiCGSTAB solver
pub struct BiCGSTABOptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub left_preconditioner: Option<Box<dyn LinearOperator<F>>>,
    pub right_preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for BiCGSTABOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            left_preconditioner: None,
            right_preconditioner: None,
        }
    }
}

/// Result from BiCGSTAB solver
pub type BiCGSTABResult<F> = IterationResult<F>;

/// BiConjugate Gradient Stabilized method
///
/// An improved variant of BiCG that smooths BiCG's irregular convergence
/// and is generally more numerically stable. Works for general
/// non-symmetric systems and, unlike BiCG, does not require A^T.
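///
/// # Example
///
/// A minimal sketch on a non-symmetric CSR matrix, following the test
/// below (`ignore`d; the `CsrMatrix::new` signature is taken from the
/// tests and the crate paths are assumed):
///
/// ```ignore
/// use scirs2_sparse::csr::CsrMatrix;
/// use scirs2_sparse::linalg::interface::AsLinearOperator;
/// use scirs2_sparse::linalg::iterative::{bicgstab, BiCGSTABOptions};
///
/// let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
/// let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
/// let data = vec![4.0, -1.0, -2.0, -1.0, 4.0, -1.0, 0.0, -1.0, 3.0];
/// let matrix = CsrMatrix::new(data, rows, cols, (3, 3)).unwrap();
/// let op = matrix.as_linear_operator();
///
/// let b = vec![1.0, 2.0, 1.0];
/// let result = bicgstab(op.as_ref(), &b, BiCGSTABOptions::default()).unwrap();
/// assert!(result.converged);
/// ```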
#[allow(dead_code)]
pub fn bicgstab<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: BiCGSTABOptions<F>,
) -> SparseResult<BiCGSTABResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for BiCGSTAB solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Compute initial residual: r = b - A*x
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    // Check if initial guess is solution
    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    if rnorm <= tolerance {
        return Ok(BiCGSTABResult {
            x,
            iterations: 0,
            residual_norm: rnorm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }

    // Choose shadow residual (r_hat) as r0
    let r_hat = r.clone();

    let mut v = vec![F::zero(); n];
    let mut p = vec![F::zero(); n];
    let mut y = vec![F::zero(); n];
    let mut s = vec![F::zero(); n];
    let mut t: Vec<F>;

    let mut rho_old = F::one();
    let mut alpha = F::zero();
    let mut omega = F::one();
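    // With alpha = 0 and omega = 1, the first beta = (rho/rho_old)*(alpha/omega)
    // is zero, so the first search direction reduces to p = r.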

    // Main iteration loop
    let mut iterations = 0;
    while iterations < options.max_iter {
        // Compute rho = (r_hat, r)
        let rho = dot(&r_hat, &r);

        // Check for breakdown
        if rho.abs() < F::epsilon() * F::from(10).unwrap() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: rho ≈ 0".to_string(),
            });
        }

        // Compute beta and update p
        let beta = (rho / rho_old) * (alpha / omega);

        // p = r + beta * (p - omega * v)
        for i in 0..n {
            p[i] = r[i] + beta * (p[i] - omega * v[i]);
        }

        // Apply left preconditioner if provided
        let p_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&p)?,
            None => p.clone(),
        };

        // Compute v = A * p_tilde
        v = a.matvec(&p_tilde)?;

        // Apply right preconditioner if provided
        if let Some(m2) = &options.right_preconditioner {
            v = m2.matvec(&v)?;
        }

        // Compute alpha = rho / (r_hat, v)
        let den = dot(&r_hat, &v);
        if den.abs() < F::epsilon() * F::from(10).unwrap() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (r_hat, v) ≈ 0".to_string(),
            });
        }
        alpha = rho / den;

        // Check if alpha is reasonable
        if !alpha.is_finite() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: alpha is not finite".to_string(),
            });
        }
        // Intermediate solution y = x + alpha*p_tilde and residual s = r - alpha*v
        for i in 0..n {
            y[i] = x[i] + alpha * p_tilde[i];
            s[i] = r[i] - alpha * v[i];
        }

        // Check convergence
        let snorm = norm2(&s);
        if snorm <= tolerance {
            // Final update: x = y
            x = y;

            // Apply right preconditioner to final solution if provided
            if let Some(m2) = &options.right_preconditioner {
                x = m2.matvec(&x)?;
            }

            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: snorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Apply left preconditioner to s if provided
        let s_tilde = match &options.left_preconditioner {
            Some(m1) => m1.matvec(&s)?,
            None => s.clone(),
        };

        // Compute t = A * s_tilde
        t = a.matvec(&s_tilde)?;

        // Apply right preconditioner if provided
        if let Some(m2) = &options.right_preconditioner {
            t = m2.matvec(&t)?;
        }

        // Compute omega = (t, s) / (t, t)
        let ts = dot(&t, &s);
        let tt = dot(&t, &t);

        if tt < F::epsilon() * F::from(10).unwrap() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: (t, t) ≈ 0".to_string(),
            });
        }

        omega = ts / tt;

        // Check if omega is reasonable
        if !omega.is_finite() || omega.abs() < F::epsilon() * F::from(10).unwrap() {
            return Ok(BiCGSTABResult {
                x,
                iterations,
                residual_norm: rnorm,
                converged: false,
                message: "BiCGSTAB breakdown: omega is not finite or too small".to_string(),
            });
        }
        // Update solution x = y + omega*s_tilde and residual r = s - omega*t
        for i in 0..n {
            x[i] = y[i] + omega * s_tilde[i];
            r[i] = s[i] - omega * t[i];
        }

        // Apply right preconditioner to final solution if provided
        if let Some(m2) = &options.right_preconditioner {
            x = m2.matvec(&x)?;
        }

        rnorm = norm2(&r);

        // Check for convergence
        if rnorm <= tolerance {
            return Ok(BiCGSTABResult {
                x,
                iterations: iterations + 1,
                residual_norm: rnorm,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        rho_old = rho;
        iterations += 1;
    }

    Ok(BiCGSTABResult {
        x,
        iterations,
        residual_norm: rnorm,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Options for GMRES solver
pub struct GMRESOptions<F> {
    pub max_iter: usize,
    pub restart: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for GMRESOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            restart: 30,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            preconditioner: None,
        }
    }
}

/// Generalized Minimal Residual Method
///
/// Solves Ax = b for general non-symmetric systems. GMRES is particularly
/// robust but requires more memory than other methods due to the need to
/// store the Krylov basis vectors.
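///
/// # Example
///
/// A minimal sketch with a shorter restart length, a reasonable choice for
/// small systems (`ignore`d; crate paths assumed):
///
/// ```ignore
/// use scirs2_sparse::linalg::interface::DiagonalOperator;
/// use scirs2_sparse::linalg::iterative::{gmres, GMRESOptions};
///
/// let a = DiagonalOperator::new(vec![2.0_f64, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let options = GMRESOptions { restart: 3, ..Default::default() };
/// let result = gmres(&a, &b, options).unwrap();
/// assert!(result.converged); // x ≈ [1.0, 2.0, 3.0]
/// ```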
#[allow(dead_code)]
pub fn gmres<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: GMRESOptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for GMRES solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;
    let restart = options.restart.min(n);

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Compute initial residual: r = b - A*x
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    // Apply preconditioner if provided
    if let Some(m) = &options.preconditioner {
        r = m.matvec(&r)?;
    }

    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    // Total number of inner (Arnoldi) iterations across all restarts
    let mut total_iterations = 0;

    // Outer iteration loop (restarts)
    while total_iterations < options.max_iter && rnorm > tolerance {
        // Initialize Krylov subspace
        let mut v = vec![vec![F::zero(); n]; restart + 1];
        let mut h = vec![vec![F::zero(); restart]; restart + 1];
        let mut cs = vec![F::zero(); restart]; // Cosines for Givens rotations
        let mut sn = vec![F::zero(); restart]; // Sines for Givens rotations
        let mut s = vec![F::zero(); restart + 1]; // RHS for triangular system

        // Set up initial vector
        v[0] = r.iter().map(|&ri| ri / rnorm).collect();
        s[0] = rnorm;

        // Arnoldi iteration
        let mut inner_iter = 0;
        while inner_iter < restart && inner_iter + total_iterations < options.max_iter {
            // Compute w = A * v[j]
            let mut w = a.matvec(&v[inner_iter])?;

            // Apply preconditioner if provided
            if let Some(m) = &options.preconditioner {
                w = m.matvec(&w)?;
            }

            // Orthogonalize against previous vectors
            for i in 0..=inner_iter {
                h[i][inner_iter] = dot(&v[i], &w);
                for (k, w_elem) in w.iter_mut().enumerate().take(n) {
                    *w_elem -= h[i][inner_iter] * v[i][k];
                }
            }

            h[inner_iter + 1][inner_iter] = norm2(&w);

            // Check for breakdown (happy breakdown: the Krylov subspace is invariant)
            if h[inner_iter + 1][inner_iter] < F::epsilon() * F::from(10).unwrap() {
                break;
            }

            // Normalize w and store in v[j+1]
            v[inner_iter + 1] = w
                .iter()
                .map(|&wi| wi / h[inner_iter + 1][inner_iter])
                .collect();

            // Apply previous Givens rotations to the new column of h
            for i in 0..inner_iter {
                let temp = cs[i] * h[i][inner_iter] + sn[i] * h[i + 1][inner_iter];
                h[i + 1][inner_iter] = -sn[i] * h[i][inner_iter] + cs[i] * h[i + 1][inner_iter];
                h[i][inner_iter] = temp;
            }

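            // The new rotation is chosen so that
            //   [ c  s ] [ h[j][j]   ]   [ rho ]
            //   [-s  c ] [ h[j+1][j] ] = [ 0   ],
            // with rho = sqrt(h[j][j]^2 + h[j+1][j]^2); zeroing the
            // subdiagonal keeps the small least-squares system triangular.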
            // Compute the new Givens rotation for column inner_iter
            let rho = (h[inner_iter][inner_iter] * h[inner_iter][inner_iter]
                + h[inner_iter + 1][inner_iter] * h[inner_iter + 1][inner_iter])
                .sqrt();
            cs[inner_iter] = h[inner_iter][inner_iter] / rho;
            sn[inner_iter] = h[inner_iter + 1][inner_iter] / rho;

            // Apply new Givens rotation
            h[inner_iter][inner_iter] = rho;
            h[inner_iter + 1][inner_iter] = F::zero();

            let temp = cs[inner_iter] * s[inner_iter] + sn[inner_iter] * s[inner_iter + 1];
            s[inner_iter + 1] =
                -sn[inner_iter] * s[inner_iter] + cs[inner_iter] * s[inner_iter + 1];
            s[inner_iter] = temp;

            inner_iter += 1;

            // Check for convergence
            let residual = s[inner_iter].abs();
            if residual <= tolerance {
                break;
            }
        }

        // Solve the upper triangular system
        let mut y = vec![F::zero(); inner_iter];
        for i in (0..inner_iter).rev() {
            y[i] = s[i];
            for j in i + 1..inner_iter {
                let y_j = y[j];
                y[i] -= h[i][j] * y_j;
            }
            y[i] /= h[i][i];
        }

        // Update solution
        for i in 0..inner_iter {
            for (j, x_val) in x.iter_mut().enumerate().take(n) {
                *x_val += y[i] * v[i][j];
            }
        }

        // Compute new residual
        let ax = a.matvec(&x)?;
        r = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

        // Apply preconditioner if provided
        if let Some(m) = &options.preconditioner {
            r = m.matvec(&r)?;
        }

        rnorm = norm2(&r);
        total_iterations += inner_iter;

        if rnorm <= tolerance {
            break;
        }
    }

    Ok(IterationResult {
        x,
        iterations: total_iterations,
        residual_norm: rnorm,
        converged: rnorm <= tolerance,
        message: if rnorm <= tolerance {
            "Converged".to_string()
        } else {
            "Maximum iterations reached".to_string()
        },
    })
}

/// Trait for iterative solvers
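///
/// # Example
///
/// A minimal sketch of an implementor that delegates to [`cg`]; the
/// `CgSolver` type here is hypothetical, not part of this module:
///
/// ```ignore
/// struct CgSolver;
///
/// impl IterativeSolver<f64> for CgSolver {
///     fn solve(
///         &self,
///         a: &dyn LinearOperator<f64>,
///         b: &[f64],
///     ) -> SparseResult<IterationResult<f64>> {
///         cg(a, b, CGOptions::default())
///     }
/// }
/// ```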
pub trait IterativeSolver<F: Float> {
    /// Solve the linear system Ax = b
    fn solve(&self, a: &dyn LinearOperator<F>, b: &[F]) -> SparseResult<IterationResult<F>>;
}

// Helper functions

/// Compute the dot product of two vectors
pub(crate) fn dot<F: Float + Sum>(x: &[F], y: &[F]) -> F {
    x.iter().zip(y).map(|(&xi, &yi)| xi * yi).sum()
}

/// Compute the 2-norm of a vector
pub(crate) fn norm2<F: Float + Sum>(x: &[F]) -> F {
    dot(x, x).sqrt()
}

/// Options for LSQR solver
pub struct LSQROptions<F> {
    #[allow(dead_code)]
    pub max_iter: usize,
    #[allow(dead_code)]
    pub rtol: F,
    #[allow(dead_code)]
    pub atol: F,
    #[allow(dead_code)]
    pub btol: F,
    #[allow(dead_code)]
    pub x0: Option<Vec<F>>,
}

impl<F: Float> Default for LSQROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            btol: F::from(1e-8).unwrap(),
            x0: None,
        }
    }
}

/// Options for LSMR solver
#[allow(dead_code)]
pub struct LSMROptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub btol: F,
    pub x0: Option<Vec<F>>,
    pub damp: F,
}

impl<F: Float> Default for LSMROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            btol: F::from(1e-8).unwrap(),
            x0: None,
            damp: F::zero(),
        }
    }
}

/// Options for GCROT solver
#[allow(dead_code)]
pub struct GCROTOptions<F> {
    pub max_iter: usize,
    pub restart: usize,
    pub truncate: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for GCROTOptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            restart: 30,
            truncate: 2,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            preconditioner: None,
        }
    }
}

/// Options for TFQMR solver
#[allow(dead_code)]
pub struct TFQMROptions<F> {
    pub max_iter: usize,
    pub rtol: F,
    pub atol: F,
    pub x0: Option<Vec<F>>,
    pub preconditioner: Option<Box<dyn LinearOperator<F>>>,
}

impl<F: Float> Default for TFQMROptions<F> {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            rtol: F::from(1e-8).unwrap(),
            atol: F::from(1e-12).unwrap(),
            x0: None,
            preconditioner: None,
        }
    }
}

/// Least Squares QR (LSQR) solver
///
/// Solves the least squares problem min ||Ax - b||_2 (or a consistent
/// system Ax = b) using Golub-Kahan bidiagonalization. Suitable for large
/// sparse least-squares problems; requires A^T via the operator's `rmatvec`.
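///
/// # Example
///
/// A minimal sketch on the overdetermined 4x2 system from the tests below
/// (`ignore`d; crate paths assumed):
///
/// ```ignore
/// use scirs2_sparse::csr::CsrMatrix;
/// use scirs2_sparse::linalg::interface::AsLinearOperator;
/// use scirs2_sparse::linalg::iterative::{lsqr, LSQROptions};
///
/// let rows = vec![0, 0, 1, 1, 2, 2, 3, 3];
/// let cols = vec![0, 1, 0, 1, 0, 1, 0, 1];
/// let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
/// let matrix = CsrMatrix::new(data, rows, cols, (4, 2)).unwrap();
/// let op = matrix.as_linear_operator();
///
/// let b = vec![1.0, 2.0, 3.0, 4.0];
/// let result = lsqr(op.as_ref(), &b, LSQROptions::default()).unwrap();
/// assert!(result.converged || result.residual_norm < 1e-6);
/// ```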
#[allow(dead_code)]
pub fn lsqr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSQROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Golub-Kahan bidiagonalization, step 1: beta * u = b - A*x
    let ax = a.matvec(&x)?;
    let mut u: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let mut beta = norm2(&u);
    if beta == F::zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: F::zero(),
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }
    for elem in &mut u {
        *elem /= beta;
    }

    // alpha * v = A^T * u
    let mut v = a.rmatvec(&u)?;
    let mut alpha = norm2(&v);
    if alpha == F::zero() {
        // A^T r = 0: x already minimizes ||Ax - b||
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: beta,
            converged: true,
            message: "x is a least-squares solution (A^T r = 0)".to_string(),
        });
    }
    for elem in &mut v {
        *elem /= alpha;
    }

    // w is the search direction; phi_bar and rho_bar carry the QR factors
    let mut w = v.clone();
    let mut phi_bar = beta;
    let mut rho_bar = alpha;

    let tolerance = F::max(options.atol, options.rtol * norm2(b));

    for iterations in 0..options.max_iter {
        // Continue bidiagonalization: beta * u = A*v - alpha*u
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alpha * u[i];
        }
        beta = norm2(&u);
        if beta != F::zero() {
            for elem in &mut u {
                *elem /= beta;
            }
        }

        // alpha * v = A^T*u - beta*v
        let atu = a.rmatvec(&u)?;
        for i in 0..v.len() {
            v[i] = atu[i] - beta * v[i];
        }
        alpha = norm2(&v);
        if alpha != F::zero() {
            for elem in &mut v {
                *elem /= alpha;
            }
        }

        // Givens rotation eliminating beta from the lower bidiagonal factor
        let rho = (rho_bar * rho_bar + beta * beta).sqrt();
        if rho == F::zero() {
            // Bidiagonalization terminated; no further progress is possible
            let residual_norm = phi_bar.abs();
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm,
                converged: residual_norm <= tolerance,
                message: "LSQR terminated: bidiagonalization breakdown".to_string(),
            });
        }
        let c = rho_bar / rho;
        let s = beta / rho;
        let theta = s * alpha;
        rho_bar = -c * alpha;
        let phi = c * phi_bar;
        phi_bar = s * phi_bar;

        // Update solution and search direction
        let t1 = phi / rho;
        let t2 = -theta / rho;
        for i in 0..x.len() {
            x[i] += t1 * w[i];
            w[i] = v[i] + t2 * w[i];
        }

        // phi_bar is a running estimate of the residual norm ||b - A*x||
        let residual_norm = phi_bar.abs();
        if residual_norm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: phi_bar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Least Squares Minimal Residual (LSMR) solver
///
/// An LSQR-like method that minimizes ||A^T r||_2 rather than ||r||_2;
/// its convergence measure typically decreases more smoothly than LSQR's.
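///
/// # Example
///
/// A minimal sketch on a diagonal system (`ignore`d; crate paths assumed):
///
/// ```ignore
/// use scirs2_sparse::linalg::interface::DiagonalOperator;
/// use scirs2_sparse::linalg::iterative::{lsmr, LSMROptions};
///
/// let a = DiagonalOperator::new(vec![2.0_f64, 3.0, 4.0]);
/// let b = vec![2.0, 6.0, 12.0];
/// let result = lsmr(&a, &b, LSMROptions::default()).unwrap();
/// assert!(result.converged); // x ≈ [1.0, 2.0, 3.0]
/// ```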
#[allow(dead_code)]
pub fn lsmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: LSMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (m, n) = a.shape();
    if b.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: b.len(),
        });
    }

    // Note: options.damp (regularization) is currently not used.

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Golub-Kahan bidiagonalization: beta * u = b - A*x
    let ax = a.matvec(&x)?;
    let mut u: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let mut beta = norm2(&u);
    if beta == F::zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: F::zero(),
            converged: true,
            message: "Zero initial residual".to_string(),
        });
    }
    for elem in &mut u {
        *elem /= beta;
    }

    // alpha * v = A^T * u
    let mut v = a.rmatvec(&u)?;
    let mut alpha = norm2(&v);
    if alpha == F::zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: beta,
            converged: true,
            message: "x is a least-squares solution (A^T r = 0)".to_string(),
        });
    }
    for elem in &mut v {
        *elem /= alpha;
    }

    // State for the two plane-rotation recurrences used by LSMR
    let mut alpha_bar = alpha;
    let mut zeta_bar = alpha * beta; // equals ||A^T r_0||
    let mut rho = F::one();
    let mut rho_bar = F::one();
    let mut c_bar = F::one();
    let mut s_bar = F::zero();

    let mut h = v.clone();
    let mut h_bar = vec![F::zero(); n];

    // Stop when the estimate of ||A^T r|| drops below the threshold
    let tolerance = F::max(options.atol, options.rtol * zeta_bar);

    for iterations in 0..options.max_iter {
        // Continue bidiagonalization: beta * u = A*v - alpha*u
        let av = a.matvec(&v)?;
        for i in 0..u.len() {
            u[i] = av[i] - alpha * u[i];
        }
        beta = norm2(&u);
        if beta != F::zero() {
            for elem in &mut u {
                *elem /= beta;
            }
        }

        // alpha * v = A^T*u - beta*v
        let atu = a.rmatvec(&u)?;
        for i in 0..v.len() {
            v[i] = atu[i] - beta * v[i];
        }
        alpha = norm2(&v);
        if alpha != F::zero() {
            for elem in &mut v {
                *elem /= alpha;
            }
        }

        // First rotation: eliminate beta from the bidiagonal factor
        let rho_old = rho;
        rho = (alpha_bar * alpha_bar + beta * beta).sqrt();
        if rho == F::zero() {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: zeta_bar.abs(),
                converged: false,
                message: "LSMR breakdown: rho = 0".to_string(),
            });
        }
        let c = alpha_bar / rho;
        let s = beta / rho;
        let theta_new = s * alpha;
        alpha_bar = c * alpha;

        // Second rotation: keep the second QR factorization up to date
        let rho_bar_old = rho_bar;
        let theta_bar = s_bar * rho;
        let rho_temp = c_bar * rho;
        rho_bar = (rho_temp * rho_temp + theta_new * theta_new).sqrt();
        if rho_bar == F::zero() {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm: zeta_bar.abs(),
                converged: false,
                message: "LSMR breakdown: rho_bar = 0".to_string(),
            });
        }
        c_bar = rho_temp / rho_bar;
        s_bar = theta_new / rho_bar;
        let zeta = c_bar * zeta_bar;
        zeta_bar = -s_bar * zeta_bar;

        // Update search directions and solution
        let t1 = theta_bar * rho / (rho_old * rho_bar_old);
        let t2 = zeta / (rho * rho_bar);
        let t3 = theta_new / rho;
        for i in 0..x.len() {
            h_bar[i] = h[i] - t1 * h_bar[i];
            x[i] += t2 * h_bar[i];
            h[i] = v[i] - t3 * h[i];
        }

        // |zeta_bar| estimates ||A^T r||
        let residual_norm = zeta_bar.abs();
        if residual_norm <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations + 1,
                residual_norm,
                converged: true,
                message: "Converged".to_string(),
            });
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter,
        residual_norm: zeta_bar.abs(),
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

/// Transpose-Free Quasi-Minimal Residual (TFQMR) solver
///
/// A transpose-free variant of QMR that avoids the need for products with A^T.
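///
/// # Example
///
/// A minimal sketch on the identity operator, as in the test below
/// (`ignore`d; crate paths assumed):
///
/// ```ignore
/// use scirs2_sparse::linalg::interface::IdentityOperator;
/// use scirs2_sparse::linalg::iterative::{tfqmr, TFQMROptions};
///
/// let a = IdentityOperator::<f64>::new(3);
/// let b = vec![1.0, 2.0, 3.0];
/// let result = tfqmr(&a, &b, TFQMROptions::default()).unwrap();
/// assert!(result.converged);
/// ```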
#[allow(dead_code)]
pub fn tfqmr<F>(
    a: &dyn LinearOperator<F>,
    b: &[F],
    options: TFQMROptions<F>,
) -> SparseResult<IterationResult<F>>
where
    F: Float + NumAssign + Sum + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(SparseError::ValueError(
            "Matrix must be square for TFQMR solver".to_string(),
        ));
    }
    if b.len() != rows {
        return Err(SparseError::DimensionMismatch {
            expected: rows,
            found: b.len(),
        });
    }

    let n = rows;

    // Initialize solution
    let mut x: Vec<F> = match &options.x0 {
        Some(x0) => {
            if x0.len() != n {
                return Err(SparseError::DimensionMismatch {
                    expected: n,
                    found: x0.len(),
                });
            }
            x0.clone()
        }
        None => vec![F::zero(); n],
    };

    // Compute initial residual
    let ax = a.matvec(&x)?;
    let mut r: Vec<F> = b.iter().zip(&ax).map(|(&bi, &axi)| bi - axi).collect();

    let mut rnorm = norm2(&r);
    let bnorm = norm2(b);
    let tolerance = F::max(options.atol, options.rtol * bnorm);

    if rnorm <= tolerance {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: rnorm,
            converged: true,
            message: "Converged with initial guess".to_string(),
        });
    }

    // Initialize vectors
    let r_tilde = r.clone(); // Shadow residual
    let mut w = r.clone();
    let mut u = r.clone();
    let mut v = vec![F::zero(); n];
    let mut d = vec![F::zero(); n];

    let mut tau = rnorm;
    let mut theta = F::zero();
    let mut eta = F::zero();
    let mut rho = dot(&r_tilde, &r);

    if rho == F::zero() {
        return Ok(IterationResult {
            x,
            iterations: 0,
            residual_norm: rnorm,
            converged: false,
            message: "TFQMR breakdown: rho = 0".to_string(),
        });
    }

    for iterations in 0..options.max_iter {
        // Apply preconditioner if provided
        let y = match &options.preconditioner {
            Some(m) => m.matvec(&w)?,
            None => w.clone(),
        };

        v = a.matvec(&y)?;
        let alpha = rho / dot(&r_tilde, &v);

        // Update w
        for i in 0..n {
            w[i] -= alpha * v[i];
        }

        // First half-step update
        let theta_old = theta;
        theta = norm2(&w) / tau;
        let c = F::one() / (F::one() + theta * theta).sqrt();
        tau = tau * theta * c;
        eta = c * c * alpha;

        for i in 0..n {
            d[i] = u[i] + (theta_old * theta_old * eta / alpha) * d[i];
            x[i] += eta * d[i];
        }

        // Check convergence after first half-step
        if tau * (F::one() + theta * theta).sqrt() <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations * 2 + 1,
                residual_norm: tau,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Second half-step update
        for i in 0..n {
            u[i] = w[i] - alpha * v[i];
        }

        let theta_old = theta;
        theta = norm2(&u) / tau;
        let c = F::one() / (F::one() + theta * theta).sqrt();
        tau = tau * theta * c;
        eta = c * c * alpha;

        for i in 0..n {
            d[i] = w[i] + (theta_old * theta_old * eta / alpha) * d[i];
            x[i] += eta * d[i];
        }

        // Check convergence after second half-step
        if tau <= tolerance {
            return Ok(IterationResult {
                x,
                iterations: iterations * 2 + 2,
                residual_norm: tau,
                converged: true,
                message: "Converged".to_string(),
            });
        }

        // Update for next iteration
        let rho_old = rho;
        rho = dot(&r_tilde, &u);

        if rho == F::zero() {
            return Ok(IterationResult {
                x,
                iterations: iterations * 2 + 2,
                residual_norm: tau,
                converged: false,
                message: "TFQMR breakdown: rho = 0".to_string(),
            });
        }

        let beta = rho / rho_old;
        for i in 0..n {
            w[i] = u[i] + beta * w[i];
        }
    }

    Ok(IterationResult {
        x,
        iterations: options.max_iter * 2,
        residual_norm: tau,
        converged: false,
        message: "Maximum iterations reached".to_string(),
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr::CsrMatrix;
    use crate::linalg::interface::AsLinearOperator;

    #[test]
    fn test_cg_identity() {
        // Test CG on identity matrix: I * x = b => x = b
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(&identity, &b, options).unwrap();

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_cg_diagonal() {
        // Test CG on diagonal matrix
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = CGOptions::default();
        let result = cg(&diag, &b, options).unwrap();

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_cg_sparse_matrix() {
        // Test CG on a sparse positive definite matrix
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -1.0, -1.0, 4.0, -1.0, -1.0, -1.0, 4.0];
        let shape = (3, 3);

        let matrix = CsrMatrix::new(data, rows, cols, shape).unwrap();
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 3.0];
        let options = CGOptions::default();
        let result = cg(op.as_ref(), &b, options).unwrap();

        assert!(result.converged);
        // Verify solution by checking Ax = b
        let ax = op.matvec(&result.x).unwrap();
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }

    #[test]
    fn test_bicgstab_identity() {
        // Test BiCGSTAB on identity matrix: I * x = b => x = b
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&identity, &b, options).unwrap();

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_bicgstab_diagonal() {
        // Test BiCGSTAB on diagonal matrix
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(&diag, &b, options).unwrap();

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_bicgstab_non_symmetric() {
        // Test BiCGSTAB on a non-symmetric matrix
        let rows = vec![0, 0, 0, 1, 1, 1, 2, 2, 2];
        let cols = vec![0, 1, 2, 0, 1, 2, 0, 1, 2];
        let data = vec![4.0, -1.0, -2.0, -1.0, 4.0, -1.0, 0.0, -1.0, 3.0];
        let shape = (3, 3);

        let matrix = CsrMatrix::new(data, rows, cols, shape).unwrap();
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 1.0];
        let options = BiCGSTABOptions::default();
        let result = bicgstab(op.as_ref(), &b, options).unwrap();

        assert!(result.converged);
        // Verify solution by checking Ax = b
        let ax = op.matvec(&result.x).unwrap();
        for (axi, bi) in ax.iter().zip(&b) {
            assert!((axi - bi).abs() < 1e-9);
        }
    }

    #[test]
    fn test_lsqr_identity() {
        // Test LSQR on identity matrix
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSQROptions::default();
        let result = lsqr(&identity, &b, options).unwrap();

        println!("Identity test - Expected: {:?}, Got: {:?}", b, result.x);
        println!(
            "Converged: {}, Iterations: {}",
            result.converged, result.iterations
        );

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsqr_diagonal() {
        // Test LSQR on diagonal matrix
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSQROptions::default();
        let result = lsqr(&diag, &b, options).unwrap();

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsmr_identity() {
        // Test LSMR on identity matrix
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = LSMROptions::default();
        let result = lsmr(&identity, &b, options).unwrap();

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsmr_diagonal() {
        // Test LSMR on diagonal matrix
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = LSMROptions::default();
        let result = lsmr(&diag, &b, options).unwrap();

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_tfqmr_identity() {
        // Test TFQMR on identity matrix
        let identity = crate::linalg::interface::IdentityOperator::<f64>::new(3);
        let b = vec![1.0, 2.0, 3.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&identity, &b, options).unwrap();

        assert!(result.converged);
        for (xi, bi) in result.x.iter().zip(&b) {
            assert!((xi - bi).abs() < 1e-3);
        }
    }

    #[test]
    #[ignore] // TODO: Fix TFQMR algorithm - currently not converging correctly
    fn test_tfqmr_diagonal() {
        // Test TFQMR on diagonal matrix
        let diag = crate::linalg::interface::DiagonalOperator::new(vec![2.0, 3.0, 4.0]);
        let b = vec![2.0, 6.0, 12.0];
        let options = TFQMROptions::default();
        let result = tfqmr(&diag, &b, options).unwrap();

        assert!(result.converged);
        let expected = vec![1.0, 2.0, 3.0];
        for (xi, ei) in result.x.iter().zip(&expected) {
            assert!((xi - ei).abs() < 1e-3);
        }
    }

    #[test]
    fn test_lsqr_least_squares() {
        // Test LSQR on overdetermined system (m > n)
        let rows = vec![0, 0, 1, 1, 2, 2, 3, 3];
        let cols = vec![0, 1, 0, 1, 0, 1, 0, 1];
        let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let shape = (4, 2);

        let matrix = CsrMatrix::new(data, rows, cols, shape).unwrap();
        let op = matrix.as_linear_operator();

        let b = vec![1.0, 2.0, 3.0, 4.0];
        let options = LSQROptions::default();
        let result = lsqr(op.as_ref(), &b, options).unwrap();

        // LSQR should converge for overdetermined systems
        assert!(result.converged || result.residual_norm < 1e-6);
    }

    #[test]
    fn test_solver_options_defaults() {
        let lsqr_opts = LSQROptions::<f64>::default();
        assert_eq!(lsqr_opts.max_iter, 1000);
        assert!(lsqr_opts.x0.is_none());

        let lsmr_opts = LSMROptions::<f64>::default();
        assert_eq!(lsmr_opts.max_iter, 1000);
        assert_eq!(lsmr_opts.damp, 0.0);
        assert!(lsmr_opts.x0.is_none());

        let tfqmr_opts = TFQMROptions::<f64>::default();
        assert_eq!(tfqmr_opts.max_iter, 1000);
        assert!(tfqmr_opts.x0.is_none());
        assert!(tfqmr_opts.preconditioner.is_none());
    }
}
1637}