Skip to main content

scirs2_sparse/
incomplete_factorizations.rs

1//! Incomplete matrix factorizations for preconditioning
2//!
3//! This module provides several incomplete factorization preconditioners:
4//!
5//! - **ILU(0)**: Incomplete LU with zero fill-in
6//! - **ILU(k)**: Incomplete LU with level-k fill-in
7//! - **IC(0)**: Incomplete Cholesky for symmetric positive definite matrices
8//! - **MILU**: Modified ILU with diagonal compensation
9//! - **ILUT**: Incomplete LU with threshold dropping
10//!
11//! All implementations conform to the `Preconditioner` trait from
12//! `crate::iterative_solvers`, enabling them to be used as drop-in
13//! preconditioners for iterative solvers (CG, BiCGSTAB, GMRES, etc.).
14//!
15//! # References
16//!
17//! - Saad, Y. (2003). *Iterative Methods for Sparse Linear Systems*, 2nd ed. SIAM.
18
19use crate::csr::CsrMatrix;
20use crate::error::{SparseError, SparseResult};
21use crate::iterative_solvers::Preconditioner;
22use scirs2_core::ndarray::Array1;
23use scirs2_core::numeric::{Float, NumAssign, SparseElement};
24use std::fmt::Debug;
25use std::iter::Sum;
26
27// ---------------------------------------------------------------------------
28// Helpers
29// ---------------------------------------------------------------------------
30
/// Locate column `col` within the CSR row slice `[row_start, row_end)`.
///
/// Column indices are assumed to be sorted ascending within the row, so the
/// scan stops as soon as an index larger than `col` is seen. Returns the
/// absolute position into `indices` on a hit, `None` otherwise.
fn find_col_in_row(
    indices: &[usize],
    row_start: usize,
    row_end: usize,
    col: usize,
) -> Option<usize> {
    indices[row_start..row_end]
        .iter()
        .enumerate()
        .take_while(|&(_, &c)| c <= col)
        .find(|&(_, &c)| c == col)
        .map(|(offset, _)| row_start + offset)
}
48
49/// Forward solve: L y = b where L is unit-lower triangular stored in CSR.
50fn forward_solve_unit<F: Float + NumAssign + SparseElement>(
51    l_data: &[F],
52    l_indices: &[usize],
53    l_indptr: &[usize],
54    b: &[F],
55    n: usize,
56) -> Vec<F> {
57    let mut y = vec![F::sparse_zero(); n];
58    for i in 0..n {
59        y[i] = b[i];
60        let start = l_indptr[i];
61        let end = l_indptr[i + 1];
62        for pos in start..end {
63            let col = l_indices[pos];
64            y[i] = y[i] - l_data[pos] * y[col];
65        }
66    }
67    y
68}
69
70/// Backward solve: U x = y where U is upper triangular stored in CSR.
71fn backward_solve<F: Float + NumAssign + SparseElement>(
72    u_data: &[F],
73    u_indices: &[usize],
74    u_indptr: &[usize],
75    y: &[F],
76    n: usize,
77) -> SparseResult<Vec<F>> {
78    let mut x = vec![F::sparse_zero(); n];
79    for ii in 0..n {
80        let i = n - 1 - ii;
81        let start = u_indptr[i];
82        let end = u_indptr[i + 1];
83
84        // Find diagonal
85        let mut diag = F::sparse_zero();
86        let mut sum = y[i];
87        for pos in start..end {
88            let col = u_indices[pos];
89            if col == i {
90                diag = u_data[pos];
91            } else if col > i {
92                sum -= u_data[pos] * x[col];
93            }
94        }
95        if diag.abs() < F::epsilon() {
96            return Err(SparseError::SingularMatrix(format!(
97                "Zero diagonal at row {i} during backward solve"
98            )));
99        }
100        x[i] = sum / diag;
101    }
102    Ok(x)
103}
104
105// ---------------------------------------------------------------------------
106// ILU(0) — Incomplete LU with zero fill-in
107// ---------------------------------------------------------------------------
108
/// ILU(0) preconditioner: Incomplete LU factorization with zero fill-in.
///
/// The sparsity pattern of L + U is identical to the sparsity pattern of A.
/// This is the simplest ILU preconditioner and works well for mildly
/// ill-conditioned systems.
///
/// The two factors are stored as separate CSR matrices: `L` holds the
/// strictly lower triangle (its unit diagonal is implicit), while `U` holds
/// the diagonal together with the strict upper triangle.
pub struct Ilu0<F> {
    // Strictly lower-triangular factor L in CSR form (unit diagonal implied).
    l_data: Vec<F>,
    l_indices: Vec<usize>,
    l_indptr: Vec<usize>,
    // Upper-triangular factor U (diagonal included) in CSR form.
    u_data: Vec<F>,
    u_indices: Vec<usize>,
    u_indptr: Vec<usize>,
    // Matrix dimension (the factorization is n x n).
    n: usize,
}
123
impl<F> Ilu0<F>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    /// Construct an ILU(0) preconditioner from a CSR matrix.
    ///
    /// Runs an IKJ-ordered incomplete LU factorization in place on a copy of
    /// the matrix data, keeping exactly the original sparsity pattern (any
    /// update targeting a position outside the pattern is silently dropped),
    /// then splits the result into L (strictly lower, unit diagonal implied)
    /// and U (upper triangle plus diagonal).
    ///
    /// NOTE(review): the early `break`s assume column indices within each CSR
    /// row are sorted ascending (also required by `find_col_in_row`) —
    /// confirm `CsrMatrix` guarantees this invariant.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The matrix is not square
    /// - A zero pivot is encountered
    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "ILU(0) requires a square matrix".to_string(),
            ));
        }

        // Copy matrix data for in-place factorization; the pattern (indices,
        // indptr) is never modified, only the values.
        let mut data = matrix.data.clone();
        let indices = matrix.indices.clone();
        let indptr = matrix.indptr.clone();

        // IKJ variant of ILU(0): each row i is eliminated against every
        // previously factorized row k < i present in row i's pattern.
        for i in 1..n {
            let i_start = indptr[i];
            let i_end = indptr[i + 1];

            for pos_k in i_start..i_end {
                let k = indices[pos_k];
                if k >= i {
                    // Sorted columns: once we reach the diagonal there are no
                    // more lower-triangular entries to eliminate.
                    break;
                }

                // Find diagonal of row k (row k is already factorized, so
                // this is the pivot u_{kk}).
                let k_start = indptr[k];
                let k_end = indptr[k + 1];
                let k_diag_pos = match find_col_in_row(&indices, k_start, k_end, k) {
                    Some(p) => p,
                    None => {
                        return Err(SparseError::SingularMatrix(format!(
                            "Missing diagonal at row {k}"
                        )));
                    }
                };
                let k_diag = data[k_diag_pos];
                if k_diag.abs() < F::epsilon() {
                    return Err(SparseError::SingularMatrix(format!(
                        "Zero pivot at row {k} in ILU(0)"
                    )));
                }

                // a_{ik} = a_{ik} / a_{kk} — this becomes the L multiplier.
                data[pos_k] /= k_diag;
                let multiplier = data[pos_k];

                // Update row i for columns j > k that exist in both rows i and k.
                // Updates at positions not in row i's pattern are dropped
                // (that is the "zero fill-in" in ILU(0)).
                for kj_pos in k_start..k_end {
                    let j = indices[kj_pos];
                    if j <= k {
                        continue;
                    }
                    // Find j in row i
                    if let Some(ij_pos) = find_col_in_row(&indices, i_start, i_end, j) {
                        data[ij_pos] = data[ij_pos] - multiplier * data[kj_pos];
                    }
                }
            }
        }

        // Split into L (strictly lower, unit diagonal implied) and U (upper + diagonal)
        let mut l_data = Vec::new();
        let mut u_data = Vec::new();
        let mut l_indices = Vec::new();
        let mut u_indices = Vec::new();
        let mut l_indptr = vec![0usize];
        let mut u_indptr = vec![0usize];

        for i in 0..n {
            let start = indptr[i];
            let end = indptr[i + 1];
            for pos in start..end {
                let col = indices[pos];
                if col < i {
                    l_indices.push(col);
                    l_data.push(data[pos]);
                } else {
                    u_indices.push(col);
                    u_data.push(data[pos]);
                }
            }
            l_indptr.push(l_indices.len());
            u_indptr.push(u_indices.len());
        }

        Ok(Self {
            l_data,
            l_indices,
            l_indptr,
            u_data,
            u_indices,
            u_indptr,
            n,
        })
    }
}
231
232impl<F> Preconditioner<F> for Ilu0<F>
233where
234    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
235{
236    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
237        if r.len() != self.n {
238            return Err(SparseError::DimensionMismatch {
239                expected: self.n,
240                found: r.len(),
241            });
242        }
243        let r_vec: Vec<F> = r.to_vec();
244        let y = forward_solve_unit(
245            &self.l_data,
246            &self.l_indices,
247            &self.l_indptr,
248            &r_vec,
249            self.n,
250        );
251        let x = backward_solve(&self.u_data, &self.u_indices, &self.u_indptr, &y, self.n)?;
252        Ok(Array1::from_vec(x))
253    }
254}
255
256// ---------------------------------------------------------------------------
257// ILU(k) — Incomplete LU with level-k fill-in
258// ---------------------------------------------------------------------------
259
/// ILU(k) preconditioner: Incomplete LU factorization with level-k fill-in.
///
/// Allows fill-in up to level k beyond the original sparsity pattern.
/// Higher k produces better approximations but uses more memory.
pub struct IluK<F> {
    // Strictly lower-triangular factor L in CSR form (unit diagonal implied).
    l_data: Vec<F>,
    l_indices: Vec<usize>,
    l_indptr: Vec<usize>,
    // Upper-triangular factor U (diagonal included) in CSR form.
    u_data: Vec<F>,
    u_indices: Vec<usize>,
    u_indptr: Vec<usize>,
    // Matrix dimension (the factorization is n x n).
    n: usize,
}
273
impl<F> IluK<F>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    /// Construct an ILU(k) preconditioner from a CSR matrix.
    ///
    /// Each entry carries a fill level: level 0 for original non-zeros, and
    /// `max(level_ik, level_kj) + 1` for fill created when eliminating with
    /// pivot row k. Fill whose level would exceed `fill_level` is discarded,
    /// so `fill_level = 0` reproduces the ILU(0) pattern.
    ///
    /// NOTE(review): a missing or near-zero pivot in row k is silently
    /// skipped (`continue`) rather than reported — verify this lenient
    /// behavior is intended, since ILU(0)/MILU in this module error instead.
    ///
    /// # Arguments
    ///
    /// * `matrix` - Square sparse matrix in CSR format
    /// * `fill_level` - Maximum fill level (0 = ILU(0), 1 = ILU(1), etc.)
    pub fn new(matrix: &CsrMatrix<F>, fill_level: usize) -> SparseResult<Self> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "ILU(k) requires a square matrix".to_string(),
            ));
        }

        // Track level of fill for each entry
        // Level 0 = original non-zero, Level k = k-th order fill-in
        // Store as dense rows for simplicity; for very large matrices a hash map would be better

        let mut row_data: Vec<Vec<(usize, F, usize)>> = Vec::with_capacity(n);
        // Initialise with original matrix entries (level 0)
        for i in 0..n {
            let range = matrix.row_range(i);
            let cols = &matrix.indices[range.clone()];
            let vals = &matrix.data[range];
            let row: Vec<(usize, F, usize)> = cols
                .iter()
                .zip(vals.iter())
                .map(|(&c, &v)| (c, v, 0usize))
                .collect();
            row_data.push(row);
        }

        // IKJ factorization with level tracking
        for i in 1..n {
            let mut row_i = row_data[i].clone();

            // Sort by column so lower-triangular entries come first.
            row_i.sort_by_key(|&(c, _, _)| c);

            // `ki` walks the (growing) row; new fill has column > k, so it
            // always sorts after the positions already processed.
            let mut ki = 0;
            while ki < row_i.len() {
                let (k, _, _) = row_i[ki];
                if k >= i {
                    break;
                }

                // Find diagonal of row k (the pivot u_{kk}).
                let row_k = &row_data[k];
                let k_diag = row_k.iter().find(|&&(c, _, _)| c == k).map(|&(_, v, _)| v);
                let k_diag = match k_diag {
                    Some(d) if d.abs() > F::epsilon() => d,
                    _ => {
                        // Unusable pivot: skip elimination with this row.
                        ki += 1;
                        continue;
                    }
                };

                // multiplier = a_{ik} / a_{kk}
                let level_ik = row_i[ki].2;
                let multiplier = row_i[ki].1 / k_diag;
                row_i[ki].1 = multiplier;

                // For each entry (j, a_{kj}) in row k with j > k
                for &(j, a_kj, level_kj) in row_k.iter() {
                    if j <= k {
                        continue;
                    }
                    // Symbolic level of the would-be fill entry.
                    let new_level = level_ik.max(level_kj) + 1;
                    if new_level > fill_level {
                        continue; // Skip: exceeds fill level
                    }

                    // Find j in row i
                    let existing = row_i.iter().position(|&(c, _, _)| c == j);
                    match existing {
                        Some(pos) => {
                            row_i[pos].1 -= multiplier * a_kj;
                            // An entry keeps the lowest level by which it
                            // can be reached.
                            row_i[pos].2 = row_i[pos].2.min(new_level);
                        }
                        None => {
                            // New fill-in entry
                            row_i.push((j, -multiplier * a_kj, new_level));
                        }
                    }
                }

                ki += 1;
                // Re-sort after potential insertions
                row_i.sort_by_key(|&(c, _, _)| c);
            }

            row_data[i] = row_i;
        }

        // Split into L (strictly lower, unit diagonal implied) and U.
        let mut l_data = Vec::new();
        let mut u_data = Vec::new();
        let mut l_indices = Vec::new();
        let mut u_indices = Vec::new();
        let mut l_indptr = vec![0usize];
        let mut u_indptr = vec![0usize];

        for i in 0..n {
            let row = &row_data[i];
            for &(col, val, _) in row.iter() {
                if col < i {
                    l_indices.push(col);
                    l_data.push(val);
                } else {
                    u_indices.push(col);
                    u_data.push(val);
                }
            }
            l_indptr.push(l_indices.len());
            u_indptr.push(u_indices.len());
        }

        Ok(Self {
            l_data,
            l_indices,
            l_indptr,
            u_data,
            u_indices,
            u_indptr,
            n,
        })
    }
}
406
407impl<F> Preconditioner<F> for IluK<F>
408where
409    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
410{
411    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
412        if r.len() != self.n {
413            return Err(SparseError::DimensionMismatch {
414                expected: self.n,
415                found: r.len(),
416            });
417        }
418        let r_vec: Vec<F> = r.to_vec();
419        let y = forward_solve_unit(
420            &self.l_data,
421            &self.l_indices,
422            &self.l_indptr,
423            &r_vec,
424            self.n,
425        );
426        let x = backward_solve(&self.u_data, &self.u_indices, &self.u_indptr, &y, self.n)?;
427        Ok(Array1::from_vec(x))
428    }
429}
430
431// ---------------------------------------------------------------------------
432// IC(0) — Incomplete Cholesky for SPD matrices
433// ---------------------------------------------------------------------------
434
/// IC(0) preconditioner: Incomplete Cholesky factorization with zero fill-in.
///
/// For symmetric positive definite (SPD) matrices. Computes a lower triangular
/// matrix L such that A ~ L * L^T, retaining only the sparsity pattern of the
/// lower triangle of A.
pub struct Ic0<F> {
    /// Lower triangular factor (including diagonal) stored in CSR.
    l_data: Vec<F>,
    l_indices: Vec<usize>,
    l_indptr: Vec<usize>,
    // Matrix dimension (the factorization is n x n).
    n: usize,
}
447
impl<F> Ic0<F>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    /// Construct an IC(0) preconditioner from an SPD CSR matrix.
    ///
    /// Only the lower triangular part (including diagonal) is used. The
    /// factorization follows the standard formulas
    /// `L_{ik} = (a_{ik} - sum_{j<k} L_{ij} L_{kj}) / L_{kk}` and
    /// `L_{ii} = sqrt(a_{ii} - sum_{j<i} L_{ij}^2)`, restricted to the
    /// existing sparsity pattern (no fill-in).
    ///
    /// # Errors
    ///
    /// Returns an error if the matrix is not square, or if the factorization
    /// encounters a non-positive pivot (matrix is not SPD).
    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
        let n = matrix.rows();
        if n != matrix.cols() {
            return Err(SparseError::ValueError(
                "IC(0) requires a square matrix".to_string(),
            ));
        }

        // Extract lower triangular entries (col <= row); the strict upper
        // triangle is ignored (assumed symmetric).
        let mut rows_lower: Vec<Vec<(usize, F)>> = Vec::with_capacity(n);
        for i in 0..n {
            let range = matrix.row_range(i);
            let cols = &matrix.indices[range.clone()];
            let vals = &matrix.data[range];
            let row: Vec<(usize, F)> = cols
                .iter()
                .zip(vals.iter())
                .filter(|(&c, _)| c <= i)
                .map(|(&c, &v)| (c, v))
                .collect();
            rows_lower.push(row);
        }

        // Cholesky-like factorization, row by row. Rows k < i are already
        // final when row i is processed.
        for i in 0..n {
            // Process row i
            let mut row_i = rows_lower[i].clone();
            row_i.sort_by_key(|&(c, _)| c);

            for ki in 0..row_i.len() {
                let (k, _) = row_i[ki];
                if k >= i {
                    break;
                }

                // Get L_{kk} from row k
                let row_k = &rows_lower[k];
                let l_kk = row_k
                    .iter()
                    .find(|&&(c, _)| c == k)
                    .map(|&(_, v)| v)
                    // NOTE(review): a missing diagonal in row k is treated
                    // as 1 rather than an error — confirm this fallback is
                    // intended (row i's own missing diagonal errors below).
                    .unwrap_or(F::sparse_one());

                if l_kk.abs() < F::epsilon() {
                    // Unusable pivot: leave a_{ik} unscaled and move on.
                    continue;
                }

                // L_{ik} = (a_{ik} - sum_{j<k} L_{ij}*L_{kj}) / L_{kk}
                // Entries at positions < ki already hold final L_{ij} values.
                let mut sum = F::sparse_zero();
                for &(j, l_ij) in row_i.iter() {
                    if j >= k {
                        break;
                    }
                    // Find L_{kj} in row k
                    if let Some(&(_, l_kj)) = row_k.iter().find(|&&(c, _)| c == j) {
                        sum += l_ij * l_kj;
                    }
                }
                let original_val = row_i[ki].1;
                row_i[ki].1 = (original_val - sum) / l_kk;
            }

            // Update diagonal: L_{ii} = sqrt(a_{ii} - sum_{j<i} L_{ij}^2)
            if let Some(diag_pos) = row_i.iter().position(|&(c, _)| c == i) {
                let mut sum_sq = F::sparse_zero();
                for &(j, l_ij) in row_i.iter() {
                    if j >= i {
                        break;
                    }
                    sum_sq += l_ij * l_ij;
                }
                let diag_val = row_i[diag_pos].1 - sum_sq;
                // A non-positive value under the square root means the
                // incomplete factorization broke down (A is not SPD, or the
                // dropped fill made the pivot negative).
                if diag_val <= F::sparse_zero() {
                    return Err(SparseError::ValueError(format!(
                        "Non-positive pivot at row {i} in IC(0) — matrix may not be SPD"
                    )));
                }
                row_i[diag_pos].1 = diag_val.sqrt();
            } else {
                return Err(SparseError::SingularMatrix(format!(
                    "Missing diagonal at row {i}"
                )));
            }

            rows_lower[i] = row_i;
        }

        // Pack the finished rows into CSR arrays.
        let mut l_data = Vec::new();
        let mut l_indices = Vec::new();
        let mut l_indptr = vec![0usize];
        for i in 0..n {
            let mut row = rows_lower[i].clone();
            row.sort_by_key(|&(c, _)| c);
            for &(col, val) in &row {
                l_indices.push(col);
                l_data.push(val);
            }
            l_indptr.push(l_indices.len());
        }

        Ok(Self {
            l_data,
            l_indices,
            l_indptr,
            n,
        })
    }
}
569
impl<F> Preconditioner<F> for Ic0<F>
where
    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
{
    /// Apply M^{-1} r = L^{-T} L^{-1} r.
    ///
    /// Two triangular solves: a row-oriented forward sweep with L, then a
    /// column-oriented backward sweep with L^T (L is stored row-wise, so its
    /// rows serve as the columns of L^T).
    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
        if r.len() != self.n {
            return Err(SparseError::DimensionMismatch {
                expected: self.n,
                found: r.len(),
            });
        }

        let r_vec: Vec<F> = r.to_vec();

        // Forward solve: L y = r
        let mut y = vec![F::sparse_zero(); self.n];
        for i in 0..self.n {
            let start = self.l_indptr[i];
            let end = self.l_indptr[i + 1];
            let mut sum = r_vec[i];

            // Single pass over row i: subtract off-diagonal contributions
            // and capture the diagonal for the final division.
            let mut diag = F::sparse_one();
            for pos in start..end {
                let col = self.l_indices[pos];
                if col < i {
                    sum -= self.l_data[pos] * y[col];
                } else if col == i {
                    diag = self.l_data[pos];
                }
            }
            if diag.abs() < F::epsilon() {
                return Err(SparseError::SingularMatrix(format!(
                    "Zero diagonal at row {i} in IC(0) solve"
                )));
            }
            y[i] = sum / diag;
        }

        // Backward solve: L^T x = y
        // Initialise x with y; contributions are subtracted in place below.
        let mut x = vec![F::sparse_zero(); self.n];
        for ii in 0..self.n {
            let i = self.n - 1 - ii;
            x[i] = y[i];
        }

        // L^T is upper triangular. We need to traverse columns.
        // Since L is stored row-wise, we do a row-based backward sweep.
        for ii in 0..self.n {
            let i = self.n - 1 - ii;
            let start = self.l_indptr[i];
            let end = self.l_indptr[i + 1];

            // Find diagonal
            let mut diag = F::sparse_one();
            for pos in start..end {
                if self.l_indices[pos] == i {
                    diag = self.l_data[pos];
                    break;
                }
            }
            if diag.abs() < F::epsilon() {
                return Err(SparseError::SingularMatrix(format!(
                    "Zero diagonal at row {i} in IC(0) backward solve"
                )));
            }
            // By this point x[i] has already received all updates from rows
            // > i (processed earlier in the backward sweep), so finalise it.
            x[i] /= diag;

            // Subtract contributions to earlier rows: row i of L is column i
            // of L^T, so x[i] feeds into every x[col] with col < i.
            for pos in start..end {
                let col = self.l_indices[pos];
                if col < i {
                    x[col] = x[col] - self.l_data[pos] * x[i];
                }
            }
        }

        Ok(Array1::from_vec(x))
    }
}
650
651// ---------------------------------------------------------------------------
652// MILU — Modified ILU with diagonal compensation
653// ---------------------------------------------------------------------------
654
/// MILU preconditioner: Modified Incomplete LU with diagonal compensation.
///
/// Like ILU(0), but the dropped fill-in is compensated by adding it to the
/// diagonal, preserving the row sums of the original matrix. This improves
/// convergence for problems where row-sum preservation matters (e.g.,
/// M-matrices, diffusion problems).
pub struct Milu<F> {
    // Strictly lower-triangular factor L in CSR form (unit diagonal implied).
    l_data: Vec<F>,
    l_indices: Vec<usize>,
    l_indptr: Vec<usize>,
    // Upper-triangular factor U (diagonal included) in CSR form.
    u_data: Vec<F>,
    u_indices: Vec<usize>,
    u_indptr: Vec<usize>,
    // Matrix dimension (the factorization is n x n).
    n: usize,
}
670
671impl<F> Milu<F>
672where
673    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
674{
675    /// Construct an MILU preconditioner from a CSR matrix.
676    ///
677    /// Dropped fill-in is compensated on the diagonal.
678    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
679        let n = matrix.rows();
680        if n != matrix.cols() {
681            return Err(SparseError::ValueError(
682                "MILU requires a square matrix".to_string(),
683            ));
684        }
685
686        let mut data = matrix.data.clone();
687        let indices = matrix.indices.clone();
688        let indptr = matrix.indptr.clone();
689
690        // Diagonal compensation accumulators
691        let mut diag_comp = vec![F::sparse_zero(); n];
692
693        for i in 1..n {
694            let i_start = indptr[i];
695            let i_end = indptr[i + 1];
696
697            for pos_k in i_start..i_end {
698                let k = indices[pos_k];
699                if k >= i {
700                    break;
701                }
702
703                let k_start = indptr[k];
704                let k_end = indptr[k + 1];
705                let k_diag_pos = match find_col_in_row(&indices, k_start, k_end, k) {
706                    Some(p) => p,
707                    None => {
708                        return Err(SparseError::SingularMatrix(format!(
709                            "Missing diagonal at row {k}"
710                        )));
711                    }
712                };
713                let k_diag = data[k_diag_pos];
714                if k_diag.abs() < F::epsilon() {
715                    return Err(SparseError::SingularMatrix(format!(
716                        "Zero pivot at row {k} in MILU"
717                    )));
718                }
719
720                let multiplier = data[pos_k] / k_diag;
721                data[pos_k] = multiplier;
722
723                // Update row i for columns j > k
724                for kj_pos in k_start..k_end {
725                    let j = indices[kj_pos];
726                    if j <= k {
727                        continue;
728                    }
729                    let fill_val = multiplier * data[kj_pos];
730                    if let Some(ij_pos) = find_col_in_row(&indices, i_start, i_end, j) {
731                        data[ij_pos] -= fill_val;
732                    } else {
733                        // Dropped fill-in: compensate on diagonal
734                        diag_comp[i] += fill_val;
735                    }
736                }
737            }
738        }
739
740        // Apply diagonal compensation
741        for i in 0..n {
742            let range = indptr[i]..indptr[i + 1];
743            if let Some(diag_pos) = find_col_in_row(&indices, range.start, range.end, i) {
744                data[diag_pos] += diag_comp[i];
745            }
746        }
747
748        // Split into L and U
749        let mut l_data = Vec::new();
750        let mut u_data = Vec::new();
751        let mut l_indices = Vec::new();
752        let mut u_indices = Vec::new();
753        let mut l_indptr = vec![0usize];
754        let mut u_indptr = vec![0usize];
755
756        for i in 0..n {
757            let start = indptr[i];
758            let end = indptr[i + 1];
759            for pos in start..end {
760                let col = indices[pos];
761                if col < i {
762                    l_indices.push(col);
763                    l_data.push(data[pos]);
764                } else {
765                    u_indices.push(col);
766                    u_data.push(data[pos]);
767                }
768            }
769            l_indptr.push(l_indices.len());
770            u_indptr.push(u_indices.len());
771        }
772
773        Ok(Self {
774            l_data,
775            l_indices,
776            l_indptr,
777            u_data,
778            u_indices,
779            u_indptr,
780            n,
781        })
782    }
783}
784
785impl<F> Preconditioner<F> for Milu<F>
786where
787    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
788{
789    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
790        if r.len() != self.n {
791            return Err(SparseError::DimensionMismatch {
792                expected: self.n,
793                found: r.len(),
794            });
795        }
796        let r_vec: Vec<F> = r.to_vec();
797        let y = forward_solve_unit(
798            &self.l_data,
799            &self.l_indices,
800            &self.l_indptr,
801            &r_vec,
802            self.n,
803        );
804        let x = backward_solve(&self.u_data, &self.u_indices, &self.u_indptr, &y, self.n)?;
805        Ok(Array1::from_vec(x))
806    }
807}
808
809// ---------------------------------------------------------------------------
810// ILUT — Incomplete LU with Threshold dropping
811// ---------------------------------------------------------------------------
812
/// Configuration for the ILUT preconditioner.
#[derive(Debug, Clone)]
pub struct IlutConfig {
    /// Drop tolerance: entries with |a_{ij}| < tau * ||row_i||_2 are dropped.
    pub drop_tolerance: f64,
    /// Maximum fill per row (in addition to original non-zeros).
    pub max_fill: usize,
}

impl Default for IlutConfig {
    /// Moderate defaults: tau = 1e-4 and at most 20 extra entries per row.
    fn default() -> Self {
        IlutConfig {
            max_fill: 20,
            drop_tolerance: 1e-4,
        }
    }
}
830
/// ILUT preconditioner: Incomplete LU with threshold-based dropping.
///
/// Entries smaller than `tau * ||row_i||` are dropped, and at most `max_fill`
/// additional entries per row are kept. This gives a good balance between
/// fill-in and accuracy.
pub struct Ilut<F> {
    // Strictly lower-triangular factor L in CSR form (unit diagonal implied).
    l_data: Vec<F>,
    l_indices: Vec<usize>,
    l_indptr: Vec<usize>,
    // Upper-triangular factor U (diagonal included) in CSR form.
    u_data: Vec<F>,
    u_indices: Vec<usize>,
    u_indptr: Vec<usize>,
    // Matrix dimension (the factorization is n x n).
    n: usize,
}
845
846impl<F> Ilut<F>
847where
848    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
849{
850    /// Construct an ILUT preconditioner from a CSR matrix.
851    ///
852    /// # Arguments
853    ///
854    /// * `matrix` - Square sparse matrix in CSR format
855    /// * `config` - ILUT configuration (drop tolerance and max fill)
856    pub fn new(matrix: &CsrMatrix<F>, config: &IlutConfig) -> SparseResult<Self> {
857        let n = matrix.rows();
858        if n != matrix.cols() {
859            return Err(SparseError::ValueError(
860                "ILUT requires a square matrix".to_string(),
861            ));
862        }
863
864        let tau = F::from(config.drop_tolerance).ok_or_else(|| {
865            SparseError::ValueError("Failed to convert drop_tolerance".to_string())
866        })?;
867        let max_fill = config.max_fill;
868
869        // Work row-by-row, storing the factorized rows
870        let mut l_rows: Vec<Vec<(usize, F)>> = Vec::with_capacity(n);
871        let mut u_rows: Vec<Vec<(usize, F)>> = Vec::with_capacity(n);
872
873        for i in 0..n {
874            // Load row i into a dense workspace
875            let range = matrix.row_range(i);
876            let cols = &matrix.indices[range.clone()];
877            let vals = &matrix.data[range];
878
879            // Compute row norm for threshold
880            let row_norm: F = vals.iter().map(|v| *v * *v).sum::<F>().sqrt();
881            let threshold = tau * row_norm;
882
883            // Sparse accumulator for this row
884            let mut w: Vec<(usize, F)> = cols
885                .iter()
886                .zip(vals.iter())
887                .map(|(&c, &v)| (c, v))
888                .collect();
889            w.sort_by_key(|&(c, _)| c);
890
891            // Elimination: for each k < i in w
892            let mut ki = 0;
893            while ki < w.len() {
894                let (k, _) = w[ki];
895                if k >= i {
896                    break;
897                }
898
899                // Get u_{kk} (diagonal from row k of U)
900                let u_kk = u_rows
901                    .get(k)
902                    .and_then(|row| row.iter().find(|&&(c, _)| c == k))
903                    .map(|&(_, v)| v);
904
905                let u_kk = match u_kk {
906                    Some(d) if d.abs() > F::epsilon() => d,
907                    _ => {
908                        ki += 1;
909                        continue;
910                    }
911                };
912
913                let multiplier = w[ki].1 / u_kk;
914
915                // Drop small multipliers
916                if multiplier.abs() < threshold {
917                    ki += 1;
918                    continue;
919                }
920                w[ki].1 = multiplier;
921
922                // Update: w -= multiplier * u_row[k] for entries j > k
923                if let Some(u_row_k) = u_rows.get(k) {
924                    let updates: Vec<(usize, F)> = u_row_k
925                        .iter()
926                        .filter(|&&(j, _)| j > k)
927                        .map(|&(j, v)| (j, multiplier * v))
928                        .collect();
929
930                    for (j, fill_val) in updates {
931                        if let Some(pos) = w.iter().position(|&(c, _)| c == j) {
932                            w[pos].1 -= fill_val;
933                        } else {
934                            w.push((j, -fill_val));
935                        }
936                    }
937                    w.sort_by_key(|&(c, _)| c);
938                }
939
940                ki += 1;
941            }
942
943            // Split into L part (col < i) and U part (col >= i)
944            let mut l_part: Vec<(usize, F)> = Vec::new();
945            let mut u_part: Vec<(usize, F)> = Vec::new();
946
947            for &(col, val) in &w {
948                if col < i {
949                    l_part.push((col, val));
950                } else {
951                    u_part.push((col, val));
952                }
953            }
954
955            // Apply dropping to L part
956            l_part.retain(|&(_, v)| v.abs() >= threshold);
957            // Keep at most max_fill entries (by magnitude) plus diagonal
958            if l_part.len() > max_fill {
959                l_part.sort_by(|a, b| {
960                    b.1.abs()
961                        .partial_cmp(&a.1.abs())
962                        .unwrap_or(std::cmp::Ordering::Equal)
963                });
964                l_part.truncate(max_fill);
965                l_part.sort_by_key(|&(c, _)| c);
966            }
967
968            // Apply dropping to U part (keep diagonal always)
969            let diag_entry = u_part.iter().find(|&&(c, _)| c == i).copied();
970            u_part.retain(|&(c, v)| c == i || v.abs() >= threshold);
971            if u_part.len() > max_fill + 1 {
972                // Keep diagonal + top max_fill entries
973                let non_diag: Vec<(usize, F)> =
974                    u_part.iter().filter(|&&(c, _)| c != i).copied().collect();
975                let mut sorted_nd = non_diag;
976                sorted_nd.sort_by(|a, b| {
977                    b.1.abs()
978                        .partial_cmp(&a.1.abs())
979                        .unwrap_or(std::cmp::Ordering::Equal)
980                });
981                sorted_nd.truncate(max_fill);
982                u_part = sorted_nd;
983                if let Some(de) = diag_entry {
984                    u_part.push(de);
985                }
986                u_part.sort_by_key(|&(c, _)| c);
987            }
988
989            // Ensure diagonal exists
990            if !u_part.iter().any(|&(c, _)| c == i) {
991                // If the diagonal was dropped (shouldn't happen normally), add a small value
992                u_part.push((i, F::epsilon() * F::from(100.0).unwrap_or(F::sparse_one())));
993                u_part.sort_by_key(|&(c, _)| c);
994            }
995
996            l_rows.push(l_part);
997            u_rows.push(u_part);
998        }
999
1000        // Pack into CSR arrays
1001        let mut l_data = Vec::new();
1002        let mut l_indices = Vec::new();
1003        let mut l_indptr = vec![0usize];
1004        let mut u_data = Vec::new();
1005        let mut u_indices = Vec::new();
1006        let mut u_indptr = vec![0usize];
1007
1008        for i in 0..n {
1009            for &(col, val) in &l_rows[i] {
1010                l_indices.push(col);
1011                l_data.push(val);
1012            }
1013            l_indptr.push(l_indices.len());
1014
1015            for &(col, val) in &u_rows[i] {
1016                u_indices.push(col);
1017                u_data.push(val);
1018            }
1019            u_indptr.push(u_indices.len());
1020        }
1021
1022        Ok(Self {
1023            l_data,
1024            l_indices,
1025            l_indptr,
1026            u_data,
1027            u_indices,
1028            u_indptr,
1029            n,
1030        })
1031    }
1032}
1033
1034impl<F> Preconditioner<F> for Ilut<F>
1035where
1036    F: Float + NumAssign + Sum + SparseElement + Debug + 'static,
1037{
1038    fn apply(&self, r: &Array1<F>) -> SparseResult<Array1<F>> {
1039        if r.len() != self.n {
1040            return Err(SparseError::DimensionMismatch {
1041                expected: self.n,
1042                found: r.len(),
1043            });
1044        }
1045        let r_vec: Vec<F> = r.to_vec();
1046        let y = forward_solve_unit(
1047            &self.l_data,
1048            &self.l_indices,
1049            &self.l_indptr,
1050            &r_vec,
1051            self.n,
1052        );
1053        let x = backward_solve(&self.u_data, &self.u_indices, &self.u_indptr, &y, self.n)?;
1054        Ok(Array1::from_vec(x))
1055    }
1056}
1057
1058// ---------------------------------------------------------------------------
1059// Tests
1060// ---------------------------------------------------------------------------
1061
#[cfg(test)]
mod tests {
    //! Unit tests covering all incomplete factorizations (ILU(0), ILU(k),
    //! IC(0), MILU, ILUT) on small structured matrices: identity, SPD
    //! tridiagonal, and general diagonally dominant systems.
    use super::*;

    /// Build a well-conditioned SPD tridiagonal matrix with 4 on the
    /// diagonal and -1 on the sub-/super-diagonals.
    fn build_spd_tridiag(n: usize) -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut data = Vec::new();
        for i in 0..n {
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                data.push(-1.0);
            }
            rows.push(i);
            cols.push(i);
            data.push(4.0); // 4 on diagonal for well-conditioning
            if i + 1 < n {
                rows.push(i);
                cols.push(i + 1);
                data.push(-1.0);
            }
        }
        CsrMatrix::new(data, rows, cols, (n, n)).expect("valid matrix")
    }

    /// Build a general (non-symmetric bandwidth) strictly diagonally
    /// dominant matrix: each diagonal entry exceeds the off-diagonal
    /// row sum by exactly 1.0.
    fn build_dd_matrix(n: usize) -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut data = Vec::new();
        for i in 0..n {
            let mut off_diag_sum = 0.0;
            if i > 0 {
                rows.push(i);
                cols.push(i - 1);
                data.push(-0.5);
                off_diag_sum += 0.5;
            }
            if i + 1 < n {
                rows.push(i);
                cols.push(i + 1);
                data.push(-0.3);
                off_diag_sum += 0.3;
            }
            if i + 2 < n {
                rows.push(i);
                cols.push(i + 2);
                data.push(-0.1);
                off_diag_sum += 0.1;
            }
            rows.push(i);
            cols.push(i);
            data.push(off_diag_sum + 1.0); // diagonally dominant
        }
        CsrMatrix::new(data, rows, cols, (n, n)).expect("valid matrix")
    }

    /// Build the n-by-n identity matrix in CSR form.
    fn build_identity(n: usize) -> CsrMatrix<f64> {
        let rows: Vec<usize> = (0..n).collect();
        let cols: Vec<usize> = (0..n).collect();
        CsrMatrix::new(vec![1.0; n], rows, cols, (n, n)).expect("valid identity")
    }

    /// Dense sparse-matrix-vector product y = A x for test verification.
    fn spmv(a: &CsrMatrix<f64>, x: &[f64]) -> Vec<f64> {
        let m = a.rows();
        let mut y = vec![0.0; m];
        for i in 0..m {
            let range = a.row_range(i);
            let cols = &a.indices[range.clone()];
            let vals = &a.data[range];
            // zip avoids per-element bounds checks on the parallel slices
            for (&col, &val) in cols.iter().zip(vals.iter()) {
                y[i] += val * x[col];
            }
        }
        y
    }

    #[test]
    fn test_ilu0_identity() {
        let eye = build_identity(5);
        let ilu = Ilu0::new(&eye).expect("ILU(0) of identity");
        let r = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let x = ilu.apply(&r).expect("apply");
        for i in 0..5 {
            assert!(
                (x[i] - r[i]).abs() < 1e-10,
                "ILU(0) of I should be identity"
            );
        }
    }

    #[test]
    fn test_ilu0_tridiag() {
        let n = 10;
        let a = build_spd_tridiag(n);
        let ilu = Ilu0::new(&a).expect("ILU(0) of tridiag");

        // Apply to a known vector
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();
        let ab = spmv(&a, &b);
        let r = Array1::from_vec(ab);
        let x = ilu.apply(&r).expect("apply");

        // x should be close to b (since ILU(0) is exact for tridiagonal)
        for i in 0..n {
            assert!(
                (x[i] - b[i]).abs() < 1e-6,
                "ILU(0) should be exact for tridiag at index {i}: {} vs {}",
                x[i],
                b[i]
            );
        }
    }

    #[test]
    fn test_ilu0_preconditioner_reduces_residual() {
        let n = 10;
        let a = build_dd_matrix(n);
        let ilu = Ilu0::new(&a).expect("ILU(0)");
        let r = Array1::from_vec(vec![1.0; n]);
        let z = ilu.apply(&r).expect("apply");

        // Smoke check: the preconditioned residual ||r - Az|| must be
        // finite (a degenerate factorization would produce NaN/inf here).
        let az: Vec<f64> = spmv(&a, z.as_slice().expect("slice"));
        let residual: f64 = r
            .iter()
            .zip(az.iter())
            .map(|(&ri, &azi)| (ri - azi).powi(2))
            .sum::<f64>()
            .sqrt();
        assert!(residual.is_finite(), "Residual should be finite");
    }

    #[test]
    fn test_ilu0_error_non_square() {
        let a = CsrMatrix::new(vec![1.0, 2.0], vec![0, 1], vec![0, 1], (2, 3)).expect("valid");
        assert!(Ilu0::<f64>::new(&a).is_err());
    }

    #[test]
    fn test_ilu0_dimension_mismatch() {
        let a = build_identity(3);
        let ilu = Ilu0::new(&a).expect("ILU(0)");
        let r = Array1::from_vec(vec![1.0, 2.0]);
        assert!(ilu.apply(&r).is_err());
    }

    #[test]
    fn test_iluk_level0_matches_ilu0() {
        let n = 8;
        let a = build_spd_tridiag(n);
        let ilu0 = Ilu0::new(&a).expect("ILU(0)");
        let iluk0 = IluK::new(&a, 0).expect("ILU(k=0)");

        let r = Array1::from_vec(vec![1.0; n]);
        let x0 = ilu0.apply(&r).expect("apply ilu0");
        let xk = iluk0.apply(&r).expect("apply iluk");

        for i in 0..n {
            assert!(
                (x0[i] - xk[i]).abs() < 0.1,
                "ILU(k=0) should be close to ILU(0) at {i}: {} vs {}",
                x0[i],
                xk[i]
            );
        }
    }

    #[test]
    fn test_iluk_higher_fill_better() {
        let n = 10;
        let a = build_dd_matrix(n);
        let b = Array1::from_vec(vec![1.0; n]);

        let iluk0 = IluK::new(&a, 0).expect("ILU(0)");
        let iluk1 = IluK::new(&a, 1).expect("ILU(1)");
        let iluk2 = IluK::new(&a, 2).expect("ILU(2)");

        let x0 = iluk0.apply(&b).expect("apply");
        let x1 = iluk1.apply(&b).expect("apply");
        let x2 = iluk2.apply(&b).expect("apply");

        // All fill levels must produce solution vectors of the same
        // dimension as the input system.
        assert!(x0.len() == x1.len() && x1.len() == x2.len());
        // Sanity: every entry is finite at every fill level (a bad
        // factorization would surface as NaN/inf here).
        for i in 0..n {
            assert!(x0[i].is_finite());
            assert!(x1[i].is_finite());
            assert!(x2[i].is_finite());
        }
    }

    #[test]
    fn test_iluk_error_non_square() {
        let a = CsrMatrix::new(vec![1.0], vec![0], vec![0], (1, 2)).expect("valid");
        assert!(IluK::<f64>::new(&a, 0).is_err());
    }

    #[test]
    fn test_ic0_spd_tridiag() {
        let n = 8;
        let a = build_spd_tridiag(n);
        let ic = Ic0::new(&a).expect("IC(0)");

        let r = Array1::from_vec(vec![1.0; n]);
        let x = ic.apply(&r).expect("apply IC(0)");
        for i in 0..n {
            assert!(x[i].is_finite(), "IC(0) result should be finite at {i}");
        }
    }

    #[test]
    fn test_ic0_identity() {
        let eye = build_identity(5);
        let ic = Ic0::new(&eye).expect("IC(0) identity");
        let r = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let x = ic.apply(&r).expect("apply");
        for i in 0..5 {
            assert!(
                (x[i] - r[i]).abs() < 1e-10,
                "IC(0) of I should be identity: {} vs {}",
                x[i],
                r[i]
            );
        }
    }

    #[test]
    fn test_ic0_error_non_square() {
        let a = CsrMatrix::new(vec![1.0, 2.0], vec![0, 1], vec![0, 1], (2, 3)).expect("valid");
        assert!(Ic0::<f64>::new(&a).is_err());
    }

    #[test]
    fn test_milu_tridiag() {
        let n = 8;
        let a = build_spd_tridiag(n);
        let milu = Milu::new(&a).expect("MILU");
        let r = Array1::from_vec(vec![1.0; n]);
        let x = milu.apply(&r).expect("apply MILU");
        for i in 0..n {
            assert!(x[i].is_finite(), "MILU result should be finite at {i}");
        }
    }

    #[test]
    fn test_milu_vs_ilu0_tridiag() {
        // For tridiagonal matrices, MILU and ILU(0) should give similar results
        // since there's no fill-in to drop
        let n = 5;
        let a = build_spd_tridiag(n);
        let ilu = Ilu0::new(&a).expect("ILU(0)");
        let milu = Milu::new(&a).expect("MILU");

        let r = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let x_ilu = ilu.apply(&r).expect("apply ILU");
        let x_milu = milu.apply(&r).expect("apply MILU");

        for i in 0..n {
            assert!(
                (x_ilu[i] - x_milu[i]).abs() < 1e-6,
                "MILU ~ ILU(0) for tridiag at {i}: {} vs {}",
                x_ilu[i],
                x_milu[i]
            );
        }
    }

    #[test]
    fn test_milu_error_non_square() {
        let a = CsrMatrix::new(vec![1.0], vec![0], vec![0], (1, 2)).expect("valid");
        assert!(Milu::<f64>::new(&a).is_err());
    }

    #[test]
    fn test_ilut_basic() {
        let n = 8;
        let a = build_dd_matrix(n);
        let config = IlutConfig {
            drop_tolerance: 1e-4,
            max_fill: 10,
        };
        let ilut = Ilut::new(&a, &config).expect("ILUT");
        let r = Array1::from_vec(vec![1.0; n]);
        let x = ilut.apply(&r).expect("apply ILUT");
        for i in 0..n {
            assert!(x[i].is_finite(), "ILUT result should be finite at {i}");
        }
    }

    #[test]
    fn test_ilut_low_threshold() {
        // Very low threshold (keep everything) should behave like full LU
        let n = 5;
        let a = build_spd_tridiag(n);
        let config = IlutConfig {
            drop_tolerance: 1e-15,
            max_fill: 100,
        };
        let ilut = Ilut::new(&a, &config).expect("ILUT low threshold");
        let b: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();
        let ab = spmv(&a, &b);
        let r = Array1::from_vec(ab);
        let x = ilut.apply(&r).expect("apply");

        for i in 0..n {
            assert!(
                (x[i] - b[i]).abs() < 1e-4,
                "ILUT with low threshold should be near-exact at {i}: {} vs {}",
                x[i],
                b[i]
            );
        }
    }

    #[test]
    fn test_ilut_high_threshold() {
        // High threshold drops many entries, but should still produce finite results
        let n = 8;
        let a = build_dd_matrix(n);
        let config = IlutConfig {
            drop_tolerance: 0.5,
            max_fill: 2,
        };
        let ilut = Ilut::new(&a, &config).expect("ILUT high threshold");
        let r = Array1::from_vec(vec![1.0; n]);
        let x = ilut.apply(&r).expect("apply");
        for i in 0..n {
            assert!(x[i].is_finite(), "ILUT result should be finite at {i}");
        }
    }

    #[test]
    fn test_ilut_error_non_square() {
        let a = CsrMatrix::new(vec![1.0], vec![0], vec![0], (1, 2)).expect("valid");
        assert!(Ilut::<f64>::new(&a, &IlutConfig::default()).is_err());
    }

    #[test]
    fn test_ilut_dimension_mismatch() {
        let a = build_identity(3);
        let ilut = Ilut::new(&a, &IlutConfig::default()).expect("ILUT");
        let r = Array1::from_vec(vec![1.0, 2.0]);
        assert!(ilut.apply(&r).is_err());
    }

    #[test]
    fn test_all_preconditioners_on_same_system() {
        // Cross-check: every preconditioner applied to the same SPD
        // tridiagonal system should recover (or approximate) the known
        // solution, since there is no fill-in to drop.
        let n = 10;
        let a = build_spd_tridiag(n);
        let b_vec: Vec<f64> = (0..n).map(|i| (i + 1) as f64).collect();
        let ab = spmv(&a, &b_vec);
        let r = Array1::from_vec(ab);

        let ilu0 = Ilu0::new(&a).expect("ILU(0)");
        let iluk = IluK::new(&a, 1).expect("ILU(1)");
        let ic0 = Ic0::new(&a).expect("IC(0)");
        let milu = Milu::new(&a).expect("MILU");
        let ilut = Ilut::new(
            &a,
            &IlutConfig {
                drop_tolerance: 1e-10,
                max_fill: 20,
            },
        )
        .expect("ILUT");

        let x_ilu0 = ilu0.apply(&r).expect("apply");
        let x_iluk = iluk.apply(&r).expect("apply");
        let x_ic0 = ic0.apply(&r).expect("apply");
        let x_milu = milu.apply(&r).expect("apply");
        let x_ilut = ilut.apply(&r).expect("apply");

        // All should produce results close to b_vec
        for i in 0..n {
            assert!(
                (x_ilu0[i] - b_vec[i]).abs() < 1e-4,
                "ILU(0) at {i}: {} vs {}",
                x_ilu0[i],
                b_vec[i]
            );
            assert!(
                (x_iluk[i] - b_vec[i]).abs() < 1e-4,
                "ILU(1) at {i}: {} vs {}",
                x_iluk[i],
                b_vec[i]
            );
            assert!(x_ic0[i].is_finite(), "IC(0) finite at {i}");
            assert!(
                (x_milu[i] - b_vec[i]).abs() < 1e-4,
                "MILU at {i}: {} vs {}",
                x_milu[i],
                b_vec[i]
            );
            assert!(
                (x_ilut[i] - b_vec[i]).abs() < 1e-4,
                "ILUT at {i}: {} vs {}",
                x_ilut[i],
                b_vec[i]
            );
        }
    }
}