// scirs2_sparse/linalg/ic.rs

//! Incomplete Cholesky factorization preconditioner

use crate::csr::CsrMatrix;
use crate::error::{SparseError, SparseResult};
use crate::linalg::interface::LinearOperator;
use scirs2_core::numeric::{Float, NumAssign, SparseElement};
use std::fmt::Debug;
use std::iter::Sum;
9
/// Incomplete Cholesky factorization preconditioner (IC(0))
///
/// This preconditioner computes an incomplete Cholesky factorization L*L^T ≈ A
/// where L is a lower triangular matrix with the same sparsity pattern as the
/// lower triangular part of A (zero fill-in). Applying the preconditioner
/// (via `LinearOperator::matvec`) solves L*L^T*y = x by forward and backward
/// triangular substitution.
pub struct IC0Preconditioner<F> {
    /// Lower triangular factor L in CSR format.
    /// Invariant (established by `new`): every row contains its diagonal
    /// entry, and all stored columns satisfy j <= i.
    l_factor: CsrMatrix<F>,
}
19
20impl<F: Float + SparseElement + NumAssign + Sum + Debug + 'static> IC0Preconditioner<F> {
21    /// Create a new IC(0) preconditioner from a symmetric positive definite matrix
22    pub fn new(matrix: &CsrMatrix<F>) -> SparseResult<Self> {
23        let n = matrix.rows();
24        if n != matrix.cols() {
25            return Err(SparseError::DimensionMismatch {
26                expected: n,
27                found: matrix.cols(),
28            });
29        }
30
31        // Initialize L with the lower triangular part of A
32        let mut l_data = Vec::new();
33        let mut l_indices = Vec::new();
34        let mut l_indptr = vec![0];
35
36        for i in 0..n {
37            let row_start = matrix.indptr[i];
38            let row_end = matrix.indptr[i + 1];
39
40            // Copy lower triangular entries
41            for k in row_start..row_end {
42                let j = matrix.indices[k];
43                if j <= i {
44                    l_indices.push(j);
45                    l_data.push(matrix.data[k]);
46                }
47            }
48            l_indptr.push(l_indices.len());
49        }
50
51        // Perform incomplete Cholesky factorization
52        for i in 0..n {
53            let row_start = l_indptr[i];
54            let row_end = l_indptr[i + 1];
55
56            // Find diagonal element
57            let mut diag_idx = None;
58            for (idx, &col) in l_indices[row_start..row_end].iter().enumerate() {
59                if col == i {
60                    diag_idx = Some(row_start + idx);
61                    break;
62                }
63            }
64
65            let diag_idx = match diag_idx {
66                Some(idx) => idx,
67                None => {
68                    return Err(SparseError::ValueError(format!(
69                        "Missing diagonal element at position {i}"
70                    )));
71                }
72            };
73
74            // Update diagonal element
75            let mut diag_val = l_data[diag_idx];
76
77            // Subtract contributions from previous columns
78            for k in row_start..diag_idx {
79                let j = l_indices[k];
80
81                // Find L[j,j]
82                let j_row_start = l_indptr[j];
83                let j_row_end = l_indptr[j + 1];
84                let mut l_jj = F::sparse_zero();
85
86                for idx in j_row_start..j_row_end {
87                    if l_indices[idx] == j {
88                        l_jj = l_data[idx];
89                        break;
90                    }
91                }
92
93                diag_val -= l_data[k] * l_data[k] / l_jj;
94            }
95
96            // Check if factorization is possible
97            if diag_val <= F::sparse_zero() {
98                return Err(SparseError::ValueError(
99                    "Matrix is not positive definite or factorization broke down".to_string(),
100                ));
101            }
102
103            l_data[diag_idx] = diag_val.sqrt();
104
105            // Update off-diagonal elements
106            for k in (diag_idx + 1)..row_end {
107                let j = l_indices[k];
108                let mut sum = F::sparse_zero();
109
110                // Compute dot product of row i and row j up to column j
111                for p in row_start..diag_idx {
112                    let col_p = l_indices[p];
113
114                    // Find corresponding element in row j
115                    let j_row_start = l_indptr[j];
116                    let j_row_end = l_indptr[j + 1];
117
118                    for q in j_row_start..j_row_end {
119                        if l_indices[q] == col_p {
120                            sum += l_data[p] * l_data[q];
121                            break;
122                        }
123                    }
124                }
125
126                l_data[k] = (l_data[k] - sum) / l_data[diag_idx];
127            }
128        }
129
130        // Create the L factor as a CSR _matrix
131        let l_factor = CsrMatrix::from_raw_csr(l_data, l_indptr, l_indices, (n, n))?;
132
133        Ok(Self { l_factor })
134    }
135}
136
137impl<F: Float + SparseElement + NumAssign + Sum + Debug + 'static> LinearOperator<F>
138    for IC0Preconditioner<F>
139{
140    fn shape(&self) -> (usize, usize) {
141        self.l_factor.shape()
142    }
143
144    fn matvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
145        let n = self.l_factor.rows();
146        if x.len() != n {
147            return Err(SparseError::DimensionMismatch {
148                expected: n,
149                found: x.len(),
150            });
151        }
152
153        // Solve L * L^T * y = x
154        // First solve L * z = x (forward substitution)
155        let mut z = vec![F::sparse_zero(); n];
156        for i in 0..n {
157            let row_start = self.l_factor.indptr[i];
158            let row_end = self.l_factor.indptr[i + 1];
159
160            let mut sum = x[i];
161            let mut diag_val = F::sparse_one();
162
163            for k in row_start..row_end {
164                let j = self.l_factor.indices[k];
165                let val = self.l_factor.data[k];
166
167                match j.cmp(&i) {
168                    std::cmp::Ordering::Less => sum -= val * z[j],
169                    std::cmp::Ordering::Equal => diag_val = val,
170                    std::cmp::Ordering::Greater => {}
171                }
172            }
173
174            z[i] = sum / diag_val;
175        }
176
177        // Then solve L^T * y = z (backward substitution)
178        let mut y = vec![F::sparse_zero(); n];
179        for i in (0..n).rev() {
180            let mut sum = z[i];
181
182            // Find diagonal element
183            let row_start = self.l_factor.indptr[i];
184            let row_end = self.l_factor.indptr[i + 1];
185            let mut diag_val = F::sparse_one();
186
187            for k in row_start..row_end {
188                let j = self.l_factor.indices[k];
189                if j == i {
190                    diag_val = self.l_factor.data[k];
191                    break;
192                }
193            }
194
195            // Subtract contributions from already computed y values
196            // Iterate over y values for indices greater than i
197            // We need to use y as a lookup so we keep the index-based loop
198            // and add a comment explaining why we're not using an iterator pattern
199            #[allow(clippy::needless_range_loop)]
200            for j in (i + 1)..n {
201                // Find L[j,i] (which is L^T[i,j])
202                let j_row_start = self.l_factor.indptr[j];
203                let j_row_end = self.l_factor.indptr[j + 1];
204
205                // Use zip instead of index-based loop for indices and data
206                for (&col, &val) in self.l_factor.indices[j_row_start..j_row_end]
207                    .iter()
208                    .zip(self.l_factor.data[j_row_start..j_row_end].iter())
209                {
210                    if col == i {
211                        sum -= val * y[j];
212                        break;
213                    }
214                }
215            }
216
217            y[i] = sum / diag_val;
218        }
219
220        Ok(y)
221    }
222
223    fn has_adjoint(&self) -> bool {
224        true
225    }
226
227    fn rmatvec(&self, x: &[F]) -> SparseResult<Vec<F>> {
228        // For symmetric preconditioner, adjoint is the same as forward operation
229        self.matvec(x)
230    }
231}
232
#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr::CsrMatrix;

    #[test]
    fn test_ic0_simple() {
        // Tridiagonal symmetric positive definite matrix:
        // A = [4  -1   0]
        //     [-1  4  -1]
        //     [0  -1   4]
        let values = vec![4.0, -1.0, -1.0, 4.0, -1.0, -1.0, 4.0];
        let row_ptr = vec![0, 2, 5, 7];
        let cols = vec![0, 1, 0, 1, 2, 1, 2];
        let a = CsrMatrix::from_raw_csr(values, row_ptr, cols, (3, 3)).unwrap();

        let precond = IC0Preconditioner::new(&a).unwrap();

        // Applying the preconditioner to a right-hand side must succeed and
        // yield finite values (sanity check of both triangular solves).
        let rhs = vec![1.0, 2.0, 3.0];
        let solution = precond.matvec(&rhs).unwrap();
        assert!(solution.iter().all(|&v| v.is_finite()));
    }

    #[test]
    fn test_ic0_not_spd() {
        // Non-SPD matrix — the factorization must be rejected:
        // A = [1   2]
        //     [3   4]
        let values = vec![1.0, 2.0, 3.0, 4.0];
        let row_ptr = vec![0, 2, 4];
        let cols = vec![0, 1, 0, 1];
        let a = CsrMatrix::from_raw_csr(values, row_ptr, cols, (2, 2)).unwrap();

        assert!(IC0Preconditioner::new(&a).is_err());
    }
}