// scirs2_optimize/sparse_numdiff/jacobian.rs

//! Sparse Jacobian computation using finite differences
//!
//! This module provides functions for computing sparse Jacobian matrices
//! using various finite difference methods.

use ndarray::{Array1, ArrayView1};
use scirs2_core::parallel_ops::*;
use scirs2_sparse::{csr_array::CsrArray, sparray::SparseArray};

use super::coloring::determine_column_groups;
use super::finite_diff::{compute_step_sizes, SparseFiniteDiffOptions};
use crate::error::OptimizeError;

14// Helper function to replace get_index and set_value_by_index which are not available in CsrArray
15fn update_sparse_value(matrix: &mut CsrArray<f64>, row: usize, col: usize, value: f64) {
16    // Only update if the position is non-zero in the sparsity pattern and set operation succeeds
17    if matrix.get(row, col) != 0.0 && matrix.set(row, col, value).is_err() {
18        // If this fails, just silently continue
19    }
20}
21
22/// Computes a sparse Jacobian matrix using finite differences
23///
24/// # Arguments
25///
26/// * `func` - Function to differentiate, takes ArrayView1<f64> and returns Array1<f64>
27/// * `x` - Point at which to compute the Jacobian
28/// * `f0` - Function value at `x` (if None, computed internally)
29/// * `sparsity_pattern` - Sparse matrix indicating the known sparsity pattern (if None, dense Jacobian)
30/// * `options` - Options for finite differences computation
31///
32/// # Returns
33///
34/// * `CsrArray<f64>` - Sparse Jacobian matrix in CSR format
35///
36pub fn sparse_jacobian<F>(
37    func: F,
38    x: &ArrayView1<f64>,
39    f0: Option<&Array1<f64>>,
40    sparsity_pattern: Option<&CsrArray<f64>>,
41    options: Option<SparseFiniteDiffOptions>,
42) -> Result<CsrArray<f64>, OptimizeError>
43where
44    F: Fn(&ArrayView1<f64>) -> Array1<f64> + Sync,
45{
46    let options = options.unwrap_or_default();
47
48    // Compute f0 if not provided
49    let f0_owned: Array1<f64>;
50    let f0_ref = match f0 {
51        Some(f) => f,
52        None => {
53            f0_owned = func(x);
54            &f0_owned
55        }
56    };
57
58    let n = x.len(); // Input dimension
59    let m = f0_ref.len(); // Output dimension
60
61    // If no sparsity pattern provided, create a dense one
62    let sparsity_owned: CsrArray<f64>;
63    let sparsity = match sparsity_pattern {
64        Some(p) => p,
65        None => {
66            // Create dense sparsity pattern
67            let mut data = Vec::with_capacity(m * n);
68            let mut rows = Vec::with_capacity(m * n);
69            let mut cols = Vec::with_capacity(m * n);
70
71            for i in 0..m {
72                for j in 0..n {
73                    data.push(1.0);
74                    rows.push(i);
75                    cols.push(j);
76                }
77            }
78
79            sparsity_owned = CsrArray::from_triplets(&rows, &cols, &data, (m, n), false)?;
80            &sparsity_owned
81        }
82    };
83
84    // Choose implementation based on specified method
85    match options.method.as_str() {
86        "2-point" => compute_jacobian_2point(func, x, f0_ref, sparsity, &options),
87        "3-point" => compute_jacobian_3point(func, x, sparsity, &options),
88        "cs" => compute_jacobian_complex_step(func, x, sparsity, &options),
89        _ => Err(OptimizeError::ValueError(format!(
90            "Unknown method: {}. Valid options are '2-point', '3-point', and 'cs'",
91            options.method
92        ))),
93    }
94}
95
96/// Computes Jacobian using 2-point finite differences
97fn compute_jacobian_2point<F>(
98    func: F,
99    x: &ArrayView1<f64>,
100    f0: &Array1<f64>,
101    sparsity: &CsrArray<f64>,
102    options: &SparseFiniteDiffOptions,
103) -> Result<CsrArray<f64>, OptimizeError>
104where
105    F: Fn(&ArrayView1<f64>) -> Array1<f64> + Sync,
106{
107    let _n = x.len();
108    let _m = f0.len();
109
110    // Determine column groups for parallel evaluation using a greedy coloring algorithm
111    // First get the transpose and convert it to a CsrArray
112    let transposed = sparsity.transpose()?;
113    let transposed_csr = match transposed.as_any().downcast_ref::<CsrArray<f64>>() {
114        Some(csr) => csr,
115        None => {
116            return Err(OptimizeError::ValueError(
117                "Failed to downcast to CsrArray".to_string(),
118            ))
119        }
120    };
121    let groups = determine_column_groups(transposed_csr, None, None)?;
122
123    // Compute step sizes
124    let h = compute_step_sizes(x, options);
125
126    // Create result matrix with the same sparsity pattern
127    let (rows, cols, _) = sparsity.find();
128    let m = sparsity.shape().0;
129    let n = sparsity.shape().1;
130    let zeros = vec![0.0; rows.len()];
131    let mut jac = CsrArray::from_triplets(&rows.to_vec(), &cols.to_vec(), &zeros, (m, n), false)?;
132
133    // Create a mutable copy of x for perturbing
134    let mut x_perturbed = x.to_owned();
135
136    // Choose between parallel and serial execution based on options
137    let parallel = options
138        .parallel
139        .as_ref()
140        .map(|p| p.num_workers.unwrap_or(1) > 1)
141        .unwrap_or(false);
142
143    if parallel {
144        // For parallel evaluation, we need to collect the derivatives first and apply them later
145        let derivatives: Vec<(usize, usize, f64)> = groups
146            .par_iter()
147            .flat_map(|group| {
148                let mut derivatives = Vec::new();
149                let mut x_local = x.to_owned();
150
151                for &col in group {
152                    // Apply perturbation for this column
153                    x_local[col] += h[col];
154
155                    // Evaluate function at perturbed point
156                    let f_plus = func(&x_local.view());
157
158                    // Reset perturbation
159                    x_local[col] = x[col];
160
161                    // Compute finite difference approximation for all affected rows
162                    for (row, &f0_val) in f0.iter().enumerate() {
163                        // Calculate derivative and collect it
164                        let derivative = (f_plus[row] - f0_val) / h[col];
165
166                        // Only collect if position is in sparsity pattern
167                        if jac.get(row, col) != 0.0 {
168                            derivatives.push((row, col, derivative));
169                        }
170                    }
171                }
172
173                derivatives
174            })
175            .collect();
176
177        // Now apply all derivatives
178        for (row, col, derivative) in derivatives {
179            if jac.set(row, col, derivative).is_err() {
180                // If this fails, just silently continue
181            }
182        }
183    } else {
184        // Serial version processes each group sequentially
185        for group in &groups {
186            for &col in group {
187                // Apply perturbation
188                x_perturbed[col] += h[col];
189
190                // Evaluate function at perturbed point
191                let f_plus = func(&x_perturbed.view());
192
193                // Reset perturbation
194                x_perturbed[col] = x[col];
195
196                // Compute derivatives for all affected rows
197                for (row, &f0_val) in f0.iter().enumerate() {
198                    // Calculate derivative and update if in sparsity pattern
199                    let derivative = (f_plus[row] - f0_val) / h[col];
200                    update_sparse_value(&mut jac, row, col, derivative);
201                }
202            }
203        }
204    }
205
206    Ok(jac)
207}
208
209/// Computes Jacobian using 3-point finite differences (more accurate but twice as expensive)
210fn compute_jacobian_3point<F>(
211    func: F,
212    x: &ArrayView1<f64>,
213    sparsity: &CsrArray<f64>,
214    options: &SparseFiniteDiffOptions,
215) -> Result<CsrArray<f64>, OptimizeError>
216where
217    F: Fn(&ArrayView1<f64>) -> Array1<f64> + Sync,
218{
219    let _n = x.len();
220    let _m = sparsity.shape().0;
221
222    // Determine column groups for parallel evaluation
223    // First get the transpose and convert it to a CsrArray
224    let transposed = sparsity.transpose()?;
225    let transposed_csr = match transposed.as_any().downcast_ref::<CsrArray<f64>>() {
226        Some(csr) => csr,
227        None => {
228            return Err(OptimizeError::ValueError(
229                "Failed to downcast to CsrArray".to_string(),
230            ))
231        }
232    };
233    let groups = determine_column_groups(transposed_csr, None, None)?;
234
235    // Compute step sizes
236    let h = compute_step_sizes(x, options);
237
238    // Create result matrix with the same sparsity pattern
239    let (rows, cols, _) = sparsity.find();
240    let m = sparsity.shape().0;
241    let n = sparsity.shape().1;
242    let zeros = vec![0.0; rows.len()];
243    let mut jac = CsrArray::from_triplets(&rows.to_vec(), &cols.to_vec(), &zeros, (m, n), false)?;
244
245    // Create a mutable copy of x for perturbing
246    let mut x_perturbed = x.to_owned();
247
248    // Choose between parallel and serial execution
249    let parallel = options
250        .parallel
251        .as_ref()
252        .map(|p| p.num_workers.unwrap_or(1) > 1)
253        .unwrap_or(false);
254
255    if parallel {
256        // For parallel evaluation, we need to collect the derivatives first and apply them later
257        let derivatives: Vec<(usize, usize, f64)> = groups
258            .par_iter()
259            .flat_map(|group| {
260                let mut derivatives = Vec::new();
261                let mut x_local = x.to_owned();
262
263                for &col in group {
264                    // Forward perturbation
265                    x_local[col] += h[col];
266                    let f_plus = func(&x_local.view());
267
268                    // Backward perturbation
269                    x_local[col] = x[col] - h[col];
270                    let f_minus = func(&x_local.view());
271
272                    // Reset
273                    x_local[col] = x[col];
274
275                    // Compute central difference approximation for all affected rows
276                    for row in 0..m {
277                        // Calculate derivative and collect it
278                        let derivative = (f_plus[row] - f_minus[row]) / (2.0 * h[col]);
279
280                        // Only collect if position is in sparsity pattern
281                        if jac.get(row, col) != 0.0 {
282                            derivatives.push((row, col, derivative));
283                        }
284                    }
285                }
286
287                derivatives
288            })
289            .collect();
290
291        // Now apply all derivatives
292        for (row, col, derivative) in derivatives {
293            if jac.set(row, col, derivative).is_err() {
294                // If this fails, just silently continue
295            }
296        }
297    } else {
298        for group in &groups {
299            for &col in group {
300                // Forward perturbation
301                x_perturbed[col] += h[col];
302                let f_plus = func(&x_perturbed.view());
303
304                // Backward perturbation
305                x_perturbed[col] = x[col] - h[col];
306                let f_minus = func(&x_perturbed.view());
307
308                // Reset
309                x_perturbed[col] = x[col];
310
311                // Compute central difference approximation
312                for row in 0..m {
313                    // Calculate derivative and update if in sparsity pattern
314                    let derivative = (f_plus[row] - f_minus[row]) / (2.0 * h[col]);
315                    update_sparse_value(&mut jac, row, col, derivative);
316                }
317            }
318        }
319    }
320
321    Ok(jac)
322}
323
324/// Computes Jacobian using the complex step method (highly accurate)
325///
326/// Note: This requires the function to support complex inputs, which isn't
327/// checked at compile time. Use with caution.
328fn compute_jacobian_complex_step<F>(
329    _func: F,
330    _x: &ArrayView1<f64>,
331    _sparsity: &CsrArray<f64>,
332    _options: &SparseFiniteDiffOptions,
333) -> Result<CsrArray<f64>, OptimizeError>
334where
335    F: Fn(&ArrayView1<f64>) -> Array1<f64> + Sync,
336{
337    // Create error - this is just a stub implementation since complex step
338    // requires much more infrastructure than we can provide here
339    Err(OptimizeError::NotImplementedError(
340        "Complex step method for Jacobian computation is not yet implemented".to_string(),
341    ))
342}