// tensorlogic_infer/higher_order/jacobian.rs
1//! Jacobian matrix computation via finite differences.
2//!
3//! Computes `J[i,j] = ∂f_i/∂x_j` for vector-valued functions f: ℝ^n → ℝ^m.
4
5use ndarray::{Array, ArrayD, IxDyn};
6
7/// Finite difference method for numerical differentiation.
/// Finite difference method for numerical differentiation.
///
/// Central differences cost two function evaluations per input component
/// (versus one for forward) but reduce the truncation error from O(ε) to O(ε²).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FiniteDiffMethod {
    /// Forward differences: (f(x+ε) - f(x)) / ε — O(ε) error, one extra eval per component.
    Forward,
    /// Central differences: (f(x+ε) - f(x-ε)) / (2ε) — O(ε²) error, more accurate.
    Central,
}
15
16/// Configuration for Jacobian computation.
/// Configuration for Jacobian computation.
///
/// Construct via [`JacobianConfig::new`], the builder methods, or `Default`
/// (central differences with ε = 1e-5).
#[derive(Debug, Clone)]
pub struct JacobianConfig {
    /// Step size for finite differences. Must be strictly positive;
    /// validated when the Jacobian is computed, not at construction.
    pub epsilon: f64,
    /// Finite difference method (forward or central).
    pub method: FiniteDiffMethod,
}
24
25impl Default for JacobianConfig {
26    fn default() -> Self {
27        Self {
28            epsilon: 1e-5,
29            method: FiniteDiffMethod::Central,
30        }
31    }
32}
33
34impl JacobianConfig {
35    /// Create a new configuration.
36    pub fn new(epsilon: f64, method: FiniteDiffMethod) -> Self {
37        Self { epsilon, method }
38    }
39
40    /// Builder: set epsilon.
41    pub fn with_epsilon(mut self, eps: f64) -> Self {
42        self.epsilon = eps;
43        self
44    }
45
46    /// Builder: set method.
47    pub fn with_method(mut self, method: FiniteDiffMethod) -> Self {
48        self.method = method;
49        self
50    }
51}
52
53/// Errors that can occur during Jacobian computation.
/// Errors that can occur during Jacobian computation.
///
/// All variants are produced by [`JacobianComputer::compute`] (and methods
/// built on it); display strings come from the `thiserror` derive.
#[derive(Debug, thiserror::Error)]
pub enum JacobianError {
    /// Input tensor must be 1-D (flat vector); carries the offending shape.
    #[error("Input must be 1-D, got shape {0:?}")]
    NonFlatInput(Vec<usize>),

    /// Epsilon must be strictly positive; carries the invalid value.
    #[error("Epsilon must be positive, got {0}")]
    InvalidEpsilon(f64),

    /// The wrapped function returned an error; carries its message.
    #[error("Function evaluation failed: {0}")]
    EvalFailed(String),

    /// Input has zero elements.
    #[error("Empty input")]
    EmptyInput,
}
72
/// Computes Jacobian matrices via finite differences.
///
/// For f: ℝ^n → ℝ^m the Jacobian J has shape `[m, n]` where `J[i,j] = ∂f_i/∂x_j`.
/// Holds the [`JacobianConfig`] (step size and method) used by all computations.
pub struct JacobianComputer {
    // Step size and finite-difference method; fixed at construction.
    config: JacobianConfig,
}
79
80impl JacobianComputer {
81    /// Create a new `JacobianComputer` with the given configuration.
82    pub fn new(config: JacobianConfig) -> Self {
83        Self { config }
84    }
85
86    /// Compute the Jacobian matrix for a vector-valued function.
87    ///
88    /// `input` must be 1-D. Returns a 2-D array of shape `[m, n]`.
89    pub fn compute<F>(&self, input: &ArrayD<f64>, f: F) -> Result<ArrayD<f64>, JacobianError>
90    where
91        F: Fn(&ArrayD<f64>) -> Result<ArrayD<f64>, String>,
92    {
93        self.validate(input)?;
94
95        let n = input.len();
96        let eps = self.config.epsilon;
97
98        // Get output size m via one evaluation.
99        let f0_for_shape = f(input).map_err(JacobianError::EvalFailed)?;
100        let m = f0_for_shape.len();
101
102        let mut jacobian_flat = vec![0.0f64; m * n];
103
104        for j in 0..n {
105            let f_plus = {
106                let mut x_plus = input.clone();
107                x_plus[j] += eps;
108                f(&x_plus).map_err(JacobianError::EvalFailed)?
109            };
110
111            let col: Vec<f64> = match self.config.method {
112                FiniteDiffMethod::Central => {
113                    let mut x_minus = input.clone();
114                    x_minus[j] -= eps;
115                    let f_minus = f(&x_minus).map_err(JacobianError::EvalFailed)?;
116                    f_plus
117                        .iter()
118                        .zip(f_minus.iter())
119                        .map(|(p, m_val)| (p - m_val) / (2.0 * eps))
120                        .collect()
121                }
122                FiniteDiffMethod::Forward => {
123                    let f_base = f(input).map_err(JacobianError::EvalFailed)?;
124                    f_plus
125                        .iter()
126                        .zip(f_base.iter())
127                        .map(|(p, b)| (p - b) / eps)
128                        .collect()
129                }
130            };
131
132            // Fill column j of [m x n] stored row-major.
133            for (i, val) in col.into_iter().enumerate() {
134                jacobian_flat[i * n + j] = val;
135            }
136        }
137
138        let jacobian = Array::from_shape_vec(IxDyn(&[m, n]), jacobian_flat)
139            .map_err(|e| JacobianError::EvalFailed(e.to_string()))?;
140
141        Ok(jacobian)
142    }
143
144    /// Compute gradient for a scalar-output function f: ℝ^n → ℝ.
145    ///
146    /// Returns a 1-D array of shape `[n]`.
147    pub fn compute_gradient<F>(
148        &self,
149        input: &ArrayD<f64>,
150        f: F,
151    ) -> Result<ArrayD<f64>, JacobianError>
152    where
153        F: Fn(&ArrayD<f64>) -> Result<f64, String>,
154    {
155        let scalar_f = move |x: &ArrayD<f64>| -> Result<ArrayD<f64>, String> {
156            let v = f(x)?;
157            Array::from_shape_vec(IxDyn(&[1]), vec![v]).map_err(|e| e.to_string())
158        };
159
160        let jac = self.compute(input, scalar_f)?;
161        // jac has shape [1, n]; squeeze to [n].
162        let n = input.len();
163        let grad_flat: Vec<f64> = jac.iter().cloned().collect();
164        let grad = Array::from_shape_vec(IxDyn(&[n]), grad_flat)
165            .map_err(|e| JacobianError::EvalFailed(e.to_string()))?;
166        Ok(grad)
167    }
168
169    /// Check whether computed and analytical Jacobians agree within `tol`.
170    ///
171    /// Uses element-wise relative error: |computed - analytical| / (|analytical| + 1e-8).
172    pub fn check_accuracy(computed: &ArrayD<f64>, analytical: &ArrayD<f64>, tol: f64) -> bool {
173        if computed.shape() != analytical.shape() {
174            return false;
175        }
176        computed.iter().zip(analytical.iter()).all(|(c, a)| {
177            let rel = (c - a).abs() / (a.abs() + 1e-8);
178            rel <= tol
179        })
180    }
181
182    // ── private helpers ──────────────────────────────────────────────────────
183
184    fn validate(&self, input: &ArrayD<f64>) -> Result<(), JacobianError> {
185        if self.config.epsilon <= 0.0 {
186            return Err(JacobianError::InvalidEpsilon(self.config.epsilon));
187        }
188        if input.ndim() != 1 {
189            return Err(JacobianError::NonFlatInput(input.shape().to_vec()));
190        }
191        if input.is_empty() {
192            return Err(JacobianError::EmptyInput);
193        }
194        Ok(())
195    }
196}
197
198// ── Tests ────────────────────────────────────────────────────────────────────
199
#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::Array;

    // Helper: build a 1-D dynamic-dimension array from a slice.
    fn vec1d(data: &[f64]) -> ArrayD<f64> {
        Array::from_shape_vec(IxDyn(&[data.len()]), data.to_vec()).unwrap()
    }

    #[test]
    fn test_jacobian_identity_function() {
        // f(x) = x, J = I
        let config = JacobianConfig::default();
        let comp = JacobianComputer::new(config);
        let x = vec1d(&[1.0, 2.0, 3.0]);
        let jac = comp
            .compute(&x, |v| Ok(v.clone()))
            .expect("jacobian identity");
        assert_eq!(jac.shape(), &[3, 3]);
        // Every entry must match the identity matrix to tight tolerance,
        // since f is exactly linear (no truncation error).
        for i in 0..3 {
            for j in 0..3 {
                let expected = if i == j { 1.0 } else { 0.0 };
                let err = (jac[[i, j]] - expected).abs();
                assert!(
                    err < 1e-8,
                    "J[{},{}]={} expected {}",
                    i,
                    j,
                    jac[[i, j]],
                    expected
                );
            }
        }
    }

    #[test]
    fn test_jacobian_linear_f2x() {
        // f(x) = 2x, J = 2*I (linear ⇒ finite differences are exact up to rounding)
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[1.0, -1.0]);
        let jac = comp
            .compute(&x, |v| {
                let out: Vec<f64> = v.iter().map(|&a| 2.0 * a).collect();
                Ok(Array::from_shape_vec(IxDyn(&[out.len()]), out).unwrap())
            })
            .expect("jacobian 2x");
        assert!((jac[[0, 0]] - 2.0).abs() < 1e-8);
        assert!((jac[[1, 1]] - 2.0).abs() < 1e-8);
        assert!(jac[[0, 1]].abs() < 1e-8);
        assert!(jac[[1, 0]].abs() < 1e-8);
    }

    #[test]
    fn test_jacobian_quadratic_gradient() {
        // f(x) = x0^2 + x1^2, ∂f/∂x0 = 2*x0
        // Central differences are exact for quadratics (up to rounding).
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[3.0, 4.0]);
        let grad = comp
            .compute_gradient(&x, |v| Ok(v[0] * v[0] + v[1] * v[1]))
            .expect("gradient quadratic");
        assert!((grad[0] - 6.0).abs() < 1e-8, "grad[0]={}", grad[0]);
        assert!((grad[1] - 8.0).abs() < 1e-8, "grad[1]={}", grad[1]);
    }

    #[test]
    fn test_jacobian_output_shape_2x3() {
        // f: ℝ^3 → ℝ^2, J should be [2,3] (rows = outputs, cols = inputs)
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[1.0, 2.0, 3.0]);
        let jac = comp
            .compute(&x, |v| {
                let out = vec![v[0] + v[1], v[1] + v[2]];
                Ok(Array::from_shape_vec(IxDyn(&[2]), out).unwrap())
            })
            .expect("jacobian 2x3");
        assert_eq!(jac.shape(), &[2, 3]);
    }

    #[test]
    fn test_jacobian_central_more_accurate_than_forward() {
        // For a nonlinear function, central diff should yield smaller error vs analytical.
        let x = vec1d(&[1.0]);
        // f(x) = x^3, f'(x) = 3x^2 = 3 at x=1
        let analytical_grad = vec1d(&[3.0]);

        let comp_central =
            JacobianComputer::new(JacobianConfig::new(1e-4, FiniteDiffMethod::Central));
        let comp_forward =
            JacobianComputer::new(JacobianConfig::new(1e-4, FiniteDiffMethod::Forward));

        let central_grad = comp_central
            .compute_gradient(&x, |v| Ok(v[0].powi(3)))
            .unwrap();
        let forward_grad = comp_forward
            .compute_gradient(&x, |v| Ok(v[0].powi(3)))
            .unwrap();

        // O(ε²) central error should not exceed the O(ε) forward error.
        let central_err = (central_grad[0] - analytical_grad[0]).abs();
        let forward_err = (forward_grad[0] - analytical_grad[0]).abs();
        assert!(
            central_err <= forward_err,
            "central_err={} should be <= forward_err={}",
            central_err,
            forward_err
        );
    }

    #[test]
    fn test_jacobian_no_nan() {
        // Smooth function should produce no NaN entries.
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[0.5, 1.5, 2.5]);
        let jac = comp
            .compute(&x, |v| {
                let out: Vec<f64> = v.iter().map(|&a| a.sin()).collect();
                Ok(Array::from_shape_vec(IxDyn(&[out.len()]), out).unwrap())
            })
            .expect("jacobian sin");
        assert!(jac.iter().all(|v| !v.is_nan()), "Jacobian contains NaN");
    }

    #[test]
    fn test_jacobian_check_accuracy() {
        // f(x) = 2*x, analytical J = 2*I_3; check_accuracy should accept.
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[1.0, 2.0, 3.0]);
        let computed = comp
            .compute(&x, |v| {
                let out: Vec<f64> = v.iter().map(|&a| 2.0 * a).collect();
                Ok(Array::from_shape_vec(IxDyn(&[out.len()]), out).unwrap())
            })
            .unwrap();

        // Build analytical 2*I_3
        let mut analytical_flat = vec![0.0f64; 9];
        for i in 0..3 {
            analytical_flat[i * 3 + i] = 2.0;
        }
        let analytical = Array::from_shape_vec(IxDyn(&[3, 3]), analytical_flat).unwrap();

        assert!(JacobianComputer::check_accuracy(
            &computed,
            &analytical,
            1e-6
        ));
    }

    #[test]
    fn test_jacobian_flat_input_required_error() {
        // 2-D input must be rejected with NonFlatInput.
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x_2d = Array::from_shape_vec(IxDyn(&[2, 2]), vec![1.0; 4]).unwrap();
        let result = comp.compute(&x_2d, |v| Ok(v.clone()));
        assert!(matches!(result, Err(JacobianError::NonFlatInput(_))));
    }

    #[test]
    fn test_jacobian_vector_valued() {
        // f: ℝ^3 → ℝ^2 with f = [x0+x1, x1+x2]
        // J = [[1,1,0],[0,1,1]]
        let comp = JacobianComputer::new(JacobianConfig::default());
        let x = vec1d(&[1.0, 2.0, 3.0]);
        let jac = comp
            .compute(&x, |v| {
                Ok(Array::from_shape_vec(IxDyn(&[2]), vec![v[0] + v[1], v[1] + v[2]]).unwrap())
            })
            .unwrap();

        assert!((jac[[0, 0]] - 1.0).abs() < 1e-7);
        assert!((jac[[0, 1]] - 1.0).abs() < 1e-7);
        assert!(jac[[0, 2]].abs() < 1e-7);
        assert!(jac[[1, 0]].abs() < 1e-7);
        assert!((jac[[1, 1]] - 1.0).abs() < 1e-7);
        assert!((jac[[1, 2]] - 1.0).abs() < 1e-7);
    }
}
374}