// cjc_runtime/differentiate.rs
1//! Numerical Differentiation — deterministic finite-difference routines.
2//!
3//! # Determinism Contract
4//!
5//! All routines produce bit-identical results for the same inputs. No
6//! floating-point reductions are performed (each derivative estimate is a
7//! single arithmetic expression), so no accumulator is needed.
8//!
9//! # Functions
10//!
11//! - [`diff_central`] — central difference derivative estimates at interior points
12//! - [`diff_forward`] — forward difference derivative estimates
13//! - [`gradient_1d`] — numerical gradient of uniformly-spaced data
14
15// ---------------------------------------------------------------------------
16// Central Difference
17// ---------------------------------------------------------------------------
18
/// Central difference approximation of the derivative.
///
/// For paired slices `xs` (abscissae) and `ys` (ordinates) of length n >= 3,
/// produces n-2 derivative estimates at the interior abscissae xs[1..n-1]:
///
///   dy/dx[i] ≈ (ys[i+1] - ys[i-1]) / (xs[i+1] - xs[i-1])
///
/// Second-order accurate.
///
/// # Errors
///
/// Returns `Err` when the slices differ in length, fewer than 3 points are
/// given, or the spacing xs[i+1] - xs[i-1] is exactly zero at some interior
/// index (which would divide by zero).
pub fn diff_central(xs: &[f64], ys: &[f64]) -> Result<Vec<f64>, String> {
    let n = xs.len();
    if n != ys.len() {
        return Err(format!(
            "diff_central: xs and ys must have equal length, got {} and {}",
            n,
            ys.len()
        ));
    }
    if n < 3 {
        return Err("diff_central: need at least 3 points".into());
    }

    // Collect into Result so the first zero-spacing error short-circuits,
    // exactly like an early return from an index loop would.
    (1..n - 1)
        .map(|i| {
            let span = xs[i + 1] - xs[i - 1];
            if span == 0.0 {
                Err(format!(
                    "diff_central: zero spacing at index {}: xs[{}]={} == xs[{}]={}",
                    i,
                    i - 1,
                    xs[i - 1],
                    i + 1,
                    xs[i + 1]
                ))
            } else {
                Ok((ys[i + 1] - ys[i - 1]) / span)
            }
        })
        .collect()
}
57
58// ---------------------------------------------------------------------------
59// Forward Difference
60// ---------------------------------------------------------------------------
61
/// Forward difference approximation of the derivative.
///
/// For paired slices `xs` and `ys` of length n >= 2, produces n-1 derivative
/// estimates:
///
///   dy/dx[i] ≈ (ys[i+1] - ys[i]) / (xs[i+1] - xs[i])
///
/// First-order accurate.
///
/// # Errors
///
/// Returns `Err` when the slices differ in length, fewer than 2 points are
/// given, or two consecutive abscissae coincide (zero spacing).
pub fn diff_forward(xs: &[f64], ys: &[f64]) -> Result<Vec<f64>, String> {
    let n = xs.len();
    if n != ys.len() {
        return Err(format!(
            "diff_forward: xs and ys must have equal length, got {} and {}",
            n,
            ys.len()
        ));
    }
    if n < 2 {
        return Err("diff_forward: need at least 2 points".into());
    }

    // Collect into Result so the first zero-spacing error short-circuits.
    (0..n - 1)
        .map(|i| {
            let step = xs[i + 1] - xs[i];
            if step == 0.0 {
                Err(format!(
                    "diff_forward: zero spacing at index {}: xs[{}]={} == xs[{}]={}",
                    i, i, xs[i], i + 1, xs[i + 1]
                ))
            } else {
                Ok((ys[i + 1] - ys[i]) / step)
            }
        })
        .collect()
}
96
97// ---------------------------------------------------------------------------
98// Gradient (uniform spacing)
99// ---------------------------------------------------------------------------
100
/// Numerical gradient of uniformly-spaced 1D data.
///
/// Given samples `ys` and uniform spacing `dx`, returns one gradient value
/// per sample:
/// - forward difference at the first point,
/// - central difference at interior points,
/// - backward difference at the last point.
///
/// This mirrors NumPy's `np.gradient` scheme for 1D uniform grids (for the
/// degenerate single-point input this returns `[0.0]`, whereas NumPy rejects
/// it). An empty input yields an empty vector. `dx` is not validated; a zero
/// spacing produces non-finite values.
pub fn gradient_1d(ys: &[f64], dx: f64) -> Vec<f64> {
    match ys.len() {
        0 => Vec::new(),
        1 => vec![0.0],
        n => {
            // Precompute 1/(2*dx) once so every interior point uses the
            // identical factor (keeps results bit-reproducible).
            let half_step = 1.0 / (2.0 * dx);
            let mut grad = Vec::with_capacity(n);

            // One-sided difference at the left edge.
            grad.push((ys[1] - ys[0]) / dx);
            // Central differences: each 3-wide window contributes one value.
            grad.extend(ys.windows(3).map(|w| (w[2] - w[0]) * half_step));
            // One-sided difference at the right edge.
            grad.push((ys[n - 1] - ys[n - 2]) / dx);

            grad
        }
    }
}
135
136// ---------------------------------------------------------------------------
137// Tests
138// ---------------------------------------------------------------------------
139
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn central_matches_quadratic_derivative() {
        // f(x) = x^2 has f'(x) = 2x; the central difference is exact for
        // quadratics up to rounding, so at x = 2.0 we expect ~4.0.
        let xs: Vec<f64> = (0..=40).map(|i| f64::from(i) * 0.1).collect();
        let ys: Vec<f64> = xs.iter().map(|&x| x * x).collect();
        let derivs = diff_central(&xs, &ys).unwrap();
        // Output index k corresponds to xs[k + 1]; xs[20] = 2.0 gives k = 19.
        let got = derivs[19];
        assert!(
            (got - 4.0).abs() < 1e-10,
            "diff_central at x=2: expected ~4.0, got {got}"
        );
    }

    #[test]
    fn forward_is_exact_on_linear_data() {
        // f(x) = 3x + 1 has constant slope 3, which the forward difference
        // recovers exactly on any grid.
        let xs = [0.0, 1.0, 2.0, 3.0, 4.0];
        let ys = [1.0, 4.0, 7.0, 10.0, 13.0];
        assert!(diff_forward(&xs, &ys)
            .unwrap()
            .iter()
            .all(|d| (d - 3.0).abs() < 1e-12));
    }

    #[test]
    fn gradient_of_squares_matches_numpy_scheme() {
        // f(x) = x^2 at x = 0, 1, 2, 3, 4 with dx = 1:
        // forward (1), central (2, 4, 6), backward (7).
        let grad = gradient_1d(&[0.0, 1.0, 4.0, 9.0, 16.0], 1.0);
        let expected = [1.0, 2.0, 4.0, 6.0, 7.0];
        assert_eq!(grad.len(), expected.len());
        for (got, want) in grad.iter().zip(expected.iter()) {
            assert!((got - want).abs() < 1e-12);
        }
    }

    #[test]
    fn central_rejects_length_mismatch() {
        assert!(diff_central(&[0.0, 1.0, 2.0], &[0.0, 1.0]).is_err());
    }

    #[test]
    fn forward_rejects_single_point() {
        assert!(diff_forward(&[0.0], &[0.0]).is_err());
    }

    #[test]
    fn gradient_of_empty_is_empty() {
        assert!(gradient_1d(&[], 1.0).is_empty());
    }

    #[test]
    fn gradient_of_single_point_is_zero() {
        assert_eq!(gradient_1d(&[5.0], 1.0), vec![0.0]);
    }

    #[test]
    fn repeated_calls_are_bit_identical() {
        // Determinism contract: re-running on the same inputs must reproduce
        // every f64 bit pattern exactly.
        let xs: Vec<f64> = (0..=100).map(|i| i as f64 * 0.01).collect();
        let ys: Vec<f64> = xs.iter().map(|&x| x.sin()).collect();
        let first = diff_central(&xs, &ys).unwrap();
        let second = diff_central(&xs, &ys).unwrap();
        assert!(first
            .iter()
            .zip(second.iter())
            .all(|(a, b)| a.to_bits() == b.to_bits()));
    }
}