rs_sci/calculus.rs
pub struct Calculus;

impl Calculus {
    // numerical differentiation

    /// calculates the first derivative via the central difference method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.powi(2);
    /// let derivative = Calculus::derivative(f, 2.0, 0.0001); // ≈ 4.0
    /// ```
    /// ---
    /// provides better accuracy than forward/backward difference by sampling points on both sides of x
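    /// truncation error is O(h²); too small an h amplifies floating-point round-off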
    pub fn derivative<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
        (f(x + h) - f(x - h)) / (2.0 * h)
    }

    /// computes the second derivative using the central difference method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.powi(3);
    /// let second_deriv = Calculus::second_derivative(f, 2.0, 0.0001); // ≈ 12.0
    /// ```
    /// ---
    /// approximates the second derivative from three equally spaced points
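    /// truncation error is O(h²)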
    pub fn second_derivative<F: Fn(f64) -> f64>(f: F, x: f64, h: f64) -> f64 {
        (f(x + h) - 2.0 * f(x) + f(x - h)) / (h * h)
    }

    /// finds the partial derivative with respect to x using the central difference method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x * x + y * y;
    /// let dx = Calculus::partial_x(f, 1.0, 2.0, 0.0001); // ≈ 2.0
    /// ```
    /// ---
    /// holds y constant while differentiating in the x direction
    pub fn partial_x<F: Fn(f64, f64) -> f64>(f: F, x: f64, y: f64, h: f64) -> f64 {
        (f(x + h, y) - f(x - h, y)) / (2.0 * h)
    }

    /// finds the partial derivative with respect to y using the central difference method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x * x + y * y;
    /// let dy = Calculus::partial_y(f, 1.0, 2.0, 0.0001); // ≈ 4.0
    /// ```
    /// ---
    /// holds x constant while differentiating in the y direction
    pub fn partial_y<F: Fn(f64, f64) -> f64>(f: F, x: f64, y: f64, h: f64) -> f64 {
        (f(x, y + h) - f(x, y - h)) / (2.0 * h)
    }

    // numerical integration

    /// computes a definite integral using the rectangle method (left Riemann sum)
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.powi(2);
    /// let integral = Calculus::integrate_rectangle(f, 0.0, 1.0, 1000); // ≈ 0.333
    /// ```
    /// ---
    /// approximates the area as a sum of n equal-width rectangles sampled at left endpoints
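    /// error decreases as O(1/n) for smooth integrands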
    pub fn integrate_rectangle<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, n: usize) -> f64 {
        let dx = (b - a) / n as f64;
        let mut sum = 0.0;

        for i in 0..n {
            let x = a + dx * i as f64;
            sum += f(x);
        }

        sum * dx
    }

    /// computes a definite integral using the trapezoidal method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.powi(2);
    /// let integral = Calculus::integrate_trapezoid(f, 0.0, 1.0, 1000); // ≈ 0.333
    /// ```
    /// ---
    /// uses a linear approximation between points for better accuracy than the rectangle method
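    /// error decreases as O(1/n²) for smooth integrands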
    pub fn integrate_trapezoid<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, n: usize) -> f64 {
        let dx = (b - a) / n as f64;
        let mut sum = (f(a) + f(b)) / 2.0;

        for i in 1..n {
            let x = a + dx * i as f64;
            sum += f(x);
        }

        sum * dx
    }

    /// computes a definite integral using Simpson's rule
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.powi(2);
    /// let integral = Calculus::integrate_simpson(f, 0.0, 1.0, 1000); // ≈ 0.333
    /// ```
    /// ---
    /// uses a quadratic approximation for higher accuracy than the trapezoidal method
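    /// error decreases as O(1/n⁴) for smooth integrands; panics if n is odd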
    pub fn integrate_simpson<F: Fn(f64) -> f64>(f: F, a: f64, b: f64, n: usize) -> f64 {
        if n % 2 != 0 {
            panic!("n must be even for Simpson's rule");
        }

        let dx = (b - a) / n as f64;
        let mut sum = f(a) + f(b);

        for i in 1..n {
            let x = a + dx * i as f64;
            // interior points get the 4, 2, 4, ... weights of Simpson's 1/3 rule
            sum += if i % 2 == 0 { 2.0 } else { 4.0 } * f(x);
        }

        sum * dx / 3.0
    }

    // ordinary differential equations

    /// solves a first-order ODE using Euler's method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x + y;
    /// let solution = Calculus::euler(f, 0.0, 1.0, 0.1, 10);
    /// ```
    /// ---
    /// the basic numerical ODE method: advances y linearly by h · f(x, y) each step
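    /// global error is O(h); the returned trajectory includes the initial point (x0, y0)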
    pub fn euler<F: Fn(f64, f64) -> f64>(
        f: F,         // dy/dx = f(x, y)
        x0: f64,      // initial x
        y0: f64,      // initial y
        h: f64,       // step size
        steps: usize, // number of steps
    ) -> Vec<(f64, f64)> {
        let mut result = vec![(x0, y0)];
        let mut x = x0;
        let mut y = y0;

        for _ in 0..steps {
            y += h * f(x, y);
            x += h;
            result.push((x, y));
        }

        result
    }

    /// solves a first-order ODE using the 4th-order Runge-Kutta method
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x + y;
    /// let solution = Calculus::runge_kutta4(f, 0.0, 1.0, 0.1, 10);
    /// ```
    /// ---
    /// more accurate than Euler's method because it averages four weighted slope estimates per step
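    /// global error is O(h⁴)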
    pub fn runge_kutta4<F: Fn(f64, f64) -> f64>(
        f: F,
        x0: f64,
        y0: f64,
        h: f64,
        steps: usize,
    ) -> Vec<(f64, f64)> {
        let mut result = vec![(x0, y0)];
        let mut x = x0;
        let mut y = y0;

        for _ in 0..steps {
            // slopes at the start, the midpoint (twice), and the end of the step
            let k1 = h * f(x, y);
            let k2 = h * f(x + h / 2.0, y + k1 / 2.0);
            let k3 = h * f(x + h / 2.0, y + k2 / 2.0);
            let k4 = h * f(x + h, y + k3);

            y += (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0;
            x += h;
            result.push((x, y));
        }

        result
    }

    // series and limits

    /// approximates a function by its Taylor series expansion around a
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64| x.exp();
    /// let df: [&dyn Fn(f64) -> f64; 1] = [&|x: f64| x.exp()]; // derivatives of f
    /// let approx = Calculus::taylor_series(f, &df, 0.0, 0.1, 2);
    /// ```
    /// ---
    /// uses the function and its derivatives to build a polynomial approximation
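    /// supplying fewer derivatives than terms - 1 silently truncates the series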
    pub fn taylor_series<F: Fn(f64) -> f64>(
        f: F,                       // function
        df: &[&dyn Fn(f64) -> f64], // derivative functions, lowest order first
        a: f64,                     // expansion point
        x: f64,                     // evaluation point
        terms: usize,               // number of terms (including the constant term)
    ) -> f64 {
        let mut sum = f(a);
        let mut factorial = 1.0;
        let mut power = 1.0;

        for (n, derivative) in df.iter().take(terms.saturating_sub(1)).enumerate() {
            factorial *= (n + 1) as f64;
            power *= x - a;
            sum += derivative(a) * power / factorial;
        }

        sum
    }

    // vector calculus

    /// computes the gradient of a 2D scalar field
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x * x + y * y;
    /// let grad = Calculus::gradient(f, 1.0, 1.0, 0.0001); // ≈ (2.0, 2.0)
    /// ```
    /// ---
    /// returns the vector of partial derivatives (∂f/∂x, ∂f/∂y)
    pub fn gradient<F: Fn(f64, f64) -> f64>(f: F, x: f64, y: f64, h: f64) -> (f64, f64) {
        (Self::partial_x(&f, x, y, h), Self::partial_y(&f, x, y, h))
    }

    /// calculates the divergence of a 2D vector field (P(x, y), Q(x, y))
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| (x * y, x + y);
    /// let div = Calculus::divergence(f, 1.0, 1.0, 0.0001); // ≈ 2.0
    /// ```
    /// ---
    /// computes the sum of partial derivatives ∂P/∂x + ∂Q/∂y
    pub fn divergence<F: Fn(f64, f64) -> (f64, f64)>(f: F, x: f64, y: f64, h: f64) -> f64 {
        let dp_dx = (f(x + h, y).0 - f(x - h, y).0) / (2.0 * h);
        let dq_dy = (f(x, y + h).1 - f(x, y - h).1) / (2.0 * h);

        dp_dx + dq_dy
    }

    /// finds the z-component of curl for a 2D vector field (P(x, y), Q(x, y))
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| (x * y, x + y);
    /// let curl = Calculus::curl_z(f, 1.0, 1.0, 0.0001); // ≈ 0.0
    /// ```
    /// ---
    /// computes ∂Q/∂x - ∂P/∂y
    pub fn curl_z<F: Fn(f64, f64) -> (f64, f64)>(f: F, x: f64, y: f64, h: f64) -> f64 {
        let dq_dx = (f(x + h, y).1 - f(x - h, y).1) / (2.0 * h);
        let dp_dy = (f(x, y + h).0 - f(x, y - h).0) / (2.0 * h);

        dq_dx - dp_dy
    }

    // optimization

    /// finds a local minimum using gradient descent
    ///
    /// #### Example
    /// ```txt
    /// let f = |x: f64, y: f64| x * x + y * y;
    /// let min = Calculus::gradient_descent(f, 1.0, 1.0, 0.1, 1e-6, 1000); // ≈ (0.0, 0.0)
    /// ```
    /// ---
    /// iteratively steps in the direction of steepest descent until the update falls below tolerance
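    /// the gradient is estimated numerically with a fixed internal step of 1e-6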
    pub fn gradient_descent<F: Fn(f64, f64) -> f64>(
        f: F,
        mut x: f64,
        mut y: f64,
        learning_rate: f64,
        tolerance: f64,
        max_iterations: usize,
    ) -> (f64, f64) {
        for _ in 0..max_iterations {
            let (dx, dy) = Self::gradient(&f, x, y, 1e-6);
            let new_x = x - learning_rate * dx;
            let new_y = y - learning_rate * dy;

            // stop once both coordinates move by less than the tolerance
            if (new_x - x).abs() < tolerance && (new_y - y).abs() < tolerance {
                break;
            }

            x = new_x;
            y = new_y;
        }

        (x, y)
    }
}
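
// A minimal sanity-check sketch, not part of the original module: it exercises a
// few of the routines above against closed-form answers. The tolerances are
// illustrative choices, not calibrated error bounds.
#[cfg(test)]
mod tests {
    use super::Calculus;

    #[test]
    fn derivative_of_square() {
        // d/dx x² = 2x, so the derivative at x = 2 is 4
        let d = Calculus::derivative(|x: f64| x.powi(2), 2.0, 1e-4);
        assert!((d - 4.0).abs() < 1e-6);
    }

    #[test]
    fn simpson_integrates_square() {
        // ∫₀¹ x² dx = 1/3; Simpson's rule is exact for cubics, up to round-off
        let i = Calculus::integrate_simpson(|x: f64| x.powi(2), 0.0, 1.0, 1000);
        assert!((i - 1.0 / 3.0).abs() < 1e-10);
    }

    #[test]
    fn rk4_tracks_exponential() {
        // dy/dx = y with y(0) = 1 has the exact solution eˣ
        let solution = Calculus::runge_kutta4(|_x, y| y, 0.0, 1.0, 0.1, 10);
        let (_, y_end) = *solution.last().unwrap();
        assert!((y_end - 1.0f64.exp()).abs() < 1e-4);
    }

    #[test]
    fn gradient_descent_finds_origin() {
        // x² + y² is minimized at (0, 0)
        let (x, y) = Calculus::gradient_descent(|x, y| x * x + y * y, 1.0, 1.0, 0.1, 1e-9, 10_000);
        assert!(x.abs() < 1e-3 && y.abs() < 1e-3);
    }
}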