// File: oxiphysics_core/optimization/functions_2.rs
1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5#![allow(clippy::type_complexity)]
6#[allow(unused_imports)]
7use super::functions::*;
8#[allow(unused_imports)]
9use super::types::*;
#[cfg(test)]
mod tests {
    use super::*;

    // ---- Test objective functions ----
    // Analytic minima below are the values the assertions throughout this
    // module check against.

    /// 1-D convex quadratic with its minimum at x = 2 (f(2) = 0).
    fn quad(x: &[f64]) -> f64 {
        (x[0] - 2.0).powi(2)
    }
    /// Analytic gradient of `quad`: f'(x) = 2(x - 2).
    fn quad_grad(x: &[f64]) -> Vec<f64> {
        vec![2.0 * (x[0] - 2.0)]
    }
    /// Separable N-D quadratic bowl; minimum at x_i = i (so (0, 1) in 2-D), f = 0.
    fn bowl(x: &[f64]) -> f64 {
        x.iter()
            .enumerate()
            .map(|(i, xi)| (xi - i as f64).powi(2))
            .sum()
    }
    /// Analytic gradient of `bowl`: df/dx_i = 2(x_i - i).
    fn bowl_grad(x: &[f64]) -> Vec<f64> {
        x.iter()
            .enumerate()
            .map(|(i, xi)| 2.0 * (xi - i as f64))
            .collect()
    }
    /// Classic 2-D Rosenbrock function; global minimum at (1, 1) with f = 0.
    /// Its narrow curved valley is the standard hard case for line-search methods.
    fn rosenbrock(x: &[f64]) -> f64 {
        (1.0 - x[0]).powi(2) + 100.0 * (x[1] - x[0].powi(2)).powi(2)
    }
    /// Analytic gradient of `rosenbrock`.
    fn rosenbrock_grad(x: &[f64]) -> Vec<f64> {
        let dx0 = -2.0 * (1.0 - x[0]) - 400.0 * x[0] * (x[1] - x[0].powi(2));
        let dx1 = 200.0 * (x[1] - x[0].powi(2));
        vec![dx0, dx1]
    }

    // ---- gradient_descent ----
    // NOTE(review): positional args appear to be (f, grad, x0, lr, tol, max_iter)
    // — confirm against `super::functions`.

    #[test]
    fn test_gd_converges_quad() {
        let res = gradient_descent(quad, quad_grad, vec![0.0], 0.1, 1e-6, 1000);
        assert!(res.converged);
        assert!((res.x[0] - 2.0).abs() < 1e-4);
    }
    #[test]
    fn test_gd_fval_at_min() {
        let res = gradient_descent(quad, quad_grad, vec![0.0], 0.1, 1e-8, 5000);
        assert!(res.f_val < 1e-8);
    }
    #[test]
    fn test_gd_bowl_2d() {
        let res = gradient_descent(bowl, bowl_grad, vec![5.0, 5.0], 0.1, 1e-6, 5000);
        assert!(res.converged);
        assert!((res.x[0] - 0.0).abs() < 1e-3);
        assert!((res.x[1] - 1.0).abs() < 1e-3);
    }
    #[test]
    fn test_gd_n_iter_positive() {
        let res = gradient_descent(quad, quad_grad, vec![0.0], 0.1, 1e-6, 1000);
        assert!(res.n_iter > 0);
    }
    // Far start + tiny tolerance + 2 iterations: must report non-convergence
    // and exactly the iteration budget.
    #[test]
    fn test_gd_no_convergence_low_iter() {
        let res = gradient_descent(quad, quad_grad, vec![100.0], 0.01, 1e-12, 2);
        assert!(!res.converged);
        assert_eq!(res.n_iter, 2);
    }

    // ---- adam ----
    // NOTE(review): args look like (f, grad, x0, lr, beta1, beta2, eps, max_iter)
    // — confirm against `super::functions`.

    #[test]
    fn test_adam_converges_quad() {
        let res = adam(quad, quad_grad, vec![0.0], 0.1, 0.9, 0.999, 1e-8, 5000);
        assert!((res.x[0] - 2.0).abs() < 1e-3);
    }
    #[test]
    fn test_adam_bowl_2d() {
        let res = adam(bowl, bowl_grad, vec![5.0, 5.0], 0.1, 0.9, 0.999, 1e-8, 5000);
        assert!((res.x[0] - 0.0).abs() < 1e-2);
        assert!((res.x[1] - 1.0).abs() < 1e-2);
    }
    #[test]
    fn test_adam_fval_reasonable() {
        let res = adam(quad, quad_grad, vec![0.0], 0.1, 0.9, 0.999, 1e-8, 5000);
        assert!(res.f_val < 1e-4);
    }
    #[test]
    fn test_adam_n_iter_positive() {
        let res = adam(quad, quad_grad, vec![0.0], 0.1, 0.9, 0.999, 1e-8, 100);
        assert!(res.n_iter > 0 && res.n_iter <= 100);
    }

    // ---- lbfgs ----

    #[test]
    fn test_lbfgs_quad() {
        let res = lbfgs(quad, quad_grad, vec![0.0], 5, 1e-8, 200);
        assert!(res.converged);
        assert!((res.x[0] - 2.0).abs() < 1e-6);
    }
    #[test]
    fn test_lbfgs_bowl() {
        let res = lbfgs(bowl, bowl_grad, vec![5.0, 5.0], 5, 1e-8, 500);
        assert!(res.converged);
        assert!((res.x[0] - 0.0).abs() < 1e-5);
        assert!((res.x[1] - 1.0).abs() < 1e-5);
    }
    #[test]
    fn test_lbfgs_rosenbrock() {
        let res = lbfgs(rosenbrock, rosenbrock_grad, vec![-1.0, 1.0], 10, 1e-6, 2000);
        assert!((res.x[0] - 1.0).abs() < 1e-3);
        assert!((res.x[1] - 1.0).abs() < 1e-3);
    }
    #[test]
    fn test_lbfgs_fval() {
        let res = lbfgs(quad, quad_grad, vec![0.0], 5, 1e-8, 200);
        assert!(res.f_val < 1e-12);
    }

    // ---- nelder_mead (free function) ----

    #[test]
    fn test_nelder_mead_1d() {
        let res = nelder_mead(|x| (x[0] - 3.0).powi(2), vec![0.0], 1.0, 1e-8, 1000);
        assert!((res.x[0] - 3.0).abs() < 1e-4);
    }
    #[test]
    fn test_nelder_mead_2d() {
        let res = nelder_mead(bowl, vec![5.0, 5.0], 1.0, 1e-8, 10000);
        assert!((res.x[0] - 0.0).abs() < 1e-2, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 1e-2, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_nelder_mead_rosenbrock() {
        let res = nelder_mead(rosenbrock, vec![0.0, 0.0], 0.5, 1e-8, 10000);
        assert!(res.f_val < 1e-4);
    }
    #[test]
    fn test_nelder_mead_converged_flag() {
        let res = nelder_mead(|x| (x[0] - 1.0).powi(2), vec![0.0], 1.0, 1e-8, 2000);
        assert!(res.converged);
    }

    // ---- golden_section (1-D bracketing minimizer, returns (x_min, f_min)) ----

    #[test]
    fn test_golden_section_parabola() {
        let (xm, fm) = golden_section(|x| (x - 3.0).powi(2), 0.0, 6.0, 1e-9);
        assert!((xm - 3.0).abs() < 1e-6);
        assert!(fm < 1e-10);
    }
    #[test]
    fn test_golden_section_negative_side() {
        let (xm, _) = golden_section(|x| (x + 1.0).powi(2), -5.0, 5.0, 1e-9);
        assert!((xm + 1.0).abs() < 1e-6);
    }
    #[test]
    fn test_golden_section_returns_fmin() {
        let (_, fm) = golden_section(|x| x * x + 1.0, -2.0, 2.0, 1e-9);
        assert!((fm - 1.0).abs() < 1e-6);
    }

    // ---- brent (1-D minimizer with 3-point bracket (a, b, c)) ----

    #[test]
    fn test_brent_parabola() {
        let (xm, fm) = brent(|x| (x - 2.0).powi(2), 0.0, 2.0, 4.0, 1e-9);
        assert!((xm - 2.0).abs() < 1e-6);
        assert!(fm < 1e-10);
    }
    // cos has its minimum of -1 at x = pi inside the bracket [0, 2*pi].
    #[test]
    fn test_brent_cosine() {
        let (xm, fm) = brent(
            |x| x.cos(),
            0.0,
            std::f64::consts::PI,
            2.0 * std::f64::consts::PI,
            1e-9,
        );
        assert!((xm - std::f64::consts::PI).abs() < 1e-6);
        assert!((fm + 1.0).abs() < 1e-6);
    }

    // ---- bisection (root finder; None when f(a) and f(b) do not bracket a root) ----

    #[test]
    fn test_bisection_simple() {
        let root = bisection(|x| x - 1.5, 0.0, 3.0, 1e-9);
        assert!(root.is_some());
        assert!((root.unwrap() - 1.5).abs() < 1e-7);
    }
    #[test]
    fn test_bisection_sqrt2() {
        let root = bisection(|x| x * x - 2.0, 1.0, 2.0, 1e-10);
        assert!(root.is_some());
        assert!((root.unwrap() - 2.0_f64.sqrt()).abs() < 1e-8);
    }
    // x^2 + 1 is strictly positive: no sign change, so no root is reported.
    #[test]
    fn test_bisection_no_bracket() {
        let root = bisection(|x| x * x + 1.0, -2.0, 2.0, 1e-9);
        assert!(root.is_none());
    }

    // ---- numerical_gradient / numerical_hessian (finite differences
    //      cross-checked against the analytic derivatives above) ----

    #[test]
    fn test_numerical_gradient_quad() {
        let g = numerical_gradient(quad, &[3.0], 1e-5);
        assert!((g[0] - 2.0).abs() < 1e-6);
    }
    #[test]
    fn test_numerical_gradient_bowl() {
        let x = vec![2.0, 3.0];
        let g = numerical_gradient(bowl, &x, 1e-5);
        assert!((g[0] - 4.0).abs() < 1e-6);
        assert!((g[1] - 4.0).abs() < 1e-6);
    }
    #[test]
    fn test_numerical_hessian_quad() {
        let h = numerical_hessian(quad, &[2.0], 1e-4);
        assert!((h[0][0] - 2.0).abs() < 1e-4);
    }
    // bowl is separable, so the Hessian is diag(2, 2) with zero off-diagonals.
    #[test]
    fn test_numerical_hessian_bowl_2d() {
        let h = numerical_hessian(bowl, &[0.0, 1.0], 1e-4);
        assert!((h[0][0] - 2.0).abs() < 1e-4);
        assert!((h[1][1] - 2.0).abs() < 1e-4);
        assert!(h[0][1].abs() < 1e-4);
    }

    // ---- backtracking_line_search ----

    // gd = g(x0) . dir = 2*(0-2) * 1 = -4; the final assert is the Armijo
    // (sufficient-decrease) condition with c1 = 1e-4.
    #[test]
    fn test_backtracking_decreases_f() {
        let x = vec![0.0];
        let dir = vec![1.0];
        let f0 = quad(&x);
        let gd = -2.0 * 2.0;
        let alpha = backtracking_line_search(quad, &x, &dir, f0, gd, 1.0, 0.5, 1e-4);
        let x_new: Vec<f64> = x
            .iter()
            .zip(dir.iter())
            .map(|(xi, di)| xi + alpha * di)
            .collect();
        assert!(quad(&x_new) <= f0 + 1e-4 * alpha * gd);
    }
    // gd = g(5) . (-1) = 6 * -1 = -6 (a descent direction).
    #[test]
    fn test_backtracking_returns_positive_alpha() {
        let x = vec![5.0];
        let dir = vec![-1.0];
        let f0 = quad(&x);
        let gd = -6.0;
        let alpha = backtracking_line_search(quad, &x, &dir, f0, gd, 1.0, 0.5, 1e-4);
        assert!(alpha > 0.0);
    }

    // ---- constrained_min_box (box-constrained minimization) ----

    // Unconstrained minimum (x = 2) lies inside [0, 5]: bounds are inactive.
    #[test]
    fn test_constrained_unconstrained_minimum_inside() {
        let res = constrained_min_box(quad, quad_grad, vec![0.0], &[0.0], &[5.0], 1e-7, 1000);
        assert!((res.x[0] - 2.0).abs() < 1e-4);
    }
    // Unconstrained minimum (x = 5) lies outside [0, 3]: solution clamps to the bound.
    #[test]
    fn test_constrained_minimum_at_boundary() {
        let f = |x: &[f64]| (x[0] - 5.0).powi(2);
        let g = |x: &[f64]| vec![2.0 * (x[0] - 5.0)];
        let res = constrained_min_box(f, g, vec![0.0], &[0.0], &[3.0], 1e-7, 1000);
        assert!((res.x[0] - 3.0).abs() < 1e-4);
    }

    // ---- gradient_descent_momentum ----

    #[test]
    fn test_momentum_converges_quad() {
        let res = gradient_descent_momentum(quad, quad_grad, vec![0.0], 0.05, 0.9, 1e-6, 5000);
        assert!(res.converged || res.f_val < 1e-6, "f_val={}", res.f_val);
        assert!((res.x[0] - 2.0).abs() < 1e-3, "x={}", res.x[0]);
    }
    #[test]
    fn test_momentum_bowl_2d() {
        let res = gradient_descent_momentum(bowl, bowl_grad, vec![5.0, 5.0], 0.05, 0.9, 1e-6, 5000);
        assert!((res.x[0] - 0.0).abs() < 1e-2, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 1e-2, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_momentum_n_iter_positive() {
        let res = gradient_descent_momentum(quad, quad_grad, vec![0.0], 0.05, 0.9, 1e-6, 100);
        assert!(res.n_iter > 0 && res.n_iter <= 100);
    }
    // Starting exactly at the minimum: f must stay at ~0.
    #[test]
    fn test_momentum_zero_at_minimum() {
        let res = gradient_descent_momentum(quad, quad_grad, vec![2.0], 0.05, 0.9, 1e-8, 100);
        assert!(res.f_val < 1e-8);
    }

    // ---- simulated_annealing (stochastic: tolerances are deliberately loose) ----

    #[test]
    fn test_sa_quad_finds_minimum() {
        let res = simulated_annealing(quad, vec![0.0], 10.0, 0.95, 0.5, 10000);
        assert!((res.x[0] - 2.0).abs() < 0.5, "sa x[0]={}", res.x[0]);
    }
    #[test]
    fn test_sa_returns_finite() {
        let res = simulated_annealing(bowl, vec![5.0, 5.0], 5.0, 0.99, 0.5, 5000);
        assert!(res.f_val.is_finite());
        assert!(res.x.iter().all(|x| x.is_finite()));
    }
    #[test]
    fn test_sa_converges_flag_at_low_temp() {
        let res = simulated_annealing(quad, vec![2.0], 1.0, 0.5, 0.1, 1000);
        assert!(res.converged);
    }
    // Internal consistency: reported f_val must equal f evaluated at reported x.
    #[test]
    fn test_sa_f_val_matches_returned_x() {
        let res = simulated_annealing(quad, vec![5.0], 2.0, 0.9, 0.3, 2000);
        let expected = quad(&res.x);
        assert!((res.f_val - expected).abs() < 1e-12);
    }

    // ---- particle_swarm ----
    // NOTE(review): args look like (f, lower, upper, n_particles, inertia, c1, c2,
    // max_iter) — confirm against `super::functions`.

    #[test]
    fn test_pso_quad() {
        let res = particle_swarm(quad, &[-10.0], &[10.0], 20, 0.7, 2.0, 2.0, 200);
        assert!((res.x[0] - 2.0).abs() < 0.5, "pso x[0]={}", res.x[0]);
    }
    #[test]
    fn test_pso_bowl_2d() {
        let res = particle_swarm(bowl, &[-10.0, -10.0], &[10.0, 10.0], 30, 0.7, 2.0, 2.0, 300);
        assert!(res.f_val < 2.0, "pso f_val={}", res.f_val);
    }
    #[test]
    fn test_pso_respects_bounds() {
        let res = particle_swarm(quad, &[0.0], &[1.0], 20, 0.7, 2.0, 2.0, 200);
        assert!(
            res.x[0] >= 0.0 && res.x[0] <= 1.0,
            "out of bounds: x={}",
            res.x[0]
        );
    }
    #[test]
    fn test_pso_n_iter_equals_max() {
        let res = particle_swarm(quad, &[-5.0], &[5.0], 10, 0.7, 2.0, 2.0, 50);
        assert_eq!(res.n_iter, 50);
    }

    // ---- genetic_algorithm ----

    #[test]
    fn test_ga_quad_finds_minimum() {
        let res = genetic_algorithm(quad, &[-10.0], &[10.0], 50, 0.8, 0.1, 0.5, 200);
        assert!((res.x[0] - 2.0).abs() < 1.0, "ga x[0]={}", res.x[0]);
    }
    #[test]
    fn test_ga_bowl_2d() {
        let res = genetic_algorithm(bowl, &[-5.0, -5.0], &[5.0, 5.0], 80, 0.8, 0.2, 0.5, 500);
        assert!(res.f_val < 3.0, "ga f_val={}", res.f_val);
    }
    #[test]
    fn test_ga_respects_bounds() {
        let res = genetic_algorithm(quad, &[0.0], &[1.5], 30, 0.8, 0.1, 0.2, 100);
        assert!(
            res.x[0] >= 0.0 && res.x[0] <= 1.5,
            "out of bounds: x={}",
            res.x[0]
        );
    }
    #[test]
    fn test_ga_n_iter_equals_max_generations() {
        let res = genetic_algorithm(quad, &[-5.0], &[5.0], 10, 0.8, 0.1, 0.5, 20);
        assert_eq!(res.n_iter, 20);
    }

    // ---- lbfgsb (bound-constrained L-BFGS) ----

    #[test]
    fn test_lbfgsb_quad_inside_bounds() {
        let res = lbfgsb(quad, quad_grad, vec![0.0], &[0.0], &[5.0], 5, 1e-8, 200);
        assert!(res.converged);
        assert!((res.x[0] - 2.0).abs() < 1e-5, "x={}", res.x[0]);
    }
    // Unconstrained minimum (x = 5) outside [-3, 3]: solution pins to the upper bound.
    #[test]
    fn test_lbfgsb_quad_min_outside_bounds() {
        let f = |x: &[f64]| (x[0] - 5.0).powi(2);
        let g = |x: &[f64]| vec![2.0 * (x[0] - 5.0)];
        let res = lbfgsb(f, g, vec![0.0], &[-3.0], &[3.0], 5, 1e-8, 200);
        assert!((res.x[0] - 3.0).abs() < 1e-4, "x={}", res.x[0]);
    }
    #[test]
    fn test_lbfgsb_bowl_2d() {
        let res = lbfgsb(
            bowl,
            bowl_grad,
            vec![4.0, 4.0],
            &[-10.0, -10.0],
            &[10.0, 10.0],
            5,
            1e-8,
            500,
        );
        assert!(res.converged, "lbfgsb should converge on bowl");
        assert!((res.x[0] - 0.0).abs() < 1e-4, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 1e-4, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_lbfgsb_rosenbrock() {
        let res = lbfgsb(
            rosenbrock,
            rosenbrock_grad,
            vec![-1.0, 1.0],
            &[-5.0, -5.0],
            &[5.0, 5.0],
            10,
            1e-6,
            2000,
        );
        assert!((res.x[0] - 1.0).abs() < 0.01, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 0.01, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_lbfgsb_respects_bounds() {
        let res = lbfgsb(quad, quad_grad, vec![0.0], &[0.0], &[1.0], 5, 1e-8, 200);
        assert!(
            res.x[0] >= 0.0 && res.x[0] <= 1.0 + 1e-10,
            "out of bounds: x={}",
            res.x[0]
        );
    }

    // ---- NelderMead (struct API) ----

    #[test]
    fn test_nelder_mead_struct_minimizes_quadratic() {
        let mut nm = NelderMead::new(vec![0.0], 1.0);
        let best = nm.minimize(|x| (x[0] - 3.0).powi(2), 1000, 1e-8);
        assert!(
            (best[0] - 3.0).abs() < 1e-4,
            "NelderMead struct: x={}",
            best[0]
        );
    }
    #[test]
    fn test_nelder_mead_struct_2d() {
        let mut nm = NelderMead::new(vec![5.0, 5.0], 1.0);
        let best = nm.minimize(bowl, 5000, 1e-8);
        assert!((best[0] - 0.0).abs() < 1e-2, "x[0]={}", best[0]);
        assert!((best[1] - 1.0).abs() < 1e-2, "x[1]={}", best[1]);
    }
    // An n-dimensional simplex has n + 1 vertices (3 for n = 2).
    #[test]
    fn test_nelder_mead_struct_simplex_size() {
        let nm = NelderMead::new(vec![1.0, 2.0], 0.5);
        assert_eq!(nm.simplex.len(), 3);
        assert_eq!(nm.n, 2);
    }

    // ---- DifferentialEvolution (struct API; uses the rand 0.9 `rand::rng()` API) ----

    #[test]
    fn test_de_evolves_toward_minimum() {
        let mut rng = rand::rng();
        let bounds = vec![(-5.0_f64, 5.0_f64)];
        let mut de = DifferentialEvolution::new(20, 1, &bounds, &mut rng);
        for _ in 0..100 {
            de.step(quad, &mut rng);
        }
        let best = de.best(quad);
        assert!((best[0] - 2.0).abs() < 1.0, "DE best: x={}", best[0]);
    }
    #[test]
    fn test_de_population_size() {
        let mut rng = rand::rng();
        let bounds = vec![(-5.0, 5.0), (-5.0, 5.0)];
        let de = DifferentialEvolution::new(15, 2, &bounds, &mut rng);
        assert_eq!(de.pop.len(), 15);
        assert_eq!(de.pop[0].len(), 2);
    }
    // DE is elitist in this check: the best fitness must never get worse.
    #[test]
    fn test_de_fitness_improves() {
        let mut rng = rand::rng();
        let bounds = vec![(-5.0_f64, 5.0_f64)];
        let mut de = DifferentialEvolution::new(20, 1, &bounds, &mut rng);
        let init_best = de.best(quad);
        let f_init = quad(&init_best);
        for _ in 0..50 {
            de.step(quad, &mut rng);
        }
        let final_best = de.best(quad);
        let f_final = quad(&final_best);
        assert!(
            f_final <= f_init + 1e-10,
            "DE should not worsen: f_init={f_init}, f_final={f_final}"
        );
    }

    // ---- TrustRegion (struct API; step-by-step iteration) ----

    #[test]
    fn test_trust_region_reduces_residual() {
        let mut tr = TrustRegion::new(1.0, 0.1);
        let mut x = vec![5.0_f64];
        let f_init = quad(&x);
        for _ in 0..50 {
            x = tr.step(quad, quad_grad, &x);
        }
        let f_final = quad(&x);
        assert!(
            f_final < f_init,
            "TrustRegion: f did not decrease; f_init={f_init}, f_final={f_final}"
        );
    }
    #[test]
    fn test_trust_region_converges_quad() {
        let mut tr = TrustRegion::new(1.0, 0.1);
        let mut x = vec![0.0_f64];
        for _ in 0..200 {
            x = tr.step(quad, quad_grad, &x);
        }
        assert!((x[0] - 2.0).abs() < 0.5, "TrustRegion: x={}", x[0]);
    }

    // ---- conjugate_gradient_minimize ----

    #[test]
    fn test_cg_minimizes_quadratic() {
        let x = conjugate_gradient_minimize(quad, quad_grad, vec![0.0], 500, 1e-8);
        assert!((x[0] - 2.0).abs() < 1e-4, "CG: x={}", x[0]);
    }
    #[test]
    fn test_cg_minimizes_bowl_2d() {
        let x = conjugate_gradient_minimize(bowl, bowl_grad, vec![5.0, 5.0], 2000, 1e-8);
        assert!((x[0] - 0.0).abs() < 1e-3, "CG bowl: x[0]={}", x[0]);
        assert!((x[1] - 1.0).abs() < 1e-3, "CG bowl: x[1]={}", x[1]);
    }
    // Starting at the minimum must be a fixed point (no spurious movement).
    #[test]
    fn test_cg_already_at_minimum() {
        let x = conjugate_gradient_minimize(quad, quad_grad, vec![2.0], 100, 1e-12);
        assert!((x[0] - 2.0).abs() < 1e-10, "CG at minimum: x={}", x[0]);
    }

    // ---- wolfe_line_search ----

    #[test]
    fn test_wolfe_returns_positive_alpha() {
        let x = vec![5.0_f64];
        let d = vec![-1.0_f64];
        let f0 = quad(&x);
        let g0d: f64 = quad_grad(&x)
            .iter()
            .zip(d.iter())
            .map(|(g, di)| g * di)
            .sum();
        let alpha = wolfe_line_search(quad, quad_grad, &x, &d, f0, g0d, 1e-4, 0.9, 50);
        assert!(alpha > 0.0, "alpha={alpha}");
    }
    // Checks the Armijo half of the Wolfe conditions at the returned step.
    #[test]
    fn test_wolfe_armijo_satisfied() {
        let x = vec![5.0_f64];
        let d = vec![-1.0_f64];
        let f0 = quad(&x);
        let g0d: f64 = quad_grad(&x)
            .iter()
            .zip(d.iter())
            .map(|(g, di)| g * di)
            .sum();
        let c1 = 1e-4;
        let alpha = wolfe_line_search(quad, quad_grad, &x, &d, f0, g0d, c1, 0.9, 50);
        let x_new = vec![x[0] + alpha * d[0]];
        assert!(quad(&x_new) <= f0 + c1 * alpha * g0d, "Armijo violated");
    }
    // Steepest-descent direction in 2-D: the accepted step must strictly decrease f.
    #[test]
    fn test_wolfe_bowl_2d() {
        let x = vec![5.0, 5.0];
        let g = bowl_grad(&x);
        let d: Vec<f64> = g.iter().map(|gi| -*gi).collect();
        let f0 = bowl(&x);
        let g0d: f64 = g.iter().zip(d.iter()).map(|(gi, di)| gi * di).sum();
        let alpha = wolfe_line_search(bowl, bowl_grad, &x, &d, f0, g0d, 1e-4, 0.9, 50);
        let x_new: Vec<f64> = x
            .iter()
            .zip(d.iter())
            .map(|(xi, di)| xi + alpha * di)
            .collect();
        assert!(bowl(&x_new) < f0, "Wolfe step should decrease f");
    }

    // ---- augmented_lagrangian (equality-constrained minimization) ----

    // min x^2 subject to x = 3: the constrained optimum is x = 3.
    #[test]
    fn test_aug_lagrangian_equality_constraint() {
        let f = |x: &[f64]| x[0] * x[0];
        let gf = |x: &[f64]| vec![2.0 * x[0]];
        let c = |x: &[f64]| x[0] - 3.0;
        let dc = |_x: &[f64]| vec![1.0_f64];
        let res = augmented_lagrangian(f, gf, &[c], &[dc], vec![0.0], 1.0, 20, 50, 1e-6);
        assert!((res.x[0] - 3.0).abs() < 0.1, "aug_lag x={}", res.x[0]);
    }
    // With an empty constraint set it must behave like plain unconstrained minimization.
    #[test]
    fn test_aug_lagrangian_unconstrained_reduces_to_min() {
        let f = |x: &[f64]| x[0] * x[0];
        let gf = |x: &[f64]| vec![2.0 * x[0]];
        let constraints: Vec<fn(&[f64]) -> f64> = vec![];
        let grad_constraints: Vec<fn(&[f64]) -> Vec<f64>> = vec![];
        let res = augmented_lagrangian(
            f,
            gf,
            &constraints,
            &grad_constraints,
            vec![5.0],
            1.0,
            20,
            100,
            1e-6,
        );
        assert!(res.x[0].abs() < 1.0, "x={}", res.x[0]);
    }

    // ---- coordinate_descent (derivative-free, axis-by-axis) ----

    #[test]
    fn test_coordinate_descent_quadratic() {
        let res = coordinate_descent(quad, vec![5.0], 8.0, 1e-6, 500);
        assert!((res.x[0] - 2.0).abs() < 0.1, "coord_desc x={}", res.x[0]);
    }
    #[test]
    fn test_coordinate_descent_bowl_2d() {
        let res = coordinate_descent(bowl, vec![5.0, 5.0], 10.0, 1e-5, 1000);
        assert!(res.f_val < 1.0, "coord_desc f_val={}", res.f_val);
    }
    #[test]
    fn test_coordinate_descent_returns_finite() {
        let res = coordinate_descent(
            |x| (x[0] - 1.0).powi(2) + (x[1] + 2.0).powi(2),
            vec![0.0, 0.0],
            5.0,
            1e-4,
            200,
        );
        assert!(res.x.iter().all(|v| v.is_finite()));
    }

    // ---- powell (derivative-free direction-set method) ----

    #[test]
    fn test_powell_quadratic() {
        let res = powell(quad, vec![5.0], 1e-6, 100);
        assert!((res.x[0] - 2.0).abs() < 0.5, "powell x={}", res.x[0]);
    }
    #[test]
    fn test_powell_bowl_2d() {
        let res = powell(bowl, vec![5.0, 5.0], 1e-5, 500);
        assert!(res.f_val < 1.0, "powell f_val={}", res.f_val);
    }
    #[test]
    fn test_powell_already_at_min() {
        let res = powell(quad, vec![2.0], 1e-8, 50);
        assert!(res.f_val < 1e-8, "powell at min: f_val={}", res.f_val);
    }

    // ---- sgd_cosine_annealing ----

    #[test]
    fn test_sgd_cosine_reduces_f() {
        let f_init = quad(&[5.0]);
        let res = sgd_cosine_annealing(quad, quad_grad, vec![5.0], 0.1, 1e-4, 200);
        assert!(
            res.f_val < f_init,
            "SGD cosine should reduce f: f_init={f_init} f_final={}",
            res.f_val
        );
    }
    // The annealing schedule runs for the full budget: n_iter == max_iter.
    #[test]
    fn test_sgd_cosine_n_iter() {
        let res = sgd_cosine_annealing(quad, quad_grad, vec![0.0], 0.1, 1e-4, 100);
        assert_eq!(res.n_iter, 100);
    }
    #[test]
    fn test_sgd_cosine_finite() {
        let res = sgd_cosine_annealing(bowl, bowl_grad, vec![5.0, 5.0], 0.05, 1e-5, 500);
        assert!(res.x.iter().all(|v| v.is_finite()));
    }

    // ---- bfgs (full-matrix quasi-Newton) ----

    #[test]
    fn test_bfgs_quadratic() {
        let res = bfgs(quad, quad_grad, vec![0.0], 1e-8, 200);
        assert!(res.converged, "BFGS should converge on quadratic");
        assert!((res.x[0] - 2.0).abs() < 1e-5, "x={}", res.x[0]);
    }
    #[test]
    fn test_bfgs_bowl_2d() {
        let res = bfgs(bowl, bowl_grad, vec![5.0, 5.0], 1e-8, 500);
        assert!(res.converged, "BFGS should converge on bowl");
        assert!((res.x[0] - 0.0).abs() < 1e-4, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 1e-4, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_bfgs_rosenbrock() {
        let res = bfgs(rosenbrock, rosenbrock_grad, vec![-1.0, 1.0], 1e-6, 2000);
        assert!((res.x[0] - 1.0).abs() < 0.01, "x[0]={}", res.x[0]);
        assert!((res.x[1] - 1.0).abs() < 0.01, "x[1]={}", res.x[1]);
    }
    #[test]
    fn test_bfgs_f_val_at_min() {
        let res = bfgs(quad, quad_grad, vec![0.0], 1e-8, 200);
        assert!(res.f_val < 1e-12, "f_val={}", res.f_val);
    }

    // ---- clip_gradient / gradient_descent_clipped ----

    // ||(3, 4)|| = 5 > 1, so the vector must be rescaled to unit norm.
    #[test]
    fn test_clip_gradient_clips_large() {
        let mut g = vec![3.0_f64, 4.0_f64];
        clip_gradient(&mut g, 1.0);
        let norm: f64 = g.iter().map(|v| v * v).sum::<f64>().sqrt();
        assert!((norm - 1.0).abs() < 1e-10, "norm={norm}");
    }
    // A gradient already under the threshold must pass through unchanged.
    #[test]
    fn test_clip_gradient_no_op_when_small() {
        let mut g = vec![0.1_f64, 0.1_f64];
        let g_orig = g.clone();
        clip_gradient(&mut g, 1.0);
        for (a, b) in g.iter().zip(g_orig.iter()) {
            assert!((a - b).abs() < 1e-15);
        }
    }
    #[test]
    fn test_gradient_descent_clipped_converges() {
        let res = gradient_descent_clipped(quad, quad_grad, vec![0.0], 0.1, 5.0, 1e-6, 1000);
        assert!(res.converged || res.f_val < 1e-6);
        assert!((res.x[0] - 2.0).abs() < 1e-3, "clipped GD: x={}", res.x[0]);
    }
    // Huge starting point: clipping should keep the iterates from diverging to non-finite.
    #[test]
    fn test_gradient_descent_clipped_respects_clip() {
        let res = gradient_descent_clipped(quad, quad_grad, vec![1000.0], 0.01, 1.0, 1e-4, 5000);
        assert!(res.x[0].is_finite(), "clipped GD: x should be finite");
    }

    // ---- Additional edge cases and cross-method checks ----

    #[test]
    fn test_nelder_mead_already_at_minimum() {
        let res = nelder_mead(quad, vec![2.0], 0.1, 1e-10, 100);
        assert!(res.converged, "NM at minimum should converge");
    }
    #[test]
    fn test_de_bowl_2d() {
        let mut rng = rand::rng();
        let bounds = vec![(-5.0_f64, 5.0_f64), (-5.0_f64, 5.0_f64)];
        let mut de = DifferentialEvolution::new(20, 2, &bounds, &mut rng);
        for _ in 0..200 {
            de.step(bowl, &mut rng);
        }
        let best = de.best(bowl);
        assert!(bowl(&best) < 1.0, "DE bowl: f_val={}", bowl(&best));
    }
    // Pins the default DE hyperparameters: F = 0.8, CR = 0.9.
    #[test]
    fn test_de_f_cr_defaults() {
        let mut rng = rand::rng();
        let bounds = vec![(-1.0_f64, 1.0_f64)];
        let de = DifferentialEvolution::new(10, 1, &bounds, &mut rng);
        assert!((de.F - 0.8).abs() < 1e-12);
        assert!((de.CR - 0.9).abs() < 1e-12);
    }
    #[test]
    fn test_de_population_in_bounds() {
        let mut rng = rand::rng();
        let bounds = vec![(-3.0_f64, 3.0_f64), (0.0_f64, 5.0_f64)];
        let de = DifferentialEvolution::new(15, 2, &bounds, &mut rng);
        for individual in &de.pop {
            assert!(individual[0] >= -3.0 && individual[0] <= 3.0);
            assert!(individual[1] >= 0.0 && individual[1] <= 5.0);
        }
    }
    #[test]
    fn test_pso_rosenbrock() {
        let res = particle_swarm(
            rosenbrock,
            &[-2.0, -2.0],
            &[2.0, 2.0],
            30,
            0.7,
            2.0,
            2.0,
            500,
        );
        assert!(res.f_val < 2.0, "PSO rosenbrock f_val={}", res.f_val);
    }
    #[test]
    fn test_pso_1d_minimum() {
        let res = particle_swarm(
            |x: &[f64]| (x[0] - 3.0).powi(2),
            &[0.0],
            &[6.0],
            20,
            0.7,
            2.0,
            2.0,
            300,
        );
        assert!((res.x[0] - 3.0).abs() < 0.5, "PSO 1D: x={}", res.x[0]);
    }
    #[test]
    fn test_cg_minimize_rosenbrock() {
        let x =
            conjugate_gradient_minimize(rosenbrock, rosenbrock_grad, vec![-1.0, 1.0], 5000, 1e-6);
        assert!((x[0] - 1.0).abs() < 0.05, "CG rosenbrock x[0]={}", x[0]);
        assert!((x[1] - 1.0).abs() < 0.05, "CG rosenbrock x[1]={}", x[1]);
    }
    // 3-D sphere function (min at the origin) checks dimensions beyond 2.
    #[test]
    fn test_lbfgs_converges_3d() {
        let f = |x: &[f64]| x.iter().map(|xi| xi * xi).sum::<f64>();
        let g = |x: &[f64]| x.iter().map(|xi| 2.0 * xi).collect::<Vec<_>>();
        let res = lbfgs(f, g, vec![3.0, 3.0, 3.0], 5, 1e-10, 200);
        assert!(res.converged);
        for xi in &res.x {
            assert!(xi.abs() < 1e-5, "x should be near 0, got {xi}");
        }
    }
    #[test]
    fn test_trust_region_at_minimum_stable() {
        let mut tr = TrustRegion::new(1.0, 0.1);
        let x0 = vec![2.0];
        let x1 = tr.step(quad, quad_grad, &x0);
        assert!(
            (x1[0] - 2.0).abs() < 1e-6,
            "TR at minimum should stay: x={}",
            x1[0]
        );
    }
    #[test]
    fn test_golden_section_narrow_bracket() {
        let (x, _) = golden_section(|x| x * x, -0.1, 0.1, 1e-12);
        assert!(x.abs() < 1e-8, "golden section near 0: x={x}");
    }
    // sin on [-pi, 0] has its minimum of -1 at x = -pi/2.
    #[test]
    fn test_golden_section_sine_minimum() {
        use std::f64::consts::PI;
        let (x, fx) = golden_section(|x| x.sin(), -PI, 0.0, 1e-8);
        assert!((x - (-PI / 2.0)).abs() < 1e-4, "golden section sin: x={x}");
        assert!((fx - (-1.0)).abs() < 1e-6, "golden section sin f(x)={fx}");
    }
    #[test]
    fn test_bisection_root_at_boundary() {
        let root = bisection(|x| x - 1.0, 0.0, 2.0, 1e-10).expect("root at x=1");
        assert!((root - 1.0).abs() < 1e-8, "root={root}");
    }
    // Finite differences must agree with the analytic Rosenbrock gradient.
    #[test]
    fn test_numerical_gradient_rosenbrock() {
        let x = vec![1.0, 1.0];
        let g = numerical_gradient(rosenbrock, &x, 1e-5);
        let g_exact = rosenbrock_grad(&x);
        for i in 0..2 {
            assert!(
                (g[i] - g_exact[i]).abs() < 1e-4,
                "numerical gradient at {i}: {} vs {}",
                g[i],
                g_exact[i]
            );
        }
    }

    // ---- Optimizer facade (DE / SA / CMA-ES entry points) ----

    #[test]
    fn test_optimizer_de_finds_quadratic_minimum() {
        let opt = Optimizer::new();
        let bounds = vec![(-5.0_f64, 5.0_f64)];
        let result =
            opt.differential_evolution(|x| (x[0] - 3.0).powi(2), &bounds, 20, 0.8, 0.9, 200);
        assert!(
            (result.x[0] - 3.0).abs() < 0.2,
            "DE should find min near x=3, got {}",
            result.x[0]
        );
    }
    #[test]
    fn test_optimizer_de_2d_bowl() {
        let opt = Optimizer::new();
        let bounds = vec![(-5.0_f64, 5.0_f64), (-5.0_f64, 5.0_f64)];
        let result =
            opt.differential_evolution(|x| x[0] * x[0] + x[1] * x[1], &bounds, 30, 0.8, 0.9, 300);
        assert!(result.f_val < 0.5, "DE 2D bowl: f_val={}", result.f_val);
    }
    #[test]
    fn test_optimizer_de_returns_opt_result() {
        let opt = Optimizer::new();
        let bounds = vec![(0.0_f64, 1.0_f64)];
        let result = opt.differential_evolution(|x| x[0], &bounds, 10, 0.5, 0.7, 50);
        assert!(
            result.f_val.is_finite(),
            "DE result f_val should be finite: {}",
            result.f_val
        );
        assert_eq!(result.n_iter, 50);
    }
    #[test]
    fn test_optimizer_sa_finds_minimum() {
        let opt = Optimizer::new();
        let result = opt.simulated_annealing(
            |x| (x[0] - 2.0).powi(2),
            vec![0.0_f64],
            10.0,
            0.99,
            0.5,
            5000,
        );
        assert!(
            (result.x[0] - 2.0).abs() < 0.5,
            "SA should find min near x=2, got {}",
            result.x[0]
        );
    }
    #[test]
    fn test_optimizer_sa_2d_sphere() {
        let opt = Optimizer::new();
        let result = opt.simulated_annealing(
            |x| x[0] * x[0] + x[1] * x[1],
            vec![5.0_f64, 5.0_f64],
            20.0,
            0.995,
            0.3,
            5000,
        );
        assert!(result.f_val < 5.0, "SA 2D sphere: f_val={}", result.f_val);
    }

    // ---- CmaEs (struct API) ----

    #[test]
    fn test_cmaes_step_reduces_mean_distance_to_optimum() {
        let mut cmaes = CmaEs::new(vec![5.0_f64], 1.0);
        let f = |x: &[f64]| (x[0] - 0.0).powi(2);
        for _ in 0..30 {
            cmaes.step(&f);
        }
        assert!(
            cmaes.mean[0].abs() < 3.0,
            "CMA-ES mean should converge toward 0, got {}",
            cmaes.mean[0]
        );
    }
    #[test]
    fn test_cmaes_generation_counter_increments() {
        let mut cmaes = CmaEs::new(vec![1.0_f64, 1.0_f64], 0.5);
        let f = |x: &[f64]| x[0] * x[0] + x[1] * x[1];
        assert_eq!(cmaes.generation, 0);
        cmaes.step(&f);
        assert_eq!(cmaes.generation, 1);
        cmaes.step(&f);
        assert_eq!(cmaes.generation, 2);
    }
    // The step-size sigma must never collapse to zero or go negative.
    #[test]
    fn test_cmaes_sigma_stays_positive() {
        let mut cmaes = CmaEs::new(vec![3.0_f64, -3.0_f64], 1.0);
        let f = |x: &[f64]| x.iter().map(|v| v * v).sum::<f64>();
        for _ in 0..10 {
            cmaes.step(&f);
            assert!(
                cmaes.sigma > 0.0,
                "sigma must remain positive: {}",
                cmaes.sigma
            );
        }
    }
    #[test]
    fn test_optimizer_cmaes_step_via_facade() {
        let opt = Optimizer::new();
        let mut cmaes = CmaEs::new(vec![4.0_f64], 2.0);
        let f = |x: &[f64]| (x[0] - 1.0).powi(2);
        let best_f = opt.cmaes_step(&mut cmaes, &f);
        assert!(best_f.is_finite(), "cmaes_step should return finite f_val");
    }
    // `cov` is stored as a flat row-major n*n buffer; the covariance update
    // must keep it symmetric.
    #[test]
    fn test_cmaes_covariance_remains_symmetric() {
        let mut cmaes = CmaEs::new(vec![2.0_f64, -1.0_f64], 0.5);
        let f = |x: &[f64]| x[0] * x[0] + 2.0 * x[1] * x[1];
        let n = 2;
        for _ in 0..5 {
            cmaes.step(&f);
        }
        for i in 0..n {
            for j in 0..n {
                let diff = (cmaes.cov[i * n + j] - cmaes.cov[j * n + i]).abs();
                assert!(diff < 1e-10, "cov[{i},{j}] != cov[{j},{i}]: diff={diff}");
            }
        }
    }
}