convex_math/optimization/
mod.rs

1//! Optimization algorithms.
2//!
3//! This module provides optimization routines for curve fitting
4//! and other financial calculations.
5
6use crate::error::MathResult;
7
/// Configuration for optimization algorithms.
///
/// All fields are plain numeric knobs, so the type is `Copy` and cheap to
/// pass by value. `Default` (implemented below) supplies conservative
/// settings suitable for well-scaled problems.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct OptimizationConfig {
    /// Convergence tolerance: iteration stops once the gradient magnitude
    /// falls below this value.
    pub tolerance: f64,
    /// Hard cap on the number of iterations before giving up.
    pub max_iterations: u32,
    /// Perturbation size used for central-difference numerical gradients.
    pub step_size: f64,
}
18
19impl Default for OptimizationConfig {
20    fn default() -> Self {
21        Self {
22            tolerance: 1e-10,
23            max_iterations: 100,
24            step_size: 1e-8,
25        }
26    }
27}
28
/// Result of an optimization run.
///
/// Returned by [`gradient_descent`]; check `converged` before trusting the
/// reported optimum.
#[derive(Debug, Clone, PartialEq)]
pub struct OptimizationResult {
    /// Optimal parameters found (best point visited).
    pub parameters: Vec<f64>,
    /// Objective function value at `parameters`.
    pub objective_value: f64,
    /// Number of iterations actually used.
    pub iterations: u32,
    /// Whether the gradient-magnitude tolerance was met.
    pub converged: bool,
}
41
42/// Simple gradient descent optimizer.
43///
44/// Minimizes a function using steepest descent with numerical gradients.
45pub fn gradient_descent<F>(
46    f: F,
47    initial: &[f64],
48    config: &OptimizationConfig,
49) -> MathResult<OptimizationResult>
50where
51    F: Fn(&[f64]) -> f64,
52{
53    let mut params = initial.to_vec();
54    let mut best_value = f(&params);
55    let n = params.len();
56
57    for iteration in 0..config.max_iterations {
58        // Compute numerical gradient
59        let mut gradient = vec![0.0; n];
60        for i in 0..n {
61            let mut params_plus = params.clone();
62            let mut params_minus = params.clone();
63            params_plus[i] += config.step_size;
64            params_minus[i] -= config.step_size;
65
66            gradient[i] = (f(&params_plus) - f(&params_minus)) / (2.0 * config.step_size);
67        }
68
69        // Compute gradient magnitude
70        let grad_mag: f64 = gradient.iter().map(|g| g * g).sum::<f64>().sqrt();
71
72        if grad_mag < config.tolerance {
73            return Ok(OptimizationResult {
74                parameters: params,
75                objective_value: best_value,
76                iterations: iteration,
77                converged: true,
78            });
79        }
80
81        // Line search with backtracking
82        let mut step = 1.0;
83        let c = 0.5; // Armijo parameter
84
85        loop {
86            let mut new_params = params.clone();
87            for i in 0..n {
88                new_params[i] -= step * gradient[i];
89            }
90
91            let new_value = f(&new_params);
92            if new_value < best_value - c * step * grad_mag * grad_mag {
93                params = new_params;
94                best_value = new_value;
95                break;
96            }
97
98            step *= 0.5;
99            if step < 1e-15 {
100                // Can't make progress
101                return Ok(OptimizationResult {
102                    parameters: params,
103                    objective_value: best_value,
104                    iterations: iteration,
105                    converged: false,
106                });
107            }
108        }
109    }
110
111    Ok(OptimizationResult {
112        parameters: params,
113        objective_value: best_value,
114        iterations: config.max_iterations,
115        converged: false,
116    })
117}
118
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// The optimizer should locate the unique minimum of a convex
    /// paraboloid to within tight tolerance from a cold start.
    #[test]
    fn test_gradient_descent_quadratic() {
        // Objective: (x - 2)^2 + (y - 3)^2, minimized at (2, 3).
        let objective = |p: &[f64]| (p[0] - 2.0).powi(2) + (p[1] - 3.0).powi(2);

        let start = [0.0, 0.0];
        let result =
            gradient_descent(objective, &start, &OptimizationConfig::default()).unwrap();

        assert!(result.converged);
        assert_relative_eq!(result.parameters[0], 2.0, epsilon = 1e-5);
        assert_relative_eq!(result.parameters[1], 3.0, epsilon = 1e-5);
    }
}