// scirs2_optimize/unconstrained/utils.rs
use crate::error::OptimizeError;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
6#[allow(dead_code)]
8pub fn finite_difference_gradient<F, S>(
9 fun: &mut F,
10 x: &ArrayView1<f64>,
11 step: f64,
12) -> Result<Array1<f64>, OptimizeError>
13where
14 F: FnMut(&ArrayView1<f64>) -> S,
15 S: Into<f64>,
16{
17 let n = x.len();
18 let mut grad = Array1::<f64>::zeros(n);
19 let mut x_plus = x.to_owned();
20 let mut x_minus = x.to_owned();
21
22 for i in 0..n {
23 let h = step * (1.0 + x[i].abs());
24 x_plus[i] = x[i] + h;
25 x_minus[i] = x[i] - h;
26
27 let f_plus = fun(&x_plus.view()).into();
28 let f_minus = fun(&x_minus.view()).into();
29
30 if !f_plus.is_finite() || !f_minus.is_finite() {
31 return Err(OptimizeError::ComputationError(
32 "Function returned non-finite value during gradient computation".to_string(),
33 ));
34 }
35
36 grad[i] = (f_plus - f_minus) / (2.0 * h);
37
38 x_plus[i] = x[i];
40 x_minus[i] = x[i];
41 }
42
43 Ok(grad)
44}
45
46#[allow(dead_code)]
48pub fn finite_difference_hessian<F, S>(
49 fun: &mut F,
50 x: &ArrayView1<f64>,
51 step: f64,
52) -> Result<Array2<f64>, OptimizeError>
53where
54 F: FnMut(&ArrayView1<f64>) -> S,
55 S: Into<f64>,
56{
57 let n = x.len();
58 let mut hess = Array2::<f64>::zeros((n, n));
59 let mut x_temp = x.to_owned();
60
61 let f0 = fun(&x.view()).into();
62
63 for i in 0..n {
64 let hi = step * (1.0 + x[i].abs());
65
66 x_temp[i] = x[i] + hi;
68 let fp = fun(&x_temp.view()).into();
69 x_temp[i] = x[i] - hi;
70 let fm = fun(&x_temp.view()).into();
71 x_temp[i] = x[i];
72
73 hess[[i, i]] = (fp - 2.0 * f0 + fm) / (hi * hi);
74
75 for j in (i + 1)..n {
77 let hj = step * (1.0 + x[j].abs());
78
79 x_temp[i] = x[i] + hi;
80 x_temp[j] = x[j] + hj;
81 let fpp = fun(&x_temp.view()).into();
82
83 x_temp[i] = x[i] + hi;
84 x_temp[j] = x[j] - hj;
85 let fpm = fun(&x_temp.view()).into();
86
87 x_temp[i] = x[i] - hi;
88 x_temp[j] = x[j] + hj;
89 let fmp = fun(&x_temp.view()).into();
90
91 x_temp[i] = x[i] - hi;
92 x_temp[j] = x[j] - hj;
93 let fmm = fun(&x_temp.view()).into();
94
95 x_temp[i] = x[i];
96 x_temp[j] = x[j];
97
98 let hess_ij = (fpp - fpm - fmp + fmm) / (4.0 * hi * hj);
99 hess[[i, j]] = hess_ij;
100 hess[[j, i]] = hess_ij;
101 }
102 }
103
104 Ok(hess)
105}
106
107#[allow(dead_code)]
109pub fn compute_gradient_with_jacobian<'a, F, S>(
110 fun: &mut F,
111 x: &ArrayView1<f64>,
112 jacobian: &crate::unconstrained::Jacobian<'a>,
113 eps: f64,
114 nfev: &mut usize,
115) -> Result<Array1<f64>, OptimizeError>
116where
117 F: FnMut(&ArrayView1<f64>) -> S,
118 S: Into<f64>,
119{
120 match jacobian {
121 crate::unconstrained::Jacobian::FiniteDiff => {
122 *nfev += x.len();
123 finite_difference_gradient(fun, x, eps)
124 }
125 crate::unconstrained::Jacobian::Function(grad_fn) => Ok(grad_fn(x)),
126 }
127}
128
/// Returns `true` when any standard stopping criterion is satisfied:
/// the absolute function change falls below `ftol`, the step size falls
/// below `xtol`, or the gradient norm falls below `gtol`.
#[allow(dead_code)]
pub fn check_convergence(
    f_delta: f64,
    x_delta: f64,
    g_norm: f64,
    ftol: f64,
    xtol: f64,
    gtol: f64,
) -> bool {
    let f_converged = f_delta.abs() < ftol;
    let x_converged = x_delta < xtol;
    let g_converged = g_norm < gtol;
    f_converged || x_converged || g_converged
}
141
142#[allow(dead_code)]
144pub fn array_diff_norm(x1: &ArrayView1<f64>, x2: &ArrayView1<f64>) -> f64 {
145 (x1 - x2).mapv(|x| x.powi(2)).sum().sqrt()
146}
147
148#[allow(dead_code)]
150pub fn clip_step(
151 x: &ArrayView1<f64>,
152 direction: &ArrayView1<f64>,
153 alpha: f64,
154 lower: &[Option<f64>],
155 upper: &[Option<f64>],
156) -> f64 {
157 let mut clipped_alpha = alpha;
158
159 for i in 0..x.len() {
160 if direction[i] != 0.0 {
161 if let Some(lb) = lower[i] {
163 if direction[i] < 0.0 {
164 let max_step = (lb - x[i]) / direction[i];
165 if max_step >= 0.0 {
166 clipped_alpha = clipped_alpha.min(max_step);
167 }
168 }
169 }
170
171 if let Some(ub) = upper[i] {
173 if direction[i] > 0.0 {
174 let max_step = (ub - x[i]) / direction[i];
175 if max_step >= 0.0 {
176 clipped_alpha = clipped_alpha.min(max_step);
177 }
178 }
179 }
180 }
181 }
182
183 clipped_alpha.max(0.0)
184}
185
186#[allow(dead_code)]
188pub fn to_array_view<T>(arr: &Array1<T>) -> ArrayView1<T> {
189 arr.view()
190}
191
/// Picks an initial line-search step length from the gradient norm.
///
/// The default is `1 / grad_norm` (so the first trial step has roughly unit
/// length in parameter space), falling back to `1.0` when the norm is zero
/// or negative. If `max_step` is given, the result is capped at that value.
#[allow(dead_code)]
pub fn initial_step_size(grad_norm: f64, max_step: Option<f64>) -> f64 {
    // NOTE: the parameter previously carried a misleading `_` "unused"
    // prefix even though it is read here; renaming is safe because Rust
    // arguments are positional.
    let default_step = if grad_norm > 0.0 {
        1.0 / grad_norm
    } else {
        1.0
    };

    match max_step {
        Some(max_s) => default_step.min(max_s),
        None => default_step,
    }
}
207
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_finite_difference_gradient() {
        // f(x, y) = x^2 + 2 y^2, so the analytic gradient is (2x, 4y).
        let mut quadratic = |p: &ArrayView1<f64>| -> f64 { p[0].powi(2) + 2.0 * p[1].powi(2) };

        let point = Array1::from_vec(vec![1.0, 2.0]);
        let grad = finite_difference_gradient(&mut quadratic, &point.view(), 1e-8)
            .expect("Operation failed");

        // At (1, 2) the gradient is (2, 8); central differences should
        // agree to well within 1e-6.
        assert_abs_diff_eq!(grad[0], 2.0, epsilon = 1e-6);
        assert_abs_diff_eq!(grad[1], 8.0, epsilon = 1e-6);
    }
}
224}