optimization_solvers/quasi_newton/sr1_b.rs

use super::*;

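/// SR1 quasi-Newton solver with box constraints (SR1-B).
///
/// Maintains an approximation of the inverse Hessian, updated with the
/// symmetric rank-one (SR1) formula, and keeps every iterate inside the box
/// `[lower_bound, upper_bound]` via projection.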
#[derive(derive_getters::Getters)]
pub struct SR1B {
    approx_inv_hessian: DMatrix<Floating>,
    x: DVector<Floating>,
    k: usize,
    tol: Floating,
    s_norm: Option<Floating>,
    y_norm: Option<Floating>,
    identity: DMatrix<Floating>,
    lower_bound: DVector<Floating>,
    upper_bound: DVector<Floating>,
}

impl HasBounds for SR1B {
    fn lower_bound(&self) -> &DVector<Floating> {
        &self.lower_bound
    }
    fn set_lower_bound(&mut self, lower_bound: DVector<Floating>) {
        self.lower_bound = lower_bound;
    }
    fn set_upper_bound(&mut self, upper_bound: DVector<Floating>) {
        self.upper_bound = upper_bound;
    }
    fn upper_bound(&self) -> &DVector<Floating> {
        &self.upper_bound
    }
}

impl SR1B {
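    /// True when the last step `s = x_{k+1} - x_k` is shorter than `tol`
    /// (always false before the first update).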
    pub fn next_iterate_too_close(&self) -> bool {
        match self.s_norm() {
            Some(s) => s < &self.tol,
            None => false,
        }
    }
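    /// True when the last gradient change `y = g_{k+1} - g_k` is shorter
    /// than `tol` (always false before the first update).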
    pub fn gradient_next_iterate_too_close(&self) -> bool {
        match self.y_norm() {
            Some(y) => y < &self.tol,
            None => false,
        }
    }
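    /// Builds a solver whose inverse Hessian approximation starts as the
    /// identity. The initial guess `x0` is projected onto the box so the
    /// first iterate is feasible.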
    pub fn new(
        tol: Floating,
        x0: DVector<Floating>,
        lower_bound: DVector<Floating>,
        upper_bound: DVector<Floating>,
    ) -> Self {
        let n = x0.len();
        let x0 = x0.box_projection(&lower_bound, &upper_bound);
        let identity = DMatrix::identity(n, n);
        SR1B {
            approx_inv_hessian: identity.clone(),
            x: x0,
            k: 0,
            tol,
            s_norm: None,
            y_norm: None,
            identity,
            lower_bound,
            upper_bound,
        }
    }
}

impl ComputeDirection for SR1B {
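    /// Projected quasi-Newton direction: the candidate `x - H g` is clipped
    /// onto the box, and the returned direction is the displacement from the
    /// current iterate, `d = P_box(x - H g) - x`, so that `x + d` is always
    /// feasible.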
    fn compute_direction(
        &mut self,
        eval: &FuncEvalMultivariate,
    ) -> Result<DVector<Floating>, SolverError> {
        let direction = &self.x - &self.approx_inv_hessian * eval.g();
        let direction = direction.box_projection(&self.lower_bound, &self.upper_bound);
        let direction = direction - &self.x;
        Ok(direction)
    }
}

impl LineSearchSolver for SR1B {
    fn k(&self) -> &usize {
        &self.k
    }
    fn xk(&self) -> &DVector<Floating> {
        &self.x
    }
    fn xk_mut(&mut self) -> &mut DVector<Floating> {
        &mut self.x
    }
    fn k_mut(&mut self) -> &mut usize {
        &mut self.k
    }
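    /// Converged when the gradient norm drops below `tol`, or when the last
    /// step or gradient change was already too small to keep updating.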
    fn has_converged(&self, eval: &FuncEvalMultivariate) -> bool {
        if self.next_iterate_too_close() {
            warn!(target: "SR1B", "Minimization completed: next iterate too close");
            true
        } else if self.gradient_next_iterate_too_close() {
            warn!(target: "SR1B", "Minimization completed: gradient at next iterate too close");
            true
        } else {
            eval.g().norm() < self.tol
        }
    }

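    /// Runs the line search, records the curvature pair `(s, y)`, and applies
    /// the SR1 update to the inverse Hessian approximation unless the pair is
    /// degenerate (step or gradient change below `tol`).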
    fn update_next_iterate<LS: LineSearch>(
        &mut self,
        line_search: &mut LS,
        eval_x_k: &FuncEvalMultivariate,
        oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
        direction: &DVector<Floating>,
        max_iter_line_search: usize,
    ) -> Result<(), SolverError> {
        let step = line_search.compute_step_len(
            self.xk(),
            eval_x_k,
            direction,
            oracle,
            max_iter_line_search,
        );

        let next_iterate = self.xk() + step * direction;

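        // Curvature pair for the quasi-Newton update:
        //   s = x_{k+1} - x_k,   y = g(x_{k+1}) - g(x_k).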
        let s = &next_iterate - &self.x;
        self.s_norm = Some(s.norm());
        let y = oracle(&next_iterate).g() - eval_x_k.g();
        self.y_norm = Some(y.norm());

        *self.xk_mut() = next_iterate;

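        // Skip the curvature update when the step or the gradient change is
        // negligible; `has_converged` will then report termination.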
        if self.next_iterate_too_close() {
            return Ok(());
        }

        if self.gradient_next_iterate_too_close() {
            return Ok(());
        }

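        // SR1 update of the inverse Hessian approximation:
        //   H_{k+1} = H_k + (s - H_k y)(s - H_k y)^T / ((s - H_k y)^T y).
        // A common safeguard (e.g. Nocedal & Wright, Numerical Optimization)
        // skips the update when |(s - H_k y)^T y| is tiny relative to
        // ||s - H_k y|| * ||y||; here the tolerance checks above play that role.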
        let hy = &self.approx_inv_hessian * &y;
        let shy = s - hy;
        self.approx_inv_hessian += &shy * shy.transpose() / shy.dot(&y);
        Ok(())
    }
}

#[cfg(test)]
mod test_sr1_b {
    use super::*;
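
    // Sanity check: `a * b.transpose()` builds the outer product matrix used
    // by the SR1 update.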
    #[test]
    fn test_outer() {
        let a = DVector::from_vec(vec![1.0, 2.0]);
        let b = DVector::from_vec(vec![3.0, 4.0]);
        let c = a * b.transpose();
        println!("{:?}", c);
    }

    #[test]
    pub fn sr1_b_backtracking() {
        std::env::set_var("RUST_LOG", "info");

        let _ = Tracer::default()
            .with_stdout_layer(Some(LogFormat::Normal))
            .build();
        let gamma = 1.;
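        // Convex quadratic f(x) = 0.5 * ((x_0 + 1)^2 + gamma * (x_1 - 1)^2)
        // with unique minimizer (-1, 1) and optimal value 0; `gamma` sets the
        // conditioning of the Hessian.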
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + gamma * (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., gamma * (x[1] - 1.)]);
            (f, g).into()
        };

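        // Infinite bounds make the box projection a no-op, so this exercises
        // SR1B as an unconstrained solver.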
        let lower_bounds = DVector::from_vec(vec![-f64::INFINITY, -f64::INFINITY]);
        let upper_bounds = DVector::from_vec(vec![f64::INFINITY, f64::INFINITY]);
        let alpha = 1e-4;
        let beta = 0.5;
        let mut ls = BackTrackingB::new(alpha, beta, lower_bounds.clone(), upper_bounds.clone());

        let tol = 1e-12;
        let x_0 = DVector::from(vec![180.0, 152.0]);
        let mut gd = SR1B::new(tol, x_0, lower_bounds, upper_bounds);

        let max_iter_solver = 1000;
        let max_iter_line_search = 100000;

        gd.minimize(
            &mut ls,
            f_and_g,
            max_iter_solver,
            max_iter_line_search,
            None,
        )
        .unwrap();

        println!("Iterate: {:?}", gd.xk());

        let eval = f_and_g(gd.xk());
        println!("Function eval: {:?}", eval);
        println!("Gradient norm: {:?}", eval.g().norm());
        println!("tol: {:?}", tol);

        let convergence = gd.has_converged(&eval);
        println!("Convergence: {:?}", convergence);

        assert!((eval.f() - 0.0).abs() < 1e-6);
    }
}