optimization_solvers/quasi_newton/

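//! Broyden's method as a quasi-Newton line-search solver.
//!
//! Rather than computing the Hessian, the solver maintains an approximation
//! `H_k` of its inverse, refined at each iteration by a rank-one correction
//! built from s_k = x_{k+1} - x_k and y_k = g(x_{k+1}) - g(x_k) (the "good"
//! Broyden update; see `update_next_iterate`). The search direction is the
//! Newton-like step d_k = -H_k g(x_k).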
use super::*;

#[derive(derive_getters::Getters)]
pub struct Broyden {
    /// Current approximation H_k of the inverse Hessian.
    approx_inv_hessian: DMatrix<Floating>,
    /// Current iterate x_k.
    x: DVector<Floating>,
    /// Iteration counter.
    k: usize,
    /// Tolerance used both for convergence and for skipping degenerate updates.
    tol: Floating,
    /// Norm of the last iterate correction s_k = x_{k+1} - x_k.
    s_norm: Option<Floating>,
    /// Norm of the last gradient correction y_k = g(x_{k+1}) - g(x_k).
    y_norm: Option<Floating>,
    /// Identity matrix of the problem dimension, used to initialize H_0.
    identity: DMatrix<Floating>,
}

impl Broyden {
    /// True when the norm of the last iterate correction s_k is below tolerance.
    pub fn next_iterate_too_close(&self) -> bool {
        match self.s_norm() {
            Some(s) => s < &self.tol,
            None => false,
        }
    }
    /// True when the norm of the last gradient correction y_k is below tolerance.
    pub fn gradient_next_iterate_too_close(&self) -> bool {
        match self.y_norm() {
            Some(y) => y < &self.tol,
            None => false,
        }
    }
    pub fn new(tol: Floating, x0: DVector<Floating>) -> Self {
        let n = x0.len();
        let identity = DMatrix::identity(n, n);
        Broyden {
            approx_inv_hessian: identity.clone(),
            x: x0,
            k: 0,
            tol,
            s_norm: None,
            y_norm: None,
            identity,
        }
    }
}

impl ComputeDirection for Broyden {
    fn compute_direction(
        &mut self,
        eval: &FuncEvalMultivariate,
    ) -> Result<DVector<Floating>, SolverError> {
        // Newton-like direction with the approximate inverse Hessian:
        // d_k = -H_k g(x_k).
        Ok(-&self.approx_inv_hessian * eval.g())
    }
}

impl LineSearchSolver for Broyden {
    fn k(&self) -> &usize {
        &self.k
    }
    fn xk(&self) -> &DVector<Floating> {
        &self.x
    }
    fn xk_mut(&mut self) -> &mut DVector<Floating> {
        &mut self.x
    }
    fn k_mut(&mut self) -> &mut usize {
        &mut self.k
    }
    fn has_converged(&self, eval: &FuncEvalMultivariate) -> bool {
        // Converged when the iterate correction, the gradient correction, or
        // the gradient norm itself falls below the tolerance.
        if self.next_iterate_too_close() {
            warn!(target: "Broyden", "Minimization completed: iterate correction below tolerance");
            true
        } else if self.gradient_next_iterate_too_close() {
            warn!(target: "Broyden", "Minimization completed: gradient correction below tolerance");
            true
        } else {
            eval.g().norm() < self.tol
        }
    }

    fn update_next_iterate<LS: LineSearch>(
        &mut self,
        line_search: &mut LS,
        eval_x_k: &FuncEvalMultivariate,
        oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
        direction: &DVector<Floating>,
        max_iter_line_search: usize,
    ) -> Result<(), SolverError> {
        let step = line_search.compute_step_len(
            self.xk(),
            eval_x_k,
            direction,
            oracle,
            max_iter_line_search,
        );

        let next_iterate = self.xk() + step * direction;

        // Corrections: s_k = x_{k+1} - x_k and y_k = g(x_{k+1}) - g(x_k).
        let s = &next_iterate - &self.x;
        self.s_norm = Some(s.norm());
        let y = oracle(&next_iterate).g() - eval_x_k.g();
        self.y_norm = Some(y.norm());

        // Update the iterate first; the inverse Hessian approximation is
        // updated below only if the corrections are not too small.
        *self.xk_mut() = next_iterate;

        if self.next_iterate_too_close() {
            return Ok(());
        }

        if self.gradient_next_iterate_too_close() {
            return Ok(());
        }

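        // "Good" Broyden update of the inverse approximation. Applying the
        // Sherman-Morrison formula to the rank-one Jacobian update
        //   J_{k+1} = J_k + (y - J_k s) s^T / (s^T s)
        // yields
        //   H_{k+1} = H_k + (s - H_k y) s^T H_k / (s^T H_k y),
        // which satisfies the secant condition H_{k+1} y = s.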
        let hy = &self.approx_inv_hessian * &y;
        // Denominator s^T H_k y (computed before `hy` is moved below).
        let denominator = s.dot(&hy);
        let numerator = ((&s - hy) * s.transpose()) * &self.approx_inv_hessian;
        self.approx_inv_hessian += numerator / denominator;

        Ok(())
    }
}
#[cfg(test)]
mod test_broyden {
    use super::*;
    #[test]
    fn test_outer() {
        let a = DVector::from_vec(vec![1.0, 2.0]);
        let b = DVector::from_vec(vec![3.0, 4.0]);
        // Outer product a b^T: a 2x2 matrix.
        let c = a * b.transpose();
        println!("{:?}", c);
        assert_eq!(c, DMatrix::from_row_slice(2, 2, &[3.0, 4.0, 6.0, 8.0]));
    }

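    // A minimal sanity check of the inverse update used in
    // `update_next_iterate`, re-implemented inline rather than through the
    // solver API: the good Broyden update
    //   H_{k+1} = H_k + (s - H_k y) s^T H_k / (s^T H_k y)
    // must satisfy the secant condition H_{k+1} y = s.
    #[test]
    fn broyden_update_satisfies_secant() {
        let h = DMatrix::<Floating>::identity(2, 2);
        let s = DVector::from_vec(vec![1.0, -0.5]);
        let y = DVector::from_vec(vec![2.0, 1.0]);
        let hy = &h * &y;
        let h_next = &h + ((&s - &hy) * s.transpose()) * &h / s.dot(&hy);
        assert!((h_next * &y - &s).norm() < 1e-12);
    }
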
    #[test]
    pub fn broyden_morethuente() {
        std::env::set_var("RUST_LOG", "info");

        let _ = Tracer::default()
            .with_stdout_layer(Some(LogFormat::Normal))
            .build();
        // Convex quadratic with minimizer (-1, 1) and optimal value 0.
        let gamma = 1.;
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + gamma * (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., gamma * (x[1] - 1.)]);
            (f, g).into()
        };

        // Line search builder
        let mut ls = MoreThuente::default();

        // Broyden solver builder
        let tol = 1e-12;
        let x_0 = DVector::from(vec![180.0, 152.0]);
        let mut solver = Broyden::new(tol, x_0);

        // Minimization
        let max_iter_solver = 1000;
        let max_iter_line_search = 100000;

        solver
            .minimize(
                &mut ls,
                f_and_g,
                max_iter_solver,
                max_iter_line_search,
                None,
            )
            .unwrap();

        println!("Iterate: {:?}", solver.xk());

        let eval = f_and_g(solver.xk());
        println!("Function eval: {:?}", eval);
        println!("Gradient norm: {:?}", eval.g().norm());
        println!("tol: {:?}", tol);

        let convergence = solver.has_converged(&eval);
        println!("Convergence: {:?}", convergence);

        assert!((eval.f() - 0.0).abs() < 1e-6);
    }

    #[test]
    pub fn broyden_backtracking() {
        std::env::set_var("RUST_LOG", "info");

        let _ = Tracer::default()
            .with_stdout_layer(Some(LogFormat::Normal))
            .build();
        // Same convex quadratic as above, with minimizer (-1, 1).
        let gamma = 1.;
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + gamma * (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., gamma * (x[1] - 1.)]);
            (f, g).into()
        };

        // Line search builder: Armijo-style backtracking with sufficient
        // decrease constant alpha and step shrink factor beta.
        let alpha = 1e-4;
        let beta = 0.5;
        let mut ls = BackTracking::new(alpha, beta);

        // Broyden solver builder
        let tol = 1e-12;
        let x_0 = DVector::from(vec![180.0, 152.0]);
        let mut solver = Broyden::new(tol, x_0);

        // Minimization
        let max_iter_solver = 1000;
        let max_iter_line_search = 100000;

        solver
            .minimize(
                &mut ls,
                f_and_g,
                max_iter_solver,
                max_iter_line_search,
                None,
            )
            .unwrap();

        println!("Iterate: {:?}", solver.xk());

        let eval = f_and_g(solver.xk());
        println!("Function eval: {:?}", eval);
        println!("Gradient norm: {:?}", eval.g().norm());
        println!("tol: {:?}", tol);

        let convergence = solver.has_converged(&eval);
        println!("Convergence: {:?}", convergence);

        assert!((eval.f() - 0.0).abs() < 1e-6);
    }
}