optimization_solvers/quasi_newton/dfp.rs

use super::*;

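/// Davidon-Fletcher-Powell (DFP) quasi-Newton solver.
///
/// Rather than inverting the true Hessian, the solver maintains an
/// approximation of the *inverse* Hessian (`approx_inv_hessian`),
/// initialized to the identity and refined after each step with a
/// rank-two update built from the correction pair
/// `s = x_{k+1} - x_k` and `y = g_{k+1} - g_k`. The norms of the latest
/// corrections are cached in `s_norm` and `y_norm`, where they double as
/// stopping criteria.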
#[derive(derive_getters::Getters)]
pub struct DFP {
    approx_inv_hessian: DMatrix<Floating>,
    x: DVector<Floating>,
    k: usize,
    tol: Floating,
    s_norm: Option<Floating>,
    y_norm: Option<Floating>,
    identity: DMatrix<Floating>,
}
13
14impl DFP {
15    pub fn next_iterate_too_close(&self) -> bool {
16        match self.s_norm() {
17            Some(s) => s < &self.tol,
18            None => false,
19        }
20    }
21    pub fn gradient_next_iterate_too_close(&self) -> bool {
22        match self.y_norm() {
23            Some(y) => y < &self.tol,
24            None => false,
25        }
26    }
27    pub fn new(tol: Floating, x0: DVector<Floating>) -> Self {
28        let n = x0.len();
29        let identity = DMatrix::identity(n, n);
30        DFP {
31            approx_inv_hessian: identity.clone(),
32            x: x0,
33            k: 0,
34            tol,
35            s_norm: None,
36            y_norm: None,
37            identity,
38        }
39    }
40}
41
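// The search direction is the quasi-Newton direction d_k = -H_k * g_k, where
// H_k is the current inverse-Hessian approximation. With H_0 = I the first
// step coincides with a plain gradient step.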
impl ComputeDirection for DFP {
    fn compute_direction(
        &mut self,
        eval: &FuncEvalMultivariate,
    ) -> Result<DVector<Floating>, SolverError> {
        Ok(-&self.approx_inv_hessian * eval.g())
    }
}

impl LineSearchSolver for DFP {
    fn k(&self) -> &usize {
        &self.k
    }
    fn xk(&self) -> &DVector<Floating> {
        &self.x
    }
    fn xk_mut(&mut self) -> &mut DVector<Floating> {
        &mut self.x
    }
    fn k_mut(&mut self) -> &mut usize {
        &mut self.k
    }
    fn has_converged(&self, eval: &FuncEvalMultivariate) -> bool {
        // Converged when the gradient norm falls below tolerance, or when the
        // last correction pair was too small to keep updating.
        if self.next_iterate_too_close() {
            warn!(target: "DFP", "Minimization completed: next iterate too close to current one");
            true
        } else if self.gradient_next_iterate_too_close() {
            warn!(target: "DFP", "Minimization completed: gradient of next iterate too close to current one");
            true
        } else {
            eval.g().norm() < self.tol
        }
    }

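    // Hook invoked after the direction is computed: runs the line search,
    // forms the correction pair (s, y), moves the iterate, and finally
    // applies the DFP update to the inverse-Hessian approximation
    // (skipped when the corrections are negligible, since in that case the
    // solver is about to declare convergence anyway).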
    fn update_next_iterate<LS: LineSearch>(
        &mut self,
        line_search: &mut LS,
        eval_x_k: &FuncEvalMultivariate,
        oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
        direction: &DVector<Floating>,
        max_iter_line_search: usize,
    ) -> Result<(), SolverError> {
        let step = line_search.compute_step_len(
            self.xk(),
            eval_x_k,
            direction,
            oracle,
            max_iter_line_search,
        );

        let next_iterate = self.xk() + step * direction;

        // Correction pair: s = x_{k+1} - x_k and y = g_{k+1} - g_k.
        let s = &next_iterate - &self.x;
        self.s_norm = Some(s.norm());
        let y = oracle(&next_iterate).g() - eval_x_k.g();
        self.y_norm = Some(y.norm());

        // Move the iterate first; the inverse Hessian is updated below,
        // unless the corrections are too small to be reliable.
        *self.xk_mut() = next_iterate;

        if self.next_iterate_too_close() {
            return Ok(());
        }

        if self.gradient_next_iterate_too_close() {
            return Ok(());
        }

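        // DFP inverse-Hessian update (rank-two correction):
        //   H_{k+1} = H_k + (s s^T) / (s^T y) - (H_k y y^T H_k) / (y^T H_k y)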
        let sy = s.dot(&y);
        // Skip the update if the curvature condition s^T y > 0 fails:
        // dividing by a non-positive s^T y would destroy the positive
        // definiteness of the approximation.
        if sy <= 0.0 {
            return Ok(());
        }
        let ss = &s * s.transpose();
        let yy = &y * y.transpose();
        let yhy = y.dot(&(&self.approx_inv_hessian * &y));
        self.approx_inv_hessian +=
            ss / sy - (&self.approx_inv_hessian * &yy * &self.approx_inv_hessian) / yhy;

        Ok(())
    }
}
#[cfg(test)]
mod test_dfp {
    use super::*;
    #[test]
    fn test_outer() {
        // Outer product sanity check: this is the operation used to form
        // s * s^T and y * y^T in the DFP update.
        let a = DVector::from_vec(vec![1.0, 2.0]);
        let b = DVector::from_vec(vec![3.0, 4.0]);
        let c = a * b.transpose();
        println!("{:?}", c);
        assert_eq!(c, DMatrix::from_row_slice(2, 2, &[3.0, 4.0, 6.0, 8.0]));
    }
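
    // A minimal sanity check, sketched under the assumption that the
    // `approx_inv_hessian` getter generated by derive_getters is exposed:
    // for this quadratic the true Hessian is the identity, and since
    // y = s holds at every step, the DFP correction vanishes and the
    // approximation should stay numerically close to the identity.
    #[test]
    fn dfp_keeps_identity_on_identity_quadratic() {
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., x[1] - 1.]);
            (f, g).into()
        };
        let mut ls = MoreThuente::default();
        // Loose tolerance so that no update is performed with corrections
        // small enough to be dominated by rounding error.
        let mut dfp = DFP::new(1e-6, DVector::from(vec![10.0, -7.0]));
        dfp.minimize(&mut ls, f_and_g, 1000, 1000, None).unwrap();
        let identity = DMatrix::<Floating>::identity(2, 2);
        assert!((dfp.approx_inv_hessian() - &identity).norm() < 1e-4);
    }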

    #[test]
    fn dfp_morethuente() {
        std::env::set_var("RUST_LOG", "info");

        let _ = Tracer::default()
            .with_stdout_layer(Some(LogFormat::Normal))
            .build();
        // Convex quadratic with minimizer at (-1, 1); gamma tunes the conditioning.
        let gamma = 1.;
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + gamma * (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., gamma * (x[1] - 1.)]);
            (f, g).into()
        };

        // Line search builder
        let mut ls = MoreThuente::default();

        // DFP solver builder
        let tol = 1e-12;
        let x_0 = DVector::from(vec![180.0, 152.0]);
        let mut dfp = DFP::new(tol, x_0);

        // Minimization
        let max_iter_solver = 1000;
        let max_iter_line_search = 100000;

        dfp.minimize(
            &mut ls,
            f_and_g,
            max_iter_solver,
            max_iter_line_search,
            None,
        )
        .unwrap();

        println!("Iterate: {:?}", dfp.xk());

        let eval = f_and_g(dfp.xk());
        println!("Function eval: {:?}", eval);
        println!("Gradient norm: {:?}", eval.g().norm());
        println!("tol: {:?}", tol);

        let convergence = dfp.has_converged(&eval);
        println!("Convergence: {:?}", convergence);

        assert!((eval.f() - 0.0).abs() < 1e-6);
    }

    #[test]
    fn dfp_backtracking() {
        std::env::set_var("RUST_LOG", "info");

        let _ = Tracer::default()
            .with_stdout_layer(Some(LogFormat::Normal))
            .build();
        // Same convex quadratic as above, minimized with a backtracking line search.
        let gamma = 1.;
        let f_and_g = |x: &DVector<Floating>| -> FuncEvalMultivariate {
            let f = 0.5 * ((x[0] + 1.).powi(2) + gamma * (x[1] - 1.).powi(2));
            let g = DVector::from(vec![x[0] + 1., gamma * (x[1] - 1.)]);
            (f, g).into()
        };

        // Line search builder: alpha is the sufficient-decrease (Armijo)
        // constant, beta the factor by which the step is shrunk at each
        // backtracking iteration.
        let alpha = 1e-4;
        let beta = 0.5;
        let mut ls = BackTracking::new(alpha, beta);

        // DFP solver builder
        let tol = 1e-12;
        let x_0 = DVector::from(vec![180.0, 152.0]);
        let mut dfp = DFP::new(tol, x_0);

        // Minimization
        let max_iter_solver = 1000;
        let max_iter_line_search = 100000;

        dfp.minimize(
            &mut ls,
            f_and_g,
            max_iter_solver,
            max_iter_line_search,
            None,
        )
        .unwrap();

        println!("Iterate: {:?}", dfp.xk());

        let eval = f_and_g(dfp.xk());
        println!("Function eval: {:?}", eval);
        println!("Gradient norm: {:?}", eval.g().norm());
        println!("tol: {:?}", tol);

        let convergence = dfp.has_converged(&eval);
        println!("Convergence: {:?}", convergence);

        assert!((eval.f() - 0.0).abs() < 1e-6);
    }
}