my_bfgs/
lib.rs

1//! This package contains an implementation of
2//! [BFGS](https://en.wikipedia.org/w/index.php?title=BFGS_method), an algorithm for minimizing
3//! convex twice-differentiable functions.
4//!
5//! BFGS is explained at a high level in
6//! [the blog post](https://paulkernfeld.com/2018/08/06/rust-needs-bfgs.html) introducing this
7//! package.
8//!
9//! In this example, we minimize a 2d function:
10//!
11//! ```rust
12//! extern crate my_bfgs as bfgs;
13//! extern crate ndarray;
14//!
15//! use ndarray::{Array, Array1};
16//!
17//! fn main() {
18//!     let x0 = Array::from_vec(vec![8.888, 1.234]);  // Chosen arbitrarily
19//!     let f = |x: &Array1<f64>| x.dot(x);
20//!     let g = |x: &Array1<f64>| 2.0 * x;
21//!     let x_min = bfgs::bfgs(x0, f, g);
22//!     assert_eq!(x_min, Ok(Array::from_vec(vec![0.0, 0.0])));
23//! }
24//! ```
25//!
26//! This project uses [cargo-make](https://sagiegurari.github.io/cargo-make/) for builds; to build,
27//! run `cargo make all`.
28mod benchmark;
29
30use ndarray::{Array1, Array2};
31use std::f64::INFINITY;
32
// Machine epsilon for f64: 2⁻⁵² ≈ 2.22e-16, the spacing between 1.0 and the next
// representable f64.
//
// BUG FIX: this was previously `2e-53`, i.e. 2 × 10⁻⁵³ — roughly 37 orders of
// magnitude smaller than the intended 2⁻⁵³/2⁻⁵². That made F_TOLERANCE ≈ 2e-46,
// so the relative-improvement stopping criterion below could only fire once f
// stopped improving essentially bit-for-bit, defeating the FACTR tuning knob.
const F64_MACHINE_EPSILON: f64 = f64::EPSILON;

// From the L-BFGS paper (Zhu et al. 1994), 1e7 is for "moderate accuracy." 1e12 for "low
// accuracy," 10 for "high accuracy." If factr is 0, the algorithm will only stop if the value of f
// stops improving completely.
const FACTR: f64 = 1e7;

// This is FTOL from Zhu et al.: FACTR multiplied by machine epsilon
// (≈ 2.2e-9 with the "moderate accuracy" FACTR above).
const F_TOLERANCE: f64 = FACTR * F64_MACHINE_EPSILON;
42
43// Dumbly try many values of epsilon, taking the best one
44// Return the value of epsilon that minimizes f
// Brute-force line search over step sizes 2⁻²⁰ .. 2¹⁹ (powers of two),
// returning the step size at which `f` is smallest. On a tie, the smallest
// such step wins (strict `<` keeps the first minimum encountered).
//
// Returns `Err(())` when no candidate evaluated to anything below +∞
// (i.e. `f` returned only +∞ or NaN at every trial step).
fn line_search<F>(f: F) -> Result<f64, ()>
where
    F: Fn(f64) -> f64,
{
    let (best_epsilon, _best_val) = (-20..20)
        .map(|exp| 2.0_f64.powi(exp))
        .fold((0.0, f64::INFINITY), |best, epsilon| {
            let val = f(epsilon);
            if val < best.1 {
                (epsilon, val)
            } else {
                best
            }
        });

    if best_epsilon == 0.0 {
        Err(())
    } else {
        Ok(best_epsilon)
    }
}
66
67fn new_identity_matrix(len: usize) -> Array2<f64> {
68    let mut result = Array2::zeros((len, len));
69    for z in result.diag_mut() {
70        *z = 1.0;
71    }
72    result
73}
74
75// If the improvement in f is not too much bigger than the rounding error, then call it a
76// success. This is the first stopping criterion from Zhu et al.
77fn stop(f_x_old: f64, f_x: f64) -> bool {
78    let negative_delta_f = f_x_old - f_x;
79    let denom = f_x_old.abs().max(f_x.abs()).max(1.0);
80    negative_delta_f / denom <= F_TOLERANCE
81}
82
83/// Returns a value of `x` that should minimize `f`. `f` must be convex and twice-differentiable.
84///
85/// - `x0` is an initial guess for `x`. Often this is chosen randomly.
86/// - `f` is the objective function
87/// - `g` is the gradient of `f`
88#[allow(clippy::many_single_char_names)]
89pub fn bfgs<F, G>(x0: Array1<f64>, f: F, g: G) -> Result<Array1<f64>, ()>
90where
91    F: Fn(&Array1<f64>) -> f64,
92    G: Fn(&Array1<f64>) -> Array1<f64>,
93{
94    let mut x = x0;
95    let mut f_x = f(&x);
96    let mut g_x = g(&x);
97    let p = x.len();
98    assert_eq!(g_x.dim(), x.dim());
99
100    // Initialize the inverse approximate Hessian to the identity matrix
101    let mut b_inv = new_identity_matrix(x.len());
102
103    loop {
104        // Find the search direction
105        let search_dir = -1.0 * b_inv.dot(&g_x);
106
107        // Find a good step size
108        let epsilon = line_search(|epsilon| f(&(&search_dir * epsilon + &x))).map_err(|_| ())?;
109
110        // Save the old values
111        let f_x_old = f_x;
112        let g_x_old = g_x;
113
114        // Take a step in the search direction
115        x.scaled_add(epsilon, &search_dir);
116        f_x = f(&x);
117        g_x = g(&x);
118
119        // Compute deltas between old and new
120        let y: Array2<f64> = (&g_x - &g_x_old)
121            .into_shape((p, 1))
122            .expect("y into_shape failed");
123        let s: Array2<f64> = (epsilon * search_dir)
124            .into_shape((p, 1))
125            .expect("s into_shape failed");
126        let sy: f64 = s.t().dot(&y).into_shape(()).expect("sy into_shape failed")[()];
127        let ss: Array2<f64> = s.dot(&s.t());
128
129        if stop(f_x_old, f_x) {
130            return Ok(x);
131        }
132
133        // Update the Hessian approximation
134        let to_add: Array2<f64> = ss * (sy + &y.t().dot(&b_inv.dot(&y))) / sy.powi(2);
135        let to_sub: Array2<f64> = (b_inv.dot(&y).dot(&s.t()) + s.dot(&y.t().dot(&b_inv))) / sy;
136        b_inv = b_inv + to_add - to_sub;
137    }
138}
139
#[cfg(test)]
mod tests {
    use crate::bfgs;
    use ndarray::{array, Array1};
    use spectral::assert_that;
    use spectral::numeric::OrderedAssertions;

    // NOTE(review): despite the name, this returns the *squared* L2 distance —
    // there is no final sqrt. That is adequate for the threshold check in
    // test_rosenbrock, but the name is misleading; consider renaming or adding
    // `.sqrt()` (which would also tighten/loosen the 0.01 bound — verify first).
    fn l2_distance(xs: &Array1<f64>, ys: &Array1<f64>) -> f64 {
        xs.iter().zip(ys.iter()).map(|(x, y)| (y - x).powi(2)).sum()
    }

    // f(x) = x² in one dimension. Asserts *exact* equality with 0.0 — this
    // relies on the power-of-two line search being able to step exactly onto
    // the minimizer for this particular function and start point.
    #[test]
    fn test_x_squared_1d() {
        let x0 = array![2.0];
        let f = |x: &Array1<f64>| x.iter().map(|xx| xx * xx).sum();
        let g = |x: &Array1<f64>| 2.0 * x;
        let x_min = bfgs(x0, f, g);
        assert_eq!(x_min, Ok(array![0.0]));
    }

    // Starting exactly at the minimizer (zero gradient) must still terminate
    // and return the start point unchanged.
    #[test]
    fn test_begin_at_minimum() {
        let x0 = array![0.0];
        let f = |x: &Array1<f64>| x.iter().map(|xx| xx * xx).sum();
        let g = |x: &Array1<f64>| 2.0 * x;
        let x_min = bfgs(x0, f, g);
        assert_eq!(x_min, Ok(array![0.0]));
    }

    // An error because this function has a maximum instead of a minimum
    #[test]
    fn test_negative_x_squared() {
        let x0 = array![2.0];
        let f = |x: &Array1<f64>| x.iter().map(|xx| -xx * xx).sum();
        let g = |x: &Array1<f64>| -2.0 * x;
        let x_min = bfgs(x0, f, g);
        assert_eq!(x_min, Err(()));
    }

    // Rosenbrock banana function, minimum at (1, 1).
    // NOTE(review): Rosenbrock is not convex, so this exercises bfgs outside
    // its documented precondition; the test only requires landing near (1, 1).
    #[test]
    fn test_rosenbrock() {
        let x0 = array![0.0, 0.0];
        let f = |x: &Array1<f64>| (1.0 - x[0]).powi(2) + 100.0 * (x[1] - x[0].powi(2)).powi(2);
        let g = |x: &Array1<f64>| {
            array![
                -400.0 * (x[1] - x[0].powi(2)) * x[0] - 2.0 * (1.0 - x[0]),
                200.0 * (x[1] - x[0].powi(2)),
            ]
        };
        let x_min = bfgs(x0, f, g).expect("Rosenbrock test failed");
        // Bound is on the squared distance (see l2_distance note above).
        assert_that(&l2_distance(&x_min, &array![1.0, 1.0])).is_less_than(&0.01);
    }
}