rgsl/types/multiroot.rs
1//
2// A rust binding for the GSL library by Guillaume Gomez (guillaume1.gomez@gmail.com)
3//
4
5/*!
6# Multi dimensional Root-Finding
7
8This chapter describes functions for multidimensional root-finding (solving nonlinear systems with
9n equations in n unknowns). The library provides low level components for a variety of iterative
10solvers and convergence tests. These can be combined by the user to achieve the desired solution,
11with full access to the intermediate steps of the iteration. Each class of methods uses the same
12framework, so that you can switch between solvers at runtime without needing to recompile your
13program. Each instance of a solver keeps track of its own state, allowing the solvers to be used
14in multi-threaded programs. The solvers are based on the original Fortran library MINPACK.
15The header file `gsl_multiroots.h` contains prototypes for the multidimensional root finding functions
16and related declarations.
17
18## Overview
19The problem of multidimensional root finding requires the simultaneous solution of n equations,
20f_i, in n variables, x_i,
21
22```text
23f_i (x_1, \dots, x_n) = 0 \qquad\hbox{for}~i = 1 \dots n.
24```
25
26In general there are no bracketing methods available for n dimensional systems, and no way of
27knowing whether any solutions exist. All algorithms proceed from an initial guess using a
28variant of the Newton iteration,
29
30```text
31x \to x' = x - J^{-1} f(x)
32```
33where x, f are vector quantities and J is the Jacobian matrix J_{ij} = \partial f_i / \partial x_j.
34Additional strategies can be used to enlarge the region of convergence. These include requiring a
35decrease in the norm |f| on each step proposed by Newton’s method, or taking steepest-descent
36steps in the direction of the negative gradient of |f|.
37
38Several root-finding algorithms are available within a single framework. The user provides a
39high-level driver for the algorithms, and the library provides the individual functions necessary
40for each of the steps. There are three main phases of the iteration. The steps are,
41
42- initialize solver state, `s`, for algorithm `T`
43- update `s` using the iteration `T`
44- test `s` for convergence, and repeat iteration if necessary
45
46The evaluation of the Jacobian matrix can be problematic, either because programming the derivatives
47is intractable or because computation of the n^2 terms of the matrix becomes too expensive.
48For these reasons the algorithms provided by the library are divided into two classes according to
49whether the derivatives are available or not.
50
51The state for solvers with an analytic Jacobian matrix is held in a `gsl_multiroot_fdfsolver` struct.
52The updating procedure requires both the function and its derivatives to be supplied by the user.
53
54The state for solvers which do not use an analytic Jacobian matrix is held in a
55`gsl_multiroot_fsolver` struct. The updating procedure uses only function evaluations (not derivatives).
56The algorithms estimate the matrix J or J^{-1} by approximate methods.
57!*/
58
59use crate::{Value, VectorF64, View};
60use ffi::FFI;
61use sys;
62use sys::libc::{c_int, c_void};
63
// FFI wrapper around `*const gsl_multiroot_fsolver_type`: an opaque algorithm
// descriptor selected via the constructors in the `impl` below and consumed by
// `gsl_multiroot_fsolver_alloc`.
ffi_wrapper!(
    MultiRootFSolverType,
    *const sys::gsl_multiroot_fsolver_type,
    "The multiroot algorithms described in this section do not require any derivative information to be
supplied by the user. Any derivatives needed are approximated by finite differences.
Note that if the finite-differencing step size chosen by these routines is inappropriate,
an explicit user-supplied numerical derivative can always be used with
derivative-based algorithms."
);
73
impl MultiRootFSolverType {
    /// This is a version of the Hybrid algorithm which replaces calls to the Jacobian function by
    /// its finite difference approximation. The finite difference approximation is computed
    /// using `gsl_multiroots_fdjac()` with a relative step size of `GSL_SQRT_DBL_EPSILON`.
    /// Note that this step size will not be suitable for all problems.
    #[doc(alias = "gsl_multiroot_fsolver_hybrids")]
    pub fn hybrids() -> MultiRootFSolverType {
        ffi_wrap!(gsl_multiroot_fsolver_hybrids)
    }

    /// This is a finite difference version of the Hybrid algorithm without internal scaling.
    #[doc(alias = "gsl_multiroot_fsolver_hybrid")]
    pub fn hybrid() -> MultiRootFSolverType {
        ffi_wrap!(gsl_multiroot_fsolver_hybrid)
    }

    /// The discrete Newton algorithm is the simplest method of solving a multidimensional
    /// system. It uses the Newton iteration
    ///
    ///```text
    ///x \to x - J^{-1} f(x)
    ///```
    ///
    /// where the Jacobian matrix J is approximated by taking finite differences of the function f.
    /// The approximation scheme used by this implementation is,
    ///
    ///```text
    ///J_{ij} = (f_i(x + \delta_j) - f_i(x)) / \delta_j
    ///```
    ///
    /// where \delta_j is a step of size \sqrt\epsilon |x_j| with \epsilon being the machine
    /// precision (\epsilon \approx 2.22 \times 10^{-16}). The order of convergence of Newton’s
    /// algorithm is quadratic, but the finite differences require n^2 function evaluations on
    /// each iteration. The algorithm may become unstable if the finite differences are not a
    /// good approximation to the true derivatives.
    #[doc(alias = "gsl_multiroot_fsolver_dnewton")]
    pub fn dnewton() -> MultiRootFSolverType {
        ffi_wrap!(gsl_multiroot_fsolver_dnewton)
    }

    /// The Broyden algorithm is a version of the discrete Newton algorithm which attempts to
    /// avoid the expensive update of the Jacobian matrix on each iteration. The changes to
    /// the Jacobian are also approximated, using a rank-1 update,
    ///
    ///```text
    ///J^{-1} \to J^{-1} - (J^{-1} df - dx) dx^T J^{-1} / dx^T J^{-1} df
    ///```
    ///
    /// where the vectors dx and df are the changes in x and f. On the first iteration the inverse
    /// Jacobian is estimated using finite differences, as in the discrete Newton algorithm.
    ///
    /// This approximation gives a fast update but is unreliable if the changes are not small, and
    /// the estimate of the inverse Jacobian becomes worse as time passes. The algorithm has a
    /// tendency to become unstable unless it starts close to the root. The Jacobian is refreshed
    /// if this instability is detected (consult the source for details).
    ///
    /// This algorithm is included only for demonstration purposes, and is not recommended for
    /// serious use.
    #[doc(alias = "gsl_multiroot_fsolver_broyden")]
    pub fn broyden() -> MultiRootFSolverType {
        ffi_wrap!(gsl_multiroot_fsolver_broyden)
    }
}
137
// FFI wrapper owning a `*mut gsl_multiroot_fsolver` (freed via
// `gsl_multiroot_fsolver_free`). Besides the raw pointer the wrapper stores:
// - `inner_call`: the `gsl_multiroot_function_struct` handed to GSL, which must
//   stay alive (and at a stable address) while the solver uses it;
// - `inner_closure`: the boxed user closure, kept for the solver lifetime `'a`
//   so the function pointer/params registered with GSL remain valid.
ffi_wrapper!(
    MultiRootFSolver<'a>,
    *mut sys::gsl_multiroot_fsolver,
    gsl_multiroot_fsolver_free
    ;inner_call: sys::gsl_multiroot_function_struct => sys::gsl_multiroot_function_struct{ f: None, n: 0, params: std::ptr::null_mut() };
    ;inner_closure: Option<Box<dyn Fn(&VectorF64, &mut VectorF64) -> Value + 'a>> => None;,
    "This is a workspace for multidimensional root-finding without derivatives."
);
146
147impl<'a> MultiRootFSolver<'a> {
148 /// This function returns a pointer to a newly allocated instance of a solver of type `T` with
149 /// `n` unknowns.
150 ///
151 /// If there is insufficient memory to create the solver then the function returns a null
152 /// pointer and the error handler is invoked with an error code of `Value::NoMemory`.
153 #[doc(alias = "gsl_multiroot_fsolver_alloc")]
154 pub fn new(t: &MultiRootFSolverType, n: usize) -> Option<MultiRootFSolver<'a>> {
155 let ptr = unsafe { sys::gsl_multiroot_fsolver_alloc(t.unwrap_shared(), n) };
156
157 if ptr.is_null() {
158 None
159 } else {
160 Some(MultiRootFSolver::wrap(ptr))
161 }
162 }
163
164 /// This function initializes, or reinitializes, an existing solver `s` to use the multi
165 /// function `f` with `n` unknowns.
166 #[doc(alias = "gsl_multiroot_fsolver_set")]
167 pub fn set<F: Fn(&VectorF64, &mut VectorF64) -> Value + 'a>(
168 &mut self,
169 f: F,
170 n: usize,
171 x: &VectorF64,
172 ) -> Result<(), Value> {
173 unsafe extern "C" fn inner_f<A: Fn(&VectorF64, &mut VectorF64) -> Value>(
174 x: *const sys::gsl_vector,
175 params: *mut c_void,
176 f: *mut sys::gsl_vector,
177 ) -> c_int {
178 let g: &A = &*(params as *const A);
179 let x_new = VectorF64::soft_wrap(x as *const _ as *mut _);
180 Value::into(g(&x_new, &mut VectorF64::soft_wrap(f)))
181 }
182
183 self.inner_call = sys::gsl_multiroot_function_struct {
184 f: Some(inner_f::<F>),
185 n,
186 params: &f as *const _ as *mut _,
187 };
188 self.inner_closure = Some(Box::new(f));
189
190 let ret = unsafe {
191 sys::gsl_multiroot_fsolver_set(
192 self.unwrap_unique(),
193 &mut self.inner_call,
194 x.unwrap_shared(),
195 )
196 };
197 result_handler!(ret, ())
198 }
199
200 /// This function performs a single iteration of the minimizer s. If the iteration encounters an
201 /// unexpected problem then an error code will be returned,
202 ///
203 /// `Value::BadFunc`
204 /// the iteration encountered a singular point where the function evaluated to Inf or NaN.
205 ///
206 /// `Value::Failure`
207 /// the algorithm could not improve the current best approximation or bounding interval.
208 ///
209 /// The minimizer maintains a current best estimate of the position of the minimum at all times,
210 /// and the current interval bounding the minimum. This information can be accessed with the
211 /// following auxiliary functions,
212 #[doc(alias = "gsl_multiroot_fsolver_iterate")]
213 pub fn iterate(&mut self) -> Result<(), Value> {
214 let ret = unsafe { sys::gsl_multiroot_fsolver_iterate(self.unwrap_unique()) };
215 result_handler!(ret, ())
216 }
217
218 /// This function returns the current estimate of the root for the solver `s`, given by `s->x`.
219 #[doc(alias = "gsl_multiroot_fsolver_root")]
220 pub fn root(&self) -> View<'_, VectorF64> {
221 unsafe { View::new(sys::gsl_multiroot_fsolver_root(self.unwrap_shared())) }
222 }
223
224 /// This function returns the last step `dx` taken by the solver `s`, given by `s->dx`.
225 #[doc(alias = "gsl_multiroot_fsolver_dx")]
226 pub fn dx(&self) -> View<'_, VectorF64> {
227 unsafe { View::new(sys::gsl_multiroot_fsolver_dx(self.unwrap_shared())) }
228 }
229
230 /// This function returns the function value `f(x)` at the current estimate of the root for
231 /// the solver `s`, given by `s->f`.
232 #[doc(alias = "gsl_multiroot_fsolver_f")]
233 pub fn f(&self) -> View<'_, VectorF64> {
234 unsafe { View::new(sys::gsl_multiroot_fsolver_f(self.unwrap_shared())) }
235 }
236}
237
#[cfg(any(test, doctest))]
mod tests {
    /// This doc block will be used to ensure that the closure can't be set everywhere!
    ///
    /// ```compile_fail
    /// use rgsl::*;
    /// use rgsl::types::multiroot::{MultiRootFSolver, MultiRootFSolverType};
    ///
    /// fn set(root: &mut MultiRootFSolver) {
    ///     let dummy = "lalal".to_owned();
    ///     root.set(|x, y| {
    ///         println!("==> {:?}", dummy);
    ///         y.set(0, 1.0 - x.get(0));
    ///         y.set(1, x.get(0) - x.get(1));
    ///         rgsl::Value::Success}, 2, &rgsl::VectorF64::from_slice(&[-10.0, 1.0]).unwrap());
    /// }
    ///
    /// let mut root = MultiRootFSolver::new(&MultiRootFSolverType::hybrid(), 2).unwrap();
    /// set(&mut root);
    /// let status = root.iterate();
    /// ```
    ///
    /// Same but a working version:
    ///
    /// ```
    /// use rgsl::types::multiroot::{MultiRootFSolver, MultiRootFSolverType};
    /// use rgsl::*;
    ///
    /// fn set(root: &mut MultiRootFSolver) {
    ///     root.set(|x, y| {
    ///         y.set(0, 1.0 - x.get(0));
    ///         y.set(1, x.get(0) - x.get(1));
    ///         rgsl::Value::Success}, 2, &rgsl::VectorF64::from_slice(&[-10.0, 1.0]).unwrap());
    /// }
    ///
    /// let mut root = MultiRootFSolver::new(&MultiRootFSolverType::hybrid(), 2).unwrap();
    /// set(&mut root);
    /// let status = root.iterate();
    /// ```
    ///
    use super::*;
    use multiroot::test_residual;
    use VectorF64;

    /// Rosenbrock-style test system: f0 = 1 - x0, f1 = x0 - x1^2.
    /// Writes the residuals into `f` and reports a successful evaluation.
    fn rosenbrock_f(x: &VectorF64, f: &mut VectorF64) -> Value {
        f.set(0, 1.0 - x.get(0));
        f.set(1, x.get(0) - x.get(1).powi(2));
        Value::Success
    }

    /// Prints the current residuals and root estimate.
    /// Only needs shared access to the solver.
    fn print_state(solver: &MultiRootFSolver, iteration: usize) {
        let f = solver.f();
        let x = solver.root();
        println!(
            "iter: {}, f = [{:+.2e}, {:+.2e}], x = [{:+.5}, {:+.5}]",
            iteration,
            f.get(0),
            f.get(1),
            x.get(0),
            x.get(1)
        )
    }

    #[test]
    fn test_multiroot_fsolver() {
        // setup workspace
        let mut multi_root = MultiRootFSolver::new(&MultiRootFSolverType::hybrid(), 2).unwrap();
        let array_size: usize = 2;
        let guess_value = VectorF64::from_slice(&[-10.0, -5.0]).unwrap();
        multi_root
            .set(rosenbrock_f, array_size, &guess_value)
            .unwrap();

        // iteration counters
        let max_iter: usize = 100;
        let mut iter = 0;

        // convergence checks
        let mut status = crate::Value::Continue;
        let epsabs = 1e-6;

        print_state(&multi_root, 0);

        while matches!(status, crate::Value::Continue) && iter < max_iter {
            // Count the iteration before printing so the report matches the
            // number of `iterate` calls performed (previously the first
            // in-loop print repeated "iter: 0").
            iter += 1;

            // iterate solver
            multi_root.iterate().unwrap();

            // print current iteration
            print_state(&multi_root, iter);

            // test for convergence
            let f_value = multi_root.f();
            status = test_residual(&f_value, epsabs);

            // check if iteration succeeded
            if matches!(status, crate::Value::Success) {
                println!("Converged");
            }
        }
        assert!(matches!(status, crate::Value::Success))
    }
}
343}