use crate::func1d::Func1D;
use crate::utils::{matrix_solve, LU_decomp, LU_matrix_solve};
use ndarray::{s, Array1, Array2};

/// Figure of merit that is minimized during the fit procedure
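///
/// chi2 = sum_i ((y_i - ymodel_i) / sy_i)^2
///
/// # Example
/// A minimal sketch of a direct call (marked `ignore` since it assumes the crate path):
/// ```ignore
/// use ndarray::array;
/// let y = array![1.0, 2.0, 3.0];
/// let ymodel = array![1.1, 1.9, 3.2];
/// let sy = array![0.1, 0.2, 0.3];
/// // (-1.0)^2 + (0.5)^2 + (-2.0/3.0)^2 ~ 1.69
/// let fom = chi2(&y, &ymodel, &sy);
/// ```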
pub fn chi2(y: &Array1<f64>, ymodel: &Array1<f64>, sy: &Array1<f64>) -> f64 {
    ((y - ymodel) / sy).map(|x| x.powi(2)).sum()
}

/// Contains all relevant information after one minimization step
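///
/// `metric` holds the predicted chi2 reduction that enters the acceptance
/// ratio rho; `metric_gradient` and `metric_parameters` are the convergence
/// measures for the gradient and the relative parameter change.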
pub struct MinimizationStep {
    parameters: Array1<f64>,
    delta: Array1<f64>,
    ymodel: Array1<f64>,
    chi2: f64,
    redchi2: f64,
    metric: f64,
    metric_gradient: f64,
    metric_parameters: f64,
    JT_W_J: Array2<f64>,
}

/// Container to perform a curve fit for a model, given y and sy
///
/// The Minimizer is used to initialize and perform a curve fit. For now only 1-dim
/// functions and a Levenberg-Marquardt algorithm are implemented for test purposes.
/// Results have only been verified on simple functions by comparison with
/// an LM implementation from MINPACK.
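///
/// # Example
/// A hedged sketch of the intended workflow; the `Func1D` construction shown
/// here is an assumption for illustration (see `func1d.rs` for the actual
/// constructor):
/// ```ignore
/// use ndarray::array;
/// let model = Func1D::new(&parameters, &x, linear); // hypothetical setup
/// let vary = array![true, true];
/// let mut minimizer = Minimizer::init(&model, &y, &sy, &vary, 1.0);
/// minimizer.minimize();
/// minimizer.report();
/// ```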
pub struct Minimizer<'a> {
    pub model: &'a Func1D<'a>,
    pub y: &'a Array1<f64>,
    pub sy: &'a Array1<f64>,
    pub vary_parameter: &'a Array1<bool>,
    pub weighting_matrix: Array1<f64>,
    pub minimizer_parameters: Array1<f64>,
    pub minimizer_ymodel: Array1<f64>,
    pub jacobian: Array2<f64>,
    pub parameter_cov_matrix: Array2<f64>,
    pub parameter_errors: Array1<f64>,
    pub lambda: f64,
    pub num_func_evaluation: usize,
    pub max_iterations: usize,
    pub num_varying_params: usize,
    pub num_params: usize,
    pub num_data: usize,
    pub chi2: f64,
    pub dof: usize,
    pub redchi2: f64,
    pub convergence_message: &'a str,
    pub epsilon1: f64,
    pub epsilon2: f64,
    pub epsilon3: f64,
    pub epsilon4: f64,
    pub lambda_UP_fac: f64,
    pub lambda_DOWN_fac: f64,
}

impl<'a> Minimizer<'a> {
    /// Initializes the LM-algorithm. Performs first calculation of model & gradient
    pub fn init<'b>(
        model: &'b Func1D,
        y: &'b Array1<f64>,
        sy: &'b Array1<f64>,
        vary_parameter: &'b Array1<bool>,
        lambda: f64,
    ) -> Minimizer<'b> {
        // evaluate the model at the initial parameter values
        let initial_parameters = model.parameters.clone();
        let minimizer_ymodel = model.for_parameters(&initial_parameters);

        // calculate number of parameters that are being varied
        let num_varying_params = vary_parameter
            .iter()
            .fold(0, |sum, val| if *val { sum + 1 } else { sum });
        let num_params = initial_parameters.len();
        let num_data = model.domain.len();
        let chi2 = chi2(y, &minimizer_ymodel, sy);
        let dof = num_data - num_varying_params;
        let redchi2 = chi2 / (dof as f64);

        // initialize jacobian
        // J is the parameter gradient of f at the current values
        let j = model.parameter_gradient(&initial_parameters, vary_parameter, &minimizer_ymodel);

        // W = 1 / sy^2, only diagonal is considered
        let weighting_matrix: Array1<f64> = sy.map(|x| 1.0 / x.powi(2));

        Minimizer {
            model,
            y,
            sy,
            vary_parameter,
            weighting_matrix,
            minimizer_parameters: initial_parameters,
            minimizer_ymodel,
            jacobian: j,
            parameter_cov_matrix: Array2::zeros((num_varying_params, num_varying_params)),
            parameter_errors: Array1::zeros(num_params),
            lambda,
            num_func_evaluation: 0,
            max_iterations: 10 * num_varying_params,
            num_data,
            num_varying_params,
            num_params,
            chi2,
            dof,
            redchi2,
            convergence_message: "",
            epsilon1: 1e-3,
            epsilon2: 1e-3,
            epsilon3: 1e-1,
            epsilon4: 1e-1,
            lambda_UP_fac: 11.0,
            lambda_DOWN_fac: 9.0,
        }
    }

    /// Performs a Levenberg-Marquardt step
    ///
    /// Determines the change to the parameters by solving the equation
    /// [J^T W J + lambda diag(J^T W J)] delta = J^T W (y - f)
    /// for delta
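    ///
    /// Large lambda pushes the step towards gradient descent, small lambda
    /// towards Gauss-Newton.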
    pub fn lm(&mut self) -> MinimizationStep {
        // J^T is cloned to be multiplied by weighting_matrix later
        let mut jt = self.jacobian.clone().reversed_axes();

        // multiply J^T with W to obtain J^T W
        for i in 0..self.num_data {
            let mut col = jt.column_mut(i);
            col *= self.weighting_matrix[i];
        }

        // calculate J^T W (y - f) (rhs of LM step)
        let b = jt.dot(&(self.y - &self.minimizer_ymodel));

        // calculate J^T W J + lambda*diag(J^T W J)  [lhs of LM step]
        // first J^T W J
        let JT_W_J = jt.dot(&self.jacobian);

        let lambdaDiagJT_W_J = self.lambda * &JT_W_J.diag();
        let mut A = JT_W_J.clone();
        for i in 0..self.num_varying_params {
            A[[i, i]] += lambdaDiagJT_W_J[i];
        }

        // solve LM step for delta
        let delta: Array1<f64> = matrix_solve(&A, &b);

        // expand delta to the full parameter vector (fixed parameters keep delta = 0)
        let mut delta_all: Array1<f64> = Array1::zeros(self.num_params);
        let mut idx_vary_param = 0;
        for i in 0..self.num_params {
            if self.vary_parameter[i] {
                delta_all[i] = delta[idx_vary_param];
                idx_vary_param += 1;
            }
        }

        // calculate the predicted chi2 reduction, used as the denominator of rho
        let mut metric = delta.dot(&b);

        for i in 0..self.num_varying_params {
            metric += delta[i].powi(2) * lambdaDiagJT_W_J[i];
        }

        // take maximum of the absolute value in the respective arrays as metric for the
        // convergence of either the gradient or the parameters
        let metric_gradient = b.fold(f64::NAN, |max, x| max.max(x.abs()));

        let metric_parameters =
            (&delta_all / &self.minimizer_parameters).fold(f64::NAN, |max, x| max.max(x.abs()));

        let updated_parameters = &self.minimizer_parameters + &delta_all;

        let updated_model = self.model.for_parameters(&updated_parameters);
        let updated_chi2 = chi2(self.y, &updated_model, self.sy);
        let redchi2 = updated_chi2 / (self.dof as f64);

        MinimizationStep {
            parameters: updated_parameters,
            delta,
            ymodel: updated_model,
            chi2: updated_chi2,
            redchi2,
            metric,
            metric_gradient,
            metric_parameters,
            JT_W_J,
        }
    }

    /// Fit routine that performs LM steps until one convergence criterion is met
    ///
    /// Follows the description from http://people.duke.edu/~hpgavin/ce281/lm.pdf
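    ///
    /// A step is accepted if
    /// rho = (chi2 - chi2_new) / (delta^T (lambda diag(J^T W J) delta + J^T W (y - f)))
    /// exceeds epsilon4; lambda is then decreased, otherwise increased.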
    pub fn minimize(&mut self) {
        let mut iterations = 0;
        let inverse_parameter_cov_matrix: Array2<f64>;

        loop {
            let update_step = self.lm();
            iterations += 1;

            // compare chi2 before and after with respect to metric to decide if step is accepted
            let rho = (self.chi2 - update_step.chi2) / update_step.metric;

            if rho > self.epsilon4 {
                // new parameters are better, decrease lambda
                self.lambda = (self.lambda / self.lambda_DOWN_fac).max(1e-7);

                // update jacobian
                if iterations % (2 * self.num_varying_params) == 0 {
                    // at every 2*n steps update jacobian by explicit calculation
                    // requires #params function evaluations
                    self.jacobian = self.model.parameter_gradient(
                        &update_step.parameters,
                        self.vary_parameter,
                        &update_step.ymodel,
                    );
                    self.num_func_evaluation += self.num_varying_params;
                } else {
                    // otherwise update jacobian with Broyden rank-1 update formula
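                    // J <- J + (f_new - f_old - J delta) delta^T / (delta^T delta)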
                    let norm_delta = update_step.delta.dot(&update_step.delta);
                    let diff = &update_step.ymodel
                        - &self.minimizer_ymodel
                        - self.jacobian.dot(&update_step.delta);
                    let mut jacobian_change: Array2<f64> =
                        Array2::zeros((self.num_data, self.num_varying_params));

                    for i in 0..self.num_varying_params {
                        let mut col_slice = jacobian_change.slice_mut(s![.., i]);
                        col_slice.assign(&(&diff * update_step.delta[i] / norm_delta));
                    }

                    self.jacobian = &self.jacobian + &jacobian_change;
                }

                // store new state in Minimizer
                self.minimizer_parameters = update_step.parameters;
                self.minimizer_ymodel = update_step.ymodel;
                self.chi2 = update_step.chi2;
                self.redchi2 = update_step.redchi2;

                // check convergence criteria
                // gradient converged
                if update_step.metric_gradient < self.epsilon1 {
                    self.convergence_message = "Gradient converged";
                    inverse_parameter_cov_matrix = update_step.JT_W_J;
                    break;
                }

                // parameters converged
                if update_step.metric_parameters < self.epsilon2 {
                    self.convergence_message = "Parameters converged";
                    inverse_parameter_cov_matrix = update_step.JT_W_J;
                    break;
                }

                // chi2 converged
                if update_step.redchi2 < self.epsilon3 {
                    self.convergence_message = "Chi2 converged";
                    inverse_parameter_cov_matrix = update_step.JT_W_J;
                    break;
                }
                if iterations >= self.max_iterations {
                    self.convergence_message = "Reached max. number of iterations";
                    inverse_parameter_cov_matrix = update_step.JT_W_J;
                    break;
                }
            } else {
                // new chi2 is not good enough, increase lambda
                self.lambda = (self.lambda * self.lambda_UP_fac).min(1e7);
                // step is rejected, update jacobian by explicit calculation
                self.jacobian = self.model.parameter_gradient(
                    &self.minimizer_parameters,
                    self.vary_parameter,
                    &self.minimizer_ymodel,
                );
            }
        }

        // calculate parameter covariance matrix using the LU decomposition
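        // each column x_i of the inverse solves (J^T W J) x_i = e_i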
        let (L, U, P) = LU_decomp(&inverse_parameter_cov_matrix);
        for i in 0..self.num_varying_params {
            let mut unit_vector = Array1::zeros(self.num_varying_params);
            unit_vector[i] = 1.0;
            let mut col_slice = self.parameter_cov_matrix.slice_mut(s![.., i]);
            col_slice.assign(&LU_matrix_solve(&L, &U, &P, &unit_vector));
        }
        // parameter fit errors are the sqrt of the covariance diagonal,
        // scaled by the reduced chi2
        let mut idx_vary_param = 0;
        let mut all_errors: Array1<f64> = Array1::zeros(self.num_params);
        for i in 0..self.num_params {
            if self.vary_parameter[i] {
                all_errors[i] = (self.parameter_cov_matrix[[idx_vary_param, idx_vary_param]]
                    * self.redchi2)
                    .sqrt();
                idx_vary_param += 1;
            }
        }
        self.parameter_errors = all_errors;
    }

    /// Prints report of a performed fit
    pub fn report(&self) {
        // calculate coefficient of determination
        let R2 = self.calculate_R2();

        println!("\t #Chi2:\t{:.6}", self.chi2);
        println!("\t #Red. Chi2:\t{:.6}", self.redchi2);
        println!("\t #R2:\t{:.6}", R2);
        println!("\t #Func. Evaluations:\t{}", self.num_func_evaluation);
        println!("\t #Converged by:\t{}", self.convergence_message);
        println!("---- Parameters ----");
        for i in 0..self.minimizer_parameters.len() {
            if self.vary_parameter[i] {
                println!(
                    "{:.8} +/- {:.8} ({:.2} %)\t(init: {})",
                    self.minimizer_parameters[i],
                    self.parameter_errors[i],
                    (self.parameter_errors[i] / self.minimizer_parameters[i]).abs() * 100.0,
                    self.model.parameters[i]
                );
            } else {
                println!("{:.8}", self.minimizer_parameters[i]);
            }
        }
    }

    /// Calculate the coefficient of determination
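    ///
    /// R2 = 1 - sum_i (y_i - ymodel_i)^2 / sum_i (y_i - mean(y))^2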
    pub fn calculate_R2(&self) -> f64 {
        let mean_y = self.y.sum() / self.y.len() as f64;
        let mut res_sum_sq = 0.0;
        let mut tot_sum_sq = 0.0;
        for i in 0..self.y.len() {
            res_sum_sq += (self.y[i] - self.minimizer_ymodel[i]).powi(2);
            tot_sum_sq += (self.y[i] - mean_y).powi(2);
        }
        1.0 - res_sum_sq / tot_sum_sq
    }
}