1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
use crate::{
    algebra::{
        abstr::Real,
        linear::{matrix::{Transpose, Solve}, Matrix, Vector},
    },
    optimization::{Optim, OptimResult},
};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::clone::Clone;

/// Levenberg-Marquardt method
///
/// input: $` f \colon \mathbb{R}^{n} \to \mathbb{R} `$ with initial
/// approximation $` x_{0} \in \mathbb{R}^{n} `$
///
/// output: $` x_{k} `$
///
/// 1. Initialization: $` 0 \leq \rho^{-} < \rho^{+} \leq 1 `$, set $` k := 0 `$
/// 2. Solve the damped normal equations for the step $` d_{k} `$
/// 3. Compute the gain ratio
///    $` \rho = \frac{\lVert f(x_{k}) \rVert_{2}^{2} - \lVert f(x_{k+1}) \rVert_{2}^{2}}{2 d_{k}^{T} (f^{'}(x_{k}))^{T} f(x_{k})} `$
/// 4. If $` \rho_{k} > \rho^{-} `$ then $` x_{k+1} := x_{k} + d_{k} `$,
///    else $` \Delta_{k+1} := \Delta_{k}/2 `$ and go to 2.
/// 5. If $` \rho_{k} > \rho^{+} `$ then $` \Delta_{k+1} := 2\Delta_{k} `$,
///    else $` \Delta_{k+1} := \Delta_{k} `$
/// 6. Set $` k := k + 1 `$, go to 2.
///
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Copy, Debug)]
pub struct LevenbergMarquardt<T>
{
    // Maximum number of outer iterations performed by `minimize`.
    iters: u64,
    // Lower gain-ratio threshold rho^- (constructor argument `rho_minus`);
    // a step whose gain ratio falls below it is rejected and damping doubled.
    beta_0: T,
    // Upper gain-ratio threshold rho^+ (constructor argument `rho_plus`);
    // a step whose gain ratio exceeds it halves the damping.
    beta_1: T,
}

impl<T> LevenbergMarquardt<T>
{
    /// Construct a Levenberg-Marquardt solver.
    ///
    /// # Arguments
    ///
    /// * `iters`: maximum number of outer iterations
    /// * `rho_minus`: lower gain-ratio threshold $` \rho^{-} `$ (stored as `beta_0`)
    /// * `rho_plus`: upper gain-ratio threshold $` \rho^{+} `$ (stored as `beta_1`)
    pub fn new(iters: u64, rho_minus: T, rho_plus: T) -> LevenbergMarquardt<T>
    {
        LevenbergMarquardt {
            iters,
            beta_0: rho_minus,
            beta_1: rho_plus,
        }
    }
}

impl<T> LevenbergMarquardt<T> where T: Real
{
    /// Minimize function func
    ///
    /// # Arguments
    ///
    /// * 'func': Function to be minimized
    /// * 'x_0': Initial guess for the minimum
    ///
    /// # Return
    ///
    /// local minimum
    pub fn minimize<F>(self: &Self, func: &F, x_0: &Vector<T>) -> OptimResult<Vector<T>>
        where F: Optim<T>
    {
        let two: T = T::from_f64(2.0);
        let mut x: Vector<T> = x_0.clone();
        // Damping parameter; enters the normal equations as mu^2 * I.
        let mut mu: T = T::from_f64(0.5);

        for _ in 0..self.iters
        {
            // Inner loop: retry with stronger damping until a step is accepted.
            let step: Vector<T> = loop
            {
                let jacobian: Matrix<T> = func.jacobian(&x);
                let jacobian_t: Matrix<T> = jacobian.clone().transpose();
                let residual: Vector<T> = func.eval(&x);

                // Right-hand side of the damped normal equations: -J^T f(x)
                let rhs: Vector<T> = -(&jacobian_t * &residual);
                let (_rows, cols) = jacobian.dim();
                // (J^T J + mu^2 I) d = -J^T f(x)
                // NOTE(review): `unwrap` panics when the damped system is
                // singular — kept to preserve the original behavior.
                let lhs: Matrix<T> = &jacobian_t * &jacobian + Matrix::one(cols) * mu * mu;
                let d: Vector<T> = lhs.solve(&rhs).unwrap();

                let x_candidate = &x + &d;
                let residual_candidate: Vector<T> = func.eval(&x_candidate);

                // Gain ratio: actual reduction of ||f||^2 divided by the
                // reduction predicted by the linear model f(x) + J d.
                let actual: T = residual.dotp(&residual) - residual_candidate.dotp(&residual_candidate);
                let model: Vector<T> = &residual + &(&jacobian * &d);
                let predicted: T = residual.dotp(&residual) - model.dotp(&model);
                let gain: T = actual / predicted;

                if gain < self.beta_0
                {
                    // Poor agreement with the linear model: reject the step,
                    // double the damping and try again.
                    mu = mu * two;
                }
                else
                {
                    if gain > self.beta_1
                    {
                        // Very good agreement: relax the damping for the
                        // next outer iteration.
                        mu = mu / two;
                    }
                    break d;
                }
            };
            x = x + step;
        }

        OptimResult::new(x)
    }
}