[][src]Struct argmin::solver::conjugategradient::nonlinear_cg::NonlinearConjugateGradient

pub struct NonlinearConjugateGradient<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminAdd<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminNorm<f64>, 
{ /* fields omitted */ }

The nonlinear conjugate gradient is a generalization of the conjugate gradient method for nonlinear optimization problems.

Example

use argmin::prelude::*;
use argmin::solver::conjugategradient::NonlinearConjugateGradient;
use argmin::testfunctions::{rosenbrock_2d, rosenbrock_2d_derivative};

// Set up cost function
let operator = MyProblem {};

// define initial parameter vector
let init_param: Vec<f64> = vec![1.2, 1.2];

// Set up nonlinear conjugate gradient method
let mut solver = NonlinearConjugateGradient::new_pr(operator, init_param)?;

// Set maximum number of iterations
solver.set_max_iters(20);

// Set target cost function value
solver.set_target_cost(0.0);

// Set the number of iterations when a restart should be performed
// This allows the algorithm to "forget" previous information which may not be helpful anymore.
solver.set_restart_iters(10);

// Set the value for the orthogonality measure.
// Setting this parameter leads to a restart of the algorithm (setting beta = 0) after two
// consecutive search directions are not orthogonal anymore. In other words, if this condition
// is met:
//
// `|\nabla f_k^T \nabla f_{k-1}| / \|\nabla f_k\|^2 >= v`
//
// A typical value for `v` is 0.1.
solver.set_restart_orthogonality(0.1);

// Attach a logger
solver.add_logger(ArgminSlogLogger::term());

// Run solver
solver.run()?;

// Wait a second (lets the logger flush everything before printing to screen again)
std::thread::sleep(std::time::Duration::from_secs(1));

// Print result
println!("{:?}", solver.result());

References:

[0] Jorge Nocedal and Stephen J. Wright (2006). Numerical Optimization. Springer. ISBN 0-387-30303-0.

Methods

impl<'a, O> NonlinearConjugateGradient<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminAdd<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminNorm<f64>, 
[src]

pub fn new(
    operator: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

Constructor (Polak Ribiere Conjugate Gradient (PR-CG))

pub fn new_pr(
    operator: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

New PolakRibiere CG (PR-CG)

pub fn new_prplus(
    operator: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

New PolakRibierePlus CG (PR+-CG)

pub fn new_fr(
    operator: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

New FletcherReeves CG (FR-CG)

pub fn new_hs(
    operator: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

New HestenesStiefel CG (HS-CG)

pub fn set_linesearch(
    &mut self,
    linesearch: Box<dyn ArgminLineSearch<Param = <O as ArgminOp>::Param, Output = f64, Hessian = <O as ArgminOp>::Hessian> + 'a>
) -> &mut Self
[src]

Specify line search method

pub fn set_beta_update(
    &mut self,
    beta_method: Box<dyn ArgminNLCGBetaUpdate<<O as ArgminOp>::Param> + 'a>
) -> &mut Self
[src]

Specify beta update method

pub fn set_restart_iters(&mut self, iters: u64) -> &mut Self[src]

Specify the number of iterations after which a restart should be performed. This allows the algorithm to "forget" previous information which may not be helpful anymore.

pub fn set_restart_orthogonality(&mut self, v: f64) -> &mut Self[src]

Set the value for the orthogonality measure. Setting this parameter leads to a restart of the algorithm (setting beta = 0) after two consecutive search directions are not orthogonal anymore. In other words, if this condition is met:

|\nabla f_k^T \nabla f_{k-1}| / \|\nabla f_k\|^2 >= v

A typical value for v is 0.1.

Trait Implementations

impl<'a, O> ArgminSolver for NonlinearConjugateGradient<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminAdd<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminNorm<f64>, 
[src]

fn run(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the optimization algorithm

fn run_fast(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the essential parts of the optimization algorithm (no logging, no Ctrl-C handling)

fn apply(&mut self, param: &Self::Param) -> Result<Self::Output, Error>[src]

Applies the cost function or operator to a parameter vector param. Returns an Err if apply of ArgminOperator is not implemented.

fn gradient(&mut self, param: &Self::Param) -> Result<Self::Param, Error>[src]

Computes the gradient at parameter param. Returns an Err if gradient of ArgminOperator is not implemented.

fn hessian(&mut self, param: &Self::Param) -> Result<Self::Hessian, Error>[src]

Computes the Hessian at parameter param. Returns an Err if hessian of ArgminOperator is not implemented.

fn cur_param(&self) -> Self::Param[src]

Returns the current parameter vector.

fn cur_grad(&self) -> Self::Param[src]

Returns the most recently stored gradient.

fn cur_hessian(&self) -> Self::Hessian[src]

Returns the most recently stored Hessian.

fn set_cur_param(&mut self, param: Self::Param)[src]

Sets the current parameter to param.

fn set_cur_grad(&mut self, grad: Self::Param)[src]

Sets the current gradient to grad.

fn set_cur_hessian(&mut self, hessian: Self::Hessian)[src]

Sets the current Hessian to hessian.

fn set_best_param(&mut self, param: Self::Param)[src]

Sets the best parameter vector to param.

fn modify(&self, param: &Self::Param, factor: f64) -> Result<Self::Param, Error>[src]

Modify the parameter vector by calling the modify method of the trait ArgminOperator. Will return an Err if modify is not implemented.

fn result(&self) -> ArgminResult<Self::Param>[src]

Returns the result of the optimization.

fn set_max_iters(&mut self, iters: u64)[src]

Sets the maximum number of iterations to iters.

fn max_iters(&self) -> u64[src]

Returns the maximum number of iterations.

fn increment_iter(&mut self)[src]

Increments the iteration counter.

fn cur_iter(&self) -> u64[src]

Returns the current number of iterations.

fn cur_cost(&self) -> f64[src]

Returns the most recently stored cost function value.

fn set_cur_cost(&mut self, cost: f64)[src]

Sets the current cost function value to cost

fn best_cost(&self) -> f64[src]

Returns the best cost function value obtained so far.

fn set_best_cost(&mut self, cost: f64)[src]

Sets the best cost function value.

fn set_target_cost(&mut self, cost: f64)[src]

Sets the target cost function value to cost. The optimization algorithm will be terminated when this limit is reached.

fn increment_cost_func_count(&mut self)[src]

Increments the counter for the computations of the cost function by 1.

fn increase_cost_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the cost function by count.

fn cost_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the cost function.

fn increment_grad_func_count(&mut self)[src]

Increments the counter for the computations of the gradient by 1.

fn increase_grad_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the gradient by count.

fn grad_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the gradient.

fn increment_hessian_func_count(&mut self)[src]

Increments the counter for the computations of the Hessian by 1.

fn increase_hessian_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the Hessian by count.

fn hessian_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the Hessian.

fn add_logger(&mut self, logger: Arc<dyn ArgminLog>)[src]

Attaches a logger which implements ArgminLog to the solver.

fn add_writer(&mut self, writer: Arc<dyn ArgminWrite<Param = Self::Param>>)[src]

Attaches a writer which implements ArgminWrite to the solver.

fn set_termination_reason(&mut self, reason: TerminationReason)[src]

Sets the TerminationReason

fn terminate(&mut self) -> TerminationReason[src]

Checks whether any of the conditions to terminate is true and terminates the algorithm.

fn base_reset(&mut self)[src]

Resets the base field to its initial conditions. This is helpful for implementing a solver which is initialized once, but called several times. It is recommended to only call this method inside the init function of ArgminNextIter.

impl<'a, O> ArgminIter for NonlinearConjugateGradient<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminAdd<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminNorm<f64>, 
[src]

type Param = <O as ArgminOp>::Param

Parameter vectors

type Output = <O as ArgminOp>::Output

Output of the operator

type Hessian = <O as ArgminOp>::Hessian

Hessian

fn next_iter(&mut self) -> Result<ArgminIterData<Self::Param>, Error>[src]

Perform one iteration of the nonlinear conjugate gradient algorithm

Auto Trait Implementations

impl<'a, O> !Send for NonlinearConjugateGradient<'a, O>

impl<'a, O> !Sync for NonlinearConjugateGradient<'a, O>

Blanket Implementations

impl<T, U> Into for T where
    U: From<T>, 
[src]

impl<T> From for T[src]

impl<T, U> TryFrom for T where
    U: Into<T>, 
[src]

type Error = !

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.

impl<T> Borrow for T where
    T: ?Sized
[src]

impl<T, U> TryInto for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> BorrowMut for T where
    T: ?Sized
[src]