[−][src]Struct argmin::solver::conjugategradient::nonlinear_cg::NonlinearConjugateGradient
The nonlinear conjugate gradient is a generalization of the conjugate gradient method for nonlinear optimization problems.
Example
use argmin::prelude::*; use argmin::solver::conjugategradient::{NonlinearConjugateGradient, PolakRibiere}; use argmin::solver::linesearch::MoreThuenteLineSearch; use argmin::testfunctions::{rosenbrock_2d, rosenbrock_2d_derivative}; // Set up cost function let operator = MyProblem {}; // define initial parameter vector let init_param: Vec<f64> = vec![1.2, 1.2]; // set up line search let linesearch = MoreThuenteLineSearch::new(operator.clone()); // set up beta update method let beta_method = PolakRibiere::new(); // Set up nonlinear conjugate gradient method let mut solver = NonlinearConjugateGradient::new(operator, init_param, linesearch, beta_method)?; // Set maximum number of iterations solver.set_max_iters(20); // Set target cost function value solver.set_target_cost(0.0); // Set the number of iterations when a restart should be performed // This allows the algorithm to "forget" previous information which may not be helpful anymore. solver.set_restart_iters(10); // Set the value for the orthogonality measure. // Setting this parameter leads to a restart of the algorithm (setting beta = 0) after two // consecutive search directions are not orthogonal anymore. In other words, if this condition // is met: // // `|\nabla f_k^T * \nabla f_{k-1}| / ||\nabla f_k||^2 >= v` // // A typical value for `v` is 0.1. solver.set_restart_orthogonality(0.1); // Attach a logger solver.add_logger(ArgminSlogLogger::term()); // Run solver solver.run()?; // Wait a second (lets the logger flush everything before printing to screen again) std::thread::sleep(std::time::Duration::from_secs(1)); // Print result println!("{:?}", solver.result());
References:
[0] Jorge Nocedal and Stephen J. Wright (2006). Numerical Optimization. Springer. ISBN 0-387-30303-0.
Methods
impl<O, L, B> NonlinearConjugateGradient<O, L, B> where
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
[src]
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
pub fn new(
operator: O,
init_param: O::Param,
linesearch: L,
beta_method: B
) -> Result<Self, Error>
[src]
operator: O,
init_param: O::Param,
linesearch: L,
beta_method: B
) -> Result<Self, Error>
Constructor (Polak Ribiere Conjugate Gradient (PR-CG))
pub fn set_restart_iters(&mut self, iters: u64) -> &mut Self
[src]
Specify the number of iterations after which a restart should be performed. This allows the algorithm to "forget" previous information which may not be helpful anymore.
pub fn set_restart_orthogonality(&mut self, v: f64) -> &mut Self
[src]
Set the value for the orthogonality measure. Setting this parameter leads to a restart of the algorithm (setting beta = 0) after two consecutive search directions are not orthogonal anymore. In other words, if this condition is met:
|\nabla f_k^T * \nabla f_{k-1}| / ||\nabla f_k||^2 >= v
A typical value for v
is 0.1.
Trait Implementations
impl<'de, O, L, B> Deserialize<'de> for NonlinearConjugateGradient<O, L, B> where
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
O: Deserialize<'de>,
L: Deserialize<'de>,
B: Deserialize<'de>,
O::Param: Deserialize<'de>,
[src]
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
O: Deserialize<'de>,
L: Deserialize<'de>,
B: Deserialize<'de>,
O::Param: Deserialize<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where
__D: Deserializer<'de>,
[src]
__D: Deserializer<'de>,
impl<O, L, B> Serialize for NonlinearConjugateGradient<O, L, B> where
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
O: Serialize,
L: Serialize,
B: Serialize,
O::Param: Serialize,
[src]
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
O: Serialize,
L: Serialize,
B: Serialize,
O::Param: Serialize,
fn serialize<__S>(&self, __serializer: __S) -> Result<__S::Ok, __S::Error> where
__S: Serializer,
[src]
__S: Serializer,
impl<O, L, B> ArgminSolver for NonlinearConjugateGradient<O, L, B> where
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
[src]
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
fn run(&mut self) -> Result<ArgminResult<Self::Param>, Error>
[src]
Run the optimization algorithm
fn run_fast(&mut self) -> Result<ArgminResult<Self::Param>, Error>
[src]
Run the essential parts of the optimization algorithm (no logging, no Ctrl-C handling)
fn apply(&mut self, param: &Self::Param) -> Result<Self::Output, Error>
[src]
Applies the cost function or operator to a parameter vector param
.
Returns an Err
if apply
of ArgminOperator
is not implemented.
fn gradient(&mut self, param: &Self::Param) -> Result<Self::Param, Error>
[src]
Computes the gradient at parameter param
.
Returns an Err
if gradient
of ArgminOperator
is not implemented.
fn hessian(&mut self, param: &Self::Param) -> Result<Self::Hessian, Error>
[src]
Computes the Hessian at parameter param
.
Returns an Err
if hessian
of ArgminOperator
is not implemented.
fn cur_param(&self) -> Self::Param
[src]
Returns the current parameter vector.
fn cur_grad(&self) -> Self::Param
[src]
Returns the most recently stored gradient.
fn cur_hessian(&self) -> Self::Hessian
[src]
Returns the most recently stored Hessian.
fn set_cur_param(&mut self, param: Self::Param)
[src]
Sets the current parameter to param
.
fn set_cur_grad(&mut self, grad: Self::Param)
[src]
Sets the current gradient to grad
.
fn set_cur_hessian(&mut self, hessian: Self::Hessian)
[src]
Sets the current Hessian to hessian
.
fn set_best_param(&mut self, param: Self::Param)
[src]
Sets the best parameter vector to param
.
fn modify(&self, param: &Self::Param, factor: f64) -> Result<Self::Param, Error>
[src]
Modify the parameter vector by calling the modify
method of the trait
ArgminOperator
. Will return an Err
if modify
is not implemented.
fn result(&self) -> ArgminResult<Self::Param>
[src]
Returns the result of the optimization.
fn set_max_iters(&mut self, iters: u64)
[src]
Sets the maximum number of iterations to iters
.
fn max_iters(&self) -> u64
[src]
Returns the maximum number of iterations.
fn increment_iter(&mut self)
[src]
Increments the iteration counter.
fn cur_iter(&self) -> u64
[src]
Returns the current number of iterations.
fn cur_cost(&self) -> f64
[src]
Returns the most recently stored cost function value.
fn set_cur_cost(&mut self, cost: f64)
[src]
Sets the current cost function value to cost
fn best_cost(&self) -> f64
[src]
Returns the best cost function value obtained so far.
fn set_best_cost(&mut self, cost: f64)
[src]
Sets the best cost function value.
fn set_target_cost(&mut self, cost: f64)
[src]
Sets the target cost function value to cost
. The optimization algorithm will be
terminated when this limit is reached.
fn increment_cost_func_count(&mut self)
[src]
Increments the counter for the computations of the cost function by 1.
fn increase_cost_func_count(&mut self, count: u64)
[src]
Increases the counter for the computations of the cost function by count
.
fn cost_func_count(&self) -> u64
[src]
Returns the current value of the counter for the computations of the cost function.
fn increment_grad_func_count(&mut self)
[src]
Increments the counter for the computations of the gradient by 1.
fn increase_grad_func_count(&mut self, count: u64)
[src]
Increases the counter for the computations of the gradient by count
.
fn grad_func_count(&self) -> u64
[src]
Returns the current value of the counter for the computations of the gradient.
fn increment_hessian_func_count(&mut self)
[src]
Increments the counter for the computations of the Hessian by 1.
fn increase_hessian_func_count(&mut self, count: u64)
[src]
Increases the counter for the computations of the Hessian by count
.
fn hessian_func_count(&self) -> u64
[src]
Returns the current value of the counter for the computations of the Hessian.
fn add_logger(&mut self, logger: Arc<dyn ArgminLog>)
[src]
Attaches a logger which implements ArgminLog
to the solver.
fn add_writer(&mut self, writer: Arc<dyn ArgminWrite<Param = Self::Param>>)
[src]
Attaches a writer which implements ArgminWrite
to the solver.
fn set_termination_reason(&mut self, reason: TerminationReason)
[src]
Sets the TerminationReason
fn terminate(&mut self) -> TerminationReason
[src]
Checks whether any of the conditions to terminate is true and terminates the algorithm.
fn base_reset(&mut self)
[src]
Resets the base
field to its initial conditions. This is helpful for
implementing a solver which is initialized once, but called several times. It is
recommended to only call this method inside the init
function of
ArgminNextIter
.
fn set_checkpoint_dir(&mut self, dir: &str)
[src]
Set checkpoint directory
fn set_checkpoint_name(&mut self, name: &str)
[src]
Set checkpoint name
fn set_checkpoint_mode(&mut self, mode: CheckpointMode)
[src]
Set checkpoint mode
fn from_checkpoint<P>(path: P) -> Result<Self, Error> where
P: AsRef<Path>,
Self: DeserializeOwned,
[src]
P: AsRef<Path>,
Self: DeserializeOwned,
Load solver from checkpoint
impl<O, L, B> ArgminIter for NonlinearConjugateGradient<O, L, B> where
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
[src]
O: ArgminOp<Output = f64>,
O::Param: ArgminSub<O::Param, O::Param> + ArgminDot<O::Param, f64> + ArgminScaledAdd<O::Param, f64, O::Param> + ArgminAdd<O::Param, O::Param> + ArgminMul<f64, O::Param> + ArgminDot<O::Param, f64> + ArgminNorm<f64>,
L: ArgminLineSearch<Param = O::Param, Output = f64, Hessian = O::Hessian>,
B: ArgminNLCGBetaUpdate<O::Param>,
Auto Trait Implementations
impl<O, L, B> Send for NonlinearConjugateGradient<O, L, B> where
B: Send,
L: Send,
<O as ArgminOp>::Hessian: Send,
<O as ArgminOp>::Param: Send,
B: Send,
L: Send,
<O as ArgminOp>::Hessian: Send,
<O as ArgminOp>::Param: Send,
impl<O, L, B> Sync for NonlinearConjugateGradient<O, L, B> where
B: Sync,
L: Sync,
<O as ArgminOp>::Hessian: Sync,
<O as ArgminOp>::Param: Sync,
B: Sync,
L: Sync,
<O as ArgminOp>::Hessian: Sync,
<O as ArgminOp>::Param: Sync,
Blanket Implementations
impl<T> From for T
[src]
impl<T, U> Into for T where
U: From<T>,
[src]
U: From<T>,
impl<T, U> TryFrom for T where
U: Into<T>,
[src]
U: Into<T>,
type Error = !
(try_from) The type returned in the event of a conversion error.
fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T> Borrow for T where
T: ?Sized,
[src]
T: ?Sized,
impl<T> BorrowMut for T where
T: ?Sized,
[src]
T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
[src]
impl<T, U> TryInto for T where
U: TryFrom<T>,
[src]
U: TryFrom<T>,
type Error = <U as TryFrom<T>>::Error
(try_from) The type returned in the event of a conversion error.
fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
[src]
impl<T> Any for T where
T: 'static + ?Sized,
[src]
T: 'static + ?Sized,
impl<T> DeserializeOwned for T where
T: Deserialize<'de>,
[src]
T: Deserialize<'de>,