Struct argmin::solver::gradientdescent::steepestdescent::SteepestDescent

pub struct SteepestDescent<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminNorm<f64>, 
{ /* fields omitted */ }

Steepest descent iteratively takes steps in the direction of the strongest negative gradient. In each iteration, a line search is employed to obtain an appropriate step length.

Example

use argmin::prelude::*;
use argmin::solver::gradientdescent::SteepestDescent;
use argmin::solver::linesearch::HagerZhangLineSearch;
use argmin::solver::linesearch::MoreThuenteLineSearch;
use argmin::solver::linesearch::BacktrackingLineSearch;

// Define cost function (must implement `ArgminOp`)
let cost = MyProblem { };

// Define initial parameter vector
let init_param: Vec<f64> = vec![1.2, 1.2];

// Pick a line search. If no line search algorithm is provided, SteepestDescent defaults to
// HagerZhang.
let linesearch = HagerZhangLineSearch::new(cost.clone());
// let linesearch = MoreThuenteLineSearch::new(cost.clone());
// let linesearch = BacktrackingLineSearch::new(cost.clone());

// Set up solver
let mut solver = SteepestDescent::new(cost, init_param)?;
// Set linesearch. This can be omitted, which will then default to `HagerZhangLineSearch`
solver.set_linesearch(Box::new(linesearch));
// Set maximum number of iterations
solver.set_max_iters(100);

// Attach a logger which will output information in each iteration.
solver.add_logger(ArgminSlogLogger::term_noblock());

// Run the solver
solver.run()?;

// Wait a second (lets the logger flush everything first)
std::thread::sleep(std::time::Duration::from_secs(1));

// Print result
println!("{:?}", solver.result());

References:

[0] Jorge Nocedal and Stephen J. Wright (2006). Numerical Optimization. Springer. ISBN 0-387-30303-0.

Methods

impl<'a, O> SteepestDescent<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminNorm<f64>, 
[src]

pub fn new(
    cost_function: O,
    init_param: <O as ArgminOp>::Param
) -> Result<Self, Error>
[src]

Constructor

pub fn set_linesearch(
    &mut self,
    linesearch: Box<dyn ArgminLineSearch<Param = <O as ArgminOp>::Param, Output = f64, Hessian = <O as ArgminOp>::Hessian> + 'a>
) -> &mut Self
[src]

Specify line search method

Trait Implementations

impl<'a, O> ArgminSolver for SteepestDescent<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminNorm<f64>, 
[src]

fn run(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the optimization algorithm

fn run_fast(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the essential parts of the optimization algorithm (no logging, no Ctrl-C handling)

fn apply(&mut self, param: &Self::Param) -> Result<Self::Output, Error>[src]

Applies the cost function or operator to a parameter vector param. Returns an Err if apply of ArgminOp is not implemented.

fn gradient(&mut self, param: &Self::Param) -> Result<Self::Param, Error>[src]

Computes the gradient at parameter param. Returns an Err if gradient of ArgminOp is not implemented.

fn hessian(&mut self, param: &Self::Param) -> Result<Self::Hessian, Error>[src]

Computes the Hessian at parameter param. Returns an Err if hessian of ArgminOp is not implemented.

fn cur_param(&self) -> Self::Param[src]

Returns the current parameter vector.

fn cur_grad(&self) -> Self::Param[src]

Returns the most recently stored gradient.

fn cur_hessian(&self) -> Self::Hessian[src]

Returns the most recently stored Hessian.

fn set_cur_param(&mut self, param: Self::Param)[src]

Sets the current parameter to param.

fn set_cur_grad(&mut self, grad: Self::Param)[src]

Sets the current gradient to grad.

fn set_cur_hessian(&mut self, hessian: Self::Hessian)[src]

Sets the current Hessian to hessian.

fn set_best_param(&mut self, param: Self::Param)[src]

Sets the best parameter vector to param.

fn modify(&self, param: &Self::Param, factor: f64) -> Result<Self::Param, Error>[src]

Modify the parameter vector by calling the modify method of the trait ArgminOp. Will return an Err if modify is not implemented.

fn result(&self) -> ArgminResult<Self::Param>[src]

Returns the result of the optimization.

fn set_max_iters(&mut self, iters: u64)[src]

Sets the maximum number of iterations to iters.

fn max_iters(&self) -> u64[src]

Returns the maximum number of iterations.

fn increment_iter(&mut self)[src]

Increments the iteration counter.

fn cur_iter(&self) -> u64[src]

Returns the current number of iterations.

fn cur_cost(&self) -> f64[src]

Returns the most recently stored cost function value.

fn set_cur_cost(&mut self, cost: f64)[src]

Sets the current cost function value to cost

fn best_cost(&self) -> f64[src]

Returns the best cost function value obtained so far.

fn set_best_cost(&mut self, cost: f64)[src]

Sets the best cost function value.

fn set_target_cost(&mut self, cost: f64)[src]

Sets the target cost function value to cost. The optimization algorithm will be terminated when this limit is reached.

fn increment_cost_func_count(&mut self)[src]

Increments the counter for the computations of the cost function by 1.

fn increase_cost_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the cost function by count.

fn cost_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the cost function.

fn increment_grad_func_count(&mut self)[src]

Increments the counter for the computations of the gradient by 1.

fn increase_grad_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the gradient by count.

fn grad_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the gradient.

fn increment_hessian_func_count(&mut self)[src]

Increments the counter for the computations of the Hessian by 1.

fn increase_hessian_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the Hessian by count.

fn hessian_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the Hessian.

fn add_logger(&mut self, logger: Arc<dyn ArgminLog>)[src]

Attaches a logger which implements ArgminLog to the solver.

fn add_writer(&mut self, writer: Arc<dyn ArgminWrite<Param = Self::Param>>)[src]

Attaches a writer which implements ArgminWrite to the solver.

fn set_termination_reason(&mut self, reason: TerminationReason)[src]

Sets the TerminationReason

fn terminate(&mut self) -> TerminationReason[src]

Checks whether any of the conditions to terminate is true and terminates the algorithm.

fn base_reset(&mut self)[src]

Resets the base field to its initial conditions. This is helpful for implementing a solver which is initialized once, but called several times. It is recommended to only call this method inside the init function of ArgminNextIter.

impl<'a, O> ArgminIter for SteepestDescent<'a, O> where
    O: 'a + ArgminOp<Output = f64>,
    <O as ArgminOp>::Param: ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminDot<<O as ArgminOp>::Param, f64> + ArgminScaledAdd<<O as ArgminOp>::Param, f64, <O as ArgminOp>::Param> + ArgminMul<f64, <O as ArgminOp>::Param> + ArgminSub<<O as ArgminOp>::Param, <O as ArgminOp>::Param> + ArgminNorm<f64>, 
[src]

type Param = <O as ArgminOp>::Param

Parameter vectors

type Output = f64

Output of the operator

type Hessian = <O as ArgminOp>::Hessian

Hessian

fn next_iter(&mut self) -> Result<ArgminIterData<Self::Param>, Error>[src]

Perform one iteration of the steepest descent algorithm

fn init(&mut self) -> Result<(), Error>[src]

Initializes the algorithm Read more

Auto Trait Implementations

impl<'a, O> !Send for SteepestDescent<'a, O>

impl<'a, O> !Sync for SteepestDescent<'a, O>

Blanket Implementations

impl<T> From for T[src]

impl<T, U> Into for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom for T where
    U: Into<T>, 
[src]

type Error = !

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.

impl<T> Borrow for T where
    T: ?Sized
[src]

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> BorrowMut for T where
    T: ?Sized
[src]

impl<T, U> TryInto for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.