Struct argmin::solver::simulatedannealing::SimulatedAnnealing

pub struct SimulatedAnnealing<O> where
    O: ArgminOp<Output = f64>, 
{ /* fields omitted */ }

Simulated Annealing

Example

extern crate argmin;
extern crate rand;
use argmin::prelude::*;
use argmin::solver::simulatedannealing::{SATempFunc, SimulatedAnnealing};
use argmin::testfunctions::rosenbrock;
use rand::prelude::*;
use std::sync::{Arc, Mutex};

/// Cost-function operator for the 2D Rosenbrock test function with box
/// constraints, evaluated by `apply` and perturbed by `modify` below.
#[derive(Clone)]
struct Rosenbrock {
    /// Parameter a, usually 1.0
    a: f64,
    /// Parameter b, usually 100.0
    b: f64,
    /// Lower bound, one entry per parameter dimension
    lower_bound: Vec<f64>,
    /// Upper bound, one entry per parameter dimension
    upper_bound: Vec<f64>,
    /// Random number generator. We use a `Arc<Mutex<_>>` here because `ArgminOperator` requires
    /// `self` to be passed as an immutable reference. This gives us thread safe interior
    /// mutability.
    rng: Arc<Mutex<SmallRng>>,
}

impl std::default::Default for Rosenbrock {
    /// Builds the conventional Rosenbrock instance (a = 1.0, b = 100.0)
    /// with both parameters constrained to the interval [-5, 5].
    fn default() -> Self {
        Rosenbrock::new(1.0, 100.0, vec![-5.0, -5.0], vec![5.0, 5.0])
    }
}

impl Rosenbrock {
    /// Creates a new `Rosenbrock` operator with the given coefficients and
    /// per-dimension box constraints. The RNG is freshly seeded from system
    /// entropy and wrapped in `Arc<Mutex<_>>` for shared interior mutability.
    pub fn new(a: f64, b: f64, lower_bound: Vec<f64>, upper_bound: Vec<f64>) -> Self {
        let rng = Arc::new(Mutex::new(SmallRng::from_entropy()));
        Rosenbrock {
            a,
            b,
            lower_bound,
            upper_bound,
            rng,
        }
    }
}

impl ArgminOp for Rosenbrock {
    type Param = Vec<f64>;
    type Output = f64;
    type Hessian = ();

    /// Evaluates the Rosenbrock cost function at `param`.
    fn apply(&self, param: &Vec<f64>) -> Result<f64, Error> {
        Ok(rosenbrock(param, self.a, self.b))
    }

    /// This function is called by the annealing function
    fn modify(&self, param: &Vec<f64>, temp: f64) -> Result<Vec<f64>, Error> {
        let mut param_n = param.clone();
        // Perform modifications to a degree proportional to the current temperature `temp`.
        for _ in 0..(temp.floor() as u64 + 1) {
            // Compute random index of the parameter vector using the supplied random number
            // generator.
            let mut rng = self.rng.lock().unwrap();
            let idx = (*rng).gen_range(0, param.len());

            // Compute random number in [-0.01, 0.01].
            let val = 0.01 * (*rng).gen_range(-1.0, 1.0);

            // Modify the previous parameter value at random position `idx` by `val`.
            let tmp = param[idx] + val;

            // Check if the bounds are violated. If yes, project onto the bound.
            if tmp > self.upper_bound[idx] {
                param_n[idx] = self.upper_bound[idx];
            } else if tmp < self.lower_bound[idx] {
                param_n[idx] = self.lower_bound[idx];
            } else {
                // Reuse the already-computed candidate value instead of
                // recomputing `param[idx] + val`.
                param_n[idx] = tmp;
            }
        }
        Ok(param_n)
    }
}

/// Sets up and runs simulated annealing on the bounded Rosenbrock problem,
/// printing the final result. Returns an `Err` if solver construction or the
/// optimization run fails.
fn run() -> Result<(), Error> {
    // Define bounds
    let lower_bound: Vec<f64> = vec![-5.0, -5.0];
    let upper_bound: Vec<f64> = vec![5.0, 5.0];

    // Define cost function
    let operator = Rosenbrock::new(1.0, 100.0, lower_bound, upper_bound);

    // Define initial parameter vector
    let init_param: Vec<f64> = vec![1.0, 1.2];

    // Define initial temperature
    let temp = 15.0;

    // Set up simulated annealing solver
    let mut solver = SimulatedAnnealing::new(operator, init_param, temp)?;

    // Optional: Define temperature function (defaults to `SATempFunc::TemperatureFast`)
    solver.temp_func(SATempFunc::Boltzmann);

    // Optional: Attach a logger
    solver.add_logger(ArgminSlogLogger::term());

    /////////////////////////
    // Stopping criteria   //
    /////////////////////////

    // Optional: Set maximum number of iterations (defaults to `std::u64::MAX`)
    solver.set_max_iters(1_000);

    // Optional: Set target cost function value (defaults to `std::f64::NEG_INFINITY`)
    solver.set_target_cost(0.0);

    // Optional: stop if there was no new best solution after 100 iterations
    solver.stall_best(100);

    // Optional: stop if there was no accepted solution after 100 iterations
    solver.stall_accepted(100);

    /////////////////////////
    // Reannealing         //
    /////////////////////////

    // Optional: Reanneal after 100 iterations (resets temperature to initial temperature)
    solver.reannealing_fixed(100);

    // Optional: Reanneal after no accepted solution has been found for 50 iterations
    solver.reannealing_accepted(50);

    // Optional: Start reannealing after no new best solution has been found for 80 iterations
    solver.reannealing_best(80);

    /////////////////////////
    // Run solver          //
    /////////////////////////

    solver.run()?;

    // Wait a second (lets the logger flush everything before printing again)
    std::thread::sleep(std::time::Duration::from_secs(1));

    // Print result
    println!("{:?}", solver.result());
    Ok(())
}

/// Entry point: runs the optimization and, on failure, prints the error
/// (with backtrace) and exits with a non-zero status code.
fn main() {
    match run() {
        Ok(()) => {}
        Err(ref e) => {
            println!("{} {}", e.as_fail(), e.backtrace());
            std::process::exit(1);
        }
    }
}

References

[0] Wikipedia

[1] S Kirkpatrick, CD Gelatt Jr, MP Vecchi. (1983). "Optimization by Simulated Annealing". Science 13 May 1983, Vol. 220, Issue 4598, pp. 671-680 DOI: 10.1126/science.220.4598.671

Methods

impl<O> SimulatedAnnealing<O> where
    O: ArgminOp<Output = f64>, 
[src]

pub fn new(
    cost_function: O,
    init_param: <O as ArgminOp>::Param,
    init_temp: f64
) -> Result<Self, Error>
[src]

Constructor

Parameters:

  • cost_function: cost function
  • init_param: initial parameter vector
  • init_temp: initial temperature

pub fn temp_func(&mut self, temperature_func: SATempFunc) -> &mut Self[src]

Set temperature function to one of the options in SATempFunc.

pub fn stall_accepted(&mut self, iter: u64) -> &mut Self[src]

The optimization stops after there has been no accepted solution after iter iterations

pub fn stall_best(&mut self, iter: u64) -> &mut Self[src]

The optimization stops after there has been no new best solution after iter iterations

pub fn reannealing_fixed(&mut self, iter: u64) -> &mut Self[src]

Start reannealing after iter iterations

pub fn reannealing_accepted(&mut self, iter: u64) -> &mut Self[src]

Start reannealing after no accepted solution has been found for iter iterations

pub fn reannealing_best(&mut self, iter: u64) -> &mut Self[src]

Start reannealing after no new best solution has been found for iter iterations

Trait Implementations

impl<O> ArgminSolver for SimulatedAnnealing<O> where
    O: ArgminOp<Output = f64>, 
[src]

fn run(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the optimization algorithm

fn run_fast(&mut self) -> Result<ArgminResult<Self::Param>, Error>[src]

Run the essential parts of the optimization algorithm (no logging, no Ctrl-C handling)

fn apply(&mut self, param: &Self::Param) -> Result<Self::Output, Error>[src]

Applies the cost function or operator to a parameter vector param. Returns an Err if apply of ArgminOperator is not implemented.

fn gradient(&mut self, param: &Self::Param) -> Result<Self::Param, Error>[src]

Computes the gradient at parameter param. Returns an Err if gradient of ArgminOperator is not implemented.

fn hessian(&mut self, param: &Self::Param) -> Result<Self::Hessian, Error>[src]

Computes the Hessian at parameter param. Returns an Err if hessian of ArgminOperator is not implemented.

fn cur_param(&self) -> Self::Param[src]

Returns the current parameter vector.

fn cur_grad(&self) -> Self::Param[src]

Returns the most recently stored gradient.

fn cur_hessian(&self) -> Self::Hessian[src]

Returns the most recently stored Hessian.

fn set_cur_param(&mut self, param: Self::Param)[src]

Sets the current parameter to param.

fn set_cur_grad(&mut self, grad: Self::Param)[src]

Sets the current gradient to grad.

fn set_cur_hessian(&mut self, hessian: Self::Hessian)[src]

Sets the current Hessian to hessian.

fn set_best_param(&mut self, param: Self::Param)[src]

Sets the best parameter vector to param.

fn modify(&self, param: &Self::Param, factor: f64) -> Result<Self::Param, Error>[src]

Modify the parameter vector by calling the modify method of the trait ArgminOperator. Will return an Err if modify is not implemented.

fn result(&self) -> ArgminResult<Self::Param>[src]

Returns the result of the optimization.

fn set_max_iters(&mut self, iters: u64)[src]

Sets the maximum number of iterations to iters.

fn max_iters(&self) -> u64[src]

Returns the maximum number of iterations.

fn increment_iter(&mut self)[src]

Increments the iteration counter.

fn cur_iter(&self) -> u64[src]

Returns the current number of iterations.

fn cur_cost(&self) -> f64[src]

Returns the most recently stored cost function value.

fn set_cur_cost(&mut self, cost: f64)[src]

Sets the current cost function value to cost.

fn best_cost(&self) -> f64[src]

Returns the best cost function value obtained so far.

fn set_best_cost(&mut self, cost: f64)[src]

Sets the best cost function value.

fn set_target_cost(&mut self, cost: f64)[src]

Sets the target cost function value to cost. The optimization algorithm will be terminated when this limit is reached.

fn increment_cost_func_count(&mut self)[src]

Increments the counter for the computations of the cost function by 1.

fn increase_cost_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the cost function by count.

fn cost_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the cost function.

fn increment_grad_func_count(&mut self)[src]

Increments the counter for the computations of the gradient by 1.

fn increase_grad_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the gradient by count.

fn grad_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the gradient.

fn increment_hessian_func_count(&mut self)[src]

Increments the counter for the computations of the Hessian by 1.

fn increase_hessian_func_count(&mut self, count: u64)[src]

Increases the counter for the computations of the Hessian by count.

fn hessian_func_count(&self) -> u64[src]

Returns the current value of the counter for the computations of the Hessian.

fn add_logger(&mut self, logger: Arc<dyn ArgminLog>)[src]

Attaches a logger which implements ArgminLog to the solver.

fn add_writer(&mut self, writer: Arc<dyn ArgminWrite<Param = Self::Param>>)[src]

Attaches a writer which implements ArgminWrite to the solver.

fn set_termination_reason(&mut self, reason: TerminationReason)[src]

Sets the TerminationReason

fn terminate(&mut self) -> TerminationReason[src]

Checks whether any of the conditions to terminate is true and terminates the algorithm.

fn base_reset(&mut self)[src]

Resets the base field to its initial conditions. This is helpful for implementing a solver which is initialized once, but called several times. It is recommended to only call this method inside the init function of ArgminNextIter.

impl<O> ArgminIter for SimulatedAnnealing<O> where
    O: ArgminOp<Output = f64>, 
[src]

type Param = <O as ArgminOp>::Param

Parameter vectors

type Output = <O as ArgminOp>::Output

Output of the operator

type Hessian = <O as ArgminOp>::Hessian

Hessian

fn next_iter(&mut self) -> Result<ArgminIterData<Self::Param>, Error>[src]

Perform one iteration of SA algorithm

fn init(&mut self) -> Result<(), Error>[src]

Initializes the algorithm Read more

Auto Trait Implementations

impl<O> !Send for SimulatedAnnealing<O>

impl<O> !Sync for SimulatedAnnealing<O>

Blanket Implementations

impl<T, U> Into for T where
    U: From<T>, 
[src]

impl<T> From for T[src]

impl<T, U> TryFrom for T where
    U: Into<T>, 
[src]

type Error = !

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.

impl<T> Borrow for T where
    T: ?Sized
[src]

impl<T, U> TryInto for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

🔬 This is a nightly-only experimental API. (try_from)

The type returned in the event of a conversion error.

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> BorrowMut for T where
    T: ?Sized
[src]