// Copyright 2018 Stefan Kroboth
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Argmin Optimization toolbox core
//!
//! This crate contains the core functionality of argmin. If you just want to run an optimization
//! method, this is *not* what you are looking for. However, if you want to implement your own
//! solver based on the argmin architecture, you should find all necessary tools here.
//!
//! TODOs:
//!   * Provide an example of how to implement a solver

// I really do not like the a..=b syntax
#![allow(clippy::range_plus_one)]

#[cfg(feature = "ctrlc")]
pub extern crate ctrlc;

/// Macros
#[macro_use]
pub mod macros;
/// Base struct
mod base;
/// Error handling
mod errors;
/// Key value datastructure
mod kv;
/// Logging
mod logging;
/// Math utilities
mod math;
/// Phony Operator
// #[cfg(test)]
mod nooperator;
/// Output
mod output;
/// Definition of the return type of the solvers
mod result;
/// Serialization of `ArgminSolver`s
mod serialization;
/// Definition of termination reasons
mod termination;

// TODO: Maybe leave logging/output stuff in its namespace
pub use crate::base::ArgminBase;
pub use crate::errors::*;
pub use crate::kv::ArgminKV;
pub use crate::logging::slog_logger::ArgminSlogLogger;
pub use crate::logging::ArgminLogger;
pub use crate::math::*;
pub use crate::nooperator::*;
pub use crate::output::file::WriteToFile;
pub use crate::output::ArgminWriter;
pub use crate::result::ArgminResult;
pub use crate::termination::TerminationReason;
pub use failure::Error;
pub use crate::serialization::*;

pub mod finitediff {
    //! Finite Differentiation
    //!
    //! Reexport of `finitediff` crate.
    pub use finitediff::*;
}

/// Defines the interface to a solver. Usually, there is no need to implement this manually, use
/// the `argmin_derive` crate instead.
pub trait ArgminSolver: ArgminIter {
    /// apply cost function or operator to a parameter vector
    fn apply(
        &mut self,
        param: &<Self as ArgminIter>::Param,
    ) -> Result<<Self as ArgminIter>::Output, Error>;

    /// compute the gradient for a parameter vector
    fn gradient(
        &mut self,
        param: &<Self as ArgminIter>::Param,
    ) -> Result<<Self as ArgminIter>::Param, Error>;

    /// compute the hessian for a parameter vector
    fn hessian(
        &mut self,
        param: &<Self as ArgminIter>::Param,
    ) -> Result<<Self as ArgminIter>::Hessian, Error>;

    /// modify the parameter vector
    fn modify(
        &self,
        param: &<Self as ArgminIter>::Param,
        extent: f64,
    ) -> Result<<Self as ArgminIter>::Param, Error>;

    /// return current parameter vector
    fn cur_param(&self) -> <Self as ArgminIter>::Param;

    /// return current gradient
    fn cur_grad(&self) -> <Self as ArgminIter>::Param;

    /// return current Hessian
    fn cur_hessian(&self) -> <Self as ArgminIter>::Hessian;

    /// set current parameter vector
    fn set_cur_param(&mut self, param: <Self as ArgminIter>::Param);

    /// set current gradient
    fn set_cur_grad(&mut self, grad: <Self as ArgminIter>::Param);

    /// set current Hessian
    fn set_cur_hessian(&mut self, hessian: <Self as ArgminIter>::Hessian);

    /// set best parameter vector
    fn set_best_param(&mut self, param: <Self as ArgminIter>::Param);

    /// Execute the optimization algorithm.
    fn run(&mut self) -> Result<ArgminResult<<Self as ArgminIter>::Param>, Error>;

    /// Execute the optimization algorithm without Ctrl-C handling, logging, writing and anything
    /// else which may cost unnecessary time.
    fn run_fast(&mut self) -> Result<ArgminResult<<Self as ArgminIter>::Param>, Error>;

    /// Returns the best solution found during optimization.
    fn result(&self) -> ArgminResult<<Self as ArgminIter>::Param>;

    /// Set the termination reason (this does not terminate the solver immediately; it is useful
    /// for signalling termination from within an iteration)
    fn set_termination_reason(&mut self, reason: TerminationReason);

    /// Evaluate all stopping criteria and return the `TerminationReason`
    fn terminate(&mut self) -> TerminationReason;

    /// Set max number of iterations.
    fn set_max_iters(&mut self, iters: u64);

    /// Get max number of iterations.
    fn max_iters(&self) -> u64;

    /// Get current iteration number.
    fn cur_iter(&self) -> u64;

    /// Increment the iteration number by one
    fn increment_iter(&mut self);

    /// Get current cost function value
    fn cur_cost(&self) -> f64;

    /// Set current cost function value
    fn set_cur_cost(&mut self, cost: f64);

    /// Get best cost function value
    // fn best_cost(&self) -> <Self as ArgminIter>::Output;
    fn best_cost(&self) -> f64;

    /// Set best cost function value
    // fn set_best_cost(&mut self, <Self as ArgminIter>::Output);
    fn set_best_cost(&mut self, cost: f64);

    /// Set the target cost function value which is used as a stopping criterion
    fn set_target_cost(&mut self, cost: f64);

    /// Add a logger to the array of loggers
    fn add_logger(&mut self, logger: std::sync::Arc<ArgminLog>);

    /// Add a writer to the array of writers
    fn add_writer(&mut self, writer: std::sync::Arc<ArgminWrite<Param = Self::Param>>);

    /// Reset the base of the algorithm to its initial state
    fn base_reset(&mut self);

    /// Increment the cost function evaluation count
    fn increment_cost_func_count(&mut self);

    /// Increase the cost function evaluation count by a given value
    fn increase_cost_func_count(&mut self, count: u64);

    /// Return the cost function evaluation count
    fn cost_func_count(&self) -> u64;

    /// Increment the gradient evaluation count
    fn increment_grad_func_count(&mut self);

    /// Increase the gradient evaluation count by a given value
    fn increase_grad_func_count(&mut self, count: u64);

    /// Return the gradient evaluation count
    fn grad_func_count(&self) -> u64;

    /// Increment the hessian evaluation count
    fn increment_hessian_func_count(&mut self);

    /// Increase the hessian evaluation count by a given value
    fn increase_hessian_func_count(&mut self, count: u64);

    /// Return the Hessian evaluation count
    fn hessian_func_count(&self) -> u64;
}

/// Main part of every solver: `next_iter` computes one iteration of the algorithm and `init` is
/// executed before these iterations. The `init` method comes with a default implementation which
/// corresponds to doing nothing.
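///
/// A minimal sketch of an implementation (the `MySolver` type and the way it produces the next
/// iterate are hypothetical; in practice, solvers are built on top of `ArgminBase`, usually via
/// the `argmin_derive` crate):
///
/// ```ignore
/// struct MySolver { /* solver state */ }
///
/// impl ArgminIter for MySolver {
///     type Param = Vec<f64>;
///     type Output = f64;
///     type Hessian = Vec<Vec<f64>>;
///
///     fn next_iter(&mut self) -> Result<ArgminIterData<Self::Param>, Error> {
///         // compute the next iterate and its cost in a solver-specific way
///         let new_param = vec![0.0, 0.0];
///         let new_cost = 0.0;
///         Ok(ArgminIterData::new(new_param, new_cost))
///     }
/// }
/// ```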
pub trait ArgminIter {
    /// Parameter vectors
    type Param;
    /// Output of the operator
    type Output;
    /// Hessian
    type Hessian;

    /// Computes one iteration of the algorithm.
    fn next_iter(&mut self) -> Result<ArgminIterData<Self::Param>, Error>;

    /// Initializes the algorithm
    ///
    /// This is executed before any iterations are performed. It can be used to perform
    /// precomputations. The default implementation corresponds to doing nothing.
    fn init(&mut self) -> Result<(), Error> {
        Ok(())
    }
}

/// Define the interface every logger needs to expose
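///
/// A minimal sketch of a logger which prints the message to stdout and ignores the key-value
/// pairs (this type is not part of the crate):
///
/// ```ignore
/// struct StdoutLogger {}
///
/// impl ArgminLog for StdoutLogger {
///     fn log_info(&self, msg: &str, _kv: &ArgminKV) -> Result<(), Error> {
///         println!("{}", msg);
///         Ok(())
///     }
///
///     fn log_iter(&self, _kv: &ArgminKV) -> Result<(), Error> {
///         // a real implementation would write the key-value pairs in a fixed column order
///         Ok(())
///     }
/// }
/// ```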
pub trait ArgminLog: Send + Sync {
    /// Logs general information (a message `msg` and/or key-value pairs `kv`).
    fn log_info(&self, msg: &str, kv: &ArgminKV) -> Result<(), Error>;

    /// Logs information from iterations. Only accepts key-value pairs. `log_iter` is made to log
    /// to a database or a CSV file. Therefore the structure of the key-value pairs should not
    /// change between iterations.
    fn log_iter(&self, kv: &ArgminKV) -> Result<(), Error>;
}

/// Every writer (which is something that writes parameter vectors somewhere after each iteration)
/// needs to implement this.
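///
/// A minimal sketch of a writer which prints the parameter vector to stdout (this type is not
/// part of the crate):
///
/// ```ignore
/// struct StdoutWriter {}
///
/// impl ArgminWrite for StdoutWriter {
///     type Param = Vec<f64>;
///
///     fn write(&self, param: &Self::Param) -> Result<(), Error> {
///         println!("{:?}", param);
///         Ok(())
///     }
/// }
/// ```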
pub trait ArgminWrite: Send + Sync {
    type Param;

    /// Writes the parameter vector somewhere
    fn write(&self, param: &Self::Param) -> Result<(), Error>;
}

/// The datastructure which is returned by the `next_iter` method of the `ArgminIter` trait.
///
/// TODO: think about removing this or replacing it with something better. Actually, a tuple would
/// be sufficient.
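///
/// A sketch of typical usage inside a solver's `next_iter` (the variables `new_param`,
/// `new_cost` and `kv` are assumed to be computed by the solver):
///
/// ```ignore
/// let mut out = ArgminIterData::new(new_param, new_cost);
/// // optionally attach key-value pairs for the loggers
/// out.add_kv(kv);
/// Ok(out)
/// ```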
pub struct ArgminIterData<T> {
    /// Current parameter vector
    param: T,
    /// Current cost function value
    cost: f64,
    /// Key value pairs which are currently only used to provide additional information for the
    /// loggers
    kv: Option<ArgminKV>,
}

impl<T: Clone> ArgminIterData<T> {
    /// Constructor
    pub fn new(param: T, cost: f64) -> Self {
        ArgminIterData {
            param,
            cost,
            kv: None,
        }
    }

    /// Returns the parameter vector
    pub fn param(&self) -> T {
        self.param.clone()
    }

    /// Returns the cost function value
    pub fn cost(&self) -> f64 {
        self.cost
    }

    /// Adds an `ArgminKV`
    pub fn add_kv(&mut self, kv: ArgminKV) -> &mut Self {
        self.kv = Some(kv);
        self
    }

    /// Returns an `ArgminKV`
    ///
    /// TODO: Keep it consistent, remove the `get_`.
    pub fn get_kv(&self) -> Option<ArgminKV> {
        self.kv.clone()
    }
}

/// This trait needs to be implemented for every operator/cost function.
///
/// It is required to implement the `apply` method; all others are optional and come with a default
/// implementation which simply returns an error indicating that the method has not been
/// implemented. The optional methods (`gradient`, `hessian` and `modify`) only need to be
/// implemented if the solver in use requires them.
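///
/// A minimal sketch: the sphere function `f(x) = x_1^2 + ... + x_n^2` with its analytical
/// gradient (this type is not part of the crate; the Hessian is left unimplemented):
///
/// ```ignore
/// #[derive(Clone, Default)]
/// struct Sphere {}
///
/// impl ArgminOp for Sphere {
///     type Param = Vec<f64>;
///     type Output = f64;
///     type Hessian = ();
///
///     fn apply(&self, param: &Self::Param) -> Result<Self::Output, Error> {
///         Ok(param.iter().map(|x| x * x).sum())
///     }
///
///     fn gradient(&self, param: &Self::Param) -> Result<Self::Param, Error> {
///         Ok(param.iter().map(|x| 2.0 * x).collect())
///     }
/// }
/// ```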
pub trait ArgminOp: Clone + Default + Send + Sync {
    /// Type of the parameter vector
    type Param: Clone + Default + Send + Sync + serde::Serialize + serde::de::DeserializeOwned;
    /// Output of the operator. Most solvers expect `f64`.
    type Output;
    /// Type of Hessian
    type Hessian: Clone + Default + Send + Sync + serde::Serialize + serde::de::DeserializeOwned;

    /// Applies the operator/cost function to parameters
    fn apply(&self, _param: &Self::Param) -> Result<Self::Output, Error> {
        Err(ArgminError::NotImplemented {
            text: "Method `apply` of ArgminOp trait not implemented!".to_string(),
        }
        .into())
    }

    /// Computes the gradient at the given parameters
    fn gradient(&self, _param: &Self::Param) -> Result<Self::Param, Error> {
        Err(ArgminError::NotImplemented {
            text: "Method `gradient` of ArgminOp trait not implemented!".to_string(),
        }
        .into())
    }

    /// Computes the hessian at the given parameters
    fn hessian(&self, _param: &Self::Param) -> Result<Self::Hessian, Error> {
        Err(ArgminError::NotImplemented {
            text: "Method `hessian` of ArgminOp trait not implemented!".to_string(),
        }
        .into())
    }

    /// Modifies a parameter vector. Comes with a variable that indicates the "degree" of the
    /// modification.
    fn modify(&self, _param: &Self::Param, _extent: f64) -> Result<Self::Param, Error> {
        Err(ArgminError::NotImplemented {
            text: "Method `modify` of ArgminOp trait not implemented!".to_string(),
        }
        .into())
    }
}

/// Defines a common interface to line search methods. Requires that `ArgminSolver` is implemented
/// for the line search method as well.
///
/// The cost function value and the gradient at the starting point can either be provided
/// (`set_initial_cost` and `set_initial_gradient`) or they can be computed using the operator from
/// the implementation of `ArgminSolver` (see `calc_initial_cost` and `calc_initial_gradient`). The
/// former is convenient if cost and gradient at the starting point are already known (e.g. because
/// the solver which uses the line search has already computed them) and avoids unnecessary
/// recomputation of those values.
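///
/// A sketch of how a solver typically drives a line search (`ls` is a hypothetical type
/// implementing this trait; `x`, `direction`, `cost` and `grad` are assumed to be known already):
///
/// ```ignore
/// ls.set_initial_parameter(x);
/// ls.set_search_direction(direction);
/// ls.set_initial_alpha(1.0)?;
/// // reuse the already computed values instead of calling calc_initial_cost/_gradient
/// ls.set_initial_cost(cost);
/// ls.set_initial_gradient(grad);
/// let res = ls.run_fast()?;
/// ```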
pub trait ArgminLineSearch: ArgminSolver {
    /// Set the initial parameter (starting point)
    fn set_initial_parameter(&mut self, param: <Self as ArgminIter>::Param);

    /// Set the search direction
    fn set_search_direction(&mut self, direction: <Self as ArgminIter>::Param);

    /// Set the initial step length
    fn set_initial_alpha(&mut self, step_length: f64) -> Result<(), Error>;

    /// Set the cost function value at the starting point as opposed to computing it (see
    /// `calc_initial_cost`)
    fn set_initial_cost(&mut self, cost: f64);

    /// Set the gradient at the starting point as opposed to computing it (see
    /// `calc_initial_gradient`)
    fn set_initial_gradient(&mut self, grad: <Self as ArgminIter>::Param);

    /// calculate the initial cost function value using an operator as opposed to setting it
    /// manually (see `set_initial_cost`)
    fn calc_initial_cost(&mut self) -> Result<(), Error>;

    /// calculate the initial gradient using an operator as opposed to setting it manually (see
    /// `set_initial_gradient`)
    fn calc_initial_gradient(&mut self) -> Result<(), Error>;
}

/// Defines a common interface to methods which calculate approximate steps for trust region
/// methods. Requires that `ArgminSolver` is implemented as well.
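///
/// A sketch of how a trust region method typically drives such a subproblem solver (`subsolver`
/// is a hypothetical type implementing this trait):
///
/// ```ignore
/// subsolver.set_radius(radius);
/// subsolver.set_grad(grad);
/// subsolver.set_hessian(hessian);
/// let step = subsolver.run_fast()?;
/// ```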
pub trait ArgminTrustRegion: ArgminSolver {
    // /// Set the initial parameter (starting point)
    // fn set_initial_parameter(&mut self, <Self as ArgminIter>::Parameters);

    /// Set the trust region radius
    fn set_radius(&mut self, radius: f64);

    /// Set the gradient at the starting point
    fn set_grad(&mut self, grad: <Self as ArgminIter>::Param);

    /// Set the Hessian at the starting point
    fn set_hessian(&mut self, hessian: <Self as ArgminIter>::Hessian);
}

/// Every method for the update of beta needs to implement this trait.
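///
/// A minimal sketch: the Fletcher-Reeves update
/// `beta = <g_{k+1}, g_{k+1}> / <g_k, g_k>` for parameter type `Vec<f64>` (this type is not part
/// of the crate):
///
/// ```ignore
/// struct FletcherReeves {}
///
/// impl ArgminNLCGBetaUpdate<Vec<f64>> for FletcherReeves {
///     fn update(&self, g_k: &Vec<f64>, g_k_p_1: &Vec<f64>, _p_k: &Vec<f64>) -> f64 {
///         let num: f64 = g_k_p_1.iter().map(|g| g * g).sum();
///         let denom: f64 = g_k.iter().map(|g| g * g).sum();
///         num / denom
///     }
/// }
/// ```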
pub trait ArgminNLCGBetaUpdate<T> {
    /// Computes the update of beta, given the gradient at the current iterate (`\nabla f_k`),
    /// the gradient at the next iterate (`\nabla f_{k+1}`) and the current search direction
    /// (`p_k`).
    fn update(&self, nabla_f_k: &T, nabla_f_k_p_1: &T, p_k: &T) -> f64;
}