use crate::config::{Config, FractionalConfig};
use crate::cost::CostFn;
use crate::model::{ModelOutputFailure, ModelOutputSuccess};
use crate::numerics::{ApplicablePrecision, TOLERANCE};
use log::warn;
use nlopt::{Algorithm, Nlopt, Target};
use noisy_float::prelude::*;
use std::sync::Arc;
/// Evaluation budget handed to NLopt, per problem dimension.
// `const` rather than `static`: a plain integer with no interior mutability
// has no reason to occupy a fixed memory location (clippy idiom).
const MAX_ITERATIONS_PER_DIM: u32 = 1_000;
/// Whether the solver searches for a minimum or a maximum of the objective.
#[derive(Clone, Copy)]
enum Direction {
    Minimize,
    Maximize,
}
/// Shared objective function: maps a candidate point and mutable user data to a cost.
type ObjectiveFn<'a, D> = Arc<dyn Fn(&[f64], &mut D) -> N64 + 'a>;
/// An objective function bundled with the mutable data it is evaluated against.
#[derive(Clone)]
pub struct WrappedObjective<'a, D> {
    /// Mutable data passed to each evaluation of `f`.
    pub data: D,
    /// The objective function itself.
    pub f: ObjectiveFn<'a, D>,
}
impl<'a, D> WrappedObjective<'a, D> {
pub fn new(data: D, f: impl Fn(&[f64], &mut D) -> N64 + 'a) -> Self {
Self {
data,
f: Arc::new(f),
}
}
}
/// A solution point together with its objective value.
type OptimizationResult = (Vec<f64>, N64);
/// Minimizes the hitting cost at time slot `t` over the box `bounds`,
/// returning the minimizer and the minimal cost.
pub fn find_minimizer_of_hitting_cost<C, D>(
    t: i32,
    hitting_cost: CostFn<'_, FractionalConfig, C, D>,
    bounds: Vec<(f64, f64)>,
) -> OptimizationResult
where
    C: ModelOutputSuccess,
    D: ModelOutputFailure,
{
    // The cost function itself is the mutable data carried by the objective.
    let objective = WrappedObjective::new(hitting_cost, |raw_config, cost_fn| {
        cost_fn
            .call_certain(t, Config::new(raw_config.to_vec()))
            .cost
    });
    find_minimizer(objective, bounds)
}
/// Finds the minimizer of `objective` within the box `bounds`,
/// without an explicit initial guess and without constraints.
pub fn find_minimizer<C>(
    objective: WrappedObjective<C>,
    bounds: Vec<(f64, f64)>,
) -> OptimizationResult {
    // No inequality constraints; the unit type stands in for constraint data.
    let constraints: Vec<WrappedObjective<()>> = Vec::new();
    minimize(objective, bounds, None, constraints)
}
/// Maximizes `objective` over an unbounded `d`-dimensional domain,
/// starting from the origin, subject to the given inequality `constraints`.
pub fn find_unbounded_maximizer<C, D>(
    objective: WrappedObjective<C>,
    d: i32,
    constraints: Vec<WrappedObjective<D>>,
) -> OptimizationResult {
    // Unbounded problems require an explicit starting point; use the origin.
    let (bounds, initial_guess) = build_empty_bounds(d);
    maximize(objective, bounds, Some(initial_guess), constraints)
}
/// Minimizes `objective` over `bounds`, optionally starting from `init`,
/// subject to the inequality `constraints`.
pub fn minimize<C, D>(
    objective: WrappedObjective<C>,
    bounds: Vec<(f64, f64)>,
    init: Option<Vec<f64>>,
    constraints: Vec<WrappedObjective<D>>,
) -> OptimizationResult {
    let direction = Direction::Minimize;
    optimize(direction, objective, bounds, init, constraints)
}
/// Maximizes `objective` over `bounds`, optionally starting from `init`,
/// subject to the inequality `constraints`.
pub fn maximize<C, D>(
    objective: WrappedObjective<C>,
    bounds: Vec<(f64, f64)>,
    init: Option<Vec<f64>>,
    constraints: Vec<WrappedObjective<D>>,
) -> OptimizationResult {
    let direction = Direction::Maximize;
    optimize(direction, objective, bounds, init, constraints)
}
/// Runs the NLopt solver in direction `dir` over `objective`, within the box
/// `bounds` and subject to the inequality `constraints`.
///
/// * `bounds` — one `(lower, upper)` pair per dimension; its length fixes the
///   problem dimension.
/// * `init` — optional starting point; when `None`, the upper bounds are used
///   as the initial guess, so all upper bounds must then be finite.
///
/// Returns the precision-truncated optimizer and its objective value.
/// If the solver hits its evaluation/time budget, the value is assumed
/// unbounded (`-inf` when maximizing, `inf` when minimizing). A
/// roundoff-limited termination is tolerated and its value returned.
/// Any other solver failure panics via `unwrap`.
fn optimize<C, D>(
    dir: Direction,
    objective: WrappedObjective<C>,
    bounds: Vec<(f64, f64)>,
    init: Option<Vec<f64>>,
    constraints: Vec<WrappedObjective<D>>,
) -> OptimizationResult {
    let d = bounds.len();
    let (lower, upper): (Vec<_>, Vec<_>) = bounds.into_iter().unzip();
    let WrappedObjective { data, f } = objective;
    // Adapter to NLopt's objective signature; the gradient slot is ignored
    // (the chosen algorithms are derivative-free — see `choose_algorithm`).
    let solver_objective = |xs: &[f64], _: Option<&mut [f64]>, data: &mut C| {
        evaluate(xs, data, &f)
    };
    let mut x = match init {
        None => {
            assert!(upper.iter().all(|&b| b < f64::INFINITY), "Initial guess must be set explicitly when optimization problem does not have fixed upper bounds.");
            upper.clone()
        }
        Some(x) => {
            // The guess must match the dimension implied by `bounds`.
            assert!(x.len() == d);
            x
        }
    };
    let mut solver = Nlopt::new(
        choose_algorithm(constraints.len()),
        d,
        solver_objective,
        Target::from(dir),
        data,
    );
    solver.set_lower_bounds(&lower).unwrap();
    solver.set_upper_bounds(&upper).unwrap();
    solver.set_xtol_abs1(TOLERANCE).unwrap();
    solver.set_xtol_rel(TOLERANCE).unwrap();
    // Cap the evaluation budget proportionally to the problem dimension.
    solver
        .set_maxeval(d as u32 * MAX_ITERATIONS_PER_DIM)
        .unwrap();
    // Each constraint is registered with its own data and tolerance slack.
    for WrappedObjective { f, data } in constraints {
        solver
            .add_inequality_constraint(
                |xs: &[f64], _: Option<&mut [f64]>, data: &mut D| {
                    evaluate(xs, data, &f)
                },
                data,
                TOLERANCE,
            )
            .unwrap();
    }
    let opt = match solver.optimize(&mut x) {
        Ok((state, opt)) => Ok(match state {
            nlopt::SuccessState::MaxEvalReached
            | nlopt::SuccessState::MaxTimeReached => {
                // Budget exhausted: report an unbounded value rather than a
                // possibly far-from-optimal intermediate result.
                warn!("Convex optimization timed out. Assuming solution to be infinity.");
                match dir {
                    Direction::Maximize => f64::NEG_INFINITY,
                    Direction::Minimize => f64::INFINITY,
                }
            }
            _ => opt,
        }),
        Err((state, opt)) => match state {
            nlopt::FailState::RoundoffLimited => {
                // Roundoff-limited termination still yields a usable value.
                warn!("Warning: NLOpt terminated with a roundoff error.");
                Ok(opt)
            }
            _ => Err((state, opt)),
        },
    }.unwrap();
    (x.apply_precision(), n64(opt))
}
/// Picks the derivative-free NLopt algorithm: COBYLA when inequality
/// constraints are present, Sbplx for unconstrained problems.
fn choose_algorithm(constraints: usize) -> Algorithm {
    match constraints {
        0 => Algorithm::Sbplx,
        _ => Algorithm::Cobyla,
    }
}
/// Converts the internal optimization direction into NLopt's target enum.
impl From<Direction> for Target {
    fn from(dir: Direction) -> Self {
        match dir {
            Direction::Minimize => Self::Minimize,
            Direction::Maximize => Self::Maximize,
        }
    }
}
/// Builds unbounded box constraints for a `d`-dimensional problem together
/// with the all-zero initial guess.
///
/// # Panics
///
/// Panics when `d` is negative. (Previously a negative `d` silently wrapped
/// around in the `as usize` cast, requesting an absurdly large allocation.)
fn build_empty_bounds(d: i32) -> (Vec<(f64, f64)>, Vec<f64>) {
    assert!(d >= 0, "dimension must be non-negative");
    let d = d as usize;
    (
        vec![(f64::NEG_INFINITY, f64::INFINITY); d],
        vec![0.; d],
    )
}
/// Evaluates `f` at `xs_` after truncating every coordinate to the
/// applicable precision. Any NaN coordinate short-circuits to a NaN
/// result instead of invoking the objective.
fn evaluate<D>(xs_: &[f64], data: &mut D, f: &ObjectiveFn<D>) -> f64 {
    let xs: Vec<f64> = xs_.iter().map(|&x| x.apply_precision()).collect();
    if xs.iter().all(|x| !x.is_nan()) {
        f(&xs, data).raw()
    } else {
        f64::NAN
    }
}