use crate::lbfgs;
use crate::options::SolverOptions;
use crate::problem::NlpProblem;
use crate::result::{SolveResult, SolveStatus};
use crate::logging::rip_log;
// Cap on augmented-Lagrangian outer iterations (multiplier/penalty updates).
const MAX_OUTER_ITER: usize = 50;
// Initial quadratic-penalty weight rho for the augmented Lagrangian.
const RHO_INIT: f64 = 10.0;
// Factor applied to rho when the constraint violation fails to shrink enough.
const RHO_INCREASE: f64 = 5.0;
// Ceiling on rho: beyond this the inner subproblem becomes hopelessly
// ill-conditioned, so growth is clamped.
const RHO_MAX: f64 = 1e10;
/// Signed violation of one constraint row given its value `g_i` and bounds.
///
/// Equality rows (bounds coinciding within 1e-20) report the signed residual
/// `g_i - g_l_i`. Inequality rows report a negative amount below a finite
/// lower bound, a positive amount above a finite upper bound, and zero when
/// the value is feasible.
fn constraint_violation_i(g_i: f64, g_l_i: f64, g_u_i: f64) -> f64 {
    let is_equality = (g_l_i - g_u_i).abs() < 1e-20;
    if is_equality {
        return g_i - g_l_i;
    }
    if g_l_i.is_finite() && g_i < g_l_i {
        return g_i - g_l_i;
    }
    if g_u_i.is_finite() && g_i > g_u_i {
        return g_i - g_u_i;
    }
    0.0
}
/// First-order multiplier estimate sigma_i for the augmented-Lagrangian
/// gradient: the chain-rule factor that multiplies row i of the constraint
/// Jacobian in `AugLagProblem::gradient`.
///
/// Equality rows use `lambda_i + rho * (g_i - g_l_i)`. Inequality rows
/// combine the clamped-at-zero multiplier estimates of whichever bounds are
/// finite; the lower-bound side enters with a negative sign because its
/// residual `g_l_i - g_i` decreases as `g_i` grows.
fn effective_sigma(
    g_i: f64,
    g_l_i: f64,
    g_u_i: f64,
    lambda_i: f64,
    rho: f64,
) -> f64 {
    if (g_l_i - g_u_i).abs() < 1e-20 {
        return lambda_i + rho * (g_i - g_l_i);
    }
    let from_lower = if g_l_i.is_finite() {
        (lambda_i + rho * (g_l_i - g_i)).max(0.0)
    } else {
        0.0
    };
    let from_upper = if g_u_i.is_finite() {
        (lambda_i + rho * (g_i - g_u_i)).max(0.0)
    } else {
        0.0
    };
    from_upper - from_lower
}
/// Unconstrained reformulation of `inner` for the inner L-BFGS solver:
/// the constraints are folded into `objective`/`gradient` using the current
/// multipliers `lambda` and penalty weight `rho`.
struct AugLagProblem<'a, P: NlpProblem> {
inner: &'a P,
// Current multiplier estimate, one per constraint row (shared between both
// sides of a two-sided inequality).
lambda: Vec<f64>,
// Quadratic penalty weight.
rho: f64,
// Constraint lower/upper bounds, copied from the outer problem.
g_l: Vec<f64>,
g_u: Vec<f64>,
// Variable and constraint counts of the outer problem.
n: usize,
m: usize,
}
impl<P: NlpProblem> NlpProblem for AugLagProblem<'_, P> {
    fn num_variables(&self) -> usize {
        self.n
    }
    // The subproblem handed to the inner solver is unconstrained: every
    // constraint lives inside the objective.
    fn num_constraints(&self) -> usize {
        0
    }
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        self.inner.bounds(x_l, x_u);
    }
    fn constraint_bounds(&self, _g_l: &mut [f64], _g_u: &mut [f64]) {}
    fn initial_point(&self, x0: &mut [f64]) {
        self.inner.initial_point(x0);
    }
    /// Augmented-Lagrangian objective.
    ///
    /// Equality rows (g_l == g_u) contribute `lambda_i*c + 0.5*rho*c^2` with
    /// `c = g_i - g_l_i`. Each finite inequality bound contributes the
    /// Powell-Hestenes-Rockafellar penalty `max(0, lambda_i + rho*c)^2/(2 rho)`
    /// (equal to the textbook `(s^2 - lambda^2)/(2 rho)` up to an
    /// x-independent constant), whose derivative w.r.t. the residual `c` is
    /// exactly the clamped estimate `s` used by `gradient` via
    /// `effective_sigma`.
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        let mut g = vec![0.0; self.m];
        if !self.inner.constraints(x, _new_x, &mut g) {
            return false;
        }
        let mut f = 0.0;
        if !self.inner.objective(x, _new_x, &mut f) {
            return false;
        }
        let mut aug = f;
        for i in 0..self.m {
            let is_eq = (self.g_l[i] - self.g_u[i]).abs() < 1e-20;
            if is_eq {
                // d/dc [lambda*c + rho*c^2/2] = lambda + rho*c, matching
                // the equality branch of `effective_sigma`.
                let c = g[i] - self.g_l[i];
                aug += self.lambda[i] * c + 0.5 * self.rho * c * c;
            } else {
                // BUG FIX: the penalty used to be `s*c + 0.5*s*s/rho`, whose
                // derivative w.r.t. c is `2s + rho*c` when active — it
                // disagreed with the `effective_sigma` factor `s` applied in
                // `gradient`, so objective and gradient were inconsistent and
                // the inner line search was working against a wrong slope.
                // The PHR term `s^2/(2 rho)` is the gradient-consistent form.
                if self.g_l[i].is_finite() {
                    let c_l = self.g_l[i] - g[i];
                    let s_l = (self.lambda[i] + self.rho * c_l).max(0.0);
                    aug += 0.5 * s_l * s_l / self.rho.max(1e-20);
                }
                if self.g_u[i].is_finite() {
                    let c_u = g[i] - self.g_u[i];
                    let s_u = (self.lambda[i] + self.rho * c_u).max(0.0);
                    aug += 0.5 * s_u * s_u / self.rho.max(1e-20);
                }
            }
        }
        *obj = aug;
        true
    }
    /// Gradient of the augmented Lagrangian: grad f(x) + J(x)^T * sigma,
    /// where sigma_i is the first-order multiplier estimate from
    /// `effective_sigma` and J is the sparse constraint Jacobian of `inner`.
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        let m = self.m;
        let n = self.n;
        // Start from the plain objective gradient, then accumulate J^T sigma.
        if !self.inner.gradient(x, _new_x, grad) {
            return false;
        }
        let mut g = vec![0.0; m];
        if !self.inner.constraints(x, _new_x, &mut g) {
            return false;
        }
        let (jac_rows, jac_cols) = self.inner.jacobian_structure();
        let nnz = jac_rows.len();
        let mut jac_vals = vec![0.0; nnz];
        if !self.inner.jacobian_values(x, _new_x, &mut jac_vals) {
            return false;
        }
        let sigma: Vec<f64> = (0..m)
            .map(|i| {
                effective_sigma(g[i], self.g_l[i], self.g_u[i], self.lambda[i], self.rho)
            })
            .collect();
        for k in 0..nnz {
            let row = jac_rows[k];
            let col = jac_cols[k];
            // Defensively skip out-of-range columns in the user-reported
            // sparsity pattern.
            if col < n {
                grad[col] += jac_vals[k] * sigma[row];
            }
        }
        true
    }
    // The subproblem reports zero constraints, so these are trivial stubs.
    fn constraints(&self, _x: &[f64], _new_x: bool, _g: &mut [f64]) -> bool { true }
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![], vec![])
    }
    fn jacobian_values(&self, _x: &[f64], _new_x: bool, _vals: &mut [f64]) -> bool { true }
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![], vec![])
    }
    fn hessian_values(&self, _x: &[f64], _new_x: bool, _obj_factor: f64, _lambda: &[f64], _vals: &mut [f64]) -> bool { true }
}
/// Solve `problem` with an augmented-Lagrangian outer loop around the L-BFGS
/// inner solver.
///
/// Each outer iteration minimizes the augmented Lagrangian for the current
/// multipliers `lambda` and penalty weight `rho`, then applies a first-order
/// multiplier update; when the maximum constraint violation fails to shrink
/// by at least 4x, `rho` is increased (capped at `RHO_MAX`).
///
/// Returns `Optimal` once the violation drops below the tolerance,
/// `EvaluationError` if a user callback fails or produces non-finite values,
/// and `MaxIterations` after `MAX_OUTER_ITER` outer rounds.
pub fn solve<P: NlpProblem>(problem: &P, options: &SolverOptions) -> SolveResult {
    // Shared result constructor for failed / non-finite callback evaluations.
    fn eval_error_result(
        x: Vec<f64>,
        lambda: Vec<f64>,
        g: Vec<f64>,
        n: usize,
        iterations: usize,
    ) -> SolveResult {
        SolveResult {
            x,
            objective: f64::NAN,
            constraint_multipliers: lambda,
            bound_multipliers_lower: vec![0.0; n],
            bound_multipliers_upper: vec![0.0; n],
            constraint_values: g,
            status: SolveStatus::EvaluationError,
            iterations,
            diagnostics: Default::default(),
        }
    }
    let n = problem.num_variables();
    let m = problem.num_constraints();
    let mut g_l = vec![0.0; m];
    let mut g_u = vec![0.0; m];
    problem.constraint_bounds(&mut g_l, &mut g_u);
    let mut lambda = vec![0.0; m];
    let mut rho = RHO_INIT;
    let mut x_current = vec![0.0; n];
    problem.initial_point(&mut x_current);
    let print_level = options.print_level;
    let tol = options.constr_viol_tol.max(options.tol);
    let mut total_iters = 0;
    let mut prev_violation = f64::INFINITY;
    for outer in 0..MAX_OUTER_ITER {
        // Minimize the augmented Lagrangian at the current (lambda, rho),
        // warm-started from the previous outer iterate.
        let al_problem = AugLagProblem {
            inner: problem,
            lambda: lambda.clone(),
            rho,
            g_l: g_l.clone(),
            g_u: g_u.clone(),
            n,
            m,
        };
        let mut inner_opts = options.clone();
        inner_opts.max_iter = 1000;
        inner_opts.print_level = 0; // keep inner solves quiet
        let inner_result = solve_with_x0(&al_problem, &inner_opts, &x_current);
        total_iters += inner_result.iterations;
        x_current = inner_result.x;
        // Evaluate the true constraints/objective at the inner minimizer.
        let mut g = vec![0.0; m];
        if !problem.constraints(&x_current, true, &mut g)
            || g.iter().any(|v| !v.is_finite())
        {
            return eval_error_result(x_current, lambda, g, n, total_iters);
        }
        // Infinity norm of the per-row signed violations.
        let violation: f64 = (0..m)
            .map(|i| constraint_violation_i(g[i], g_l[i], g_u[i]).abs())
            .fold(0.0, f64::max);
        let mut f_val = 0.0;
        if !problem.objective(&x_current, false, &mut f_val) || !f_val.is_finite() {
            return eval_error_result(x_current, lambda, g, n, total_iters);
        }
        if print_level >= 5 {
            rip_log!(
                "AL outer {}: f={:.6e}, violation={:.6e}, rho={:.2e}",
                outer, f_val, violation, rho
            );
        }
        if violation < tol {
            let status = SolveStatus::Optimal;
            if print_level >= 5 {
                rip_log!("AL converged: {:?}", status);
            }
            return SolveResult {
                x: x_current,
                objective: f_val,
                constraint_multipliers: lambda,
                bound_multipliers_lower: vec![0.0; n],
                bound_multipliers_upper: vec![0.0; n],
                constraint_values: g,
                status,
                iterations: total_iters,
                diagnostics: Default::default(),
            };
        }
        // First-order multiplier update.
        for i in 0..m {
            let is_eq = (g_l[i] - g_u[i]).abs() < 1e-20;
            if is_eq {
                lambda[i] += rho * (g[i] - g_l[i]);
            } else {
                // NOTE(review): a two-sided inequality shares one lambda, so
                // when both bounds are finite the upper-bound update below
                // overwrites the lower-bound one (only one side can be
                // violated at a time, but separate multipliers per side would
                // be cleaner) — TODO confirm this is intended.
                if g_l[i].is_finite() {
                    let c_l = g_l[i] - g[i];
                    lambda[i] = (lambda[i] + rho * c_l).max(0.0);
                }
                if g_u[i].is_finite() {
                    let c_u = g[i] - g_u[i];
                    lambda[i] = (lambda[i] + rho * c_u).max(0.0);
                }
            }
        }
        // Tighten the penalty when the violation did not shrink by >= 4x.
        if violation > 0.25 * prev_violation {
            rho = (rho * RHO_INCREASE).min(RHO_MAX);
        }
        prev_violation = violation;
    }
    // Outer-iteration budget exhausted: report the last iterate, evaluating
    // best-effort (evaluation failures here leave default values in place).
    let mut f_val = 0.0;
    let _ = problem.objective(&x_current, false, &mut f_val);
    let mut g = vec![0.0; m];
    let _ = problem.constraints(&x_current, true, &mut g);
    SolveResult {
        x: x_current,
        objective: f_val,
        constraint_multipliers: lambda,
        bound_multipliers_lower: vec![0.0; n],
        bound_multipliers_upper: vec![0.0; n],
        constraint_values: g,
        status: SolveStatus::MaxIterations,
        iterations: total_iters,
        diagnostics: Default::default(),
    }
}
/// Run the inner L-BFGS solve on `problem`, but starting from `x0` instead
/// of the problem's own initial point.
fn solve_with_x0<P: NlpProblem>(
    problem: &P,
    options: &SolverOptions,
    x0: &[f64],
) -> SolveResult {
    lbfgs::solve(
        &StartPointWrapper {
            inner: problem,
            x0: x0.to_vec(),
        },
        options,
    )
}
/// Adapter that forwards every `NlpProblem` method to `inner`, except that
/// `initial_point` reports the stored `x0` (used to warm-start the inner
/// solver from the previous outer iterate).
struct StartPointWrapper<'a, P: NlpProblem> {
inner: &'a P,
// Start vector reported by `initial_point`; must have `num_variables` entries.
x0: Vec<f64>,
}
/// Delegating `NlpProblem` implementation: every query passes straight
/// through to the wrapped problem, except `initial_point`, which reports the
/// stored start vector verbatim.
impl<P: NlpProblem> NlpProblem for StartPointWrapper<'_, P> {
    // The single override: seed the solver from the caller-supplied point.
    fn initial_point(&self, x0: &mut [f64]) {
        x0.copy_from_slice(&self.x0)
    }
    fn num_variables(&self) -> usize {
        self.inner.num_variables()
    }
    fn num_constraints(&self) -> usize {
        self.inner.num_constraints()
    }
    fn bounds(&self, lower: &mut [f64], upper: &mut [f64]) {
        self.inner.bounds(lower, upper)
    }
    fn constraint_bounds(&self, lower: &mut [f64], upper: &mut [f64]) {
        self.inner.constraint_bounds(lower, upper)
    }
    fn objective(&self, x: &[f64], new_x: bool, obj: &mut f64) -> bool {
        self.inner.objective(x, new_x, obj)
    }
    fn gradient(&self, x: &[f64], new_x: bool, grad: &mut [f64]) -> bool {
        self.inner.gradient(x, new_x, grad)
    }
    fn constraints(&self, x: &[f64], new_x: bool, g: &mut [f64]) -> bool {
        self.inner.constraints(x, new_x, g)
    }
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        self.inner.jacobian_structure()
    }
    fn jacobian_values(&self, x: &[f64], new_x: bool, vals: &mut [f64]) -> bool {
        self.inner.jacobian_values(x, new_x, vals)
    }
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        self.inner.hessian_structure()
    }
    fn hessian_values(&self, x: &[f64], new_x: bool, obj_factor: f64, lambda: &[f64], vals: &mut [f64]) -> bool {
        self.inner.hessian_values(x, new_x, obj_factor, lambda, vals)
    }
}