use std::cell::RefCell;
use std::time::{Duration, Instant};
use crate::convergence::{self, check_convergence, ConvergenceInfo, ConvergenceStatus};
use crate::filter::{self, Filter, FilterEntry};
use crate::kkt::{self, InertiaCorrectionParams};
use crate::linear_solver::banded::BandedLdl;
use crate::linear_solver::dense::DenseLdl;
#[cfg(all(feature = "faer", not(feature = "rmumps")))]
use crate::linear_solver::sparse::SparseLdl;
#[cfg(feature = "rmumps")]
use crate::linear_solver::multifrontal::MultifrontalLdl;
#[cfg(feature = "rmumps")]
use crate::linear_solver::iterative::IterativeMinres;
#[cfg(feature = "rmumps")]
use crate::linear_solver::hybrid::HybridSolver;
use crate::linear_solver::{KktMatrix, LinearSolver, SymmetricMatrix};
use crate::options::LinearSolverChoice;
const AVG_WINDOW: usize = 6;
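/// Default sparse KKT solver: a direct factorization using the best backend
/// compiled in (multifrontal via `rmumps`, sparse LDL via `faer`, or the dense
/// LDL fallback when neither feature is enabled).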
fn new_sparse_solver() -> Box<dyn LinearSolver> {
new_sparse_solver_with_choice(LinearSolverChoice::Direct)
}
fn new_sparse_solver_with_choice(choice: LinearSolverChoice) -> Box<dyn LinearSolver> {
match choice {
LinearSolverChoice::Direct => {
#[cfg(feature = "rmumps")]
{ return Box::new(MultifrontalLdl::new()); }
#[cfg(all(not(feature = "rmumps"), feature = "faer"))]
{ return Box::new(SparseLdl::new()); }
#[cfg(not(any(feature = "rmumps", feature = "faer")))]
{ return Box::new(DenseLdl::new()); }
}
LinearSolverChoice::Iterative => {
#[cfg(feature = "rmumps")]
{ return Box::new(IterativeMinres::new()); }
#[cfg(not(feature = "rmumps"))]
{
log::warn!("Iterative solver requires rmumps feature; falling back to direct");
return new_sparse_solver_with_choice(LinearSolverChoice::Direct);
}
}
LinearSolverChoice::Hybrid => {
#[cfg(feature = "rmumps")]
{ return Box::new(HybridSolver::new()); }
#[cfg(not(feature = "rmumps"))]
{
log::warn!("Hybrid solver requires rmumps feature; falling back to direct");
return new_sparse_solver_with_choice(LinearSolverChoice::Direct);
}
}
}
}
fn new_fallback_solver(use_sparse: bool) -> Box<dyn LinearSolver> {
if use_sparse {
new_sparse_solver()
} else {
Box::new(DenseLdl::new())
}
}
use crate::options::SolverOptions;
use crate::problem::NlpProblem;
use crate::restoration::RestorationPhase;
use crate::trace;
use crate::restoration_nlp::RestorationNlp;
use crate::result::{SolveResult, SolverDiagnostics, SolveStatus};
use crate::slack_formulation::SlackFormulation;
use crate::warmstart::WarmStartInitializer;
use crate::logging::rip_log;
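/// Wrapper that applies objective and constraint scaling to an `NlpProblem`:
/// the objective and its gradient are multiplied by `obj_scaling`, each
/// constraint (and its bounds, Jacobian row, and Hessian multiplier) by the
/// corresponding entry of `g_scaling`.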
struct ScaledProblem<'a, P: NlpProblem> {
inner: &'a P,
obj_scaling: f64,
g_scaling: Vec<f64>,
jac_rows: Vec<usize>,
}
impl<P: NlpProblem> NlpProblem for ScaledProblem<'_, P> {
fn num_variables(&self) -> usize {
self.inner.num_variables()
}
fn num_constraints(&self) -> usize {
self.inner.num_constraints()
}
fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
self.inner.bounds(x_l, x_u);
}
fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
self.inner.constraint_bounds(g_l, g_u);
for (i, &s) in self.g_scaling.iter().enumerate() {
if g_l[i].is_finite() {
g_l[i] *= s;
}
if g_u[i].is_finite() {
g_u[i] *= s;
}
}
}
fn initial_point(&self, x0: &mut [f64]) {
self.inner.initial_point(x0);
}
fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
if !self.inner.objective(x, _new_x, obj) { return false; }
*obj *= self.obj_scaling;
true
}
fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
if !self.inner.gradient(x, _new_x, grad) { return false; }
for g in grad.iter_mut() {
*g *= self.obj_scaling;
}
true
}
fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
if !self.inner.constraints(x, _new_x, g) { return false; }
for (i, &s) in self.g_scaling.iter().enumerate() {
g[i] *= s;
}
true
}
fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
self.inner.jacobian_structure()
}
fn jacobian_values(&self, x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
if !self.inner.jacobian_values(x, _new_x, vals) { return false; }
for (idx, &row) in self.jac_rows.iter().enumerate() {
vals[idx] *= self.g_scaling[row];
}
true
}
fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
self.inner.hessian_structure()
}
fn hessian_values(&self, x: &[f64], _new_x: bool, obj_factor: f64, lambda: &[f64], vals: &mut [f64]) -> bool {
let scaled_lambda: Vec<f64> = lambda
.iter()
.zip(self.g_scaling.iter())
.map(|(l, s)| l * s)
.collect();
self.inner
.hessian_values(x, _new_x, obj_factor * self.obj_scaling, &scaled_lambda, vals)
}
}
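/// Wrapper that presents the problem in scaled variables `x_scaled = dx * x`.
/// Bounds, the initial point, and bound multipliers are mapped into scaled
/// space; every evaluation unscales back to user space, and gradients,
/// Jacobian columns, and Hessian entries pick up the matching `1/dx` factors.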
struct XScaledProblem<'a, P: NlpProblem> {
inner: &'a P,
dx: Vec<f64>,
inv_dx: Vec<f64>,
jac_cols: Vec<usize>,
hess_rows: Vec<usize>,
hess_cols: Vec<usize>,
scratch_x: RefCell<Vec<f64>>,
}
impl<'a, P: NlpProblem> XScaledProblem<'a, P> {
fn new(inner: &'a P, dx: Vec<f64>) -> Self {
let n = inner.num_variables();
debug_assert_eq!(dx.len(), n);
let inv_dx: Vec<f64> = dx.iter().map(|&d| 1.0 / d).collect();
let (_, jac_cols) = inner.jacobian_structure();
let (hess_rows, hess_cols) = inner.hessian_structure();
Self {
inner,
dx,
inv_dx,
jac_cols,
hess_rows,
hess_cols,
scratch_x: RefCell::new(vec![0.0; n]),
}
}
fn unscale_x<'b>(&'b self, x: &[f64]) -> std::cell::Ref<'b, Vec<f64>> {
{
let mut buf = self.scratch_x.borrow_mut();
for i in 0..x.len() {
buf[i] = x[i] * self.inv_dx[i];
}
}
self.scratch_x.borrow()
}
}
impl<P: NlpProblem> NlpProblem for XScaledProblem<'_, P> {
fn num_variables(&self) -> usize {
self.inner.num_variables()
}
fn num_constraints(&self) -> usize {
self.inner.num_constraints()
}
fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
self.inner.bounds(x_l, x_u);
for i in 0..x_l.len() {
if x_l[i].is_finite() {
x_l[i] *= self.dx[i];
}
if x_u[i].is_finite() {
x_u[i] *= self.dx[i];
}
}
}
fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
self.inner.constraint_bounds(g_l, g_u);
}
fn initial_point(&self, x0: &mut [f64]) {
self.inner.initial_point(x0);
for i in 0..x0.len() {
x0[i] *= self.dx[i];
}
}
fn initial_multipliers(
&self,
lam_g: &mut [f64],
z_l: &mut [f64],
z_u: &mut [f64],
) -> bool {
if !self.inner.initial_multipliers(lam_g, z_l, z_u) {
return false;
}
for i in 0..z_l.len() {
z_l[i] *= self.inv_dx[i];
z_u[i] *= self.inv_dx[i];
}
true
}
fn objective(&self, x: &[f64], new_x: bool, obj: &mut f64) -> bool {
let x_user = self.unscale_x(x);
self.inner.objective(&x_user, new_x, obj)
}
fn gradient(&self, x: &[f64], new_x: bool, grad: &mut [f64]) -> bool {
let x_user = self.unscale_x(x);
if !self.inner.gradient(&x_user, new_x, grad) {
return false;
}
for i in 0..grad.len() {
grad[i] *= self.inv_dx[i];
}
true
}
fn constraints(&self, x: &[f64], new_x: bool, g: &mut [f64]) -> bool {
let x_user = self.unscale_x(x);
self.inner.constraints(&x_user, new_x, g)
}
fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
self.inner.jacobian_structure()
}
fn jacobian_values(&self, x: &[f64], new_x: bool, vals: &mut [f64]) -> bool {
let x_user = self.unscale_x(x);
if !self.inner.jacobian_values(&x_user, new_x, vals) {
return false;
}
for (idx, &col) in self.jac_cols.iter().enumerate() {
vals[idx] *= self.inv_dx[col];
}
true
}
fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
self.inner.hessian_structure()
}
fn hessian_values(
&self,
x: &[f64],
new_x: bool,
obj_factor: f64,
lambda: &[f64],
vals: &mut [f64],
) -> bool {
let x_user = self.unscale_x(x);
if !self.inner.hessian_values(&x_user, new_x, obj_factor, lambda, vals) {
return false;
}
for (idx, (&r, &c)) in self
.hess_rows
.iter()
.zip(self.hess_cols.iter())
.enumerate()
{
vals[idx] *= self.inv_dx[r] * self.inv_dx[c];
}
true
}
}
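/// Snapshot of the iterate, multipliers, barrier parameter, objective,
/// constraint values, and filter entries, taken so the watchdog strategy can
/// restore the solver to a known-good point if its relaxed steps fail.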
struct WatchdogSavedState {
x: Vec<f64>,
y: Vec<f64>,
z_l: Vec<f64>,
z_u: Vec<f64>,
v_l: Vec<f64>,
v_u: Vec<f64>,
mu: f64,
obj: f64,
g: Vec<f64>,
grad_f: Vec<f64>,
filter_entries: Vec<FilterEntry>,
theta: f64,
phi: f64,
}
impl WatchdogSavedState {
fn snapshot(state: &SolverState, filter: &Filter, theta: f64, phi: f64) -> Self {
Self {
x: state.x.clone(),
y: state.y.clone(),
z_l: state.z_l.clone(),
z_u: state.z_u.clone(),
v_l: state.v_l.clone(),
v_u: state.v_u.clone(),
mu: state.mu,
obj: state.obj,
g: state.g.clone(),
grad_f: state.grad_f.clone(),
filter_entries: filter.save_entries(),
theta,
phi,
}
}
fn restore(&self, state: &mut SolverState) {
state.x = self.x.clone();
state.y = self.y.clone();
state.z_l = self.z_l.clone();
state.z_u = self.z_u.clone();
state.v_l = self.v_l.clone();
state.v_u = self.v_u.clone();
state.mu = self.mu;
state.obj = self.obj;
state.g = self.g.clone();
state.grad_f = self.grad_f.clone();
}
}
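/// Complete primal-dual iterate of the interior-point method: primal
/// variables, constraint and bound multipliers, the current search direction
/// and step sizes, the barrier parameter, cached function/derivative
/// evaluations, bounds, sparsity patterns, scalings, and diagnostics.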
pub(crate) struct SolverState {
pub x: Vec<f64>,
pub y: Vec<f64>,
pub z_l: Vec<f64>,
pub z_u: Vec<f64>,
pub v_l: Vec<f64>,
pub v_u: Vec<f64>,
pub dx: Vec<f64>,
pub dy: Vec<f64>,
pub dz_l: Vec<f64>,
pub dz_u: Vec<f64>,
pub mu: f64,
pub alpha_primal: f64,
pub alpha_dual: f64,
pub iter: usize,
pub x_l: Vec<f64>,
pub x_u: Vec<f64>,
pub g_l: Vec<f64>,
pub g_u: Vec<f64>,
pub n: usize,
pub m: usize,
pub obj: f64,
pub grad_f: Vec<f64>,
pub g: Vec<f64>,
pub jac_rows: Vec<usize>,
pub jac_cols: Vec<usize>,
pub jac_vals: Vec<f64>,
pub hess_rows: Vec<usize>,
pub hess_cols: Vec<usize>,
pub hess_vals: Vec<f64>,
pub consecutive_acceptable: usize,
pub obj_scaling: f64,
pub g_scaling: Vec<f64>,
pub diagnostics: SolverDiagnostics,
x_last_eval: Vec<f64>,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum MuMode {
Free,
Fixed,
}
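/// State for the barrier-parameter update strategy: the current mode
/// (free vs. fixed mu), a short history of reference KKT errors used to test
/// for sufficient progress, and counters for restoration, insufficient-step,
/// and soft-restoration events.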
struct MuState {
mode: MuMode,
ref_vals: Vec<f64>,
num_refs_max: usize,
refs_red_fact: f64,
tiny_step: bool,
first_iter_in_mode: bool,
consecutive_restoration_failures: usize,
consecutive_insufficient: usize,
consecutive_soft_restoration: usize,
dual_inf_window: Vec<f64>,
}
impl MuState {
fn new() -> Self {
Self {
mode: MuMode::Free,
ref_vals: Vec::with_capacity(8),
num_refs_max: 4,
refs_red_fact: 0.999,
tiny_step: false,
first_iter_in_mode: true,
consecutive_restoration_failures: 0,
consecutive_insufficient: 0,
consecutive_soft_restoration: 0,
dual_inf_window: Vec::with_capacity(4),
}
}
fn check_sufficient_progress(&self, kkt_error: f64) -> bool {
        if self.ref_vals.len() < self.num_refs_max {
            return true;
        }
self.ref_vals.iter().any(|&r| kkt_error <= self.refs_red_fact * r)
}
fn remember_accepted(&mut self, kkt_error: f64) {
if self.ref_vals.len() >= self.num_refs_max {
self.ref_vals.remove(0);
}
self.ref_vals.push(kkt_error);
}
}
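/// Limited-memory BFGS approximation of the Lagrangian Hessian, used when
/// `hessian_approximation_lbfgs` is enabled. Keeps up to `m_max` damped
/// (s, y) pairs and materializes the approximation as a dense lower triangle
/// for KKT assembly.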
pub struct LbfgsIpmState {
n: usize,
m_max: usize,
s_store: Vec<Vec<f64>>,
y_store: Vec<Vec<f64>>,
prev_x: Vec<f64>,
prev_lag_grad: Vec<f64>,
has_prev: bool,
gamma: f64,
}
impl LbfgsIpmState {
pub fn new(n: usize) -> Self {
Self {
n,
m_max: 10,
s_store: Vec::with_capacity(10),
y_store: Vec::with_capacity(10),
prev_x: vec![0.0; n],
prev_lag_grad: vec![0.0; n],
has_prev: false,
gamma: 1.0,
}
}
fn compute_lagrangian_gradient(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
lambda: &[f64],
n: usize,
) -> Vec<f64> {
let mut lag_grad = grad_f.to_vec();
for (idx, (&row, &col)) in jac_rows.iter().zip(jac_cols.iter()).enumerate() {
if col < n {
lag_grad[col] += jac_vals[idx] * lambda[row];
}
}
lag_grad
}
pub fn update(
&mut self,
new_x: &[f64],
new_lag_grad: &[f64],
) {
if !self.has_prev {
self.prev_x.copy_from_slice(new_x);
self.prev_lag_grad.copy_from_slice(new_lag_grad);
self.has_prev = true;
return;
}
let n = self.n;
let mut s_k = vec![0.0; n];
let mut y_k = vec![0.0; n];
for i in 0..n {
s_k[i] = new_x[i] - self.prev_x[i];
y_k[i] = new_lag_grad[i] - self.prev_lag_grad[i];
}
let ss: f64 = s_k.iter().map(|v| v * v).sum();
if ss < 1e-30 {
self.prev_x.copy_from_slice(new_x);
self.prev_lag_grad.copy_from_slice(new_lag_grad);
return;
}
let sy: f64 = dot_product(&s_k, &y_k);
let bs = self.multiply_bk(&s_k);
let sbs: f64 = dot_product(&s_k, &bs);
        // Powell damping: if the curvature condition s'y >= 0.2 * s'Bs fails,
        // blend y with B*s so the update stays positive definite.
        if sy < 0.2 * sbs {
            let theta = if (sbs - sy).abs() < 1e-30 {
                1.0
            } else {
                0.8 * sbs / (sbs - sy)
            };
            for i in 0..n {
                y_k[i] = theta * y_k[i] + (1.0 - theta) * bs[i];
            }
        }
let sy_damped: f64 = dot_product(&s_k, &y_k);
if sy_damped <= 1e-20 {
self.prev_x.copy_from_slice(new_x);
self.prev_lag_grad.copy_from_slice(new_lag_grad);
return;
}
let yy: f64 = y_k.iter().map(|v| v * v).sum();
if yy > 1e-30 {
self.gamma = sy_damped / yy;
}
if self.s_store.len() == self.m_max {
self.s_store.remove(0);
self.y_store.remove(0);
}
self.s_store.push(s_k);
self.y_store.push(y_k);
self.prev_x.copy_from_slice(new_x);
self.prev_lag_grad.copy_from_slice(new_lag_grad);
}
pub fn multiply_bk(&self, v: &[f64]) -> Vec<f64> {
let n = self.n;
let k = self.s_store.len();
if k == 0 {
let scale = 1.0 / self.gamma.max(1e-12);
return v.iter().map(|&vi| scale * vi).collect();
}
let mut result = vec![0.0; n];
let bk = self.form_dense_bk();
for i in 0..n {
for j in 0..n {
let (r, c) = if i >= j { (i, j) } else { (j, i) };
let idx = r * (r + 1) / 2 + c;
result[i] += bk[idx] * v[j];
}
}
result
}
pub fn form_dense_bk(&self) -> Vec<f64> {
let n = self.n;
let k = self.s_store.len();
let nnz = n * (n + 1) / 2;
let b0_diag = 1.0 / self.gamma.max(1e-12);
let mut bk = vec![0.0; nnz];
for i in 0..n {
let idx = i * (i + 1) / 2 + i;
bk[idx] = b0_diag;
}
if k == 0 {
return bk;
}
for p in 0..k {
let s = &self.s_store[p];
let y = &self.y_store[p];
let mut bs = vec![0.0; n];
for i in 0..n {
for j in 0..n {
let (r, c) = if i >= j { (i, j) } else { (j, i) };
let idx = r * (r + 1) / 2 + c;
bs[i] += bk[idx] * s[j];
}
}
let sbs: f64 = dot_product(s, &bs);
let sy: f64 = dot_product(s, y);
if sbs.abs() < 1e-30 || sy.abs() < 1e-30 {
continue;
}
for i in 0..n {
for j in 0..=i {
let idx = i * (i + 1) / 2 + j;
bk[idx] += -bs[i] * bs[j] / sbs + y[i] * y[j] / sy;
}
}
}
bk
}
fn fill_hessian(&self, hess_vals: &mut [f64]) {
let bk = self.form_dense_bk();
hess_vals[..bk.len()].copy_from_slice(&bk);
}
}
fn dense_lower_triangle_pattern(n: usize) -> (Vec<usize>, Vec<usize>) {
let nnz = n * (n + 1) / 2;
let mut rows = Vec::with_capacity(nnz);
let mut cols = Vec::with_capacity(nnz);
for i in 0..n {
for j in 0..=i {
rows.push(i);
cols.push(j);
}
}
(rows, cols)
}
impl SolverState {
fn new<P: NlpProblem>(problem: &P, options: &SolverOptions) -> Self {
let n = problem.num_variables();
let m = problem.num_constraints();
let mut x_l = vec![0.0; n];
let mut x_u = vec![0.0; n];
problem.bounds(&mut x_l, &mut x_u);
let mut g_l = vec![0.0; m];
let mut g_u = vec![0.0; m];
problem.constraint_bounds(&mut g_l, &mut g_u);
sentinel_bounds_to_infinity(&mut x_l, &mut x_u, options);
sentinel_bounds_to_infinity(&mut g_l, &mut g_u, options);
let mut x = vec![0.0; n];
problem.initial_point(&mut x);
relax_fixed_variable_bounds(&mut x_l, &mut x_u);
push_initial_point_from_bounds(&mut x, &x_l, &x_u, options);
let initial_mu = match (options.warm_start, options.warm_start_target_mu) {
(true, Some(mu)) if mu > 0.0 => mu,
_ => options.mu_init,
};
let (mut z_l, mut z_u) = init_bound_multipliers(&x, &x_l, &x_u, initial_mu);
let (jac_rows, jac_cols) = problem.jacobian_structure();
let jac_nnz = jac_rows.len();
let (hess_rows, hess_cols) = if options.hessian_approximation_lbfgs {
dense_lower_triangle_pattern(n)
} else {
problem.hessian_structure()
};
let hess_nnz = hess_rows.len();
let mut y = compute_initial_y_with_ls(
problem, options, &x, &jac_rows, &jac_cols, &g_l, &g_u, n, m, jac_nnz,
);
if options.warm_start {
apply_warm_start_multipliers(problem, &mut y, &mut z_l, &mut z_u);
}
Self {
x,
y,
z_l,
z_u,
v_l: vec![0.0; m],
v_u: vec![0.0; m],
dx: vec![0.0; n],
dy: vec![0.0; m],
dz_l: vec![0.0; n],
dz_u: vec![0.0; n],
mu: initial_mu,
alpha_primal: 0.0,
alpha_dual: 0.0,
iter: 0,
x_l,
x_u,
g_l,
g_u,
n,
m,
obj: 0.0,
grad_f: vec![0.0; n],
g: vec![0.0; m],
jac_rows,
jac_cols,
jac_vals: vec![0.0; jac_nnz],
hess_rows,
hess_cols,
hess_vals: vec![0.0; hess_nnz],
consecutive_acceptable: 0,
obj_scaling: 1.0,
g_scaling: vec![1.0; m],
diagnostics: SolverDiagnostics::default(),
x_last_eval: vec![f64::NAN; n],
}
}
fn evaluate_with_linear<P: NlpProblem>(
&mut self,
problem: &P,
obj_factor: f64,
linear_constraints: Option<&[bool]>,
skip_hessian: bool,
) -> bool {
let new_x = self.x != self.x_last_eval;
if !problem.objective(&self.x, new_x, &mut self.obj) { return false; }
if !self.obj.is_finite() { return false; }
if !problem.gradient(&self.x, false, &mut self.grad_f) { return false; }
if self.m > 0 {
if !problem.constraints(&self.x, false, &mut self.g) { return false; }
if !problem.jacobian_values(&self.x, false, &mut self.jac_vals) { return false; }
}
self.x_last_eval.copy_from_slice(&self.x);
if skip_hessian {
return true;
}
if let Some(flags) = linear_constraints {
let mut lambda_for_hess = self.y.clone();
for (i, &is_lin) in flags.iter().enumerate() {
if is_lin {
lambda_for_hess[i] = 0.0;
}
}
if !problem.hessian_values(&self.x, false, obj_factor, &lambda_for_hess, &mut self.hess_vals) { return false; }
} else {
if !problem.hessian_values(&self.x, false, obj_factor, &self.y, &mut self.hess_vals) { return false; }
}
true
}
fn barrier_objective(&self, options: &SolverOptions) -> f64 {
compute_barrier_phi(
self.obj, &self.x, &self.g, self,
self.n, self.m, options.constraint_slack_barrier,
)
}
fn constraint_violation(&self) -> f64 {
convergence::primal_infeasibility(&self.g, &self.g_l, &self.g_u)
}
fn barrier_directional_derivative(&self, options: &SolverOptions) -> f64 {
let mut grad_phi_dx = 0.0;
for i in 0..self.n {
let mut grad_phi_i = self.grad_f[i];
if self.x_l[i].is_finite() {
grad_phi_i -= self.mu / slack_xl(self, i);
}
if self.x_u[i].is_finite() {
grad_phi_i += self.mu / slack_xu(self, i);
}
grad_phi_dx += grad_phi_i * self.dx[i];
}
if options.constraint_slack_barrier && self.m > 0 {
let mut jdx = vec![0.0; self.m];
for (idx, (&row, &col)) in
self.jac_rows.iter().zip(self.jac_cols.iter()).enumerate()
{
jdx[row] += self.jac_vals[idx] * self.dx[col];
}
for i in 0..self.m {
if constraint_is_equality(self, i) {
continue;
}
if self.g_l[i].is_finite() {
let slack = self.g[i] - self.g_l[i];
if slack > self.mu * 1e-2 {
grad_phi_dx -= self.mu * jdx[i] / slack;
}
}
if self.g_u[i].is_finite() {
let slack = self.g_u[i] - self.g[i];
if slack > self.mu * 1e-2 {
grad_phi_dx += self.mu * jdx[i] / slack;
}
}
}
}
grad_phi_dx
}
}
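/// Unconstrained least-squares reformulation of a nonlinear-equations problem:
/// minimizes `0.5 * ||g(x) - target||^2`, where `target` is the midpoint of
/// each constraint's bounds. The Hessian combines the Gauss-Newton term
/// `J^T J` with the residual-weighted constraint Hessians.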
struct LeastSquaresProblem<'a, P: NlpProblem> {
inner: &'a P,
targets: Vec<f64>,
jac_rows: Vec<usize>,
jac_cols: Vec<usize>,
hess_rows: Vec<usize>,
hess_cols: Vec<usize>,
inner_hess_map: Vec<usize>,
}
impl<P: NlpProblem> LeastSquaresProblem<'_, P> {
fn new(inner: &P) -> LeastSquaresProblem<'_, P> {
let n = inner.num_variables();
let m = inner.num_constraints();
let mut g_l = vec![0.0; m];
let mut g_u = vec![0.0; m];
inner.constraint_bounds(&mut g_l, &mut g_u);
let targets: Vec<f64> = (0..m).map(|i| 0.5 * (g_l[i] + g_u[i])).collect();
let (jac_rows, jac_cols) = inner.jacobian_structure();
let mut hess_rows = Vec::with_capacity(n * (n + 1) / 2);
let mut hess_cols = Vec::with_capacity(n * (n + 1) / 2);
for i in 0..n {
for j in 0..=i {
hess_rows.push(i);
hess_cols.push(j);
}
}
let (inner_hess_rows, inner_hess_cols) = inner.hessian_structure();
let mut inner_hess_map = Vec::with_capacity(inner_hess_rows.len());
for k in 0..inner_hess_rows.len() {
let (r, c) = (inner_hess_rows[k], inner_hess_cols[k]);
let (i, j) = if r >= c { (r, c) } else { (c, r) };
let idx = i * (i + 1) / 2 + j;
inner_hess_map.push(idx);
}
LeastSquaresProblem {
inner,
targets,
jac_rows,
jac_cols,
hess_rows,
hess_cols,
inner_hess_map,
}
}
}
impl<P: NlpProblem> NlpProblem for LeastSquaresProblem<'_, P> {
fn num_variables(&self) -> usize {
self.inner.num_variables()
}
    fn num_constraints(&self) -> usize {
        0
    }
fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
self.inner.bounds(x_l, x_u);
}
fn constraint_bounds(&self, _g_l: &mut [f64], _g_u: &mut [f64]) {
}
fn initial_point(&self, x0: &mut [f64]) {
self.inner.initial_point(x0);
}
fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
let m = self.targets.len();
let mut g = vec![0.0; m];
if !self.inner.constraints(x, _new_x, &mut g) { return false; }
let mut sum = 0.0;
for i in 0..m {
let r = g[i] - self.targets[i];
sum += r * r;
}
*obj = 0.5 * sum;
true
}
fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
let n = self.inner.num_variables();
let m = self.targets.len();
let mut g = vec![0.0; m];
if !self.inner.constraints(x, _new_x, &mut g) { return false; }
let mut r = vec![0.0; m];
for i in 0..m {
r[i] = g[i] - self.targets[i];
}
let jac_nnz = self.jac_rows.len();
let mut jac_vals = vec![0.0; jac_nnz];
if !self.inner.jacobian_values(x, _new_x, &mut jac_vals) { return false; }
for i in 0..n {
grad[i] = 0.0;
}
for (idx, (&row, &col)) in self.jac_rows.iter().zip(self.jac_cols.iter()).enumerate() {
grad[col] += jac_vals[idx] * r[row];
}
true
}
fn constraints(&self, _x: &[f64], _new_x: bool, _g: &mut [f64]) -> bool { true }
fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
(vec![], vec![])
}
fn jacobian_values(&self, _x: &[f64], _new_x: bool, _vals: &mut [f64]) -> bool { true }
fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
(self.hess_rows.clone(), self.hess_cols.clone())
}
fn hessian_values(&self, x: &[f64], _new_x: bool, obj_factor: f64, _lambda: &[f64], vals: &mut [f64]) -> bool {
let n = self.inner.num_variables();
let m = self.targets.len();
let mut g = vec![0.0; m];
if !self.inner.constraints(x, _new_x, &mut g) { return false; }
let mut r = vec![0.0; m];
for i in 0..m {
r[i] = g[i] - self.targets[i];
}
let jac_nnz = self.jac_rows.len();
let mut jac_vals = vec![0.0; jac_nnz];
if !self.inner.jacobian_values(x, _new_x, &mut jac_vals) { return false; }
let mut j_dense = vec![0.0; m * n];
for (idx, (&row, &col)) in self.jac_rows.iter().zip(self.jac_cols.iter()).enumerate() {
j_dense[row * n + col] += jac_vals[idx];
}
let mut idx = 0;
for i in 0..n {
for j in 0..=i {
let mut dot = 0.0;
for k in 0..m {
dot += j_dense[k * n + i] * j_dense[k * n + j];
}
vals[idx] = obj_factor * dot;
idx += 1;
}
}
let inner_hess_nnz = self.inner_hess_map.len();
if inner_hess_nnz > 0 {
let mut inner_hess_vals = vec![0.0; inner_hess_nnz];
if !self.inner.hessian_values(x, _new_x, 0.0, &r, &mut inner_hess_vals) { return false; }
for (k, &v) in inner_hess_vals.iter().enumerate() {
vals[self.inner_hess_map[k]] += obj_factor * v;
}
}
true
}
}
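/// Solves the Gauss-Newton normal equations `(J^T J + 1e-14 I) dx = J^T r`
/// for a dense Jacobian via Cholesky; returns `None` if the factorization
/// breaks down.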
fn solve_gn_normal_equations(j_dense: &[f64], r: &[f64], n: usize, m: usize) -> Option<Vec<f64>> {
let mut jtj = vec![0.0; n * n];
let mut jtr = vec![0.0; n];
for i in 0..n {
for j in 0..n {
let mut s = 0.0;
for k in 0..m {
s += j_dense[k * n + i] * j_dense[k * n + j];
}
jtj[i * n + j] = s;
}
let mut s = 0.0;
for k in 0..m {
s += j_dense[k * n + i] * r[k];
}
jtr[i] = s;
}
for i in 0..n {
jtj[i * n + i] += 1e-14;
}
dense_cholesky_solve(&jtj, &jtr, n)
}
fn dense_cholesky_solve(a: &[f64], b: &[f64], n: usize) -> Option<Vec<f64>> {
let mut l = vec![0.0; n * n];
for j in 0..n {
let mut sum = 0.0;
for k in 0..j {
sum += l[j * n + k] * l[j * n + k];
}
let diag = a[j * n + j] - sum;
if diag <= 0.0 {
return None;
}
l[j * n + j] = diag.sqrt();
for i in (j + 1)..n {
let mut sum = 0.0;
for k in 0..j {
sum += l[i * n + k] * l[j * n + k];
}
l[i * n + j] = (a[i * n + j] - sum) / l[j * n + j];
}
}
let mut y = vec![0.0; n];
for i in 0..n {
let mut sum = 0.0;
for k in 0..i {
sum += l[i * n + k] * y[k];
}
y[i] = (b[i] - sum) / l[i * n + i];
}
let mut x = vec![0.0; n];
for i in (0..n).rev() {
let mut sum = 0.0;
for k in (i + 1)..n {
sum += l[k * n + i] * x[k];
}
x[i] = (y[i] - sum) / l[i * n + i];
}
Some(x)
}
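/// Heuristic detection of an overdetermined nonlinear-equations problem:
/// strictly more equality constraints than variables, a numerically zero
/// objective and gradient at the initial point, and a non-negligible initial
/// constraint violation.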
fn detect_ne_problem<P: NlpProblem>(problem: &P) -> bool {
let n = problem.num_variables();
let m = problem.num_constraints();
if m < n || m == 0 || n == 0 {
return false;
}
if m == n {
return false;
}
let mut x0 = vec![0.0; n];
problem.initial_point(&mut x0);
let mut f0 = 0.0;
if !problem.objective(&x0, true, &mut f0) {
return false;
}
if f0.abs() > 1e-10 {
return false;
}
let mut grad = vec![0.0; n];
if !problem.gradient(&x0, false, &mut grad) {
return false;
}
let grad_max = linf_norm(&grad);
if grad_max > 1e-10 {
return false;
}
let mut g_l = vec![0.0; m];
let mut g_u = vec![0.0; m];
problem.constraint_bounds(&mut g_l, &mut g_u);
for i in 0..m {
if !convergence::is_equality_constraint(g_l[i], g_u[i]) {
return false;
}
}
let mut g0 = vec![0.0; m];
if !problem.constraints(&x0, false, &mut g0) {
return false;
}
let theta0 = convergence::primal_infeasibility(&g0, &g_l, &g_u);
if theta0 < 1e-8 {
return false;
}
true
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FailureDiagnosis {
StallAtInfeasibility,
StallNearOptimal,
NumericalBreakdown,
SlowConvergence,
DualDivergence,
}
fn diagnose_failure(result: &SolveResult) -> FailureDiagnosis {
let d = &result.diagnostics;
match result.status {
SolveStatus::NumericalError => {
if d.final_dual_inf > 1e4 {
FailureDiagnosis::DualDivergence
} else {
FailureDiagnosis::NumericalBreakdown
}
}
SolveStatus::RestorationFailed => FailureDiagnosis::StallAtInfeasibility,
SolveStatus::MaxIterations => {
if d.final_dual_inf > 1e4 {
FailureDiagnosis::DualDivergence
} else if d.final_primal_inf > 1e-2 {
FailureDiagnosis::StallAtInfeasibility
} else if d.final_primal_inf < 1e-6 && d.final_compl < 1e-4 {
FailureDiagnosis::StallNearOptimal
} else {
FailureDiagnosis::SlowConvergence
}
}
_ => FailureDiagnosis::SlowConvergence,
}
}
fn is_strictly_better(current: &SolveResult, candidate: &SolveResult) -> bool {
let candidate_solved = matches!(candidate.status, SolveStatus::Optimal);
if !candidate_solved {
return false;
}
let current_solved = matches!(current.status, SolveStatus::Optimal);
let current_has_good_point = current.objective.is_finite()
&& current.diagnostics.final_primal_inf <= 1e-4;
if current_solved || current_has_good_point {
let tol = 1e-8 * current.objective.abs().max(1.0);
candidate.objective < current.objective - tol
} else {
true
}
}
fn adopt_candidate_if_better(
result: &mut SolveResult,
candidate: SolveResult,
options: &SolverOptions,
label: &str,
tag: &str,
) {
if is_strictly_better(result, &candidate) {
if options.print_level >= 5 {
rip_log!(
"ripopt: {} succeeded ({:?}, obj={:.6e})",
label, candidate.status, candidate.objective
);
}
*result = candidate;
result.diagnostics.fallback_used = Some(tag.into());
} else if options.print_level >= 5 {
rip_log!("ripopt: {} did not improve ({:?})", label, candidate.status);
}
}
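/// Clones the options for a fallback solve: shrinks the iteration budget and
/// limits the wall-clock time to whatever remains, returning `None` when there
/// is effectively no time left.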
fn prepare_fallback_opts(options: &SolverOptions, solve_start: &Instant) -> Option<SolverOptions> {
let mut opts = options.clone();
opts.max_iter = options.max_iter.min(500).max(options.max_iter / 3);
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - solve_start.elapsed().as_secs_f64();
if remaining <= 0.1 {
return None;
}
opts.max_wall_time = remaining;
}
Some(opts)
}
fn try_lbfgs_hessian_fallback<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
) {
if !options.enable_lbfgs_hessian_fallback || options.hessian_approximation_lbfgs {
return;
}
let Some(mut opts) = prepare_fallback_opts(options, &solve_start) else { return };
opts.hessian_approximation_lbfgs = true;
opts.enable_lbfgs_hessian_fallback = false;
opts.stall_iter_limit = 0;
if options.print_level >= 5 {
rip_log!("ripopt: Trying L-BFGS Hessian fallback ({:?})", diagnosis);
}
let candidate = solve_ipm(problem, &opts);
adopt_candidate_if_better(result, candidate, options, "L-BFGS Hessian fallback", "lbfgs_hessian");
}
fn try_al_fallback<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
has_constraints: bool,
) {
if !options.enable_al_fallback || !has_constraints {
return;
}
let Some(opts) = prepare_fallback_opts(options, &solve_start) else { return };
if options.print_level >= 5 {
rip_log!("ripopt: Trying AL fallback ({:?})", diagnosis);
}
let candidate = crate::augmented_lagrangian::solve(problem, &opts);
adopt_candidate_if_better(result, candidate, options, "AL fallback", "augmented_lagrangian");
}
fn try_sqp_fallback<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
has_constraints: bool,
) {
if !options.enable_sqp_fallback || !has_constraints {
return;
}
let Some(opts) = prepare_fallback_opts(options, &solve_start) else { return };
if options.print_level >= 5 {
rip_log!("ripopt: Trying SQP fallback ({:?})", diagnosis);
}
let candidate = crate::sqp::solve(problem, &opts);
adopt_candidate_if_better(result, candidate, options, "SQP fallback", "sqp");
}
fn try_slack_fallback<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
has_inequalities: bool,
) -> Option<SolveResult> {
if !options.enable_slack_fallback || !has_inequalities {
return None;
}
let mut opts = prepare_fallback_opts(options, &solve_start)?;
opts.enable_slack_fallback = false;
if options.print_level >= 5 {
rip_log!("ripopt: Trying slack fallback ({:?})", diagnosis);
}
let slack_prob = SlackFormulation::new(problem, &result.x);
let candidate = solve_ipm(&slack_prob, &opts);
if is_strictly_better(result, &candidate) {
let n = problem.num_variables();
let m = problem.num_constraints();
if options.print_level >= 5 {
rip_log!(
"ripopt: Slack fallback succeeded ({:?}, obj={:.6e})",
candidate.status, candidate.objective
);
}
let x_out = candidate.x[..n].to_vec();
let mut g_out = vec![0.0; m];
let _ = problem.constraints(&x_out, true, &mut g_out);
let mut diag = candidate.diagnostics;
diag.fallback_used = Some("slack".into());
diag.wall_time_secs = solve_start.elapsed().as_secs_f64();
return Some(SolveResult {
x: x_out,
objective: candidate.objective,
constraint_multipliers: candidate.constraint_multipliers,
bound_multipliers_lower: candidate.bound_multipliers_lower[..n].to_vec(),
bound_multipliers_upper: candidate.bound_multipliers_upper[..n].to_vec(),
constraint_values: g_out,
status: candidate.status,
iterations: result.iterations + candidate.iterations,
diagnostics: diag,
});
} else if options.print_level >= 5 {
rip_log!("ripopt: Slack fallback did not improve ({:?})", candidate.status);
}
None
}
fn try_plain_ipm_retry<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
) {
let Some(mut opts) = prepare_fallback_opts(options, &solve_start) else { return };
opts.gondzio_mcc_max = 0;
opts.mehrotra_pc = false;
opts.stall_iter_limit = 0;
if options.print_level >= 5 {
rip_log!("ripopt: Trying plain IPM retry (no corrections) for DualDivergence");
}
let candidate = solve_ipm(problem, &opts);
adopt_candidate_if_better(result, candidate, options, "Plain IPM retry", "plain_ipm");
}
fn dispatch_failure_recovery<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
has_constraints: bool,
has_inequalities: bool,
) -> Option<SolveResult> {
match diagnosis {
FailureDiagnosis::StallAtInfeasibility => {
if let Some(slack_result) = try_slack_fallback(
result, problem, options, solve_start, diagnosis, has_inequalities,
) {
return Some(slack_result);
}
try_sqp_fallback(result, problem, options, solve_start, diagnosis, has_constraints);
}
FailureDiagnosis::NumericalBreakdown => {
try_lbfgs_hessian_fallback(result, problem, options, solve_start, diagnosis);
}
FailureDiagnosis::DualDivergence => {
try_plain_ipm_retry(result, problem, options, solve_start);
if !matches!(result.status, SolveStatus::Optimal) {
try_lbfgs_hessian_fallback(result, problem, options, solve_start, diagnosis);
}
}
FailureDiagnosis::SlowConvergence => {
try_al_fallback(result, problem, options, solve_start, diagnosis, has_constraints);
if !matches!(result.status, SolveStatus::Optimal) {
try_sqp_fallback(result, problem, options, solve_start, diagnosis, has_constraints);
}
}
FailureDiagnosis::StallNearOptimal => {
try_lbfgs_hessian_fallback(result, problem, options, solve_start, diagnosis);
if !matches!(result.status, SolveStatus::Optimal) {
try_sqp_fallback(result, problem, options, solve_start, diagnosis, has_constraints);
}
}
}
None
}
fn try_slow_optimal_slack_fallback<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
diagnosis: FailureDiagnosis,
has_inequalities: bool,
initial_feasible: bool,
initial_obj: f64,
) -> Option<SolveResult> {
if !(matches!(result.status, SolveStatus::Optimal)
&& has_inequalities
&& options.enable_slack_fallback
&& options.max_wall_time > 0.0)
{
return None;
}
let time_used = solve_start.elapsed().as_secs_f64();
let worsened_from_feasible = initial_feasible
&& initial_obj.is_finite()
&& result.objective > initial_obj - 1e-3 * initial_obj.abs().max(1.0);
if !(time_used > 0.05 * options.max_wall_time && worsened_from_feasible) {
return None;
}
if options.print_level >= 5 {
rip_log!(
"ripopt: Slow-optimal detected (obj={:.4e}, init_obj={:.4e}, time={:.1}s/{:.1}s), trying slack fallback",
result.objective, initial_obj, time_used, options.max_wall_time
);
}
try_slack_fallback(result, problem, options, solve_start, diagnosis, has_inequalities)
}
fn try_preprocessed_solve<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
solve_start: Instant,
) -> Option<SolveResult> {
if !options.enable_preprocessing {
return None;
}
let prep = crate::preprocessing::PreprocessedProblem::new(problem as &dyn NlpProblem, options.bound_push);
if !prep.did_reduce() {
return None;
}
if options.print_level >= 5 {
rip_log!(
"ripopt: Preprocessing reduced problem: {} fixed vars, {} redundant constraints ({}x{} -> {}x{})",
prep.num_fixed(), prep.num_redundant(),
problem.num_variables(), problem.num_constraints(),
prep.num_variables(), prep.num_constraints(),
);
}
let mut prep_opts = options.clone();
prep_opts.enable_preprocessing = false;
if options.max_wall_time > 0.0 {
let elapsed = solve_start.elapsed().as_secs_f64();
let remaining = (options.max_wall_time - elapsed).max(1.0);
prep_opts.max_wall_time = remaining * 0.5;
}
let reduced_result = solve(&prep, &prep_opts);
let result = prep.unmap_solution(&reduced_result);
if matches!(result.status, SolveStatus::Optimal) {
return Some(result);
}
if options.print_level >= 5 {
rip_log!(
"ripopt: Preprocessed solve failed ({:?}), retrying without preprocessing",
result.status
);
}
None
}
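/// Backtracking line search for a Gauss-Newton polish step `x - alpha * dx`:
/// alpha is first capped by a fraction-to-boundary rule on the variable
/// bounds, then halved (at most 10 times) until the constraint violation
/// decreases.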
fn try_polish_step_with_backtrack<P: NlpProblem>(
problem: &P,
polished_x: &[f64],
dx: &[f64],
x_l_var: &[f64],
x_u_var: &[f64],
g_l: &[f64],
g_u: &[f64],
current_theta: f64,
n: usize,
m: usize,
) -> Option<(f64, f64)> {
let mut alpha = 1.0;
let tau = 0.995;
    // The trial step below is `x - alpha * dx`, so the lower bound can only be
    // approached when dx[i] > 0 and the upper bound when dx[i] < 0.
    for i in 0..n {
        if dx[i] > 0.0 && x_l_var[i].is_finite() {
            let max_step = tau * (polished_x[i] - x_l_var[i]) / dx[i];
            if max_step < alpha { alpha = max_step; }
        }
        if dx[i] < 0.0 && x_u_var[i].is_finite() {
            let max_step = -tau * (x_u_var[i] - polished_x[i]) / dx[i];
            if max_step < alpha { alpha = max_step; }
        }
    }
alpha = alpha.max(0.0).min(1.0);
let mut trial_x = vec![0.0; n];
let mut trial_g = vec![0.0; m];
let mut best_alpha = alpha;
let mut best_theta = current_theta;
for _ in 0..10 {
for i in 0..n {
trial_x[i] = polished_x[i] - best_alpha * dx[i];
}
if !problem.constraints(&trial_x, true, &mut trial_g) {
best_alpha *= 0.5;
continue;
}
let trial_theta = convergence::primal_infeasibility(&trial_g, g_l, g_u);
if trial_theta < current_theta {
best_theta = trial_theta;
return Some((best_alpha, best_theta));
}
best_alpha *= 0.5;
}
Some((best_alpha, best_theta))
}
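/// Polishes a least-squares solution with damped Gauss-Newton steps on the
/// residual `g(x) - target` (at most 20 iterations), as long as the constraint
/// violation is small but still above tolerance and keeps improving.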
fn polish_ls_solution_with_newton<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
polished_x: &mut [f64],
theta: &mut f64,
g_final: &mut [f64],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
) {
if !(*theta > options.tol && *theta < 1e-2) {
return;
}
let mut x_l_var = vec![0.0; n];
let mut x_u_var = vec![0.0; n];
problem.bounds(&mut x_l_var, &mut x_u_var);
let (jac_rows, jac_cols) = problem.jacobian_structure();
let nnz = jac_rows.len();
let target: Vec<f64> = (0..m).map(|i| {
if (g_u[i] - g_l[i]).abs() < 1e-15 { g_l[i] } else { 0.5 * (g_l[i] + g_u[i]) }
}).collect();
let max_newton_iters = 20;
for newton_iter in 0..max_newton_iters {
let r: Vec<f64> = (0..m).map(|i| g_final[i] - target[i]).collect();
let mut jac_vals = vec![0.0; nnz];
if !problem.jacobian_values(polished_x, true, &mut jac_vals) {
break;
}
let mut j_dense = vec![0.0; m * n];
for k in 0..nnz {
j_dense[jac_rows[k] * n + jac_cols[k]] += jac_vals[k];
}
let dx = match solve_gn_normal_equations(&j_dense, &r, n, m) {
Some(dx) => dx,
None => break,
};
let (best_alpha, best_theta) = match try_polish_step_with_backtrack(
problem, polished_x, &dx, &x_l_var, &x_u_var, g_l, g_u, *theta, n, m,
) {
Some(t) => t,
None => break,
};
if best_theta >= *theta * 0.999 {
break;
}
for i in 0..n {
polished_x[i] -= best_alpha * dx[i];
}
if !problem.constraints(polished_x, true, g_final) {
break;
}
*theta = best_theta;
if options.print_level >= 5 {
rip_log!(
"ripopt: Newton polish iter {}: theta={:.2e}, alpha={:.4}",
newton_iter + 1, *theta, best_alpha,
);
}
if *theta < options.tol {
break;
}
}
}
#[allow(clippy::too_many_arguments)]
fn finalize_ne_to_ls_result<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
ls_problem: &LeastSquaresProblem<'_, P>,
ls_result: &SolveResult,
polished_x: Vec<f64>,
status: SolveStatus,
g_out: Vec<f64>,
g_l: &[f64],
g_u: &[f64],
theta: f64,
m: usize,
) -> SolveResult {
let (final_x, final_status, final_g, final_iters, final_zl, final_zu) =
if status == SolveStatus::LocalInfeasibility && options.enable_lbfgs_fallback {
if options.print_level >= 5 {
rip_log!(
"ripopt: NE-to-LS LocalInfeasibility (theta={:.4e}), trying L-BFGS on LS",
theta
);
}
let lbfgs_ls = crate::lbfgs::solve(ls_problem, options);
let mut g_lb = vec![0.0; m];
let theta_lb = if problem.constraints(&lbfgs_ls.x, true, &mut g_lb) {
convergence::primal_infeasibility(&g_lb, g_l, g_u)
} else {
f64::INFINITY
};
if theta_lb < theta {
let new_status = if theta_lb < options.tol {
SolveStatus::Optimal
} else {
SolveStatus::LocalInfeasibility
};
if options.print_level >= 5 {
rip_log!(
"ripopt: L-BFGS improved NE-to-LS (theta: {:.4e} -> {:.4e}, status={:?})",
theta, theta_lb, new_status
);
}
(lbfgs_ls.x, new_status, g_lb, lbfgs_ls.iterations,
lbfgs_ls.bound_multipliers_lower, lbfgs_ls.bound_multipliers_upper)
} else {
if options.print_level >= 5 {
rip_log!(
"ripopt: L-BFGS did not improve NE-to-LS (theta_lb={:.4e} >= theta={:.4e})",
theta_lb, theta
);
}
(polished_x, status, g_out, ls_result.iterations,
ls_result.bound_multipliers_lower.clone(),
ls_result.bound_multipliers_upper.clone())
}
} else {
(polished_x, status, g_out, ls_result.iterations,
ls_result.bound_multipliers_lower.clone(),
ls_result.bound_multipliers_upper.clone())
};
SolveResult {
x: final_x,
objective: 0.0,
constraint_multipliers: vec![0.0; m],
bound_multipliers_lower: final_zl,
bound_multipliers_upper: final_zu,
constraint_values: final_g,
status: final_status,
iterations: final_iters,
diagnostics: SolverDiagnostics::default(),
}
}
fn run_ne_constrained_ipm_fallback<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
solve_start: Instant,
ls_result: &SolveResult,
g_out: &[f64],
theta: f64,
m: usize,
) -> SolveResult {
if options.print_level >= 5 {
rip_log!(
"ripopt: LS reformulation reports infeasibility (theta={:.4e}, ls_status={:?}), falling back to constrained IPM",
theta, ls_result.status
);
}
let mut fallback_opts = options.clone();
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - solve_start.elapsed().as_secs_f64();
if remaining <= 0.1 {
return SolveResult {
x: ls_result.x.clone(),
objective: 0.0,
constraint_multipliers: vec![0.0; m],
bound_multipliers_lower: ls_result.bound_multipliers_lower.clone(),
bound_multipliers_upper: ls_result.bound_multipliers_upper.clone(),
constraint_values: g_out.to_vec(),
status: SolveStatus::MaxIterations,
iterations: ls_result.iterations,
diagnostics: SolverDiagnostics::default(),
};
}
fallback_opts.max_wall_time = remaining;
}
let ipm_result = solve_ipm(problem, &fallback_opts);
if matches!(ipm_result.status, SolveStatus::Optimal) {
return ipm_result;
}
if options.enable_al_fallback {
if options.print_level >= 5 {
rip_log!(
"ripopt: NE constrained IPM fallback failed ({:?}), trying AL",
ipm_result.status
);
}
let mut al_opts = options.clone();
al_opts.max_iter = options.max_iter.min(500).max(options.max_iter / 3);
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - solve_start.elapsed().as_secs_f64();
if remaining <= 0.1 {
return ipm_result;
}
al_opts.max_wall_time = remaining;
}
let al_result = crate::augmented_lagrangian::solve(problem, &al_opts);
if matches!(al_result.status, SolveStatus::Optimal) {
if options.print_level >= 5 {
rip_log!(
"ripopt: NE AL fallback succeeded ({:?}, obj={:.6e})",
al_result.status, al_result.objective
);
}
return al_result;
}
}
ipm_result
}
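/// Detects an overdetermined nonlinear-equations problem, solves it as an
/// unconstrained least-squares problem, polishes the result with Gauss-Newton
/// steps, and falls back to the constrained IPM (and optionally the augmented
/// Lagrangian or L-BFGS) when the reformulation reports infeasibility.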
fn try_ne_to_ls_reformulation<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
solve_start: Instant,
) -> Option<SolveResult> {
if !detect_ne_problem(problem) {
return None;
}
let n = problem.num_variables();
let m = problem.num_constraints();
if options.print_level >= 5 {
rip_log!(
"ripopt: Detected overdetermined NE problem (n={}, m={}), reformulating as least-squares",
n, m
);
}
let ls_problem = LeastSquaresProblem::new(problem);
let mut ls_opts = options.clone();
if m == n {
ls_opts.max_iter = (options.max_iter / 10).min(100);
}
let ls_result = solve_ipm(&ls_problem, &ls_opts);
let mut g_final = vec![0.0; m];
if !problem.constraints(&ls_result.x, true, &mut g_final) {
let mut diag = ls_result.diagnostics.clone();
diag.fallback_used = Some("ne-to-ls".into());
return Some(SolveResult {
x: ls_result.x,
objective: ls_result.objective,
constraint_multipliers: vec![0.0; m],
bound_multipliers_lower: ls_result.bound_multipliers_lower,
bound_multipliers_upper: ls_result.bound_multipliers_upper,
constraint_values: g_final,
status: SolveStatus::EvaluationError,
iterations: ls_result.iterations,
diagnostics: diag,
});
}
let mut g_l = vec![0.0; m];
let mut g_u = vec![0.0; m];
problem.constraint_bounds(&mut g_l, &mut g_u);
let mut theta = convergence::primal_infeasibility(&g_final, &g_l, &g_u);
let mut polished_x = ls_result.x.clone();
polish_ls_solution_with_newton(
problem, options,
&mut polished_x, &mut theta, &mut g_final,
&g_l, &g_u, n, m,
);
let g_out = g_final;
let status = if theta < options.tol {
SolveStatus::Optimal
} else {
SolveStatus::LocalInfeasibility
};
if options.print_level >= 5 {
rip_log!(
"ripopt: NE-to-LS result: obj_LS={:.4e}, constraint_violation={:.4e}, status={:?}",
ls_result.objective, theta, status
);
}
let ls_converged = matches!(ls_result.status, SolveStatus::Optimal);
if status == SolveStatus::LocalInfeasibility && (m == n || !ls_converged) {
return Some(run_ne_constrained_ipm_fallback(
problem, options, solve_start, &ls_result, &g_out, theta, m,
));
}
Some(finalize_ne_to_ls_result(
problem, options, &ls_problem, &ls_result,
polished_x, status, g_out, &g_l, &g_u, theta, m,
))
}
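/// Public entry point. Validates and applies optional user variable scaling,
/// then runs the main pipeline in `solve_inner`: preprocessing, NE-to-LS
/// reformulation, the interior-point solve, and failure-recovery fallbacks.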
pub fn solve<P: NlpProblem>(problem: &P, options: &SolverOptions) -> SolveResult {
let solve_start = Instant::now();
if let Some(ref xs) = options.user_x_scaling {
if !xs.is_empty() {
let n = problem.num_variables();
if xs.len() != n || xs.iter().any(|&v| !v.is_finite() || v <= 0.0) {
rip_log!(
"ripopt: user_x_scaling rejected (len={}, expected n={}, \
all entries must be strictly positive and finite); \
returning InternalError.",
xs.len(), n
);
return SolveResult {
x: vec![0.0; n],
objective: f64::NAN,
constraint_multipliers: vec![0.0; problem.num_constraints()],
bound_multipliers_lower: vec![0.0; n],
bound_multipliers_upper: vec![0.0; n],
constraint_values: vec![0.0; problem.num_constraints()],
status: SolveStatus::InternalError,
iterations: 0,
diagnostics: SolverDiagnostics::default(),
};
}
let wrapped = XScaledProblem::new(problem, xs.clone());
let mut inner_options = options.clone();
inner_options.user_x_scaling = None;
let mut result = solve_inner(&wrapped, &inner_options, solve_start);
for i in 0..n {
result.x[i] *= wrapped.inv_dx[i];
result.bound_multipliers_lower[i] *= wrapped.dx[i];
result.bound_multipliers_upper[i] *= wrapped.dx[i];
}
return result;
}
}
solve_inner(problem, options, solve_start)
}
fn solve_inner<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
solve_start: Instant,
) -> SolveResult {
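    // With `initial_feasible = false` here, the slow-optimal slack fallback
    // further below never activates.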
let (initial_obj, initial_feasible) = (f64::INFINITY, false);
if let Some(result) = try_preprocessed_solve(problem, options, solve_start) {
return result;
}
if let Some(result) = try_ne_to_ls_reformulation(problem, options, solve_start) {
return result;
}
let mut result = run_initial_solve(problem, options);
let diagnosis = diagnose_failure(&result);
let has_constraints = problem.num_constraints() > 0;
let has_inequalities = has_inequality_constraints(problem);
if options.print_level >= 5 && !matches!(result.status, SolveStatus::Optimal) {
rip_log!("ripopt: Failure diagnosis: {:?}", diagnosis);
}
try_conservative_ipm_retry(&mut result, problem, options, solve_start);
if !matches!(result.status, SolveStatus::Optimal) {
if let Some(slack_result) = dispatch_failure_recovery(
&mut result, problem, options, solve_start, diagnosis,
has_constraints, has_inequalities,
) {
return slack_result;
}
}
if let Some(slack_result) = try_slow_optimal_slack_fallback(
&mut result, problem, options, solve_start, diagnosis,
has_inequalities, initial_feasible, initial_obj,
) {
return slack_result;
}
apply_late_optimality_promotion(&mut result, options);
result.diagnostics.wall_time_secs = solve_start.elapsed().as_secs_f64();
result
}
fn run_initial_solve<P: NlpProblem>(problem: &P, options: &SolverOptions) -> SolveResult {
if options.enable_lbfgs_fallback && problem.num_constraints() == 0 {
let lbfgs_result = crate::lbfgs::solve(problem, options);
if matches!(lbfgs_result.status, SolveStatus::Optimal) {
if options.print_level >= 5 {
rip_log!(
"ripopt: L-BFGS solved unconstrained problem ({:?}, obj={:.6e})",
lbfgs_result.status, lbfgs_result.objective
);
}
return lbfgs_result;
}
if options.print_level >= 5 {
rip_log!(
"ripopt: L-BFGS failed ({:?}, obj={:.6e}), trying IPM",
lbfgs_result.status, lbfgs_result.objective
);
}
let ipm_result = solve_ipm(problem, options);
if matches!(ipm_result.status, SolveStatus::Optimal) {
ipm_result
} else if lbfgs_result.objective < ipm_result.objective {
lbfgs_result
} else {
ipm_result
}
} else if options.max_wall_time > 0.0 && problem.num_constraints() > 0 {
let mut main_opts = options.clone();
main_opts.max_wall_time = options.max_wall_time * 0.5;
solve_ipm(problem, &main_opts)
} else {
solve_ipm(problem, options)
}
}
fn try_conservative_ipm_retry<P: NlpProblem>(
result: &mut SolveResult,
problem: &P,
options: &SolverOptions,
solve_start: Instant,
) {
let n_problem = problem.num_variables();
if n_problem > 200 || matches!(result.status, SolveStatus::Optimal) {
return;
}
let Some(mut opts) = prepare_fallback_opts(options, &solve_start) else {
return;
};
opts.gondzio_mcc_max = 0;
opts.mehrotra_pc = false;
opts.stall_iter_limit = 0;
opts.proactive_infeasibility_detection = true;
opts.max_iter = options.max_iter;
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - solve_start.elapsed().as_secs_f64();
opts.max_wall_time = remaining * 0.7;
}
if options.print_level >= 5 {
rip_log!("ripopt: Trying conservative IPM retry (no Gondzio/Mehrotra, no stall detection)");
}
let candidate = solve_ipm(problem, &opts);
adopt_candidate_if_better(result, candidate, options, "Conservative retry", "conservative_ipm");
}
fn apply_late_optimality_promotion(result: &mut SolveResult, options: &SolverOptions) {
if !matches!(result.status, SolveStatus::NumericalError | SolveStatus::MaxIterations | SolveStatus::Acceptable) {
return;
}
let d = &result.diagnostics;
let pr_ok = d.final_primal_inf <= options.constr_viol_tol;
let co_ok = d.final_compl <= options.compl_inf_tol;
let du_strict_ok = d.final_dual_inf <= options.dual_inf_tol
&& d.final_dual_inf <= options.tol * 1000.0;
if pr_ok && co_ok && du_strict_ok {
if options.print_level >= 5 {
rip_log!(
"ripopt: Late-optimal (pr={:.2e}, du={:.2e}, co={:.2e}), returning Optimal",
d.final_primal_inf, d.final_dual_inf, d.final_compl
);
}
result.status = SolveStatus::Optimal;
} else if !matches!(result.status, SolveStatus::Acceptable) {
let du_acc_ok = d.final_dual_inf <= 1e-6
&& d.final_primal_inf <= 1e-2
&& d.final_compl <= 1e-2;
if du_acc_ok {
if options.print_level >= 5 {
rip_log!(
"ripopt: Late-acceptable (pr={:.2e}, du={:.2e}, co={:.2e}), returning Acceptable",
d.final_primal_inf, d.final_dual_inf, d.final_compl
);
}
result.status = SolveStatus::Acceptable;
}
}
}
fn has_inequality_constraints<P: NlpProblem>(problem: &P) -> bool {
let m = problem.num_constraints();
if m == 0 {
return false;
}
let mut g_l = vec![0.0; m];
let mut g_u = vec![0.0; m];
problem.constraint_bounds(&mut g_l, &mut g_u);
(0..m).any(|i| (g_l[i] - g_u[i]).abs() > 0.0)
}
struct PhaseTimings {
problem_eval: Duration,
kkt_assembly: Duration,
factorization: Duration,
direction_solve: Duration,
line_search: Duration,
}
impl PhaseTimings {
fn new() -> Self {
PhaseTimings {
problem_eval: Duration::ZERO,
kkt_assembly: Duration::ZERO,
factorization: Duration::ZERO,
direction_solve: Duration::ZERO,
line_search: Duration::ZERO,
}
}
fn print_summary(&self, iterations: usize, total: Duration) {
let total_secs = total.as_secs_f64();
let phases = [
("Problem eval", self.problem_eval),
("KKT assembly", self.kkt_assembly),
("Factorization", self.factorization),
("Direction solve", self.direction_solve),
("Line search", self.line_search),
];
let accounted: Duration = phases.iter().map(|(_, d)| *d).sum();
let other = total.saturating_sub(accounted);
rip_log!("\nPhase breakdown ({} iterations):", iterations);
for (name, dur) in &phases {
let secs = dur.as_secs_f64();
let pct = if total_secs > 0.0 { 100.0 * secs / total_secs } else { 0.0 };
rip_log!(" {:<20} {:>8.3}s ({:>5.1}%)", name, secs, pct);
}
let other_secs = other.as_secs_f64();
let other_pct = if total_secs > 0.0 { 100.0 * other_secs / total_secs } else { 0.0 };
rip_log!(" {:<20} {:>8.3}s ({:>5.1}%)", "Other", other_secs, other_pct);
rip_log!(" {:<20} {:>8.3}s", "Total", total_secs);
}
}
fn collect_kkt_lower_triangle_entries(kkt: &kkt::KktSystem) -> Vec<(usize, usize, f64)> {
let dim = kkt.dim;
match &kkt.matrix {
KktMatrix::Dense(d) => {
let mut v = Vec::with_capacity(dim * (dim + 1) / 2);
for j in 0..dim {
for i in j..dim {
let val = d.get(i, j);
if val != 0.0 {
v.push((i + 1, j + 1, val));
}
}
}
v
}
KktMatrix::Sparse(s) => {
let mut map: std::collections::HashMap<(usize, usize), f64> =
std::collections::HashMap::with_capacity(s.triplet_rows.len());
for k in 0..s.triplet_rows.len() {
let r = s.triplet_rows[k];
let c = s.triplet_cols[k];
*map.entry((c, r)).or_insert(0.0) += s.triplet_vals[k];
}
let mut v: Vec<(usize, usize, f64)> = map
.into_iter()
.filter(|(_, val)| *val != 0.0)
.map(|((i, j), val)| (i + 1, j + 1, val))
.collect();
v.sort_unstable_by_key(|&(i, j, _)| (j, i));
v
}
}
}
fn write_kkt_mtx_file(
mtx_path: &std::path::Path,
dim: usize,
entries: &[(usize, usize, f64)],
) -> std::io::Result<()> {
use std::io::Write;
let mut file = std::fs::File::create(mtx_path)?;
writeln!(file, "%%MatrixMarket matrix coordinate real symmetric")?;
writeln!(file, "{} {} {}", dim, dim, entries.len())?;
for (i, j, v) in entries {
writeln!(file, "{} {} {:.17e}", i, j, v)?;
}
Ok(())
}
fn write_kkt_json_sidecar(
json_path: &std::path::Path,
name: &str,
iteration: usize,
kkt: &kkt::KktSystem,
inertia: Option<(usize, usize, usize)>,
delta_w: f64,
delta_c: f64,
) -> std::io::Result<()> {
use std::io::Write;
let (pos, neg, zer) = inertia.unwrap_or((0, 0, 0));
let meta = serde_json::json!({
"problem_name": name,
"iteration": iteration,
"n": kkt.n,
"m": kkt.m,
"rhs": kkt.rhs,
"inertia": { "positive": pos, "negative": neg, "zero": zer },
"delta_w": delta_w,
"delta_c": delta_c,
"status": "ongoing"
});
let mut file = std::fs::File::create(json_path)?;
write!(file, "{}", meta)?;
Ok(())
}
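/// Dumps the current KKT matrix as a MatrixMarket `.mtx` file together with a
/// JSON sidecar holding iteration metadata (dimensions, RHS, inertia, and the
/// regularization parameters delta_w / delta_c).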
fn dump_kkt_matrix(
dir: &std::path::Path,
name: &str,
iteration: usize,
kkt: &kkt::KktSystem,
inertia: Option<(usize, usize, usize)>,
delta_w: f64,
delta_c: f64,
) {
if let Err(e) = std::fs::create_dir_all(dir) {
log::warn!("kkt_dump: cannot create directory {}: {}", dir.display(), e);
return;
}
let stem = format!("{}_{:04}", name, iteration);
let mtx_path = dir.join(format!("{}.mtx", stem));
let entries = collect_kkt_lower_triangle_entries(kkt);
if let Err(e) = write_kkt_mtx_file(&mtx_path, kkt.dim, &entries) {
log::warn!("kkt_dump: failed to write {}.mtx: {}", stem, e);
return;
}
let json_path = dir.join(format!("{}.json", stem));
if let Err(e) = write_kkt_json_sidecar(
&json_path, name, iteration, kkt, inertia, delta_w, delta_c,
) {
log::warn!("kkt_dump: failed to write {}.json: {}", stem, e);
}
}
fn update_lbfgs_hessian(
lbfgs_state: &mut Option<LbfgsIpmState>,
state: &mut SolverState,
) {
if let Some(ref mut lbfgs) = lbfgs_state {
let lag_grad = LbfgsIpmState::compute_lagrangian_gradient(
&state.grad_f, &state.jac_rows, &state.jac_cols, &state.jac_vals, &state.y, state.n,
);
lbfgs.update(&state.x, &lag_grad);
lbfgs.fill_hessian(&mut state.hess_vals);
}
}
fn evaluate_and_refresh_lbfgs<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
lbfgs_state: &mut Option<LbfgsIpmState>,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> bool {
let ok = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
update_lbfgs_hessian(lbfgs_state, state);
ok
}
struct AssembledKkt {
sigma: Vec<f64>,
use_sparse_condensed: bool,
condensed_system: Option<kkt::CondensedKktSystem>,
sparse_condensed_system: Option<kkt::SparseCondensedKktSystem>,
kkt_system_opt: Option<kkt::KktSystem>,
}
enum LineSearchOutcome {
StepAccepted,
Rejected,
Return(SolveResult),
}
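/// Barrier objective: `phi(x) = f(x) - mu * sum(ln(bound slacks))`, optionally
/// extended with log terms for inequality-constraint slacks when
/// `constraint_slack_barrier` is on. Bound slacks are floored at 1e-20 and
/// constraint slacks below `mu * 1e-2` are skipped.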
#[allow(clippy::too_many_arguments)]
fn compute_barrier_phi(
obj: f64,
x: &[f64],
g: &[f64],
state: &SolverState,
n: usize,
m: usize,
constraint_slack_barrier: bool,
) -> f64 {
let mut phi = obj;
for i in 0..n {
if state.x_l[i].is_finite() {
let slack = (x[i] - state.x_l[i]).max(1e-20);
phi -= state.mu * slack.ln();
}
if state.x_u[i].is_finite() {
let slack = (state.x_u[i] - x[i]).max(1e-20);
phi -= state.mu * slack.ln();
}
}
if constraint_slack_barrier {
for i in 0..m {
if constraint_is_equality(state, i) {
continue;
}
if state.g_l[i].is_finite() {
let slack = g[i] - state.g_l[i];
if slack > state.mu * 1e-2 {
phi -= state.mu * slack.ln();
}
}
if state.g_u[i].is_finite() {
let slack = state.g_u[i] - g[i];
if slack > state.mu * 1e-2 {
phi -= state.mu * slack.ln();
}
}
}
}
phi
}
#[allow(clippy::too_many_arguments)]
fn dispatch_soc_attempt<P: NlpProblem>(
state: &SolverState,
problem: &P,
x_trial: &[f64],
g_trial: &[f64],
condensed_system: &Option<kkt::CondensedKktSystem>,
cond_solver_for_soc: &mut Option<DenseLdl>,
sparse_condensed_system: &Option<kkt::SparseCondensedKktSystem>,
kkt_system_opt: &Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
filter: &Filter,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
options: &SolverOptions,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
if let (Some(cond), Some(cs)) = (condensed_system.as_ref(), cond_solver_for_soc.as_mut()) {
attempt_soc_condensed(
state, problem, g_trial, cs, cond, filter,
theta_current, phi_current, grad_phi_step, alpha, options,
)
} else if let Some(sc) = sparse_condensed_system.as_ref() {
attempt_soc_sparse_condensed(
state, problem, g_trial, lin_solver, sc, filter,
theta_current, phi_current, grad_phi_step, alpha, options,
)
} else if let Some(kkt) = kkt_system_opt.as_ref() {
attempt_soc(
state, problem, x_trial, g_trial,
lin_solver, kkt, filter,
theta_current, phi_current, grad_phi_step, alpha, options,
)
} else {
None
}
}
fn evaluate_trial_point<P: NlpProblem>(
state: &SolverState,
problem: &P,
alpha: f64,
m: usize,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
let x_trial = compute_clamped_trial_x(state, &state.dx, alpha);
let mut obj_trial = f64::INFINITY;
let obj_ok = problem.objective(&x_trial, true, &mut obj_trial);
let mut g_trial = vec![0.0; m];
let constr_ok = if m > 0 {
problem.constraints(&x_trial, true, &mut g_trial)
} else {
true
};
    if !obj_ok || !constr_ok || !obj_trial.is_finite()
        || g_trial.iter().any(|v| !v.is_finite())
    {
return None;
}
let theta_trial = theta_for_g(state, &g_trial);
Some((x_trial, obj_trial, g_trial, theta_trial))
}
fn log_line_search_rejection(
filter: &Filter,
theta_current: f64,
phi_current: f64,
theta_trial: f64,
phi_trial: f64,
grad_phi_step: f64,
alpha: f64,
) {
let sw = filter.switching_condition(theta_current, grad_phi_step, alpha);
let ar = filter.armijo_condition(phi_current, phi_trial, grad_phi_step, alpha);
let sr = filter.sufficient_infeasibility_reduction(theta_current, theta_trial);
let fa = filter.is_acceptable(theta_trial, phi_trial);
rip_log!(
" LS reject: alpha={:.2e} theta_t={:.2e} phi_t={:.2e} switch={} armijo={} suff_red={} filter_ok={}",
alpha, theta_trial, phi_trial, sw, ar, sr, fa
);
}
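/// Filter line search: backtracks alpha (at most 40 halvings) until the trial
/// point satisfies the filter / Armijo acceptance test, attempting a
/// second-order correction on the first backtrack when infeasibility grew.
/// A watchdog step accepts the full step without testing.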
fn run_line_search_loop<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
condensed_system: &Option<kkt::CondensedKktSystem>,
cond_solver_for_soc: &mut Option<DenseLdl>,
sparse_condensed_system: &Option<kkt::SparseCondensedKktSystem>,
kkt_system_opt: &Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
alpha_primal_max: f64,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
min_alpha: f64,
force_restoration: bool,
watchdog_active: bool,
iteration: usize,
n: usize,
m: usize,
start_time: Instant,
early_timeout: f64,
trace_meta: &mut TraceMetadata,
ls_steps: &mut usize,
) -> LineSearchOutcome {
let mut alpha = alpha_primal_max;
let mut step_accepted = false;
*ls_steps = 0;
for _ls_iter in 0..40 {
if force_restoration {
break;
}
if iteration < 3 && options.early_stall_timeout > 0.0
&& start_time.elapsed().as_secs_f64() > early_timeout
{
return LineSearchOutcome::Return(make_result(state, SolveStatus::NumericalError));
}
if alpha < min_alpha {
break;
}
let (x_trial, obj_trial, g_trial, theta_trial) =
match evaluate_trial_point(state, problem, alpha, m) {
Some(t) => t,
None => {
alpha *= 0.5;
*ls_steps += 1;
continue;
}
};
if watchdog_active && alpha == alpha_primal_max {
commit_trial_point(state, x_trial, obj_trial, g_trial, alpha);
step_accepted = true;
break;
}
let phi_trial = compute_barrier_phi(
obj_trial, &x_trial, &g_trial, state, n, m, options.constraint_slack_barrier,
);
let (acceptable, used_switching) = filter.check_acceptability(
theta_current,
phi_current,
theta_trial,
phi_trial,
grad_phi_step,
alpha,
);
if !acceptable && options.print_level >= 7 && *ls_steps < 5 {
log_line_search_rejection(
filter, theta_current, phi_current, theta_trial, phi_trial,
grad_phi_step, alpha,
);
}
if acceptable {
commit_trial_point(state, x_trial, obj_trial, g_trial, alpha);
step_accepted = true;
if !used_switching {
filter.add(theta_current, phi_current);
}
break;
}
if theta_trial > theta_current && options.max_soc > 0 && *ls_steps == 0 {
let soc_accepted = dispatch_soc_attempt(
state, problem, &x_trial, &g_trial, condensed_system,
cond_solver_for_soc, sparse_condensed_system, kkt_system_opt,
lin_solver, filter, theta_current, phi_current, grad_phi_step,
alpha, options,
);
if let Some((x_soc, obj_soc, g_soc, alpha_soc)) = soc_accepted {
state.diagnostics.soc_corrections += 1;
trace_meta.soc_accepted = true;
commit_trial_point(state, x_soc, obj_soc, g_soc, alpha_soc);
step_accepted = true;
filter.add(theta_current, phi_current);
break;
}
}
alpha *= 0.5;
*ls_steps += 1;
}
if step_accepted {
LineSearchOutcome::StepAccepted
} else {
LineSearchOutcome::Rejected
}
}
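/// Selects and assembles the KKT formulation for this iteration: a dense condensed
/// system when constraints dominate (`m >= 2n`, small or dense problems), a sparse
/// condensed system when the problem is sparse and condensing has not been
/// disabled, or otherwise the full augmented system. Exactly one of the three
/// optional systems is populated.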
fn assemble_kkt_systems(
state: &SolverState,
n: usize,
m: usize,
use_sparse: bool,
disable_sparse_condensed: bool,
) -> AssembledKkt {
let sigma = compute_sigma_from_state(state);
let use_condensed = m >= 2 * n && n > 0 && (!use_sparse || n <= 100);
let use_sparse_condensed = use_sparse && m > 0 && !use_condensed && !disable_sparse_condensed;
let condensed_system = if use_condensed {
Some(kkt::assemble_condensed_kkt(
n, m,
&state.hess_rows, &state.hess_cols, &state.hess_vals,
&state.jac_rows, &state.jac_cols, &state.jac_vals,
&sigma, &state.grad_f, &state.g, &state.g_l, &state.g_u,
&state.y, &state.z_l, &state.z_u,
&state.x, &state.x_l, &state.x_u, state.mu,
&state.v_l, &state.v_u,
))
} else {
None
};
let sparse_condensed_system = if use_sparse_condensed {
Some(kkt::assemble_sparse_condensed_kkt(
n, m,
&state.hess_rows, &state.hess_cols, &state.hess_vals,
&state.jac_rows, &state.jac_cols, &state.jac_vals,
&sigma, &state.grad_f, &state.g, &state.g_l, &state.g_u,
&state.y, &state.z_l, &state.z_u,
&state.x, &state.x_l, &state.x_u, state.mu,
&state.v_l, &state.v_u,
))
} else {
None
};
let kkt_system_opt = if !use_condensed && !use_sparse_condensed {
Some(assemble_kkt_from_state(state, n, m, &sigma, use_sparse))
} else {
None
};
AssembledKkt {
sigma,
use_sparse_condensed,
condensed_system,
sparse_condensed_system,
kkt_system_opt,
}
}
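/// Takes the dual step on the bound multipliers and projects each one onto
/// [mu / (kappa_sigma * s), kappa_sigma * mu / s] with kappa_sigma = 1e10, where s
/// is the corresponding primal slack (cf. the multiplier safeguard used by Ipopt).
/// In free (adaptive) mu mode the projection uses the current average
/// complementarity instead of mu. Returns the mu value used for the projection.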
fn apply_kappa_sigma_bound_multiplier_reset(
state: &mut SolverState,
mu_state: &MuState,
alpha_d: f64,
) -> f64 {
let n = state.n;
let kappa_sigma = 1e10;
let mu_ks = if mu_state.mode == MuMode::Free {
compute_avg_complementarity(state)
.max(state.mu)
.min(1e3)
} else {
state.mu
};
for i in 0..n {
if state.x_l[i].is_finite() {
let z_new = (state.z_l[i] + alpha_d * state.dz_l[i]).max(1e-20);
let s_l = slack_xl(state, i);
let z_lo = mu_ks / (kappa_sigma * s_l);
let z_hi = kappa_sigma * mu_ks / s_l;
state.z_l[i] = z_new.clamp(z_lo, z_hi);
}
if state.x_u[i].is_finite() {
let z_new = (state.z_u[i] + alpha_d * state.dz_u[i]).max(1e-20);
let s_u = slack_xu(state, i);
let z_lo = mu_ks / (kappa_sigma * s_u);
let z_hi = kappa_sigma * mu_ks / s_u;
state.z_u[i] = z_new.clamp(z_lo, z_hi);
}
}
mu_ks
}
struct DyOscillationTracker {
prev_dy: Option<Vec<f64>>,
sign_change_count: Vec<u8>,
}
impl DyOscillationTracker {
fn new(m: usize) -> Self {
Self {
prev_dy: None,
sign_change_count: vec![0u8; m],
}
}
}
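/// Updates the constraint multipliers y along dy. Once an iterate has been judged
/// acceptable, a per-component sign-change counter is maintained; after three
/// consecutive sign flips of dy[i] the update for that component is halved to damp
/// multiplier oscillation.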
fn apply_damped_y_update(
state: &mut SolverState,
alpha_y: f64,
tracker: &mut DyOscillationTracker,
) {
let m = state.m;
let near_convergence = state.consecutive_acceptable >= 1;
for i in 0..m {
let sign_change = if let Some(ref pdy) = tracker.prev_dy {
pdy[i] * state.dy[i] < 0.0
} else {
false
};
if near_convergence && sign_change {
tracker.sign_change_count[i] = tracker.sign_change_count[i].saturating_add(1);
} else if !sign_change {
tracker.sign_change_count[i] = 0;
}
let dy_i = if near_convergence && tracker.sign_change_count[i] >= 3 {
0.5 * state.dy[i]
} else {
state.dy[i]
};
state.y[i] += alpha_y * dy_i;
}
tracker.prev_dy = Some(state.dy.clone());
}
fn update_dual_variables(
state: &mut SolverState,
mu_state: &MuState,
alpha_dual_max: f64,
tracker: &mut DyOscillationTracker,
) -> f64 {
let alpha_y = state.alpha_primal;
let alpha_d = alpha_dual_max;
apply_damped_y_update(state, alpha_y, tracker);
let mu_ks = apply_kappa_sigma_bound_multiplier_reset(state, mu_state, alpha_d);
state.alpha_dual = alpha_d;
mu_ks
}
fn build_mcc_corrected_direction(
state: &SolverState,
ddx: &[f64],
ddy: &[f64],
iteration: usize,
n: usize,
m: usize,
dx_norm_orig: f64,
) -> (Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>) {
let mut ddz_l = vec![0.0_f64; n];
let mut ddz_u = vec![0.0_f64; n];
for i in 0..n {
if state.x_l[i].is_finite() {
ddz_l[i] = -(state.z_l[i] / slack_xl(state, i)) * ddx[i];
}
if state.x_u[i].is_finite() {
ddz_u[i] = (state.z_u[i] / slack_xu(state, i)) * ddx[i];
}
}
let mut dx_c: Vec<f64> = state.dx.iter().zip(ddx.iter()).map(|(a, b)| a + b).collect();
let mut dy_c: Vec<f64> = state.dy.iter().zip(ddy.iter()).map(|(a, b)| a + b).collect();
let mut dz_l_c: Vec<f64> = state.dz_l.iter().zip(ddz_l.iter()).map(|(a, b)| a + b).collect();
let mut dz_u_c: Vec<f64> = state.dz_u.iter().zip(ddz_u.iter()).map(|(a, b)| a + b).collect();
if dx_norm_orig > 1e-30 {
let dx_c_norm: f64 = l2_norm(&dx_c);
if dx_c_norm > 1e-30 {
let dot = dot_product(&state.dx, &dx_c);
let cos_angle = dot / (dx_norm_orig * dx_c_norm);
if cos_angle < 0.7 {
let alpha_damp = 0.3;
for i in 0..n { dx_c[i] = (1.0 - alpha_damp) * state.dx[i] + alpha_damp * dx_c[i]; }
for i in 0..m { dy_c[i] = (1.0 - alpha_damp) * state.dy[i] + alpha_damp * dy_c[i]; }
for i in 0..n { dz_l_c[i] = (1.0 - alpha_damp) * state.dz_l[i] + alpha_damp * dz_l_c[i]; }
for i in 0..n { dz_u_c[i] = (1.0 - alpha_damp) * state.dz_u[i] + alpha_damp * dz_u_c[i]; }
log::debug!(
"Gondzio MCC iter {}: dampened correction (cos={:.3})",
iteration, cos_angle
);
}
}
}
(dx_c, dy_c, dz_l_c, dz_u_c)
}
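/// Builds the right-hand side for one Gondzio centrality correction: for every
/// bound whose predicted complementarity product z*s at step `alpha_mcc` falls
/// outside [beta_min * mu_target, beta_max * mu_target], a term
/// (mu_target - z*s) / s is accumulated (with sign matching the bound) to pull the
/// product back toward the target. The flag reports whether any component needed
/// correcting.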
fn build_mcc_corrector_rhs(
state: &SolverState,
kkt_dim: usize,
alpha_mcc: f64,
mu_target: f64,
beta_min: f64,
beta_max: f64,
n: usize,
) -> (Vec<f64>, bool) {
let mut rhs_mcc = vec![0.0_f64; kkt_dim];
let mut needs_correction = false;
for i in 0..n {
if state.x_l[i].is_finite() {
let s_t = (state.x[i] + alpha_mcc * state.dx[i]
- state.x_l[i]).max(1e-20);
let z_t = (state.z_l[i] + alpha_mcc * state.dz_l[i]).max(1e-20);
let c = z_t * s_t;
if c < beta_min * mu_target || c > beta_max * mu_target {
rhs_mcc[i] += (mu_target - c) / s_t;
needs_correction = true;
}
}
if state.x_u[i].is_finite() {
let s_t = (state.x_u[i] - state.x[i]
- alpha_mcc * state.dx[i]).max(1e-20);
let z_t = (state.z_u[i] + alpha_mcc * state.dz_u[i]).max(1e-20);
let c = z_t * s_t;
if c < beta_min * mu_target || c > beta_max * mu_target {
rhs_mcc[i] -= (mu_target - c) / s_t;
needs_correction = true;
}
}
}
(rhs_mcc, needs_correction)
}
fn compute_mcc_alpha_max(
state: &SolverState,
dx: &[f64],
dz_l: &[f64],
dz_u: &[f64],
tau_mcc: f64,
) -> f64 {
let alpha = fraction_to_boundary_dual_z_min(state, dz_l, dz_u, tau_mcc)
.min(fraction_to_boundary_primal_x(state, dx, tau_mcc));
alpha.clamp(0.0, 1.0)
}
fn try_apply_one_mcc_correction(
state: &mut SolverState,
iteration: usize,
alpha_mcc: f64,
tau_mcc: f64,
dx_norm_orig: f64,
rhs_mcc: &[f64],
ddx: &[f64],
ddy: &[f64],
n: usize,
m: usize,
) -> Option<f64> {
let nrm_rhs: f64 = linf_norm(rhs_mcc);
let nrm_sol: f64 = ddx.iter().chain(ddy.iter()).map(|v| v.abs()).fold(0.0_f64, f64::max);
if nrm_sol > 1e10 * nrm_rhs.max(1.0) {
log::debug!(
"Gondzio MCC iter {}: ||sol||={:.2e}, ||rhs||={:.2e} — rejecting",
iteration, nrm_sol, nrm_rhs,
);
return None;
}
let (dx_c, dy_c, dz_l_c, dz_u_c) = build_mcc_corrected_direction(
state, ddx, ddy, iteration, n, m, dx_norm_orig,
);
let alpha_new = compute_mcc_alpha_max(
state, &dx_c, &dz_l_c, &dz_u_c, tau_mcc,
);
if alpha_new >= 0.9 * alpha_mcc {
install_step_directions(state, dx_c, dy_c, dz_l_c, dz_u_c);
log::debug!(
"Gondzio MCC iter {}: correction accepted, α_mcc={:.4}",
iteration, alpha_new
);
Some(alpha_new)
} else {
None
}
}
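/// Gondzio multiple centrality corrections. After the primal-dual direction is
/// available, up to `gondzio_mcc_max` extra corrector solves push outlying
/// complementarity products back toward the current mu. A corrected direction is
/// installed only if its fraction-to-boundary step length stays within 90% of the
/// previous one; corrections that bend the search direction too far (cosine below
/// 0.7 against the original dx) are blended back toward it. Only runs when the
/// full augmented KKT system is in use.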
fn apply_gondzio_mcc(
state: &mut SolverState,
options: &SolverOptions,
iteration: usize,
mu_state: &MuState,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
kkt_system_opt: &Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
) {
if options.gondzio_mcc_max == 0 {
return;
}
let Some(kkt) = kkt_system_opt.as_ref() else { return };
let n = state.n;
let m = state.m;
let tau_mcc = compute_tau(state, options, mu_state, primal_inf, dual_inf, compl_inf);
let mut alpha_mcc = compute_mcc_alpha_max(
state, &state.dx, &state.dz_l, &state.dz_u, tau_mcc,
);
let mu_target = state.mu;
let beta_min = 0.01_f64;
let beta_max = 100.0_f64;
let dx_norm_orig: f64 = l2_norm(&state.dx);
for _mcc_iter in 0..options.gondzio_mcc_max {
let (rhs_mcc, needs_correction) = build_mcc_corrector_rhs(
state, kkt.dim, alpha_mcc, mu_target, beta_min, beta_max, n,
);
if !needs_correction {
break;
}
match kkt::solve_with_custom_rhs_refined(&kkt.matrix, kkt.n, kkt.dim, lin_solver, &rhs_mcc) {
Ok((ddx, ddy)) => {
match try_apply_one_mcc_correction(
state, iteration, alpha_mcc, tau_mcc, dx_norm_orig,
&rhs_mcc, &ddx, &ddy, n, m,
) {
Some(alpha_new) => alpha_mcc = alpha_new,
None => break,
}
}
Err(_) => break,
}
}
}
enum WatchdogDecision {
Proceed,
Continue,
}
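/// Watchdog state for the non-monotone step heuristic: after
/// `watchdog_shortened_iter_trigger` consecutive shortened steps, the iterate and
/// filter are snapshotted and full steps are accepted on trust for up to
/// `watchdog_trial_iter_max` iterations; if no progress is made the snapshot is
/// restored.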
struct Watchdog {
consecutive_shortened: usize,
active: bool,
trial_count: usize,
saved: Option<WatchdogSavedState>,
}
impl Watchdog {
fn new() -> Self {
Self {
consecutive_shortened: 0,
active: false,
trial_count: 0,
saved: None,
}
}
fn deactivate(&mut self) {
self.active = false;
self.trial_count = 0;
self.saved = None;
}
}
fn try_activate_watchdog(
state: &mut SolverState,
options: &SolverOptions,
iteration: usize,
filter: &Filter,
wd: &mut Watchdog,
) {
if wd.active
|| wd.consecutive_shortened < options.watchdog_shortened_iter_trigger
{
return;
}
state.diagnostics.watchdog_activations += 1;
wd.active = true;
wd.trial_count = 0;
let wd_theta = state.constraint_violation();
let wd_phi = state.barrier_objective(options);
wd.saved = Some(WatchdogSavedState::snapshot(state, filter, wd_theta, wd_phi));
wd.consecutive_shortened = 0;
log::debug!(
"Watchdog activated at iteration {} (theta={:.2e}, phi={:.2e})",
iteration, wd_theta, wd_phi
);
}
fn process_watchdog_trial<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
lbfgs_state: &mut Option<LbfgsIpmState>,
wd: &mut Watchdog,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> Option<WatchdogDecision> {
if !wd.active {
return None;
}
wd.trial_count += 1;
let saved = wd.saved.as_ref()?;
let theta_now = state.constraint_violation();
let phi_now = state.barrier_objective(options);
let made_progress = filter.is_acceptable(theta_now, phi_now)
&& (theta_now < (1.0 - 1e-5) * saved.theta
|| phi_now < saved.phi - 1e-5 * saved.theta);
if made_progress {
log::debug!(
"Watchdog succeeded at trial {} (theta: {:.2e} -> {:.2e})",
wd.trial_count, saved.theta, theta_now
);
wd.deactivate();
return None;
}
if wd.trial_count >= options.watchdog_trial_iter_max {
log::debug!(
"Watchdog reverting after {} trials",
wd.trial_count
);
filter.restore_entries(saved.filter_entries.clone());
filter.add(theta_now, phi_now);
saved.restore(state);
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
wd.deactivate();
return Some(WatchdogDecision::Continue);
}
None
}
fn update_watchdog<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
alpha_primal_max: f64,
filter: &mut Filter,
lbfgs_state: &mut Option<LbfgsIpmState>,
wd: &mut Watchdog,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> WatchdogDecision {
if state.alpha_primal < alpha_primal_max * 0.99 {
wd.consecutive_shortened += 1;
} else {
wd.consecutive_shortened = 0;
}
try_activate_watchdog(state, options, iteration, filter, wd);
if let Some(decision) = process_watchdog_trial(
state, problem, options, filter, lbfgs_state, wd,
linear_constraints, lbfgs_mode,
) {
return decision;
}
WatchdogDecision::Proceed
}
enum DirectionSolveDecision {
Proceed {
dx: Vec<f64>,
dy: Vec<f64>,
cond_solver_for_soc: Option<DenseLdl>,
mehrotra_aff: Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)>,
},
Continue,
Return(SolveResult),
}
enum CondensedDirectionOutcome {
Solved {
dx: Vec<f64>,
dy: Vec<f64>,
cond_solver: Option<DenseLdl>,
},
Continue,
Return(SolveResult),
}
enum SolveRestoreOutcome {
Continue,
Return(SolveResult),
}
#[allow(clippy::too_many_arguments)]
fn restore_after_solve_failure<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
n: usize,
m: usize,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
deadline: Option<Instant>,
) -> SolveRestoreOutcome {
let (x_rest, success) = restoration.restore(
&state.x, &state.x_l, &state.x_u, &state.g_l, &state.g_u,
&state.jac_rows, &state.jac_cols, n, m, options,
&|theta, phi| filter.is_acceptable(theta, phi),
&|x_eval, g_out| problem.constraints(x_eval, true, g_out),
&|x_eval, jac_out| problem.jacobian_values(x_eval, true, jac_out),
Some(&|x_eval: &[f64], obj_out: &mut f64| problem.objective(x_eval, true, obj_out)),
deadline,
);
if success {
state.x = x_rest;
state.alpha_primal = 0.0;
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
return SolveRestoreOutcome::Continue;
}
SolveRestoreOutcome::Return(make_result(state, SolveStatus::NumericalError))
}
fn maybe_revert_mehrotra_deflection(
dx_dir: &mut Vec<f64>,
dy_dir: &mut Vec<f64>,
mehrotra_aff: &mut Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)>,
saved_rhs: &Option<Vec<f64>>,
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
) {
let Some(orig_rhs) = saved_rhs.as_ref() else { return };
let Some(kkt) = kkt_system_opt.as_ref() else { return };
let Ok((dx_orig, dy_orig)) = kkt::solve_with_custom_rhs_refined(
&kkt.matrix, kkt.n, kkt.dim, lin_solver, orig_rhs,
) else { return };
let norm_orig: f64 = l2_norm(&dx_orig);
let norm_pc: f64 = l2_norm(dx_dir);
if norm_orig <= 1e-30 || norm_pc <= 1e-30 {
return;
}
let dot = dot_product(&dx_orig, dx_dir);
let cos_angle = dot / (norm_orig * norm_pc);
if cos_angle >= 0.7 {
return;
}
log::debug!(
"Mehrotra PC deflection too large (cos={:.3}), reverting",
cos_angle
);
*dx_dir = dx_orig;
*dy_dir = dy_orig;
kkt_system_opt.as_mut().unwrap().rhs = orig_rhs.clone();
*mehrotra_aff = None;
}
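/// Solves the factored KKT system and, if the solver reports `PretendSingular`,
/// escalates through a sequence of remedies: Ruiz equilibration, a higher-quality
/// factorization setting, a small regularization of the constraint block, and
/// finally a combined primal/dual diagonal perturbation. Returns the solve result
/// together with the (possibly updated) inertia-correction deltas.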
fn solve_with_quality_escalation(
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
mut ic_delta_w: f64,
mut ic_delta_c: f64,
n: usize,
m: usize,
) -> (Result<(Vec<f64>, Vec<f64>), crate::linear_solver::SolverError>, f64, f64) {
let mut dir_result = kkt::solve_for_direction(
kkt_system_opt.as_ref().unwrap(), lin_solver, ic_delta_w, ic_delta_c,
);
if matches!(dir_result, Err(crate::linear_solver::SolverError::PretendSingular)) {
if let Some(kkt_system) = kkt_system_opt.as_mut() {
let mut ps_resolved = false;
if !ps_resolved {
if !inertia_params.use_scaling && kkt_system.scale_factors.is_none() {
inertia_params.use_scaling = true;
let scale = kkt::ruiz_equilibrate(&mut kkt_system.matrix, &mut kkt_system.rhs);
kkt_system.scale_factors = Some(scale);
if lin_solver.factor(&kkt_system.matrix).is_ok() {
dir_result = kkt::solve_for_direction(kkt_system, lin_solver, ic_delta_w, ic_delta_c);
ps_resolved = !matches!(dir_result, Err(crate::linear_solver::SolverError::PretendSingular));
}
}
if !ps_resolved && lin_solver.increase_quality() {
if lin_solver.factor(&kkt_system.matrix).is_ok() {
dir_result = kkt::solve_for_direction(kkt_system, lin_solver, ic_delta_w, ic_delta_c);
ps_resolved = !matches!(dir_result, Err(crate::linear_solver::SolverError::PretendSingular));
}
}
}
if !ps_resolved && m > 0 && ic_delta_c == 0.0 {
let dc = inertia_params.delta_c_base;
let mut perturbed = kkt_system.matrix.clone();
perturbed.add_diagonal_range(n, n + m, -dc);
if lin_solver.factor(&perturbed).is_ok() {
kkt_system.matrix = perturbed;
ic_delta_c = dc;
dir_result = kkt::solve_for_direction(kkt_system, lin_solver, ic_delta_w, ic_delta_c);
ps_resolved = !matches!(dir_result, Err(crate::linear_solver::SolverError::PretendSingular));
}
}
if !ps_resolved && matches!(dir_result, Err(crate::linear_solver::SolverError::PretendSingular)) {
let dw = if ic_delta_w == 0.0 { inertia_params.delta_w_init } else { ic_delta_w * inertia_params.delta_w_inc_fact };
let dc = if m > 0 && ic_delta_c == 0.0 { inertia_params.delta_c_base } else { ic_delta_c };
let mut perturbed = kkt_system.matrix.clone();
perturbed.add_diagonal_range(0, n, dw);
if m > 0 && dc > ic_delta_c {
perturbed.add_diagonal_range(n, n + m, -(dc - ic_delta_c));
}
if lin_solver.factor(&perturbed).is_ok() {
kkt_system.matrix = perturbed;
ic_delta_w = dw;
ic_delta_c = dc;
inertia_params.delta_w_last = dw;
dir_result = kkt::solve_for_direction(kkt_system, lin_solver, ic_delta_w, ic_delta_c);
}
}
if !inertia_params.use_scaling {
inertia_params.use_scaling = true;
}
}
}
(dir_result, ic_delta_w, ic_delta_c)
}
fn compute_affine_complementarity(
state: &SolverState,
dx_aff: &[f64],
dz_l_aff: &[f64],
dz_u_aff: &[f64],
alpha_aff: f64,
n: usize,
) -> Option<f64> {
let mut mu_aff_sum = 0.0_f64;
let mut nb: usize = 0;
for i in 0..n {
if state.x_l[i].is_finite() {
let s = (state.x[i] + alpha_aff * dx_aff[i]
- state.x_l[i]).max(1e-20);
let z = (state.z_l[i] + alpha_aff * dz_l_aff[i]).max(1e-20);
mu_aff_sum += s * z;
nb += 1;
}
if state.x_u[i].is_finite() {
let s = (state.x_u[i] - state.x[i]
- alpha_aff * dx_aff[i]).max(1e-20);
let z = (state.z_u[i] + alpha_aff * dz_u_aff[i]).max(1e-20);
mu_aff_sum += s * z;
nb += 1;
}
}
if nb == 0 {
None
} else {
Some(mu_aff_sum / nb as f64)
}
}
fn compute_affine_step_alpha(
state: &SolverState,
dx_aff: &[f64],
dz_l_aff: &[f64],
dz_u_aff: &[f64],
n: usize,
) -> f64 {
let tau_aff = 1.0 - 1e-3;
let mut alpha_aff = fraction_to_boundary_dual_z_min(state, dz_l_aff, dz_u_aff, tau_aff)
.min(1.0);
for i in 0..n {
if state.x_l[i].is_finite() && dx_aff[i] < 0.0 {
alpha_aff = alpha_aff.min(tau_aff * slack_xl(state, i) / (-dx_aff[i]));
}
if state.x_u[i].is_finite() && dx_aff[i] > 0.0 {
alpha_aff = alpha_aff.min(tau_aff * slack_xu(state, i) / dx_aff[i]);
}
}
alpha_aff.clamp(0.0, 1.0)
}
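/// Mehrotra predictor step: solves the system with the affine (predictor)
/// right-hand side, measures the affine step length and the average
/// complementarity mu_aff it would reach, then sets the centering parameter
/// sigma = (mu_aff / mu)^3 and rebuilds the RHS for the corrector solve with
/// mu_pc = max(sigma * mu, mu_min). Returns `None` if the affine solve fails or
/// the problem has no variable bounds.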
fn try_mehrotra_predictor(
state: &SolverState,
options: &SolverOptions,
kkt: &kkt::KktSystem,
lin_solver: &mut dyn LinearSolver,
iteration: usize,
n: usize,
last_mehrotra_sigma: &mut Option<f64>,
) -> Option<(Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>, f64)> {
let rhs_aff = kkt::affine_predictor_rhs(
&kkt.rhs, &state.x, &state.x_l, &state.x_u, state.mu,
);
let (dx_aff, _) = kkt::solve_with_custom_rhs_refined(
&kkt.matrix, kkt.n, kkt.dim, lin_solver, &rhs_aff,
).ok()?;
let (dz_l_aff, dz_u_aff) = recover_dz_from_state(state, &dx_aff, 0.0);
let alpha_aff = compute_affine_step_alpha(state, &dx_aff, &dz_l_aff, &dz_u_aff, n);
let mu_aff = compute_affine_complementarity(
state, &dx_aff, &dz_l_aff, &dz_u_aff, alpha_aff, n,
)?;
let sigma_mehr = (mu_aff / state.mu).powi(3).clamp(0.0, 1.0);
*last_mehrotra_sigma = Some(sigma_mehr);
let mu_pc = (sigma_mehr * state.mu).max(options.mu_min);
log::debug!(
"Mehrotra PC iter {}: σ={:.4} α_aff={:.4} μ: {:.2e}→{:.2e}",
iteration, sigma_mehr, alpha_aff, state.mu, mu_pc
);
let new_rhs = kkt::rebuild_rhs_with_mu(
&kkt.rhs, &state.x, &state.x_l, &state.x_u,
state.mu, mu_pc,
);
Some((new_rhs, dx_aff, dz_l_aff, dz_u_aff, mu_pc))
}
#[allow(clippy::too_many_arguments)]
fn solve_sparse_condensed_direction(
state: &SolverState,
sc: &kkt::SparseCondensedKktSystem,
sigma: &[f64],
n: usize,
m: usize,
use_sparse: bool,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
) -> (Vec<f64>, Vec<f64>) {
let kkt_sc = KktMatrix::Sparse(sc.matrix.clone());
let factor_ok = lin_solver.factor(&kkt_sc).is_ok();
if factor_ok {
match kkt::solve_sparse_condensed(sc, lin_solver) {
Ok(d) => return d,
Err(_) => {}
}
}
let mut kkt = assemble_kkt_from_state(state, n, m, sigma, use_sparse);
let mut fallback_solver = new_fallback_solver(use_sparse);
if let Ok((fb_dw, fb_dc)) = kkt::factor_with_inertia_correction(
&mut kkt, fallback_solver.as_mut(), inertia_params, state.mu,
) {
kkt::solve_for_direction(&kkt, fallback_solver.as_mut(), fb_dw, fb_dc)
.unwrap_or_else(|_| gradient_descent_fallback(state)
.unwrap_or_else(|| (vec![0.0; n], vec![0.0; m])))
} else {
gradient_descent_fallback(state)
.unwrap_or_else(|| (vec![0.0; n], vec![0.0; m]))
}
}
#[allow(clippy::too_many_arguments)]
fn solve_dense_condensed_direction<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
n: usize,
m: usize,
use_sparse: bool,
cond: &kkt::CondensedKktSystem,
sigma: &[f64],
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
deadline: Option<Instant>,
) -> CondensedDirectionOutcome {
let mut cond_solver = DenseLdl::new();
let t_cond_bk = Instant::now();
let cond_ok = cond_solver.bunch_kaufman_factor(&cond.matrix).is_ok();
if options.print_level >= 5 {
rip_log!("ripopt: Dense condensed BK factor n={}: {:.3}s (ok={})",
n, t_cond_bk.elapsed().as_secs_f64(), cond_ok);
}
let cond_result = if cond_ok {
kkt::solve_condensed(cond, &mut cond_solver).ok()
} else {
None
};
if let Some((dx, dy)) = cond_result {
return CondensedDirectionOutcome::Solved {
dx,
dy,
cond_solver: Some(cond_solver),
};
}
fall_back_to_full_kkt_after_condensed_failure(
state, problem, options, n, m, use_sparse, sigma,
kkt_system_opt, lin_solver, inertia_params, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
)
}
#[allow(clippy::too_many_arguments)]
fn apply_solve_failure_restoration<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
n: usize,
m: usize,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
deadline: Option<Instant>,
) -> CondensedDirectionOutcome {
match restore_after_solve_failure(
state, problem, options, n, m, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
) {
SolveRestoreOutcome::Continue => CondensedDirectionOutcome::Continue,
SolveRestoreOutcome::Return(r) => CondensedDirectionOutcome::Return(r),
}
}
fn fall_back_to_full_kkt_after_condensed_failure<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
n: usize,
m: usize,
use_sparse: bool,
sigma: &[f64],
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
deadline: Option<Instant>,
) -> CondensedDirectionOutcome {
let mut kkt = assemble_kkt_from_state(state, n, m, sigma, use_sparse);
let fb_ic = kkt::factor_with_inertia_correction(
&mut kkt, lin_solver, inertia_params, state.mu,
);
if fb_ic.is_err() {
return apply_solve_failure_restoration(
state, problem, options, n, m, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
);
}
let (fb_dw, fb_dc) = fb_ic.unwrap();
match kkt::solve_for_direction(&kkt, lin_solver, fb_dw, fb_dc) {
Ok((dx, dy)) => {
*kkt_system_opt = Some(kkt);
CondensedDirectionOutcome::Solved {
dx,
dy,
cond_solver: None,
}
}
Err(e) => {
log::warn!("KKT solve failed: {}", e);
apply_solve_failure_restoration(
state, problem, options, n, m, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
)
}
}
}
enum FullAugmentedOutcome {
Solved {
dx: Vec<f64>,
dy: Vec<f64>,
mehrotra_aff: Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)>,
},
Continue,
Return(SolveResult),
}
#[allow(clippy::too_many_arguments)]
fn solve_full_augmented_direction<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
n: usize,
m: usize,
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
ic_delta_w: f64,
ic_delta_c: f64,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
last_mehrotra_sigma: &mut Option<f64>,
deadline: Option<Instant>,
) -> FullAugmentedOutcome {
let saved_rhs = if options.mehrotra_pc {
kkt_system_opt.as_ref().map(|k| k.rhs.clone())
} else {
None
};
let mut mehrotra_applied = false;
let mut mehrotra_aff: Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)> = None;
if options.mehrotra_pc {
let has_bounds = (0..n).any(|i| state.x_l[i].is_finite() || state.x_u[i].is_finite());
if has_bounds {
let pc_result = try_mehrotra_predictor(
state, options, kkt_system_opt.as_ref().unwrap(), lin_solver,
iteration, n, last_mehrotra_sigma,
);
if let Some((new_rhs, dx_aff_v, dz_l_aff_v, dz_u_aff_v, mu_pc_used)) = pc_result {
kkt_system_opt.as_mut().unwrap().rhs = new_rhs;
mehrotra_applied = true;
mehrotra_aff = Some((dx_aff_v, dz_l_aff_v, dz_u_aff_v, mu_pc_used));
}
}
}
let (dir_result, _, _) = solve_with_quality_escalation(
kkt_system_opt, lin_solver, inertia_params, ic_delta_w, ic_delta_c, n, m,
);
let (mut dx_dir, mut dy_dir) = match dir_result {
Ok(d) => d,
Err(e) => {
log::warn!("KKT solve failed: {}", e);
if let Some(fallback) = gradient_descent_fallback(state) {
fallback
} else {
match restore_after_solve_failure(
state, problem, options, n, m, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
) {
SolveRestoreOutcome::Continue => return FullAugmentedOutcome::Continue,
SolveRestoreOutcome::Return(r) => return FullAugmentedOutcome::Return(r),
}
}
}
};
if mehrotra_applied {
maybe_revert_mehrotra_deflection(
&mut dx_dir, &mut dy_dir, &mut mehrotra_aff,
&saved_rhs, kkt_system_opt, lin_solver,
);
}
FullAugmentedOutcome::Solved { dx: dx_dir, dy: dy_dir, mehrotra_aff }
}
fn solve_for_search_direction<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
n: usize,
m: usize,
use_sparse: bool,
condensed_system: &Option<kkt::CondensedKktSystem>,
sparse_condensed_system: &Option<kkt::SparseCondensedKktSystem>,
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
sigma: &[f64],
inertia_params: &mut InertiaCorrectionParams,
ic_delta_w: f64,
ic_delta_c: f64,
filter: &Filter,
restoration: &mut RestorationPhase,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
last_mehrotra_sigma: &mut Option<f64>,
deadline: Option<Instant>,
) -> DirectionSolveDecision {
let mut cond_solver_for_soc: Option<DenseLdl> = None;
let mut mehrotra_aff: Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)> = None;
let (dx, dy) = if let Some(cond) = condensed_system.as_ref() {
match solve_dense_condensed_direction(
state, problem, options, n, m, use_sparse, cond, sigma,
kkt_system_opt, lin_solver, inertia_params, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, deadline,
) {
CondensedDirectionOutcome::Solved { dx, dy, cond_solver } => {
cond_solver_for_soc = cond_solver;
(dx, dy)
}
CondensedDirectionOutcome::Continue => return DirectionSolveDecision::Continue,
CondensedDirectionOutcome::Return(r) => return DirectionSolveDecision::Return(r),
}
} else if let Some(sc) = sparse_condensed_system.as_ref() {
solve_sparse_condensed_direction(
state, sc, sigma, n, m, use_sparse, lin_solver, inertia_params,
)
} else {
match solve_full_augmented_direction(
state, problem, options, iteration, n, m, kkt_system_opt, lin_solver,
inertia_params, ic_delta_w, ic_delta_c, filter, restoration,
lbfgs_state, lbfgs_mode, linear_constraints, last_mehrotra_sigma, deadline,
) {
FullAugmentedOutcome::Solved { dx, dy, mehrotra_aff: aff } => {
mehrotra_aff = aff;
(dx, dy)
}
FullAugmentedOutcome::Continue => return DirectionSolveDecision::Continue,
FullAugmentedOutcome::Return(r) => return DirectionSolveDecision::Return(r),
}
};
DirectionSolveDecision::Proceed {
dx,
dy,
cond_solver_for_soc,
mehrotra_aff,
}
}
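/// Inspects the bandwidth of the sparse condensed Schur complement S. If the
/// bandwidth exceeds n/2, condensing is abandoned in favor of the full augmented
/// KKT system; if the band is narrow (bandwidth^2 <= n) a banded LDL solver is
/// swapped in; otherwise the generic sparse solver is kept.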
#[allow(clippy::too_many_arguments)]
fn adjust_sparse_condensed_bandwidth<P: NlpProblem>(
state: &SolverState,
_problem: &P,
options: &SolverOptions,
n: usize,
m: usize,
use_sparse: bool,
use_sparse_condensed: bool,
sigma: &[f64],
sparse_condensed_system: &mut Option<kkt::SparseCondensedKktSystem>,
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut Box<dyn LinearSolver>,
disable_sparse_condensed: &mut bool,
) {
if !use_sparse_condensed {
return;
}
let sc_bw = sparse_condensed_system.as_ref().map(|sc| {
BandedLdl::compute_bandwidth(&sc.matrix.triplet_rows, &sc.matrix.triplet_cols)
});
let Some(bw) = sc_bw else { return };
if bw > n / 2 {
if options.print_level >= 3 {
rip_log!(
"ripopt: Sparse condensed S has bandwidth {} for n={}, switching to dense condensed KKT",
bw, n
);
}
*disable_sparse_condensed = true;
*sparse_condensed_system = None;
*kkt_system_opt = Some(assemble_kkt_from_state(state, n, m, sigma, use_sparse));
} else if bw * bw <= n {
if options.print_level >= 5 {
rip_log!("ripopt: Sparse condensed S has bandwidth {} for n={}, using banded solver", bw, n);
}
*lin_solver = Box::new(BandedLdl::new());
} else if options.print_level >= 5 {
rip_log!("ripopt: Sparse condensed S has bandwidth {} for n={}, using sparse solver", bw, n);
}
}
fn track_post_step_acceptable(state: &mut SolverState, options: &SolverOptions) {
let n = state.n;
let post_primal = state.constraint_violation();
let (post_zl_opt, post_zu_opt) = {
let mut gj = state.grad_f.clone();
accumulate_jt_y(state, &mut gj);
recover_active_set_z(state, &gj, n)
};
let post_du = dual_inf_with_z(state, &post_zl_opt, &post_zu_opt);
let post_du_unsc = compute_dual_inf_unscaled_at_state(state);
let post_compl = compute_compl_err_at_state(state);
let post_compl_opt = compl_err_with_z(state, &post_zl_opt, &post_zu_opt);
let post_compl_best = post_compl.min(post_compl_opt);
let post_sd = compute_s_d_at_state(state);
let post_near_scaled = post_primal <= 100.0 * options.tol
&& post_du <= 100.0 * options.tol * post_sd
&& post_compl_best <= 100.0 * options.tol * post_sd;
let post_near_unscaled = post_primal <= 10.0 * options.constr_viol_tol
&& post_du_unsc <= 10.0 * options.dual_inf_tol
&& post_compl_best <= 10.0 * options.compl_inf_tol;
if post_near_scaled && post_near_unscaled {
state.consecutive_acceptable += 1;
}
}
enum RestorationCascadeDecision {
Continue,
Return(SolveResult),
}
#[allow(clippy::too_many_arguments)]
fn try_nlp_restoration_phase<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
iteration: usize,
fail_count: usize,
n: usize,
m: usize,
start_time: Instant,
early_timeout: f64,
theta_current: f64,
) -> bool {
let kkt_dim = n + m;
let skip_nlp_restoration = iteration < 3
&& options.early_stall_timeout > 0.0
&& start_time.elapsed().as_secs_f64() > early_timeout * 0.5;
if !((fail_count == 2 || fail_count == 4)
&& !options.disable_nlp_restoration
&& kkt_dim <= 50000
&& !skip_nlp_restoration)
{
return false;
}
state.diagnostics.nlp_restoration_count += 1;
let (x_nlp, outcome) = attempt_nlp_restoration(
problem, state, filter, options, theta_current, start_time,
);
match outcome {
RestorationOutcome::Success => {
apply_restoration_success(
state, filter, mu_state, options, n, m,
problem, &x_nlp,
linear_constraints, lbfgs_mode, lbfgs_state,
);
true
}
RestorationOutcome::LocalInfeasibility | RestorationOutcome::Failed => {
false
}
}
}
#[allow(clippy::too_many_arguments)]
fn try_gn_restoration<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
restoration: &mut RestorationPhase,
n: usize,
m: usize,
deadline: Option<Instant>,
) -> bool {
let (x_rest, gn_success) = restoration.restore(
&state.x,
&state.x_l,
&state.x_u,
&state.g_l,
&state.g_u,
&state.jac_rows,
&state.jac_cols,
n,
m,
options,
&|theta, phi| filter.is_acceptable(theta, phi),
&|x_eval, g_out| problem.constraints(x_eval, true, g_out),
&|x_eval, jac_out| problem.jacobian_values(x_eval, true, jac_out),
Some(&|x_eval: &[f64], obj_out: &mut f64| problem.objective(x_eval, true, obj_out)),
deadline,
);
if gn_success {
state.diagnostics.restoration_count += 1;
apply_restoration_success(
state, filter, mu_state, options, n, m, problem, &x_rest,
linear_constraints, lbfgs_mode, lbfgs_state,
);
true
} else {
false
}
}
#[allow(clippy::too_many_arguments)]
fn apply_restoration_recovery_strategy<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
fail_count: usize,
n: usize,
) {
log::debug!("Restoration failed (attempt #{}), trying recovery", fail_count);
let mu_factors: [f64; 6] = [10.0, 0.1, 100.0, 0.01, 1000.0, 0.001];
match fail_count {
1 => apply_first_restoration_failure_mu_update(state, mu_state, options),
_ => {
let factor = mu_factors[(fail_count - 2) % mu_factors.len()];
state.mu = (state.mu * factor).max(options.mu_min).min(1e5);
}
}
reset_filter_with_current_theta(state, filter);
inertia_params.delta_w_last = 0.0;
if fail_count >= 3 {
perturb_x_after_repeated_restoration_failures(
state, problem, lbfgs_state, fail_count, n,
linear_constraints, lbfgs_mode,
);
}
}
fn apply_first_restoration_failure_mu_update(
state: &mut SolverState,
mu_state: &mut MuState,
options: &SolverOptions,
) {
if mu_state.mode == MuMode::Free {
switch_mu_mode(state, mu_state, MuMode::Fixed);
let avg_compl = compute_avg_complementarity(state);
if avg_compl > 0.0 {
state.mu = (options.adaptive_mu_monotone_init_factor * avg_compl)
.clamp(options.mu_min, 1e5);
} else {
state.mu = (options.mu_linear_decrease_factor * state.mu)
.max(options.mu_min);
}
} else {
let new_mu = (options.mu_linear_decrease_factor * state.mu)
.min(state.mu.powf(options.mu_superlinear_decrease_power))
.max(options.mu_min);
state.mu = new_mu;
}
}
#[allow(clippy::too_many_arguments)]
fn perturb_x_after_repeated_restoration_failures<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
lbfgs_state: &mut Option<LbfgsIpmState>,
fail_count: usize,
n: usize,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) {
for i in 0..n {
let range = if state.x_l[i].is_finite() && state.x_u[i].is_finite() {
state.x_u[i] - state.x_l[i]
} else {
state.x[i].abs().max(1.0)
};
let sign = if (i * 7 + fail_count * 13) % 3 == 0 { -1.0 } else { 1.0 };
state.x[i] += sign * 1e-4 * range;
clamp_to_open_bounds(&mut state.x, &state.x_l, &state.x_u, i);
}
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
}
fn classify_exhausted_restoration_attempt(
state: &SolverState,
options: &SolverOptions,
iteration: usize,
fail_count: usize,
feas: &FeasibilityTracker,
) -> SolveResult {
log::warn!(
"Restoration failed at iteration {} (attempt #{})",
iteration, fail_count
);
let current_theta = state.constraint_violation();
if current_theta > options.constr_viol_tol && !feas.ever_feasible {
let grad_theta_norm = compute_grad_theta_norm(state);
let stationarity_tol = 1e-4 * current_theta.max(1.0);
if grad_theta_norm < stationarity_tol {
log::info!(
"Local infeasibility detected: theta={:.2e}, ||∇theta||={:.2e}",
current_theta, grad_theta_norm
);
return make_result(state, SolveStatus::LocalInfeasibility);
}
}
if !feas.ever_feasible && current_theta > 1e4 && iteration > 500
&& feas.history.len() >= feas.history_len
{
let min_theta = slice_min(&feas.history);
if current_theta > 0.01 * min_theta {
return make_result(state, SolveStatus::Infeasible);
}
}
make_result(state, SolveStatus::RestorationFailed)
}
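/// Recovery cascade run after the line search fails to produce an acceptable
/// step: first a Gauss-Newton style feasibility restoration, then (on the second
/// and fourth failures) a full NLP restoration phase, and finally a set of
/// mu / filter / perturbation heuristics. Once the attempt budget is exhausted the
/// failure is classified as local infeasibility, infeasibility, or restoration
/// failure.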
#[allow(clippy::too_many_arguments)]
fn run_post_ls_restoration_cascade<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
lbfgs_mode: bool,
linear_constraints: Option<&[bool]>,
restoration: &mut RestorationPhase,
iteration: usize,
n: usize,
m: usize,
start_time: Instant,
deadline: Option<Instant>,
early_timeout: f64,
feas: &FeasibilityTracker,
theta_current: f64,
phi_current: f64,
) -> RestorationCascadeDecision {
state.diagnostics.filter_rejects += 1;
mu_state.consecutive_soft_restoration = 0;
filter.add(theta_current, phi_current);
filter.augment_for_restoration(theta_current, phi_current);
log::debug!("Line search failed at iteration {}, entering restoration", iteration);
if try_gn_restoration(
state, problem, options, filter, mu_state, lbfgs_state, lbfgs_mode,
linear_constraints, restoration, n, m, deadline,
) {
return RestorationCascadeDecision::Continue;
}
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - start_time.elapsed().as_secs_f64();
if remaining < 1.0 {
return RestorationCascadeDecision::Return(make_result(state, SolveStatus::MaxIterations));
}
}
mu_state.consecutive_restoration_failures += 1;
let fail_count = mu_state.consecutive_restoration_failures;
if try_nlp_restoration_phase(
state, problem, options, filter, mu_state, lbfgs_state, lbfgs_mode,
linear_constraints, iteration, fail_count, n, m, start_time,
early_timeout, theta_current,
) {
return RestorationCascadeDecision::Continue;
}
let kkt_dim = n + m;
let max_restore_attempts = if kkt_dim > 50000 { 3 } else { 6 };
if fail_count > max_restore_attempts {
return RestorationCascadeDecision::Return(classify_exhausted_restoration_attempt(
state, options, iteration, fail_count, feas,
));
}
apply_restoration_recovery_strategy(
state, problem, options, filter, mu_state, inertia_params,
lbfgs_state, lbfgs_mode, linear_constraints, fail_count, n,
);
RestorationCascadeDecision::Continue
}
fn try_alpha_halving_post_step_recovery<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
lbfgs_state: &mut Option<LbfgsIpmState>,
x_pre_step: &[f64],
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
n: usize,
) -> bool {
let mut retry_alpha = state.alpha_primal;
let mut retry_alpha_dual = state.alpha_dual;
for _ in 0..5 {
retry_alpha *= 0.5;
retry_alpha_dual *= 0.5;
for i in 0..n {
state.x[i] = x_pre_step[i] + retry_alpha * state.dx[i];
}
if state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode) {
state.alpha_primal = retry_alpha;
state.alpha_dual = retry_alpha_dual;
update_lbfgs_hessian(lbfgs_state, state);
return true;
}
}
false
}
enum PostStepEvalDecision {
Proceed,
Continue,
Return(SolveResult),
}
fn reevaluate_after_step<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
lbfgs_state: &mut Option<LbfgsIpmState>,
filter: &mut Filter,
restoration: &mut RestorationPhase,
timings: &mut PhaseTimings,
x_pre_step: &[f64],
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
deadline: Option<Instant>,
) -> PostStepEvalDecision {
let n = state.n;
let m = state.m;
let t_eval = Instant::now();
let eval_ok = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
if eval_ok {
update_lbfgs_hessian(lbfgs_state, state);
}
timings.problem_eval += t_eval.elapsed();
if eval_ok {
return PostStepEvalDecision::Proceed;
}
if try_alpha_halving_post_step_recovery(
state, problem, lbfgs_state, x_pre_step,
linear_constraints, lbfgs_mode, n,
) {
return PostStepEvalDecision::Continue;
}
let (x_rest, success) = restoration.restore(
&state.x, &state.x_l, &state.x_u, &state.g_l, &state.g_u,
&state.jac_rows, &state.jac_cols, n, m, options,
&|theta, phi| filter.is_acceptable(theta, phi),
&|x_eval, g_out| problem.constraints(x_eval, true, g_out),
&|x_eval, jac_out| problem.jacobian_values(x_eval, true, jac_out),
Some(&|x_eval: &[f64], obj_out: &mut f64| problem.objective(x_eval, true, obj_out)),
deadline,
);
if success {
state.x = x_rest;
state.alpha_primal = 0.0;
if state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode) {
update_lbfgs_hessian(lbfgs_state, state);
return PostStepEvalDecision::Continue;
}
}
PostStepEvalDecision::Return(make_result(state, SolveStatus::NumericalError))
}
fn reset_slack_multipliers(state: &mut SolverState, mu_ks: f64) {
let m = state.m;
for i in 0..m {
if state.v_l[i] > 0.0 && state.g_l[i].is_finite() {
state.v_l[i] = mu_ks / slack_gl(state, i);
}
if state.v_u[i] > 0.0 && state.g_u[i].is_finite() {
state.v_u[i] = mu_ks / slack_gu(state, i);
}
}
}
const MAX_SOFT_RESTO_ITERS: usize = 10;
struct SoftRestoSnapshot {
x: Vec<f64>,
y: Vec<f64>,
z_l: Vec<f64>,
z_u: Vec<f64>,
obj: f64,
g: Vec<f64>,
grad_f: Vec<f64>,
jac_vals: Vec<f64>,
alpha_primal: f64,
}
impl SoftRestoSnapshot {
fn take(state: &SolverState) -> Self {
Self {
x: state.x.clone(),
y: state.y.clone(),
z_l: state.z_l.clone(),
z_u: state.z_u.clone(),
obj: state.obj,
g: state.g.clone(),
grad_f: state.grad_f.clone(),
jac_vals: state.jac_vals.clone(),
alpha_primal: state.alpha_primal,
}
}
fn restore(self, state: &mut SolverState) {
state.x = self.x;
state.y = self.y;
state.z_l = self.z_l;
state.z_u = self.z_u;
state.obj = self.obj;
state.g = self.g;
state.grad_f = self.grad_f;
state.jac_vals = self.jac_vals;
state.alpha_primal = self.alpha_primal;
}
}
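/// "Soft" restoration: instead of entering the full restoration phase, take the
/// largest fraction-to-boundary step along the current direction and accept it if
/// either the filter accepts the trial point or the primal-dual error E_mu shrinks
/// by at least a factor of 0.9999. The previous iterate is snapshotted and
/// restored on rejection or evaluation failure; at most MAX_SOFT_RESTO_ITERS
/// consecutive soft restorations are allowed.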
#[allow(clippy::too_many_arguments)]
fn attempt_soft_restoration<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
alpha_primal_max: f64,
alpha_dual_max: f64,
theta_current: f64,
phi_current: f64,
) -> bool {
if mu_state.consecutive_soft_restoration >= MAX_SOFT_RESTO_ITERS {
return false;
}
let n = state.n;
let m = state.m;
let alpha_p = alpha_primal_max.min(alpha_dual_max);
if !alpha_p.is_finite() || alpha_p <= 0.0 {
return false;
}
let alpha_d = alpha_dual_max;
let pderror_curr = compute_pderror_e_mu(state, state.mu);
let snapshot = SoftRestoSnapshot::take(state);
let x_trial = compute_clamped_trial_x(state, &state.dx, alpha_p);
state.x = x_trial;
for i in 0..m {
state.y[i] += alpha_d * state.dy[i];
}
for i in 0..n {
state.z_l[i] = (state.z_l[i] + alpha_d * state.dz_l[i]).max(0.0);
state.z_u[i] = (state.z_u[i] + alpha_d * state.dz_u[i]).max(0.0);
}
let eval_ok = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
state.evaluate_with_linear(problem, 1.0, linear_constraints, true)
}));
let evaluated = matches!(eval_ok, Ok(true));
if !evaluated || !state.obj.is_finite() {
snapshot.restore(state);
return false;
}
let theta_trial = theta_for_g(state, &state.g);
let phi_trial = compute_barrier_phi(
state.obj, &state.x, &state.g, state, n, m, options.constraint_slack_barrier,
);
let filter_ok = filter.is_acceptable(theta_trial, phi_trial);
let pderror_trial = compute_pderror_e_mu(state, state.mu);
let pderror_ok = pderror_trial <= 0.9999 * pderror_curr;
if filter_ok || pderror_ok {
log::debug!(
"Soft restoration accepted (filter={} pderror={}): theta {:.2e} -> {:.2e}, E_mu {:.2e} -> {:.2e}",
filter_ok, pderror_ok, theta_current, theta_trial, pderror_curr, pderror_trial,
);
let _ = lbfgs_mode;
state.alpha_primal = alpha_p;
mu_state.consecutive_soft_restoration += 1;
filter.add(theta_current, phi_current);
true
} else {
snapshot.restore(state);
false
}
}
struct BestFeasibleIterate {
obj: f64,
x: Option<Vec<f64>>,
}
impl BestFeasibleIterate {
fn new() -> Self {
Self { obj: f64::INFINITY, x: None }
}
}
fn track_best_feasible(
state: &SolverState,
options: &SolverOptions,
best: &mut BestFeasibleIterate,
) {
let theta_now = state.constraint_violation();
if theta_now < options.constr_viol_tol && state.obj < best.obj {
best.obj = state.obj;
best.x = Some(state.x.clone());
}
}
fn compute_alpha_max(
state: &SolverState,
options: &SolverOptions,
mu_state: &MuState,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
) -> (f64, f64, f64) {
let tau = compute_tau(state, options, mu_state, primal_inf, dual_inf, compl_inf);
let alpha_primal_max =
fraction_to_boundary_primal_x(state, &state.dx, tau).clamp(0.0, 1.0);
let alpha_dual_max = fraction_to_boundary_dual_z_min(state, &state.dz_l, &state.dz_u, tau);
(tau, alpha_primal_max, alpha_dual_max)
}
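/// Detects stalling: if the largest relative component of the primal step is below
/// 1e-14 while the primal infeasibility is already small, the iteration counts as
/// a tiny step. After two consecutive tiny steps, mu is forced down and the filter
/// is reset to give the barrier subproblem room to move again.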
fn detect_tiny_step(
state: &mut SolverState,
options: &SolverOptions,
mu_state: &mut MuState,
filter: &mut Filter,
consecutive_tiny_steps: &mut usize,
alpha_primal_max: f64,
primal_inf: f64,
) {
let n = state.n;
let max_rel_step: f64 = (0..n)
.map(|i| (alpha_primal_max * state.dx[i]).abs() / (state.x[i].abs() + 1.0))
.fold(0.0f64, f64::max);
if max_rel_step < 1e-14 && primal_inf < 1e-4 {
*consecutive_tiny_steps += 1;
mu_state.tiny_step = true;
if *consecutive_tiny_steps >= 2 {
let new_mu = (options.mu_linear_decrease_factor * state.mu)
.min(state.mu.powf(options.mu_superlinear_decrease_power))
.max(options.mu_min);
if (new_mu - state.mu).abs() < 1e-20 {
log::debug!("Tiny step with mu at minimum, checking acceptability");
} else {
state.mu = new_mu;
reset_filter_with_current_theta(state, filter);
log::debug!("Tiny step detected, forced mu decrease to {:.2e}", state.mu);
}
*consecutive_tiny_steps = 0;
}
} else {
*consecutive_tiny_steps = 0;
mu_state.tiny_step = false;
}
}
enum FactorDecision {
Proceed { ic_delta_w: f64, ic_delta_c: f64 },
Continue,
Return(SolveResult),
}
fn try_early_perturbation_recovery<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
iteration: usize,
n: usize,
m: usize,
use_sparse: bool,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
filter: &mut Filter,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> bool {
if iteration >= 5 {
return false;
}
for &perturb_scale in &[1e-4, 1e-3, 1e-2, 5e-2, 1e-1] {
let x_saved = state.x.clone();
for i in 0..n {
let mag = state.x[i].abs().max(1.0);
let sign = if (i * 7 + iteration * 13 + (perturb_scale * 1e4) as usize * 3) % 3 == 0 {
-1.0
} else {
1.0
};
state.x[i] += sign * perturb_scale * mag;
clamp_to_open_bounds(&mut state.x, &state.x_l, &state.x_u, i);
}
reseed_bound_multipliers_from_mu(state, state.mu);
let pert_eval_ok = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
if pert_eval_ok && obj_and_grad_finite(state) {
let sigma_p = compute_sigma_from_state(state);
let mut kkt_p = assemble_kkt_from_state(state, n, m, &sigma_p, use_sparse);
if kkt::factor_with_inertia_correction(
&mut kkt_p, lin_solver, inertia_params, state.mu,
).is_ok() {
log::debug!(
"Early perturbation (scale={:.0e}) recovered factorization at iter {}",
perturb_scale, iteration
);
reset_filter_with_current_theta(state, filter);
return true;
}
}
state.x.copy_from_slice(&x_saved);
}
false
}
fn try_gradient_descent_fallback<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
n: usize,
lbfgs_state: &mut Option<LbfgsIpmState>,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> bool {
let Some(fallback) = gradient_descent_fallback(state) else {
return false;
};
install_step_directions(state, fallback.0, fallback.1, vec![0.0; n], vec![0.0; n]);
let mut alpha_fb = 1.0;
let obj_current = state.obj;
let mut fb_accepted = false;
for _ in 0..20 {
let x_trial = compute_clamped_trial_x(state, &state.dx, alpha_fb);
let mut obj_trial = f64::INFINITY;
let obj_ok = problem.objective(&x_trial, true, &mut obj_trial);
if obj_ok && obj_trial.is_finite() && obj_trial < obj_current {
state.x = x_trial;
state.obj = obj_trial;
state.alpha_primal = alpha_fb;
fb_accepted = true;
break;
}
alpha_fb *= 0.5;
}
if fb_accepted {
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
return true;
}
false
}
#[allow(clippy::too_many_arguments)]
fn recover_from_factor_failure<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
n: usize,
m: usize,
use_sparse: bool,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
filter: &mut Filter,
restoration: &mut RestorationPhase,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
deadline: Option<Instant>,
) -> FactorDecision {
if try_early_perturbation_recovery(
state, problem, iteration, n, m, use_sparse,
lin_solver, inertia_params, lbfgs_state, filter,
linear_constraints, lbfgs_mode,
) {
return FactorDecision::Continue;
}
if try_gradient_descent_fallback(
state, problem, n, lbfgs_state, linear_constraints, lbfgs_mode,
) {
return FactorDecision::Continue;
}
let (x_rest, success) = restoration.restore(
&state.x, &state.x_l, &state.x_u, &state.g_l, &state.g_u,
&state.jac_rows, &state.jac_cols, n, m, options,
&|theta, phi| filter.is_acceptable(theta, phi),
&|x_eval, g_out| problem.constraints(x_eval, true, g_out),
&|x_eval, jac_out| problem.jacobian_values(x_eval, true, jac_out),
Some(&|x_eval: &[f64], obj_out: &mut f64| problem.objective(x_eval, true, obj_out)),
deadline,
);
if success {
state.x = x_rest;
state.alpha_primal = 0.0;
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
return FactorDecision::Continue;
}
if try_last_resort_perturbation(
state, problem, iteration, n, lbfgs_state, filter,
linear_constraints, lbfgs_mode,
) {
return FactorDecision::Continue;
}
FactorDecision::Return(make_result(state, SolveStatus::NumericalError))
}
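/// Factors the full augmented KKT matrix with inertia correction, timing the
/// factorization and optionally dumping the matrix for offline inspection. On
/// failure, control passes to `recover_from_factor_failure` (early perturbation,
/// gradient-descent fallback, feasibility restoration, last-resort perturbation).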
fn factor_kkt_with_recovery<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
n: usize,
m: usize,
use_sparse: bool,
kkt_system_opt: &mut Option<kkt::KktSystem>,
lin_solver: &mut dyn LinearSolver,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
filter: &mut Filter,
restoration: &mut RestorationPhase,
timings: &mut PhaseTimings,
prev_ic_delta_w: &mut f64,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
deadline: Option<Instant>,
) -> FactorDecision {
let mut ic_delta_w = 0.0f64;
let mut ic_delta_c = 0.0f64;
let Some(kkt_system) = kkt_system_opt.as_mut() else {
return FactorDecision::Proceed { ic_delta_w, ic_delta_c };
};
let t_fact = Instant::now();
if options.print_level >= 5 {
let dim = match &kkt_system.matrix {
KktMatrix::Dense(d) => d.n,
KktMatrix::Sparse(s) => s.n,
};
let nnz = match &kkt_system.matrix {
KktMatrix::Dense(d) => d.n * (d.n + 1) / 2,
KktMatrix::Sparse(s) => s.triplet_rows.len(),
};
rip_log!("ripopt: Factoring KKT dim={} nnz={}...", dim, nnz);
}
let inertia_result =
kkt::factor_with_inertia_correction(kkt_system, lin_solver, inertia_params, state.mu);
if options.print_level >= 5 {
rip_log!("ripopt: KKT factorization took {:.3}s (ok={})",
t_fact.elapsed().as_secs_f64(), inertia_result.is_ok());
}
timings.factorization += t_fact.elapsed();
if let Ok((dw, dc)) = &inertia_result {
ic_delta_w = *dw;
ic_delta_c = *dc;
*prev_ic_delta_w = *dw;
}
if let Some(ref dump_dir) = options.kkt_dump_dir {
if inertia_result.is_ok() {
dump_kkt_matrix(
dump_dir,
&options.kkt_dump_name,
iteration,
kkt_system,
Some((n, m, 0)),
ic_delta_w,
ic_delta_c,
);
}
}
let Err(e) = inertia_result else {
return FactorDecision::Proceed { ic_delta_w, ic_delta_c };
};
log::warn!("KKT factorization failed: {}", e);
recover_from_factor_failure(
state, problem, options, iteration, n, m, use_sparse,
lin_solver, inertia_params, lbfgs_state, filter, restoration,
linear_constraints, lbfgs_mode, deadline,
)
}
fn try_last_resort_perturbation<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
iteration: usize,
n: usize,
lbfgs_state: &mut Option<LbfgsIpmState>,
filter: &mut Filter,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> bool {
for &perturb_scale in &[1e-3, 1e-2, 1e-1] {
for i in 0..n {
let mag = state.x[i].abs().max(1.0);
let sign = if (i * 7 + iteration * 13) % 3 == 0 { -1.0 } else { 1.0 };
state.x[i] += sign * perturb_scale * mag;
clamp_to_open_bounds(&mut state.x, &state.x_l, &state.x_u, i);
}
let pert2_ok = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
if pert2_ok && state.obj.is_finite() {
reset_filter_with_current_theta(state, filter);
return true;
}
}
false
}
fn compute_centrality_xi(state: &SolverState, avg_compl: f64) -> f64 {
let mut min_compl = f64::INFINITY;
for i in 0..state.n {
if state.x_l[i].is_finite() {
min_compl = min_compl.min(slack_xl(state, i) * state.z_l[i]);
}
if state.x_u[i].is_finite() {
min_compl = min_compl.min(slack_xu(state, i) * state.z_u[i]);
}
}
if avg_compl > 0.0 && min_compl.is_finite() {
(min_compl / avg_compl).clamp(0.0, 1.0)
} else {
1.0
}
}
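/// LOQO-style adaptive mu oracle: from the centrality measure
/// xi = min_i(s_i * z_i) / avg_compl it forms
/// sigma = 0.1 * min(0.05 * (1 - xi) / xi, 2)^3 and proposes mu = sigma * avg_compl,
/// bounded below by the monotone decrease target so the adaptive rule never cuts
/// mu faster than the fixed-mode update would in a single step.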
fn compute_loqo_mu(
state: &SolverState,
options: &SolverOptions,
avg_compl: f64,
) -> f64 {
let barrier_err = compute_barrier_error(state);
let mu_floor = if barrier_err <= options.barrier_tol_factor * state.mu {
options.mu_min
} else {
(state.mu / 5.0).max(options.mu_min)
};
let xi = compute_centrality_xi(state, avg_compl);
let ratio = if xi > 1e-20 {
(0.05 * (1.0 - xi) / xi).min(2.0)
} else {
2.0
};
let sigma = 0.1 * ratio.powi(3);
let loqo_mu = sigma * avg_compl;
let monotone_floor =
(options.mu_linear_decrease_factor * state.mu)
.min(state.mu.powf(options.mu_superlinear_decrease_power));
let new_mu = loqo_mu
.max(monotone_floor)
.clamp(mu_floor, 1e5);
if options.print_level >= 5 {
rip_log!("ripopt: mu loqo: xi={:.4} sigma={:.4} avg_compl={:.3e} floor={:.3e} -> mu={:.3e}",
xi, sigma, avg_compl, monotone_floor, new_mu);
}
new_mu
}
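/// Top-level barrier parameter update. Problems without variable bounds simply take
/// a superlinear mu decrease. Otherwise the squared KKT error measures progress, a
/// short window of dual infeasibility values is kept to detect stagnation, and the
/// update is delegated to the free (adaptive) or fixed (monotone) mode handler
/// according to `mu_state.mode`.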
fn update_barrier_parameter(
state: &mut SolverState,
mu_state: &mut MuState,
filter: &mut Filter,
last_mehrotra_sigma: &mut Option<f64>,
options: &SolverOptions,
) {
let n = state.n;
let has_bounds = (0..n).any(|i| state.x_l[i].is_finite() || state.x_u[i].is_finite());
if !has_bounds {
state.mu = state.mu.powf(options.mu_superlinear_decrease_power).max(options.mu_min);
return;
}
let kkt_error = {
let pi = state.constraint_violation();
let di = compute_dual_inf_at_state(state);
let ci = compute_compl_err_at_state(state);
pi * pi + di * di + ci * ci
};
let sufficient = mu_state.check_sufficient_progress(kkt_error);
{
let du_now = compute_dual_inf_at_state(state);
if mu_state.dual_inf_window.len() >= 3 {
mu_state.dual_inf_window.remove(0);
}
mu_state.dual_inf_window.push(du_now);
}
let barrier_err_for_gate = compute_barrier_error(state);
let barrier_subproblem_solved =
barrier_err_for_gate <= options.barrier_tol_factor * state.mu;
match mu_state.mode {
MuMode::Free => {
update_barrier_parameter_free_mode(
state, mu_state, filter, last_mehrotra_sigma, options,
sufficient, kkt_error, barrier_subproblem_solved,
);
}
MuMode::Fixed => {
update_barrier_parameter_fixed_mode(
state, mu_state, filter, options, sufficient, kkt_error,
);
}
}
}
fn compute_du_stagnant_in_free_mode(mu_state: &MuState, options: &SolverOptions) -> bool {
if mu_state.dual_inf_window.len() < 3 {
return false;
}
let w = &mu_state.dual_inf_window;
let recent = w[w.len() - 1];
let oldest = w[w.len() - 3];
recent >= 0.9 * oldest && recent > options.tol * 100.0
}
fn switch_mu_mode(state: &mut SolverState, mu_state: &mut MuState, new_mode: MuMode) {
state.diagnostics.mu_mode_switches += 1;
mu_state.mode = new_mode;
mu_state.first_iter_in_mode = true;
}
fn switch_to_fixed_mode_with_adaptive_init(
state: &mut SolverState,
mu_state: &mut MuState,
filter: &mut Filter,
options: &SolverOptions,
) {
mu_state.consecutive_insufficient = 0;
log::debug!("Switching to fixed mu mode (insufficient progress or tiny step)");
switch_mu_mode(state, mu_state, MuMode::Fixed);
let avg_compl = compute_avg_complementarity(state);
if avg_compl > 0.0 {
state.mu = (options.adaptive_mu_monotone_init_factor * avg_compl)
.clamp(options.mu_min, 1e5);
} else {
state.mu = (options.mu_linear_decrease_factor * state.mu)
.max(options.mu_min);
}
reset_filter_with_current_theta(state, filter);
}
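/// Free-mode update on sufficient progress: take mu from the LOQO quality function or
/// from avg_compl / kappa, floored at mu_min (or mu/5 while the barrier subproblem is
/// not yet solved), then reset the filter.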
fn apply_free_mode_sufficient_progress_update(
state: &mut SolverState,
mu_state: &mut MuState,
filter: &mut Filter,
options: &SolverOptions,
kkt_error: f64,
) {
mu_state.consecutive_insufficient = 0;
mu_state.remember_accepted(kkt_error);
let avg_compl = compute_avg_complementarity(state);
if options.mu_oracle_quality_function && avg_compl > 0.0 {
state.mu = compute_loqo_mu(state, options, avg_compl);
} else if avg_compl > 0.0 {
let barrier_err = compute_barrier_error(state);
let mu_floor = if barrier_err <= options.barrier_tol_factor * state.mu {
options.mu_min
} else {
(state.mu / 5.0).max(options.mu_min)
};
state.mu = (avg_compl / options.kappa).clamp(mu_floor, 1e5);
} else {
state.mu = (options.mu_linear_decrease_factor * state.mu)
.max(options.mu_min);
}
reset_filter_with_current_theta(state, filter);
}
fn apply_free_mode_conservative_decrease(state: &mut SolverState, options: &SolverOptions) {
let avg_compl = compute_avg_complementarity(state);
if avg_compl > 0.0 {
let mu_floor = options.mu_min;
state.mu = (avg_compl / options.kappa).clamp(mu_floor, 1e5);
} else {
state.mu = (options.mu_linear_decrease_factor * state.mu)
.max(options.mu_min);
}
}
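/// Adaptive (free) mode mu update: on sufficient progress take the aggressive update,
/// otherwise fall back to a conservative decrease, or switch to fixed mode after two
/// consecutive insufficient iterations or dual-infeasibility stagnation.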
#[allow(clippy::too_many_arguments)]
fn update_barrier_parameter_free_mode(
state: &mut SolverState,
mu_state: &mut MuState,
filter: &mut Filter,
last_mehrotra_sigma: &mut Option<f64>,
options: &SolverOptions,
sufficient: bool,
kkt_error: f64,
barrier_subproblem_solved: bool,
) {
let _sigma_mu = last_mehrotra_sigma.take();
if sufficient && !mu_state.tiny_step && barrier_subproblem_solved {
apply_free_mode_sufficient_progress_update(
state, mu_state, filter, options, kkt_error,
);
} else {
let du_stagnant = compute_du_stagnant_in_free_mode(mu_state, options);
mu_state.consecutive_insufficient += 1;
if mu_state.consecutive_insufficient >= 2 || du_stagnant {
switch_to_fixed_mode_with_adaptive_init(state, mu_state, filter, options);
} else if barrier_subproblem_solved {
apply_free_mode_conservative_decrease(state, options);
}
}
}
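/// Monotone (fixed) mode mu update: switch back to free mode once sufficient progress
/// resumes (adaptive strategy only), otherwise decrease mu by the linear/superlinear
/// rule whenever the barrier subproblem is solved or the step is tiny.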
fn update_barrier_parameter_fixed_mode(
state: &mut SolverState,
mu_state: &mut MuState,
filter: &mut Filter,
options: &SolverOptions,
sufficient: bool,
kkt_error: f64,
) {
if options.mu_strategy_adaptive && sufficient && !mu_state.tiny_step && !mu_state.first_iter_in_mode {
log::debug!("Switching back to free mu mode (sufficient progress)");
switch_mu_mode(state, mu_state, MuMode::Free);
mu_state.remember_accepted(kkt_error);
} else {
mu_state.first_iter_in_mode = false;
let barrier_err = compute_barrier_error(state);
if barrier_err <= options.barrier_tol_factor * state.mu || mu_state.tiny_step {
let new_mu = (options.mu_linear_decrease_factor * state.mu)
.min(state.mu.powf(options.mu_superlinear_decrease_power))
.max(options.mu_min);
if !(mu_state.tiny_step && (new_mu - state.mu).abs() < 1e-20) {
state.mu = new_mu;
reset_filter_with_current_theta(state, filter);
log::debug!("Fixed mode: mu decreased to {:.2e}", state.mu);
}
}
}
}
struct IterateAveragingState {
du_history: Vec<f64>,
iterate_history: Vec<(Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>)>,
tried: bool,
}
impl IterateAveragingState {
fn new() -> Self {
Self {
du_history: Vec::with_capacity(AVG_WINDOW + 1),
iterate_history: Vec::new(),
tried: false,
}
}
}
struct ConvergenceWorkspace<'a> {
avg: &'a mut IterateAveragingState,
tried_active_set: &'a mut bool,
tried_compl_polish: &'a mut bool,
}
struct SavedIterate {
x: Vec<f64>,
y: Vec<f64>,
z_l: Vec<f64>,
z_u: Vec<f64>,
}
impl SavedIterate {
fn snapshot(state: &SolverState) -> Self {
Self {
x: state.x.clone(),
y: state.y.clone(),
z_l: state.z_l.clone(),
z_u: state.z_u.clone(),
}
}
fn restore_and_reeval<P: NlpProblem>(
&self,
state: &mut SolverState,
problem: &P,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) {
state.x.copy_from_slice(&self.x);
state.y.copy_from_slice(&self.y);
state.z_l.copy_from_slice(&self.z_l);
state.z_u.copy_from_slice(&self.z_u);
let _ = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
}
}
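/// Arithmetic mean of the stored iterate history, with x clamped strictly inside its
/// bounds and the bound multipliers projected onto the nonnegative orthant.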
fn compute_iterate_average(
iterate_history: &[(Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>)],
state: &SolverState,
n: usize,
m: usize,
) -> (Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>) {
let len = iterate_history.len() as f64;
let mut avg_x = vec![0.0; n];
let mut avg_y = vec![0.0; m];
let mut avg_zl = vec![0.0; n];
let mut avg_zu = vec![0.0; n];
for (hx, hy, hzl, hzu) in iterate_history.iter() {
for i in 0..n { avg_x[i] += hx[i] / len; }
for i in 0..m { avg_y[i] += hy[i] / len; }
for i in 0..n { avg_zl[i] += hzl[i] / len; }
for i in 0..n { avg_zu[i] += hzu[i] / len; }
}
for i in 0..n {
avg_x[i] = avg_x[i].clamp(
if state.x_l[i].is_finite() { state.x_l[i] + 1e-15 } else { f64::NEG_INFINITY },
if state.x_u[i].is_finite() { state.x_u[i] - 1e-15 } else { f64::INFINITY },
);
avg_zl[i] = avg_zl[i].max(0.0);
avg_zu[i] = avg_zu[i].max(0.0);
}
(avg_x, avg_y, avg_zl, avg_zu)
}
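/// Counts direction reversals (sign changes of consecutive differences) in the recent
/// dual-infeasibility history; used to detect oscillation.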
fn count_du_history_sign_changes(du_history: &[f64]) -> usize {
let mut sign_changes = 0;
for w in 1..du_history.len().saturating_sub(1) {
let d1 = du_history[w] - du_history[w - 1];
let d2 = du_history[w + 1] - du_history[w];
if d1 * d2 < 0.0 {
sign_changes += 1;
}
}
sign_changes
}
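/// When the dual infeasibility has been oscillating over the last AVG_WINDOW iterations,
/// try the averaged iterate once: if it passes the convergence test the result is
/// returned as Optimal, otherwise the original iterate is restored.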
#[allow(clippy::too_many_arguments)]
fn try_iterate_averaging_promotion<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
ws: &mut ConvergenceWorkspace,
n: usize,
m: usize,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> Option<SolveResult> {
if ws.avg.tried || ws.avg.du_history.len() != AVG_WINDOW {
return None;
}
if count_du_history_sign_changes(&ws.avg.du_history) < AVG_WINDOW / 2 {
return None;
}
ws.avg.tried = true;
let (avg_x, avg_y, avg_zl, avg_zu) =
compute_iterate_average(&ws.avg.iterate_history, state, n, m);
let saved = SavedIterate::snapshot(state);
state.x.copy_from_slice(&avg_x);
state.y.copy_from_slice(&avg_y);
state.z_l.copy_from_slice(&avg_zl);
state.z_u.copy_from_slice(&avg_zu);
let _ = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
let avg_conv = compute_convergence_info_from_state(state, state.mu, n, m);
if let ConvergenceStatus::Converged = check_convergence(&avg_conv, options, 0) {
if options.print_level >= 3 {
rip_log!("ripopt: Iterate averaging promoted near-tolerance -> Optimal (du={:.2e})", avg_conv.dual_inf);
}
return Some(make_result(state, SolveStatus::Optimal));
}
saved.restore_and_reeval(state, problem, linear_constraints, lbfgs_mode);
None
}
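/// One-shot promotion for iterates that are primal/dual feasible to within 100 * tol but
/// fail only on complementarity: zero the bound multipliers of clearly inactive bounds
/// and re-test convergence, restoring the multipliers on failure.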
fn try_complementarity_polish_promotion(
state: &mut SolverState,
options: &SolverOptions,
conv_info: &ConvergenceInfo,
ws: &mut ConvergenceWorkspace,
n: usize,
m: usize,
) -> Option<SolveResult> {
if *ws.tried_compl_polish {
return None;
}
let compl_inf_now = conv_info.compl_inf;
let s_d_now = compute_residual_scaling(conv_info.multiplier_sum, conv_info.multiplier_count);
let s_c_now =
compute_residual_scaling(conv_info.bound_multiplier_sum, conv_info.bound_multiplier_count);
let compl_tol_scaled = options.tol * s_c_now;
if !(compl_inf_now > compl_tol_scaled
&& conv_info.primal_inf <= 100.0 * options.tol
&& conv_info.dual_inf <= 100.0 * options.tol * s_d_now)
{
return None;
}
*ws.tried_compl_polish = true;
let saved_zl = state.z_l.clone();
let saved_zu = state.z_u.clone();
let gap_tol = 1e-6;
for i in 0..n {
let gap_l = if state.x_l[i].is_finite() { state.x[i] - state.x_l[i] } else { f64::INFINITY };
let gap_u = if state.x_u[i].is_finite() { state.x_u[i] - state.x[i] } else { f64::INFINITY };
if gap_l > gap_tol {
state.z_l[i] = 0.0;
}
if gap_u > gap_tol {
state.z_u[i] = 0.0;
}
}
let snap_conv = compute_convergence_info_from_state(state, state.mu, n, m);
if let ConvergenceStatus::Converged = check_convergence(&snap_conv, options, 0) {
if options.print_level >= 3 {
rip_log!(
"ripopt: Complementarity snap promoted near-tolerance -> Optimal (compl {:.2e} -> {:.2e}, du {:.2e})",
compl_inf_now, snap_conv.compl_inf, snap_conv.dual_inf
);
}
return Some(make_result(state, SolveStatus::Optimal));
}
state.z_l.copy_from_slice(&saved_zl);
state.z_u.copy_from_slice(&saved_zu);
None
}
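/// Handles an Acceptable convergence status by trying, in order, iterate averaging, an
/// active-set polish solve, and the complementarity snap; returns an Acceptable result
/// if none of them promotes the iterate to Optimal.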
#[allow(clippy::too_many_arguments)]
fn handle_acceptable_status_with_promotions<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
conv_info: &ConvergenceInfo,
ws: &mut ConvergenceWorkspace,
timings: &PhaseTimings,
iteration: usize,
ipm_start: Instant,
n: usize,
m: usize,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> SolveResult {
if let Some(result) = try_iterate_averaging_promotion(
state, problem, options, ws, n, m,
linear_constraints, lbfgs_mode,
) {
return result;
}
if !*ws.tried_active_set {
*ws.tried_active_set = true;
if let Some(result) = try_active_set_solve(state, problem, options, linear_constraints, lbfgs_mode) {
if options.print_level >= 3 {
rip_log!("ripopt: Active set solve promoted Acceptable -> Optimal");
}
return result;
}
}
if let Some(result) = try_complementarity_polish_promotion(
state, options, conv_info, ws, n, m,
) {
return result;
}
if options.print_level >= 5 {
timings.print_summary(iteration + 1, ipm_start.elapsed());
}
make_result(state, SolveStatus::Acceptable)
}
fn check_convergence_and_handle_promotions<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
primal_inf_max: f64,
dual_inf: f64,
dual_inf_unscaled: f64,
compl_inf: f64,
multiplier_sum: f64,
multiplier_count: usize,
bound_multiplier_sum: f64,
bound_multiplier_count: usize,
ws: &mut ConvergenceWorkspace,
timings: &PhaseTimings,
iteration: usize,
ipm_start: Instant,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> Option<SolveResult> {
let n = state.n;
let m = state.m;
let conv_info = ConvergenceInfo {
primal_inf: primal_inf_max,
dual_inf,
dual_inf_unscaled,
compl_inf,
mu: state.mu,
objective: state.obj,
multiplier_sum,
multiplier_count,
bound_multiplier_sum,
bound_multiplier_count,
};
ws.avg.du_history.push(dual_inf);
ws.avg.iterate_history.push((
state.x.clone(), state.y.clone(), state.z_l.clone(), state.z_u.clone(),
));
if ws.avg.du_history.len() > AVG_WINDOW {
ws.avg.du_history.remove(0);
ws.avg.iterate_history.remove(0);
}
match check_convergence(&conv_info, options, state.consecutive_acceptable) {
ConvergenceStatus::Converged => {
if options.print_level >= 5 {
timings.print_summary(iteration + 1, ipm_start.elapsed());
}
Some(make_result(state, SolveStatus::Optimal))
}
ConvergenceStatus::Acceptable => Some(handle_acceptable_status_with_promotions(
state, problem, options, &conv_info, ws, timings,
iteration, ipm_start, n, m,
linear_constraints, lbfgs_mode,
)),
ConvergenceStatus::Diverging => {
Some(make_result(state, SolveStatus::Unbounded))
}
ConvergenceStatus::NotConverged => None,
}
}
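/// Enforces the wall-clock budget (checked every iteration for the first 10, then every
/// 10th), returning MaxIterations when exceeded, and the early-stall timeout during the
/// first five iterations, returning NumericalError.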
fn check_time_limits(
state: &SolverState,
iteration: usize,
start_time: Instant,
early_timeout: f64,
options: &SolverOptions,
) -> Option<SolveResult> {
if (iteration < 10 || iteration % 10 == 0) && options.max_wall_time > 0.0 {
if start_time.elapsed().as_secs_f64() >= options.max_wall_time {
return Some(make_result(state, SolveStatus::MaxIterations));
}
}
if iteration < 5 && options.early_stall_timeout > 0.0 {
if start_time.elapsed().as_secs_f64() > early_timeout {
if options.print_level >= 3 {
rip_log!(
"ripopt: Early stall at iteration {} ({:.1}s elapsed), terminating",
iteration, start_time.elapsed().as_secs_f64()
);
}
return Some(make_result(state, SolveStatus::NumericalError));
}
}
None
}
fn reset_filter_with_current_theta(state: &SolverState, filter: &mut Filter) {
filter.reset();
let theta = state.constraint_violation();
filter.set_theta_min_from_initial(theta);
}
struct ProgressStallTracker {
best_pr: f64,
best_du: f64,
no_progress_count: usize,
}
impl ProgressStallTracker {
fn new() -> Self {
Self {
best_pr: f64::INFINITY,
best_du: f64::INFINITY,
no_progress_count: 0,
}
}
fn reset(&mut self) {
self.best_pr = f64::INFINITY;
self.best_du = f64::INFINITY;
self.no_progress_count = 0;
}
}
fn reset_stall_counters_and_filter(
state: &SolverState,
filter: &mut Filter,
stall: &mut ProgressStallTracker,
) {
reset_filter_with_current_theta(state, filter);
stall.reset();
}
struct OptimalityMeasures {
primal_inf: f64,
primal_inf_max: f64,
dual_inf: f64,
dual_inf_unscaled: f64,
compl_inf: f64,
}
fn compute_optimality_measures(state: &SolverState) -> OptimalityMeasures {
let primal_inf = state.constraint_violation();
let primal_inf_max = compute_primal_inf_max_at_state(state);
let dual_inf = compute_dual_inf_at_state(state);
let dual_inf_unscaled = compute_dual_inf_unscaled_at_state(state);
let compl_inf = compute_compl_err_at_state(state);
OptimalityMeasures {
primal_inf,
primal_inf_max,
dual_inf,
dual_inf_unscaled,
compl_inf,
}
}
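/// Counts consecutive iterations that satisfy both the scaled (1e-6, with the s_d/s_c
/// factors) and the loose unscaled acceptable-point thresholds, resetting the counter
/// otherwise; returns the dual residual scaling s_d used for the check.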
fn track_consecutive_acceptable(
state: &mut SolverState,
primal_inf: f64,
dual_inf: f64,
dual_inf_unscaled: f64,
compl_inf: f64,
multiplier_sum: f64,
bound_multiplier_sum: f64,
) -> f64 {
let n = state.n;
let m = state.m;
let s_d_for_acc = compute_residual_scaling(multiplier_sum, m + 2 * n);
let s_c_for_acc = compute_residual_scaling(bound_multiplier_sum, 2 * n);
let meets_acc_scaled = primal_inf <= 1e-6
&& dual_inf <= 1e-6 * s_d_for_acc
&& compl_inf <= 1e-6 * s_c_for_acc;
let meets_acc_unscaled = primal_inf <= 1e-2
&& dual_inf_unscaled <= 1e10
&& compl_inf <= 1e-2;
if meets_acc_scaled && meets_acc_unscaled {
state.consecutive_acceptable += 1;
} else {
state.consecutive_acceptable = 0;
}
s_d_for_acc
}
#[derive(Default)]
struct BestDuIterate {
val: f64,
x: Option<Vec<f64>>,
y: Option<Vec<f64>>,
z_l: Option<Vec<f64>>,
z_u: Option<Vec<f64>>,
}
impl BestDuIterate {
fn new() -> Self {
Self {
val: f64::INFINITY,
x: None,
y: None,
z_l: None,
z_u: None,
}
}
}
fn update_best_du_iterate(
state: &SolverState,
dual_inf: f64,
best_du: &mut BestDuIterate,
) {
if dual_inf < best_du.val {
best_du.val = dual_inf;
best_du.x = Some(state.x.clone());
best_du.y = Some(state.y.clone());
best_du.z_l = Some(state.z_l.clone());
best_du.z_u = Some(state.z_u.clone());
}
}
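/// Declares the problem Unbounded after ten consecutive feasible iterates whose
/// objective lies below -1e20.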
fn detect_unbounded(
state: &SolverState,
options: &SolverOptions,
primal_inf: f64,
consecutive_unbounded: &mut usize,
) -> Option<SolveResult> {
if state.obj < -1e20 && primal_inf < options.constr_viol_tol {
*consecutive_unbounded += 1;
if *consecutive_unbounded >= 10 {
return Some(make_result(state, SolveStatus::Unbounded));
}
} else {
*consecutive_unbounded = 0;
}
None
}
struct PrimalDivergenceTracker {
consecutive_increase: usize,
prev: f64,
start: f64,
}
impl PrimalDivergenceTracker {
fn new() -> Self {
Self {
consecutive_increase: 0,
prev: f64::INFINITY,
start: f64::INFINITY,
}
}
}
struct FeasibilityTracker {
history: Vec<f64>,
history_len: usize,
ever_feasible: bool,
stall_count: usize,
}
impl FeasibilityTracker {
fn new(history_len: usize) -> Self {
Self {
history: Vec::with_capacity(history_len),
history_len,
ever_feasible: false,
stall_count: 0,
}
}
}
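/// Flags primal divergence (forcing a restoration phase) when the constraint violation
/// has grown for at least eight consecutive iterations and now exceeds the value where
/// the growth started by more than 20%.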
fn detect_primal_divergence(
options: &SolverOptions,
iteration: usize,
primal_inf: f64,
pd: &mut PrimalDivergenceTracker,
m: usize,
) -> bool {
let mut force_restoration = false;
if m > 0 && iteration > 5 && primal_inf > options.constr_viol_tol {
if primal_inf > pd.prev * (1.0 + 1e-6) {
if pd.consecutive_increase == 0 {
pd.start = pd.prev;
}
pd.consecutive_increase += 1;
} else {
pd.consecutive_increase = 0;
}
if pd.consecutive_increase >= 8 && primal_inf > 1.2 * pd.start {
log::info!(
"Primal divergence at iter {}: pr grew for {} consecutive iterations ({:.2e} -> {:.2e}), forcing restoration",
iteration, pd.consecutive_increase, pd.start, primal_inf
);
if options.print_level >= 3 {
rip_log!(
"ripopt: Primal divergence detected (pr grew {:.2e} -> {:.2e} over {} iters), re-entering restoration",
pd.start, primal_inf, pd.consecutive_increase
);
}
force_restoration = true;
pd.consecutive_increase = 0;
}
} else {
pd.consecutive_increase = 0;
}
pd.prev = primal_inf;
force_restoration
}
enum StallDecision {
Proceed,
Continue,
Return(SolveResult),
}
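/// On a stall, recovers bound multipliers from the active set and checks whether the
/// resulting residuals are near tolerance; if so the run terminates with NumericalError
/// instead of continuing to iterate.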
#[allow(clippy::too_many_arguments)]
fn check_stall_near_tolerance_via_optimal_duals(
state: &SolverState,
options: &SolverOptions,
primal_inf: f64,
primal_inf_max: f64,
compl_inf: f64,
stall_near_tol: f64,
n: usize,
m: usize,
) -> Option<StallDecision> {
let mut gj = state.grad_f.clone();
accumulate_jt_y(state, &mut gj);
let (opt_zl, opt_zu) = recover_active_set_z(state, &gj, n);
let opt_du = dual_inf_with_z(state, &opt_zl, &opt_zu);
let opt_co = compl_err_with_z(state, &opt_zl, &opt_zu);
let opt_co_best = compl_inf.min(opt_co);
let fmult: f64 = l1_norm(&state.y) + l1_norm(&opt_zl) + l1_norm(&opt_zu);
let fmult_bnd: f64 = l1_norm(&opt_zl) + l1_norm(&opt_zu);
let fsd = compute_residual_scaling(fmult, m + 2 * n);
let fsc = compute_residual_scaling(fmult_bnd, 2 * n);
let stall_fdu_tol = (stall_near_tol * fsd).max(1e-2);
let stall_fco_tol = (stall_near_tol * fsc).max(1e-2);
let stall_fpr_tol = stall_near_tol.max(10.0 * options.constr_viol_tol);
let sc = primal_inf_max <= stall_fpr_tol
&& opt_du <= stall_fdu_tol
&& opt_co_best <= stall_fco_tol;
let du_u = compute_dual_inf_unscaled_at_state(state);
let usc = primal_inf_max <= 10.0 * options.constr_viol_tol
&& du_u <= 10.0 * options.dual_inf_tol
&& opt_co_best <= 10.0 * options.compl_inf_tol;
if sc && usc {
if options.print_level >= 3 {
rip_log!(
"ripopt: Stalled but near-tolerance via optimal duals (pr={:.2e}, du_opt={:.2e}, co={:.2e}), returning NumericalError",
primal_inf, opt_du, opt_co_best
);
}
return Some(StallDecision::Return(make_result(state, SolveStatus::NumericalError)));
}
None
}
#[allow(clippy::too_many_arguments)]
fn try_force_mu_decrease_in_fixed_mode(
state: &mut SolverState,
options: &SolverOptions,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
filter: &mut Filter,
stall: &mut ProgressStallTracker,
) -> bool {
if options.mu_strategy_adaptive || state.mu <= options.mu_min {
return false;
}
let new_mu = (options.mu_linear_decrease_factor * state.mu)
.min(state.mu.powf(options.mu_superlinear_decrease_power))
.max(options.mu_min);
if options.print_level >= 3 {
rip_log!(
"ripopt: Fixed mode stall near tolerance (pr={:.2e}, du={:.2e}, co={:.2e}), forcing mu {:.2e} -> {:.2e}",
primal_inf, dual_inf, compl_inf, state.mu, new_mu
);
}
state.mu = new_mu;
reset_stall_counters_and_filter(state, filter, stall);
true
}
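/// Near-tolerance stall handling: first try boosting mu when it has fallen far below the
/// remaining primal infeasibility, then a forced fixed-mode mu decrease, and only then
/// classify the stall as Acceptable or NumericalError.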
#[allow(clippy::too_many_arguments)]
fn handle_near_tolerance_stall(
state: &mut SolverState,
options: &SolverOptions,
primal_inf: f64,
primal_inf_max: f64,
dual_inf: f64,
compl_inf: f64,
s_d_for_acc: f64,
filter: &mut Filter,
mu_state: &mut MuState,
stall: &mut ProgressStallTracker,
) -> StallDecision {
if state.mu < primal_inf_max * 0.01 && primal_inf_max > options.constr_viol_tol {
let new_mu = (primal_inf_max * 0.1).max(1e-6);
if options.print_level >= 3 {
rip_log!(
"ripopt: Near-tolerance stall: boosting mu {:.2e} -> {:.2e} (pr_max={:.2e})",
state.mu, new_mu, primal_inf_max
);
}
boost_mu_and_switch_to_fixed_with_stall_reset(
state, new_mu, mu_state, filter, stall,
);
return StallDecision::Continue;
}
if try_force_mu_decrease_in_fixed_mode(
state, options, primal_inf, dual_inf, compl_inf, filter, stall,
) {
return StallDecision::Continue;
}
classify_near_tolerance_stall_outcome(
state, options, primal_inf, primal_inf_max, dual_inf, compl_inf, s_d_for_acc,
)
}
#[allow(clippy::too_many_arguments)]
fn classify_near_tolerance_stall_outcome(
state: &SolverState,
options: &SolverOptions,
primal_inf: f64,
primal_inf_max: f64,
dual_inf: f64,
compl_inf: f64,
s_d_for_acc: f64,
) -> StallDecision {
let acc_pr_ok = primal_inf_max <= 1e-2;
let acc_du_ok = dual_inf <= 1e10;
let acc_co_ok = compl_inf <= 1e-2;
let acc_scaled_ok = primal_inf_max <= 1e-6
&& dual_inf <= 1e-6 * s_d_for_acc
&& compl_inf <= 1e-6 * s_d_for_acc;
if acc_pr_ok && acc_du_ok && acc_co_ok && acc_scaled_ok {
if options.print_level >= 3 {
rip_log!(
"ripopt: Stalled at acceptable tolerance (pr={:.2e}, du={:.2e}, co={:.2e}), returning Acceptable",
primal_inf, dual_inf, compl_inf
);
}
return StallDecision::Return(make_result(state, SolveStatus::Acceptable));
}
if options.print_level >= 3 {
rip_log!(
"ripopt: Stalled but near-tolerance (pr={:.2e}, du={:.2e}, co={:.2e}), returning NumericalError",
primal_inf, dual_inf, compl_inf
);
}
StallDecision::Return(make_result(state, SolveStatus::NumericalError))
}
fn try_boost_mu_for_stall(
state: &mut SolverState,
options: &SolverOptions,
filter: &mut Filter,
mu_state: &mut MuState,
primal_inf_max: f64,
stall: &mut ProgressStallTracker,
) -> Option<StallDecision> {
if !(primal_inf_max < 0.1 && state.mu < primal_inf_max * 0.01) {
return None;
}
let new_mu = (primal_inf_max * 0.1).max(1e-6);
if options.print_level >= 3 {
rip_log!(
"ripopt: Stall recovery: boosting mu {:.2e} -> {:.2e} (pr_max={:.2e})",
state.mu, new_mu, primal_inf_max
);
}
boost_mu_and_switch_to_fixed_with_stall_reset(
state, new_mu, mu_state, filter, stall,
);
Some(StallDecision::Continue)
}
fn revert_to_best_du_iterate_if_better<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
dual_inf: f64,
best_du: &BestDuIterate,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) {
if best_du.x.is_some() && best_du.val < dual_inf * 0.1 {
let mut no_lbfgs: Option<LbfgsIpmState> = None;
restore_best_du_iterate(
state, problem, &mut no_lbfgs, best_du,
linear_constraints, lbfgs_mode,
);
if options.print_level >= 3 {
rip_log!(
"ripopt: Reverting to best-du iterate (du: {:.2e} -> {:.2e}) before NumericalError exit",
dual_inf, best_du.val
);
}
}
}
fn update_stall_counters_and_check_limit(
stall: &mut ProgressStallTracker,
state: &SolverState,
options: &SolverOptions,
primal_inf_max: f64,
dual_inf: f64,
) -> bool {
let pr_improved = primal_inf_max < 0.99 * stall.best_pr;
let du_improved = dual_inf < 0.99 * stall.best_du;
if pr_improved {
stall.best_pr = primal_inf_max;
}
if du_improved {
stall.best_du = dual_inf;
}
if pr_improved || du_improved {
stall.no_progress_count = 0;
return false;
}
stall.no_progress_count += 1;
let tiny_alpha = state.alpha_primal < 1e-8 && state.alpha_dual < 1e-4;
let stall_limit = if tiny_alpha { options.stall_iter_limit / 2 } else { options.stall_iter_limit };
stall.no_progress_count >= stall_limit
}
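/// Progress-stall watchdog (active after iteration 50): when neither primal nor dual
/// infeasibility has improved for stall_iter_limit iterations, either return a
/// near-tolerance result, boost mu and continue, or revert to the best-du iterate and
/// terminate with NumericalError.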
fn detect_and_handle_progress_stall<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
primal_inf: f64,
primal_inf_max: f64,
dual_inf: f64,
compl_inf: f64,
s_d_for_acc: f64,
filter: &mut Filter,
mu_state: &mut MuState,
stall: &mut ProgressStallTracker,
best_du: &BestDuIterate,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> StallDecision {
if iteration <= 50 || options.stall_iter_limit == 0 {
return StallDecision::Proceed;
}
let n = state.n;
let m = state.m;
if !update_stall_counters_and_check_limit(
stall, state, options, primal_inf_max, dual_inf,
) {
return StallDecision::Proceed;
}
let stall_near_tol = options.tol * 1000.0;
let stall_pr_ok = primal_inf_max <= stall_near_tol.max(10.0 * options.constr_viol_tol);
let stall_du_ok = dual_inf <= (stall_near_tol * s_d_for_acc).max(1e-2);
let stall_co_ok = compl_inf <= (stall_near_tol * s_d_for_acc).max(1e-2);
if stall_pr_ok && stall_du_ok && stall_co_ok {
return handle_near_tolerance_stall(
state, options, primal_inf, primal_inf_max, dual_inf, compl_inf,
s_d_for_acc, filter, mu_state, stall,
);
}
if let Some(decision) = check_stall_near_tolerance_via_optimal_duals(
state, options, primal_inf, primal_inf_max, compl_inf, stall_near_tol, n, m,
) {
return decision;
}
if let Some(decision) = try_boost_mu_for_stall(
state, options, filter, mu_state, primal_inf_max, stall,
) {
return decision;
}
revert_to_best_du_iterate_if_better(
state, problem, options, dual_inf, best_du,
linear_constraints, lbfgs_mode,
);
if options.print_level >= 3 {
rip_log!(
"ripopt: Stalled for {} iterations without progress (alpha_p={:.2e}, pr={:.2e}, du={:.2e}), terminating",
stall.no_progress_count, state.alpha_primal, primal_inf, dual_inf
);
}
StallDecision::Return(make_result(state, SolveStatus::NumericalError))
}
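/// Maintains the recent constraint-violation history and, when proactive infeasibility
/// detection is enabled, declares LocalInfeasibility once theta has stagnated long
/// enough at a point that is approximately stationary for the feasibility problem.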
#[allow(clippy::too_many_arguments)]
fn track_feasibility_and_detect_infeasibility(
state: &SolverState,
options: &SolverOptions,
iteration: usize,
primal_inf: f64,
feas: &mut FeasibilityTracker,
) -> Option<SolveResult> {
let m = state.m;
if feas.history.len() >= feas.history_len {
feas.history.remove(0);
}
feas.history.push(primal_inf);
if primal_inf < options.constr_viol_tol {
feas.ever_feasible = true;
}
if options.proactive_infeasibility_detection
&& !feas.ever_feasible
&& m > 0
&& iteration >= 50
&& primal_inf > options.constr_viol_tol
&& feas.history.len() >= feas.history_len
{
let theta_min_h = slice_min(&feas.history);
let theta_max_h = feas.history.iter().cloned().fold(0.0f64, f64::max);
if theta_max_h > 0.0 && (theta_max_h - theta_min_h) < 0.01 * primal_inf {
feas.stall_count += 1;
} else {
feas.stall_count = 0;
}
if feas.stall_count >= 10 {
let grad_theta_norm = compute_grad_theta_norm(state);
let stationarity_tol = 1e-3 * primal_inf.max(1.0);
if grad_theta_norm < stationarity_tol {
log::info!(
"Proactive infeasibility at iter {}: θ stagnated at {:.2e}, ‖∇θ‖={:.2e}",
iteration, primal_inf, grad_theta_norm
);
return Some(make_result(state, SolveStatus::LocalInfeasibility));
}
feas.stall_count = 0;
}
} else if feas.ever_feasible {
feas.stall_count = 0;
}
None
}
/// After restoring a best-du iterate, checks whether it already lies within a relaxed
/// (100 * tol) neighbourhood of the KKT conditions and, if so, returns a NumericalError result.
fn check_restored_point_near_tolerance(
state: &SolverState,
options: &SolverOptions,
) -> Option<SolveResult> {
let rest_pr = state.constraint_violation();
let rest_du = compute_dual_inf_at_state(state);
let rest_co = compute_compl_err_at_state(state);
let s_d = compute_s_d_at_state(state);
let near_tol = 100.0 * options.tol;
let du_tol = (near_tol * s_d).max(1e-2);
let co_tol = (near_tol * s_d).max(1e-2);
let pr_tol = near_tol.max(10.0 * options.constr_viol_tol);
if rest_pr <= pr_tol && rest_du <= du_tol && rest_co <= co_tol {
log::debug!(
"Restored best-du point passes near-tolerance (pr={:.2e}, du={:.2e}, co={:.2e})",
rest_pr, rest_du, rest_co
);
return Some(make_result(state, SolveStatus::NumericalError));
}
None
}
fn restore_best_du_iterate<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
lbfgs_state: &mut Option<LbfgsIpmState>,
best_du: &BestDuIterate,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) {
let Some(ref bdx) = best_du.x else { return };
state.x.copy_from_slice(bdx);
if let Some(ref bdy) = best_du.y { state.y.copy_from_slice(bdy); }
if let Some(ref bdzl) = best_du.z_l { state.z_l.copy_from_slice(bdzl); }
if let Some(ref bdzu) = best_du.z_u { state.z_u.copy_from_slice(bdzu); }
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
}
struct DualStallTracker {
last_good_du: f64,
last_good_iter: usize,
triggered: bool,
}
impl DualStallTracker {
fn new() -> Self {
Self {
last_good_du: f64::INFINITY,
last_good_iter: 0,
triggered: false,
}
}
}
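/// If the dual infeasibility has not halved for 500 iterations, restore the best-du
/// iterate seen so far, inflate mu back into [1e-4, 1e-1], reset the filter and inertia
/// correction, and terminate early if the restored point is already near tolerance.
/// Triggered at most once per solve.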
fn handle_dual_stagnation<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
iteration: usize,
filter: &mut Filter,
mu_state: &mut MuState,
inertia_params: &mut InertiaCorrectionParams,
lbfgs_state: &mut Option<LbfgsIpmState>,
dual_stall: &mut DualStallTracker,
best_feasible: &BestFeasibleIterate,
best_du: &BestDuIterate,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> Option<SolveResult> {
if iteration == 0 {
return None;
}
let current_du = compute_dual_inf_at_state(state);
if current_du < 0.5 * dual_stall.last_good_du {
dual_stall.last_good_du = current_du;
dual_stall.last_good_iter = iteration;
}
let stall_iters = iteration.saturating_sub(dual_stall.last_good_iter);
if stall_iters < 500
|| dual_stall.triggered
|| current_du <= 100.0 * options.tol
|| best_feasible.x.is_none()
{
return None;
}
if best_du.x.is_none() {
return None;
}
log::debug!(
"Dual stagnation at iter {}: du={:.2e}, restoring best-du point (du={:.2e} at iter {})",
iteration, current_du, dual_stall.last_good_du, dual_stall.last_good_iter
);
restore_best_du_iterate(
state, problem, lbfgs_state, best_du,
linear_constraints, lbfgs_mode,
);
reset_filter_with_current_theta(state, filter);
state.mu = (state.mu * 100.0).clamp(1e-4, 1e-1);
if options.mu_strategy_adaptive {
mu_state.mode = MuMode::Free;
}
mu_state.first_iter_in_mode = true;
mu_state.consecutive_restoration_failures = 0;
inertia_params.delta_w_last = 0.0;
if let Some(result) = check_restored_point_near_tolerance(state, options) {
return Some(result);
}
dual_stall.triggered = true;
None
}
/// Log10 condition estimate of the primal-dual barrier diagonal Sigma = Z_l/S_l + Z_u/S_u
/// over bounded variables; returns NaN when no finite bounds contribute.
fn compute_sigma_condition(state: &SolverState) -> f64 {
let mut mn = f64::INFINITY;
let mut mx = 0.0_f64;
for i in 0..state.n {
let mut s_i = 0.0_f64;
if state.x_l[i].is_finite() {
s_i += state.z_l[i] / slack_xl(state, i);
}
if state.x_u[i].is_finite() {
s_i += state.z_u[i] / slack_xu(state, i);
}
if s_i > 0.0 {
mn = mn.min(s_i);
mx = mx.max(s_i);
}
}
if mn.is_finite() && mn > 0.0 && mx > 0.0 {
(mx / mn).log10()
} else {
f64::NAN
}
}
#[derive(Default)]
struct TraceMetadata {
alpha_primal_max: Option<f64>,
tau_used: Option<f64>,
soc_accepted: bool,
}
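/// Emits one row of the per-iteration trace when tracing is enabled, filling quantities
/// not tracked on this code path with NaN or zero, and clears the one-shot trace
/// metadata afterwards.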
fn emit_trace_row_if_enabled(
state: &SolverState,
iteration: usize,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
ls_steps: usize,
inertia_params: &InertiaCorrectionParams,
last_mehrotra_sigma: Option<f64>,
trace_meta: &mut TraceMetadata,
) {
if !trace::is_enabled() {
return;
}
let dx_inf = linf_norm(&state.dx);
let dzl_inf = linf_norm(&state.dz_l);
let dzu_inf = linf_norm(&state.dz_u);
let sigma_cond = compute_sigma_condition(state);
trace::emit(&trace::TraceRow {
iter: iteration,
obj: state.obj / state.obj_scaling,
inf_pr: primal_inf,
inf_du: dual_inf,
compl: compl_inf,
mu: state.mu,
alpha_pr: state.alpha_primal,
alpha_du: state.alpha_dual,
alpha_aff_p: f64::NAN,
alpha_aff_d: f64::NAN,
mu_aff: f64::NAN,
sigma: last_mehrotra_sigma.unwrap_or(f64::NAN),
mu_pc: f64::NAN,
delta_w: inertia_params.delta_w_last,
delta_c: 0.0,
dx_inf,
dzl_inf,
dzu_inf,
mcc_iters: 0,
ls: ls_steps as u32,
accepted: true,
alpha_primal_max: trace_meta.alpha_primal_max.unwrap_or(f64::NAN),
tau_used: trace_meta.tau_used.unwrap_or(f64::NAN),
sigma_cond,
soc_accepted: trace_meta.soc_accepted,
});
trace_meta.alpha_primal_max = None;
trace_meta.tau_used = None;
trace_meta.soc_accepted = false;
}
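/// Builds the per-iterate snapshot exposed to the intermediate callback: bound and
/// constraint violations, complementarity products, and the gradient of the Lagrangian.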
fn build_iterate_snapshot(state: &SolverState) -> crate::intermediate::IterateSnapshot {
use crate::intermediate::IterateSnapshot;
let n = state.n;
let m = state.m;
let mut x_l_viol = vec![0.0; n];
let mut x_u_viol = vec![0.0; n];
let mut compl_xl = vec![0.0; n];
let mut compl_xu = vec![0.0; n];
for i in 0..n {
if state.x_l[i].is_finite() {
x_l_viol[i] = (state.x_l[i] - state.x[i]).max(0.0);
compl_xl[i] = (state.x[i] - state.x_l[i]) * state.z_l[i];
}
if state.x_u[i].is_finite() {
x_u_viol[i] = (state.x[i] - state.x_u[i]).max(0.0);
compl_xu[i] = (state.x_u[i] - state.x[i]) * state.z_u[i];
}
}
let mut grad_lag = state.grad_f.clone();
accumulate_jt_y(state, &mut grad_lag);
for i in 0..n {
grad_lag[i] -= state.z_l[i];
grad_lag[i] += state.z_u[i];
}
let mut constr_viol = vec![0.0; m];
let mut compl_g_vec = vec![0.0; m];
for i in 0..m {
if state.g_l[i].is_finite() && state.g[i] < state.g_l[i] {
constr_viol[i] = state.g_l[i] - state.g[i];
} else if state.g_u[i].is_finite() && state.g[i] > state.g_u[i] {
constr_viol[i] = state.g[i] - state.g_u[i];
}
if state.g_l[i].is_finite() && state.g_u[i].is_finite() {
compl_g_vec[i] = state.y[i] * (state.g[i] - state.g_l[i]).min(state.g_u[i] - state.g[i]);
} else if state.g_l[i].is_finite() {
compl_g_vec[i] = state.y[i] * (state.g[i] - state.g_l[i]);
} else if state.g_u[i].is_finite() {
compl_g_vec[i] = state.y[i] * (state.g_u[i] - state.g[i]);
}
}
IterateSnapshot {
x: state.x.clone(),
z_l: state.z_l.clone(),
z_u: state.z_u.clone(),
g: state.g.clone(),
lambda: state.y.clone(),
x_l_violation: x_l_viol,
x_u_violation: x_u_viol,
compl_x_l: compl_xl,
compl_x_u: compl_xu,
grad_lag_x: grad_lag,
constraint_violation: constr_viol,
compl_g: compl_g_vec,
}
}
fn populate_snapshot_and_invoke_callback(
state: &SolverState,
iteration: usize,
primal_inf: f64,
dual_inf: f64,
prev_ic_delta_w: f64,
ls_steps: usize,
options: &SolverOptions,
) -> Option<SolveResult> {
crate::intermediate::set_current_iterate(Some(build_iterate_snapshot(state)));
let d_norm = linf_norm(&state.dx);
let continue_ok = crate::intermediate::invoke_intermediate(
0, iteration,
state.obj / state.obj_scaling,
primal_inf,
dual_inf,
state.mu,
d_norm,
prev_ic_delta_w,
state.alpha_dual,
state.alpha_primal,
ls_steps,
);
crate::intermediate::set_current_iterate(None);
if !continue_ok {
if options.print_level >= 5 {
rip_log!("ripopt: User requested stop via intermediate callback");
}
return Some(make_result(state, SolveStatus::UserRequestedStop));
}
None
}
fn log_iteration_row(
iteration: usize,
state: &SolverState,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
ls_steps: usize,
log_line_count: &mut usize,
options: &SolverOptions,
) {
if options.print_level < 3 {
return;
}
if *log_line_count > 0 && *log_line_count % 25 == 0 {
rip_log!(
"{:>4} {:>14} {:>10} {:>10} {:>10} {:>10} {:>8} {:>8} {:>3}",
"iter", "objective", "inf_pr", "inf_du", "compl", "lg(mu)", "alpha_pr", "alpha_du", "ls"
);
}
rip_log!(
"{:>4} {:>14.7e} {:>10.2e} {:>10.2e} {:>10.2e} {:>10.2e} {:>8.2e} {:>8.2e} {:>3}",
iteration,
state.obj / state.obj_scaling,
primal_inf,
dual_inf,
compl_inf,
state.mu,
state.alpha_primal,
state.alpha_dual,
ls_steps,
);
*log_line_count += 1;
}
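/// Chooses the linear solver for the KKT systems: dense LDL^T when use_sparse is false,
/// otherwise the configured sparse backend (a multifrontal KKT solver when the rmumps
/// feature is enabled and the problem has constraints).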
fn select_linear_solver(use_sparse: bool, n: usize, m: usize, options: &SolverOptions) -> Box<dyn LinearSolver> {
if use_sparse {
#[cfg(feature = "rmumps")]
{
let _ = n;
if m > 0 {
Box::new(MultifrontalLdl::new_kkt(n))
} else {
new_sparse_solver_with_choice(options.linear_solver)
}
}
#[cfg(not(feature = "rmumps"))]
{
let _ = (n, m);
new_sparse_solver_with_choice(options.linear_solver)
}
} else {
Box::new(DenseLdl::new())
}
}
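/// Cheap density estimate that disables the sparse condensed (Schur-complement) KKT form
/// when the Schur complement's upper-triangle fill estimate exceeds twice the nnz of the
/// augmented KKT matrix.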
fn estimate_schur_density_disable<P: NlpProblem>(
problem: &P,
n: usize,
m: usize,
use_sparse: bool,
options: &SolverOptions,
) -> bool {
if !(use_sparse && m > 0) {
return false;
}
let (jac_rows_est, _) = problem.jacobian_structure();
let mut row_nnz = vec![0usize; m];
for &r in &jac_rows_est {
row_nnz[r] += 1;
}
let schur_nnz_upper: usize = row_nnz.iter().map(|&k| k * (k + 1) / 2).sum();
let (hess_rows_est, _) = problem.hessian_structure();
let augmented_nnz = hess_rows_est.len() + jac_rows_est.len() + n;
let disable = schur_nnz_upper > 2 * augmented_nnz;
if disable && options.print_level >= 3 {
rip_log!(
"ripopt: Disabling sparse condensed KKT: Schur complement nnz estimate ({}) > 2× augmented KKT nnz ({})",
schur_nnz_upper, augmented_nnz
);
}
disable
}
fn detect_linear_constraint_flags<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
x0: &[f64],
m_sc: usize,
) -> Option<Vec<bool>> {
if !(options.detect_linear_constraints && m_sc > 0) {
return None;
}
let flags = crate::linearity::detect_linear_constraints(problem, x0);
let n_linear = flags.iter().filter(|&&f| f).count();
if n_linear == 0 {
return None;
}
if options.print_level >= 5 {
rip_log!(
"ripopt: Detected {}/{} linear constraints (Hessian contribution skipped)",
n_linear, m_sc
);
}
Some(flags)
}
fn initialize_constraint_slack_multipliers(state: &mut SolverState, m: usize, options: &SolverOptions) {
for i in 0..m {
if constraint_is_equality(state, i) {
continue;
}
if state.g_l[i].is_finite() {
state.v_l[i] = options.mu_init / slack_gl(state, i);
}
if state.g_u[i].is_finite() {
state.v_u[i] = options.mu_init / slack_gu(state, i);
}
}
}
fn apply_warm_start(state: &mut SolverState, options: &SolverOptions) {
if !options.warm_start {
return;
}
if let Some(ref init_y) = options.warm_start_y {
let len = init_y.len().min(state.y.len());
state.y[..len].copy_from_slice(&init_y[..len]);
}
if let Some(ref init_z_l) = options.warm_start_z_l {
let len = init_z_l.len().min(state.z_l.len());
state.z_l[..len].copy_from_slice(&init_z_l[..len]);
}
if let Some(ref init_z_u) = options.warm_start_z_u {
let len = init_z_u.len().min(state.z_u.len());
state.z_u[..len].copy_from_slice(&init_z_u[..len]);
}
state.mu = WarmStartInitializer::initialize(
&mut state.x,
&mut state.z_l,
&mut state.z_u,
&state.x_l,
&state.x_u,
options,
);
}
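/// Evaluates the initial point and, if that fails or produces non-finite values, retries
/// from copies of x pushed progressively further inside the bounds (1%, 10%, 50% of the
/// range), re-seeding the bound multipliers each time, before giving up with
/// EvaluationError.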
fn initial_evaluate_with_recovery<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
lbfgs_state: &mut Option<LbfgsIpmState>,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
n: usize,
options: &SolverOptions,
) -> Result<(), SolveResult> {
let init_eval_ok = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
if init_eval_ok && obj_and_grad_finite(state) {
return Ok(());
}
let x_saved = state.x.clone();
for &push_factor in &[1e-2, 1e-1, 0.5] {
state.x.copy_from_slice(&x_saved);
for i in 0..n {
if state.x_l[i].is_finite() && state.x_u[i].is_finite() {
let range = state.x_u[i] - state.x_l[i];
let push = push_factor * range;
if range > 2.0 * push {
state.x[i] = state.x[i].max(state.x_l[i] + push).min(state.x_u[i] - push);
} else {
state.x[i] = 0.5 * (state.x_l[i] + state.x_u[i]);
}
} else if state.x_l[i].is_finite() {
let push = push_factor * state.x_l[i].abs().max(1.0);
state.x[i] = state.x[i].max(state.x_l[i] + push);
} else if state.x_u[i].is_finite() {
let push = push_factor * state.x_u[i].abs().max(1.0);
state.x[i] = state.x[i].min(state.x_u[i] - push);
}
}
reseed_bound_multipliers_from_mu(state, options.mu_init);
let perturb_ok = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
if perturb_ok && obj_and_grad_finite(state) {
return Ok(());
}
}
Err(make_result(state, SolveStatus::EvaluationError))
}
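/// Gradient-based NLP scaling: unless the user supplied explicit factors, scales the
/// objective and each constraint row down so that their largest initial gradient entry
/// does not exceed 100, with a minimum scaling factor of 1e-2.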
fn compute_nlp_scaling<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
x0: &[f64],
jac_rows_sc: &[usize],
) -> (f64, Vec<f64>) {
let n_sc = problem.num_variables();
let m_sc = problem.num_constraints();
if options.user_obj_scaling.is_some() || options.user_g_scaling.is_some() {
let os = options.user_obj_scaling.unwrap_or(1.0);
let gs = options.user_g_scaling.clone().unwrap_or_else(|| vec![1.0; m_sc]);
return (os, gs);
}
let nlp_scaling_max_gradient = 100.0;
let nlp_scaling_min_value = 1e-2;
let mut grad_f0 = vec![0.0; n_sc];
let grad_ok = problem.gradient(x0, true, &mut grad_f0);
let grad_max = if grad_ok {
linf_norm(&grad_f0)
} else {
0.0
};
let os = if grad_max > nlp_scaling_max_gradient && grad_max.is_finite() {
(nlp_scaling_max_gradient / grad_max).max(nlp_scaling_min_value)
} else {
1.0
};
let gs = compute_constraint_row_scaling(
problem, x0, jac_rows_sc, m_sc,
nlp_scaling_max_gradient, nlp_scaling_min_value,
);
(os, gs)
}
fn compute_constraint_row_scaling<P: NlpProblem>(
problem: &P,
x0: &[f64],
jac_rows_sc: &[usize],
m_sc: usize,
max_gradient: f64,
min_value: f64,
) -> Vec<f64> {
let mut gs = vec![1.0; m_sc];
if m_sc == 0 {
return gs;
}
let mut g0_sc = vec![0.0; m_sc];
let constr_ok = problem.constraints(x0, false, &mut g0_sc);
let mut g_l_sc = vec![0.0; m_sc];
let mut g_u_sc = vec![0.0; m_sc];
problem.constraint_bounds(&mut g_l_sc, &mut g_u_sc);
let init_cv = if constr_ok {
convergence::primal_infeasibility(&g0_sc, &g_l_sc, &g_u_sc)
} else {
f64::INFINITY
};
if init_cv >= 1e6 {
return gs;
}
let mut jac_vals0 = vec![0.0; jac_rows_sc.len()];
if !problem.jacobian_values(x0, false, &mut jac_vals0) {
return gs;
}
let mut row_max = vec![0.0f64; m_sc];
for (idx, &row) in jac_rows_sc.iter().enumerate() {
let v = jac_vals0[idx].abs();
if v.is_finite() && v > row_max[row] {
row_max[row] = v;
}
}
for i in 0..m_sc {
if row_max[i] > max_gradient {
gs[i] = (max_gradient / row_max[i]).max(min_value);
}
}
gs
}
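/// Main interior-point driver: scales the problem, initializes the solver state and
/// linear solver, then iterates KKT assembly and factorization, search-direction solve,
/// filter line search with SOC and restoration fallbacks, dual updates, and barrier
/// parameter updates until convergence, an iteration/time limit, or a failure status.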
fn solve_ipm<P: NlpProblem>(problem: &P, options: &SolverOptions) -> SolveResult {
let n_sc = problem.num_variables();
let m_sc = problem.num_constraints();
let mut x0 = vec![0.0; n_sc];
problem.initial_point(&mut x0);
let (jac_rows_sc, _) = problem.jacobian_structure();
let (obj_scaling, g_scaling) = compute_nlp_scaling(problem, options, &x0, &jac_rows_sc);
if options.print_level >= 5
&& (obj_scaling != 1.0 || g_scaling.iter().any(|&s| s != 1.0))
{
let n_scaled_g = g_scaling.iter().filter(|&&s| s != 1.0).count();
rip_log!(
"ripopt: NLP scaling: obj_scaling={:.4e}, {}/{} constraints scaled",
obj_scaling, n_scaled_g, m_sc
);
}
let linear_constraints = detect_linear_constraint_flags(problem, options, &x0, m_sc);
let scaled = ScaledProblem {
inner: problem,
obj_scaling,
g_scaling: g_scaling.clone(),
jac_rows: jac_rows_sc,
};
let problem = &scaled;
let mut state = SolverState::new(problem, options);
state.obj_scaling = obj_scaling;
state.g_scaling = g_scaling;
let n = state.n;
let m = state.m;
let lbfgs_mode = options.hessian_approximation_lbfgs;
let mut lbfgs_state = if lbfgs_mode {
if options.print_level >= 5 {
rip_log!("ripopt: Using L-BFGS Hessian approximation (limited-memory mode)");
}
Some(LbfgsIpmState::new(n))
} else {
None
};
apply_warm_start(&mut state, options);
let use_sparse = (n + m) >= options.sparse_threshold;
let mut lin_solver = select_linear_solver(use_sparse, n, m, options);
let mut inertia_params = InertiaCorrectionParams::default();
let mut restoration = RestorationPhase::new(500);
let mut disable_sparse_condensed = estimate_schur_density_disable(problem, n, m, use_sparse, options);
let _use_dense_condensed_fallback = false;
let mut filter = Filter::new(1e4);
let mut last_mehrotra_sigma: Option<f64> = None;
let mut trace_meta = TraceMetadata::default();
let mut mu_state = MuState::new();
if !options.mu_strategy_adaptive {
mu_state.mode = MuMode::Fixed;
}
let start_time = Instant::now();
let deadline = if options.max_wall_time > 0.0 {
Some(start_time + Duration::from_secs_f64(options.max_wall_time))
} else {
None
};
let mut timings = PhaseTimings::new();
let ipm_start = Instant::now();
let mut watchdog = Watchdog::new();
let mut feas = FeasibilityTracker::new(100);
let mut consecutive_tiny_steps: usize = 0;
let mut stall = ProgressStallTracker::new();
let mut ls_steps: usize = 0;
let mut prev_ic_delta_w: f64 = 0.0;
let mut pd_tracker = PrimalDivergenceTracker::new();
let mut consecutive_unbounded: usize = 0;
let mut best_feasible = BestFeasibleIterate::new();
let mut best_du = BestDuIterate::new();
let mut dual_stall = DualStallTracker::new();
let mut avg_state = IterateAveragingState::new();
let mut dy_tracker = DyOscillationTracker::new(m);
let mut tried_active_set: bool = false;
let mut tried_compl_polish: bool = false;
if let Err(result) = initial_evaluate_with_recovery(
&mut state, problem, &mut lbfgs_state, linear_constraints.as_deref(), lbfgs_mode, n, options,
) {
return result;
}
initialize_constraint_slack_multipliers(&mut state, m, options);
let theta_init = state.constraint_violation();
filter.set_theta_min_from_initial(theta_init);
let mut log_line_count: usize = 0;
if options.print_level >= 3 {
rip_log!(
"{:>4} {:>14} {:>10} {:>10} {:>10} {:>8} {:>8} {:>3}",
"iter", "objective", "inf_pr", "inf_du", "mu", "alpha_pr", "alpha_du", "ls"
);
}
if options.print_level >= 5 {
rip_log!("ripopt: Starting main loop (n={}, m={})", n, m);
}
for iteration in 0..options.max_iter {
state.iter = iteration;
let early_timeout = options.early_stall_timeout * ((n + m) as f64 / 200.0).max(1.0);
if let Some(result) = check_time_limits(&state, iteration, start_time, early_timeout, options) {
return result;
}
if let Some(result) = handle_dual_stagnation(
&mut state,
problem,
options,
iteration,
&mut filter,
&mut mu_state,
&mut inertia_params,
&mut lbfgs_state,
&mut dual_stall,
&best_feasible,
&best_du,
linear_constraints.as_deref(),
lbfgs_mode,
) {
return result;
}
let OptimalityMeasures {
primal_inf,
primal_inf_max,
dual_inf,
dual_inf_unscaled,
compl_inf,
} = compute_optimality_measures(&state);
log_iteration_row(
iteration,
&state,
primal_inf,
dual_inf,
compl_inf,
ls_steps,
&mut log_line_count,
options,
);
emit_trace_row_if_enabled(
&state,
iteration,
primal_inf,
dual_inf,
compl_inf,
ls_steps,
&inertia_params,
last_mehrotra_sigma,
&mut trace_meta,
);
if let Some(result) = populate_snapshot_and_invoke_callback(
&state,
iteration,
primal_inf,
dual_inf,
prev_ic_delta_w,
ls_steps,
options,
) {
return result;
}
let multiplier_sum = compute_multiplier_sum(&state);
let multiplier_count = m + 2 * n;
let bound_multiplier_sum = compute_bound_multiplier_sum(&state);
let bound_multiplier_count = 2 * n;
let mut conv_ws = ConvergenceWorkspace {
avg: &mut avg_state,
tried_active_set: &mut tried_active_set,
tried_compl_polish: &mut tried_compl_polish,
};
if let Some(result) = check_convergence_and_handle_promotions(
&mut state,
problem,
options,
primal_inf_max,
dual_inf,
dual_inf_unscaled,
compl_inf,
multiplier_sum,
multiplier_count,
bound_multiplier_sum,
bound_multiplier_count,
&mut conv_ws,
&timings,
iteration,
ipm_start,
linear_constraints.as_deref(),
lbfgs_mode,
) {
return result;
}
let s_d_for_acc = track_consecutive_acceptable(
&mut state,
primal_inf,
dual_inf,
dual_inf_unscaled,
compl_inf,
multiplier_sum,
bound_multiplier_sum,
);
update_best_du_iterate(&state, dual_inf, &mut best_du);
if let Some(result) = track_feasibility_and_detect_infeasibility(
&state,
options,
iteration,
primal_inf,
&mut feas,
) {
return result;
}
if let Some(result) = detect_unbounded(
&state,
options,
primal_inf,
&mut consecutive_unbounded,
) {
return result;
}
match detect_and_handle_progress_stall(
&mut state,
problem,
options,
iteration,
primal_inf,
primal_inf_max,
dual_inf,
compl_inf,
s_d_for_acc,
&mut filter,
&mut mu_state,
&mut stall,
&best_du,
linear_constraints.as_deref(),
lbfgs_mode,
) {
StallDecision::Return(r) => return r,
StallDecision::Continue => continue,
StallDecision::Proceed => {}
}
let force_restoration = detect_primal_divergence(
options,
iteration,
primal_inf,
&mut pd_tracker,
m,
);
let t_kkt = Instant::now();
let AssembledKkt {
sigma,
use_sparse_condensed,
condensed_system,
mut sparse_condensed_system,
mut kkt_system_opt,
} = assemble_kkt_systems(&state, n, m, use_sparse, disable_sparse_condensed);
timings.kkt_assembly += t_kkt.elapsed();
if iteration == 0 {
adjust_sparse_condensed_bandwidth(
&state,
problem,
options,
n,
m,
use_sparse,
use_sparse_condensed,
&sigma,
&mut sparse_condensed_system,
&mut kkt_system_opt,
&mut lin_solver,
&mut disable_sparse_condensed,
);
}
let (ic_delta_w, ic_delta_c) = match factor_kkt_with_recovery(
&mut state,
problem,
options,
iteration,
n,
m,
use_sparse,
&mut kkt_system_opt,
lin_solver.as_mut(),
&mut inertia_params,
&mut lbfgs_state,
&mut filter,
&mut restoration,
&mut timings,
&mut prev_ic_delta_w,
linear_constraints.as_deref(),
lbfgs_mode,
deadline,
) {
FactorDecision::Proceed { ic_delta_w, ic_delta_c } => (ic_delta_w, ic_delta_c),
FactorDecision::Continue => continue,
FactorDecision::Return(result) => return result,
};
let t_dir = Instant::now();
let (dx, dy);
let mut cond_solver_for_soc: Option<DenseLdl>;
let mehrotra_aff: Option<(Vec<f64>, Vec<f64>, Vec<f64>, f64)>;
match solve_for_search_direction(
&mut state,
problem,
options,
iteration,
n,
m,
use_sparse,
&condensed_system,
&sparse_condensed_system,
&mut kkt_system_opt,
lin_solver.as_mut(),
&sigma,
&mut inertia_params,
ic_delta_w,
ic_delta_c,
&filter,
&mut restoration,
&mut lbfgs_state,
lbfgs_mode,
linear_constraints.as_deref(),
&mut last_mehrotra_sigma,
deadline,
) {
DirectionSolveDecision::Proceed {
dx: dx_val,
dy: dy_val,
cond_solver_for_soc: cs,
mehrotra_aff: ma,
} => {
dx = dx_val;
dy = dy_val;
cond_solver_for_soc = cs;
mehrotra_aff = ma;
}
DirectionSolveDecision::Continue => continue,
DirectionSolveDecision::Return(r) => return r,
}
timings.direction_solve += t_dir.elapsed();
let mu_for_dz = mehrotra_aff.as_ref().map(|t| t.3).unwrap_or(state.mu);
let (dz_l, dz_u) = recover_dz_from_state(&state, &dx, mu_for_dz);
install_step_directions(&mut state, dx, dy, dz_l, dz_u);
apply_gondzio_mcc(
&mut state,
options,
iteration,
&mu_state,
primal_inf,
dual_inf,
compl_inf,
&kkt_system_opt,
lin_solver.as_mut(),
);
let (tau, alpha_primal_max, alpha_dual_max) = compute_alpha_max(
&state, options, &mu_state, primal_inf, dual_inf, compl_inf,
);
trace_meta.alpha_primal_max = Some(alpha_primal_max);
trace_meta.tau_used = Some(tau);
detect_tiny_step(
&mut state,
options,
&mut mu_state,
&mut filter,
&mut consecutive_tiny_steps,
alpha_primal_max,
primal_inf,
);
let t_ls = Instant::now();
let theta_current = primal_inf;
let phi_current = state.barrier_objective(options);
let grad_phi_step = state.barrier_directional_derivative(options);
let mut step_accepted;
let min_alpha = filter.compute_alpha_min(theta_current, grad_phi_step);
let x_pre_step = state.x.clone();
match run_line_search_loop(
&mut state,
problem,
options,
&mut filter,
&condensed_system,
&mut cond_solver_for_soc,
&sparse_condensed_system,
&kkt_system_opt,
lin_solver.as_mut(),
alpha_primal_max,
theta_current,
phi_current,
grad_phi_step,
min_alpha,
force_restoration,
watchdog.active,
iteration,
n,
m,
start_time,
early_timeout,
&mut trace_meta,
&mut ls_steps,
) {
LineSearchOutcome::StepAccepted => {
step_accepted = true;
}
LineSearchOutcome::Rejected => {
step_accepted = false;
}
LineSearchOutcome::Return(result) => return result,
}
let mut accepted_by_soft_resto = false;
if !step_accepted {
step_accepted = attempt_soft_restoration(
&mut state,
problem,
options,
&mut filter,
&mut mu_state,
linear_constraints.as_deref(),
lbfgs_mode,
alpha_primal_max,
alpha_dual_max,
theta_current,
phi_current,
);
accepted_by_soft_resto = step_accepted;
}
if !step_accepted {
match run_post_ls_restoration_cascade(
&mut state,
problem,
options,
&mut filter,
&mut mu_state,
&mut inertia_params,
&mut lbfgs_state,
lbfgs_mode,
linear_constraints.as_deref(),
&mut restoration,
iteration,
n,
m,
start_time,
deadline,
early_timeout,
&feas,
theta_current,
phi_current,
) {
RestorationCascadeDecision::Continue => continue,
RestorationCascadeDecision::Return(result) => return result,
}
}
mu_state.consecutive_restoration_failures = 0;
if !accepted_by_soft_resto {
mu_state.consecutive_soft_restoration = 0;
}
match update_watchdog(
&mut state,
problem,
options,
iteration,
alpha_primal_max,
&mut filter,
&mut lbfgs_state,
&mut watchdog,
linear_constraints.as_deref(),
lbfgs_mode,
) {
WatchdogDecision::Proceed => {}
WatchdogDecision::Continue => continue,
}
timings.line_search += t_ls.elapsed();
let mu_ks = update_dual_variables(
&mut state,
&mu_state,
alpha_dual_max,
&mut dy_tracker,
);
match reevaluate_after_step(
&mut state,
problem,
options,
&mut lbfgs_state,
&mut filter,
&mut restoration,
&mut timings,
&x_pre_step,
linear_constraints.as_deref(),
lbfgs_mode,
deadline,
) {
PostStepEvalDecision::Proceed => {}
PostStepEvalDecision::Continue => continue,
PostStepEvalDecision::Return(result) => return result,
}
reset_slack_multipliers(&mut state, mu_ks);
track_best_feasible(&state, options, &mut best_feasible);
update_barrier_parameter(
&mut state,
&mut mu_state,
&mut filter,
&mut last_mehrotra_sigma,
options,
);
track_post_step_acceptable(&mut state, options);
}
finalize_after_max_iter(
&state, options, &feas, &timings, ipm_start,
)
}
fn try_classify_max_iter_infeasibility(
state: &SolverState,
final_theta: f64,
feas: &FeasibilityTracker,
) -> Option<SolveResult> {
let grad_theta_norm = compute_grad_theta_norm(state);
let stationarity_tol = 1e-4 * final_theta.max(1.0);
if grad_theta_norm < stationarity_tol {
return Some(make_result(state, SolveStatus::LocalInfeasibility));
}
if final_theta > 1e4 && feas.history.len() >= feas.history_len {
let min_theta = slice_min(&feas.history);
if final_theta > 0.01 * min_theta {
return Some(make_result(state, SolveStatus::Infeasible));
}
}
None
}
fn print_max_iter_diagnostics(
state: &SolverState,
options: &SolverOptions,
) {
if options.print_level < 5 {
return;
}
let final_primal_inf = compute_primal_inf_max_at_state(state);
let final_dual_inf = compute_dual_inf_at_state(state);
let final_dual_inf_unscaled = compute_dual_inf_unscaled_at_state(state);
let final_compl = compute_compl_err_at_state(state);
let s_d = compute_s_d_at_state(state);
rip_log!(
"ripopt: MaxIter diag: pr={:.2e} du={:.2e}(t={:.2e}) du_u={:.2e}(t={:.0e}) co={:.2e}(t={:.2e}) mu={:.2e} sd={:.1} ac={}",
final_primal_inf,
final_dual_inf, options.tol * s_d,
final_dual_inf_unscaled, options.dual_inf_tol,
final_compl, 10.0 * options.compl_inf_tol,
state.mu, s_d, state.consecutive_acceptable,
);
}
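/// Builds the result returned once the iteration limit is hit: classifies never-feasible
/// runs as (locally) infeasible when theta is stationary or very large, otherwise reports
/// Optimal or Acceptable if the final residuals happen to satisfy the tolerances, and
/// MaxIterations if not.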
fn finalize_after_max_iter(
state: &SolverState,
options: &SolverOptions,
feas: &FeasibilityTracker,
timings: &PhaseTimings,
ipm_start: Instant,
) -> SolveResult {
print_max_iter_diagnostics(state, options);
let final_theta = state.constraint_violation();
if !feas.ever_feasible && final_theta > options.constr_viol_tol {
if let Some(result) = try_classify_max_iter_infeasibility(
state, final_theta, feas,
) {
return result;
}
}
if options.print_level >= 5 {
timings.print_summary(options.max_iter, ipm_start.elapsed());
}
let primal_inf = state.constraint_violation();
let dual_inf = compute_dual_inf_at_state(state);
let compl_inf = compute_compl_err_at_state(state);
if primal_inf <= options.constr_viol_tol
&& dual_inf <= options.dual_inf_tol
&& compl_inf <= options.compl_inf_tol
&& primal_inf <= options.tol
&& dual_inf <= options.tol
&& compl_inf <= options.tol
{
return make_result(state, SolveStatus::Optimal);
}
if primal_inf <= 1e-2
&& dual_inf <= 1e10
&& compl_inf <= 1e-2
&& primal_inf <= 1e-6
&& dual_inf <= 1e-6
&& compl_inf <= 1e-6
{
return make_result(state, SolveStatus::Acceptable);
}
make_result(state, SolveStatus::MaxIterations)
}
fn init_soc_constraint_residuals(
state: &SolverState,
g_trial: &[f64],
) -> (Vec<f64>, Vec<f64>) {
let m = state.m;
let mut c_soc = vec![0.0; m];
let mut latest_trial_c = vec![0.0; m];
for i in 0..m {
if constraint_is_equality(state, i) || state.g_l[i].is_finite() {
c_soc[i] = state.g[i] - state.g_l[i];
latest_trial_c[i] = g_trial[i] - state.g_l[i];
} else if state.g_u[i].is_finite() {
c_soc[i] = state.g[i] - state.g_u[i];
latest_trial_c[i] = g_trial[i] - state.g_u[i];
}
}
(c_soc, latest_trial_c)
}
fn compute_soc_alpha_and_trial_x(
state: &SolverState,
dx_soc: &[f64],
tau: f64,
) -> (Vec<f64>, f64) {
let alpha_primal_soc =
fraction_to_boundary_primal_x(state, dx_soc, tau).clamp(0.0, 1.0);
let x_soc = compute_clamped_trial_x(state, dx_soc, alpha_primal_soc);
(x_soc, alpha_primal_soc)
}
enum SocTrialOutcome {
Accepted { x_soc: Vec<f64>, obj_soc: f64, g_soc: Vec<f64> },
Abort,
NotAccepted { g_soc: Vec<f64> },
}
#[allow(clippy::too_many_arguments)]
fn evaluate_soc_trial_and_check<P: NlpProblem>(
state: &SolverState,
problem: &P,
options: &SolverOptions,
filter: &Filter,
x_soc: Vec<f64>,
n: usize,
m: usize,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
kappa_soc: f64,
theta_prev_soc: &mut f64,
) -> SocTrialOutcome {
let mut obj_soc = f64::INFINITY;
if !problem.objective(&x_soc, true, &mut obj_soc) || !obj_soc.is_finite() {
return SocTrialOutcome::Abort;
}
let mut g_soc = vec![0.0; m];
if !problem.constraints(&x_soc, false, &mut g_soc) {
return SocTrialOutcome::Abort;
}
if g_soc.iter().any(|v| !v.is_finite()) {
return SocTrialOutcome::Abort;
}
let theta_soc = theta_for_g(state, &g_soc);
if theta_soc >= kappa_soc * *theta_prev_soc {
return SocTrialOutcome::Abort;
}
*theta_prev_soc = theta_soc;
let phi_soc = compute_barrier_phi(
obj_soc, &x_soc, &g_soc, state, n, m, options.constraint_slack_barrier,
);
let (acceptable, _) = filter.check_acceptability(
theta_current,
phi_current,
theta_soc,
phi_soc,
grad_phi_step,
alpha,
);
if acceptable {
SocTrialOutcome::Accepted { x_soc, obj_soc, g_soc }
} else {
SocTrialOutcome::NotAccepted { g_soc }
}
}
fn update_soc_latest_trial_c(
state: &SolverState,
g_soc: &[f64],
latest_trial_c: &mut [f64],
) {
for i in 0..state.m {
if constraint_is_equality(state, i) || state.g_l[i].is_finite() {
latest_trial_c[i] = g_soc[i] - state.g_l[i];
} else if state.g_u[i].is_finite() {
latest_trial_c[i] = g_soc[i] - state.g_u[i];
}
}
}
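/// Second-order-correction loop shared by the dense, condensed, and sparse-condensed KKT
/// paths: accumulates the constraint residuals of successive trial points, asks
/// `solve_dx` for a corrected primal step, and returns the first trial the filter
/// accepts, aborting once theta fails to contract by the factor kappa_soc.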
#[allow(clippy::too_many_arguments)]
fn run_soc_loop<P: NlpProblem, F: FnMut(&[f64]) -> Option<Vec<f64>>>(
state: &SolverState,
problem: &P,
options: &SolverOptions,
filter: &Filter,
g_trial: &[f64],
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
mut solve_dx: F,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
let n = state.n;
let m = state.m;
if m == 0 {
return None;
}
let kappa_soc = 0.99;
let tau = (1.0 - state.mu).max(options.tau_min);
let (mut c_soc, mut latest_trial_c) = init_soc_constraint_residuals(state, g_trial);
let mut alpha_primal_soc = alpha;
let mut theta_prev_soc = theta_for_g(state, g_trial);
for _soc_iter in 0..options.max_soc {
for i in 0..m {
c_soc[i] += alpha_primal_soc * latest_trial_c[i];
}
let dx_soc = solve_dx(&c_soc)?;
let (x_soc, alpha_primal_soc_new) = compute_soc_alpha_and_trial_x(state, &dx_soc, tau);
alpha_primal_soc = alpha_primal_soc_new;
match evaluate_soc_trial_and_check(
state, problem, options, filter, x_soc, n, m,
theta_current, phi_current, grad_phi_step, alpha,
kappa_soc, &mut theta_prev_soc,
) {
SocTrialOutcome::Accepted { x_soc, obj_soc, g_soc } => {
return Some((x_soc, obj_soc, g_soc, alpha_primal_soc));
}
SocTrialOutcome::Abort => return None,
SocTrialOutcome::NotAccepted { g_soc } => {
update_soc_latest_trial_c(state, &g_soc, &mut latest_trial_c);
}
}
}
None
}
#[allow(clippy::too_many_arguments)]
fn attempt_soc<P: NlpProblem>(
state: &SolverState,
problem: &P,
_x_trial: &[f64],
g_trial: &[f64],
solver: &mut dyn LinearSolver,
kkt: &kkt::KktSystem,
filter: &Filter,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
options: &SolverOptions,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
let n = state.n;
let m = state.m;
run_soc_loop(
state, problem, options, filter, g_trial,
theta_current, phi_current, grad_phi_step, alpha,
|c_soc| {
let mut rhs_soc = kkt.rhs.clone();
for i in 0..m {
rhs_soc[n + i] = -c_soc[i];
}
let mut sol_soc = vec![0.0; n + m];
if solver.solve(&rhs_soc, &mut sol_soc).is_err() {
return None;
}
Some(sol_soc[..n].to_vec())
},
)
}
#[allow(clippy::too_many_arguments)]
fn attempt_soc_condensed<P: NlpProblem>(
state: &SolverState,
problem: &P,
g_trial: &[f64],
solver: &mut DenseLdl,
condensed: &kkt::CondensedKktSystem,
filter: &Filter,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
options: &SolverOptions,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
run_soc_loop(
state, problem, options, filter, g_trial,
theta_current, phi_current, grad_phi_step, alpha,
|c_soc| kkt::solve_condensed_soc(condensed, solver, c_soc).ok(),
)
}
#[allow(clippy::too_many_arguments)]
fn attempt_soc_sparse_condensed<P: NlpProblem>(
state: &SolverState,
problem: &P,
g_trial: &[f64],
solver: &mut dyn LinearSolver,
condensed: &kkt::SparseCondensedKktSystem,
filter: &Filter,
theta_current: f64,
phi_current: f64,
grad_phi_step: f64,
alpha: f64,
options: &SolverOptions,
) -> Option<(Vec<f64>, f64, Vec<f64>, f64)> {
run_soc_loop(
state, problem, options, filter, g_trial,
theta_current, phi_current, grad_phi_step, alpha,
|c_soc| kkt::solve_sparse_condensed_soc(condensed, solver, c_soc).ok(),
)
}
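/// Re-seeds the bound multipliers from `mu / slack` after restoration. If any
/// re-seeded multiplier exceeds a fixed threshold, the whole set is reset to 1.0
/// instead; the return value reports whether that "nuclear" reset happened so the
/// constraint-slack multipliers can be reset consistently.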
fn reset_bound_multipliers_after_restoration(state: &mut SolverState, n: usize) -> bool {
let bound_mult_reset_threshold = 1000.0;
let mu_for_reset = state.mu;
let mut z_max: f64 = 0.0;
for i in 0..n {
if state.x_l[i].is_finite() {
let slack = (state.x[i] - state.x_l[i]).max(1e-12);
state.z_l[i] = mu_for_reset / slack;
z_max = z_max.max(state.z_l[i]);
} else {
state.z_l[i] = 0.0;
}
if state.x_u[i].is_finite() {
let slack = (state.x_u[i] - state.x[i]).max(1e-12);
state.z_u[i] = mu_for_reset / slack;
z_max = z_max.max(state.z_u[i]);
} else {
state.z_u[i] = 0.0;
}
}
let nuclear_reset = z_max > bound_mult_reset_threshold;
if nuclear_reset {
for i in 0..n {
state.z_l[i] = if state.x_l[i].is_finite() { 1.0 } else { 0.0 };
state.z_u[i] = if state.x_u[i].is_finite() { 1.0 } else { 0.0 };
}
}
nuclear_reset
}
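/// Re-estimates the constraint multipliers `y` after restoration with a
/// least-squares solve; the estimate is discarded (and `y` zeroed) when the solve
/// fails or its infinity norm exceeds `constr_mult_init_max`.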
fn recompute_y_after_restoration(
state: &mut SolverState,
options: &SolverOptions,
n: usize,
m: usize,
) {
if m == 0 {
return;
}
let y_ls_result = compute_ls_multiplier_estimate_augmented(
&state.grad_f, &state.jac_rows, &state.jac_cols, &state.jac_vals,
&state.g_l, &state.g_u, n, m,
Some(&state.z_l), Some(&state.z_u),
);
let y_accepted = match y_ls_result {
Some(y_ls) => {
let max_abs = linf_norm(&y_ls);
if max_abs > options.constr_mult_init_max { None } else { Some(y_ls) }
}
None => None,
};
if let Some(y_ls) = y_accepted {
state.y.copy_from_slice(&y_ls);
} else {
state.y.fill(0.0);
}
}
fn reset_constraint_slack_multipliers_after_restoration(
state: &mut SolverState,
m: usize,
nuclear_reset: bool,
) {
let mu_r = state.mu;
for i in 0..m {
if constraint_is_equality(state, i) {
state.v_l[i] = 0.0;
state.v_u[i] = 0.0;
continue;
}
if state.g_l[i].is_finite() {
let slack = (state.g[i] - state.g_l[i]).max(1e-12);
state.v_l[i] = if nuclear_reset { 1.0 } else { mu_r / slack };
} else {
state.v_l[i] = 0.0;
}
if state.g_u[i].is_finite() {
let slack = (state.g_u[i] - state.g[i]).max(1e-12);
state.v_u[i] = if nuclear_reset { 1.0 } else { mu_r / slack };
} else {
state.v_u[i] = 0.0;
}
}
}
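/// Installs a successful restoration result: copies the restored point, re-evaluates
/// the problem, resets bound, constraint and slack multipliers, clears the filter,
/// and re-centers `mu` on the average complementarity so the barrier loop can
/// continue from the restored iterate.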
fn apply_restoration_success<P: NlpProblem>(
state: &mut SolverState,
filter: &mut Filter,
mu_state: &mut MuState,
options: &SolverOptions,
n: usize,
m: usize,
problem: &P,
x_new: &[f64],
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
lbfgs_state: &mut Option<LbfgsIpmState>,
) {
state.x.copy_from_slice(x_new);
state.alpha_primal = 0.0;
let _ = evaluate_and_refresh_lbfgs(state, problem, lbfgs_state, linear_constraints, lbfgs_mode);
let nuclear_reset = reset_bound_multipliers_after_restoration(state, n);
recompute_y_after_restoration(state, options, n, m);
reset_constraint_slack_multipliers_after_restoration(state, m, nuclear_reset);
reset_filter_with_current_theta(state, filter);
state.consecutive_acceptable = 0;
let mu_compl = compute_avg_complementarity(state);
if mu_compl > 0.0 {
state.mu = mu_compl.max(options.mu_min).min(1e5);
}
if options.mu_strategy_adaptive {
mu_state.mode = MuMode::Free;
}
mu_state.first_iter_in_mode = true;
mu_state.ref_vals.clear();
mu_state.consecutive_restoration_failures = 0;
}
enum RestorationOutcome {
Success,
LocalInfeasibility,
Failed,
}
fn configure_restoration_inner_options(
options: &SolverOptions,
resto_mu: f64,
resto_dim: usize,
start_time: Instant,
) -> Option<SolverOptions> {
let mut inner_opts = options.clone();
inner_opts.max_iter = options.restoration_max_iter.max(500);
inner_opts.disable_nlp_restoration = true;
inner_opts.print_level = if options.print_level >= 5 { 3 } else { 0 };
inner_opts.mu_init = resto_mu;
inner_opts.stall_iter_limit = 0;
inner_opts.tol = 1e-7;
if options.max_wall_time > 0.0 {
let remaining = options.max_wall_time - start_time.elapsed().as_secs_f64();
if remaining < 0.5 {
return None;
}
inner_opts.max_wall_time = remaining;
}
inner_opts.early_stall_timeout = if options.early_stall_timeout > 0.0 {
if resto_dim > 500 {
options.early_stall_timeout
} else {
options.early_stall_timeout.min(3.0)
}
} else {
3.0
};
Some(inner_opts)
}
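/// NLP-based feasibility restoration: builds the auxiliary `RestorationNlp`
/// (with elastic slacks `p` and `n`), solves it with a nested IPM call, and
/// classifies whether the restored point reduces infeasibility enough for the
/// main iteration to resume.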
fn attempt_nlp_restoration<P: NlpProblem>(
problem: &P,
state: &SolverState,
filter: &Filter,
options: &SolverOptions,
theta_current: f64,
start_time: Instant,
) -> (Vec<f64>, RestorationOutcome) {
let n = state.n;
let m = state.m;
if options.print_level >= 5 {
rip_log!(
"ripopt: Entering NLP restoration (theta={:.2e}, mu={:.2e})",
theta_current, state.mu
);
}
let rho = 1000.0;
let c_inf = compute_primal_inf_max_at_state(state);
let resto_mu = state.mu.max(c_inf);
let resto_nlp = RestorationNlp::new(problem, &state.x, resto_mu, rho, 1.0);
let inner_opts = match configure_restoration_inner_options(
options, resto_mu, resto_nlp.num_variables() + resto_nlp.num_constraints(), start_time,
) {
Some(opts) => opts,
None => return (state.x[..n].to_vec(), RestorationOutcome::Failed),
};
let result = solve_ipm(&resto_nlp, &inner_opts);
let x_nlp: Vec<f64> = result.x[..n].to_vec();
let mut g_new = vec![0.0; m];
if !problem.constraints(&x_nlp, true, &mut g_new)
|| g_new.iter().any(|v| !v.is_finite())
{
return (x_nlp, RestorationOutcome::Failed);
}
let theta_new = theta_for_g(state, &g_new);
let mut phi_new = f64::INFINITY;
if !problem.objective(&x_nlp, false, &mut phi_new) || !phi_new.is_finite() {
return (x_nlp, RestorationOutcome::Failed);
}
if options.print_level >= 5 {
let sum_p: f64 = result.x[n..n + m].iter().sum();
let sum_n: f64 = result.x[n + m..n + 2 * m].iter().sum();
rip_log!(
"ripopt: NLP restoration result: status={:?}, theta_new={:.2e} (was {:.2e}), phi_new={:.2e}, sum_p={:.2e}, sum_n={:.2e}, iters={}",
result.status, theta_new, theta_current, phi_new, sum_p, sum_n, result.iterations
);
}
let inner_converged = result.status == SolveStatus::Optimal;
let outcome = classify_restoration_outcome(
filter, options, theta_current, theta_new, phi_new, inner_converged,
);
(x_nlp, outcome)
}
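/// Filter acceptability test for the restored iterate: rejected if it exceeds
/// `theta_max` or is dominated by any filter entry in both constraint violation
/// and barrier objective (up to the usual gamma margins).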
fn filter_accepts_restored_iterate(
filter: &Filter,
theta_new: f64,
phi_new: f64,
) -> bool {
let theta_max = filter.theta_max();
let gamma_theta = filter.gamma_theta();
let gamma_phi = filter.gamma_phi();
if theta_new.is_nan() || phi_new.is_nan() || theta_new > theta_max {
return false;
}
for entry in filter.entries() {
if theta_new >= (1.0 - gamma_theta) * entry.theta
&& phi_new >= entry.phi - gamma_phi * entry.theta
{
return false;
}
}
true
}
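/// Restoration counts as a success when the new constraint violation drops below
/// the violation tolerance, is at least halved, or is reduced by 10% and accepted
/// by the filter. If the inner solve converged without achieving any of these,
/// the problem is declared locally infeasible; otherwise restoration failed.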
fn classify_restoration_outcome(
filter: &Filter,
options: &SolverOptions,
theta_current: f64,
theta_new: f64,
phi_new: f64,
inner_converged: bool,
) -> RestorationOutcome {
if theta_new < options.constr_viol_tol {
return RestorationOutcome::Success;
}
if theta_new <= 0.5 * theta_current {
return RestorationOutcome::Success;
}
if theta_new < 0.9 * theta_current
&& filter_accepts_restored_iterate(filter, theta_new, phi_new)
{
return RestorationOutcome::Success;
}
if inner_converged {
return RestorationOutcome::LocalInfeasibility;
}
RestorationOutcome::Failed
}
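/// Quality-function barrier update: samples `n_candidates` values of mu
/// log-uniformly in `[mu_lower, mu_upper]` and returns the one minimizing the sum
/// of primal, dual and complementarity errors, optionally penalized by a
/// centrality measure.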
#[allow(dead_code)]
fn quality_function_mu(
state: &SolverState,
options: &SolverOptions,
mu_lower: f64,
mu_upper: f64,
n_candidates: usize,
) -> f64 {
if mu_upper <= mu_lower || n_candidates < 2 {
return mu_upper;
}
let pi = state.constraint_violation();
let di = compute_dual_inf_at_state(state);
let log_min = mu_lower.max(1e-20).ln();
let log_max = mu_upper.ln();
let mut best_mu = mu_upper;
let mut best_q = f64::INFINITY;
for k in 0..n_candidates {
let t = k as f64 / (n_candidates - 1) as f64;
let mu_candidate = (log_min + t * (log_max - log_min)).exp();
let ci = convergence::complementarity_error(
&state.x, &state.x_l, &state.x_u, &state.z_l, &state.z_u, mu_candidate,
);
let mut q = pi + di + ci;
if options.quality_function_centrality {
let avg = compute_avg_complementarity(state);
let xi = compute_centrality_xi(state, avg);
if xi > 1e-20 {
q += ci / xi;
} else {
q = f64::INFINITY;
}
}
if q < best_q {
best_q = q;
best_mu = mu_candidate;
}
}
best_mu
}
fn compute_ls_multiplier_rhs(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
n: usize,
m: usize,
z_l: Option<&[f64]>,
z_u: Option<&[f64]>,
) -> Vec<f64> {
let mut rhs_grad = grad_f.to_vec();
if let Some(zl) = z_l {
for i in 0..n { rhs_grad[i] -= zl[i]; }
}
if let Some(zu) = z_u {
for i in 0..n { rhs_grad[i] += zu[i]; }
}
let mut b = vec![0.0; m];
for (idx, (&row, &col)) in jac_rows.iter().zip(jac_cols.iter()).enumerate() {
b[row] -= jac_vals[idx] * rhs_grad[col];
}
b
}
fn apply_warm_start_multipliers<P: NlpProblem>(
problem: &P,
y: &mut [f64],
z_l: &mut [f64],
z_u: &mut [f64],
) {
let mut ws_lam_g = vec![0.0; y.len()];
let mut ws_z_l = vec![0.0; z_l.len()];
let mut ws_z_u = vec![0.0; z_u.len()];
if problem.initial_multipliers(&mut ws_lam_g, &mut ws_z_l, &mut ws_z_u) {
if !y.is_empty() {
y.copy_from_slice(&ws_lam_g);
}
z_l.copy_from_slice(&ws_z_l);
z_u.copy_from_slice(&ws_z_u);
}
}
fn init_bound_multipliers(
x: &[f64],
x_l: &[f64],
x_u: &[f64],
mu_init: f64,
) -> (Vec<f64>, Vec<f64>) {
let n = x.len();
let mut z_l = vec![0.0; n];
let mut z_u = vec![0.0; n];
for i in 0..n {
if x_l[i].is_finite() {
let slack = (x[i] - x_l[i]).max(1e-20);
z_l[i] = mu_init / slack;
}
if x_u[i].is_finite() {
let slack = (x_u[i] - x[i]).max(1e-20);
z_u[i] = mu_init / slack;
}
}
(z_l, z_u)
}
fn relax_fixed_variable_bounds(x_l: &mut [f64], x_u: &mut [f64]) {
for i in 0..x_l.len() {
if x_l[i].is_finite() && x_u[i].is_finite() && (x_u[i] - x_l[i]).abs() < 1e-10 {
let center = (x_l[i] + x_u[i]) / 2.0;
let relax = 1e-8 * center.abs().max(1.0);
x_l[i] = center - relax;
x_u[i] = center + relax;
}
}
}
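/// Moves the user's initial point strictly inside its bounds by at least
/// `bound_push` (capped at `bound_frac` times the bound range for two-sided
/// bounds), so the first interior-point iterate has positive slacks.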
fn push_initial_point_from_bounds(
x: &mut [f64],
x_l: &[f64],
x_u: &[f64],
options: &SolverOptions,
) {
for i in 0..x.len() {
if x_l[i].is_finite() && x_u[i].is_finite() {
let range = x_u[i] - x_l[i];
let push = options.bound_push.min(options.bound_frac * range);
x[i] = x[i].max(x_l[i] + push).min(x_u[i] - push);
} else if x_l[i].is_finite() {
x[i] = x[i].max(x_l[i] + options.bound_push);
} else if x_u[i].is_finite() {
x[i] = x[i].min(x_u[i] - options.bound_push);
}
}
}
#[allow(clippy::too_many_arguments)]
fn compute_initial_y_with_ls<P: NlpProblem>(
problem: &P,
options: &SolverOptions,
x: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
jac_nnz: usize,
) -> Vec<f64> {
if !(options.least_squares_mult_init && m > 0 && m <= 500) {
return vec![0.0; m];
}
let mut grad_f_init = vec![0.0; n];
let grad_ok = problem.gradient(x, true, &mut grad_f_init);
let mut jac_vals_init = vec![0.0; jac_nnz];
let jac_ok = problem.jacobian_values(x, false, &mut jac_vals_init);
if !grad_ok || !jac_ok {
return vec![0.0; m];
}
compute_ls_multiplier_estimate(
&grad_f_init,
jac_rows,
jac_cols,
&jac_vals_init,
g_l,
g_u,
n,
m,
options.constr_mult_init_max,
)
.unwrap_or_else(|| vec![0.0; m])
}
fn compute_ls_multiplier_estimate(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
max_abs_threshold: f64,
) -> Option<Vec<f64>> {
compute_ls_multiplier_estimate_with_z(
grad_f, jac_rows, jac_cols, jac_vals, g_l, g_u, n, m, max_abs_threshold, None, None,
)
}
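/// Builds the symmetric augmented matrix [[I, J^T], [J, 0]] used for the
/// least-squares multiplier estimate; only the upper triangle is stored in
/// triplet form.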
fn build_ls_augmented_matrix(
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
n: usize,
m: usize,
) -> crate::linear_solver::SparseSymmetricMatrix {
use crate::linear_solver::SparseSymmetricMatrix;
let nnz_est = n + jac_rows.len() + m;
let mut ssm = SparseSymmetricMatrix {
n: n + m,
triplet_rows: Vec::with_capacity(nnz_est),
triplet_cols: Vec::with_capacity(nnz_est),
triplet_vals: Vec::with_capacity(nnz_est),
};
for i in 0..n {
ssm.triplet_rows.push(i);
ssm.triplet_cols.push(i);
ssm.triplet_vals.push(1.0);
}
for (idx, (&row, &col)) in jac_rows.iter().zip(jac_cols.iter()).enumerate() {
ssm.triplet_rows.push(col);
ssm.triplet_cols.push(n + row);
ssm.triplet_vals.push(jac_vals[idx]);
}
for j in 0..m {
ssm.triplet_rows.push(n + j);
ssm.triplet_cols.push(n + j);
ssm.triplet_vals.push(0.0);
}
ssm
}
fn compute_ls_multiplier_estimate_augmented(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
z_l: Option<&[f64]>,
z_u: Option<&[f64]>,
) -> Option<Vec<f64>> {
if m == 0 {
return None;
}
// Sign convention: the estimate should minimize ||grad_f + J^T y - z_l + z_u||,
// matching compute_ls_multiplier_rhs, so the right-hand side carries a minus sign.
let mut rhs = vec![0.0_f64; n + m];
for i in 0..n {
rhs[i] = -grad_f[i];
if let Some(zl) = z_l { rhs[i] += zl[i]; }
if let Some(zu) = z_u { rhs[i] -= zu[i]; }
}
let matrix = KktMatrix::Sparse(build_ls_augmented_matrix(
jac_rows, jac_cols, jac_vals, n, m,
));
let mut solver = new_sparse_solver();
if solver.factor(&matrix).is_err() {
return None;
}
let mut sol = vec![0.0_f64; n + m];
if solver.solve(&rhs, &mut sol).is_err() {
return None;
}
if sol.iter().any(|v| !v.is_finite()) {
return None;
}
let mut y_ls: Vec<f64> = sol[n..].to_vec();
fix_inequality_mult_signs(&mut y_ls, g_l, g_u, m);
Some(y_ls)
}
fn compute_ls_multiplier_estimate_with_z(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
max_abs_threshold: f64,
z_l: Option<&[f64]>,
z_u: Option<&[f64]>,
) -> Option<Vec<f64>> {
if m > 500 {
return compute_ls_multiplier_estimate_sparse(
grad_f, jac_rows, jac_cols, jac_vals, g_l, g_u, n, m, max_abs_threshold, None,
z_l, z_u,
);
}
if m == 0 {
return None;
}
let b = compute_ls_multiplier_rhs(grad_f, jac_rows, jac_cols, jac_vals, n, m, z_l, z_u);
let mut j_dense = vec![0.0; m * n];
for (idx, (&row, &col)) in jac_rows.iter().zip(jac_cols.iter()).enumerate() {
j_dense[row * n + col] = jac_vals[idx];
}
let mut a_mat = SymmetricMatrix::zeros(m);
for i in 0..m {
let row_i = &j_dense[i * n..(i + 1) * n];
for j in 0..=i {
let row_j = &j_dense[j * n..(j + 1) * n];
a_mat.set(i, j, dot_product(row_i, row_j));
}
}
let mut ls_solver = DenseLdl::new();
let mut y_ls = vec![0.0; m];
let factored = ls_solver.bunch_kaufman_factor(&a_mat);
let solved = factored.is_ok() && ls_solver.solve(&b, &mut y_ls).is_ok();
if !solved {
return None;
}
let max_abs = linf_norm(&y_ls);
if max_abs > max_abs_threshold {
return None;
}
fix_inequality_mult_signs(&mut y_ls, g_l, g_u, m);
Some(y_ls)
}
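/// Assembles J * J^T (upper triangle, triplet form) for the normal equations of
/// the sparse least-squares multiplier estimate, with a tiny diagonal
/// regularization so the factorization remains usable for rank-deficient
/// Jacobians.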
fn build_sparse_normal_matrix_jjt(
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
n: usize,
m: usize,
) -> crate::linear_solver::SparseSymmetricMatrix {
use crate::linear_solver::SparseSymmetricMatrix;
let mut col_entries: Vec<Vec<(usize, f64)>> = vec![vec![]; n];
for (idx, (&row, &col)) in jac_rows.iter().zip(jac_cols.iter()).enumerate() {
col_entries[col].push((row, jac_vals[idx]));
}
let total_col_nnz: usize = col_entries.iter().map(|c| c.len()).sum();
let nnz_est = total_col_nnz * 4;
use std::collections::HashMap;
let mut triplet_map: HashMap<(usize, usize), f64> = HashMap::with_capacity(nnz_est);
for k in 0..n {
let entries = &col_entries[k];
for &(i, vi) in entries.iter() {
for &(j, vj) in entries.iter() {
if i >= j {
*triplet_map.entry((j, i)).or_insert(0.0) += vi * vj;
}
}
}
}
let reg = 1e-12;
for i in 0..m {
*triplet_map.entry((i, i)).or_insert(0.0) += reg;
}
let nnz = triplet_map.len();
let mut ssm = SparseSymmetricMatrix {
n: m,
triplet_rows: Vec::with_capacity(nnz),
triplet_cols: Vec::with_capacity(nnz),
triplet_vals: Vec::with_capacity(nnz),
};
for (&(r, c), &v) in &triplet_map {
ssm.triplet_rows.push(r);
ssm.triplet_cols.push(c);
ssm.triplet_vals.push(v);
}
ssm
}
fn compute_ls_multiplier_estimate_sparse(
grad_f: &[f64],
jac_rows: &[usize],
jac_cols: &[usize],
jac_vals: &[f64],
g_l: &[f64],
g_u: &[f64],
n: usize,
m: usize,
max_abs_threshold: f64,
solver: Option<&mut Box<dyn LinearSolver>>,
z_l: Option<&[f64]>,
z_u: Option<&[f64]>,
) -> Option<Vec<f64>> {
if m == 0 {
return None;
}
let b = compute_ls_multiplier_rhs(grad_f, jac_rows, jac_cols, jac_vals, n, m, z_l, z_u);
let ssm = build_sparse_normal_matrix_jjt(jac_rows, jac_cols, jac_vals, n, m);
let matrix = KktMatrix::Sparse(ssm);
let mut y_ls = vec![0.0; m];
let solved = if let Some(ls) = solver {
ls.factor(&matrix).is_ok() && ls.solve(&b, &mut y_ls).is_ok()
} else {
let mut ls = new_sparse_solver();
ls.factor(&matrix).is_ok() && ls.solve(&b, &mut y_ls).is_ok()
};
if !solved {
return None;
}
let max_abs = linf_norm(&y_ls);
if max_abs > max_abs_threshold {
return None;
}
fix_inequality_mult_signs(&mut y_ls, g_l, g_u, m);
Some(y_ls)
}
fn fix_inequality_mult_signs(y_ls: &mut [f64], g_l: &[f64], g_u: &[f64], m: usize) {
for i in 0..m {
if convergence::is_equality_constraint(g_l[i], g_u[i]) {
continue;
}
let has_lower = g_l[i].is_finite();
let has_upper = g_u[i].is_finite();
if has_lower && !has_upper && y_ls[i] < 0.0 {
y_ls[i] = 0.0;
} else if has_upper && !has_lower && y_ls[i] > 0.0 {
y_ls[i] = 0.0;
} else if !has_lower && !has_upper {
y_ls[i] = 0.0;
}
}
}
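/// Ipopt-style residual scaling s_d = max(s_max, sum / count) / s_max with
/// s_max = 100: very large multipliers relax the dual and complementarity
/// tolerances proportionally rather than making the stopping test unreachable.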
fn compute_residual_scaling(sum: f64, count: usize) -> f64 {
let s_max: f64 = 100.0;
if count > 0 {
s_max.max(sum / count as f64) / s_max
} else {
1.0
}
}
fn compute_s_d_at_state(state: &SolverState) -> f64 {
compute_residual_scaling(compute_multiplier_sum(state), state.m + 2 * state.n)
}
fn theta_for_g(state: &SolverState, g: &[f64]) -> f64 {
convergence::primal_infeasibility(g, &state.g_l, &state.g_u)
}
fn accumulate_jt_y(state: &SolverState, target: &mut [f64]) {
for (idx, (&row, &col)) in state.jac_rows.iter().zip(state.jac_cols.iter()).enumerate() {
target[col] += state.jac_vals[idx] * state.y[row];
}
}
fn recover_active_set_z(state: &SolverState, gj: &[f64], n: usize) -> (Vec<f64>, Vec<f64>) {
let mut zl = vec![0.0; n];
let mut zu = vec![0.0; n];
let kc = 1e10;
for i in 0..n {
if gj[i] > 0.0 && state.x_l[i].is_finite() {
if gj[i] * slack_xl(state, i) <= kc * state.mu.max(1e-20) {
zl[i] = gj[i];
}
} else if gj[i] < 0.0 && state.x_u[i].is_finite() {
if (-gj[i]) * slack_xu(state, i) <= kc * state.mu.max(1e-20) {
zu[i] = -gj[i];
}
}
}
(zl, zu)
}
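/// Fraction-to-the-boundary parameter: tied to the current NLP error in adaptive
/// (free) mu mode and to the current barrier parameter otherwise, never dropping
/// below `tau_min`.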
fn compute_tau(
state: &SolverState,
options: &SolverOptions,
mu_state: &MuState,
primal_inf: f64,
dual_inf: f64,
compl_inf: f64,
) -> f64 {
if mu_state.mode == MuMode::Free {
let nlp_error = primal_inf + dual_inf + compl_inf;
(1.0 - nlp_error).max(options.tau_min)
} else {
(1.0 - state.mu).max(options.tau_min)
}
}
fn fraction_to_boundary_dual_z_min(state: &SolverState, dz_l: &[f64], dz_u: &[f64], tau: f64) -> f64 {
filter::fraction_to_boundary(&state.z_l, dz_l, tau)
.min(filter::fraction_to_boundary(&state.z_u, dz_u, tau))
}
fn fraction_to_boundary_primal_x(state: &SolverState, dx: &[f64], tau: f64) -> f64 {
let mut alpha = 1.0_f64;
for i in 0..state.n {
if state.x_l[i].is_finite() && dx[i] < 0.0 {
let slack = state.x[i] - state.x_l[i];
alpha = alpha.min(-tau * slack / dx[i]);
}
if state.x_u[i].is_finite() && dx[i] > 0.0 {
let slack = state.x_u[i] - state.x[i];
alpha = alpha.min(tau * slack / dx[i]);
}
}
alpha
}
fn install_step_directions(
state: &mut SolverState,
dx: Vec<f64>,
dy: Vec<f64>,
dz_l: Vec<f64>,
dz_u: Vec<f64>,
) {
state.dx = dx;
state.dy = dy;
state.dz_l = dz_l;
state.dz_u = dz_u;
}
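/// Infinity norm of J^T * v, where v_i is the signed violation of constraint i
/// (zero when the constraint is satisfied), i.e. the gradient of half the squared
/// infeasibility measure away from kinks.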
fn compute_grad_theta_norm(state: &SolverState) -> f64 {
let n = state.n;
let m = state.m;
let mut violation = vec![0.0; m];
for i in 0..m {
if constraint_is_equality(state, i) {
violation[i] = state.g[i] - state.g_l[i];
} else if state.g_l[i].is_finite() && state.g[i] < state.g_l[i] {
violation[i] = state.g[i] - state.g_l[i];
} else if state.g_u[i].is_finite() && state.g[i] > state.g_u[i] {
violation[i] = state.g[i] - state.g_u[i];
}
}
let mut grad_theta = vec![0.0; n];
for (idx, (&row, &col)) in state.jac_rows.iter().zip(state.jac_cols.iter()).enumerate() {
grad_theta[col] += state.jac_vals[idx] * violation[row];
}
linf_norm(&grad_theta)
}
fn compute_dual_inf_at_state(state: &SolverState) -> f64 {
convergence::dual_infeasibility(
&state.grad_f, &state.jac_rows, &state.jac_cols, &state.jac_vals,
&state.y, &state.z_l, &state.z_u, state.n,
)
}
fn dual_inf_with_z(state: &SolverState, z_l: &[f64], z_u: &[f64]) -> f64 {
convergence::dual_infeasibility(
&state.grad_f, &state.jac_rows, &state.jac_cols, &state.jac_vals,
&state.y, z_l, z_u, state.n,
)
}
fn compute_primal_inf_max_at_state(state: &SolverState) -> f64 {
convergence::primal_infeasibility_max(&state.g, &state.g_l, &state.g_u)
}
fn compute_dual_inf_unscaled_at_state(state: &SolverState) -> f64 {
convergence::dual_infeasibility_scaled(
&state.grad_f, &state.jac_rows, &state.jac_cols, &state.jac_vals,
&state.y, &state.z_l, &state.z_u, state.n,
)
}
fn compl_err_with_z(state: &SolverState, z_l: &[f64], z_u: &[f64]) -> f64 {
convergence::complementarity_error(
&state.x, &state.x_l, &state.x_u, z_l, z_u, 0.0,
)
}
fn compute_compl_err_at_state(state: &SolverState) -> f64 {
convergence::complementarity_error(
&state.x, &state.x_l, &state.x_u, &state.z_l, &state.z_u, 0.0,
)
}
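/// l1 optimality error of the barrier problem for a given `mu`: dual residual
/// averaged over the variables, primal infeasibility averaged over the
/// constraints, plus the mean perturbed complementarity |slack * z - mu| over the
/// variable bounds.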
fn compute_pderror_e_mu(state: &SolverState, mu: f64) -> f64 {
let n = state.n;
let m = state.m;
let n_dual = n.max(1) as f64;
let n_pri = m.max(1) as f64;
let mut residual = state.grad_f[..n].to_vec();
for (idx, (&row, &col)) in state.jac_rows.iter().zip(state.jac_cols.iter()).enumerate() {
residual[col] += state.jac_vals[idx] * state.y[row];
}
for i in 0..n {
residual[i] -= state.z_l[i];
residual[i] += state.z_u[i];
}
let dual_l1: f64 = residual.iter().map(|r| r.abs()).sum();
let primal_l1 = convergence::primal_infeasibility(&state.g, &state.g_l, &state.g_u);
let mut compl_l1 = 0.0f64;
let mut n_compl = 0usize;
for i in 0..n {
if state.x_l[i].is_finite() {
let slack = (state.x[i] - state.x_l[i]).max(0.0);
compl_l1 += (slack * state.z_l[i] - mu).abs();
n_compl += 1;
}
if state.x_u[i].is_finite() {
let slack = (state.x_u[i] - state.x[i]).max(0.0);
compl_l1 += (slack * state.z_u[i] - mu).abs();
n_compl += 1;
}
}
let compl_term = if n_compl == 0 { 0.0 } else { compl_l1 / n_compl as f64 };
dual_l1 / n_dual + primal_l1 / n_pri + compl_term
}
fn obj_and_grad_finite(state: &SolverState) -> bool {
state.obj.is_finite() && state.grad_f.iter().all(|v| v.is_finite())
}
fn constraint_is_equality(state: &SolverState, i: usize) -> bool {
state.g_l[i].is_finite() && state.g_u[i].is_finite()
&& (state.g_l[i] - state.g_u[i]).abs() < 1e-15
}
fn reseed_bound_multipliers_from_mu(state: &mut SolverState, mu: f64) {
let n = state.n;
for i in 0..n {
if state.x_l[i].is_finite() {
state.z_l[i] = mu / slack_xl(state, i);
}
if state.x_u[i].is_finite() {
state.z_u[i] = mu / slack_xu(state, i);
}
}
}
fn boost_mu_and_switch_to_fixed_with_stall_reset(
state: &mut SolverState,
new_mu: f64,
mu_state: &mut MuState,
filter: &mut Filter,
stall: &mut ProgressStallTracker,
) {
state.mu = new_mu;
reset_stall_counters_and_filter(state, filter, stall);
mu_state.mode = MuMode::Fixed;
mu_state.first_iter_in_mode = true;
}
fn clamp_to_open_bounds(arr: &mut [f64], x_l: &[f64], x_u: &[f64], i: usize) {
if x_l[i].is_finite() {
arr[i] = arr[i].max(x_l[i] + 1e-14);
}
if x_u[i].is_finite() {
arr[i] = arr[i].min(x_u[i] - 1e-14);
}
}
fn slack_xl(state: &SolverState, i: usize) -> f64 {
(state.x[i] - state.x_l[i]).max(1e-20)
}
fn slack_xu(state: &SolverState, i: usize) -> f64 {
(state.x_u[i] - state.x[i]).max(1e-20)
}
fn slack_gl(state: &SolverState, i: usize) -> f64 {
(state.g[i] - state.g_l[i]).max(1e-20)
}
fn slack_gu(state: &SolverState, i: usize) -> f64 {
(state.g_u[i] - state.g[i]).max(1e-20)
}
fn compute_clamped_trial_x(state: &SolverState, dx: &[f64], alpha: f64) -> Vec<f64> {
let n = state.n;
let mut x_trial = vec![0.0; n];
#[allow(clippy::needless_range_loop)]
for i in 0..n {
x_trial[i] = state.x[i] + alpha * dx[i];
clamp_to_open_bounds(&mut x_trial, &state.x_l, &state.x_u, i);
}
x_trial
}
fn linf_norm(v: &[f64]) -> f64 {
v.iter().map(|x| x.abs()).fold(0.0f64, f64::max)
}
fn l2_norm(v: &[f64]) -> f64 {
v.iter().map(|x| x * x).sum::<f64>().sqrt()
}
fn slice_min(v: &[f64]) -> f64 {
v.iter().cloned().fold(f64::INFINITY, f64::min)
}
fn l1_norm(v: &[f64]) -> f64 {
v.iter().map(|x| x.abs()).sum::<f64>()
}
fn dot_product(a: &[f64], b: &[f64]) -> f64 {
a.iter().zip(b.iter()).map(|(x, y)| x * y).sum::<f64>()
}
fn sentinel_bounds_to_infinity(
lower: &mut [f64],
upper: &mut [f64],
options: &SolverOptions,
) {
for i in 0..lower.len() {
if lower[i] <= options.nlp_lower_bound_inf {
lower[i] = f64::NEG_INFINITY;
}
if upper[i] >= options.nlp_upper_bound_inf {
upper[i] = f64::INFINITY;
}
}
}
fn compute_sigma_from_state(state: &SolverState) -> Vec<f64> {
kkt::compute_sigma(&state.x, &state.x_l, &state.x_u, &state.z_l, &state.z_u)
}
fn recover_dz_from_state(state: &SolverState, dx: &[f64], mu: f64) -> (Vec<f64>, Vec<f64>) {
kkt::recover_dz(
&state.x, &state.x_l, &state.x_u,
&state.z_l, &state.z_u, dx, mu,
)
}
fn assemble_kkt_from_state(
state: &SolverState,
n: usize,
m: usize,
sigma: &[f64],
use_sparse: bool,
) -> kkt::KktSystem {
kkt::assemble_kkt(
n, m,
&state.hess_rows, &state.hess_cols, &state.hess_vals,
&state.jac_rows, &state.jac_cols, &state.jac_vals,
sigma, &state.grad_f, &state.g, &state.g_l, &state.g_u,
&state.y, &state.z_l, &state.z_u,
&state.x, &state.x_l, &state.x_u, state.mu,
use_sparse, &state.v_l, &state.v_u,
)
}
fn commit_trial_point(
state: &mut SolverState,
x_trial: Vec<f64>,
obj_trial: f64,
g_trial: Vec<f64>,
alpha: f64,
) {
state.x = x_trial;
state.obj = obj_trial;
state.g = g_trial;
state.alpha_primal = alpha;
}
fn compute_multiplier_sum(state: &SolverState) -> f64 {
l1_norm(&state.y) + l1_norm(&state.z_l) + l1_norm(&state.z_u)
}
fn compute_bound_multiplier_sum(state: &SolverState) -> f64 {
l1_norm(&state.z_l) + l1_norm(&state.z_u)
}
fn compute_convergence_info_from_state(
state: &SolverState,
mu: f64,
n: usize,
m: usize,
) -> ConvergenceInfo {
let primal_inf = compute_primal_inf_max_at_state(state);
let dual_inf = compute_dual_inf_at_state(state);
let compl_inf = compute_compl_err_at_state(state);
ConvergenceInfo {
primal_inf,
dual_inf,
dual_inf_unscaled: dual_inf,
compl_inf,
mu,
objective: state.obj,
multiplier_sum: compute_multiplier_sum(state),
multiplier_count: m + 2 * n,
bound_multiplier_sum: compute_bound_multiplier_sum(state),
bound_multiplier_count: 2 * n,
}
}
fn compute_avg_complementarity(state: &SolverState) -> f64 {
let mut sum_compl = 0.0;
let mut count = 0;
for i in 0..state.n {
if state.x_l[i].is_finite() {
sum_compl += slack_xl(state, i) * state.z_l[i];
count += 1;
}
if state.x_u[i].is_finite() {
sum_compl += slack_xu(state, i) * state.z_u[i];
count += 1;
}
}
if count == 0 {
for i in 0..state.m {
if state.v_l[i] > 0.0 {
sum_compl += state.v_l[i] * slack_gl(state, i);
count += 1;
}
if state.v_u[i] > 0.0 {
sum_compl += state.v_u[i] * slack_gu(state, i);
count += 1;
}
}
}
if count > 0 {
sum_compl / count as f64
} else {
0.0
}
}
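/// Error measure for the current barrier subproblem: the maximum of the averaged
/// Lagrangian-gradient norm, the averaged perturbed complementarity error, the
/// constraint violation, and a floor of 10% of the dual infeasibility.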
fn compute_barrier_error(state: &SolverState) -> f64 {
let n = state.n;
let mut grad_lag = state.grad_f.clone();
accumulate_jt_y(state, &mut grad_lag);
for i in 0..n {
if state.x_l[i].is_finite() {
grad_lag[i] -= state.z_l[i];
}
if state.x_u[i].is_finite() {
grad_lag[i] += state.z_u[i];
}
}
let sd = n.max(1) as f64;
let dual_err = l1_norm(&grad_lag) / sd;
let mut compl_err = 0.0;
let mut count = 0;
for i in 0..n {
if state.x_l[i].is_finite() {
let slack = slack_xl(state, i);
compl_err += (slack * state.z_l[i] - state.mu).abs();
count += 1;
}
if state.x_u[i].is_finite() {
let slack = slack_xu(state, i);
compl_err += (slack * state.z_u[i] - state.mu).abs();
count += 1;
}
}
if count > 0 {
compl_err /= count as f64;
}
let primal_err = state.constraint_violation();
let unscaled_du = compute_dual_inf_at_state(state);
let du_floor = unscaled_du * 0.1;
dual_err.max(compl_err).max(primal_err).max(du_floor)
}
struct ActiveSet {
active_lower: Vec<bool>,
active_upper: Vec<bool>,
free_idx: Vec<usize>,
orig_to_free: Vec<usize>,
n_free: usize,
dim: usize,
}
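/// Guesses the active bound set near a solution: a variable counts as fixed when
/// it lies within a relative tolerance of a bound and the corresponding
/// multiplier is positive. Returns `None` when nothing is active or when the
/// reduced system would be empty or too large (more than 500 unknowns) for a
/// dense solve.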
fn identify_active_bounds(state: &SolverState, n: usize, m: usize) -> Option<ActiveSet> {
let tol_bound = 1e-6;
let mut is_free = vec![true; n];
let mut active_lower = vec![false; n];
let mut active_upper = vec![false; n];
let mut n_free = 0usize;
for i in 0..n {
let at_lower = state.x_l[i].is_finite()
&& (state.x[i] - state.x_l[i]).abs() < tol_bound * (1.0 + state.x_l[i].abs());
let at_upper = state.x_u[i].is_finite()
&& (state.x_u[i] - state.x[i]).abs() < tol_bound * (1.0 + state.x_u[i].abs());
if at_lower && state.z_l[i] > 1e-8 {
is_free[i] = false;
active_lower[i] = true;
} else if at_upper && state.z_u[i] > 1e-8 {
is_free[i] = false;
active_upper[i] = true;
} else {
n_free += 1;
}
}
if n_free == n {
return None;
}
let dim = n_free + m;
if dim > 500 || dim == 0 {
return None;
}
let mut free_idx = Vec::with_capacity(n_free);
let mut orig_to_free = vec![usize::MAX; n];
for i in 0..n {
if is_free[i] {
orig_to_free[i] = free_idx.len();
free_idx.push(i);
}
}
Some(ActiveSet { active_lower, active_upper, free_idx, orig_to_free, n_free, dim })
}
fn build_reduced_kkt_dense(
state: &SolverState,
orig_to_free: &[usize],
free_idx: &[usize],
n_free: usize,
m: usize,
dim: usize,
) -> (Vec<f64>, Vec<f64>) {
let mut kkt = vec![0.0; dim * dim];
let mut rhs = vec![0.0; dim];
for (idx, (&row, &col)) in state.hess_rows.iter().zip(state.hess_cols.iter()).enumerate() {
let fr = orig_to_free[row];
let fc = orig_to_free[col];
if fr != usize::MAX && fc != usize::MAX {
kkt[fr * dim + fc] += state.hess_vals[idx];
if fr != fc {
kkt[fc * dim + fr] += state.hess_vals[idx];
}
}
}
for (idx, (&row, &col)) in state.jac_rows.iter().zip(state.jac_cols.iter()).enumerate() {
let fc = orig_to_free[col];
if fc != usize::MAX {
let r = n_free + row;
kkt[r * dim + fc] += state.jac_vals[idx];
kkt[fc * dim + r] += state.jac_vals[idx];
}
}
for k in 0..n_free {
rhs[k] = -state.grad_f[free_idx[k]];
}
for i in 0..m {
if (state.g_l[i] - state.g_u[i]).abs() < 1e-20 {
rhs[n_free + i] = state.g_l[i] - state.g[i];
} else if state.g[i] <= state.g_l[i] + 1e-10 {
rhs[n_free + i] = state.g_l[i] - state.g[i];
} else if state.g[i] >= state.g_u[i] - 1e-10 {
rhs[n_free + i] = state.g_u[i] - state.g[i];
} else {
rhs[n_free + i] = 0.0;
}
}
(kkt, rhs)
}
fn recover_z_from_stationarity(state: &mut SolverState, n: usize) {
let mut grad_jty = state.grad_f.clone();
accumulate_jt_y(state, &mut grad_jty);
for i in 0..n {
state.z_l[i] = 0.0;
state.z_u[i] = 0.0;
if state.x_l[i].is_finite() && grad_jty[i] > 0.0 {
state.z_l[i] = grad_jty[i];
} else if state.x_u[i].is_finite() && grad_jty[i] < 0.0 {
state.z_u[i] = -grad_jty[i];
}
}
}
fn snap_active_variables_to_bounds(
state: &mut SolverState,
active_lower: &[bool],
active_upper: &[bool],
n: usize,
) {
for i in 0..n {
if active_lower[i] {
state.x[i] = state.x_l[i];
} else if active_upper[i] {
state.x[i] = state.x_u[i];
}
}
}
fn apply_active_set_step_with_clamping(
state: &mut SolverState,
free_idx: &[usize],
sol: &[f64],
n_free: usize,
) {
for k in 0..n_free {
let i = free_idx[k];
state.x[i] += sol[k];
if state.x_l[i].is_finite() {
state.x[i] = state.x[i].max(state.x_l[i]);
}
if state.x_u[i].is_finite() {
state.x[i] = state.x[i].min(state.x_u[i]);
}
}
}
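/// One-shot active-set polish: snaps the guessed active variables onto their
/// bounds, solves the reduced equality-constrained KKT system for the free
/// variables and multipliers, recovers `z` from stationarity, and returns an
/// `Optimal` result if the full convergence test passes. On any failure the
/// previous iterate is restored.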
fn try_active_set_solve<P: NlpProblem>(
state: &mut SolverState,
problem: &P,
options: &SolverOptions,
linear_constraints: Option<&[bool]>,
lbfgs_mode: bool,
) -> Option<SolveResult> {
let n = state.n;
let m = state.m;
let active_set = match identify_active_bounds(state, n, m) {
Some(set) => set,
None => return None,
};
let ActiveSet { active_lower, active_upper, free_idx, orig_to_free, n_free, dim } = active_set;
let saved = SavedIterate::snapshot(state);
snap_active_variables_to_bounds(state, &active_lower, &active_upper, n);
let _ = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
let (mut kkt, mut rhs) = build_reduced_kkt_dense(
state, &orig_to_free, &free_idx, n_free, m, dim,
);
let solution = dense_symmetric_solve(dim, &mut kkt, &mut rhs);
if solution.is_none() {
saved.restore_and_reeval(state, problem, linear_constraints, lbfgs_mode);
return None;
}
let sol = solution.unwrap();
apply_active_set_step_with_clamping(state, &free_idx, &sol, n_free);
state.y.copy_from_slice(&sol[n_free..n_free + m]);
let _ = state.evaluate_with_linear(problem, 1.0, linear_constraints, lbfgs_mode);
recover_z_from_stationarity(state, n);
let conv_info = compute_convergence_info_from_state(state, 0.0, n, m);
if let ConvergenceStatus::Converged = check_convergence(&conv_info, options, 0) {
return Some(make_result(state, SolveStatus::Optimal));
}
saved.restore_and_reeval(state, problem, linear_constraints, lbfgs_mode);
None
}
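/// Dense solve of a symmetric indefinite system via Gaussian elimination with
/// symmetric diagonal pivoting (rows and columns are swapped together, and the
/// solution is mapped back to the original variable ordering); returns `None`
/// on a (near-)zero pivot.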
fn dense_symmetric_solve(dim: usize, a: &mut [f64], b: &mut [f64]) -> Option<Vec<f64>> {
let mut piv = vec![0usize; dim];
for i in 0..dim {
piv[i] = i;
}
for k in 0..dim {
let mut max_val = a[k * dim + k].abs();
let mut max_idx = k;
for i in (k + 1)..dim {
if a[i * dim + i].abs() > max_val {
max_val = a[i * dim + i].abs();
max_idx = i;
}
}
if max_val < 1e-15 {
return None;
}
if max_idx != k {
piv.swap(k, max_idx);
for j in 0..dim {
let tmp = a[k * dim + j];
a[k * dim + j] = a[max_idx * dim + j];
a[max_idx * dim + j] = tmp;
}
for i in 0..dim {
let tmp = a[i * dim + k];
a[i * dim + k] = a[i * dim + max_idx];
a[i * dim + max_idx] = tmp;
}
b.swap(k, max_idx);
}
let pivot = a[k * dim + k];
for i in (k + 1)..dim {
let factor = a[i * dim + k] / pivot;
a[i * dim + k] = factor;
for j in (k + 1)..dim {
a[i * dim + j] -= factor * a[k * dim + j];
}
b[i] -= factor * b[k];
}
}
let mut x = b.to_vec();
for k in (0..dim).rev() {
for j in (k + 1)..dim {
x[k] -= a[k * dim + j] * x[j];
}
x[k] /= a[k * dim + k];
}
// Undo the symmetric pivoting: `x` holds the solution in permuted order, and
// piv[k] gives the original index of the variable now at position k.
let mut x_out = vec![0.0; dim];
for k in 0..dim {
x_out[piv[k]] = x[k];
}
Some(x_out)
}
fn gradient_descent_fallback(state: &SolverState) -> Option<(Vec<f64>, Vec<f64>)> {
let n = state.n;
let m = state.m;
let grad_norm = l2_norm(&state.grad_f);
if grad_norm < 1e-20 {
return None;
}
let alpha = 1e-4 / grad_norm;
let mut dx = vec![0.0; n];
for i in 0..n {
dx[i] = -alpha * state.grad_f[i];
}
let dy = vec![0.0; m];
Some((dx, dy))
}
fn populate_final_diagnostics(state: &SolverState) -> SolverDiagnostics {
let mut diag = state.diagnostics.clone();
diag.final_mu = state.mu;
diag.final_primal_inf = compute_primal_inf_max_at_state(state);
diag.final_dual_inf = compute_dual_inf_at_state(state);
diag.final_compl = compute_compl_err_at_state(state);
diag.final_s_d = compute_s_d_at_state(state);
diag
}
fn unscale_solution_vectors(state: &SolverState) -> (Vec<f64>, Vec<f64>, Vec<f64>, Vec<f64>) {
let n = state.n;
let m = state.m;
let mut z_l_out = vec![0.0; n];
let mut z_u_out = vec![0.0; n];
for i in 0..n {
z_l_out[i] = state.z_l[i] / state.obj_scaling;
z_u_out[i] = state.z_u[i] / state.obj_scaling;
}
let mut y_out = state.y.clone();
for i in 0..m {
y_out[i] = state.y[i] * state.g_scaling[i] / state.obj_scaling;
}
let mut g_out = state.g.clone();
for i in 0..m {
g_out[i] /= state.g_scaling[i];
}
(z_l_out, z_u_out, y_out, g_out)
}
fn make_result(state: &SolverState, status: SolveStatus) -> SolveResult {
let diag = populate_final_diagnostics(state);
let (z_l_out, z_u_out, y_out, g_out) = unscale_solution_vectors(state);
SolveResult {
x: state.x.clone(),
objective: state.obj / state.obj_scaling,
constraint_multipliers: y_out,
bound_multipliers_lower: z_l_out,
bound_multipliers_upper: z_u_out,
constraint_values: g_out,
status,
iterations: state.iter,
diagnostics: diag,
}
}
#[cfg(test)]
mod tests {
use super::*;
fn minimal_state(n: usize, m: usize) -> SolverState {
SolverState {
x: vec![0.0; n],
y: vec![0.0; m],
z_l: vec![0.0; n],
z_u: vec![0.0; n],
v_l: vec![0.0; m],
v_u: vec![0.0; m],
dx: vec![0.0; n],
dy: vec![0.0; m],
dz_l: vec![0.0; n],
dz_u: vec![0.0; n],
mu: 0.1,
alpha_primal: 0.0,
alpha_dual: 0.0,
iter: 0,
x_l: vec![f64::NEG_INFINITY; n],
x_u: vec![f64::INFINITY; n],
g_l: vec![f64::NEG_INFINITY; m],
g_u: vec![f64::INFINITY; m],
n,
m,
obj: 0.0,
grad_f: vec![0.0; n],
g: vec![0.0; m],
jac_rows: Vec::new(),
jac_cols: Vec::new(),
jac_vals: Vec::new(),
hess_rows: Vec::new(),
hess_cols: Vec::new(),
hess_vals: Vec::new(),
consecutive_acceptable: 0,
obj_scaling: 1.0,
g_scaling: vec![1.0; m],
diagnostics: SolverDiagnostics::default(),
x_last_eval: vec![f64::NAN; n],
}
}
#[test]
fn test_avg_compl_variable_bounds_only() {
let mut state = minimal_state(2, 0);
state.x = vec![1.5, 2.0];
state.x_l = vec![1.0, 1.0];
state.z_l = vec![2.0, 3.0];
let avg = compute_avg_complementarity(&state);
assert!((avg - 2.0).abs() < 1e-12, "expected 2.0, got {}", avg);
}
#[test]
fn test_avg_compl_both_bounds() {
let mut state = minimal_state(1, 0);
state.x = vec![1.5];
state.x_l = vec![1.0];
state.x_u = vec![2.0];
state.z_l = vec![2.0];
state.z_u = vec![3.0];
let avg = compute_avg_complementarity(&state);
assert!((avg - 1.25).abs() < 1e-12, "expected 1.25, got {}", avg);
}
#[test]
fn test_avg_compl_inequality_fallback() {
let mut state = minimal_state(1, 1);
state.g = vec![2.0];
state.g_l = vec![1.0];
state.g_u = vec![f64::INFINITY];
state.v_l = vec![0.5];
let avg = compute_avg_complementarity(&state);
assert!((avg - 0.5).abs() < 1e-12, "fallback path: expected 0.5, got {}", avg);
}
#[test]
fn test_avg_compl_inequality_fallback_skipped_when_bounds_exist() {
let mut state = minimal_state(1, 1);
state.x = vec![2.0];
state.x_l = vec![1.0];
state.z_l = vec![1.0];
state.g = vec![2.0];
state.g_l = vec![1.0];
state.v_l = vec![99.0];
let avg = compute_avg_complementarity(&state);
assert!((avg - 1.0).abs() < 1e-12,
"fallback must skip when var bounds present: got {}", avg);
}
#[test]
fn test_avg_compl_no_bounds_anywhere() {
let state = minimal_state(3, 0);
let avg = compute_avg_complementarity(&state);
assert_eq!(avg, 0.0);
}
#[test]
fn test_quality_function_mu_degenerate_range() {
let state = minimal_state(1, 0);
let opts = SolverOptions::default();
let mu = quality_function_mu(&state, &opts, 1.0, 1.0, 5);
assert_eq!(mu, 1.0);
let mu2 = quality_function_mu(&state, &opts, 2.0, 1.0, 5);
assert_eq!(mu2, 1.0, "lower > upper still returns upper");
}
#[test]
fn test_quality_function_mu_too_few_candidates() {
let state = minimal_state(1, 0);
let opts = SolverOptions::default();
let mu = quality_function_mu(&state, &opts, 1e-6, 1e-3, 1);
assert_eq!(mu, 1e-3);
let mu0 = quality_function_mu(&state, &opts, 1e-6, 1e-3, 0);
assert_eq!(mu0, 1e-3);
}
#[test]
fn test_quality_function_mu_picks_candidate_in_range() {
let mut state = minimal_state(1, 0);
state.x = vec![1.5];
state.x_l = vec![1.0];
state.z_l = vec![0.2];
let opts = SolverOptions::default();
let mu = quality_function_mu(&state, &opts, 1e-6, 1e-1, 11);
assert!(mu >= 1e-6 * (1.0 - 1e-12) && mu <= 1e-1 * (1.0 + 1e-12),
"mu must lie in range, got {}", mu);
assert!((mu - 0.1).abs() < 1e-10, "expected mu≈0.1, got {}", mu);
}
#[test]
fn test_quality_function_mu_centrality_term_changes_pick() {
let mut state = minimal_state(2, 0);
state.x = vec![1.5, 1.5];
state.x_l = vec![1.0, 1.0];
state.z_l = vec![0.001, 1.0];
let mut opts_off = SolverOptions::default();
opts_off.quality_function_centrality = false;
let mu_off = quality_function_mu(&state, &opts_off, 1e-6, 1.0, 21);
let mut opts_on = SolverOptions::default();
opts_on.quality_function_centrality = true;
let mu_on = quality_function_mu(&state, &opts_on, 1e-6, 1.0, 21);
assert!(mu_on >= mu_off,
"centrality should raise mu, off={mu_off:e} on={mu_on:e}");
}
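// Extra sanity checks for two of the small numeric helpers; the expected values
// below are worked out by hand from the definitions above.
#[test]
fn test_dense_symmetric_solve_with_pivoting() {
// A = [[1, 0], [0, 4]], b = [1, 8] => x = [1, 2]. The larger diagonal entry
// forces a symmetric pivot swap, so this also checks that the solution comes
// back in the original variable order.
let mut a = vec![1.0, 0.0, 0.0, 4.0];
let mut b = vec![1.0, 8.0];
let x = dense_symmetric_solve(2, &mut a, &mut b).expect("solvable system");
assert!((x[0] - 1.0).abs() < 1e-12 && (x[1] - 2.0).abs() < 1e-12, "got {:?}", x);
}
#[test]
fn test_fraction_to_boundary_primal_x_lower_bound() {
let mut state = minimal_state(1, 0);
state.x = vec![1.0];
state.x_l = vec![0.0];
// alpha = tau * slack / |dx| = 0.99 * 1.0 / 2.0
let alpha = fraction_to_boundary_primal_x(&state, &[-2.0], 0.99);
assert!((alpha - 0.495).abs() < 1e-12, "got {}", alpha);
}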
}