use ripopt::{NlpProblem, SolveStatus, SolverOptions};
/// Solver options with all console output suppressed; shared by every test below.
fn silent_opts() -> SolverOptions {
    let mut opts = SolverOptions::default();
    opts.print_level = 0;
    opts
}
/// Convex QP with one equality constraint:
/// minimize x0^2 + x1^2 subject to x0 + x1 = 1.
/// Unique optimum at x = (0.5, 0.5) with objective 0.5.
struct EqualityQP;

impl NlpProblem for EqualityQP {
    fn num_variables(&self) -> usize {
        2
    }
    fn num_constraints(&self) -> usize {
        1
    }
    /// Both variables are free (no box bounds).
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        x_l.fill(f64::NEG_INFINITY);
        x_u.fill(f64::INFINITY);
    }
    /// Equality constraint: lower and upper bounds coincide at 1.
    fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
        g_l[0] = 1.0;
        g_u[0] = 1.0;
    }
    /// Start exactly at the known optimum.
    fn initial_point(&self, x0: &mut [f64]) {
        x0.fill(0.5);
    }
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        *obj = x[0].powi(2) + x[1].powi(2);
        true
    }
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        for (g, &xi) in grad.iter_mut().zip(x) {
            *g = 2.0 * xi;
        }
        true
    }
    fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
        g[0] = x[0] + x[1];
        true
    }
    /// Dense single constraint row: entries (0,0) and (0,1).
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0; 2], vec![0, 1])
    }
    fn jacobian_values(&self, _x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
        vals.fill(1.0);
        true
    }
    /// Diagonal Hessian: entries (0,0) and (1,1).
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0, 1], vec![0, 1])
    }
    fn hessian_values(&self, _x: &[f64], _new_x: bool, obj_factor: f64, _lambda: &[f64], vals: &mut [f64]) -> bool {
        // Constraint is linear, so only the objective contributes.
        vals.fill(2.0 * obj_factor);
        true
    }
}
/// The augmented-Lagrangian solver must recover the analytic optimum of the
/// equality-constrained QP: x = (0.5, 0.5), objective 0.5.
#[test]
fn al_equality_convergence() {
    let result = ripopt::augmented_lagrangian::solve(&EqualityQP, &silent_opts());
    // assert_eq! prints both sides on failure; consistent with the other tests
    // in this file (al_inequality_convergence, al_max_iterations).
    assert_eq!(result.status, SolveStatus::Optimal);
    assert!((result.objective - 0.5).abs() < 0.1, "obj={}", result.objective);
    assert!((result.x[0] - 0.5).abs() < 0.1, "x0={}", result.x[0]);
    assert!((result.x[1] - 0.5).abs() < 0.1, "x1={}", result.x[1]);
}
/// Convex QP with one inactive inequality:
/// minimize (x0 - 2)^2 + (x1 - 2)^2 subject to x0 + x1 >= 1.
/// The unconstrained minimizer (2, 2) already satisfies the constraint,
/// so the optimum is (2, 2) with objective 0.
struct InequalityQP;

impl NlpProblem for InequalityQP {
    fn num_variables(&self) -> usize {
        2
    }
    fn num_constraints(&self) -> usize {
        1
    }
    /// Both variables are free (no box bounds).
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        x_l.fill(f64::NEG_INFINITY);
        x_u.fill(f64::INFINITY);
    }
    /// One-sided inequality: x0 + x1 >= 1 (no upper limit).
    fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
        g_l[0] = 1.0;
        g_u[0] = f64::INFINITY;
    }
    fn initial_point(&self, x0: &mut [f64]) {
        x0.fill(0.0);
    }
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        let dx = x[0] - 2.0;
        let dy = x[1] - 2.0;
        *obj = dx.powi(2) + dy.powi(2);
        true
    }
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        for (g, &xi) in grad.iter_mut().zip(x) {
            *g = 2.0 * (xi - 2.0);
        }
        true
    }
    fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
        g[0] = x[0] + x[1];
        true
    }
    /// Dense single constraint row: entries (0,0) and (0,1).
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0; 2], vec![0, 1])
    }
    fn jacobian_values(&self, _x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
        vals.fill(1.0);
        true
    }
    /// Diagonal Hessian: entries (0,0) and (1,1).
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0, 1], vec![0, 1])
    }
    fn hessian_values(&self, _x: &[f64], _new_x: bool, obj_factor: f64, _lambda: &[f64], vals: &mut [f64]) -> bool {
        // Constraint is linear, so only the objective contributes.
        vals.fill(2.0 * obj_factor);
        true
    }
}
/// The inequality x0 + x1 >= 1 is inactive at the unconstrained optimum
/// (2, 2), so the solver should reach it with objective near zero.
#[test]
fn al_inequality_convergence() {
    let res = ripopt::augmented_lagrangian::solve(&InequalityQP, &silent_opts());
    assert_eq!(res.status, SolveStatus::Optimal);
    assert!(res.objective < 0.1, "obj={}", res.objective);
    // Both coordinates should sit close to 2.
    for (i, xi) in res.x.iter().enumerate() {
        assert!((xi - 2.0).abs() < 0.1, "x{}={}", i, xi);
    }
}
/// One-variable QP whose equality constraint x = 100 lies far from the
/// unconstrained minimum at 0, forcing the augmented-Lagrangian penalty
/// parameter (rho) to grow before the iterates can reach feasibility.
struct RhoIncrease;

impl NlpProblem for RhoIncrease {
    fn num_variables(&self) -> usize {
        1
    }
    fn num_constraints(&self) -> usize {
        1
    }
    /// The single variable is free.
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        x_l.fill(f64::NEG_INFINITY);
        x_u.fill(f64::INFINITY);
    }
    /// Equality constraint pinned at 100.
    fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
        g_l[0] = 100.0;
        g_u[0] = 100.0;
    }
    fn initial_point(&self, x0: &mut [f64]) {
        x0.fill(0.0);
    }
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        *obj = x[0].powi(2);
        true
    }
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        grad[0] = 2.0 * x[0];
        true
    }
    fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
        g[0] = x[0];
        true
    }
    /// Single Jacobian entry at (0,0).
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0], vec![0])
    }
    fn jacobian_values(&self, _x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
        vals.fill(1.0);
        true
    }
    /// Single Hessian entry at (0,0).
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0], vec![0])
    }
    fn hessian_values(&self, _x: &[f64], _new_x: bool, obj_factor: f64, _lambda: &[f64], vals: &mut [f64]) -> bool {
        // Constraint is linear, so only the objective contributes.
        vals.fill(2.0 * obj_factor);
        true
    }
}
/// The solver must drive x to the distant constraint target 100, which
/// exercises the penalty-parameter (rho) increase path.
#[test]
fn al_rho_increase() {
    let result = ripopt::augmented_lagrangian::solve(&RhoIncrease, &silent_opts());
    // assert_eq! prints both sides on failure; consistent with the other tests
    // in this file (al_inequality_convergence, al_max_iterations).
    assert_eq!(result.status, SolveStatus::Optimal);
    assert!((result.x[0] - 100.0).abs() < 0.1, "x={}", result.x[0]);
}
/// One-variable problem with a cubic equality constraint:
/// minimize x^2 subject to x^3 = 1000 (feasible root x = 10).
/// The nonlinear constraint makes tight convergence harder, so the solver
/// may only reach a near-tolerance answer.
struct AcceptableProblem;

impl NlpProblem for AcceptableProblem {
    fn num_variables(&self) -> usize {
        1
    }
    fn num_constraints(&self) -> usize {
        1
    }
    /// The single variable is free.
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        x_l.fill(f64::NEG_INFINITY);
        x_u.fill(f64::INFINITY);
    }
    /// Equality constraint pinned at 1000.
    fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
        g_l[0] = 1000.0;
        g_u[0] = 1000.0;
    }
    fn initial_point(&self, x0: &mut [f64]) {
        x0.fill(5.0);
    }
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        *obj = x[0].powi(2);
        true
    }
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        grad[0] = 2.0 * x[0];
        true
    }
    fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
        g[0] = x[0].powi(3);
        true
    }
    /// Single Jacobian entry at (0,0).
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0], vec![0])
    }
    /// d/dx of x^3 is 3 x^2.
    fn jacobian_values(&self, x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
        vals[0] = 3.0 * x[0].powi(2);
        true
    }
    /// Single Hessian entry at (0,0).
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0], vec![0])
    }
    fn hessian_values(&self, x: &[f64], _new_x: bool, obj_factor: f64, lambda: &[f64], vals: &mut [f64]) -> bool {
        // Objective contributes 2; the cubic constraint contributes 6x
        // weighted by its multiplier.
        vals[0] = obj_factor * 2.0 + 6.0 * lambda[0] * x[0];
        true
    }
}
/// Cubic equality constraint x^3 = 1000 (root x = 10). The solver may stop
/// Optimal or NumericalError depending on conditioning, but must land near
/// the feasible root either way.
#[test]
fn al_near_tolerance_convergence() {
    // Reuse the shared silent_opts() helper instead of rebuilding the same
    // SolverOptions inline.
    let result = ripopt::augmented_lagrangian::solve(&AcceptableProblem, &silent_opts());
    assert!(
        matches!(
            result.status,
            SolveStatus::Optimal | SolveStatus::NumericalError
        ),
        "Expected Optimal or NumericalError, got: status={:?}",
        result.status
    );
    assert!((result.x[0] - 10.0).abs() < 0.5, "x={}", result.x[0]);
}
/// Deliberately infeasible problem: one variable with two conflicting
/// equality constraints, x = 1 and x = 2. No point satisfies both, so the
/// solver can never declare optimality.
struct Infeasible;

impl NlpProblem for Infeasible {
    fn num_variables(&self) -> usize {
        1
    }
    fn num_constraints(&self) -> usize {
        2
    }
    /// The single variable is free.
    fn bounds(&self, x_l: &mut [f64], x_u: &mut [f64]) {
        x_l.fill(f64::NEG_INFINITY);
        x_u.fill(f64::INFINITY);
    }
    /// Two incompatible equalities: g0 = 1 and g1 = 2 with g0 == g1 == x.
    fn constraint_bounds(&self, g_l: &mut [f64], g_u: &mut [f64]) {
        g_l[0] = 1.0;
        g_u[0] = 1.0;
        g_l[1] = 2.0;
        g_u[1] = 2.0;
    }
    fn initial_point(&self, x0: &mut [f64]) {
        x0.fill(0.0);
    }
    fn objective(&self, x: &[f64], _new_x: bool, obj: &mut f64) -> bool {
        *obj = x[0].powi(2);
        true
    }
    fn gradient(&self, x: &[f64], _new_x: bool, grad: &mut [f64]) -> bool {
        grad[0] = 2.0 * x[0];
        true
    }
    /// Both constraints evaluate to the same expression x.
    fn constraints(&self, x: &[f64], _new_x: bool, g: &mut [f64]) -> bool {
        g.fill(x[0]);
        true
    }
    /// One entry per constraint row, both in column 0.
    fn jacobian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0, 1], vec![0; 2])
    }
    fn jacobian_values(&self, _x: &[f64], _new_x: bool, vals: &mut [f64]) -> bool {
        vals.fill(1.0);
        true
    }
    /// Single Hessian entry at (0,0).
    fn hessian_structure(&self) -> (Vec<usize>, Vec<usize>) {
        (vec![0], vec![0])
    }
    fn hessian_values(&self, _x: &[f64], _new_x: bool, obj_factor: f64, _lambda: &[f64], vals: &mut [f64]) -> bool {
        // Constraints are linear, so only the objective contributes.
        vals.fill(2.0 * obj_factor);
        true
    }
}
/// Conflicting equalities (x = 1 and x = 2) are infeasible, so the solver
/// should exhaust its iteration budget rather than report optimality.
#[test]
fn al_max_iterations() {
    let outcome = ripopt::augmented_lagrangian::solve(&Infeasible, &silent_opts());
    assert_eq!(outcome.status, SolveStatus::MaxIterations);
}