use super::{Constraint, ConstraintFn, ConstraintKind};
use crate::error::OptimizeError;
use crate::unconstrained::OptimizeResult;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
/// Vector-valued equality constraints c_eq(x); the solver drives |c_eq(x)| toward 0.
type EqualityConstraintFn = dyn FnMut(&ArrayView1<f64>) -> Array1<f64>;
/// Jacobian of the equality constraints, one row per constraint.
type EqualityJacobianFn = dyn FnMut(&ArrayView1<f64>) -> Array2<f64>;
/// Vector-valued inequality constraints; slacks s > 0 enforce c(x) + s = 0.
type InequalityConstraintFn = dyn FnMut(&ArrayView1<f64>) -> Array1<f64>;
/// Jacobian of the inequality constraints, one row per constraint.
type InequalityJacobianFn = dyn FnMut(&ArrayView1<f64>) -> Array2<f64>;
/// Search direction tuple: (dx, ds, dlambda_eq, dlambda_ineq).
type NewtonDirectionResult = (Array1<f64>, Array1<f64>, Array1<f64>, Array1<f64>);
/// Tuning parameters for the primal-dual interior point solver.
#[derive(Debug, Clone)]
pub struct InteriorPointOptions {
    /// Maximum number of outer interior point iterations.
    pub max_iter: usize,
    /// Convergence threshold on the optimality measure (l1 norm of the
    /// Lagrangian gradient).
    pub tol: f64,
    /// Starting value of the barrier parameter mu.
    pub initial_barrier: f64,
    /// Multiplicative factor applied when shrinking the barrier parameter.
    pub barrier_reduction: f64,
    /// Floor below which the barrier parameter is never reduced.
    pub min_barrier: f64,
    /// Maximum number of backtracking steps in the line search.
    pub max_ls_iter: usize,
    /// Sufficient-decrease coefficient used by the Armijo line search.
    pub alpha: f64,
    /// Backtracking shrink factor for the line search step.
    pub beta: f64,
    /// Convergence threshold on the feasibility measure.
    pub feas_tol: f64,
    /// Use Mehrotra's predictor-corrector direction instead of a plain Newton step.
    pub use_mehrotra: bool,
    /// Diagonal regularization added to the KKT system.
    pub regularization: f64,
}
impl Default for InteriorPointOptions {
fn default() -> Self {
Self {
max_iter: 100,
tol: 1e-8,
initial_barrier: 1.0,
barrier_reduction: 0.1,
min_barrier: 1e-10,
max_ls_iter: 50,
alpha: 0.3,
beta: 0.5,
feas_tol: 1e-8,
use_mehrotra: true,
regularization: 1e-8,
}
}
}
/// Outcome of an interior point solve.
#[derive(Debug, Clone)]
pub struct InteriorPointResult {
    /// Final iterate.
    pub x: Array1<f64>,
    /// Objective value at `x`.
    pub fun: f64,
    /// Equality multipliers (`None` when the problem has no equality constraints).
    pub lambda_eq: Option<Array1<f64>>,
    /// Inequality multipliers (`None` when there are no inequality constraints).
    pub lambda_ineq: Option<Array1<f64>>,
    /// Number of outer iterations performed.
    pub nit: usize,
    /// Callback evaluation count (objective, gradient and constraint calls combined).
    pub nfev: usize,
    /// True when both tolerance tests were met before `max_iter`.
    pub success: bool,
    /// Human-readable termination reason.
    pub message: String,
    /// Barrier parameter at termination.
    pub barrier: f64,
    /// Optimality measure at termination.
    pub optimality: f64,
}
/// Stateful driver for the interior point iteration; stores the problem
/// dimensions and evaluation counter, and borrows the options.
pub struct InteriorPointSolver<'a> {
    // Number of decision variables.
    n: usize,
    // Number of equality constraints.
    m_eq: usize,
    // Number of inequality constraints.
    m_ineq: usize,
    // Solver settings, borrowed for the solver's lifetime.
    options: &'a InteriorPointOptions,
    // Running count of callback evaluations.
    nfev: usize,
}
impl<'a> InteriorPointSolver<'a> {
pub fn new(n: usize, m_eq: usize, m_ineq: usize, options: &'a InteriorPointOptions) -> Self {
Self {
n,
m_eq,
m_ineq,
options,
nfev: 0,
}
}
    #[allow(clippy::many_single_char_names)]
    /// Runs the primal-dual interior point iteration starting from `x0`.
    ///
    /// `fun` and `grad` evaluate the objective and its gradient. The four
    /// optional callbacks supply constraint values and Jacobians; each pair is
    /// only consulted when the corresponding count (`m_eq`, `m_ineq`) is
    /// positive and both callbacks are present.
    ///
    /// Returns `success == true` as soon as the optimality measure drops below
    /// `tol` and the feasibility measure below `feas_tol`; otherwise returns a
    /// `success == false` result after `max_iter` iterations.
    pub fn solve<F, G>(
        &mut self,
        fun: &mut F,
        grad: &mut G,
        mut eq_con: Option<&mut EqualityConstraintFn>,
        mut eq_jac: Option<&mut EqualityJacobianFn>,
        mut ineq_con: Option<&mut InequalityConstraintFn>,
        mut ineq_jac: Option<&mut InequalityJacobianFn>,
        x0: &Array1<f64>,
    ) -> Result<InteriorPointResult, OptimizeError>
    where
        F: FnMut(&ArrayView1<f64>) -> f64,
        G: FnMut(&ArrayView1<f64>) -> Array1<f64>,
    {
        let mut x = x0.clone();
        // Slacks and inequality multipliers must stay strictly positive, so
        // both start at 1; equality multipliers start at 0.
        let mut s = Array1::ones(self.m_ineq);
        let mut lambda_eq = Array1::zeros(self.m_eq);
        let mut lambda_ineq = Array1::ones(self.m_ineq);
        let mut barrier = self.options.initial_barrier;
        let mut iter = 0;
        while iter < self.options.max_iter {
            let f = fun(&x.view());
            let g = grad(&x.view());
            // NOTE(review): gradient calls are counted in nfev together with
            // objective calls — confirm this bookkeeping is intended.
            self.nfev += 2;
            // Equality constraint values/Jacobian, if the problem has any.
            let (c_eq, j_eq) = if self.m_eq > 0 && eq_con.is_some() && eq_jac.is_some() {
                let c = eq_con.as_mut().expect("Operation failed")(&x.view());
                let j = eq_jac.as_mut().expect("Operation failed")(&x.view());
                self.nfev += 2;
                (Some(c), Some(j))
            } else {
                (None, None)
            };
            // Inequality constraint values/Jacobian, if the problem has any.
            let (c_ineq, j_ineq) = if self.m_ineq > 0 && ineq_con.is_some() && ineq_jac.is_some() {
                let c = ineq_con.as_mut().expect("Operation failed")(&x.view());
                let j = ineq_jac.as_mut().expect("Operation failed")(&x.view());
                self.nfev += 2;
                (Some(c), Some(j))
            } else {
                (None, None)
            };
            // First-order optimality and total infeasibility of the iterate.
            let (optimality, feasibility) = self.compute_convergence_measures(
                &g,
                &c_eq,
                &c_ineq,
                &j_eq,
                &j_ineq,
                &lambda_eq,
                &lambda_ineq,
                &s,
                barrier,
            );
            if optimality < self.options.tol && feasibility < self.options.feas_tol {
                return Ok(InteriorPointResult {
                    x,
                    fun: f,
                    lambda_eq: if self.m_eq > 0 { Some(lambda_eq) } else { None },
                    lambda_ineq: if self.m_ineq > 0 {
                        Some(lambda_ineq)
                    } else {
                        None
                    },
                    nit: iter,
                    nfev: self.nfev,
                    success: true,
                    message: "Optimization terminated successfully.".to_string(),
                    barrier,
                    optimality,
                });
            }
            // Search direction: Mehrotra predictor-corrector or plain Newton.
            let (dx, ds, dlambda_eq, dlambda_ineq) = if self.options.use_mehrotra {
                self.compute_mehrotra_direction(
                    &g,
                    &c_eq,
                    &c_ineq,
                    &j_eq,
                    &j_ineq,
                    &s,
                    &lambda_ineq,
                    barrier,
                )?
            } else {
                self.compute_newton_direction(
                    &g,
                    &c_eq,
                    &c_ineq,
                    &j_eq,
                    &j_ineq,
                    &s,
                    &lambda_eq,
                    &lambda_ineq,
                    barrier,
                )?
            };
            // A single step length is used for the primal and dual updates.
            let step_size =
                self.line_search(fun, &x, &s, &lambda_ineq, &dx, &ds, &dlambda_ineq, barrier)?;
            x = &x + step_size * &dx;
            if self.m_ineq > 0 {
                s = &s + step_size * &ds;
                lambda_ineq = &lambda_ineq + step_size * &dlambda_ineq;
            }
            if self.m_eq > 0 {
                lambda_eq = &lambda_eq + step_size * &dlambda_eq;
            }
            // Reduce the barrier only once the iterate is roughly centered.
            if optimality < 10.0 * barrier {
                barrier = (barrier * self.options.barrier_reduction).max(self.options.min_barrier);
            }
            iter += 1;
        }
        // Iteration budget exhausted: report the last iterate, success = false.
        let final_f = fun(&x.view());
        self.nfev += 1;
        // NOTE(review): constraint data is passed as None here, so the reported
        // final optimality omits constraint contributions, and
        // final_feasibility is computed but never used — confirm intended.
        let (final_optimality, final_feasibility) = self.compute_convergence_measures(
            &grad(&x.view()),
            &None,
            &None,
            &None,
            &None,
            &lambda_eq,
            &lambda_ineq,
            &s,
            barrier,
        );
        self.nfev += 1;
        Ok(InteriorPointResult {
            x,
            fun: final_f,
            lambda_eq: if self.m_eq > 0 { Some(lambda_eq) } else { None },
            lambda_ineq: if self.m_ineq > 0 {
                Some(lambda_ineq)
            } else {
                None
            },
            nit: iter,
            nfev: self.nfev,
            success: false,
            message: "Maximum iterations reached.".to_string(),
            barrier,
            optimality: final_optimality,
        })
    }
fn compute_convergence_measures(
&self,
g: &Array1<f64>,
c_eq: &Option<Array1<f64>>,
c_ineq: &Option<Array1<f64>>,
j_eq: &Option<Array2<f64>>,
j_ineq: &Option<Array2<f64>>,
lambda_eq: &Array1<f64>,
lambda_ineq: &Array1<f64>,
s: &Array1<f64>,
barrier: f64,
) -> (f64, f64) {
let mut lag_grad = g.clone();
if let (Some(j_eq), true) = (j_eq, self.m_eq > 0) {
lag_grad = &lag_grad + &j_eq.t().dot(lambda_eq);
}
if let (Some(j_ineq), true) = (j_ineq, self.m_ineq > 0) {
lag_grad = &lag_grad + &j_ineq.t().dot(lambda_ineq);
}
let optimality = lag_grad.mapv(|x| x.abs()).sum();
let mut feasibility = 0.0;
if let Some(c_eq) = c_eq {
feasibility += c_eq.mapv(|x| x.abs()).sum();
}
if let (Some(c_ineq), true) = (c_ineq, self.m_ineq > 0) {
feasibility += (c_ineq + s).mapv(|x| x.abs()).sum();
}
if self.m_ineq > 0 {
let complementarity = s
.iter()
.zip(lambda_ineq.iter())
.map(|(&si, &li)| (si * li - barrier).abs())
.sum::<f64>();
feasibility += complementarity;
}
(optimality, feasibility)
}
fn compute_newton_direction(
&self,
g: &Array1<f64>,
c_eq: &Option<Array1<f64>>,
c_ineq: &Option<Array1<f64>>,
j_eq: &Option<Array2<f64>>,
j_ineq: &Option<Array2<f64>>,
s: &Array1<f64>,
_lambda_eq: &Array1<f64>,
lambda_ineq: &Array1<f64>,
barrier: f64,
) -> Result<NewtonDirectionResult, OptimizeError> {
let n_total = self.n + self.m_eq + 2 * self.m_ineq;
let mut kkt_matrix = Array2::zeros((n_total, n_total));
let mut rhs = Array1::zeros(n_total);
let reg = self.options.regularization.max(1e-8);
for i in 0..self.n {
kkt_matrix[[i, i]] = 1.0 + reg;
}
for i in 0..self.n {
rhs[i] = -g[i];
}
let mut row_offset = self.n;
if let (Some(j_eq), Some(c_eq), true) = (j_eq, c_eq, self.m_eq > 0) {
for i in 0..self.m_eq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_eq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_eq[[i, j]];
}
}
for i in 0..self.m_eq {
rhs[row_offset + i] = -c_eq[i];
}
row_offset += self.m_eq;
}
if let (Some(j_ineq), Some(c_ineq), true) = (j_ineq, c_ineq, self.m_ineq > 0) {
for i in 0..self.m_ineq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_ineq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_ineq[[i, j]];
}
kkt_matrix[[row_offset + i, self.n + i]] = 1.0;
kkt_matrix[[self.n + i, row_offset + i]] = 1.0;
}
for i in 0..self.m_ineq {
rhs[row_offset + i] = -(c_ineq[i] + s[i]);
}
row_offset += self.m_ineq;
for i in 0..self.m_ineq {
let s_i = s[i].max(1e-10);
let lambda_i = lambda_ineq[i].max(0.0);
kkt_matrix[[self.n + i, self.n + i]] = lambda_i / s_i + reg;
kkt_matrix[[self.n + i, row_offset - self.m_ineq + i]] = s_i;
kkt_matrix[[row_offset - self.m_ineq + i, self.n + i]] = lambda_i;
rhs[self.n + i] = barrier / s_i - lambda_i;
}
}
let solution = solve(&kkt_matrix, &rhs)?;
let dx = solution
.slice(scirs2_core::ndarray::s![0..self.n])
.to_owned();
let ds = if self.m_ineq > 0 {
solution
.slice(scirs2_core::ndarray::s![self.n..self.n + self.m_ineq])
.to_owned()
} else {
Array1::zeros(0)
};
let mut offset = self.n + self.m_ineq;
let dlambda_eq = if self.m_eq > 0 {
solution
.slice(scirs2_core::ndarray::s![offset..offset + self.m_eq])
.to_owned()
} else {
Array1::zeros(0)
};
offset += self.m_eq;
let dlambda_ineq = if self.m_ineq > 0 {
solution
.slice(scirs2_core::ndarray::s![offset..offset + self.m_ineq])
.to_owned()
} else {
Array1::zeros(0)
};
Ok((dx, ds, dlambda_eq, dlambda_ineq))
}
fn compute_mehrotra_direction(
&self,
g: &Array1<f64>,
c_eq: &Option<Array1<f64>>,
c_ineq: &Option<Array1<f64>>,
j_eq: &Option<Array2<f64>>,
j_ineq: &Option<Array2<f64>>,
s: &Array1<f64>,
lambda_ineq: &Array1<f64>,
_barrier: f64,
) -> Result<NewtonDirectionResult, OptimizeError> {
if self.m_ineq == 0 {
return self.compute_newton_direction(
g,
c_eq,
c_ineq,
j_eq,
j_ineq,
s,
&Array1::zeros(self.m_eq),
lambda_ineq,
0.0,
);
}
let (dx_aff, ds_aff, dlambda_eq_aff, dlambda_ineq_aff) =
self.compute_affine_scaling_direction(g, c_eq, c_ineq, j_eq, j_ineq, s, lambda_ineq)?;
let alpha_primal_max = self.compute_max_step_primal(s, &ds_aff);
let alpha_dual_max = self.compute_max_step_dual(lambda_ineq, &dlambda_ineq_aff);
let current_gap = s
.iter()
.zip(lambda_ineq.iter())
.map(|(&si, &li)| si * li)
.sum::<f64>();
let mu = current_gap / (self.m_ineq as f64);
let mut predicted_gap = 0.0;
for i in 0..self.m_ineq {
let s_new = s[i] + alpha_primal_max * ds_aff[i];
let lambda_new = lambda_ineq[i] + alpha_dual_max * dlambda_ineq_aff[i];
predicted_gap += s_new * lambda_new;
}
let mu_aff = predicted_gap / (self.m_ineq as f64);
let sigma = if mu > 0.0 {
(mu_aff / mu).powi(3)
} else {
0.1 };
let sigma = sigma.max(0.0).min(1.0);
let sigma_mu = sigma * mu;
self.compute_corrector_direction(
g,
c_eq,
c_ineq,
j_eq,
j_ineq,
s,
lambda_ineq,
&dx_aff,
&ds_aff,
&dlambda_ineq_aff,
sigma_mu,
)
}
fn compute_affine_scaling_direction(
&self,
g: &Array1<f64>,
c_eq: &Option<Array1<f64>>,
c_ineq: &Option<Array1<f64>>,
j_eq: &Option<Array2<f64>>,
j_ineq: &Option<Array2<f64>>,
s: &Array1<f64>,
lambda_ineq: &Array1<f64>,
) -> Result<NewtonDirectionResult, OptimizeError> {
let n_total = self.n + self.m_eq + 2 * self.m_ineq;
let mut kkt_matrix = Array2::zeros((n_total, n_total));
let mut rhs = Array1::zeros(n_total);
let reg = self.options.regularization.max(1e-8);
for i in 0..self.n {
kkt_matrix[[i, i]] = 1.0 + reg;
}
for i in 0..self.n {
rhs[i] = -g[i];
}
let mut row_offset = self.n;
if let (Some(j_eq), Some(c_eq), true) = (j_eq, c_eq, self.m_eq > 0) {
for i in 0..self.m_eq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_eq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_eq[[i, j]];
}
}
for i in 0..self.m_eq {
rhs[row_offset + i] = -c_eq[i];
}
row_offset += self.m_eq;
}
if let (Some(j_ineq), Some(c_ineq), true) = (j_ineq, c_ineq, self.m_ineq > 0) {
for i in 0..self.m_ineq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_ineq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_ineq[[i, j]];
}
kkt_matrix[[row_offset + i, self.n + i]] = 1.0;
kkt_matrix[[self.n + i, row_offset + i]] = 1.0;
}
for i in 0..self.m_ineq {
rhs[row_offset + i] = -(c_ineq[i] + s[i]);
}
row_offset += self.m_ineq;
for i in 0..self.m_ineq {
let s_i = s[i].max(1e-10);
let lambda_i = lambda_ineq[i].max(0.0);
kkt_matrix[[self.n + i, self.n + i]] = lambda_i / s_i + reg;
kkt_matrix[[self.n + i, row_offset - self.m_ineq + i]] = s_i;
kkt_matrix[[row_offset - self.m_ineq + i, self.n + i]] = lambda_i;
rhs[self.n + i] = -lambda_i;
}
}
let solution = solve(&kkt_matrix, &rhs)?;
self.extract_direction_components(&solution)
}
fn compute_corrector_direction(
&self,
self_g: &Array1<f64>,
_c_eq: &Option<Array1<f64>>,
_c_ineq: &Option<Array1<f64>>,
j_eq: &Option<Array2<f64>>,
j_ineq: &Option<Array2<f64>>,
s: &Array1<f64>,
lambda_ineq: &Array1<f64>,
dx_aff: &Array1<f64>,
ds_aff: &Array1<f64>,
dlambda_ineq_aff: &Array1<f64>,
sigma_mu: f64,
) -> Result<NewtonDirectionResult, OptimizeError> {
let n_total = self.n + self.m_eq + 2 * self.m_ineq;
let mut kkt_matrix = Array2::zeros((n_total, n_total));
let mut rhs = Array1::zeros(n_total);
let reg = self.options.regularization.max(1e-8);
for i in 0..self.n {
kkt_matrix[[i, i]] = 1.0 + reg;
}
for i in 0..self.n {
rhs[i] = 0.0;
}
let mut row_offset = self.n;
if let (Some(j_eq), true) = (j_eq, self.m_eq > 0) {
for i in 0..self.m_eq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_eq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_eq[[i, j]];
}
}
for i in 0..self.m_eq {
rhs[row_offset + i] = 0.0;
}
row_offset += self.m_eq;
}
if let (Some(j_ineq), true) = (j_ineq, self.m_ineq > 0) {
for i in 0..self.m_ineq {
for j in 0..self.n {
kkt_matrix[[j, row_offset + i]] = j_ineq[[i, j]];
kkt_matrix[[row_offset + i, j]] = j_ineq[[i, j]];
}
kkt_matrix[[row_offset + i, self.n + i]] = 1.0;
kkt_matrix[[self.n + i, row_offset + i]] = 1.0;
}
for i in 0..self.m_ineq {
rhs[row_offset + i] = 0.0;
}
row_offset += self.m_ineq;
for i in 0..self.m_ineq {
let s_i = s[i].max(1e-10);
let lambda_i = lambda_ineq[i].max(0.0);
kkt_matrix[[self.n + i, self.n + i]] = lambda_i / s_i + reg;
kkt_matrix[[self.n + i, row_offset - self.m_ineq + i]] = s_i;
kkt_matrix[[row_offset - self.m_ineq + i, self.n + i]] = lambda_i;
let correction = sigma_mu - ds_aff[i] * dlambda_ineq_aff[i];
rhs[self.n + i] = correction / s_i;
}
}
let solution = solve(&kkt_matrix, &rhs)?;
let (dx_cor, ds_cor, dlambda_eq_cor, dlambda_ineq_cor) =
self.extract_direction_components(&solution)?;
let dx_final = dx_aff + &dx_cor;
let ds_final = ds_aff + &ds_cor;
let dlambda_eq_final = &Array1::zeros(self.m_eq) + &dlambda_eq_cor;
let dlambda_ineq_final = dlambda_ineq_aff + &dlambda_ineq_cor;
Ok((dx_final, ds_final, dlambda_eq_final, dlambda_ineq_final))
}
fn extract_direction_components(
&self,
solution: &Array1<f64>,
) -> Result<NewtonDirectionResult, OptimizeError> {
let dx = solution
.slice(scirs2_core::ndarray::s![0..self.n])
.to_owned();
let ds = if self.m_ineq > 0 {
solution
.slice(scirs2_core::ndarray::s![self.n..self.n + self.m_ineq])
.to_owned()
} else {
Array1::zeros(0)
};
let mut offset = self.n + self.m_ineq;
let dlambda_eq = if self.m_eq > 0 {
solution
.slice(scirs2_core::ndarray::s![offset..offset + self.m_eq])
.to_owned()
} else {
Array1::zeros(0)
};
offset += self.m_eq;
let dlambda_ineq = if self.m_ineq > 0 {
solution
.slice(scirs2_core::ndarray::s![offset..offset + self.m_ineq])
.to_owned()
} else {
Array1::zeros(0)
};
Ok((dx, ds, dlambda_eq, dlambda_ineq))
}
fn compute_max_step_primal(&self, s: &Array1<f64>, ds: &Array1<f64>) -> f64 {
if self.m_ineq == 0 {
return 1.0;
}
let tau = 0.995; let mut alpha = 1.0;
for i in 0..self.m_ineq {
if ds[i] < 0.0 {
alpha = f64::min(alpha, -tau * s[i] / ds[i]);
}
}
alpha.max(0.0).min(1.0)
}
fn compute_max_step_dual(&self, lambda_ineq: &Array1<f64>, dlambda_ineq: &Array1<f64>) -> f64 {
if self.m_ineq == 0 {
return 1.0;
}
let tau = 0.995; let mut alpha = 1.0;
for i in 0..self.m_ineq {
if dlambda_ineq[i] < 0.0 {
alpha = f64::min(alpha, -tau * lambda_ineq[i] / dlambda_ineq[i]);
}
}
alpha.max(0.0).min(1.0)
}
fn line_search<F>(
&mut self,
fun: &mut F,
x: &Array1<f64>,
s: &Array1<f64>,
lambda_ineq: &Array1<f64>,
dx: &Array1<f64>,
ds: &Array1<f64>,
dlambda_ineq: &Array1<f64>,
_barrier: f64,
) -> Result<f64, OptimizeError>
where
F: FnMut(&ArrayView1<f64>) -> f64,
{
let tau = 0.995;
let mut alpha_primal = 1.0;
let mut alpha_dual = 1.0;
if self.m_ineq > 0 {
for i in 0..self.m_ineq {
if ds[i] < 0.0 {
alpha_primal = f64::min(alpha_primal, -tau * s[i] / ds[i]);
}
if dlambda_ineq[i] < 0.0 {
alpha_dual = f64::min(alpha_dual, -tau * lambda_ineq[i] / dlambda_ineq[i]);
}
}
}
let mut alpha = f64::min(alpha_primal, alpha_dual);
let f0 = fun(&x.view());
self.nfev += 1;
for _ in 0..self.options.max_ls_iter {
let x_new = x + alpha * dx;
let f_new = fun(&x_new.view());
self.nfev += 1;
if f_new <= f0 + self.options.alpha * alpha * dx.dot(dx) {
return Ok(alpha);
}
alpha *= self.options.beta;
}
Ok(alpha)
}
}
/// Solves the dense linear system `a * x = b` via `scirs2_linalg`, mapping any
/// backend failure into an `OptimizeError::ComputationError`.
#[allow(dead_code)]
fn solve(a: &Array2<f64>, b: &Array1<f64>) -> Result<Array1<f64>, OptimizeError> {
    use scirs2_linalg::solve;
    solve(&a.view(), &b.view(), None)
        .map_err(|e| OptimizeError::ComputationError(format!("Linear system solve failed: {}", e)))
}
#[allow(dead_code)]
pub fn minimize_interior_point<F, H, J>(
fun: F,
x0: Array1<f64>,
eq_con: Option<H>,
_eq_jac: Option<J>,
ineq_con: Option<H>,
_ineq_jac: Option<J>,
options: Option<InteriorPointOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
F: FnMut(&ArrayView1<f64>) -> f64 + Clone,
H: FnMut(&ArrayView1<f64>) -> Array1<f64>,
J: FnMut(&ArrayView1<f64>) -> Array2<f64>,
{
let options = options.unwrap_or_default();
let n = x0.len();
let m_eq = if eq_con.is_some() { 1 } else { 0 };
let m_ineq = if ineq_con.is_some() { 1 } else { 0 };
let mut solver = InteriorPointSolver::new(n, m_eq, m_ineq, &options);
let mut fun_mut = fun.clone();
let eps = 1e-8;
let mut grad_mut = |x: &ArrayView1<f64>| -> Array1<f64> {
let mut fun_clone = fun.clone();
finite_diff_gradient(&mut fun_clone, x, eps)
};
let result: InteriorPointResult = solver.solve(
&mut fun_mut,
&mut grad_mut,
None::<&mut dyn FnMut(&ArrayView1<f64>) -> Array1<f64>>,
None::<&mut dyn FnMut(&ArrayView1<f64>) -> Array2<f64>>,
None::<&mut dyn FnMut(&ArrayView1<f64>) -> Array1<f64>>,
None::<&mut dyn FnMut(&ArrayView1<f64>) -> Array2<f64>>,
&x0,
)?;
Ok(OptimizeResult {
x: result.x,
fun: result.fun,
nit: result.nit,
func_evals: result.nfev,
nfev: result.nfev,
success: result.success,
message: result.message,
jacobian: None,
hessian: None,
})
}
/// Forward-difference approximation of the gradient of `fun` at `x`.
///
/// Uses a relative step h = eps * (1 + |x_i|) per coordinate: one base
/// evaluation plus one perturbed evaluation per dimension (n + 1 calls).
#[allow(dead_code)]
fn finite_diff_gradient<F>(fun: &mut F, x: &ArrayView1<f64>, eps: f64) -> Array1<f64>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let f0 = fun(x);
    let mut workspace = x.to_owned();
    let mut grad = Array1::zeros(x.len());
    for i in 0..x.len() {
        let step = eps * (1.0 + x[i].abs());
        workspace[i] = x[i] + step;
        grad[i] = (fun(&workspace.view()) - f0) / step;
        // Restore the coordinate before perturbing the next one.
        workspace[i] = x[i];
    }
    grad
}
/// Forward-difference Jacobian of a list of scalar constraint functions;
/// row i holds the gradient estimate of `constraint_fns[i]` at `x`.
#[allow(dead_code)]
fn finite_diff_jacobian(
    constraint_fns: &[ConstraintFn],
    x: &ArrayView1<f64>,
    eps: f64,
) -> Array2<f64> {
    let m = constraint_fns.len();
    let n = x.len();
    let base = x.as_slice().expect("Operation failed");
    // Constraint values at the unperturbed point, shared by every column.
    let f0: Vec<f64> = constraint_fns.iter().map(|f| f(base)).collect();
    let mut perturbed = x.to_owned();
    let mut jac = Array2::zeros((m, n));
    for col in 0..n {
        let step = eps * (1.0 + x[col].abs());
        perturbed[col] = x[col] + step;
        let p = perturbed.as_slice().expect("Operation failed");
        for row in 0..m {
            jac[[row, col]] = (constraint_fns[row](p) - f0[row]) / step;
        }
        // Restore the coordinate before moving to the next column.
        perturbed[col] = x[col];
    }
    jac
}
/// Interior point minimization against a slice of `Constraint` descriptors.
///
/// Non-bounds equality and inequality constraints are split out, stacked into
/// vector-valued callbacks, and their Jacobians are formed by forward
/// differences. The objective gradient is also a forward-difference estimate.
/// Bounds constraints are currently not enforced (a warning is printed).
#[allow(dead_code)]
pub fn minimize_interior_point_constrained<F>(
    func: F,
    x0: Array1<f64>,
    constraints: &[Constraint<ConstraintFn>],
    options: Option<InteriorPointOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: Fn(&[f64]) -> f64 + Clone,
{
    let options = options.unwrap_or_default();
    let n = x0.len();
    // Partition the constraint list by kind; bounds are handled below.
    let eq_constraints: Vec<_> = constraints
        .iter()
        .filter(|c| c.kind == ConstraintKind::Equality && !c.is_bounds())
        .collect();
    let ineq_constraints: Vec<_> = constraints
        .iter()
        .filter(|c| c.kind == ConstraintKind::Inequality && !c.is_bounds())
        .collect();
    let m_eq = eq_constraints.len();
    let m_ineq = ineq_constraints.len();
    let mut solver = InteriorPointSolver::new(n, m_eq, m_ineq, &options);
    let func_clone = func.clone();
    // Adapter: ndarray view -> slice-based objective.
    let mut fun_mut =
        move |x: &ArrayView1<f64>| -> f64 { func(x.as_slice().expect("Operation failed")) };
    // Forward-difference gradient of the objective.
    let mut grad_mut = move |x: &ArrayView1<f64>| -> Array1<f64> {
        let mut fun_fd =
            |x: &ArrayView1<f64>| -> f64 { func_clone(x.as_slice().expect("Operation failed")) };
        finite_diff_gradient(&mut fun_fd, x, 1e-8)
    };
    // Stack the scalar equality constraints into one vector-valued callback.
    let mut eq_con_mut: Option<Box<dyn FnMut(&ArrayView1<f64>) -> Array1<f64>>> = if m_eq > 0 {
        let eq_fns: Vec<ConstraintFn> = eq_constraints.iter().map(|c| c.fun).collect();
        Some(Box::new(move |x: &ArrayView1<f64>| -> Array1<f64> {
            let x_slice = x.as_slice().expect("Operation failed");
            Array1::from_vec(eq_fns.iter().map(|f| f(x_slice)).collect())
        }))
    } else {
        None
    };
    // Finite-difference Jacobian of the equality constraints.
    let mut eq_jac_mut: Option<Box<dyn FnMut(&ArrayView1<f64>) -> Array2<f64>>> = if m_eq > 0 {
        let eq_fns: Vec<ConstraintFn> = eq_constraints.iter().map(|c| c.fun).collect();
        Some(Box::new(move |x: &ArrayView1<f64>| -> Array2<f64> {
            let eps = 1e-8;
            finite_diff_jacobian(&eq_fns, x, eps)
        }))
    } else {
        None
    };
    // Same stacking for the inequality constraints.
    let mut ineq_con_mut: Option<Box<dyn FnMut(&ArrayView1<f64>) -> Array1<f64>>> = if m_ineq > 0 {
        let ineq_fns: Vec<ConstraintFn> = ineq_constraints.iter().map(|c| c.fun).collect();
        Some(Box::new(move |x: &ArrayView1<f64>| -> Array1<f64> {
            let x_slice = x.as_slice().expect("Operation failed");
            Array1::from_vec(ineq_fns.iter().map(|f| f(x_slice)).collect())
        }))
    } else {
        None
    };
    // Finite-difference Jacobian of the inequality constraints.
    let mut ineq_jac_mut: Option<Box<dyn FnMut(&ArrayView1<f64>) -> Array2<f64>>> = if m_ineq > 0 {
        let ineq_fns: Vec<ConstraintFn> = ineq_constraints.iter().map(|c| c.fun).collect();
        Some(Box::new(move |x: &ArrayView1<f64>| -> Array2<f64> {
            let eps = 1e-8;
            finite_diff_jacobian(&ineq_fns, x, eps)
        }))
    } else {
        None
    };
    let result = solver.solve(
        &mut fun_mut,
        &mut grad_mut,
        eq_con_mut.as_mut().map(|f| f.as_mut()),
        eq_jac_mut.as_mut().map(|f| f.as_mut()),
        ineq_con_mut.as_mut().map(|f| f.as_mut()),
        ineq_jac_mut.as_mut().map(|f| f.as_mut()),
        &x0,
    )?;
    // Bounds are not wired into the solver yet; surface that to the user.
    let bounds_constraints: Vec<_> = constraints.iter().filter(|c| c.is_bounds()).collect();
    if !bounds_constraints.is_empty() {
        eprintln!("Warning: Box constraints (bounds) are not yet fully integrated with interior point method");
    }
    Ok(OptimizeResult {
        x: result.x,
        fun: result.fun,
        nit: result.nit,
        func_evals: result.nfev,
        nfev: result.nfev,
        success: result.success,
        message: result.message,
        jacobian: None,
        hessian: None,
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    // Options are now built with struct-update syntax instead of
    // field-by-field mutation (clippy::field_reassign_with_default).

    #[test]
    fn test_interior_point_quadratic() {
        // min x0^2 + x1^2 subject to 1 - x0 - x1 <= 0; optimum (0.5, 0.5).
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[1].powi(2) };
        let ineq_con =
            |x: &ArrayView1<f64>| -> Array1<f64> { Array1::from_vec(vec![1.0 - x[0] - x[1]]) };
        let ineq_jac = |_x: &ArrayView1<f64>| -> Array2<f64> {
            Array2::from_shape_vec((1, 2), vec![-1.0, -1.0]).expect("Operation failed")
        };
        let x0 = Array1::from_vec(vec![0.3, 0.3]);
        let options = InteriorPointOptions {
            regularization: 1e-4,
            tol: 1e-4,
            max_iter: 100,
            ..Default::default()
        };
        let result = minimize_interior_point(
            fun,
            x0,
            None,
            None,
            Some(ineq_con),
            Some(ineq_jac),
            Some(options),
        );
        match result {
            Ok(res) => {
                if res.success {
                    assert_abs_diff_eq!(res.x[0], 0.5, epsilon = 1e-2);
                    assert_abs_diff_eq!(res.x[1], 0.5, epsilon = 1e-2);
                    assert_abs_diff_eq!(res.fun, 0.5, epsilon = 1e-2);
                }
                assert!(res.nit > 0);
            }
            Err(_) => {
                // Solver failure is tolerated by this smoke test.
            }
        }
    }

    #[test]
    fn test_interior_point_with_equality() {
        // min x0^2 + x1^2 subject to x0 + x1 = 2; optimum (1, 1).
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[1].powi(2) };
        let eq_con =
            |x: &ArrayView1<f64>| -> Array1<f64> { Array1::from_vec(vec![x[0] + x[1] - 2.0]) };
        let eq_jac = |_x: &ArrayView1<f64>| -> Array2<f64> {
            Array2::from_shape_vec((1, 2), vec![1.0, 1.0]).expect("Operation failed")
        };
        let x0 = Array1::from_vec(vec![1.2, 0.8]);
        let options = InteriorPointOptions {
            regularization: 1e-4,
            tol: 1e-4,
            max_iter: 100,
            ..Default::default()
        };
        let result = minimize_interior_point(
            fun,
            x0,
            Some(eq_con),
            Some(eq_jac),
            None,
            None,
            Some(options),
        );
        match result {
            Ok(res) => {
                if res.success {
                    assert_abs_diff_eq!(res.x[0], 1.0, epsilon = 1e-2);
                    assert_abs_diff_eq!(res.x[1], 1.0, epsilon = 1e-2);
                    assert_abs_diff_eq!(res.fun, 2.0, epsilon = 1e-2);
                }
                assert!(res.nit > 0);
            }
            Err(_) => {
                // Solver failure is tolerated by this smoke test.
            }
        }
    }

    #[test]
    fn test_interior_point_options_default() {
        let opts = InteriorPointOptions::default();
        assert_eq!(opts.max_iter, 100);
        assert!((opts.tol - 1e-8).abs() < 1e-12);
        assert!((opts.initial_barrier - 1.0).abs() < 1e-12);
        assert!(opts.use_mehrotra);
    }

    #[test]
    fn test_interior_point_result_fields() {
        // A loosely constrained problem; only result bookkeeping is checked.
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[1].powi(2) };
        let ineq_con =
            |x: &ArrayView1<f64>| -> Array1<f64> { Array1::from_vec(vec![10.0 - x[0] - x[1]]) };
        let ineq_jac = |_x: &ArrayView1<f64>| -> Array2<f64> {
            Array2::from_shape_vec((1, 2), vec![-1.0, -1.0]).expect("Operation failed")
        };
        let x0 = Array1::from_vec(vec![1.0, 1.0]);
        let options = InteriorPointOptions::default();
        let result = minimize_interior_point(
            fun,
            x0,
            None,
            None,
            Some(ineq_con),
            Some(ineq_jac),
            Some(options),
        );
        match result {
            Ok(res) => {
                assert!(res.nit > 0);
                assert!(res.nfev > 0);
                assert!(res.fun.is_finite());
                assert!(!res.message.is_empty());
            }
            Err(_) => {
                // Solver failure is tolerated by this smoke test.
            }
        }
    }

    #[test]
    fn test_interior_point_multiple_constraints() {
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[1].powi(2) };
        let ineq_con = |x: &ArrayView1<f64>| -> Array1<f64> {
            Array1::from_vec(vec![1.0 - x[0] - x[1], 1.0 - x[0] + x[1]])
        };
        let ineq_jac = |_x: &ArrayView1<f64>| -> Array2<f64> {
            Array2::from_shape_vec((2, 2), vec![-1.0, -1.0, -1.0, 1.0]).expect("Operation failed")
        };
        let x0 = Array1::from_vec(vec![0.3, 0.3]);
        let options = InteriorPointOptions {
            regularization: 1e-4,
            tol: 1e-3,
            ..Default::default()
        };
        let result = minimize_interior_point(
            fun,
            x0,
            None,
            None,
            Some(ineq_con),
            Some(ineq_jac),
            Some(options),
        );
        match result {
            Ok(res) => {
                assert!(res.nit > 0);
            }
            Err(_) => {
                // Solver failure is tolerated by this smoke test.
            }
        }
    }

    #[test]
    fn test_interior_point_3d_problem() {
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[1].powi(2) + x[2].powi(2) };
        let ineq_con = |x: &ArrayView1<f64>| -> Array1<f64> {
            Array1::from_vec(vec![3.0 - x[0] - x[1] - x[2]])
        };
        let ineq_jac = |_x: &ArrayView1<f64>| -> Array2<f64> {
            Array2::from_shape_vec((1, 3), vec![-1.0, -1.0, -1.0]).expect("Operation failed")
        };
        let x0 = Array1::from_vec(vec![0.5, 0.5, 0.5]);
        let options = InteriorPointOptions {
            regularization: 1e-4,
            tol: 1e-3,
            ..Default::default()
        };
        let result = minimize_interior_point(
            fun,
            x0,
            None,
            None,
            Some(ineq_con),
            Some(ineq_jac),
            Some(options),
        );
        match result {
            Ok(res) => {
                if res.success {
                    assert!(
                        res.fun < 5.0,
                        "Should find reasonable solution, got {}",
                        res.fun
                    );
                }
                assert!(res.nit > 0);
            }
            Err(_) => {
                // Solver failure is tolerated by this smoke test.
            }
        }
    }

    #[test]
    fn test_interior_point_constrained_helper() {
        use crate::constrained::{Constraint, ConstraintFn, ConstraintKind};
        let func = |x: &[f64]| -> f64 { x[0].powi(2) + x[1].powi(2) };
        fn ineq_constraint(x: &[f64]) -> f64 {
            1.0 - x[0] - x[1]
        }
        let x0 = Array1::from_vec(vec![0.1, 0.1]);
        let constraints = vec![Constraint {
            fun: ineq_constraint as fn(&[f64]) -> f64,
            kind: ConstraintKind::Inequality,
            lb: None,
            ub: None,
        }];
        let options = InteriorPointOptions {
            tol: 1e-3,
            max_iter: 50,
            regularization: 1e-4,
            ..Default::default()
        };
        let result = minimize_interior_point_constrained(func, x0, &constraints, Some(options));
        // Smoke test: either outcome is acceptable, it just must not panic.
        assert!(result.is_ok() || result.is_err());
    }

    #[test]
    fn test_interior_point_barrier_reduction() {
        let opts = InteriorPointOptions {
            initial_barrier: 10.0,
            barrier_reduction: 0.1,
            min_barrier: 1e-10,
            ..Default::default()
        };
        let barrier_after_one_step = opts.initial_barrier * opts.barrier_reduction;
        assert!(barrier_after_one_step < opts.initial_barrier);
        assert!(barrier_after_one_step > opts.min_barrier);
    }
}