use crate::error::{OptimizeError, OptimizeResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
/// A linear program `min cᵀx  s.t.  A x ≤ b,  lb ≤ x ≤ ub` together with
/// optional interval (box) uncertainty half-widths on `c`, `A` and `b`.
#[derive(Debug, Clone)]
pub struct RobustLP {
    /// Nominal cost vector `c` (length n).
    pub c: Array1<f64>,
    /// Nominal constraint matrix `A` (m × n).
    pub a_matrix: Array2<f64>,
    /// Nominal right-hand side `b` (length m).
    pub b_rhs: Array1<f64>,
    /// Optional elementwise lower bounds on x (`None` = unbounded below).
    pub lb: Option<Array1<f64>>,
    /// Optional elementwise upper bounds on x (`None` = unbounded above).
    pub ub: Option<Array1<f64>>,
    /// Interval half-widths for `c` (length n); `None` means no uncertainty.
    pub c_uncertainty: Option<Array1<f64>>,
    /// Interval half-widths for `A` (m × n); `None` means no uncertainty.
    pub a_uncertainty: Option<Array2<f64>>,
    /// Interval half-widths for `b` (length m); `None` means no uncertainty.
    pub b_uncertainty: Option<Array1<f64>>,
}
impl RobustLP {
    /// Creates a nominal LP `min cᵀx  s.t.  A x ≤ b`, validating that the
    /// dimensions of `c`, `a_matrix` and `b_rhs` agree.
    ///
    /// # Errors
    /// `OptimizeError::ValueError` when `A` has a different column count than
    /// `c` has entries, or `b` has a different length than `A` has rows.
    pub fn new(c: Array1<f64>, a_matrix: Array2<f64>, b_rhs: Array1<f64>) -> OptimizeResult<Self> {
        let n = c.len();
        let (m, nc) = (a_matrix.shape()[0], a_matrix.shape()[1]);
        if nc != n {
            return Err(OptimizeError::ValueError(format!(
                "A has {} columns but c has length {}",
                nc, n
            )));
        }
        if b_rhs.len() != m {
            return Err(OptimizeError::ValueError(format!(
                "b has length {} but A has {} rows",
                b_rhs.len(),
                m
            )));
        }
        Ok(Self {
            c,
            a_matrix,
            b_rhs,
            lb: None,
            ub: None,
            c_uncertainty: None,
            a_uncertainty: None,
            b_uncertainty: None,
        })
    }

    /// Attaches interval half-widths for the cost vector (builder style).
    ///
    /// # Errors
    /// `OptimizeError::ValueError` when `delta_c` and `c` differ in length.
    pub fn with_c_uncertainty(mut self, delta_c: Array1<f64>) -> OptimizeResult<Self> {
        if delta_c.len() != self.c.len() {
            return Err(OptimizeError::ValueError(format!(
                "delta_c has length {} but c has length {}",
                delta_c.len(),
                self.c.len()
            )));
        }
        self.c_uncertainty = Some(delta_c);
        Ok(self)
    }

    /// Attaches interval half-widths for the constraint matrix (builder style).
    ///
    /// # Errors
    /// `OptimizeError::ValueError` when `delta_a` and `A` differ in shape.
    pub fn with_a_uncertainty(mut self, delta_a: Array2<f64>) -> OptimizeResult<Self> {
        if delta_a.shape() != self.a_matrix.shape() {
            return Err(OptimizeError::ValueError(format!(
                "delta_A shape {:?} does not match A shape {:?}",
                delta_a.shape(),
                self.a_matrix.shape()
            )));
        }
        self.a_uncertainty = Some(delta_a);
        Ok(self)
    }

    /// Attaches interval half-widths for the right-hand side (builder style).
    ///
    /// # Errors
    /// `OptimizeError::ValueError` when `delta_b` and `b` differ in length.
    pub fn with_b_uncertainty(mut self, delta_b: Array1<f64>) -> OptimizeResult<Self> {
        if delta_b.len() != self.b_rhs.len() {
            return Err(OptimizeError::ValueError(format!(
                "delta_b has length {} but b has length {}",
                delta_b.len(),
                self.b_rhs.len()
            )));
        }
        self.b_uncertainty = Some(delta_b);
        Ok(self)
    }

    /// Attaches elementwise bounds `lb ≤ x ≤ ub` (builder style).
    ///
    /// # Errors
    /// `OptimizeError::ValueError` when either bound vector has the wrong
    /// length, or when some `lb[i] > ub[i]` (empty box).
    pub fn with_bounds(mut self, lb: Array1<f64>, ub: Array1<f64>) -> OptimizeResult<Self> {
        let n = self.c.len();
        if lb.len() != n || ub.len() != n {
            return Err(OptimizeError::ValueError(format!(
                "bounds have length {}/{} but n={}",
                lb.len(),
                ub.len(),
                n
            )));
        }
        // Reject inverted bounds early; downstream box projection would
        // otherwise silently clamp every iterate into an empty box.
        if lb.iter().zip(ub.iter()).any(|(&lo, &hi)| lo > hi) {
            return Err(OptimizeError::ValueError(
                "lower bounds must not exceed upper bounds".to_string(),
            ));
        }
        self.lb = Some(lb);
        self.ub = Some(ub);
        Ok(self)
    }

    /// Number of decision variables n.
    pub fn n_vars(&self) -> usize {
        self.c.len()
    }

    /// Number of inequality constraints m.
    pub fn n_constraints(&self) -> usize {
        self.b_rhs.len()
    }
}
/// Outcome of a robust LP solve.
#[derive(Debug, Clone)]
pub struct RobustLPResult {
    /// Final iterate.
    pub x: Array1<f64>,
    /// Worst-case (robust) objective value at `x`.
    pub fun: f64,
    /// Nominal objective cᵀx at `x`, ignoring uncertainty.
    pub nominal_fun: f64,
    /// Minimum slack of the nominal constraints, min_i (b_i - (A x)_i);
    /// negative when some nominal constraint is violated.
    pub constraint_slack: f64,
    /// Number of iterations reported by the solver.
    pub n_iter: usize,
    /// Whether the gradient-norm tolerance was met before the iteration limit.
    pub converged: bool,
    /// Human-readable termination summary.
    pub message: String,
}
/// Tuning parameters for the projected-gradient robust LP solvers.
#[derive(Debug, Clone)]
pub struct RobustLPConfig {
    /// Maximum number of gradient iterations.
    pub max_iter: usize,
    /// Convergence tolerance on the penalized-gradient norm.
    pub tol: f64,
    /// Initial gradient step size.
    pub step_size: f64,
    /// Multiplicative factor applied to the step when a line-search trial is
    /// rejected (expected in (0, 1)).
    pub step_reduction: f64,
    /// Weight of the quadratic penalty on constraint violation.
    pub constraint_penalty: f64,
}
impl Default for RobustLPConfig {
    /// Conservative defaults suitable for small, well-scaled problems.
    fn default() -> Self {
        Self {
            max_iter: 5_000,
            tol: 1e-6,
            step_size: 1e-2,
            step_reduction: 0.5,
            constraint_penalty: 100.0,
        }
    }
}
fn project_box(x: &Array1<f64>, lb: &Option<Array1<f64>>, ub: &Option<Array1<f64>>) -> Array1<f64> {
x.iter()
.enumerate()
.map(|(i, &xi)| {
let lo = lb.as_ref().map(|b| b[i]).unwrap_or(f64::NEG_INFINITY);
let hi = ub.as_ref().map(|b| b[i]).unwrap_or(f64::INFINITY);
xi.max(lo).min(hi)
})
.collect()
}
/// Computes the residual vector r = A·x − b; r[i] > 0 means constraint i
/// (`(Ax)_i ≤ b_i`) is violated.
fn constraint_residual(a: &ArrayView2<f64>, x: &ArrayView1<f64>, b: &ArrayView1<f64>) -> Array1<f64> {
    Array1::from_shape_fn(b.len(), |i| {
        let ax_i = x
            .iter()
            .enumerate()
            .fold(0.0_f64, |acc, (j, &xj)| acc + a[[i, j]] * xj);
        ax_i - b[i]
    })
}
/// Gradient of `c_worstᵀx + penalty·Σ_i max(0, (Ax − b)_i)²` with respect
/// to x: the linear part plus `2·penalty·violation·A_i` for each violated row.
fn penalized_gradient(
    x: &Array1<f64>,
    c_worst: &Array1<f64>,
    a: &Array2<f64>,
    b: &Array1<f64>,
    penalty: f64,
) -> Array1<f64> {
    let mut grad = c_worst.clone();
    for (i, &bi) in b.iter().enumerate() {
        let ax_i: f64 = x
            .iter()
            .enumerate()
            .map(|(j, &xj)| a[[i, j]] * xj)
            .sum();
        let violation = ax_i - bi;
        // Only violated constraints contribute to the penalty gradient.
        if violation > 0.0 {
            let scale = 2.0 * penalty * violation;
            for (j, gj) in grad.iter_mut().enumerate() {
                *gj += scale * a[[i, j]];
            }
        }
    }
    grad
}
/// Solves the box-uncertain robust LP
///
/// `min_x max (c+δc)ᵀx  s.t. (A+δA)x ≤ b+δb` over all `|δc|≤Δc, |δA|≤ΔA,
/// |δb|≤Δb`, plus optional bounds `lb ≤ x ≤ ub`, via projected gradient
/// descent on a quadratic-penalty formulation. The interval worst case is
/// taken analytically: the cost picks up `Δc·|x|`, each RHS is tightened by
/// `|Δb_i|` and by `ΔA_i·|x|`.
///
/// # Arguments
/// * `problem` - problem data and uncertainty half-widths.
/// * `x0` - starting point with `problem.n_vars()` entries.
/// * `config` - iteration limit, tolerance, step-size and penalty settings.
///
/// # Returns
/// A [`RobustLPResult`] with the final iterate, robust and nominal objective
/// values, the worst slack of the *nominal* constraints, the number of
/// iterations actually performed, and a convergence flag.
///
/// # Errors
/// `OptimizeError::ValueError` when `x0` has the wrong length.
pub fn box_robust_lp(
    problem: &RobustLP,
    x0: &ArrayView1<f64>,
    config: &RobustLPConfig,
) -> OptimizeResult<RobustLPResult> {
    let n = problem.n_vars();
    let m = problem.n_constraints();
    if x0.len() != n {
        return Err(OptimizeError::ValueError(format!(
            "x0 has length {} but problem has {} variables",
            x0.len(),
            n
        )));
    }
    // Missing uncertainty blocks default to zero, i.e. the nominal problem.
    let delta_c = problem
        .c_uncertainty
        .clone()
        .unwrap_or_else(|| Array1::zeros(n));
    let delta_b = problem
        .b_uncertainty
        .clone()
        .unwrap_or_else(|| Array1::zeros(m));
    // Worst case over |δb| ≤ Δb: tighten every RHS by its half-width.
    let b_robust: Array1<f64> = problem
        .b_rhs
        .iter()
        .zip(delta_b.iter())
        .map(|(&bi, &dbi)| bi - dbi.abs())
        .collect();
    let delta_a = problem
        .a_uncertainty
        .clone()
        .unwrap_or_else(|| Array2::zeros((m, n)));
    let mut x = x0.to_owned();
    let mut converged = false;
    // Track iterations actually performed; previously the result always
    // reported `config.max_iter` even on early convergence.
    let mut n_iter = 0;
    let mut step = config.step_size;
    for iter in 0..config.max_iter {
        n_iter = iter + 1;
        // Worst-case cost at the current x: each coefficient is pushed by
        // ±Δc_i in the direction that increases the objective given sign(x_i).
        let c_worst: Array1<f64> = problem
            .c
            .iter()
            .zip(delta_c.iter())
            .zip(x.iter())
            .map(|((&ci, &dci), &xi)| ci + dci * xi.signum())
            .collect();
        // Worst case over |δA| ≤ ΔA at the current x: row i loses ΔA_i·|x|.
        let b_effective: Array1<f64> = (0..m)
            .map(|i| {
                let reduction: f64 = (0..n).map(|j| delta_a[[i, j]] * x[j].abs()).sum();
                b_robust[i] - reduction
            })
            .collect();
        let grad = penalized_gradient(
            &x,
            &c_worst,
            &problem.a_matrix,
            &b_effective,
            config.constraint_penalty,
        );
        let grad_norm: f64 = grad.iter().map(|g| g * g).sum::<f64>().sqrt();
        if grad_norm < config.tol {
            converged = true;
            break;
        }
        // Robust objective at the current point: nominal cost + Δc·|x|.
        let obj_curr: f64 = problem
            .c
            .iter()
            .zip(x.iter())
            .map(|(&ci, &xi)| ci * xi)
            .sum::<f64>()
            + delta_c
                .iter()
                .zip(x.iter())
                .map(|(&di, &xi)| di * xi.abs())
                .sum::<f64>();
        // Backtracking line search with an Armijo-style decrease test.
        let mut accepted = false;
        for _ in 0..20 {
            let x_new: Array1<f64> = x
                .iter()
                .zip(grad.iter())
                .map(|(&xi, &gi)| xi - step * gi)
                .collect();
            let x_proj = project_box(&x_new, &problem.lb, &problem.ub);
            let obj_new: f64 = problem
                .c
                .iter()
                .zip(x_proj.iter())
                .map(|(&ci, &xi)| ci * xi)
                .sum::<f64>()
                + delta_c
                    .iter()
                    .zip(x_proj.iter())
                    .map(|(&di, &xi)| di * xi.abs())
                    .sum::<f64>();
            if obj_new <= obj_curr - 1e-4 * step * grad_norm * grad_norm {
                x = x_proj;
                accepted = true;
                break;
            }
            step *= config.step_reduction;
        }
        if !accepted {
            // Line search exhausted: take the (heavily reduced) step anyway so
            // the constraint penalty can still pull the iterate feasible.
            let x_new: Array1<f64> = x
                .iter()
                .zip(grad.iter())
                .map(|(&xi, &gi)| xi - step * gi)
                .collect();
            x = project_box(&x_new, &problem.lb, &problem.ub);
        }
        // Periodically relax the step back toward its initial value.
        if iter % 100 == 99 {
            step = (step * 1.1).min(config.step_size);
        }
    }
    let nominal_fun: f64 = problem
        .c
        .iter()
        .zip(x.iter())
        .map(|(&ci, &xi)| ci * xi)
        .sum();
    let robust_fun: f64 = nominal_fun
        + delta_c
            .iter()
            .zip(x.iter())
            .map(|(&di, &xi)| di * xi.abs())
            .sum::<f64>();
    // Slack is measured against the nominal constraints: positive means
    // strictly feasible, negative means the worst row is violated.
    let residual = constraint_residual(&problem.a_matrix.view(), &x.view(), &problem.b_rhs.view());
    let constraint_slack = -residual.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    Ok(RobustLPResult {
        x,
        fun: robust_fun,
        nominal_fun,
        constraint_slack,
        n_iter,
        converged,
        message: if converged {
            "Box robust LP converged".to_string()
        } else {
            "Box robust LP reached maximum iterations".to_string()
        },
    })
}
/// Solves a robust LP whose cost uncertainty is ellipsoidal:
///
/// `min_x cᵀx + r·‖L x‖₂  s.t. A x ≤ b, lb ≤ x ≤ ub`,
///
/// where `L` is a lower-triangular Cholesky-like factor of `c_covariance`
/// and `r = ellipsoid_radius`. Constraints are enforced through a quadratic
/// penalty; the iteration is plain projected gradient descent with a fixed
/// step (no line search).
///
/// # Errors
/// `OptimizeError::ValueError` when `x0` has the wrong length, the
/// covariance is not n×n, or the radius is negative.
pub fn ellipsoidal_robust_lp(
    problem: &RobustLP,
    c_covariance: &ArrayView2<f64>,
    ellipsoid_radius: f64,
    x0: &ArrayView1<f64>,
    config: &RobustLPConfig,
) -> OptimizeResult<RobustLPResult> {
    let n = problem.n_vars();
    let m = problem.n_constraints();
    if x0.len() != n {
        return Err(OptimizeError::ValueError(format!(
            "x0 has length {} but problem has {} variables",
            x0.len(),
            n
        )));
    }
    if c_covariance.shape() != [n, n] {
        return Err(OptimizeError::ValueError(format!(
            "c_covariance shape {:?} != [{n},{n}]",
            c_covariance.shape()
        )));
    }
    if ellipsoid_radius < 0.0 {
        return Err(OptimizeError::ValueError(
            "ellipsoid_radius must be non-negative".to_string(),
        ));
    }
    let l_chol = cholesky_lower_triangular(c_covariance)?;
    let mut x = x0.to_owned();
    let mut converged = false;
    // Track iterations actually performed; previously the result always
    // reported `config.max_iter` even on early convergence.
    let mut n_iter = 0;
    // Below this norm the ‖Lx‖ term is treated as at its kink and its
    // gradient contribution is dropped.
    let h = 1e-7;
    let step = config.step_size;
    for iter in 0..config.max_iter {
        n_iter = iter + 1;
        // Gradient of cᵀx + r·‖Lx‖: c + r·Lᵀ(Lx/‖Lx‖) away from the kink.
        let lx = mat_vec_mul_lower(&l_chol, &x.view());
        let lx_norm = l2_norm_vec(&lx);
        let socp_grad: Array1<f64> = if lx_norm > h {
            let lx_normalized: Array1<f64> = lx.iter().map(|&v| v / lx_norm).collect();
            let lt_v = mat_vec_mul_lower_transpose(&l_chol, &lx_normalized.view());
            problem
                .c
                .iter()
                .zip(lt_v.iter())
                .map(|(&ci, &li)| ci + ellipsoid_radius * li)
                .collect()
        } else {
            problem.c.clone()
        };
        // Add the quadratic-penalty gradient for every violated constraint.
        let residual = constraint_residual(
            &problem.a_matrix.view(),
            &x.view(),
            &problem.b_rhs.view(),
        );
        let mut full_grad = socp_grad;
        for i in 0..m {
            if residual[i] > 0.0 {
                for j in 0..n {
                    full_grad[j] +=
                        2.0 * config.constraint_penalty * residual[i] * problem.a_matrix[[i, j]];
                }
            }
        }
        let grad_norm = l2_norm_vec(&full_grad);
        if grad_norm < config.tol {
            converged = true;
            break;
        }
        // Fixed-step projected gradient update.
        let x_new: Array1<f64> = x
            .iter()
            .zip(full_grad.iter())
            .map(|(&xi, &gi)| xi - step * gi)
            .collect();
        x = project_box(&x_new, &problem.lb, &problem.ub);
    }
    let nominal_fun: f64 = problem
        .c
        .iter()
        .zip(x.iter())
        .map(|(&ci, &xi)| ci * xi)
        .sum();
    let lx = mat_vec_mul_lower(&l_chol, &x.view());
    let robust_fun = nominal_fun + ellipsoid_radius * l2_norm_vec(&lx);
    // Worst slack of the nominal constraints (negative = violated).
    let residual = constraint_residual(&problem.a_matrix.view(), &x.view(), &problem.b_rhs.view());
    let constraint_slack = -residual.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    Ok(RobustLPResult {
        x,
        fun: robust_fun,
        nominal_fun,
        constraint_slack,
        n_iter,
        converged,
        message: if converged {
            "Ellipsoidal robust LP converged".to_string()
        } else {
            "Ellipsoidal robust LP reached maximum iterations".to_string()
        },
    })
}
/// Evaluates the worst-case objective cᵀx plus the maximum perturbation
/// achievable under the given uncertainty `model`:
///
/// * `Box` — penalty `Σ_i delta_i·|x_i|`.
/// * `Ellipsoidal` — penalty `radius·sqrt(xᵀ Σ x)`.
/// * `Budget` — Bertsimas–Sim: the `⌊budget⌋` largest terms `delta_i·|x_i|`
///   plus a `budget − ⌊budget⌋` fraction of the next-largest.
///
/// # Errors
/// `OptimizeError::ValueError` on any dimension mismatch, or when `budget`
/// is negative.
pub fn robust_objective(
    c: &ArrayView1<f64>,
    x: &ArrayView1<f64>,
    model: &ObjectiveUncertaintyModel,
) -> OptimizeResult<f64> {
    let n = c.len();
    if x.len() != n {
        return Err(OptimizeError::ValueError(format!(
            "c has length {} but x has length {}",
            n,
            x.len()
        )));
    }
    let nominal: f64 = c.iter().zip(x.iter()).map(|(&ci, &xi)| ci * xi).sum();
    let penalty = match model {
        ObjectiveUncertaintyModel::Box { delta } => {
            if delta.len() != n {
                return Err(OptimizeError::ValueError(format!(
                    "delta has length {} but n={}",
                    delta.len(),
                    n
                )));
            }
            delta
                .iter()
                .zip(x.iter())
                .map(|(&di, &xi)| di * xi.abs())
                .sum::<f64>()
        }
        ObjectiveUncertaintyModel::Ellipsoidal { covariance, radius } => {
            if covariance.shape() != [n, n] {
                return Err(OptimizeError::ValueError(format!(
                    "covariance shape {:?} != [{n},{n}]",
                    covariance.shape()
                )));
            }
            // xᵀ Σ x computed row by row.
            let sigma_x: f64 = (0..n)
                .map(|i| {
                    let row: f64 = (0..n).map(|j| covariance[[i, j]] * x[j]).sum();
                    row * x[i]
                })
                .sum();
            radius * sigma_x.sqrt()
        }
        ObjectiveUncertaintyModel::Budget { delta, budget } => {
            if delta.len() != n {
                return Err(OptimizeError::ValueError(format!(
                    "delta has length {} but n={}",
                    delta.len(),
                    n
                )));
            }
            // A negative budget is meaningless; previously it silently
            // saturated to zero deviations via the float-to-usize cast.
            if *budget < 0.0 {
                return Err(OptimizeError::ValueError(
                    "budget must be non-negative".to_string(),
                ));
            }
            // The adversary picks the largest-impact coefficients first;
            // assumes delta entries are non-negative half-widths — TODO confirm.
            let mut perturbations: Vec<f64> = delta
                .iter()
                .zip(x.iter())
                .map(|(&di, &xi)| di * xi.abs())
                .collect();
            perturbations.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
            let floor_gamma = budget.floor() as usize;
            let frac = budget - budget.floor();
            // ⌊Γ⌋ full deviations plus a fractional share of the next one.
            let int_sum: f64 = perturbations.iter().take(floor_gamma).sum();
            let frac_sum = if frac > 0.0 && floor_gamma < perturbations.len() {
                frac * perturbations[floor_gamma]
            } else {
                0.0
            };
            int_sum + frac_sum
        }
    };
    Ok(nominal + penalty)
}
/// Uncertainty model for the cost vector used by `robust_objective`.
#[derive(Debug, Clone)]
pub enum ObjectiveUncertaintyModel {
    /// Interval uncertainty: each cost coefficient may deviate by at most
    /// ±`delta[i]`; the penalty is Σ_i delta_i·|x_i|.
    Box {
        delta: Array1<f64>,
    },
    /// Ellipsoidal uncertainty described by a covariance Σ and a radius r;
    /// the penalty evaluates to r·sqrt(xᵀ Σ x).
    Ellipsoidal {
        covariance: Array2<f64>,
        radius: f64,
    },
    /// Bertsimas–Sim budget: at most `budget` coefficients deviate
    /// (fractional budgets allowed), each by at most `delta[i]`.
    Budget {
        delta: Array1<f64>,
        budget: f64,
    },
}
fn cholesky_lower_triangular(a: &ArrayView2<f64>) -> OptimizeResult<Array2<f64>> {
let n = a.shape()[0];
let mut l = Array2::<f64>::zeros((n, n));
for i in 0..n {
for j in 0..=i {
let mut s: f64 = 0.0;
for p in 0..j {
s += l[[i, p]] * l[[j, p]];
}
if i == j {
let diag = a[[i, i]] - s;
l[[i, j]] = if diag < 0.0 { diag.abs().sqrt().max(1e-12) } else { diag.sqrt() };
} else {
let ljj = l[[j, j]];
l[[i, j]] = if ljj.abs() < 1e-14 { 0.0 } else { (a[[i, j]] - s) / ljj };
}
}
}
Ok(l)
}
/// y = L·x for a lower-triangular L, touching only entries on/below the
/// diagonal.
fn mat_vec_mul_lower(l: &Array2<f64>, x: &ArrayView1<f64>) -> Array1<f64> {
    let n = x.len();
    Array1::from_shape_fn(n, |i| (0..=i).map(|j| l[[i, j]] * x[j]).sum())
}
/// y = Lᵀ·x for a lower-triangular L: y[j] sums the j-th column of L
/// against x over rows j..n.
fn mat_vec_mul_lower_transpose(l: &Array2<f64>, x: &ArrayView1<f64>) -> Array1<f64> {
    let n = x.len();
    Array1::from_shape_fn(n, |j| (j..n).map(|i| l[[i, j]] * x[i]).sum())
}
/// Euclidean (L2) norm of a vector.
fn l2_norm_vec(v: &Array1<f64>) -> f64 {
    v.iter().fold(0.0_f64, |acc, &vi| acc + vi * vi).sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::{array, Array2};

    /// Toy problem: minimize -x1 - x2 subject to x1 + x2 <= 1.
    fn simple_lp() -> OptimizeResult<RobustLP> {
        RobustLP::new(array![-1.0, -1.0], array![[1.0, 1.0]], array![1.0])
    }

    #[test]
    fn test_robust_lp_new_dimension_mismatch() {
        let c = array![1.0, 2.0];
        let a = array![[1.0, 2.0, 3.0]];
        let b = array![1.0];
        assert!(RobustLP::new(c, a, b).is_err());
    }

    #[test]
    fn test_box_robust_lp_no_uncertainty() {
        let problem = simple_lp()
            .expect("failed to create problem")
            .with_bounds(array![0.0, 0.0], array![1.0, 1.0])
            .expect("unexpected None or Err");
        let x0 = array![0.0, 0.0];
        let config = RobustLPConfig {
            max_iter: 3_000,
            step_size: 1e-2,
            constraint_penalty: 50.0,
            ..Default::default()
        };
        let result = box_robust_lp(&problem, &x0.view(), &config).expect("failed to create result");
        assert!(
            result.nominal_fun < 0.0,
            "nominal fun should be negative (minimizing -x)"
        );
    }

    #[test]
    fn test_box_robust_lp_with_uncertainty() {
        let problem = simple_lp()
            .expect("unexpected None or Err")
            .with_c_uncertainty(array![0.1, 0.1])
            .expect("unexpected None or Err")
            .with_bounds(array![0.0, 0.0], array![1.0, 1.0])
            .expect("unexpected None or Err");
        let x0 = array![0.1, 0.1];
        let config = RobustLPConfig {
            max_iter: 2_000,
            step_size: 5e-3,
            constraint_penalty: 50.0,
            ..Default::default()
        };
        let result = box_robust_lp(&problem, &x0.view(), &config).expect("failed to create result");
        // The robust objective must dominate the nominal one.
        assert!(
            result.fun >= result.nominal_fun - 1e-9,
            "robust obj {} should be ≥ nominal obj {}",
            result.fun,
            result.nominal_fun
        );
    }

    #[test]
    fn test_ellipsoidal_robust_lp() {
        let problem = simple_lp()
            .expect("unexpected None or Err")
            .with_bounds(array![0.0, 0.0], array![1.0, 1.0])
            .expect("unexpected None or Err");
        let cov = Array2::<f64>::eye(2) * 0.01;
        let x0 = array![0.1, 0.1];
        let config = RobustLPConfig {
            max_iter: 2_000,
            step_size: 5e-3,
            constraint_penalty: 50.0,
            ..Default::default()
        };
        let result = ellipsoidal_robust_lp(&problem, &cov.view(), 0.5, &x0.view(), &config)
            .expect("unexpected None or Err");
        assert!(result.x.len() == 2);
        assert!(result.fun.is_finite());
    }

    #[test]
    fn test_robust_objective_box() {
        let c = array![1.0, 2.0, -1.0];
        let x = array![1.0, -1.0, 2.0];
        let model = ObjectiveUncertaintyModel::Box {
            delta: array![0.1, 0.1, 0.1],
        };
        let obj = robust_objective(&c.view(), &x.view(), &model).expect("failed to create obj");
        // Nominal is -3.0; each coefficient contributes 0.1·|x_i| => +0.4.
        assert!((obj - (-3.0 + 0.4)).abs() < 1e-9, "box robust obj={obj}");
    }

    #[test]
    fn test_robust_objective_ellipsoidal() {
        let c = array![1.0, 0.0];
        let x = array![1.0, 0.0];
        let cov = Array2::<f64>::eye(2);
        let model = ObjectiveUncertaintyModel::Ellipsoidal {
            covariance: cov,
            radius: 1.0,
        };
        let obj = robust_objective(&c.view(), &x.view(), &model).expect("failed to create obj");
        assert!((obj - 2.0).abs() < 1e-9, "ellipsoidal robust obj={obj}");
    }

    #[test]
    fn test_robust_objective_budget() {
        let c = array![1.0, 1.0, 1.0];
        let x = array![1.0, 1.0, 1.0];
        let model = ObjectiveUncertaintyModel::Budget {
            delta: array![0.5, 0.3, 0.2],
            budget: 2.0,
        };
        let obj = robust_objective(&c.view(), &x.view(), &model).expect("failed to create obj");
        // Nominal 3.0 plus the two largest deviations 0.5 + 0.3.
        assert!((obj - 3.8).abs() < 1e-9, "budget robust obj={obj}");
    }

    #[test]
    fn test_robust_objective_dimension_error() {
        let c = array![1.0, 2.0];
        let x = array![1.0];
        let model = ObjectiveUncertaintyModel::Box {
            delta: array![0.1, 0.1],
        };
        assert!(robust_objective(&c.view(), &x.view(), &model).is_err());
    }
}