pub mod minimax;
pub mod robust_lp;
pub mod worst_case;
use crate::error::{OptimizeError, OptimizeResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
/// Configuration options shared by the robust optimization routines.
#[derive(Debug, Clone)]
pub struct RobustConfig {
    // Which family of uncertainty set to use for the inner maximization.
    pub uncertainty_type: UncertaintyType,
    // Size of the uncertainty set — presumably a box half-width or ball
    // radius depending on `uncertainty_type`; TODO confirm interpretation.
    pub robustness_parameter: f64,
    // If true, a CVaR risk measure is used instead of the plain worst case
    // (see `distributionally_robust_cvar`) — NOTE(review): not consumed in
    // this file; confirm against callers.
    pub use_cvar: bool,
    // Confidence level for CVaR, in (0, 1).
    pub cvar_alpha: f64,
    // Number of samples for the inner worst-case approximation.
    pub n_inner_samples: usize,
}
impl Default for RobustConfig {
    /// Conservative defaults: box uncertainty of size 0.1, worst-case (no
    /// CVaR) objective, 95% CVaR level if enabled, 200 inner samples.
    fn default() -> Self {
        Self {
            uncertainty_type: UncertaintyType::Box,
            robustness_parameter: 0.1,
            use_cvar: false,
            cvar_alpha: 0.95,
            n_inner_samples: 200,
        }
    }
}
/// Families of uncertainty sets supported by the robust formulations.
#[derive(Debug, Clone, PartialEq)]
pub enum UncertaintyType {
    /// Axis-aligned box: each coordinate perturbed independently in ±delta.
    Box,
    /// Ellipsoid defined by a covariance matrix and a radius.
    Ellipsoidal,
    /// Polyhedron — presumably `A u <= b`; TODO confirm convention.
    Polyhedral,
    /// Box with an additional budget limiting total deviation.
    Budgeted,
}
/// Concrete parameterization of an uncertainty set (data counterpart of
/// [`UncertaintyType`]).
#[derive(Debug, Clone)]
pub enum UncertaintySet {
    /// Axis-aligned box; `delta` holds the per-coordinate half-widths
    /// (compare the `delta` argument of `box_robust`).
    Box {
        delta: Array1<f64>,
    },
    /// Ellipsoid `{center + L*eta : ||eta|| <= radius}` where `L` factors
    /// `covariance` (compare `ellipsoidal_robust`).
    Ellipsoidal {
        center: Array1<f64>,
        covariance: Array2<f64>,
        radius: f64,
    },
    /// Polyhedral set — presumably `a_matrix * u <= b_vector`; not consumed
    /// in this file, TODO confirm against the `robust_lp` module.
    Polyhedral {
        a_matrix: Array2<f64>,
        b_vector: Array1<f64>,
    },
    /// Box `delta` with a `budget` limiting total deviation — NOTE(review):
    /// budget semantics (L1 norm?) not visible here; confirm.
    BudgetedUncertainty {
        delta: Array1<f64>,
        budget: f64,
    },
}
/// A robust optimization problem instance: the uncertainty set plus settings
/// for the inner (worst-case) solve.
#[derive(Debug, Clone)]
pub struct RobustProblem {
    // Uncertainty set the adversary optimizes over.
    pub uncertainty_set: UncertaintySet,
    // Sample budget for the inner maximization.
    pub n_inner_samples: usize,
    // Convergence tolerance for the inner solve.
    pub inner_tol: f64,
}
impl Default for RobustProblem {
    /// Placeholder default: a zero-dimensional box (must be replaced with a
    /// properly sized set before use), 200 inner samples, tolerance 1e-8.
    fn default() -> Self {
        Self {
            uncertainty_set: UncertaintySet::Box {
                delta: Array1::zeros(0),
            },
            n_inner_samples: 200,
            inner_tol: 1e-8,
        }
    }
}
pub fn box_robust<F>(f: &F, x: &ArrayView1<f64>, delta: &ArrayView1<f64>) -> OptimizeResult<f64>
where
F: Fn(&ArrayView1<f64>) -> f64,
{
let n = x.len();
if delta.len() != n {
return Err(OptimizeError::ValueError(format!(
"delta length {} does not match x length {}",
delta.len(),
n
)));
}
for i in 0..n {
if delta[i] < 0.0 {
return Err(OptimizeError::ValueError(format!(
"delta[{}] = {} is negative",
i, delta[i]
)));
}
}
let mut worst = f64::NEG_INFINITY;
let mut perturbed = x.to_owned();
for i in 0..n {
perturbed[i] = x[i] + delta[i];
let val_pos = f(&perturbed.view());
if val_pos > worst {
worst = val_pos;
}
perturbed[i] = x[i] - delta[i];
let val_neg = f(&perturbed.view());
if val_neg > worst {
worst = val_neg;
}
perturbed[i] = x[i];
}
let n_grid: usize = 5.min(100 / n.max(1));
let steps = if n_grid < 2 { 1.0 } else { n_grid as f64 - 1.0 };
let mut buf = x.to_owned();
let total = n_grid.pow(n.min(8) as u32); for sample_idx in 0..total.min(200) {
let mut idx = sample_idx;
for i in 0..n {
let dim_idx = idx % n_grid;
idx /= n_grid;
let t = if n_grid <= 1 {
0.0
} else {
(dim_idx as f64 / steps) * 2.0 - 1.0 };
buf[i] = x[i] + t * delta[i];
}
let val = f(&buf.view());
if val > worst {
worst = val;
}
}
Ok(worst)
}
/// Approximate worst-case value of `f` over an ellipsoidal uncertainty set.
///
/// The perturbation is parameterized as `u = center + L * eta` with
/// `||eta||_2 <= radius`, where `L` is the (modified) Cholesky factor of
/// `covariance`. The inner maximization uses projected gradient ascent in
/// eta-space from 2n + 1 starting directions; the returned value is a lower
/// bound on the true worst case for general `f`.
///
/// # Errors
/// Returns `OptimizeError::ValueError` on a length mismatch between `x` and
/// `center`, a covariance shape other than `n x n`, or a negative `radius`.
pub fn ellipsoidal_robust<F>(
    f: &F,
    x: &ArrayView1<f64>,
    center: &ArrayView1<f64>,
    covariance: &Array2<f64>,
    radius: f64,
) -> OptimizeResult<f64>
where
    F: Fn(&ArrayView1<f64>) -> f64,
{
    let n = x.len();
    if center.len() != n {
        return Err(OptimizeError::ValueError(format!(
            "center length {} != x length {}",
            center.len(),
            n
        )));
    }
    if covariance.shape() != [n, n] {
        return Err(OptimizeError::ValueError(format!(
            "covariance shape {:?} != [{n}, {n}]",
            covariance.shape()
        )));
    }
    if radius < 0.0 {
        return Err(OptimizeError::ValueError(
            "radius must be non-negative".to_string(),
        ));
    }
    // NOTE(review): the uncertainty center is *added* to x here, i.e. the
    // set is {x + center + L*eta}; confirm this sign convention is intended.
    let x_shifted: Array1<f64> = x
        .iter()
        .zip(center.iter())
        .map(|(&xi, &ci)| xi + ci)
        .collect();
    let chol = cholesky_lower(covariance)?;
    let h = 1e-5; // forward-difference step for the eta-space gradient
    let step_size = 0.1 * radius;
    let max_iter = 200;
    let mut best_val = f(&x_shifted.view());
    // Start from the origin and from ±radius along each coordinate axis to
    // reduce the chance of stalling in a poor local maximum.
    let start_dirs: Vec<Array1<f64>> = {
        let mut dirs = Vec::with_capacity(2 * n + 1);
        dirs.push(Array1::zeros(n));
        for i in 0..n {
            let mut v = Array1::zeros(n);
            v[i] = radius;
            dirs.push(v.clone());
            v[i] = -radius;
            dirs.push(v);
        }
        dirs
    };
    for init_eta in start_dirs {
        let mut eta = project_onto_ball(&init_eta.view(), radius);
        for _ in 0..max_iter {
            let xi = mat_vec_lower(&chol, &eta.view());
            let x_pert: Array1<f64> = x_shifted
                .iter()
                .zip(xi.iter())
                .map(|(&a, &b)| a + b)
                .collect();
            // PERF: the base value is identical for every coordinate of the
            // finite-difference gradient; hoisted out of the j-loop (it was
            // previously re-evaluated n times per ascent iteration).
            let f_base = f(&x_pert.view());
            let mut grad_eta = Array1::<f64>::zeros(n);
            for j in 0..n {
                let mut eta_fwd = eta.clone();
                eta_fwd[j] += h;
                let xi_fwd = mat_vec_lower(&chol, &eta_fwd.view());
                let x_fwd: Array1<f64> = x_shifted
                    .iter()
                    .zip(xi_fwd.iter())
                    .map(|(&a, &b)| a + b)
                    .collect();
                grad_eta[j] = (f(&x_fwd.view()) - f_base) / h;
            }
            // Ascent step, then project back onto the radius ball.
            let eta_new: Array1<f64> = eta
                .iter()
                .zip(grad_eta.iter())
                .map(|(&e, &g)| e + step_size * g)
                .collect();
            eta = project_onto_ball(&eta_new.view(), radius);
            let xi_new = mat_vec_lower(&chol, &eta.view());
            let x_new: Array1<f64> = x_shifted
                .iter()
                .zip(xi_new.iter())
                .map(|(&a, &b)| a + b)
                .collect();
            let val = f(&x_new.view());
            if val > best_val {
                best_val = val;
            }
        }
    }
    Ok(best_val)
}
/// Empirical CVaR at level `alpha` of the scenario losses `f(x, s)`.
///
/// Uses the Rockafellar–Uryasev representation
/// `CVaR_alpha = min_t { t + E[(L - t)+] / (1 - alpha) }`,
/// minimizing over the empirical losses themselves as candidate thresholds
/// (the minimizer is always attained at an empirical quantile).
///
/// # Errors
/// Returns `OptimizeError::ValueError` when `scenarios` is empty or `alpha`
/// lies outside the open interval (0, 1).
pub fn distributionally_robust_cvar<F>(
    f: &F,
    x: &ArrayView1<f64>,
    scenarios: &[Array1<f64>],
    alpha: f64,
) -> OptimizeResult<f64>
where
    F: Fn(&ArrayView1<f64>, &ArrayView1<f64>) -> f64,
{
    if scenarios.is_empty() {
        return Err(OptimizeError::ValueError(
            "scenarios must be non-empty".to_string(),
        ));
    }
    if !(0.0 < alpha && alpha < 1.0) {
        return Err(OptimizeError::ValueError(format!(
            "alpha must be in (0,1), got {}",
            alpha
        )));
    }
    // Evaluate the loss under every scenario.
    let mut losses = Vec::with_capacity(scenarios.len());
    for scenario in scenarios {
        losses.push(f(x, &scenario.view()));
    }
    losses.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let n = losses.len();
    let scale = 1.0 / ((1.0 - alpha) * n as f64);
    // Try every empirical loss as the threshold t and keep the smallest
    // objective value.
    let mut best_cvar = f64::INFINITY;
    for idx in 0..n {
        let t = losses[idx];
        let mut excess = 0.0;
        for &loss in &losses {
            excess += (loss - t).max(0.0);
        }
        best_cvar = f64::min(best_cvar, t + scale * excess);
    }
    Ok(best_cvar)
}
/// Result of a sample-average-approximation (SAA) solve.
#[derive(Debug, Clone)]
pub struct SaaResult {
    // Final iterate.
    pub x: Array1<f64>,
    // Estimated objective value at `x` (fresh-sample average).
    pub fun: f64,
    // Number of outer iterations performed.
    pub n_iter: usize,
    // True if the inner gradient norm fell below the tolerance.
    pub success: bool,
    // Human-readable status message.
    pub message: String,
}
/// Tuning parameters for [`saa_solve`].
#[derive(Debug, Clone)]
pub struct SaaConfig {
    // Scenarios drawn per outer iteration.
    pub n_samples: usize,
    // Maximum number of outer (resampling) iterations.
    pub max_outer_iter: usize,
    // Gradient-norm tolerance declaring convergence.
    pub tol: f64,
    // Gradient-descent steps per outer iteration.
    pub inner_max_iter: usize,
    // Fixed gradient-descent step length.
    pub step_size: f64,
}
impl Default for SaaConfig {
    /// Defaults: 500 samples, 10 outer iterations, tolerance 1e-6,
    /// 500 inner steps with step size 1e-3.
    fn default() -> Self {
        Self {
            n_samples: 500,
            max_outer_iter: 10,
            tol: 1e-6,
            inner_max_iter: 500,
            step_size: 1e-3,
        }
    }
}
/// Sample-average approximation (SAA) minimization of `E[f(x, xi)]`.
///
/// Each outer iteration draws `config.n_samples` scenarios from
/// `sample_generator` and runs fixed-step gradient descent (forward-difference
/// gradients) on the resulting sample average. The final objective estimate
/// uses a fresh batch of at most 100 samples.
///
/// # Errors
/// Returns `OptimizeError::ValueError` when `x0` is empty or
/// `config.n_samples` is zero.
pub fn saa_solve<F, G>(
    f: &F,
    sample_generator: &mut G,
    x0: &ArrayView1<f64>,
    config: &SaaConfig,
) -> OptimizeResult<SaaResult>
where
    F: Fn(&ArrayView1<f64>, &ArrayView1<f64>) -> f64,
    G: FnMut() -> Array1<f64>,
{
    let n = x0.len();
    if n == 0 {
        return Err(OptimizeError::ValueError(
            "x0 must be non-empty".to_string(),
        ));
    }
    // BUG FIX: n_samples == 0 previously produced a silent 0.0 / 0.0 = NaN in
    // the sample averages below; reject it explicitly instead.
    if config.n_samples == 0 {
        return Err(OptimizeError::ValueError(
            "n_samples must be positive".to_string(),
        ));
    }
    let mut x = x0.to_owned();
    let h = 1e-5; // forward-difference step for the gradient
    let mut converged = false;
    let mut outer_iter = 0;
    for outer in 0..config.max_outer_iter {
        outer_iter = outer + 1;
        // Fresh scenario batch for this outer iteration.
        let samples: Vec<Array1<f64>> =
            (0..config.n_samples).map(|_| sample_generator()).collect();
        for _ in 0..config.inner_max_iter {
            // Sample-average objective at the current iterate.
            let f_x: f64 = samples.iter().map(|s| f(&x.view(), &s.view())).sum::<f64>()
                / config.n_samples as f64;
            let mut grad = Array1::<f64>::zeros(n);
            for j in 0..n {
                let mut x_fwd = x.clone();
                x_fwd[j] += h;
                let f_fwd: f64 = samples
                    .iter()
                    .map(|s| f(&x_fwd.view(), &s.view()))
                    .sum::<f64>()
                    / config.n_samples as f64;
                grad[j] = (f_fwd - f_x) / h;
            }
            let grad_norm = grad.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm < config.tol {
                converged = true;
                break;
            }
            // Fixed-step gradient descent.
            for j in 0..n {
                x[j] -= config.step_size * grad[j];
            }
        }
        if converged {
            break;
        }
    }
    // Estimate the objective on a fresh (smaller) batch to reduce the
    // optimization bias of the training samples.
    let final_samples: Vec<Array1<f64>> = (0..config.n_samples.min(100))
        .map(|_| sample_generator())
        .collect();
    let fun = if final_samples.is_empty() {
        0.0
    } else {
        final_samples
            .iter()
            .map(|s| f(&x.view(), &s.view()))
            .sum::<f64>()
            / final_samples.len() as f64
    };
    Ok(SaaResult {
        x,
        fun,
        n_iter: outer_iter,
        success: converged,
        message: if converged {
            "SAA converged".to_string()
        } else {
            "SAA reached maximum outer iterations".to_string()
        },
    })
}
/// Euclidean projection of `v` onto the closed ball of the given radius:
/// vectors already inside the ball (or the zero vector) are returned
/// unchanged, otherwise `v` is rescaled onto the sphere.
fn project_onto_ball(v: &ArrayView1<f64>, radius: f64) -> Array1<f64> {
    let norm = v.fold(0.0_f64, |acc, &x| acc + x * x).sqrt();
    match norm {
        nrm if nrm <= radius || nrm == 0.0 => v.to_owned(),
        nrm => v.mapv(|x| x * radius / nrm),
    }
}
fn mat_vec_lower(l: &Array2<f64>, x: &ArrayView1<f64>) -> Array1<f64> {
let n = x.len();
let mut y = Array1::<f64>::zeros(n);
for i in 0..n {
for j in 0..=i {
y[i] += l[[i, j]] * x[j];
}
}
y
}
/// Lower-triangular Cholesky factor of `a` (Cholesky–Banachiewicz order),
/// regularized so it never fails: a negative diagonal pivot is replaced by
/// `sqrt(|pivot|)` clamped to at least 1e-12, and a near-zero divisor yields
/// a zero off-diagonal entry. The result satisfies `L * L^T ≈ a` only when
/// `a` is symmetric positive definite.
fn cholesky_lower(a: &Array2<f64>) -> OptimizeResult<Array2<f64>> {
    let n = a.shape()[0];
    let mut l = Array2::<f64>::zeros((n, n));
    for i in 0..n {
        for j in 0..=i {
            let sum: f64 = (0..j).map(|k| l[[i, k]] * l[[j, k]]).sum();
            l[[i, j]] = if i == j {
                let diag = a[[i, i]] - sum;
                if diag < 0.0 {
                    // Indefinite input: clamp to keep the factor usable.
                    diag.abs().sqrt().max(1e-12)
                } else {
                    diag.sqrt()
                }
            } else {
                let pivot = l[[j, j]];
                if pivot.abs() < 1e-14 {
                    0.0 // avoid division by a vanishing pivot
                } else {
                    (a[[i, j]] - sum) / pivot
                }
            };
        }
    }
    Ok(l)
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::{array, Array2};

    /// Simple convex objective shared across tests.
    fn quadratic(x: &ArrayView1<f64>) -> f64 {
        x.iter().map(|xi| xi * xi).sum()
    }

    #[test]
    fn test_box_robust_basic() {
        let x = array![0.0, 0.0];
        let delta = array![1.0, 1.0];
        let val = box_robust(&quadratic, &x.view(), &delta.view()).expect("failed to create val");
        // Worst case is any corner (±1, ±1), value 2.
        assert!((val - 2.0).abs() < 1e-9, "expected 2.0, got {val}");
    }

    #[test]
    fn test_box_robust_shifted() {
        let x = array![1.0];
        let delta = array![0.5];
        let val = box_robust(
            &|v: &ArrayView1<f64>| v[0] * v[0],
            &x.view(),
            &delta.view(),
        )
        .expect("unexpected None or Err");
        // Worst case at x = 1.5 gives 2.25.
        assert!((val - 2.25).abs() < 1e-9, "expected 2.25, got {val}");
    }

    #[test]
    fn test_box_robust_bad_delta() {
        let x = array![1.0];
        let delta = array![-0.1];
        // Negative half-widths must be rejected.
        assert!(box_robust(&quadratic, &x.view(), &delta.view()).is_err());
    }

    #[test]
    fn test_ellipsoidal_robust_identity() {
        let x = array![0.0, 0.0];
        let center = array![0.0, 0.0];
        let cov = Array2::<f64>::eye(2);
        // BUG FIX: `&center` had been corrupted to `¢er` (HTML-entity
        // mangling of "&cent;"), which does not compile.
        let val = ellipsoidal_robust(&quadratic, &x.view(), &center.view(), &cov, 1.0)
            .expect("unexpected None or Err");
        // max ||u||^2 over the unit ball is 1; the inner solver is a
        // heuristic, so use a loose tolerance.
        assert!((val - 1.0).abs() < 0.05, "expected ~1.0, got {val}");
    }

    #[test]
    fn test_cvar_basic() {
        let x = array![0.0];
        let scenarios: Vec<Array1<f64>> = (0..5).map(|i| array![i as f64]).collect();
        let f = |_x: &ArrayView1<f64>, s: &ArrayView1<f64>| s[0];
        // Losses {0,1,2,3,4} at alpha = 0.8: CVaR is the mean of the worst
        // 20% tail, i.e. the single loss 4.
        let cvar = distributionally_robust_cvar(&f, &x.view(), &scenarios, 0.8)
            .expect("failed to create cvar");
        assert!((cvar - 4.0).abs() < 1e-9, "expected 4.0, got {cvar}");
    }

    #[test]
    fn test_cvar_alpha_error() {
        let x = array![0.0];
        let scenarios = vec![array![1.0]];
        let f = |_: &ArrayView1<f64>, s: &ArrayView1<f64>| s[0];
        // alpha must lie strictly inside (0, 1).
        assert!(distributionally_robust_cvar(&f, &x.view(), &scenarios, 0.0).is_err());
        assert!(distributionally_robust_cvar(&f, &x.view(), &scenarios, 1.0).is_err());
    }

    #[test]
    fn test_saa_quadratic() {
        // E[(x - xi)^2] with xi ~ Uniform[0, 2] is minimized at x = 1,
        // matching the assertion below.
        let f = |x: &ArrayView1<f64>, xi: &ArrayView1<f64>| {
            let diff = x[0] - xi[0];
            diff * diff
        };
        // Deterministic 64-bit LCG (Knuth constants) for reproducibility.
        let mut rng_state = 42u64;
        let mut sample_gen = || {
            rng_state = rng_state
                .wrapping_mul(6364136223846793005)
                .wrapping_add(1442695040888963407);
            // BUG FIX: the shift was 33, leaving only 31 high bits so
            // t ∈ [0, 1) with mean ≈ 0.5 — inconsistent with the expected
            // minimizer 1.0 below. Shifting by 32 keeps 32 bits so the
            // division by u32::MAX maps to [0, 1] and t ∈ [0, 2].
            let t = ((rng_state >> 32) as f64) / (u32::MAX as f64) * 2.0;
            array![t]
        };
        let x0 = array![0.0];
        let config = SaaConfig {
            n_samples: 200,
            max_outer_iter: 5,
            tol: 1e-4,
            inner_max_iter: 200,
            step_size: 5e-3,
        };
        let result =
            saa_solve(&f, &mut sample_gen, &x0.view(), &config).expect("failed to create result");
        assert!(
            (result.x[0] - 1.0).abs() < 0.15,
            "expected x* ≈ 1.0, got {}",
            result.x[0]
        );
    }
}