use crate::error::{OptimizeError, OptimizeResult};
use crate::conic::sdp::{SDPProblem, SDPSolver, SDPSolverConfig};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_linalg::solve;
/// A single second-order cone (SOC) constraint of the form
/// `||A x + b||_2 <= c^T x + d`.
#[derive(Debug, Clone)]
pub struct SOCConstraint {
/// Matrix applied to `x` inside the norm (m rows, n cols).
pub a: Array2<f64>,
/// Offset added inside the norm (length m).
pub b: Array1<f64>,
/// Linear coefficients of the right-hand side (length n).
pub c: Array1<f64>,
/// Constant term of the right-hand side.
pub d: f64,
}
impl SOCConstraint {
    /// Build a second-order cone constraint `||A x + b|| <= c^T x + d`,
    /// validating that the shapes of `a`, `b`, and `c` are mutually consistent.
    ///
    /// # Errors
    /// Returns `OptimizeError::ValueError` when `a.nrows() != b.len()` or
    /// `a.ncols() != c.len()`.
    pub fn new(
        a: Array2<f64>,
        b: Array1<f64>,
        c: Array1<f64>,
        d: f64,
    ) -> OptimizeResult<Self> {
        let (rows, cols) = (a.nrows(), a.ncols());
        if rows != b.len() {
            return Err(OptimizeError::ValueError(format!(
                "SOCConstraint: A has {} rows but b has {} elements",
                rows,
                b.len()
            )));
        }
        if cols != c.len() {
            return Err(OptimizeError::ValueError(format!(
                "SOCConstraint: A has {} cols but c has {} elements",
                cols,
                c.len()
            )));
        }
        Ok(Self { a, b, c, d })
    }
}
/// A second-order cone program: minimize `obj^T x` subject to a set of SOC
/// constraints and optional linear equality constraints `eq_a x = eq_b`.
#[derive(Debug, Clone)]
pub struct SOCPProblem {
/// Linear objective coefficients (length n).
pub obj: Array1<f64>,
/// Second-order cone constraints.
pub constraints: Vec<SOCConstraint>,
/// Optional equality-constraint matrix F (rows x n).
pub eq_a: Option<Array2<f64>>,
/// Optional equality right-hand side g (one entry per row of F).
pub eq_b: Option<Array1<f64>>,
}
impl SOCPProblem {
    /// Construct a problem from an objective vector and cone constraints,
    /// rejecting any constraint whose matrix width differs from `obj.len()`.
    ///
    /// # Errors
    /// `OptimizeError::ValueError` naming the first offending constraint.
    pub fn new(obj: Array1<f64>, constraints: Vec<SOCConstraint>) -> OptimizeResult<Self> {
        let n = obj.len();
        if let Some((k, con)) = constraints
            .iter()
            .enumerate()
            .find(|(_, con)| con.a.ncols() != n)
        {
            return Err(OptimizeError::ValueError(format!(
                "Constraint {}: A has {} cols but obj has {} elements",
                k,
                con.a.ncols(),
                n
            )));
        }
        Ok(Self {
            obj,
            constraints,
            eq_a: None,
            eq_b: None,
        })
    }

    /// Attach linear equality constraints `F x = g` in builder style, checking
    /// that F matches the problem dimension and that F and g agree in rows.
    ///
    /// # Errors
    /// `OptimizeError::ValueError` on either dimension mismatch.
    pub fn with_equality(mut self, f: Array2<f64>, g: Array1<f64>) -> OptimizeResult<Self> {
        let n = self.obj.len();
        if f.ncols() != n {
            return Err(OptimizeError::ValueError(format!(
                "Equality matrix F has {} cols but problem dimension is {}",
                f.ncols(),
                n
            )));
        }
        if f.nrows() != g.len() {
            return Err(OptimizeError::ValueError(format!(
                "F has {} rows but g has {} elements",
                f.nrows(),
                g.len()
            )));
        }
        self.eq_a = Some(f);
        self.eq_b = Some(g);
        Ok(self)
    }

    /// Number of decision variables.
    pub fn n(&self) -> usize {
        self.obj.len()
    }
}
/// Result returned by `socp_interior_point`.
#[derive(Debug, Clone)]
pub struct SOCPResult {
/// Final primal iterate.
pub x: Array1<f64>,
/// Objective value `obj^T x` at the final iterate.
pub obj_val: f64,
/// Per-constraint residuals `||A x + b|| - (c^T x + d)`; values <= 0 mean the cone is satisfied.
pub residuals: Vec<f64>,
/// Whether both tolerance criteria were met within the iteration limit.
pub converged: bool,
/// Human-readable termination message.
pub message: String,
/// Number of outer iterations performed.
pub n_iter: usize,
}
/// Lift an SOCP into an equivalent SDP via the "arrow matrix" embedding.
///
/// For each cone constraint `||A_k x + b_k|| <= c_k^T x + d_k` a PSD block
/// `Arw(u, t) = [[t, u^T], [u, t I]]` of size `m_k + 1` is created, where
/// `u = A_k x + b_k` and `t = c_k^T x + d_k`. The decision variables `x` are
/// carried in the last `n + 1` rows/columns of an augmented matrix,
/// homogenized by pinning the bottom-right corner entry to 1, and linear trace
/// constraints couple the blocks to `x`.
///
/// Optional equality constraints `F x = g` become additional trace
/// constraints on the `x` corner.
///
/// # Errors
/// Propagates any validation error from `SDPProblem::new`.
pub fn socp_to_sdp(problem: &SOCPProblem) -> OptimizeResult<SDPProblem> {
    let n = problem.n();
    // One (m_k + 1)-dimensional arrow block per cone constraint.
    let block_sizes: Vec<usize> = problem
        .constraints
        .iter()
        .map(|c| c.a.nrows() + 1)
        .collect();
    let total_dim: usize = block_sizes.iter().sum();
    // Layout: [arrow blocks | x variables | homogenizing scalar].
    let sdp_n = total_dim + n + 1;
    // Objective: <C, X> = obj^T x, encoded symmetrically in the x/scalar corner.
    let mut c_sdp = Array2::<f64>::zeros((sdp_n, sdp_n));
    for i in 0..n {
        c_sdp[[total_dim + i, total_dim + n]] = problem.obj[i] * 0.5;
        c_sdp[[total_dim + n, total_dim + i]] = problem.obj[i] * 0.5;
    }
    let mut a_mats: Vec<Array2<f64>> = Vec::new();
    let mut b_vals: Vec<f64> = Vec::new();
    // Pin the homogenizing corner entry to 1 so x enters the constraints linearly.
    {
        let mut ak = Array2::<f64>::zeros((sdp_n, sdp_n));
        ak[[total_dim + n, total_dim + n]] = 1.0;
        a_mats.push(ak);
        b_vals.push(1.0);
    }
    let mut off = 0usize;
    for con in &problem.constraints {
        let mk = con.a.nrows();
        let dk = mk + 1;
        // Tie the block's (0,0) diagonal entry to t = c^T x + d.
        {
            let mut ak = Array2::<f64>::zeros((sdp_n, sdp_n));
            ak[[off, off]] = 1.0;
            for i in 0..n {
                ak[[total_dim + i, total_dim + n]] = -con.c[i] * 0.5;
                ak[[total_dim + n, total_dim + i]] = -con.c[i] * 0.5;
            }
            a_mats.push(ak);
            b_vals.push(con.d);
        }
        // Every remaining diagonal entry of the arrow block also equals t.
        for r in 0..mk {
            let mut ak = Array2::<f64>::zeros((sdp_n, sdp_n));
            ak[[off + r + 1, off + r + 1]] = 1.0;
            for i in 0..n {
                ak[[total_dim + i, total_dim + n]] = -con.c[i] * 0.5;
                ak[[total_dim + n, total_dim + i]] = -con.c[i] * 0.5;
            }
            a_mats.push(ak);
            b_vals.push(con.d);
        }
        // First row/column off-diagonal entries equal u_r = (A x + b)_r.
        for r in 0..mk {
            let mut ak = Array2::<f64>::zeros((sdp_n, sdp_n));
            ak[[off, off + r + 1]] = 0.5;
            ak[[off + r + 1, off]] = 0.5;
            for i in 0..n {
                let a_ri = con.a[[r, i]];
                ak[[total_dim + i, total_dim + n]] -= a_ri * 0.5;
                ak[[total_dim + n, total_dim + i]] -= a_ri * 0.5;
            }
            a_mats.push(ak);
            b_vals.push(con.b[r]);
        }
        off += dk;
    }
    // Equality constraints F x = g become trace constraints on the x corner.
    if let (Some(feq), Some(geq)) = (&problem.eq_a, &problem.eq_b) {
        for r in 0..feq.nrows() {
            let mut ak = Array2::<f64>::zeros((sdp_n, sdp_n));
            for i in 0..n {
                ak[[total_dim + i, total_dim + n]] = feq[[r, i]] * 0.5;
                ak[[total_dim + n, total_dim + i]] = feq[[r, i]] * 0.5;
            }
            a_mats.push(ak);
            b_vals.push(geq[r]);
        }
    }
    let b_sdp = Array1::from_vec(b_vals);
    SDPProblem::new(c_sdp, a_mats, b_sdp)
}
/// Tuning parameters for `socp_interior_point`.
#[derive(Debug, Clone)]
pub struct SOCPConfig {
/// Maximum number of outer iterations.
pub max_iter: usize,
/// Convergence tolerance applied to both the cone-margin sum and the dual-residual norm.
pub tol: f64,
/// Step scaling factor.
// NOTE(review): step_factor is never read by socp_interior_point in this file
// (the line search uses a fixed 0.5 backtracking factor) — confirm intended use.
pub step_factor: f64,
}
impl Default for SOCPConfig {
fn default() -> Self {
Self {
max_iter: 300,
tol: 1e-7,
step_factor: 0.95,
}
}
}
/// Solve an SOCP with a log-barrier interior-point style method.
///
/// Minimizes `obj^T x` subject to `||A_k x + b_k|| <= c_k^T x + d_k` for every
/// cone constraint. Each cone's right-hand side is relaxed by a slack `tau[k]`
/// so the starting point `x = 0` is strictly feasible for the relaxed cones;
/// the slacks are shrunk toward zero as the iterate gains true margin.
///
/// NOTE(review): equality constraints stored in `problem.eq_a`/`eq_b` are NOT
/// enforced by this routine (they are only consumed by the SDP lift) — callers
/// relying on equalities should verify the returned iterate.
///
/// # Arguments
/// * `problem` - the SOCP to solve.
/// * `config` - optional solver parameters; `SOCPConfig::default()` when `None`.
///
/// # Errors
/// Propagates failures from the dense linear solve of the Newton system.
pub fn socp_interior_point(
    problem: &SOCPProblem,
    config: Option<SOCPConfig>,
) -> OptimizeResult<SOCPResult> {
    let cfg = config.unwrap_or_default();
    let n = problem.n();
    let k = problem.constraints.len();
    // With no cones the problem is an unconstrained LP; report the trivial x = 0.
    if k == 0 {
        let obj_val = 0.0;
        return Ok(SOCPResult {
            x: Array1::<f64>::zeros(n),
            obj_val,
            residuals: vec![],
            converged: true,
            message: "No constraints — trivial solution x=0".into(),
            n_iter: 0,
        });
    }
    let mut x = Array1::<f64>::zeros(n);
    // Per-cone feasibility slacks: t_k is relaxed to c_k^T x + d_k + tau[k].
    let mut tau: Vec<f64> = vec![1.0; k];
    let mut n_iter = 0usize;
    let mut converged = false;
    let mut message = "maximum iterations reached".to_string();
    for iter in 0..cfg.max_iter {
        n_iter = iter + 1;
        // Evaluate u_k = A_k x + b_k and the relaxed t_k for every cone.
        let mut u_vecs: Vec<Array1<f64>> = Vec::with_capacity(k);
        let mut t_vals: Vec<f64> = Vec::with_capacity(k);
        for ki in 0..k {
            let con = &problem.constraints[ki];
            let u = con.a.dot(&x) + &con.b;
            let t = con.c.dot(&x) + con.d + tau[ki];
            u_vecs.push(u);
            t_vals.push(t);
        }
        // Complementarity-like measure: total absolute cone margin |t_k - ||u_k|||.
        let mut comp = 0.0_f64;
        for ki in 0..k {
            let u_norm = u_vecs[ki].iter().map(|v| v * v).sum::<f64>().sqrt();
            comp += (t_vals[ki] - u_norm).abs();
        }
        // Approximate dual residual: obj minus a scaled cone-gradient term.
        // NOTE(review): the scaling (lambda = ||u||/t) is heuristic — confirm
        // against the intended KKT conditions before tightening tolerances.
        let mut rd = problem.obj.clone();
        for ki in 0..k {
            let con = &problem.constraints[ki];
            let t = t_vals[ki].max(1e-15);
            let u_norm = u_vecs[ki].iter().map(|v| v * v).sum::<f64>().sqrt().max(1e-15);
            let lambda = u_norm / t;
            for i in 0..n {
                let mut grad_i = con.c[i] * lambda;
                for r in 0..con.a.nrows() {
                    grad_i -= con.a[[r, i]] * u_vecs[ki][r] / (u_norm * t);
                }
                rd[i] -= grad_i;
            }
        }
        let rd_norm = rd.iter().map(|v| v * v).sum::<f64>().sqrt();
        if comp < cfg.tol && rd_norm < cfg.tol {
            converged = true;
            message = format!(
                "Converged in {} iterations (comp={:.2e}, rd={:.2e})",
                n_iter, comp, rd_norm
            );
            break;
        }
        // Assemble the barrier-style gradient g and Hessian h for the Newton step.
        let mut h = Array2::<f64>::zeros((n, n));
        let mut g = Array1::<f64>::zeros(n);
        for i in 0..n {
            g[i] = problem.obj[i];
        }
        for ki in 0..k {
            let con = &problem.constraints[ki];
            let t = t_vals[ki].max(1e-12);
            let u = &u_vecs[ki];
            let u_norm2 = u.iter().map(|v| v * v).sum::<f64>();
            let u_norm = u_norm2.sqrt().max(1e-15);
            let rho = (t * t - u_norm2).max(1e-15);
            // Gradient contribution of this cone's barrier terms.
            // NOTE(review): the -2 c_i / t term suggests a -2 log t component on
            // top of -log(t^2 - ||u||^2); verify the intended barrier function.
            for i in 0..n {
                let mut grad_i = -2.0 * con.c[i] / t;
                for r in 0..con.a.nrows() {
                    grad_i -= 2.0 * con.a[[r, i]] * u[r] / rho;
                }
                g[i] += grad_i;
            }
            let inv_t2 = 2.0 / (t * t);
            let inv_rho2 = 4.0 / (rho * rho);
            let inv_rho = 2.0 / rho;
            // at_u = A^T u, reused in the rank-one Hessian term below.
            let mut at_u = Array1::<f64>::zeros(n);
            for i in 0..n {
                for r in 0..con.a.nrows() {
                    at_u[i] += con.a[[r, i]] * u[r];
                }
            }
            for i in 0..n {
                for j in 0..n {
                    h[[i, j]] += inv_t2 * con.c[i] * con.c[j]
                        + inv_rho2 * at_u[i] * at_u[j];
                    for r in 0..con.a.nrows() {
                        h[[i, j]] += inv_rho * con.a[[r, i]] * con.a[[r, j]];
                    }
                }
            }
        }
        // Scaled Tikhonov regularization keeps the Newton system solvable.
        let h_norm = h.iter().map(|v| v * v).sum::<f64>().sqrt().max(1.0);
        let eps = 1e-8 * h_norm;
        for i in 0..n {
            h[[i, i]] += eps;
        }
        let neg_g = g.map(|v| -v);
        let dx = solve(&h.view(), &neg_g.view(), None)
            .map_err(OptimizeError::from)?;
        // Backtracking line search: stay strictly inside the (relaxed) cones
        // and require Armijo decrease of the linear objective.
        let mut alpha = 1.0_f64;
        let armijo_c = 1e-4;
        let f0: f64 = problem.obj.iter().zip(x.iter()).map(|(c, xi)| c * xi).sum();
        let slope: f64 = problem.obj.iter().zip(dx.iter()).map(|(c, d)| c * d).sum();
        for _ in 0..40 {
            let x_trial = &x + &(&dx * alpha);
            let f_trial: f64 = problem.obj.iter().zip(x_trial.iter()).map(|(c, xi)| c * xi).sum();
            let feasible = (0..k).all(|ki| {
                let con = &problem.constraints[ki];
                let t_new = con.c.dot(&x_trial) + con.d + tau[ki];
                let u_new = con.a.dot(&x_trial) + &con.b;
                let u_norm = u_new.iter().map(|v| v * v).sum::<f64>().sqrt();
                t_new > u_norm + 1e-12
            });
            if feasible && f_trial <= f0 + armijo_c * alpha * slope {
                break;
            }
            alpha *= 0.5;
            if alpha < 1e-15 {
                alpha = 1e-15;
                break;
            }
        }
        for i in 0..n {
            x[i] += alpha * dx[i];
        }
        // Shrink the slacks toward half the true margin so they vanish as the
        // iterate becomes strictly feasible for the original cones.
        for ki in 0..k {
            let con = &problem.constraints[ki];
            let u = con.a.dot(&x) + &con.b;
            let t_target = con.c.dot(&x) + con.d;
            let u_norm = u.iter().map(|v| v * v).sum::<f64>().sqrt();
            let margin = (t_target - u_norm).max(0.0);
            tau[ki] = margin * 0.5 + 1e-8;
        }
    }
    let obj_val = problem.obj.iter().zip(x.iter()).map(|(c, xi)| c * xi).sum();
    // Final cone residuals ||A x + b|| - (c^T x + d); <= 0 means feasible.
    let residuals: Vec<f64> = (0..k)
        .map(|ki| {
            let con = &problem.constraints[ki];
            let u = con.a.dot(&x) + &con.b;
            let t = con.c.dot(&x) + con.d;
            let u_norm = u.iter().map(|v| v * v).sum::<f64>().sqrt();
            u_norm - t
        })
        .collect();
    Ok(SOCPResult {
        x,
        obj_val,
        residuals,
        converged,
        message,
        n_iter,
    })
}
/// Result of `robust_ls_socp`.
#[derive(Debug, Clone)]
pub struct RobustLsResult {
/// Estimated solution x (the first n entries of the SOCP solution).
pub x: Array1<f64>,
/// Optimal value of the bound t minimized by the SOCP (t >= s1 + s2 with
/// ||A x - b|| <= s1 and rho ||x|| <= s2).
pub worst_case_residual: f64,
/// Whether the underlying SOCP solver reported convergence.
pub converged: bool,
}
/// Robust least squares via SOCP: minimize `t` subject to
/// `||A x - b|| <= s1`, `rho * ||x|| <= s2`, and `t >= s1 + s2`.
///
/// # Arguments
/// * `a` - design matrix (m x n).
/// * `b` - target vector (length m).
/// * `rho` - non-negative regularization weight on `||x||`.
///
/// # Errors
/// Dimension mismatch between `a` and `b`, negative `rho`, or a failure from
/// the SOCP solver.
pub fn robust_ls_socp(
    a: &ArrayView2<f64>,
    b: &ArrayView1<f64>,
    rho: f64,
) -> OptimizeResult<RobustLsResult> {
    let m = a.nrows();
    let n = a.ncols();
    if b.len() != m {
        return Err(OptimizeError::ValueError(format!(
            "A is {}×{} but b has {} elements",
            m, n, b.len()
        )));
    }
    if rho < 0.0 {
        return Err(OptimizeError::ValueError(format!(
            "rho must be non-negative, got {}",
            rho
        )));
    }
    // Variable layout: [x (n) | t | s1 | s2].
    let nw = n + 3;
    let (t_idx, s1_idx, s2_idx) = (n, n + 1, n + 2);
    // Objective: minimize t, the bound on the combined residual.
    let mut obj = Array1::<f64>::zeros(nw);
    obj[t_idx] = 1.0;
    // Cone 1: ||A x - b|| <= s1.
    let mut a_res = Array2::<f64>::zeros((m, nw));
    let mut b_res = Array1::<f64>::zeros(m);
    for r in 0..m {
        b_res[r] = -b[r];
        for j in 0..n {
            a_res[[r, j]] = a[[r, j]];
        }
    }
    let mut c_res = Array1::<f64>::zeros(nw);
    c_res[s1_idx] = 1.0;
    let con_residual = SOCConstraint::new(a_res, b_res, c_res, 0.0)?;
    // Cone 2: rho * ||x|| <= s2.
    let mut a_reg = Array2::<f64>::zeros((n, nw));
    for j in 0..n {
        a_reg[[j, j]] = rho;
    }
    let mut c_reg = Array1::<f64>::zeros(nw);
    c_reg[s2_idx] = 1.0;
    let con_regular = SOCConstraint::new(a_reg, Array1::<f64>::zeros(n), c_reg, 0.0)?;
    // Cone 3 (degenerate, zero norm side): 0 <= t - s1 - s2, i.e. t >= s1 + s2.
    let mut c_sum = Array1::<f64>::zeros(nw);
    c_sum[t_idx] = 1.0;
    c_sum[s1_idx] = -1.0;
    c_sum[s2_idx] = -1.0;
    let con_sum = SOCConstraint::new(
        Array2::<f64>::zeros((1, nw)),
        Array1::from_vec(vec![0.0]),
        c_sum,
        0.0,
    )?;
    let problem = SOCPProblem::new(obj, vec![con_residual, con_regular, con_sum])?;
    let result = socp_interior_point(&problem, None)?;
    Ok(RobustLsResult {
        x: result.x.slice(scirs2_core::ndarray::s![..n]).to_owned(),
        worst_case_residual: result.x[t_idx],
        converged: result.converged,
    })
}
/// Result of `portfolio_optimization_socp`.
#[derive(Debug, Clone)]
pub struct PortfolioSocpResult {
/// Portfolio weights (first n entries of the SOCP solution).
pub weights: Array1<f64>,
/// Expected portfolio return mu^T w at the computed weights.
pub expected_return: f64,
/// Portfolio standard deviation sqrt(w^T Sigma w) at the computed weights.
pub std_dev: f64,
/// Whether the underlying SOCP solver reported convergence.
pub converged: bool,
}
/// Markowitz-style portfolio optimization posed as an SOCP:
/// minimize `-mu^T w + gamma * t` subject to `||L^T w|| <= t` (so that
/// `w^T Sigma w <= t^2`), `w_i >= 0`, and `sum_i w_i = 1`, where `Sigma = L L^T`.
///
/// Fixes a mis-encoded `®.view()` (HTML-entity residue for `&reg.view()`) in
/// the Cholesky retry path, which previously did not compile.
///
/// NOTE(review): the budget equality is attached via `with_equality`, but
/// `socp_interior_point` does not read `eq_a`/`eq_b` — verify that the
/// returned weights actually sum to one.
///
/// # Arguments
/// * `mu` - expected returns (length n).
/// * `sigma` - covariance matrix (n x n); a 1e-6 diagonal ridge is added if
///   the initial Cholesky factorization fails.
/// * `gamma` - non-negative risk-aversion weight.
///
/// # Errors
/// Dimension mismatch, negative `gamma`, Cholesky failure after
/// regularization, or solver failure.
pub fn portfolio_optimization_socp(
    mu: &ArrayView1<f64>,
    sigma: &ArrayView2<f64>,
    gamma: f64,
) -> OptimizeResult<PortfolioSocpResult> {
    let n = mu.len();
    if sigma.nrows() != n || sigma.ncols() != n {
        return Err(OptimizeError::ValueError(format!(
            "sigma must be {}×{} but is {}×{}",
            n, n, sigma.nrows(), sigma.ncols()
        )));
    }
    if gamma < 0.0 {
        return Err(OptimizeError::ValueError(format!(
            "gamma must be non-negative, got {}",
            gamma
        )));
    }
    // Variable layout: [w (n) | t], with t bounding the portfolio std deviation.
    let nw = n + 1;
    let t_idx = n;
    let sigma_arr = sigma.to_owned();
    // Cholesky factor of sigma; retry with a small diagonal ridge if sigma is
    // only positive semi-definite.
    let l = match scirs2_linalg::cholesky(&sigma_arr.view(), None) {
        Ok(factor) => factor,
        Err(_) => {
            let mut reg = sigma_arr.clone();
            for i in 0..n {
                reg[[i, i]] += 1e-6;
            }
            // BUG FIX: was `®.view()` (mis-encoded `&reg;` entity) — must be `&reg.view()`.
            scirs2_linalg::cholesky(&reg.view(), None)
                .map_err(|e| OptimizeError::ComputationError(format!("Cholesky: {}", e)))?
        }
    };
    // Objective: maximize return, penalize risk: -mu^T w + gamma * t.
    let mut obj = Array1::<f64>::zeros(nw);
    for i in 0..n {
        obj[i] = -mu[i];
    }
    obj[t_idx] = gamma;
    // Risk cone: ||L^T w|| <= t  ⇔  w^T Sigma w <= t^2.
    let mut a_soc = Array2::<f64>::zeros((n, nw));
    for i in 0..n {
        for j in 0..n {
            a_soc[[i, j]] = l[[j, i]]; // transpose: row i of L^T
        }
    }
    let b_soc = Array1::<f64>::zeros(n);
    let mut c_soc = Array1::<f64>::zeros(nw);
    c_soc[t_idx] = 1.0;
    let con_var = SOCConstraint::new(a_soc, b_soc, c_soc, 0.0)?;
    // Long-only constraints w_i >= 0 as degenerate cones 0 <= w_i.
    let mut weight_constraints = Vec::new();
    for i in 0..n {
        let a_i = Array2::<f64>::zeros((1, nw));
        let b_i = Array1::from_vec(vec![0.0]);
        let mut c_i = Array1::<f64>::zeros(nw);
        c_i[i] = 1.0;
        weight_constraints.push(SOCConstraint::new(a_i, b_i, c_i, 0.0)?);
    }
    let mut all_cons = vec![con_var];
    all_cons.extend(weight_constraints);
    // Budget constraint: sum_i w_i = 1.
    let mut f_eq = Array2::<f64>::zeros((1, nw));
    for i in 0..n {
        f_eq[[0, i]] = 1.0;
    }
    let g_eq = Array1::from_vec(vec![1.0]);
    let problem = SOCPProblem::new(obj, all_cons)?
        .with_equality(f_eq, g_eq)?;
    let result = socp_interior_point(&problem, None)?;
    let weights = result.x.slice(scirs2_core::ndarray::s![..n]).to_owned();
    let expected_return: f64 = mu.iter().zip(weights.iter()).map(|(m, w)| m * w).sum();
    // Realized variance w^T Sigma w computed from the original covariance.
    let mut var = 0.0_f64;
    for i in 0..n {
        for j in 0..n {
            var += weights[i] * sigma[[i, j]] * weights[j];
        }
    }
    let std_dev = var.sqrt();
    Ok(PortfolioSocpResult {
        weights,
        expected_return,
        std_dev,
        converged: result.converged,
    })
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
// Valid constraint + matching objective dimension should construct cleanly.
#[test]
fn test_socp_problem_dim_check() {
let obj = Array1::from_vec(vec![1.0, 2.0]);
let a = Array2::<f64>::zeros((2, 2));
let b = Array1::from_vec(vec![0.0, 0.0]);
let c = Array1::from_vec(vec![1.0, 0.0]);
let con = SOCConstraint::new(a, b, c, 1.0).expect("valid constraint");
let problem = SOCPProblem::new(obj, vec![con]).expect("valid problem");
assert_eq!(problem.n(), 2);
}
// A (2x3) vs b (len 1): rows/length mismatch must be rejected.
#[test]
fn test_socp_constraint_dim_mismatch() {
let a = Array2::<f64>::zeros((2, 3)); let b = Array1::from_vec(vec![0.0]); let c = Array1::from_vec(vec![1.0, 0.0, 0.0]);
let result = SOCConstraint::new(a, b, c, 0.0);
assert!(result.is_err());
}
// Zero constraints: the solver short-circuits with x = 0 and converged = true.
#[test]
fn test_socp_trivial_no_constraints() {
let obj = Array1::from_vec(vec![1.0]);
let problem = SOCPProblem::new(obj, vec![]).expect("valid");
let result = socp_interior_point(&problem, None).expect("should succeed");
assert!(result.converged);
}
// Smoke test: robust LS on an identity system produces finite coefficients.
#[test]
fn test_robust_ls_basic() {
let a = Array2::from_shape_vec((2, 2), vec![1.0, 0.0, 0.0, 1.0]).expect("valid");
let b = Array1::from_vec(vec![1.0, 1.0]);
let result = robust_ls_socp(&a.view(), &b.view(), 0.1).expect("robust_ls should not fail");
assert!(result.x[0].is_finite());
assert!(result.x[1].is_finite());
}
// The SOCP→SDP arrow-matrix lift should assemble without dimension errors.
#[test]
fn test_socp_to_sdp_builds() {
let obj = Array1::from_vec(vec![1.0]);
let a = Array2::<f64>::zeros((1, 1));
let b = Array1::from_vec(vec![0.0]);
let c = Array1::from_vec(vec![0.0]);
let con = SOCConstraint::new(a, b, c, 1.0).expect("valid");
let problem = SOCPProblem::new(obj, vec![con]).expect("valid");
let sdp = socp_to_sdp(&problem);
assert!(sdp.is_ok(), "SOCP→SDP lift should not fail");
}
// Loose check (epsilon = 0.1) that weights roughly satisfy the budget constraint.
#[test]
fn test_portfolio_basic() {
let mu = Array1::from_vec(vec![0.1, 0.2]);
let sigma = Array2::from_shape_vec((2, 2), vec![0.04, 0.0, 0.0, 0.09]).expect("valid");
let result = portfolio_optimization_socp(&mu.view(), &sigma.view(), 1.0)
.expect("portfolio should not fail");
let sum: f64 = result.weights.iter().sum();
assert_abs_diff_eq!(sum, 1.0, epsilon = 0.1); }
}