// ruvector_math/optimization/sdp.rs

//! Semidefinite Programming (SDP)
//!
//! Simple SDP solver for SOS (sum-of-squares) certificates.

/// SDP problem in standard form:
///   minimize    trace(C * X)
///   subject to  trace(A_i * X) = b_i,  X ≽ 0
///
/// All matrices are stored as dense row-major `Vec<f64>` of length `n * n`.
#[derive(Debug, Clone)]
pub struct SDPProblem {
    /// Matrix dimension (X, C and each A_i are n × n)
    pub n: usize,
    /// Objective matrix C (row-major, length n * n)
    pub c: Vec<f64>,
    /// Constraint matrices A_i (each row-major, length n * n)
    pub constraints: Vec<Vec<f64>>,
    /// Constraint right-hand sides b_i (parallel to `constraints`)
    pub b: Vec<f64>,
}

20impl SDPProblem {
21    /// Create new SDP problem
22    pub fn new(n: usize) -> Self {
23        Self {
24            n,
25            c: vec![0.0; n * n],
26            constraints: Vec::new(),
27            b: Vec::new(),
28        }
29    }
30
31    /// Set objective matrix
32    pub fn set_objective(&mut self, c: Vec<f64>) {
33        assert_eq!(c.len(), self.n * self.n);
34        self.c = c;
35    }
36
37    /// Add constraint
38    pub fn add_constraint(&mut self, a: Vec<f64>, bi: f64) {
39        assert_eq!(a.len(), self.n * self.n);
40        self.constraints.push(a);
41        self.b.push(bi);
42    }
43
44    /// Number of constraints
45    pub fn num_constraints(&self) -> usize {
46        self.constraints.len()
47    }
48}
49
/// Result returned by the SDP solver.
#[derive(Debug, Clone)]
pub struct SDPSolution {
    /// Final X matrix (row-major, n × n; empty when the problem had n == 0)
    pub x: Vec<f64>,
    /// Objective value trace(C * X) at the final iterate
    pub value: f64,
    /// Solver status
    pub status: SDPStatus,
    /// Number of iterations performed
    pub iterations: usize,
}

/// Solver termination status
#[derive(Debug, Clone, PartialEq)]
pub enum SDPStatus {
    /// Converged: maximum constraint violation fell below the tolerance
    Optimal,
    /// Problem has no feasible point (reserved; the current first-order
    /// solver does not produce this variant)
    Infeasible,
    /// Objective unbounded below (reserved; the current first-order
    /// solver does not produce this variant)
    Unbounded,
    /// Iteration budget exhausted before convergence
    MaxIterations,
    /// Numerical breakdown (reserved; the current first-order solver
    /// does not produce this variant)
    NumericalError,
}

/// Simple projected gradient SDP solver
///
/// First-order augmented Lagrangian method: gradient step on the Lagrangian,
/// projection onto (an approximation of) the PSD cone, then dual update.
/// Not a high-accuracy interior-point method; intended for small problems.
pub struct SDPSolver {
    /// Maximum iterations before returning `MaxIterations`
    pub max_iters: usize,
    /// Convergence tolerance on the maximum constraint violation
    pub tolerance: f64,
    /// Gradient descent step size
    pub step_size: f64,
}

83impl SDPSolver {
84    /// Create with default parameters
85    pub fn new() -> Self {
86        Self {
87            max_iters: 1000,
88            tolerance: 1e-6,
89            step_size: 0.01,
90        }
91    }
92
93    /// Solve SDP problem
94    pub fn solve(&self, problem: &SDPProblem) -> SDPSolution {
95        let n = problem.n;
96        let m = problem.num_constraints();
97
98        if n == 0 {
99            return SDPSolution {
100                x: vec![],
101                value: 0.0,
102                status: SDPStatus::Optimal,
103                iterations: 0,
104            };
105        }
106
107        // Initialize X as identity
108        let mut x = vec![0.0; n * n];
109        for i in 0..n {
110            x[i * n + i] = 1.0;
111        }
112
113        // Simple augmented Lagrangian method
114        let mut dual = vec![0.0; m];
115        let rho = 1.0;
116
117        for iter in 0..self.max_iters {
118            // Compute gradient of Lagrangian
119            let mut grad = problem.c.clone();
120
121            for (j, (a, &d)) in problem.constraints.iter().zip(dual.iter()).enumerate() {
122                let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
123                let residual = ax - problem.b[j];
124
125                // Gradient contribution from constraint
126                for k in 0..n * n {
127                    grad[k] += (d + rho * residual) * a[k];
128                }
129            }
130
131            // Gradient descent step
132            for k in 0..n * n {
133                x[k] -= self.step_size * grad[k];
134            }
135
136            // Project onto PSD cone
137            self.project_psd(&mut x, n);
138
139            // Update dual variables
140            let mut max_violation = 0.0f64;
141            for (j, a) in problem.constraints.iter().enumerate() {
142                let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
143                let residual = ax - problem.b[j];
144                dual[j] += rho * residual;
145                max_violation = max_violation.max(residual.abs());
146            }
147
148            // Check convergence
149            if max_violation < self.tolerance {
150                let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
151                return SDPSolution {
152                    x,
153                    value,
154                    status: SDPStatus::Optimal,
155                    iterations: iter + 1,
156                };
157            }
158        }
159
160        let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
161        SDPSolution {
162            x,
163            value,
164            status: SDPStatus::MaxIterations,
165            iterations: self.max_iters,
166        }
167    }
168
169    /// Project matrix onto PSD cone via eigendecomposition
170    fn project_psd(&self, x: &mut [f64], n: usize) {
171        // Symmetrize first
172        for i in 0..n {
173            for j in i + 1..n {
174                let avg = (x[i * n + j] + x[j * n + i]) / 2.0;
175                x[i * n + j] = avg;
176                x[j * n + i] = avg;
177            }
178        }
179
180        // For small matrices, use power iteration to find and remove negative eigencomponents
181        // This is a simplified approach
182        if n <= 10 {
183            self.project_psd_small(x, n);
184        } else {
185            // For larger matrices, just ensure diagonal dominance
186            for i in 0..n {
187                let mut row_sum = 0.0;
188                for j in 0..n {
189                    if i != j {
190                        row_sum += x[i * n + j].abs();
191                    }
192                }
193                x[i * n + i] = x[i * n + i].max(row_sum + 0.01);
194            }
195        }
196    }
197
198    fn project_psd_small(&self, x: &mut [f64], n: usize) {
199        // Simple approach: ensure minimum eigenvalue is non-negative
200        // by adding αI where α makes smallest eigenvalue ≥ 0
201
202        // Estimate smallest eigenvalue via power iteration on -X + λ_max I
203        let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
204
205        // First get largest eigenvalue estimate
206        let mut lambda_max = 0.0;
207        for _ in 0..20 {
208            let mut y = vec![0.0; n];
209            for i in 0..n {
210                for j in 0..n {
211                    y[i] += x[i * n + j] * v[j];
212                }
213            }
214            let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
215            lambda_max = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
216            if norm > 1e-15 {
217                for i in 0..n {
218                    v[i] = y[i] / norm;
219                }
220            }
221        }
222
223        // Now find smallest eigenvalue using shifted power iteration
224        let shift = lambda_max.abs() + 1.0;
225        let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
226        let mut lambda_min = 0.0;
227
228        for _ in 0..20 {
229            let mut y = vec![0.0; n];
230            for i in 0..n {
231                for j in 0..n {
232                    let val = if i == j { shift - x[i * n + j] } else { -x[i * n + j] };
233                    y[i] += val * v[j];
234                }
235            }
236            let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
237            let lambda_shifted: f64 = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
238            lambda_min = shift - lambda_shifted;
239            if norm > 1e-15 {
240                for i in 0..n {
241                    v[i] = y[i] / norm;
242                }
243            }
244        }
245
246        // If smallest eigenvalue is negative, shift matrix
247        if lambda_min < 0.0 {
248            let alpha = -lambda_min + 0.01;
249            for i in 0..n {
250                x[i * n + i] += alpha;
251            }
252        }
253    }
254}
255
256impl Default for SDPSolver {
257    fn default() -> Self {
258        Self::new()
259    }
260}
261
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sdp_simple() {
        // Minimize trace(X) subject to X_{00} = 1, X ≽ 0.
        let mut problem = SDPProblem::new(2);

        // Objective: trace(X) = X_{00} + X_{11} (diagonal of 2x2 row-major).
        problem.set_objective(vec![1.0, 0.0, 0.0, 1.0]);

        // Constraint: X_{00} = 1.
        problem.add_constraint(vec![1.0, 0.0, 0.0, 0.0], 1.0);

        let solution = SDPSolver::new().solve(&problem);

        // Smoke test: the solver either converges or exhausts its budget.
        assert!(matches!(
            solution.status,
            SDPStatus::Optimal | SDPStatus::MaxIterations
        ));
    }

    #[test]
    fn test_sdp_feasibility() {
        // Feasibility: find X ≽ 0 with X_{00} = 1 and X_{11} = 1,
        // under a zero objective.
        let mut problem = SDPProblem::new(2);
        problem.set_objective(vec![0.0; 4]);
        problem.add_constraint(vec![1.0, 0.0, 0.0, 0.0], 1.0); // X_{00} = 1
        problem.add_constraint(vec![0.0, 0.0, 0.0, 1.0], 1.0); // X_{11} = 1

        let solution = SDPSolver::new().solve(&problem);

        // The diagonal should approximately satisfy the constraints,
        // unless the solver hit the iteration cap.
        let hit_cap = solution.status == SDPStatus::MaxIterations;
        assert!((solution.x[0] - 1.0).abs() < 0.1 || hit_cap);
        assert!((solution.x[3] - 1.0).abs() < 0.1 || hit_cap);
    }
}
316}