//! ruvector_math/optimization/sdp.rs — semidefinite programming (SDP) problem
//! types and an approximate first-order solver.
/// Data for a semidefinite program in standard primal form:
/// minimize <C, X> subject to <A_i, X> = b_i and X positive semidefinite,
/// where every n x n matrix is stored flattened (row-major) as a `Vec<f64>`
/// of length `n * n`.
#[derive(Debug, Clone)]
pub struct SDPProblem {
    /// Side length of the matrix variable X (X is n x n).
    pub n: usize,
    /// Objective matrix C, flattened (length `n * n`).
    pub c: Vec<f64>,
    /// Constraint matrices A_i, each flattened (length `n * n`).
    pub constraints: Vec<Vec<f64>>,
    /// Right-hand sides b_i, one per entry of `constraints`.
    pub b: Vec<f64>,
}

20impl SDPProblem {
21 pub fn new(n: usize) -> Self {
23 Self {
24 n,
25 c: vec![0.0; n * n],
26 constraints: Vec::new(),
27 b: Vec::new(),
28 }
29 }
30
31 pub fn set_objective(&mut self, c: Vec<f64>) {
33 assert_eq!(c.len(), self.n * self.n);
34 self.c = c;
35 }
36
37 pub fn add_constraint(&mut self, a: Vec<f64>, bi: f64) {
39 assert_eq!(a.len(), self.n * self.n);
40 self.constraints.push(a);
41 self.b.push(bi);
42 }
43
44 pub fn num_constraints(&self) -> usize {
46 self.constraints.len()
47 }
48}
49
50#[derive(Debug, Clone)]
52pub struct SDPSolution {
53 pub x: Vec<f64>,
55 pub value: f64,
57 pub status: SDPStatus,
59 pub iterations: usize,
61}
62
/// Termination status reported in [`SDPSolution::status`].
///
/// `Eq` and `Copy` are derived in addition to `Clone`/`PartialEq`: the enum
/// is fieldless, so both are free and make the type easier to use as a key
/// or pass by value.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SDPStatus {
    /// The maximum constraint violation fell below the solver tolerance.
    Optimal,
    /// Reserved for infeasibility detection (not currently produced by
    /// [`SDPSolver::solve`]).
    Infeasible,
    /// Reserved for unboundedness detection (not currently produced by
    /// [`SDPSolver::solve`]).
    Unbounded,
    /// The iteration limit was reached before convergence.
    MaxIterations,
    /// Reserved for numerical-failure reporting (not currently produced by
    /// [`SDPSolver::solve`]).
    NumericalError,
}

/// Approximate SDP solver: projected gradient descent on an augmented
/// Lagrangian, with a heuristic projection toward the PSD cone.
///
/// `Debug` and `Clone` are derived so the configuration can be logged and
/// copied; all fields are plain settings.
#[derive(Debug, Clone)]
pub struct SDPSolver {
    /// Maximum number of outer iterations before giving up.
    pub max_iters: usize,
    /// Convergence threshold on the maximum constraint violation.
    pub tolerance: f64,
    /// Fixed gradient step length.
    pub step_size: f64,
}

83impl SDPSolver {
84 pub fn new() -> Self {
86 Self {
87 max_iters: 1000,
88 tolerance: 1e-6,
89 step_size: 0.01,
90 }
91 }
92
93 pub fn solve(&self, problem: &SDPProblem) -> SDPSolution {
95 let n = problem.n;
96 let m = problem.num_constraints();
97
98 if n == 0 {
99 return SDPSolution {
100 x: vec![],
101 value: 0.0,
102 status: SDPStatus::Optimal,
103 iterations: 0,
104 };
105 }
106
107 let mut x = vec![0.0; n * n];
109 for i in 0..n {
110 x[i * n + i] = 1.0;
111 }
112
113 let mut dual = vec![0.0; m];
115 let rho = 1.0;
116
117 for iter in 0..self.max_iters {
118 let mut grad = problem.c.clone();
120
121 for (j, (a, &d)) in problem.constraints.iter().zip(dual.iter()).enumerate() {
122 let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
123 let residual = ax - problem.b[j];
124
125 for k in 0..n * n {
127 grad[k] += (d + rho * residual) * a[k];
128 }
129 }
130
131 for k in 0..n * n {
133 x[k] -= self.step_size * grad[k];
134 }
135
136 self.project_psd(&mut x, n);
138
139 let mut max_violation = 0.0f64;
141 for (j, a) in problem.constraints.iter().enumerate() {
142 let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
143 let residual = ax - problem.b[j];
144 dual[j] += rho * residual;
145 max_violation = max_violation.max(residual.abs());
146 }
147
148 if max_violation < self.tolerance {
150 let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
151 return SDPSolution {
152 x,
153 value,
154 status: SDPStatus::Optimal,
155 iterations: iter + 1,
156 };
157 }
158 }
159
160 let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
161 SDPSolution {
162 x,
163 value,
164 status: SDPStatus::MaxIterations,
165 iterations: self.max_iters,
166 }
167 }
168
169 fn project_psd(&self, x: &mut [f64], n: usize) {
171 for i in 0..n {
173 for j in i + 1..n {
174 let avg = (x[i * n + j] + x[j * n + i]) / 2.0;
175 x[i * n + j] = avg;
176 x[j * n + i] = avg;
177 }
178 }
179
180 if n <= 10 {
183 self.project_psd_small(x, n);
184 } else {
185 for i in 0..n {
187 let mut row_sum = 0.0;
188 for j in 0..n {
189 if i != j {
190 row_sum += x[i * n + j].abs();
191 }
192 }
193 x[i * n + i] = x[i * n + i].max(row_sum + 0.01);
194 }
195 }
196 }
197
198 fn project_psd_small(&self, x: &mut [f64], n: usize) {
199 let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
204
205 let mut lambda_max = 0.0;
207 for _ in 0..20 {
208 let mut y = vec![0.0; n];
209 for i in 0..n {
210 for j in 0..n {
211 y[i] += x[i * n + j] * v[j];
212 }
213 }
214 let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
215 lambda_max = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
216 if norm > 1e-15 {
217 for i in 0..n {
218 v[i] = y[i] / norm;
219 }
220 }
221 }
222
223 let shift = lambda_max.abs() + 1.0;
225 let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
226 let mut lambda_min = 0.0;
227
228 for _ in 0..20 {
229 let mut y = vec![0.0; n];
230 for i in 0..n {
231 for j in 0..n {
232 let val = if i == j {
233 shift - x[i * n + j]
234 } else {
235 -x[i * n + j]
236 };
237 y[i] += val * v[j];
238 }
239 }
240 let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
241 let lambda_shifted: f64 = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
242 lambda_min = shift - lambda_shifted;
243 if norm > 1e-15 {
244 for i in 0..n {
245 v[i] = y[i] / norm;
246 }
247 }
248 }
249
250 if lambda_min < 0.0 {
252 let alpha = -lambda_min + 0.01;
253 for i in 0..n {
254 x[i * n + i] += alpha;
255 }
256 }
257 }
258}
259
260impl Default for SDPSolver {
261 fn default() -> Self {
262 Self::new()
263 }
264}
265
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimize trace(X) for a 2x2 variable with X[0][0] pinned to 1;
    /// the solver must terminate with a recognized status.
    #[test]
    fn test_sdp_simple() {
        let mut prob = SDPProblem::new(2);

        // Objective C = I (trace objective).
        let mut obj = vec![0.0; 4];
        obj[0] = 1.0;
        obj[3] = 1.0;
        prob.set_objective(obj);

        // Constraint <A, X> = 1 with A = e_11.
        let mut a = vec![0.0; 4];
        a[0] = 1.0;
        prob.add_constraint(a, 1.0);

        let sol = SDPSolver::new().solve(&prob);
        assert!(
            sol.status == SDPStatus::Optimal || sol.status == SDPStatus::MaxIterations
        );
    }

    /// Feasibility-only problem: zero objective, both diagonal entries of a
    /// 2x2 variable pinned to 1; the returned iterate should satisfy both
    /// constraints closely (or the solver hits its iteration limit).
    #[test]
    fn test_sdp_feasibility() {
        let mut prob = SDPProblem::new(2);
        prob.set_objective(vec![0.0; 4]);

        let mut a1 = vec![0.0; 4];
        a1[0] = 1.0;
        prob.add_constraint(a1, 1.0);

        let mut a2 = vec![0.0; 4];
        a2[3] = 1.0;
        prob.add_constraint(a2, 1.0);

        let sol = SDPSolver::new().solve(&prob);

        let hit_limit = sol.status == SDPStatus::MaxIterations;
        assert!((sol.x[0] - 1.0).abs() < 0.1 || hit_limit);
        assert!((sol.x[3] - 1.0).abs() < 0.1 || hit_limit);
    }
}