//! Semidefinite programming (SDP) solver — ruvector_math/optimization/sdp.rs
/// A semidefinite programming problem in standard form:
/// minimize `<C, X>` subject to `<A_i, X> = b_i` and `X ⪰ 0`,
/// where `X` is an `n × n` symmetric matrix variable.
///
/// All matrices are stored flattened row-major as `Vec<f64>` of length `n * n`.
#[derive(Debug, Clone)]
pub struct SDPProblem {
    /// Dimension of the square matrix variable `X`.
    pub n: usize,
    /// Objective matrix `C`, flattened row-major (length `n * n`).
    pub c: Vec<f64>,
    /// Constraint matrices `A_i`, each flattened row-major (length `n * n`).
    pub constraints: Vec<Vec<f64>>,
    /// Right-hand sides `b_i`, one per constraint.
    pub b: Vec<f64>,
}
19
20impl SDPProblem {
21 pub fn new(n: usize) -> Self {
23 Self {
24 n,
25 c: vec![0.0; n * n],
26 constraints: Vec::new(),
27 b: Vec::new(),
28 }
29 }
30
31 pub fn set_objective(&mut self, c: Vec<f64>) {
33 assert_eq!(c.len(), self.n * self.n);
34 self.c = c;
35 }
36
37 pub fn add_constraint(&mut self, a: Vec<f64>, bi: f64) {
39 assert_eq!(a.len(), self.n * self.n);
40 self.constraints.push(a);
41 self.b.push(bi);
42 }
43
44 pub fn num_constraints(&self) -> usize {
46 self.constraints.len()
47 }
48}
49
50#[derive(Debug, Clone)]
52pub struct SDPSolution {
53 pub x: Vec<f64>,
55 pub value: f64,
57 pub status: SDPStatus,
59 pub iterations: usize,
61}
62
/// Termination status of an SDP solve.
#[derive(Debug, Clone, PartialEq)]
pub enum SDPStatus {
    /// Converged: all constraint violations fell below the tolerance.
    Optimal,
    /// No feasible point exists.
    Infeasible,
    /// Objective is unbounded below on the feasible set.
    Unbounded,
    /// Iteration cap reached before convergence.
    MaxIterations,
    /// A numerical failure interrupted the solve.
    NumericalError,
}
72
/// First-order SDP solver configuration.
///
/// Public types should be inspectable and copyable, so `Debug` and `Clone`
/// are derived (they were missing, making the solver awkward to log or reuse).
#[derive(Debug, Clone)]
pub struct SDPSolver {
    /// Maximum number of outer iterations before giving up.
    pub max_iters: usize,
    /// Convergence threshold on the maximum constraint violation.
    pub tolerance: f64,
    /// Gradient-descent step size for the primal update.
    pub step_size: f64,
}
82
83impl SDPSolver {
84 pub fn new() -> Self {
86 Self {
87 max_iters: 1000,
88 tolerance: 1e-6,
89 step_size: 0.01,
90 }
91 }
92
93 pub fn solve(&self, problem: &SDPProblem) -> SDPSolution {
95 let n = problem.n;
96 let m = problem.num_constraints();
97
98 if n == 0 {
99 return SDPSolution {
100 x: vec![],
101 value: 0.0,
102 status: SDPStatus::Optimal,
103 iterations: 0,
104 };
105 }
106
107 let mut x = vec![0.0; n * n];
109 for i in 0..n {
110 x[i * n + i] = 1.0;
111 }
112
113 let mut dual = vec![0.0; m];
115 let rho = 1.0;
116
117 for iter in 0..self.max_iters {
118 let mut grad = problem.c.clone();
120
121 for (j, (a, &d)) in problem.constraints.iter().zip(dual.iter()).enumerate() {
122 let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
123 let residual = ax - problem.b[j];
124
125 for k in 0..n * n {
127 grad[k] += (d + rho * residual) * a[k];
128 }
129 }
130
131 for k in 0..n * n {
133 x[k] -= self.step_size * grad[k];
134 }
135
136 self.project_psd(&mut x, n);
138
139 let mut max_violation = 0.0f64;
141 for (j, a) in problem.constraints.iter().enumerate() {
142 let ax: f64 = (0..n * n).map(|k| a[k] * x[k]).sum();
143 let residual = ax - problem.b[j];
144 dual[j] += rho * residual;
145 max_violation = max_violation.max(residual.abs());
146 }
147
148 if max_violation < self.tolerance {
150 let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
151 return SDPSolution {
152 x,
153 value,
154 status: SDPStatus::Optimal,
155 iterations: iter + 1,
156 };
157 }
158 }
159
160 let value: f64 = (0..n * n).map(|k| problem.c[k] * x[k]).sum();
161 SDPSolution {
162 x,
163 value,
164 status: SDPStatus::MaxIterations,
165 iterations: self.max_iters,
166 }
167 }
168
169 fn project_psd(&self, x: &mut [f64], n: usize) {
171 for i in 0..n {
173 for j in i + 1..n {
174 let avg = (x[i * n + j] + x[j * n + i]) / 2.0;
175 x[i * n + j] = avg;
176 x[j * n + i] = avg;
177 }
178 }
179
180 if n <= 10 {
183 self.project_psd_small(x, n);
184 } else {
185 for i in 0..n {
187 let mut row_sum = 0.0;
188 for j in 0..n {
189 if i != j {
190 row_sum += x[i * n + j].abs();
191 }
192 }
193 x[i * n + i] = x[i * n + i].max(row_sum + 0.01);
194 }
195 }
196 }
197
198 fn project_psd_small(&self, x: &mut [f64], n: usize) {
199 let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
204
205 let mut lambda_max = 0.0;
207 for _ in 0..20 {
208 let mut y = vec![0.0; n];
209 for i in 0..n {
210 for j in 0..n {
211 y[i] += x[i * n + j] * v[j];
212 }
213 }
214 let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
215 lambda_max = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
216 if norm > 1e-15 {
217 for i in 0..n {
218 v[i] = y[i] / norm;
219 }
220 }
221 }
222
223 let shift = lambda_max.abs() + 1.0;
225 let mut v: Vec<f64> = (0..n).map(|i| 1.0 / (n as f64).sqrt()).collect();
226 let mut lambda_min = 0.0;
227
228 for _ in 0..20 {
229 let mut y = vec![0.0; n];
230 for i in 0..n {
231 for j in 0..n {
232 let val = if i == j { shift - x[i * n + j] } else { -x[i * n + j] };
233 y[i] += val * v[j];
234 }
235 }
236 let norm: f64 = y.iter().map(|&yi| yi * yi).sum::<f64>().sqrt();
237 let lambda_shifted: f64 = v.iter().zip(y.iter()).map(|(&vi, &yi)| vi * yi).sum();
238 lambda_min = shift - lambda_shifted;
239 if norm > 1e-15 {
240 for i in 0..n {
241 v[i] = y[i] / norm;
242 }
243 }
244 }
245
246 if lambda_min < 0.0 {
248 let alpha = -lambda_min + 0.01;
249 for i in 0..n {
250 x[i * n + i] += alpha;
251 }
252 }
253 }
254}
255
256impl Default for SDPSolver {
257 fn default() -> Self {
258 Self::new()
259 }
260}
261
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimizing tr(X) subject to X[0][0] = 1 must terminate cleanly.
    #[test]
    fn test_sdp_simple() {
        let mut problem = SDPProblem::new(2);

        // Objective: C = I, i.e. minimize the trace.
        let mut objective = vec![0.0; 4];
        objective[0] = 1.0;
        objective[3] = 1.0;
        problem.set_objective(objective);

        // Constraint: X[0][0] = 1.
        let mut constraint = vec![0.0; 4];
        constraint[0] = 1.0;
        problem.add_constraint(constraint, 1.0);

        let solution = SDPSolver::new().solve(&problem);

        assert!(matches!(
            solution.status,
            SDPStatus::Optimal | SDPStatus::MaxIterations
        ));
    }

    /// With both diagonal entries pinned to 1, the solver should recover
    /// them (unless it ran out of iterations first).
    #[test]
    fn test_sdp_feasibility() {
        let mut problem = SDPProblem::new(2);
        problem.set_objective(vec![0.0; 4]);

        let mut first = vec![0.0; 4];
        first[0] = 1.0;
        problem.add_constraint(first, 1.0);

        let mut second = vec![0.0; 4];
        second[3] = 1.0;
        problem.add_constraint(second, 1.0);

        let solution = SDPSolver::new().solve(&problem);
        let capped = solution.status == SDPStatus::MaxIterations;

        assert!((solution.x[0] - 1.0).abs() < 0.1 || capped);
        assert!((solution.x[3] - 1.0).abs() < 0.1 || capped);
    }
}