scirs2_optimize/differentiable_optimization/
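//! Differentiable linear programming.
//!
//! Solves an LP `min c^T x  s.t.  A_eq x = b_eq, G x <= h` by regularizing it
//! into a strictly convex QP (`Q = regularization * I`). The regularization
//! makes the minimizer unique, so gradients with respect to the problem data
//! can be recovered by implicit differentiation of the KKT conditions.
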
use super::implicit_diff;
use super::types::{DiffLPConfig, DiffLPResult, ImplicitGradient};
use crate::error::{OptimizeError, OptimizeResult};

/// A linear program `min c^T x` subject to `A_eq x = b_eq` and `G x <= h`,
/// stored so that its solution can be differentiated with respect to the
/// problem data.
#[derive(Debug, Clone)]
pub struct DifferentiableLP {
    /// Objective coefficients (length `n`).
    pub c: Vec<f64>,
    /// Equality constraint matrix; each row has length `n`.
    pub a_eq: Vec<Vec<f64>>,
    /// Equality right-hand side; one entry per row of `a_eq`.
    pub b_eq: Vec<f64>,
    /// Inequality constraint matrix; each row has length `n`.
    pub g: Vec<Vec<f64>>,
    /// Inequality right-hand side; one entry per row of `g`.
    pub h: Vec<f64>,
}

impl DifferentiableLP {
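    /// Builds an LP from its problem data, validating that every row of
    /// `a_eq` and `g` has length `c.len()` and that `b_eq` and `h` have one
    /// entry per row of their matrices.
    ///
    /// # Errors
    ///
    /// Returns `OptimizeError::InvalidInput` describing the first dimension
    /// mismatch encountered.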
    pub fn new(
        c: Vec<f64>,
        a_eq: Vec<Vec<f64>>,
        b_eq: Vec<f64>,
        g: Vec<Vec<f64>>,
        h: Vec<f64>,
    ) -> OptimizeResult<Self> {
        let n = c.len();
        // Every equality row must have one coefficient per variable.
        for (i, row) in a_eq.iter().enumerate() {
            if row.len() != n {
                return Err(OptimizeError::InvalidInput(format!(
                    "A_eq row {} has length {} but expected {}",
                    i,
                    row.len(),
                    n
                )));
            }
        }
        if a_eq.len() != b_eq.len() {
            return Err(OptimizeError::InvalidInput(format!(
                "A_eq has {} rows but b_eq has length {}",
                a_eq.len(),
                b_eq.len()
            )));
        }
        // Same checks for the inequality system G x <= h.
        for (i, row) in g.iter().enumerate() {
            if row.len() != n {
                return Err(OptimizeError::InvalidInput(format!(
                    "G row {} has length {} but expected {}",
                    i,
                    row.len(),
                    n
                )));
            }
        }
        if g.len() != h.len() {
            return Err(OptimizeError::InvalidInput(format!(
                "G has {} rows but h has length {}",
                g.len(),
                h.len()
            )));
        }

        Ok(Self { c, a_eq, b_eq, g, h })
    }

    /// Number of decision variables.
    pub fn n(&self) -> usize {
        self.c.len()
    }

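    /// Solves the LP by embedding it in the strictly convex QP
    /// `min (eps/2) x^T x + c^T x` with `eps = config.regularization` and
    /// delegating to `diff_qp::DifferentiableQP::forward`. The returned
    /// `objective` is the original LP objective `c^T x*`, not the
    /// regularized one.
    ///
    /// A usage sketch, mirroring `test_lp_forward_simple` below (the
    /// tolerances are illustrative and depend on the solver configuration):
    ///
    /// ```ignore
    /// // min -x - y  s.t.  x + y <= 1, x >= 0, y >= 0
    /// let lp = DifferentiableLP::new(
    ///     vec![-1.0, -1.0],
    ///     vec![],
    ///     vec![],
    ///     vec![vec![1.0, 1.0], vec![-1.0, 0.0], vec![0.0, -1.0]],
    ///     vec![1.0, 0.0, 0.0],
    /// )?;
    /// let result = lp.forward(&DiffLPConfig::default())?;
    /// assert!(result.converged);
    /// assert!((result.objective - (-1.0)).abs() < 0.1);
    /// ```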
    pub fn forward(&self, config: &DiffLPConfig) -> OptimizeResult<DiffLPResult> {
        let n = self.n();

        // Regularize the LP into a strictly convex QP: with Q = eps * I the
        // minimizer is unique, which is what makes the solution map
        // differentiable.
        let mut q = vec![vec![0.0; n]; n];
        for i in 0..n {
            q[i][i] = config.regularization;
        }

        let qp = super::diff_qp::DifferentiableQP {
            q,
            c: self.c.clone(),
            g: self.g.clone(),
            h: self.h.clone(),
            a: self.a_eq.clone(),
            b: self.b_eq.clone(),
        };

        let qp_config = super::types::DiffQPConfig {
            tolerance: config.tolerance,
            max_iterations: config.max_iterations,
            regularization: config.regularization,
            backward_mode: super::types::BackwardMode::FullDifferentiation,
        };

        let qp_result = qp.forward(&qp_config)?;

        // Report the original LP objective c^T x*, not the regularized one.
        let mut obj = 0.0;
        for i in 0..n {
            obj += self.c[i] * qp_result.optimal_x[i];
        }

        Ok(DiffLPResult {
            optimal_x: qp_result.optimal_x,
            optimal_lambda: qp_result.optimal_lambda,
            optimal_nu: qp_result.optimal_nu,
            objective: obj,
            converged: qp_result.converged,
            iterations: qp_result.iterations,
        })
    }

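    /// Computes gradients of a scalar loss with respect to the problem data
    /// by implicit differentiation of the active-set KKT system of the
    /// regularized QP, given the upstream gradient `dl_dx` of the loss with
    /// respect to the optimal `x`.
    ///
    /// A usage sketch, mirroring `test_lp_backward` below:
    ///
    /// ```ignore
    /// let result = lp.forward(&config)?;
    /// // Loss l(x) = x[0] + x[1], so dl/dx = [1, 1].
    /// let grad = lp.backward(&result, &[1.0, 1.0], &config)?;
    /// assert_eq!(grad.dl_dc.len(), lp.n());
    /// ```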
    pub fn backward(
        &self,
        result: &DiffLPResult,
        dl_dx: &[f64],
        config: &DiffLPConfig,
    ) -> OptimizeResult<ImplicitGradient> {
        let n = self.n();
        if dl_dx.len() != n {
            return Err(OptimizeError::InvalidInput(format!(
                "dl_dx length {} != n {}",
                dl_dx.len(),
                n
            )));
        }

        // Rebuild the same regularized Q used in the forward pass so the
        // backward pass differentiates the problem that was actually solved.
        let mut q = vec![vec![0.0; n]; n];
        for i in 0..n {
            q[i][i] = config.regularization;
        }

        implicit_diff::compute_active_set_implicit_gradient(
            &q,
            &self.g,
            &self.h,
            &self.a_eq,
            &result.optimal_x,
            &result.optimal_lambda,
            &result.optimal_nu,
            dl_dx,
            config.active_constraint_tol,
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // min -x - y  s.t.  x + y <= 1, x >= 0, y >= 0.
    // Every point on the edge x + y = 1 is optimal with objective -1.
    #[test]
    fn test_lp_forward_simple() {
        let lp = DifferentiableLP::new(
            vec![-1.0, -1.0],
            vec![],
            vec![],
            vec![vec![1.0, 1.0], vec![-1.0, 0.0], vec![0.0, -1.0]],
            vec![1.0, 0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        assert!(result.converged, "LP should converge");
        let sum: f64 = result.optimal_x.iter().sum();
        assert!((sum - 1.0).abs() < 0.1, "x+y = {} (expected ~1.0)", sum);
        assert!(
            (result.objective - (-1.0)).abs() < 0.1,
            "obj = {} (expected ~-1.0)",
            result.objective
        );
    }

    // min -x  s.t.  x + y = 1, x >= 0, y >= 0, so the optimum is x = 1, y = 0.
    #[test]
    fn test_lp_with_equality() {
        let lp = DifferentiableLP::new(
            vec![-1.0, 0.0],
            vec![vec![1.0, 1.0]],
            vec![1.0],
            vec![vec![-1.0, 0.0], vec![0.0, -1.0]],
            vec![0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        assert!(result.converged);
        assert!(
            (result.optimal_x[0] - 1.0).abs() < 0.1,
            "x = {} (expected ~1.0)",
            result.optimal_x[0]
        );
    }

    #[test]
    fn test_lp_backward() {
        let lp = DifferentiableLP::new(
            vec![-1.0, -1.0],
            vec![],
            vec![],
            vec![vec![1.0, 1.0], vec![-1.0, 0.0], vec![0.0, -1.0]],
            vec![1.0, 0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        // Upstream gradient of the loss l(x) = x[0] + x[1].
        let dl_dx = vec![1.0, 1.0];
        let grad = lp
            .backward(&result, &dl_dx, &config)
            .expect("Backward failed");

        assert_eq!(grad.dl_dc.len(), 2);
        assert!(grad.dl_dc[0].is_finite());
        assert!(grad.dl_dc[1].is_finite());
    }
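
    // A finite-difference sanity check on `backward`. This is a rough sketch
    // rather than a tight test: the implicit gradient and the finite
    // difference both differentiate the same regularized QP, so they should
    // agree loosely. The step size and tolerance are conservative because
    // `forward` only solves to `config.tolerance`.
    #[test]
    fn test_lp_backward_finite_difference() {
        let c = vec![-1.0, -1.0];
        let g = vec![vec![1.0, 1.0], vec![-1.0, 0.0], vec![0.0, -1.0]];
        let h = vec![1.0, 0.0, 0.0];
        let config = DiffLPConfig::default();

        // Loss l(x) = x[0] + x[1], so the upstream gradient is [1, 1].
        let loss = |c_val: &[f64]| -> f64 {
            let lp = DifferentiableLP::new(
                c_val.to_vec(),
                vec![],
                vec![],
                g.clone(),
                h.clone(),
            )
            .expect("LP creation failed");
            lp.forward(&config)
                .expect("Forward failed")
                .optimal_x
                .iter()
                .sum()
        };

        let lp = DifferentiableLP::new(c.clone(), vec![], vec![], g.clone(), h.clone())
            .expect("LP creation failed");
        let result = lp.forward(&config).expect("Forward failed");
        let grad = lp
            .backward(&result, &[1.0, 1.0], &config)
            .expect("Backward failed");

        let eps = 1e-4;
        for i in 0..c.len() {
            let mut c_plus = c.clone();
            c_plus[i] += eps;
            let mut c_minus = c.clone();
            c_minus[i] -= eps;
            let fd = (loss(&c_plus) - loss(&c_minus)) / (2.0 * eps);
            // Loose tolerance: forward-solve noise dominates the comparison.
            assert!(
                (grad.dl_dc[i] - fd).abs() < 0.1,
                "dl_dc[{}] = {} vs finite difference {}",
                i,
                grad.dl_dc[i],
                fd
            );
        }
    }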

    #[test]
    fn test_lp_dimension_validation() {
        // `a_eq` has a row of length 1, but `c` has 2 entries.
        let result = DifferentiableLP::new(
            vec![1.0, 2.0],
            vec![vec![1.0]],
            vec![1.0],
            vec![],
            vec![],
        );
        assert!(result.is_err());
    }
}