// scirs2_optimize/differentiable_optimization/diff_lp.rs
//! Differentiable Linear Programming.
//!
//! Solves the LP:
//!
//!   min  c'x
//!   s.t. Ax = b   (p equalities)
//!        Gx ≤ h   (m inequalities)
//!
//! and computes gradients of x* w.r.t. (c, A, b, G, h) via implicit
//! differentiation through the KKT conditions at the active constraints.
//!
//! At an LP optimum, the active inequality constraints define a polyhedron
//! face. We differentiate as if solving the equality-constrained system
//! formed by the active set, which is a degenerate QP with Q = 0 (plus
//! regularization for numerical stability).
use super::implicit_diff;
use super::types::{DiffLPConfig, DiffLPResult, ImplicitGradient};
use crate::error::{OptimizeError, OptimizeResult};
/// A differentiable LP layer.
///
/// Holds the dense problem data for the LP
///
///   min  c'x   s.t.   A_eq x = b_eq,   G x ≤ h,
///
/// with matrices stored row-major as `Vec<Vec<f64>>`. Dimension
/// consistency is enforced by [`DifferentiableLP::new`]; constructing the
/// struct literally bypasses that validation.
#[derive(Debug, Clone)]
pub struct DifferentiableLP {
    /// Objective coefficient vector c (n).
    pub c: Vec<f64>,
    /// Equality constraint matrix A (p×n).
    pub a_eq: Vec<Vec<f64>>,
    /// Equality constraint rhs b (p).
    pub b_eq: Vec<f64>,
    /// Inequality constraint matrix G (m×n): Gx ≤ h.
    pub g: Vec<Vec<f64>>,
    /// Inequality constraint rhs h (m).
    pub h: Vec<f64>,
}
35
36impl DifferentiableLP {
37    /// Create a new differentiable LP.
38    pub fn new(
39        c: Vec<f64>,
40        a_eq: Vec<Vec<f64>>,
41        b_eq: Vec<f64>,
42        g: Vec<Vec<f64>>,
43        h: Vec<f64>,
44    ) -> OptimizeResult<Self> {
45        let n = c.len();
46        for (i, row) in a_eq.iter().enumerate() {
47            if row.len() != n {
48                return Err(OptimizeError::InvalidInput(format!(
49                    "A_eq row {} has length {} but expected {}",
50                    i,
51                    row.len(),
52                    n
53                )));
54            }
55        }
56        if a_eq.len() != b_eq.len() {
57            return Err(OptimizeError::InvalidInput(format!(
58                "A_eq has {} rows but b_eq has length {}",
59                a_eq.len(),
60                b_eq.len()
61            )));
62        }
63        for (i, row) in g.iter().enumerate() {
64            if row.len() != n {
65                return Err(OptimizeError::InvalidInput(format!(
66                    "G row {} has length {} but expected {}",
67                    i,
68                    row.len(),
69                    n
70                )));
71            }
72        }
73        if g.len() != h.len() {
74            return Err(OptimizeError::InvalidInput(format!(
75                "G has {} rows but h has length {}",
76                g.len(),
77                h.len()
78            )));
79        }
80
81        Ok(Self {
82            c,
83            a_eq,
84            b_eq,
85            g,
86            h,
87        })
88    }
89
90    /// Number of primal variables.
91    pub fn n(&self) -> usize {
92        self.c.len()
93    }
94
95    /// Solve the LP (forward pass).
96    ///
97    /// Internally converts to a QP with Q = εI (small regularization) and
98    /// solves via interior-point method.
99    pub fn forward(&self, config: &DiffLPConfig) -> OptimizeResult<DiffLPResult> {
100        let n = self.n();
101        let m = self.h.len();
102        let p = self.b_eq.len();
103
104        // Convert LP to a lightly regularised QP: Q = reg * I
105        let mut q = vec![vec![0.0; n]; n];
106        for i in 0..n {
107            q[i][i] = config.regularization;
108        }
109
110        // Use the QP solver from diff_qp
111        let qp = super::diff_qp::DifferentiableQP {
112            q,
113            c: self.c.clone(),
114            g: self.g.clone(),
115            h: self.h.clone(),
116            a: self.a_eq.clone(),
117            b: self.b_eq.clone(),
118        };
119
120        let qp_config = super::types::DiffQPConfig {
121            tolerance: config.tolerance,
122            max_iterations: config.max_iterations,
123            regularization: config.regularization,
124            backward_mode: super::types::BackwardMode::FullDifferentiation,
125        };
126
127        let qp_result = qp.forward(&qp_config)?;
128
129        // Compute LP objective (without the regularization term)
130        let mut obj = 0.0;
131        for i in 0..n {
132            obj += self.c[i] * qp_result.optimal_x[i];
133        }
134
135        Ok(DiffLPResult {
136            optimal_x: qp_result.optimal_x,
137            optimal_lambda: qp_result.optimal_lambda,
138            optimal_nu: qp_result.optimal_nu,
139            objective: obj,
140            converged: qp_result.converged,
141            iterations: qp_result.iterations,
142        })
143    }
144
145    /// Backward pass: compute gradients of loss w.r.t. LP parameters.
146    ///
147    /// Uses active-set implicit differentiation: at the LP optimum, only
148    /// active inequality constraints matter, and we differentiate through
149    /// the resulting equality system.
150    pub fn backward(
151        &self,
152        result: &DiffLPResult,
153        dl_dx: &[f64],
154        config: &DiffLPConfig,
155    ) -> OptimizeResult<ImplicitGradient> {
156        let n = self.n();
157        if dl_dx.len() != n {
158            return Err(OptimizeError::InvalidInput(format!(
159                "dl_dx length {} != n {}",
160                dl_dx.len(),
161                n
162            )));
163        }
164
165        // Build Q = reg * I for the implicit differentiation
166        let mut q = vec![vec![0.0; n]; n];
167        for i in 0..n {
168            q[i][i] = config.regularization;
169        }
170
171        // Use active-set implicit differentiation
172        implicit_diff::compute_active_set_implicit_gradient(
173            &q,
174            &self.g,
175            &self.h,
176            &self.a_eq,
177            &result.optimal_x,
178            &result.optimal_lambda,
179            &result.optimal_nu,
180            dl_dx,
181            config.active_constraint_tol,
182        )
183    }
184}
185
#[cfg(test)]
mod tests {
    use super::*;

    /// Simple LP: min -x - y s.t. x + y <= 1, x >= 0, y >= 0.
    /// Optimal at (1, 0) or (0, 1) — both give obj = -1.
    /// With regularization, the solution tends toward (0.5, 0.5),
    /// which still satisfies x + y ≈ 1 and obj ≈ -1.
    #[test]
    fn test_lp_forward_simple() {
        let lp = DifferentiableLP::new(
            vec![-1.0, -1.0],
            vec![],
            vec![],
            vec![
                vec![1.0, 1.0],  // x + y <= 1
                vec![-1.0, 0.0], // -x <= 0
                vec![0.0, -1.0], // -y <= 0
            ],
            vec![1.0, 0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        assert!(result.converged, "LP should converge");
        // With regularization, x+y should be close to 1.
        let sum: f64 = result.optimal_x.iter().sum();
        assert!((sum - 1.0).abs() < 0.1, "x+y = {} (expected ~1.0)", sum);
        // Objective should be close to -1.
        assert!(
            (result.objective - (-1.0)).abs() < 0.1,
            "obj = {} (expected ~-1.0)",
            result.objective
        );
    }

    /// LP with equality constraint: min -x s.t. x + y = 1, x >= 0, y >= 0.
    /// Optimal at x=1, y=0.
    #[test]
    fn test_lp_with_equality() {
        let lp = DifferentiableLP::new(
            vec![-1.0, 0.0],
            vec![vec![1.0, 1.0]],
            vec![1.0],
            vec![
                vec![-1.0, 0.0], // -x <= 0
                vec![0.0, -1.0], // -y <= 0
            ],
            vec![0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        assert!(result.converged);
        assert!(
            (result.optimal_x[0] - 1.0).abs() < 0.1,
            "x = {} (expected ~1.0)",
            result.optimal_x[0]
        );
    }

    /// Backward pass on the simple LP: the gradient w.r.t. c must be
    /// well-defined (finite) and have the right dimension.
    #[test]
    fn test_lp_backward() {
        let lp = DifferentiableLP::new(
            vec![-1.0, -1.0],
            vec![],
            vec![],
            vec![vec![1.0, 1.0], vec![-1.0, 0.0], vec![0.0, -1.0]],
            vec![1.0, 0.0, 0.0],
        )
        .expect("LP creation failed");

        let config = DiffLPConfig::default();
        let result = lp.forward(&config).expect("Forward failed");

        let dl_dx = vec![1.0, 1.0];
        let grad = lp
            .backward(&result, &dl_dx, &config)
            .expect("Backward failed");

        // Gradient should be finite and non-empty.
        assert_eq!(grad.dl_dc.len(), 2);
        assert!(grad.dl_dc[0].is_finite());
        assert!(grad.dl_dc[1].is_finite());
    }

    /// Constructor must reject a constraint row whose length differs
    /// from the dimension of c.
    #[test]
    fn test_lp_dimension_validation() {
        let result = DifferentiableLP::new(
            vec![1.0, 2.0],
            vec![vec![1.0]], // wrong dimension
            vec![1.0],
            vec![],
            vec![],
        );
        assert!(result.is_err());
    }
}