// scirs2_optimize/differentiable_optimization/mod.rs
1//! Differentiable optimization layers (OptNet-style LP/QP).
2//!
3//! This module implements differentiable quadratic and linear programming
4//! layers that can be embedded in gradient-based training pipelines. The
5//! backward pass uses implicit differentiation of the KKT conditions to
6//! compute gradients of the optimal solution w.r.t. all problem parameters.
7//!
8//! # Submodules
9//!
//! - [`kkt_sensitivity`]: KKT bordered matrix assembly and adjoint-method sensitivity.
11//! - [`qp_layer`]: ADMM-based QP layer with warm-start and active-set backward.
12//! - [`lp_layer`]: Entropic LP layer and basis sensitivity analysis.
13//! - [`perturbed_optimizer`]: Black-box differentiable combinatorial optimization.
14//! - [`implicit_diff`]: Core implicit differentiation engine.
15//! - [`combinatorial`]: SparseMAP, soft sort/rank (legacy entry points).
16//! - [`diff_qp`]: Interior-point differentiable QP.
//! - [`diff_lp`]: Differentiable LP (active-set based).
//! - [`layer`]: OptNet layer abstractions ([`OptNetLayer`], [`StandardOptNetLayer`]).
//! - [`types`]: Shared configuration, result, status, and gradient types.
18//!
19//! # References
20//! - Amos & Kolter (2017). "OptNet: Differentiable Optimization as a Layer
21//! in Neural Networks." ICML.
22//! - Berthet et al. (2020). "Learning with Differentiable Perturbed Optimizers." NeurIPS.
23//! - Niculae & Blondel (2017). "A regularized framework for sparse and structured
24//! neural attention." NeurIPS.
25
26pub mod combinatorial;
27pub mod diff_lp;
28pub mod diff_qp;
29pub mod implicit_diff;
30pub mod kkt_sensitivity;
31pub mod layer;
32pub mod lp_layer;
33pub mod perturbed_optimizer;
34pub mod qp_layer;
35pub mod types;
36
37pub use combinatorial::{
38 diff_topk, soft_rank, soft_sort, sparsemap, sparsemap_gradient,
39 PerturbedOptimizer as PerturbedOptimizerLegacy,
40 PerturbedOptimizerConfig as PerturbedOptimizerLegacyConfig, SparsemapConfig, SparsemapResult,
41 StructureType,
42};
43pub use diff_lp::DifferentiableLP;
44pub use diff_qp::DifferentiableQP;
45pub use kkt_sensitivity::{
46 kkt_matrix, kkt_sensitivity, mat_vec, outer_product, parametric_nlp_adjoint, regularize_q,
47 sym_outer_product, KktGrad, KktSystem, NlpGrad,
48};
49pub use layer::{OptNetLayer, StandardOptNetLayer};
50pub use lp_layer::{lp_gradient, lp_perturbed, LpLayer, LpLayerConfig, LpSensitivity};
51pub use perturbed_optimizer::{
52 PerturbedOptimizer, PerturbedOptimizerConfig, SparseMap, SparseMapConfig,
53};
54pub use qp_layer::{QpLayer, QpLayerConfig};
55pub use types::{
56 BackwardMode, DiffLPConfig, DiffLPResult, DiffOptGrad, DiffOptParams, DiffOptResult,
57 DiffOptStatus, DiffQPConfig, DiffQPResult, ImplicitGradient, KKTSystem,
58};