oxilean_std/convex_optimization/functions.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
6
7use super::types::{
8    BundleMethodSolver, CuttingPlaneSolver, FISTASolver, GeometricProgramSolver, GradientDescent,
9    L1NormFunction, MirrorDescentSolver, OnlineLearner, ProjectedGradient, ProximalGradientSolver,
10    QuadraticFunction, RipVerifier, SDPRelaxation, SinkhornSolver, ADMM,
11};
12
13pub fn app(f: Expr, a: Expr) -> Expr {
14    Expr::App(Box::new(f), Box::new(a))
15}
16pub fn app2(f: Expr, a: Expr, b: Expr) -> Expr {
17    app(app(f, a), b)
18}
19pub fn app3(f: Expr, a: Expr, b: Expr, c: Expr) -> Expr {
20    app(app2(f, a, b), c)
21}
22pub fn cst(s: &str) -> Expr {
23    Expr::Const(Name::str(s), vec![])
24}
25pub fn prop() -> Expr {
26    Expr::Sort(Level::zero())
27}
28pub fn type0() -> Expr {
29    Expr::Sort(Level::succ(Level::zero()))
30}
31pub fn pi(bi: BinderInfo, name: &str, dom: Expr, body: Expr) -> Expr {
32    Expr::Pi(bi, Name::str(name), Box::new(dom), Box::new(body))
33}
34pub fn arrow(a: Expr, b: Expr) -> Expr {
35    pi(BinderInfo::Default, "_", a, b)
36}
37pub fn nat_ty() -> Expr {
38    cst("Nat")
39}
40pub fn real_ty() -> Expr {
41    cst("Real")
42}
43pub fn list_ty(elem: Expr) -> Expr {
44    app(cst("List"), elem)
45}
46pub fn fn_ty(dom: Expr, cod: Expr) -> Expr {
47    arrow(dom, cod)
48}
49/// `ConvexSet : (List Real -> Prop) -> Prop`
50/// Predicate asserting a set (represented as a characteristic function) is convex.
51pub fn convex_set_ty() -> Expr {
52    arrow(fn_ty(list_ty(real_ty()), prop()), prop())
53}
54/// `ConvexFunction : (List Real -> Real) -> Prop`
55/// Predicate asserting f: ℝ^n → ℝ is a convex function.
56pub fn convex_function_ty() -> Expr {
57    arrow(fn_ty(list_ty(real_ty()), real_ty()), prop())
58}
59/// `KktConditions : (List Real -> Real) -> List (List Real -> Real) -> List Real -> Prop`
60/// KKT optimality conditions for a constrained convex program.
61pub fn kkt_conditions_ty() -> Expr {
62    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
63    let list_rn_to_r = list_ty(rn_to_r.clone());
64    arrow(
65        rn_to_r,
66        arrow(list_rn_to_r, arrow(list_ty(real_ty()), prop())),
67    )
68}
69/// `Lagrangian : (List Real -> Real) -> List (List Real -> Real) -> List Real -> List Real -> Real`
70/// Lagrangian function L(x, λ) = f(x) + Σ λ_i g_i(x).
71pub fn lagrangian_ty() -> Expr {
72    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
73    let list_rn_to_r = list_ty(rn_to_r.clone());
74    arrow(
75        rn_to_r,
76        arrow(
77            list_rn_to_r,
78            arrow(list_ty(real_ty()), arrow(list_ty(real_ty()), real_ty())),
79        ),
80    )
81}
82/// `StrongDuality : Prop`
83/// Slater's condition implies strong duality: primal optimal = dual optimal.
84pub fn strong_duality_ty() -> Expr {
85    prop()
86}
87/// `ProjectionTheorem : Prop`
88/// Every nonempty closed convex set has a unique nearest point (projection).
89pub fn projection_theorem_ty() -> Expr {
90    prop()
91}
92/// `SupportingHyperplane : Prop`
93/// At every boundary point of a convex set there exists a supporting hyperplane.
94pub fn supporting_hyperplane_ty() -> Expr {
95    prop()
96}
97/// `JensenInequality : (List Real -> Real) -> Prop`
98/// For convex f: f(E[X]) ≤ E[f(X)].
99pub fn jensen_inequality_ty() -> Expr {
100    arrow(fn_ty(list_ty(real_ty()), real_ty()), prop())
101}
102/// `SlaterCondition : Prop`
103/// Strong duality holds for convex programs satisfying Slater's condition.
104pub fn slater_condition_ty() -> Expr {
105    prop()
106}
107/// `FenchelConjugate : (List Real -> Real) -> List Real -> Real`
108/// The Fenchel conjugate f*(y) = sup_x { <y,x> - f(x) }.
109pub fn fenchel_conjugate_ty() -> Expr {
110    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
111    arrow(rn_to_r, fn_ty(list_ty(real_ty()), real_ty()))
112}
113/// `FenchelRockafellarDuality : Prop`
114/// inf f(x) + g(Ax) = -inf f*(-A^T y) + g*(y) under qualification.
115pub fn fenchel_rockafellar_duality_ty() -> Expr {
116    prop()
117}
118/// `ConjugateSubgradient : (List Real -> Real) -> List Real -> List Real -> Prop`
119/// y ∈ ∂f*(x*) iff x* ∈ ∂f(y) (Fenchel–Young equality condition).
120pub fn conjugate_subgradient_ty() -> Expr {
121    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
122    let lr = list_ty(real_ty());
123    arrow(rn_to_r, arrow(lr.clone(), arrow(lr, prop())))
124}
125/// `LagrangianDualFunction : (List Real -> Real) -> List (List Real -> Real) -> List Real -> Real`
126/// Dual function q(λ) = inf_x L(x, λ).
127pub fn lagrangian_dual_function_ty() -> Expr {
128    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
129    let list_rn_to_r = list_ty(rn_to_r.clone());
130    arrow(
131        rn_to_r,
132        arrow(list_rn_to_r, fn_ty(list_ty(real_ty()), real_ty())),
133    )
134}
135/// `LinearIndependenceConstraintQualification : Prop`
136/// LICQ: gradients of active constraints are linearly independent at x*.
137pub fn licq_ty() -> Expr {
138    prop()
139}
140/// `MangasarianFromovitzCQ : Prop`
141/// MFCQ constraint qualification weaker than LICQ.
142pub fn mfcq_ty() -> Expr {
143    prop()
144}
145/// `ComplementarySlackness : List Real -> List Real -> Prop`
146/// λ_i g_i(x) = 0 for all i (complementary slackness conditions).
147pub fn complementary_slackness_ty() -> Expr {
148    let lr = list_ty(real_ty());
149    arrow(lr.clone(), arrow(lr, prop()))
150}
151/// `KktSufficiency : Prop`
152/// Under convexity, KKT conditions are sufficient for global optimality.
153pub fn kkt_sufficiency_ty() -> Expr {
154    prop()
155}
156/// `BarrierFunction : (List Real -> Prop) -> (List Real -> Real) -> Prop`
157/// A barrier function for a convex set: diverges at boundary.
158pub fn barrier_function_ty() -> Expr {
159    let rn_to_prop = fn_ty(list_ty(real_ty()), prop());
160    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
161    arrow(rn_to_prop, arrow(rn_to_r, prop()))
162}
163/// `PathFollowingMethod : Prop`
164/// Central path: x*(t) minimizes t·f(x) + φ(x) for barrier φ.
165pub fn path_following_method_ty() -> Expr {
166    prop()
167}
168/// `PredictorCorrectorMethod : Prop`
169/// Mehrotra predictor-corrector interior point algorithm converges in O(√n·L) steps.
170pub fn predictor_corrector_method_ty() -> Expr {
171    prop()
172}
173/// `CentralPathConvergence : Real -> Prop`
174/// The central path converges to the primal-dual optimal as t → ∞.
175pub fn central_path_convergence_ty() -> Expr {
176    arrow(real_ty(), prop())
177}
178/// `PositiveSemidefinite : List (List Real) -> Prop`
179/// Predicate that a matrix (as list of rows) is positive semidefinite.
180pub fn positive_semidefinite_ty() -> Expr {
181    let mat = list_ty(list_ty(real_ty()));
182    arrow(mat, prop())
183}
184/// `SdpFeasibility : List (List (List Real)) -> List Real -> Prop`
185/// SDP feasibility: ∑ x_i A_i ⪰ 0 for given data matrices {A_i}.
186pub fn sdp_feasibility_ty() -> Expr {
187    let mat = list_ty(list_ty(real_ty()));
188    let mats = list_ty(mat);
189    let lr = list_ty(real_ty());
190    arrow(mats, arrow(lr, prop()))
191}
192/// `SdpDuality : Prop`
193/// SDP strong duality under Slater's condition (Alaoglu–Fan theorem).
194pub fn sdp_duality_ty() -> Expr {
195    prop()
196}
197/// `SdpOptimality : List (List (List Real)) -> List Real -> List Real -> Real -> Prop`
198/// SDP optimality certificate: primal X and dual y satisfy complementarity.
199pub fn sdp_optimality_ty() -> Expr {
200    let mat = list_ty(list_ty(real_ty()));
201    let mats = list_ty(mat);
202    let lr = list_ty(real_ty());
203    arrow(mats, arrow(lr.clone(), arrow(lr, arrow(real_ty(), prop()))))
204}
205/// `SecondOrderCone : List Real -> Real -> Prop`
206/// (x, t) ∈ SOC iff ‖x‖ ≤ t.
207pub fn second_order_cone_ty() -> Expr {
208    let lr = list_ty(real_ty());
209    arrow(lr, arrow(real_ty(), prop()))
210}
211/// `SocpFeasibility : Prop`
212/// SOCP feasibility: minimize c^T x subject to second-order cone constraints.
213pub fn socp_feasibility_ty() -> Expr {
214    prop()
215}
216/// `Monomial : List Real -> List Real -> Real -> Real`
217/// Monomial c · ∏ x_i^{a_i} in geometric programming.
218pub fn monomial_ty() -> Expr {
219    let lr = list_ty(real_ty());
220    arrow(lr.clone(), arrow(lr, arrow(real_ty(), real_ty())))
221}
222/// `Posynomial : List (List Real -> Real) -> List Real -> Real`
223/// Posynomial: sum of monomials evaluated at x.
224pub fn posynomial_ty() -> Expr {
225    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
226    let lr = list_ty(real_ty());
227    arrow(list_ty(rn_to_r), arrow(lr, real_ty()))
228}
229/// `GeometricProgramDuality : Prop`
230/// GP can be converted to convex form via log transformation.
231pub fn geometric_program_duality_ty() -> Expr {
232    prop()
233}
234/// `SmoothGradientConvergence : Real -> Real -> Nat -> Prop`
235/// GD with step 1/L: f(x_k) - f* ≤ L‖x_0-x*‖²/(2k) for L-smooth f.
236pub fn smooth_gradient_convergence_ty() -> Expr {
237    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
238}
239/// `StronglyConvexConvergence : Real -> Real -> Nat -> Prop`
240/// GD converges linearly for μ-strongly convex L-smooth functions.
241pub fn strongly_convex_convergence_ty() -> Expr {
242    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
243}
244/// `ProximalOperator : (List Real -> Real) -> Real -> List Real -> List Real`
245/// prox_{t·f}(v) = argmin_x { f(x) + 1/(2t) ‖x - v‖² }.
246pub fn proximal_operator_ty() -> Expr {
247    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
248    let lr = list_ty(real_ty());
249    arrow(rn_to_r, arrow(real_ty(), fn_ty(lr.clone(), lr)))
250}
251/// `IstaConvergence : Real -> Real -> Nat -> Prop`
252/// ISTA (proximal gradient): f(x_k) - f* ≤ ‖x_0-x*‖²/(2tk).
253pub fn ista_convergence_ty() -> Expr {
254    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
255}
256/// `FistaConvergence : Real -> Nat -> Prop`
257/// FISTA (accelerated proximal gradient): O(1/k²) convergence rate.
258pub fn fista_convergence_ty() -> Expr {
259    arrow(real_ty(), arrow(nat_ty(), prop()))
260}
261/// `ProximalGradientDescent : (List Real -> Real) -> (List Real -> Real) -> Prop`
262/// Proximal gradient: minimize f(x) + g(x), f smooth, g proxable.
263pub fn proximal_gradient_descent_ty() -> Expr {
264    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
265    arrow(rn_to_r.clone(), arrow(rn_to_r, prop()))
266}
267/// `DouglasRachfordSplitting : Prop`
268/// DR splitting converges for sum of two maximal monotone operators.
269pub fn douglas_rachford_splitting_ty() -> Expr {
270    prop()
271}
272/// `ChambollePockAlgorithm : Real -> Real -> Nat -> Prop`
273/// Chambolle-Pock primal-dual algorithm with step sizes τ, σ satisfying τσ‖K‖² < 1.
274pub fn chambolle_pock_ty() -> Expr {
275    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
276}
277/// `AugmentedLagrangian : (List Real -> Real) -> List (List Real -> Real) -> Real -> List Real -> List Real -> Real`
278/// L_ρ(x, λ) = f(x) + λ^T h(x) + (ρ/2)‖h(x)‖².
279pub fn augmented_lagrangian_ty() -> Expr {
280    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
281    let list_rn_to_r = list_ty(rn_to_r.clone());
282    arrow(
283        rn_to_r,
284        arrow(
285            list_rn_to_r,
286            arrow(
287                real_ty(),
288                arrow(list_ty(real_ty()), arrow(list_ty(real_ty()), real_ty())),
289            ),
290        ),
291    )
292}
293/// `AdmmConvergence : Real -> Prop`
294/// ADMM converges to optimal for convex problems with penalty ρ > 0.
295pub fn admm_convergence_ty() -> Expr {
296    arrow(real_ty(), prop())
297}
298/// `SupportingHyperplaneCut : (List Real -> Real) -> List Real -> List Real -> Real -> Prop`
299/// At x_k: f(x) ≥ f(x_k) + g_k^T (x - x_k) for subgradient g_k.
300pub fn supporting_hyperplane_cut_ty() -> Expr {
301    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
302    let lr = list_ty(real_ty());
303    arrow(
304        rn_to_r,
305        arrow(lr.clone(), arrow(lr, arrow(real_ty(), prop()))),
306    )
307}
308/// `KelleyMethod : Prop`
309/// Kelley's cutting-plane method converges for convex Lipschitz functions.
310pub fn kelley_method_ty() -> Expr {
311    prop()
312}
313/// `BundleMethodConvergence : Real -> Prop`
314/// Bundle method for nonsmooth optimization converges with tolerance ε.
315pub fn bundle_method_convergence_ty() -> Expr {
316    arrow(real_ty(), prop())
317}
318/// `EllipsoidMethodComplexity : Real -> Real -> Nat -> Prop`
319/// Ellipsoid method finds ε-optimal solution in O(n² log(R/ε)) iterations.
320pub fn ellipsoid_method_complexity_ty() -> Expr {
321    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
322}
323/// `CenterOfGravityMethod : Nat -> Prop`
324/// Center-of-gravity method reduces feasible set volume by (1-1/n) per step.
325pub fn center_of_gravity_method_ty() -> Expr {
326    arrow(nat_ty(), prop())
327}
328/// `SubgradientInequality : (List Real -> Real) -> List Real -> List Real -> Prop`
329/// g ∈ ∂f(x): f(y) ≥ f(x) + g^T (y - x) for all y.
330pub fn subgradient_inequality_ty() -> Expr {
331    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
332    let lr = list_ty(real_ty());
333    arrow(rn_to_r, arrow(lr.clone(), arrow(lr, prop())))
334}
335/// `SubgradientMethodConvergence : Real -> Real -> Nat -> Prop`
336/// Subgradient method with step t_k = R/(G√k): best f - f* ≤ RG/√k.
337pub fn subgradient_method_convergence_ty() -> Expr {
338    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
339}
340/// `PolyakStepsize : Real -> Real -> Prop`
341/// Polyak step size t_k = (f(x_k) - f*)/(‖g_k‖²) gives optimal convergence.
342pub fn polyak_stepsize_ty() -> Expr {
343    arrow(real_ty(), arrow(real_ty(), prop()))
344}
345/// `StochasticGradientDescent : Real -> Nat -> Prop`
346/// SGD with step η: E[f(x_k)] - f* = O(σ/(μ√k)) for strongly convex f.
347pub fn sgd_convergence_ty() -> Expr {
348    arrow(real_ty(), arrow(nat_ty(), prop()))
349}
350/// `SvrgConvergence : Real -> Real -> Nat -> Prop`
351/// SVRG: variance-reduced SGD with geometric convergence rate (1 - μη/2)^k.
352pub fn svrg_convergence_ty() -> Expr {
353    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
354}
355/// `SarahConvergence : Real -> Real -> Nat -> Prop`
356/// SARAH: stochastic recursive gradient with near-optimal convergence.
357pub fn sarah_convergence_ty() -> Expr {
358    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
359}
360/// `SpiderConvergence : Real -> Real -> Nat -> Prop`
361/// SPIDER: stochastic path-integrated differential estimator convergence.
362pub fn spider_convergence_ty() -> Expr {
363    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
364}
365/// `DcpAtomConvex : (List Real -> Real) -> Prop`
366/// An atom is declared convex in the DCP ruleset.
367pub fn dcp_atom_convex_ty() -> Expr {
368    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
369    arrow(rn_to_r, prop())
370}
371/// `DcpCompositionRule : Prop`
372/// DCP composition rules: convex nondecreasing ∘ convex = convex, etc.
373pub fn dcp_composition_rule_ty() -> Expr {
374    prop()
375}
376/// `DcpVerification : (List Real -> Real) -> Prop`
377/// An expression satisfies DCP rules and is thus provably convex.
378pub fn dcp_verification_ty() -> Expr {
379    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
380    arrow(rn_to_r, prop())
381}
382/// `SelfConcordantBarrier : (List Real -> Real) -> Real -> Prop`
383/// A function phi is nu-self-concordant: |D^3 phi(x)[h,h,h]| <= 2(D^2 phi(x)[h,h])^{3/2}.
384pub fn self_concordant_barrier_ty() -> Expr {
385    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
386    arrow(rn_to_r, arrow(real_ty(), prop()))
387}
388/// `SelfConcordantComplexity : Real -> Nat -> Prop`
389/// IPM with nu-self-concordant barrier terminates in O(sqrt(nu) * log(1/eps)) Newton steps.
390pub fn self_concordant_complexity_ty() -> Expr {
391    arrow(real_ty(), arrow(nat_ty(), prop()))
392}
393/// `LogarithmicBarrier : Nat -> (List Real -> Real) -> Prop`
394/// The logarithmic barrier -sum log(-g_i(x)) is self-concordant with parameter m.
395pub fn logarithmic_barrier_ty() -> Expr {
396    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
397    arrow(nat_ty(), arrow(rn_to_r, prop()))
398}
399/// `NewtonDecrement : (List Real -> Real) -> List Real -> Real`
400/// lambda(f,x)^2 = grad f(x)^T [Hess f(x)]^{-1} grad f(x), the Newton decrement squared.
401pub fn newton_decrement_ty() -> Expr {
402    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
403    let lr = list_ty(real_ty());
404    arrow(rn_to_r, arrow(lr, real_ty()))
405}
406/// `SdpSlaterCondition : Prop`
407/// SDP Slater: there exists strictly feasible X > 0 satisfying primal constraints.
408pub fn sdp_slater_condition_ty() -> Expr {
409    prop()
410}
411/// `SdpComplementarity : List (List Real) -> List (List Real) -> Prop`
412/// SDP complementarity: X >= 0, S >= 0, and XS = 0 at optimality.
413pub fn sdp_complementarity_ty() -> Expr {
414    let mat = list_ty(list_ty(real_ty()));
415    arrow(mat.clone(), arrow(mat, prop()))
416}
417/// `SdpDualityGap : List (List Real) -> List Real -> Real`
418/// Duality gap = tr(C X) - b^T y for primal X, dual y.
419pub fn sdp_duality_gap_ty() -> Expr {
420    let mat = list_ty(list_ty(real_ty()));
421    let lr = list_ty(real_ty());
422    arrow(mat, arrow(lr, real_ty()))
423}
424/// `LorentzCone : Nat -> Prop`
425/// The Lorentz (ice cream) cone L^n = {(x,t) : ||x|| <= t} in R^{n+1}.
426pub fn lorentz_cone_ty() -> Expr {
427    arrow(nat_ty(), prop())
428}
429/// `SocpDuality : Prop`
430/// SOCP strong duality: primal optimal = dual optimal under Slater's condition.
431pub fn socp_duality_ty() -> Expr {
432    prop()
433}
434/// `RotatedLorentzCone : List Real -> Real -> Real -> Prop`
435/// Rotated Lorentz cone: (x,y,z) with ||x||^2 <= 2yz, y,z >= 0.
436pub fn rotated_lorentz_cone_ty() -> Expr {
437    let lr = list_ty(real_ty());
438    arrow(lr, arrow(real_ty(), arrow(real_ty(), prop())))
439}
440/// `AdmmLinearConvergence : Real -> Real -> Nat -> Prop`
441/// ADMM with penalty rho converges linearly: ||x^k - x*|| <= C * r^k.
442pub fn admm_linear_convergence_ty() -> Expr {
443    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
444}
445/// `AdmmPrimalResidual : List Real -> List Real -> Real -> Prop`
446/// Primal residual ||Ax + Bz - c|| <= eps_pri at iteration k.
447pub fn admm_primal_residual_ty() -> Expr {
448    let lr = list_ty(real_ty());
449    arrow(lr.clone(), arrow(lr, arrow(real_ty(), prop())))
450}
451/// `AdmmDualResidual : List Real -> Real -> Prop`
452/// Dual residual rho * ||B^T(z^k - z^{k-1})|| <= eps_dual.
453pub fn admm_dual_residual_ty() -> Expr {
454    let lr = list_ty(real_ty());
455    arrow(lr, arrow(real_ty(), prop()))
456}
457/// `ProximalPointAlgorithm : (List Real -> Real) -> Real -> Nat -> Prop`
458/// PPA: x^{k+1} = prox_{t f}(x^k) converges to argmin f.
459pub fn proximal_point_algorithm_ty() -> Expr {
460    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
461    arrow(rn_to_r, arrow(real_ty(), arrow(nat_ty(), prop())))
462}
463/// `ResolventOperator : (List Real -> List Real -> Prop) -> Real -> List Real -> List Real`
464/// Resolvent J_{t A}(x) = (I + tA)^{-1}(x) of a monotone operator A.
465pub fn resolvent_operator_ty() -> Expr {
466    let rn_rn_prop = fn_ty(list_ty(real_ty()), fn_ty(list_ty(real_ty()), prop()));
467    let lr = list_ty(real_ty());
468    arrow(rn_rn_prop, arrow(real_ty(), fn_ty(lr.clone(), lr)))
469}
470/// `FirmlyNonexpansive : (List Real -> List Real) -> Prop`
471/// T is firmly nonexpansive: ||Tx - Ty||^2 <= <Tx - Ty, x - y>.
472pub fn firmly_nonexpansive_ty() -> Expr {
473    let lr = list_ty(real_ty());
474    let rn_to_rn = fn_ty(lr.clone(), lr);
475    arrow(rn_to_rn, prop())
476}
477/// `BregmanDivergence : (List Real -> Real) -> List Real -> List Real -> Real`
478/// D_h(x,y) = h(x) - h(y) - <grad h(y), x-y> for strictly convex h.
479pub fn bregman_divergence_ty() -> Expr {
480    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
481    let lr = list_ty(real_ty());
482    arrow(rn_to_r, arrow(lr.clone(), arrow(lr, real_ty())))
483}
484/// `MirrorDescentConvergence : Real -> Real -> Nat -> Prop`
485/// Mirror descent: sum_{t=1}^{T} f(x_t) - f(x*) <= D_h(x*,x_1)/eta + eta*sum||grad f||^2_*.
486pub fn mirror_descent_convergence_ty() -> Expr {
487    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
488}
489/// `NegativeEntropyFunction : List Real -> Real`
490/// Negative entropy: h(x) = sum x_i log(x_i), used as Bregman generating function.
491pub fn negative_entropy_function_ty() -> Expr {
492    let lr = list_ty(real_ty());
493    fn_ty(lr, real_ty())
494}
495/// `ExponentialWeightsAlgorithm : Real -> Nat -> Prop`
496/// EWA/Hedge: mirror descent with negative entropy achieves O(sqrt(T log n)) regret.
497pub fn exponential_weights_algorithm_ty() -> Expr {
498    arrow(real_ty(), arrow(nat_ty(), prop()))
499}
500/// `SagaConvergence : Real -> Real -> Nat -> Prop`
501/// SAGA: variance-reduced SGD with O(1/k) convergence for non-strongly-convex.
502pub fn saga_convergence_ty() -> Expr {
503    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
504}
505/// `AdamConvergence : Real -> Real -> Real -> Nat -> Prop`
506/// Adam optimizer: adaptive moment estimation with O(1/sqrt(T)) regret bound.
507pub fn adam_convergence_ty() -> Expr {
508    arrow(
509        real_ty(),
510        arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop()))),
511    )
512}
513/// `MomentumSgd : Real -> Real -> Nat -> Prop`
514/// SGD with momentum (heavy ball): beta-momentum improves convergence constant.
515pub fn momentum_sgd_ty() -> Expr {
516    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
517}
518/// `MaximalMonotoneOperator : (List Real -> List (List Real)) -> Prop`
519/// A set-valued operator A: R^n -> 2^{R^n} is maximal monotone.
520pub fn maximal_monotone_operator_ty() -> Expr {
521    let lr = list_ty(real_ty());
522    let set_valued = fn_ty(lr.clone(), list_ty(lr));
523    arrow(set_valued, prop())
524}
525/// `MonotoneInclusionProblem : (List Real -> List (List Real)) -> List Real -> Prop`
526/// Find x* such that 0 in A(x*) for maximal monotone operator A.
527pub fn monotone_inclusion_problem_ty() -> Expr {
528    let lr = list_ty(real_ty());
529    let set_valued = fn_ty(lr.clone(), list_ty(lr.clone()));
530    arrow(set_valued, arrow(lr, prop()))
531}
532/// `SplittingConvergence : Prop`
533/// Operator splitting methods (DR, FBS, etc.) converge for sum of monotone operators.
534pub fn splitting_convergence_ty() -> Expr {
535    prop()
536}
537/// `KantorovichProblem : (List Real -> List Real -> Real) -> List Real -> List Real -> Real`
538/// Kantorovich: min_{gamma in Pi(mu,nu)} integral c(x,y) d_gamma(x,y) over transport plans.
539pub fn kantorovich_problem_ty() -> Expr {
540    let lr = list_ty(real_ty());
541    let cost = fn_ty(lr.clone(), fn_ty(lr.clone(), real_ty()));
542    arrow(cost, arrow(lr.clone(), arrow(lr, real_ty())))
543}
544/// `KantorovichDuality : Prop`
545/// min_{gamma} integral c d_gamma = max_{phi,psi: phi(x)+psi(y)<=c(x,y)} integral phi d_mu + integral psi d_nu.
546pub fn kantorovich_duality_ty() -> Expr {
547    prop()
548}
549/// `WassersteinDistance : Real -> List Real -> List Real -> Real`
550/// W_p(mu,nu) = (min_{gamma in Pi(mu,nu)} integral ||x-y||^p d_gamma)^{1/p}.
551pub fn wasserstein_distance_ty() -> Expr {
552    let lr = list_ty(real_ty());
553    arrow(real_ty(), arrow(lr.clone(), arrow(lr, real_ty())))
554}
555/// `SinkhornAlgorithm : Real -> Nat -> Prop`
556/// Sinkhorn: entropic regularization gives O(n^2/eps^2) algorithm for OT.
557pub fn sinkhorn_algorithm_ty() -> Expr {
558    arrow(real_ty(), arrow(nat_ty(), prop()))
559}
560/// `RestrictedIsometryProperty : Nat -> Real -> List (List Real) -> Prop`
561/// RIP-s: (1-delta)||x||^2 <= ||Ax||^2 <= (1+delta)||x||^2 for all s-sparse x.
562pub fn restricted_isometry_property_ty() -> Expr {
563    let mat = list_ty(list_ty(real_ty()));
564    arrow(nat_ty(), arrow(real_ty(), arrow(mat, prop())))
565}
566/// `BasisPursuitRecovery : List (List Real) -> List Real -> Nat -> Prop`
567/// If A satisfies RIP-{2s}, basis pursuit recovers s-sparse x from Ax = b.
568pub fn basis_pursuit_recovery_ty() -> Expr {
569    let mat = list_ty(list_ty(real_ty()));
570    let lr = list_ty(real_ty());
571    arrow(mat, arrow(lr, arrow(nat_ty(), prop())))
572}
573/// `LassoSparsity : Real -> Nat -> Prop`
574/// LASSO with appropriate lambda recovers s-sparse signal with O(s log p / n) samples.
575pub fn lasso_sparsity_ty() -> Expr {
576    arrow(real_ty(), arrow(nat_ty(), prop()))
577}
578/// `NuclearNorm : List (List Real) -> Real`
579/// ||X||_* = sum sigma_i(X), the nuclear norm (sum of singular values).
580pub fn nuclear_norm_ty() -> Expr {
581    let mat = list_ty(list_ty(real_ty()));
582    fn_ty(mat, real_ty())
583}
584/// `MatrixCompletionTheorem : Nat -> Nat -> Nat -> Real -> Prop`
585/// If rank-r matrix M satisfies incoherence, nuclear norm minimization recovers M
586/// from O(r n log n) entries.
587pub fn matrix_completion_theorem_ty() -> Expr {
588    arrow(
589        nat_ty(),
590        arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop()))),
591    )
592}
593/// `RobustPca : List (List Real) -> List (List Real) -> List (List Real) -> Prop`
594/// Robust PCA: decompose M = L + S, L low-rank, S sparse, via PCP convex program.
595pub fn robust_pca_ty() -> Expr {
596    let mat = list_ty(list_ty(real_ty()));
597    arrow(mat.clone(), arrow(mat.clone(), arrow(mat, prop())))
598}
599/// `ChanceConstraint : (List Real -> Prop) -> Real -> Prop`
600/// P(g(x) <= 0) >= 1 - eps: probabilistic constraint with confidence 1-eps.
601pub fn chance_constraint_ty() -> Expr {
602    let rn_prop = fn_ty(list_ty(real_ty()), prop());
603    arrow(rn_prop, arrow(real_ty(), prop()))
604}
605/// `DistributionallyRobustObjective : (List Real -> Real) -> Real -> Prop`
606/// DRO: min_x max_{P in U} E_P[f(x,xi)] over ambiguity set U.
607pub fn distributionally_robust_objective_ty() -> Expr {
608    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
609    arrow(rn_to_r, arrow(real_ty(), prop()))
610}
611/// `WassersteinAmbiguitySet : List Real -> Real -> Prop`
612/// Wasserstein ball of radius rho around empirical distribution P_N.
613pub fn wasserstein_ambiguity_set_ty() -> Expr {
614    let lr = list_ty(real_ty());
615    arrow(lr, arrow(real_ty(), prop()))
616}
617/// `CvarConstraint : (List Real -> Real) -> Real -> Real -> Prop`
618/// Conditional value-at-risk CVaR_alpha(f(x,xi)) <= t is a convex constraint in x, t.
619pub fn cvar_constraint_ty() -> Expr {
620    let rn_to_r = fn_ty(list_ty(real_ty()), real_ty());
621    arrow(rn_to_r, arrow(real_ty(), arrow(real_ty(), prop())))
622}
623/// `OnlineConvexOptimization : Nat -> Real -> Prop`
624/// OCO regret bound: sum_{t=1}^T f_t(x_t) - min_x sum f_t(x) <= R_T.
625pub fn online_convex_optimization_ty() -> Expr {
626    arrow(nat_ty(), arrow(real_ty(), prop()))
627}
628/// `FtrlRegretBound : Real -> Nat -> Prop`
629/// FTRL achieves O(sqrt(T)) regret for convex losses with regularizer.
630pub fn ftrl_regret_bound_ty() -> Expr {
631    arrow(real_ty(), arrow(nat_ty(), prop()))
632}
633/// `AdaptiveRegretBound : Real -> Real -> Nat -> Prop`
634/// Adaptive online algorithms achieve tighter regret via local norms.
635pub fn adaptive_regret_bound_ty() -> Expr {
636    arrow(real_ty(), arrow(real_ty(), arrow(nat_ty(), prop())))
637}
638/// `OnlineGradientDescentRegret : Real -> Nat -> Prop`
639/// OGD regret: O(G*R*sqrt(T)) where G is gradient bound, R is domain diameter.
640pub fn online_gradient_descent_regret_ty() -> Expr {
641    arrow(real_ty(), arrow(nat_ty(), prop()))
642}
643/// Build an [`Environment`] containing convex optimization axioms and theorems.
644pub fn build_convex_optimization_env() -> Environment {
645    let mut env = Environment::new();
646    let axioms: &[(&str, Expr)] = &[
647        ("ConvexSet", convex_set_ty()),
648        ("ConvexFunction", convex_function_ty()),
649        ("KktConditions", kkt_conditions_ty()),
650        ("Lagrangian", lagrangian_ty()),
651        ("StrongDuality", strong_duality_ty()),
652        ("projection_theorem", projection_theorem_ty()),
653        ("supporting_hyperplane", supporting_hyperplane_ty()),
654        ("jensen_inequality", jensen_inequality_ty()),
655        ("slater_condition", slater_condition_ty()),
656        ("ConvexProgram", prop()),
657        ("DualProgram", prop()),
658        ("OptimalityGap", arrow(real_ty(), prop())),
659        ("FenchelConjugate", fenchel_conjugate_ty()),
660        (
661            "FenchelRockafellarDuality",
662            fenchel_rockafellar_duality_ty(),
663        ),
664        ("ConjugateSubgradient", conjugate_subgradient_ty()),
665        ("LagrangianDualFunction", lagrangian_dual_function_ty()),
666        ("LinearIndependenceCQ", licq_ty()),
667        ("MangasarianFromovitzCQ", mfcq_ty()),
668        ("ComplementarySlackness", complementary_slackness_ty()),
669        ("KktSufficiency", kkt_sufficiency_ty()),
670        ("BarrierFunction", barrier_function_ty()),
671        ("PathFollowingMethod", path_following_method_ty()),
672        ("PredictorCorrectorMethod", predictor_corrector_method_ty()),
673        ("CentralPathConvergence", central_path_convergence_ty()),
674        ("PositiveSemidefinite", positive_semidefinite_ty()),
675        ("SdpFeasibility", sdp_feasibility_ty()),
676        ("SdpDuality", sdp_duality_ty()),
677        ("SdpOptimality", sdp_optimality_ty()),
678        ("SecondOrderCone", second_order_cone_ty()),
679        ("SocpFeasibility", socp_feasibility_ty()),
680        ("Monomial", monomial_ty()),
681        ("Posynomial", posynomial_ty()),
682        ("GeometricProgramDuality", geometric_program_duality_ty()),
683        (
684            "SmoothGradientConvergence",
685            smooth_gradient_convergence_ty(),
686        ),
687        (
688            "StronglyConvexConvergence",
689            strongly_convex_convergence_ty(),
690        ),
691        ("ProximalOperator", proximal_operator_ty()),
692        ("IstaConvergence", ista_convergence_ty()),
693        ("FistaConvergence", fista_convergence_ty()),
694        ("ProximalGradientDescent", proximal_gradient_descent_ty()),
695        ("DouglasRachfordSplitting", douglas_rachford_splitting_ty()),
696        ("ChambollePockAlgorithm", chambolle_pock_ty()),
697        ("AugmentedLagrangian", augmented_lagrangian_ty()),
698        ("AdmmConvergence", admm_convergence_ty()),
699        ("SupportingHyperplaneCut", supporting_hyperplane_cut_ty()),
700        ("KelleyMethod", kelley_method_ty()),
701        ("BundleMethodConvergence", bundle_method_convergence_ty()),
702        (
703            "EllipsoidMethodComplexity",
704            ellipsoid_method_complexity_ty(),
705        ),
706        ("CenterOfGravityMethod", center_of_gravity_method_ty()),
707        ("SubgradientInequality", subgradient_inequality_ty()),
708        (
709            "SubgradientMethodConvergence",
710            subgradient_method_convergence_ty(),
711        ),
712        ("PolyakStepsize", polyak_stepsize_ty()),
713        ("SgdConvergence", sgd_convergence_ty()),
714        ("SvrgConvergence", svrg_convergence_ty()),
715        ("SarahConvergence", sarah_convergence_ty()),
716        ("SpiderConvergence", spider_convergence_ty()),
717        ("DcpAtomConvex", dcp_atom_convex_ty()),
718        ("DcpCompositionRule", dcp_composition_rule_ty()),
719        ("DcpVerification", dcp_verification_ty()),
720        ("SelfConcordantBarrier", self_concordant_barrier_ty()),
721        ("SelfConcordantComplexity", self_concordant_complexity_ty()),
722        ("LogarithmicBarrier", logarithmic_barrier_ty()),
723        ("NewtonDecrement", newton_decrement_ty()),
724        ("SdpSlaterCondition", sdp_slater_condition_ty()),
725        ("SdpComplementarity", sdp_complementarity_ty()),
726        ("SdpDualityGap", sdp_duality_gap_ty()),
727        ("LorentzCone", lorentz_cone_ty()),
728        ("SocpDuality", socp_duality_ty()),
729        ("RotatedLorentzCone", rotated_lorentz_cone_ty()),
730        ("AdmmLinearConvergence", admm_linear_convergence_ty()),
731        ("AdmmPrimalResidual", admm_primal_residual_ty()),
732        ("AdmmDualResidual", admm_dual_residual_ty()),
733        ("ProximalPointAlgorithm", proximal_point_algorithm_ty()),
734        ("ResolventOperator", resolvent_operator_ty()),
735        ("FirmlyNonexpansive", firmly_nonexpansive_ty()),
736        ("BregmanDivergence", bregman_divergence_ty()),
737        ("MirrorDescentConvergence", mirror_descent_convergence_ty()),
738        ("NegativeEntropyFunction", negative_entropy_function_ty()),
739        (
740            "ExponentialWeightsAlgorithm",
741            exponential_weights_algorithm_ty(),
742        ),
743        ("SagaConvergence", saga_convergence_ty()),
744        ("AdamConvergence", adam_convergence_ty()),
745        ("MomentumSgd", momentum_sgd_ty()),
746        ("MaximalMonotoneOperator", maximal_monotone_operator_ty()),
747        ("MonotoneInclusionProblem", monotone_inclusion_problem_ty()),
748        ("SplittingConvergence", splitting_convergence_ty()),
749        ("KantorovichProblem", kantorovich_problem_ty()),
750        ("KantorovichDuality", kantorovich_duality_ty()),
751        ("WassersteinDistance", wasserstein_distance_ty()),
752        ("SinkhornAlgorithm", sinkhorn_algorithm_ty()),
753        (
754            "RestrictedIsometryProperty",
755            restricted_isometry_property_ty(),
756        ),
757        ("BasisPursuitRecovery", basis_pursuit_recovery_ty()),
758        ("LassoSparsity", lasso_sparsity_ty()),
759        ("NuclearNorm", nuclear_norm_ty()),
760        ("MatrixCompletionTheorem", matrix_completion_theorem_ty()),
761        ("RobustPca", robust_pca_ty()),
762        ("ChanceConstraint", chance_constraint_ty()),
763        (
764            "DistributionallyRobustObjective",
765            distributionally_robust_objective_ty(),
766        ),
767        ("WassersteinAmbiguitySet", wasserstein_ambiguity_set_ty()),
768        ("CvarConstraint", cvar_constraint_ty()),
769        ("OnlineConvexOptimization", online_convex_optimization_ty()),
770        ("FtrlRegretBound", ftrl_regret_bound_ty()),
771        ("AdaptiveRegretBound", adaptive_regret_bound_ty()),
772        (
773            "OnlineGradientDescentRegret",
774            online_gradient_descent_regret_ty(),
775        ),
776    ];
777    for (name, ty) in axioms {
778        env.add(Declaration::Axiom {
779            name: Name::str(*name),
780            univ_params: vec![],
781            ty: ty.clone(),
782        })
783        .ok();
784    }
785    env
786}
/// Trait for convex functions over ℝ^n.
///
/// Provides the zeroth-order (`eval`) and first-order (`gradient`)
/// oracles consumed by the solvers in this module (`GradientDescent`,
/// `ProjectedGradient`, `FISTASolver`, `ProximalGradientSolver`, …).
pub trait ConvexFunction {
    /// Evaluate f(x).
    fn eval(&self, x: &[f64]) -> f64;
    /// Compute the gradient ∇f(x).
    ///
    /// NOTE(review): implementations exercised by the tests below return
    /// one entry per coordinate of `x` — confirm as a trait-wide invariant.
    fn gradient(&self, x: &[f64]) -> Vec<f64>;
    /// Return `true` if f is strongly convex.
    fn is_strongly_convex(&self) -> bool;
}
/// Trait for functions with a computable proximal operator.
///
/// Used as the (possibly non-smooth) regularizer term by the proximal
/// splitting solvers (`FISTASolver`, `ProximalGradientSolver`); e.g. for
/// the ℓ1 norm the prox is soft-thresholding.
pub trait ProxableFunction: ConvexFunction {
    /// prox_{t·self}(v) = argmin_x { self(x) + 1/(2t) ‖x-v‖² }.
    ///
    /// `t` is the proximal step size (assumed positive — TODO confirm
    /// whether implementations guard against t ≤ 0).
    fn prox(&self, v: &[f64], t: f64) -> Vec<f64>;
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The 2×2 identity matrix, shared by several quadratic/PSD tests.
    fn eye2() -> Vec<Vec<f64>> {
        vec![vec![1.0, 0.0], vec![0.0, 1.0]]
    }

    #[test]
    fn test_quadratic_eval_origin() {
        // ½·xᵀQx with x = 0 must evaluate to 0.
        let f = QuadraticFunction::new(eye2(), vec![0.0, 0.0], 0.0);
        assert!(f.eval(&[0.0, 0.0]).abs() < 1e-12);
    }

    #[test]
    fn test_quadratic_eval_nonzero() {
        let f = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        assert!((f.eval(&[2.0]) - 4.0).abs() < 1e-9);
    }

    #[test]
    fn test_quadratic_gradient() {
        // With Q = I and b = 0 the gradient is x itself.
        let f = QuadraticFunction::new(eye2(), vec![0.0, 0.0], 0.0);
        let grad = f.gradient(&[3.0, -1.0]);
        assert!((grad[0] - 3.0).abs() < 1e-9);
        assert!((grad[1] + 1.0).abs() < 1e-9);
    }

    #[test]
    fn test_strongly_convex() {
        let hessian = vec![vec![2.0, 0.0], vec![0.0, 3.0]];
        let f = QuadraticFunction::new(hessian, vec![0.0, 0.0], 0.0);
        assert!(f.is_strongly_convex());
    }

    #[test]
    fn test_gradient_descent_minimizes_quadratic() {
        let f = QuadraticFunction::new(eye2(), vec![0.0, 0.0], 0.0);
        let solver = GradientDescent::new(0.1, 500, 1e-6);
        let (x, fval, _iters) = solver.minimize(&f, &[5.0, -3.0]);
        assert!(fval < 1e-6, "fval={fval}");
        assert!(x[0].abs() < 1e-3);
        assert!(x[1].abs() < 1e-3);
    }

    #[test]
    fn test_projected_gradient_box_constraint() {
        // Unconstrained minimum is 0, but the box [1, 5] forces x* = 1.
        let f = QuadraticFunction::new(vec![vec![1.0]], vec![0.0], 0.0);
        let solver = ProjectedGradient::new(0.1, 300, 1e-6, vec![1.0], vec![5.0]);
        let (x, fval) = solver.minimize(&f, &[4.0]);
        assert!((x[0] - 1.0).abs() < 1e-3, "x={}", x[0]);
        assert!((fval - 0.5).abs() < 1e-3, "fval={fval}");
    }

    #[test]
    fn test_admm_solve_lasso_stub() {
        let design = eye2();
        let rhs = vec![1.0, 2.0];
        let solver = ADMM::new(1.0);
        let x = solver.solve_lasso(&design, &rhs, 0.1);
        assert_eq!(x.len(), 2);
        assert_eq!(x, vec![0.0, 0.0]);
    }

    #[test]
    fn test_build_convex_optimization_env() {
        let env = build_convex_optimization_env();
        for axiom in [
            "ConvexSet",
            "ConvexFunction",
            "KktConditions",
            "projection_theorem",
            "jensen_inequality",
            "FenchelConjugate",
            "FenchelRockafellarDuality",
            "PositiveSemidefinite",
            "SdpDuality",
            "FistaConvergence",
            "ProximalOperator",
            "DouglasRachfordSplitting",
            "ChambollePockAlgorithm",
            "EllipsoidMethodComplexity",
            "SvrgConvergence",
            "SpiderConvergence",
            "DcpVerification",
        ] {
            assert!(env.get(&Name::str(axiom)).is_some());
        }
    }

    #[test]
    fn test_l1_norm_prox_soft_threshold() {
        // Soft-thresholding with λ·t = 1: 3.0 → 2.0, |-0.5| < 1 → 0.
        let reg = L1NormFunction::new(1.0);
        let result = reg.prox(&[3.0, -0.5], 1.0);
        assert!((result[0] - 2.0).abs() < 1e-12, "result[0]={}", result[0]);
        assert!(result[1].abs() < 1e-12, "result[1]={}", result[1]);
    }

    #[test]
    fn test_fista_minimizes_smooth_quadratic() {
        let smooth = QuadraticFunction::new(vec![vec![1.0]], vec![0.0], 0.0);
        let reg = L1NormFunction::new(0.0);
        let fista = FISTASolver::new(1.0, 200, 1e-6);
        let (x, _iters) = fista.minimize(&smooth, &reg, &[5.0]);
        assert!(x[0].abs() < 1e-3, "x[0]={}", x[0]);
    }

    #[test]
    fn test_sdp_relaxation_psd_check() {
        assert!(SDPRelaxation::is_psd(&eye2()));
        let indefinite = vec![vec![-1.0, 0.0], vec![0.0, 1.0]];
        assert!(!SDPRelaxation::is_psd(&indefinite));
    }

    #[test]
    fn test_sdp_relaxation_solve_returns_bound() {
        let sdp = SDPRelaxation::new(
            vec![vec![2.0, 0.0], vec![0.0, 2.0]],
            vec![0.0, 0.0],
            vec![vec![1.0, 0.0]],
            vec![1.0],
        );
        assert!((sdp.solve() - 0.0).abs() < 1e-12);
    }

    #[test]
    fn test_geometric_program_log_sum_exp() {
        // Single monomial with zero coefficient: lse = 0 + 1·1 = 1.
        let monomials = vec![(0.0_f64, vec![1.0_f64])];
        let lse = GeometricProgramSolver::log_sum_exp_posynomial(&monomials, &[1.0]);
        assert!((lse - 1.0).abs() < 1e-9, "lse={lse}");
    }

    #[test]
    fn test_cutting_plane_minimizes_quadratic() {
        let objective = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        let solver = CuttingPlaneSolver::new(100, 1e-4, 2.0);
        let (_x, fval, _iters) = solver.minimize(&objective, &[3.0]);
        assert!(fval < 1.0, "fval={fval}");
    }

    #[test]
    fn test_bundle_method_minimizes_quadratic() {
        let objective = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        let solver = BundleMethodSolver::new(1.0, 0.5, 20, 200, 1e-5);
        let (_x, fval, _iters) = solver.minimize(&objective, &[4.0]);
        assert!(fval < 1.0, "fval={fval}");
    }

    #[test]
    fn test_new_axioms_in_env() {
        let env = build_convex_optimization_env();
        for axiom in [
            "SelfConcordantBarrier",
            "SelfConcordantComplexity",
            "LogarithmicBarrier",
            "NewtonDecrement",
            "SdpSlaterCondition",
            "SdpComplementarity",
            "SdpDualityGap",
            "LorentzCone",
            "SocpDuality",
            "RotatedLorentzCone",
            "AdmmLinearConvergence",
            "AdmmPrimalResidual",
            "AdmmDualResidual",
            "ProximalPointAlgorithm",
            "ResolventOperator",
            "FirmlyNonexpansive",
            "BregmanDivergence",
            "MirrorDescentConvergence",
            "NegativeEntropyFunction",
            "ExponentialWeightsAlgorithm",
            "SagaConvergence",
            "AdamConvergence",
            "MomentumSgd",
            "MaximalMonotoneOperator",
            "MonotoneInclusionProblem",
            "SplittingConvergence",
            "KantorovichProblem",
            "KantorovichDuality",
            "WassersteinDistance",
            "SinkhornAlgorithm",
            "RestrictedIsometryProperty",
            "BasisPursuitRecovery",
            "LassoSparsity",
            "NuclearNorm",
            "MatrixCompletionTheorem",
            "RobustPca",
            "ChanceConstraint",
            "DistributionallyRobustObjective",
            "WassersteinAmbiguitySet",
            "CvarConstraint",
            "OnlineConvexOptimization",
            "FtrlRegretBound",
            "AdaptiveRegretBound",
            "OnlineGradientDescentRegret",
        ] {
            assert!(env.get(&Name::str(axiom)).is_some());
        }
    }

    #[test]
    fn test_mirror_descent_project_simplex() {
        let point = vec![0.5, 0.5];
        let p = MirrorDescentSolver::project_simplex(&point);
        assert!((p.iter().sum::<f64>() - 1.0).abs() < 1e-9);
        assert!(p.iter().all(|&coord| coord >= 0.0));
    }

    #[test]
    fn test_mirror_descent_simplex_sum_to_one() {
        let point = vec![3.0, -1.0, 2.0];
        let p = MirrorDescentSolver::project_simplex(&point);
        assert!((p.iter().sum::<f64>() - 1.0).abs() < 1e-9);
        assert!(p.iter().all(|&coord| coord >= 0.0));
    }

    #[test]
    fn test_mirror_descent_bregman_kl_zero() {
        // D(x ‖ x) = 0 for the KL-induced Bregman divergence.
        let x = vec![0.25, 0.25, 0.5];
        let kl = MirrorDescentSolver::bregman_kl(&x, &x);
        assert!(kl.abs() < 1e-10, "kl={kl}");
    }

    #[test]
    fn test_mirror_descent_minimizes_quadratic() {
        let objective = QuadraticFunction::new(vec![vec![1.0]], vec![0.0], 0.0);
        let solver = MirrorDescentSolver::new(0.05, 500, 1e-6, false);
        let (_x, fval, _iters) = solver.minimize(&objective, &[3.0]);
        assert!(fval < 0.1, "fval={fval}");
    }

    #[test]
    fn test_proximal_gradient_ista_minimizes() {
        let smooth = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        let reg = L1NormFunction::new(0.0);
        let ista = ProximalGradientSolver::new(2.0, 300, 1e-7, false);
        let (x, _iters) = ista.minimize(&smooth, &reg, &[5.0]);
        assert!(x[0].abs() < 0.01, "x[0]={}", x[0]);
    }

    #[test]
    fn test_proximal_gradient_fista_accelerated() {
        // Acceleration should not be slower than plain ISTA (small slack).
        let smooth = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        let reg = L1NormFunction::new(0.0);
        let accelerated = ProximalGradientSolver::new(2.0, 200, 1e-7, true);
        let plain = ProximalGradientSolver::new(2.0, 200, 1e-7, false);
        let (x_fista, iters_fista) = accelerated.minimize(&smooth, &reg, &[5.0]);
        let (_x_ista, iters_ista) = plain.minimize(&smooth, &reg, &[5.0]);
        assert!(
            iters_fista <= iters_ista + 10,
            "fista={iters_fista}, ista={iters_ista}"
        );
        assert!(x_fista[0].abs() < 0.01, "x_fista={}", x_fista[0]);
    }

    #[test]
    fn test_proximal_gradient_estimate_lipschitz() {
        let f = QuadraticFunction::new(vec![vec![2.0]], vec![0.0], 0.0);
        let l_est = ProximalGradientSolver::estimate_lipschitz(&f, &[1.0], 1);
        assert!(l_est > 0.5, "L_est={l_est}");
    }

    #[test]
    fn test_sinkhorn_uniform_transport() {
        let mu = vec![0.5, 0.5];
        let nu = vec![0.5, 0.5];
        let cost = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
        let solver = SinkhornSolver::new(0.1, 200, 1e-8);
        let (gamma, w) = solver.solve(&mu, &nu, &cost);
        // The transport plan must be a probability measure on the grid.
        let total: f64 = gamma.iter().flat_map(|row| row.iter()).sum();
        assert!((total - 1.0).abs() < 1e-4, "total={total}");
        assert!(w >= 0.0, "w={w}");
    }

    #[test]
    fn test_sinkhorn_same_distribution() {
        let mu = vec![0.5, 0.5];
        let nu = vec![0.5, 0.5];
        let cost = vec![vec![0.0, 1.0], vec![1.0, 0.0]];
        let solver = SinkhornSolver::new(0.01, 500, 1e-10);
        let (_plan, w) = solver.solve(&mu, &nu, &cost);
        assert!(w < 0.6, "w={w}");
    }

    #[test]
    fn test_sinkhorn_wasserstein2_1d_zero() {
        let support = vec![0.0, 1.0];
        let weights = vec![0.5, 0.5];
        let w2 = SinkhornSolver::wasserstein2_1d(&support, &weights, &support, &weights);
        assert!(w2 < 0.2, "w2={w2}");
    }

    #[test]
    fn test_rip_identity_satisfies_rip() {
        // An orthonormal matrix preserves norms exactly, so δ ≈ 0.
        let verifier = RipVerifier::new(1, 10);
        let (_dl, du) = verifier.estimate_rip_constant(&eye2());
        assert!(du < 0.01, "delta_upper={du}");
    }

    #[test]
    fn test_rip_soft_threshold() {
        let input = vec![3.0, -0.5, 0.2];
        let result = RipVerifier::soft_threshold(&input, 1.0);
        assert!((result[0] - 2.0).abs() < 1e-12);
        assert!(result[1].abs() < 1e-12);
        assert!(result[2].abs() < 1e-12);
    }

    #[test]
    fn test_rip_satisfies_rip_check() {
        let verifier = RipVerifier::new(1, 5);
        assert!(verifier.satisfies_rip(&eye2(), 0.5));
    }

    #[test]
    fn test_online_learner_initial_decision() {
        let learner = OnlineLearner::new(0.1, 3);
        let decision = learner.current_decision();
        assert_eq!(decision.len(), 3);
        assert!(decision.iter().all(|&coord| coord.abs() < 1e-12));
    }

    #[test]
    fn test_online_learner_update_accumulates() {
        let mut learner = OnlineLearner::new(0.1, 2);
        learner.update(&[1.0, 0.0]);
        let x = learner.current_decision();
        // One gradient step of size 0.1 along the first coordinate.
        assert!((x[0] + 0.1).abs() < 1e-9, "x[0]={}", x[0]);
        assert!(x[1].abs() < 1e-9, "x[1]={}", x[1]);
    }

    #[test]
    fn test_online_learner_regret_bound_positive() {
        let mut learner = OnlineLearner::new(0.01, 2);
        for _round in 0..10 {
            learner.update(&[0.5, -0.5]);
        }
        let bound = learner.regret_bound(1.0, 1.0);
        assert!(bound >= 0.0, "bound={bound}");
    }

    #[test]
    fn test_online_learner_reset() {
        let mut learner = OnlineLearner::new(0.1, 2);
        learner.update(&[1.0, 2.0]);
        learner.reset();
        assert_eq!(learner.round, 0);
        assert!(learner
            .current_decision()
            .iter()
            .all(|&coord| coord.abs() < 1e-12));
    }
}