1use super::functions::*;
6use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
7
/// Recession cone `recess(C)` of a named convex set `C`.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct RecessionCone {
    pub set_name: String,
}
impl RecessionCone {
    /// Builds a recession-cone descriptor for the set named `set`.
    #[allow(dead_code)]
    pub fn new(set: &str) -> Self {
        let set_name = set.to_string();
        Self { set_name }
    }
    /// Textbook definition of the recession cone of this set.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let c = &self.set_name;
        format!("recess({}) = {{d : x + t*d in {} for all x in C, t >= 0}}", c, c)
    }
    /// A nonempty closed convex set is compact iff its recession cone is {0}.
    #[allow(dead_code)]
    pub fn compact_iff_trivial_recession(&self) -> bool {
        true
    }
}
/// A primal/dual problem pair together with its observed duality gap.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FenchelDualityPair {
    pub primal: String,
    pub dual: String,
    pub duality_gap: f64,
    pub strong_duality_holds: bool,
}
#[allow(dead_code)]
impl FenchelDualityPair {
    /// Records a pair; strong duality is declared when |gap| < 1e-12.
    pub fn new(primal: &str, dual: &str, gap: f64) -> Self {
        let zero_gap = gap.abs() < 1e-12;
        Self {
            primal: primal.to_string(),
            dual: dual.to_string(),
            duality_gap: gap,
            strong_duality_holds: zero_gap,
        }
    }
    /// Proxy used here: Slater's condition is identified with a zero gap.
    pub fn slater_condition_met(&self) -> bool {
        self.strong_duality_holds
    }
    /// Human-readable summary of the duality situation.
    pub fn duality_gap_description(&self) -> String {
        match self.strong_duality_holds {
            true => format!("Strong duality: val(P) = val(D) = {}", self.primal),
            false => format!("Duality gap = {:.6}", self.duality_gap),
        }
    }
}
/// Subdifferential of a named convex function at a given point.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Subdifferential {
    pub function_name: String,
    pub point: Vec<f64>,
}
impl Subdifferential {
    /// Attaches the function name and evaluation point.
    #[allow(dead_code)]
    pub fn new(f: &str, x: Vec<f64>) -> Self {
        Self {
            function_name: f.to_string(),
            point: x,
        }
    }
    /// Subgradient inequality characterizing membership in the subdifferential.
    #[allow(dead_code)]
    pub fn subgradient_description(&self) -> String {
        let pt = &self.point;
        format!("g in partial f({:?}): f(y) >= f(x) + <g, y-x> for all y", pt)
    }
    /// f is differentiable at x iff the subdifferential is a singleton.
    #[allow(dead_code)]
    pub fn is_differentiable_iff_singleton(&self) -> bool {
        true
    }
}
/// Step-size schedule for the subgradient method.
#[derive(Debug, Clone, Copy)]
pub enum StepSchedule {
    /// Fixed step eta_k = eta.
    Constant(f64),
    /// Diminishing steps eta_k = eta / sqrt(k + 1).
    DiminishingSqrt(f64),
    /// Polyak step eta_k = (f(x_k) - f_star) / ||g_k||^2; requires the optimal value f_star.
    Polyak { f_star: f64 },
}
/// Von Neumann alternating projections between two convex sets.
pub struct AlternatingProjectionSolver {
    /// Cap on the number of projection sweeps.
    pub max_iter: usize,
    /// Stop once the step length falls below this.
    pub tol: f64,
}
impl AlternatingProjectionSolver {
    /// Creates a solver with an iteration cap and a stopping tolerance.
    pub fn new(max_iter: usize, tol: f64) -> Self {
        Self { max_iter, tol }
    }
    /// Alternates `proj_a` then `proj_b` starting from `x0`, returning every
    /// iterate (including `x0`). Stops when one full sweep moves the point
    /// less than `tol` in Euclidean norm.
    pub fn run(
        &self,
        proj_a: impl Fn(&[f64]) -> Vec<f64>,
        proj_b: impl Fn(&[f64]) -> Vec<f64>,
        x0: &[f64],
    ) -> Vec<Vec<f64>> {
        let mut current = x0.to_vec();
        let mut trace = vec![current.clone()];
        for _ in 0..self.max_iter {
            let next = proj_b(&proj_a(&current));
            // Euclidean distance moved during this sweep.
            let moved = current
                .iter()
                .zip(next.iter())
                .map(|(a, b)| (a - b).powi(2))
                .sum::<f64>()
                .sqrt();
            current = next;
            trace.push(current.clone());
            if moved < self.tol {
                break;
            }
        }
        trace
    }
}
/// Metadata for a few standard convex cones.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ConvexCone {
    pub name: String,
    pub dimension: usize,
    pub is_pointed: bool,
    pub is_closed: bool,
}
impl ConvexCone {
    /// All cones in this catalogue are closed and pointed.
    #[allow(dead_code)]
    fn closed_pointed(name: String, dimension: usize) -> Self {
        Self {
            name,
            dimension,
            is_pointed: true,
            is_closed: true,
        }
    }
    /// Nonnegative orthant R^n_+.
    #[allow(dead_code)]
    pub fn nonneg_orthant(n: usize) -> Self {
        Self::closed_pointed(format!("R^{}+", n), n)
    }
    /// Second-order (Lorentz) cone, living in R^{n+1}.
    #[allow(dead_code)]
    pub fn second_order_cone(n: usize) -> Self {
        Self::closed_pointed(format!("SOC({}) = {{(x,t) : ||x|| <= t}}", n), n + 1)
    }
    /// Cone of n x n PSD matrices; intrinsic dimension n(n+1)/2.
    #[allow(dead_code)]
    pub fn psd_cone(n: usize) -> Self {
        Self::closed_pointed(format!("PSD({}) = S^{}+", n, n), n * (n + 1) / 2)
    }
    /// Support-style description of the dual cone C*.
    #[allow(dead_code)]
    pub fn dual_cone_description(&self) -> String {
        let c = &self.name;
        format!("C* = {{y : <y, x> >= 0 for all x in {}}} (dual cone)", c)
    }
    /// Name-based heuristic: the nonnegative orthant is self-dual.
    #[allow(dead_code)]
    pub fn is_self_dual_nonneg_orthant(&self) -> bool {
        self.name.contains("R^") && self.name.ends_with('+')
    }
}
/// A convex function with optional analytic gradient and regularity constants.
#[derive(Debug, Clone)]
pub struct ConvexFunction {
    pub name: String,
    // Evaluates f(x).
    eval_fn: fn(&[f64]) -> f64,
    // Analytic gradient; when absent, `gradient` uses central differences.
    grad_fn: Option<fn(&[f64]) -> Vec<f64>>,
    pub lipschitz_grad: Option<f64>,
    pub strong_convexity: Option<f64>,
}
impl ConvexFunction {
    /// Wraps an evaluation function (and optional gradient) under a display name.
    pub fn new(
        name: impl Into<String>,
        eval_fn: fn(&[f64]) -> f64,
        grad_fn: Option<fn(&[f64]) -> Vec<f64>>,
    ) -> Self {
        Self {
            name: name.into(),
            eval_fn,
            grad_fn,
            lipschitz_grad: None,
            strong_convexity: None,
        }
    }
    /// Evaluates f at `x`.
    pub fn eval(&self, x: &[f64]) -> f64 {
        (self.eval_fn)(x)
    }
    /// Gradient of f at `x`: analytic when available, otherwise a central
    /// finite-difference approximation with h = 1e-7.
    pub fn gradient(&self, x: &[f64]) -> Vec<f64> {
        if let Some(g) = self.grad_fn {
            return g(x);
        }
        let h = 1e-7;
        let mut grad = vec![0.0; x.len()];
        for i in 0..x.len() {
            let mut xp = x.to_vec();
            xp[i] += h;
            let mut xm = x.to_vec();
            xm[i] -= h;
            grad[i] = ((self.eval_fn)(&xp) - (self.eval_fn)(&xm)) / (2.0 * h);
        }
        grad
    }
    /// Approximates the Fenchel conjugate f*(y) = sup_x <x,y> - f(x) by a
    /// grid search over the box [-radius, radius]^n.
    ///
    /// Bug fix: the multi-dimensional branch previously built the SAME point x
    /// on every iteration (the loop counter was never used), so the "search"
    /// evaluated one fixed candidate steps^2 times. The candidate index is now
    /// decomposed in base (steps + 1), one digit per coordinate, so distinct
    /// grid points are visited: with steps^2 samples the first two coordinates
    /// are swept over the full grid while higher coordinates stay at -radius.
    /// Also guards steps = 0, which previously produced an infinite step size.
    pub fn fenchel_conjugate_approx(&self, y: &[f64], radius: f64, steps: usize) -> f64 {
        let n = y.len();
        if n == 0 {
            return 0.0;
        }
        let steps = steps.max(1); // avoid a zero division in step_size
        let step_size = 2.0 * radius / steps as f64;
        if n == 1 {
            // Exhaustive 1-D sweep over steps + 1 grid points.
            let mut best = f64::NEG_INFINITY;
            for k in 0..=steps {
                let x = -radius + k as f64 * step_size;
                let val = y[0] * x - self.eval(&[x]);
                if val > best {
                    best = val;
                }
            }
            return best;
        }
        let points_per_axis = steps + 1;
        let mut best = f64::NEG_INFINITY;
        for k in 0..(steps * steps) {
            // Decompose k in base (steps + 1): one grid digit per coordinate.
            let mut rest = k;
            let x: Vec<f64> = (0..n)
                .map(|_| {
                    let digit = rest % points_per_axis;
                    rest /= points_per_axis;
                    -radius + digit as f64 * step_size
                })
                .collect();
            let dot: f64 = y.iter().zip(x.iter()).map(|(a, b)| a * b).sum();
            let val = dot - self.eval(&x);
            if val > best {
                best = val;
            }
        }
        best
    }
    /// True when (x, t) lies in the epigraph of f.
    pub fn in_epigraph(&self, x: &[f64], t: f64) -> bool {
        self.eval(x) <= t
    }
    /// True when x lies in the alpha-sublevel set of f.
    pub fn in_level_set(&self, x: &[f64], alpha: f64) -> bool {
        self.eval(x) <= alpha
    }
}
/// Hyperplane {x : <normal, x> = offset}.
#[derive(Debug, Clone)]
pub struct Hyperplane {
    pub normal: Vec<f64>,
    pub offset: f64,
}
impl Hyperplane {
    /// Creates the hyperplane <normal, x> = offset.
    pub fn new(normal: Vec<f64>, offset: f64) -> Self {
        Self { normal, offset }
    }
    /// <normal, x> - offset: positive on one side, negative on the other.
    pub fn signed_distance(&self, x: &[f64]) -> f64 {
        let inner: f64 = self.normal.iter().zip(x.iter()).map(|(ni, xi)| ni * xi).sum();
        inner - self.offset
    }
    /// Weak separation: A on the nonnegative side, B on the nonpositive side
    /// (each with a 1e-9 slack).
    pub fn separates(&self, a_points: &[Vec<f64>], b_points: &[Vec<f64>]) -> bool {
        let a_ok = a_points.iter().all(|p| self.signed_distance(p) >= -1e-9);
        let b_ok = b_points.iter().all(|p| self.signed_distance(p) <= 1e-9);
        a_ok && b_ok
    }
    /// Strict separation: a positive margin between the closest A point and
    /// the farthest B point.
    pub fn strictly_separates(&self, a_points: &[Vec<f64>], b_points: &[Vec<f64>]) -> bool {
        let closest_a = a_points
            .iter()
            .map(|p| self.signed_distance(p))
            .fold(f64::INFINITY, f64::min);
        let farthest_b = b_points
            .iter()
            .map(|p| self.signed_distance(p))
            .fold(f64::NEG_INFINITY, f64::max);
        closest_a > farthest_b + 1e-9
    }
}
/// Infimal convolution of two named convex functions.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct InfimalConvolution {
    pub f_name: String,
    pub g_name: String,
}
impl InfimalConvolution {
    /// Pairs the two function names.
    #[allow(dead_code)]
    pub fn new(f: &str, g: &str) -> Self {
        Self {
            f_name: f.to_string(),
            g_name: g.to_string(),
        }
    }
    /// Defining infimum formula of the infimal convolution.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let (f, g) = (&self.f_name, &self.g_name);
        format!("({} inf-conv {})(x) = inf_y {{ {}(y) + {}(x-y) }}", f, g, f, g)
    }
    /// Conjugacy rule: the conjugate of an inf-convolution is the sum of conjugates.
    #[allow(dead_code)]
    pub fn conjugate_of_infconv(&self) -> String {
        let (f, g) = (&self.f_name, &self.g_name);
        format!("({} inf-conv {})* = {}* + {}*", f, g, f, g)
    }
}
347#[derive(Debug, Clone)]
350pub struct ProximalGradientSolver {
351 pub step_size: f64,
353 pub max_iter: usize,
355 pub tol: f64,
357}
358impl ProximalGradientSolver {
359 pub fn new(step_size: f64, max_iter: usize, tol: f64) -> Self {
361 Self {
362 step_size,
363 max_iter,
364 tol,
365 }
366 }
367 pub fn solve(
369 &self,
370 f_smooth: &ConvexFunction,
371 prox_g: impl Fn(&[f64], f64) -> Vec<f64>,
372 x0: &[f64],
373 ) -> Vec<f64> {
374 let mut x = x0.to_vec();
375 let n = x.len();
376 for _ in 0..self.max_iter {
377 let grad = f_smooth.gradient(&x);
378 let gradient_step: Vec<f64> = x
379 .iter()
380 .zip(grad.iter())
381 .map(|(xi, gi)| xi - self.step_size * gi)
382 .collect();
383 let new_x = prox_g(&gradient_step, self.step_size);
384 let delta: f64 = x
385 .iter()
386 .zip(new_x.iter())
387 .map(|(a, b)| (a - b).powi(2))
388 .sum::<f64>()
389 .sqrt();
390 x = new_x;
391 let _ = n;
392 if delta < self.tol {
393 break;
394 }
395 }
396 x
397 }
398}
/// Bookkeeping for an ADMM run: penalty parameter, residuals, iteration count.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ADMMData {
    pub penalty_parameter: f64,
    pub num_iterations: usize,
    pub primal_residual: f64,
    pub dual_residual: f64,
    pub convergence_rate: f64,
}
#[allow(dead_code)]
impl ADMMData {
    /// Fresh state for penalty rho; residuals start at +inf so the run is not
    /// converged before any update.
    pub fn new(rho: f64) -> Self {
        Self {
            penalty_parameter: rho,
            num_iterations: 0,
            primal_residual: f64::INFINITY,
            dual_residual: f64::INFINITY,
            convergence_rate: rho.recip(),
        }
    }
    /// Records the latest primal/dual residuals and counts the iteration.
    pub fn update_residuals(&mut self, primal: f64, dual: f64) {
        self.primal_residual = primal;
        self.dual_residual = dual;
        self.num_iterations += 1;
    }
    /// Converged when both residuals are below `tol`.
    pub fn has_converged(&self, tol: f64) -> bool {
        self.primal_residual < tol && self.dual_residual < tol
    }
    /// One-line reminder of the ADMM update order.
    pub fn admm_update_description(&self) -> String {
        format!(
            "ADMM (ρ={:.3}): x-update → z-update → y-update (dual ascent)",
            self.penalty_parameter
        )
    }
    /// Standard convergence statement for ADMM.
    pub fn convergence_guarantee(&self) -> String {
        String::from(
            "ADMM: O(1/k) convergence for convex problems; linear convergence for strongly convex",
        )
    }
}
/// Standard classes of convex programs.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConvexProblemClass {
    /// Linear program.
    Lp,
    /// Quadratic program.
    Qp,
    /// Quadratically constrained quadratic program.
    Qcqp,
    /// Second-order cone program.
    Socp,
    /// Semidefinite program.
    Sdp,
    /// Geometric program.
    Gp,
    /// General convex program.
    Cp,
}
/// Configuration for proximal-type inner solves.
#[derive(Debug, Clone)]
pub struct ProxConfig {
    /// Regularization weight in the prox objective.
    pub lambda: f64,
    /// Inner iteration budget.
    pub max_iter: usize,
    /// Inner stopping tolerance.
    pub tol: f64,
    /// Inner gradient step size.
    pub step_size: f64,
}
impl ProxConfig {
    /// Configuration with regularization `lambda` and defaults:
    /// 500 iterations, tolerance 1e-8, step size 0.01.
    pub fn new(lambda: f64) -> Self {
        let (max_iter, tol, step_size) = (500, 1e-8, 0.01);
        Self {
            lambda,
            max_iter,
            tol,
            step_size,
        }
    }
}
/// Proximal operator prox_{gamma f} of a named function.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ProximalOperator {
    pub function_name: String,
    pub step_size: f64,
}
impl ProximalOperator {
    /// Pairs a function name with a prox step size gamma.
    #[allow(dead_code)]
    pub fn new(f: &str, gamma: f64) -> Self {
        Self {
            function_name: f.to_string(),
            step_size: gamma,
        }
    }
    /// Variational definition of the proximal map.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let f = &self.function_name;
        format!(
            "prox_{{gamma {} }}(x) = argmin_y {{ {}(y) + ||y-x||^2/(2*{}) }}",
            f, f, self.step_size
        )
    }
    /// For an indicator function the prox reduces to a projection.
    #[allow(dead_code)]
    pub fn for_indicator_function(&self) -> String {
        String::from("prox_{gamma * I_C}(x) = proj_C(x) (projection onto convex set C)")
    }
    /// Moreau decomposition relating the prox of f and of its conjugate f*.
    #[allow(dead_code)]
    pub fn moreau_decomposition(&self) -> String {
        let f = &self.function_name;
        format!(
            "Moreau: x = prox_{{{}}}(x) + sigma * prox_{{{}*/sigma}}(x/sigma)",
            f, f
        )
    }
}
/// Descriptive ADMM solver parameters (penalty rho, iteration cap, tolerance).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct AdmmSolver {
    pub rho: f64,
    pub max_iter: usize,
    pub tolerance: f64,
}
impl AdmmSolver {
    /// Bundles the ADMM parameters.
    #[allow(dead_code)]
    pub fn new(rho: f64, max_iter: usize, tol: f64) -> Self {
        Self {
            rho,
            max_iter,
            tolerance: tol,
        }
    }
    /// Describes the x-minimization step of the augmented Lagrangian.
    #[allow(dead_code)]
    pub fn update_x_description(&self) -> String {
        format!(
            "x-update: x_(k+1) = argmin_x L_rho(x, z_k, u_k) with rho={}",
            self.rho
        )
    }
    /// Describes the z-step (a prox of g scaled by 1/rho).
    #[allow(dead_code)]
    pub fn update_z_description(&self) -> String {
        String::from("z-update: z_{k+1} = prox_{g/rho}(x_{k+1} + u_k)")
    }
    /// Classical convergence statement for convex f and g.
    #[allow(dead_code)]
    pub fn convergence_description(&self) -> String {
        format!(
            "ADMM converges for any rho > 0 when f, g convex; tolerance {}",
            self.tolerance
        )
    }
}
542#[allow(dead_code)]
544#[derive(Debug, Clone)]
545pub struct ConvexProgram {
546 pub name: String,
547 pub num_variables: usize,
548 pub num_constraints: usize,
549 pub problem_class: ConvexProblemClass,
550}
551impl ConvexProgram {
552 #[allow(dead_code)]
553 pub fn new(name: &str, n: usize, m: usize, class: ConvexProblemClass) -> Self {
554 Self {
555 name: name.to_string(),
556 num_variables: n,
557 num_constraints: m,
558 problem_class: class,
559 }
560 }
561 #[allow(dead_code)]
562 pub fn is_lp(&self) -> bool {
563 matches!(self.problem_class, ConvexProblemClass::Lp)
564 }
565 #[allow(dead_code)]
566 pub fn interior_point_complexity(&self) -> String {
567 match &self.problem_class {
568 ConvexProblemClass::Lp => {
569 format!("O(n^3.5 log(1/eps)) for LP n={}", self.num_variables)
570 }
571 ConvexProblemClass::Sdp => {
572 format!("O(n^6 log(1/eps)) for SDP n={}", self.num_variables)
573 }
574 ConvexProblemClass::Socp => {
575 format!("O(n^3.5 log(1/eps)) for SOCP n={}", self.num_variables)
576 }
577 _ => {
578 format!(
579 "Poly(n,m) for n={}, m={}",
580 self.num_variables, self.num_constraints
581 )
582 }
583 }
584 }
585 #[allow(dead_code)]
586 pub fn strong_duality_holds(&self) -> bool {
587 true
588 }
589}
590pub struct FenchelConjugateEvaluator {
592 pub class: FunctionClass,
594}
595impl FenchelConjugateEvaluator {
596 pub fn new(class: FunctionClass) -> Self {
598 Self { class }
599 }
600 pub fn eval(&self, y: &[f64]) -> f64 {
602 match &self.class {
603 FunctionClass::SquaredNorm => 0.5 * y.iter().map(|yi| yi * yi).sum::<f64>(),
604 FunctionClass::EuclideanNorm => {
605 let norm: f64 = y.iter().map(|yi| yi * yi).sum::<f64>().sqrt();
606 if norm <= 1.0 + 1e-9 {
607 0.0
608 } else {
609 f64::INFINITY
610 }
611 }
612 FunctionClass::NegativeEntropy => y.iter().map(|yi| (yi - 1.0).exp()).sum(),
613 FunctionClass::LpNorm { p } => {
614 if *p <= 1.0 + 1e-12 {
615 return f64::INFINITY;
616 }
617 let q = p / (p - 1.0);
618 let lq: f64 = y.iter().map(|yi| yi.abs().powf(q)).sum::<f64>();
619 lq / q
620 }
621 FunctionClass::BoxIndicator { lo, hi } => {
622 y.iter().map(|yi| (lo * yi).max(hi * yi)).sum()
623 }
624 FunctionClass::Numerical => f64::NAN,
625 }
626 }
627 pub fn check_fenchel_young(&self, x: &[f64], y: &[f64], f_val: f64) -> bool {
629 let inner: f64 = x.iter().zip(y.iter()).map(|(xi, yi)| xi * yi).sum();
630 let conj = self.eval(y);
631 inner <= f_val + conj + 1e-9
632 }
633}
/// Function classes with known closed-form Fenchel conjugates
/// (as evaluated by `FenchelConjugateEvaluator`).
#[derive(Debug, Clone)]
pub enum FunctionClass {
    /// f(x) = (1/2)||x||^2 (self-conjugate).
    SquaredNorm,
    /// f(x) = ||x||_2; conjugate is the unit-ball indicator.
    EuclideanNorm,
    /// f(x) = sum_i x_i log x_i; conjugate is sum exp(y_i - 1).
    NegativeEntropy,
    /// f(x) = (1/p)||x||_p^p for p > 1; conjugate uses the Hoelder exponent q.
    LpNorm { p: f64 },
    /// Indicator of the box [lo, hi]^n; conjugate is its support function.
    BoxIndicator { lo: f64, hi: f64 },
    /// No closed form available; the evaluator returns NaN for this class.
    Numerical,
}
/// Primal/dual pair under Lagrangian duality.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct LagrangianDuality {
    pub primal_name: String,
    pub dual_name: String,
    pub duality_gap: f64,
}
impl LagrangianDuality {
    /// Records the pair and its duality gap.
    #[allow(dead_code)]
    pub fn new(primal: &str, dual: &str, gap: f64) -> Self {
        Self {
            primal_name: primal.to_string(),
            dual_name: dual.to_string(),
            duality_gap: gap,
        }
    }
    /// Strong duality is declared when the recorded gap is below 1e-10.
    #[allow(dead_code)]
    pub fn strong_duality(&self) -> bool {
        self.duality_gap < 1e-10
    }
    /// Slater's sufficient condition for strong duality.
    #[allow(dead_code)]
    pub fn slater_condition_description(&self) -> String {
        String::from("Slater's condition: exists strictly feasible point => strong duality")
    }
    /// The four KKT optimality conditions.
    #[allow(dead_code)]
    pub fn kkt_conditions(&self) -> Vec<String> {
        [
            "Primal feasibility: Ax + Bz = c, x >= 0",
            "Dual feasibility: lambda >= 0",
            "Complementary slackness: lambda_i * h_i(x) = 0",
            "Stationarity: grad f + sum lambda_i grad h_i = 0",
        ]
        .iter()
        .map(|s| s.to_string())
        .collect()
    }
}
685#[derive(Debug, Clone)]
687pub struct ProximalPointAlgorithm {
688 pub lambdas: Vec<f64>,
690 pub max_iter: usize,
692 pub tol: f64,
694 pub inner_step: f64,
696 pub inner_iter: usize,
698}
699impl ProximalPointAlgorithm {
700 pub fn constant(lambda: f64, max_iter: usize, tol: f64) -> Self {
702 Self {
703 lambdas: vec![lambda],
704 max_iter,
705 tol,
706 inner_step: 0.01,
707 inner_iter: 300,
708 }
709 }
710 pub fn run(&self, f: &ConvexFunction, x0: &[f64]) -> Vec<Vec<f64>> {
712 let mut x = x0.to_vec();
713 let mut iterates = vec![x.clone()];
714 for k in 0..self.max_iter {
715 let lambda = self
716 .lambdas
717 .get(k)
718 .copied()
719 .unwrap_or_else(|| *self.lambdas.last().unwrap_or(&1.0));
720 let cfg = ProxConfig {
721 lambda,
722 max_iter: self.inner_iter,
723 tol: self.tol * 0.01,
724 step_size: self.inner_step,
725 };
726 let new_x = proximal_operator(f, &x, &cfg);
727 let delta: f64 = x
728 .iter()
729 .zip(new_x.iter())
730 .map(|(a, b)| (a - b).powi(2))
731 .sum::<f64>()
732 .sqrt();
733 x = new_x;
734 iterates.push(x.clone());
735 if delta < self.tol {
736 break;
737 }
738 }
739 iterates
740 }
741}
/// Sublevel set {x : f(x) <= c} of a named function.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct SublevelSet {
    pub function_name: String,
    pub level: f64,
}
impl SublevelSet {
    /// Pairs a function name with a level c.
    #[allow(dead_code)]
    pub fn new(f: &str, c: f64) -> Self {
        Self {
            function_name: f.to_string(),
            level: c,
        }
    }
    /// Set-builder description of the sublevel set.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let f = &self.function_name;
        format!("C_c = {{x : {}(x) <= {}}}", f, self.level)
    }
    /// All sublevel sets are convex iff f is quasiconvex.
    #[allow(dead_code)]
    pub fn is_convex_if_f_quasiconvex(&self) -> bool {
        true
    }
}
/// Catalogue of proximal operators with closed-form formulas.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ProximalOperatorNew {
    pub function_name: String,
    pub regularization: f64,
    pub has_closed_form: bool,
    pub formula: String,
}
#[allow(dead_code)]
impl ProximalOperatorNew {
    /// prox of lambda*||x||_1: coordinatewise soft thresholding.
    pub fn l1_norm(lambda: f64) -> Self {
        let formula = format!(
            "soft_threshold(x, {:.3}) = sign(x)*max(|x|-{:.3}, 0)",
            lambda, lambda
        );
        Self {
            function_name: "||·||_1".to_string(),
            regularization: lambda,
            has_closed_form: true,
            formula,
        }
    }
    /// prox of (lambda/2)||x||^2: a simple shrinkage toward the origin.
    pub fn l2_squared(lambda: f64) -> Self {
        Self {
            function_name: "(1/2)||·||²".to_string(),
            regularization: lambda,
            has_closed_form: true,
            formula: format!("x / (1 + {:.3})", lambda),
        }
    }
    /// prox of a halfspace indicator: Euclidean projection onto the halfspace.
    pub fn indicator_halfspace(lambda: f64) -> Self {
        Self {
            function_name: "δ_{x: a^Tx ≤ b}".to_string(),
            regularization: lambda,
            has_closed_form: true,
            formula: "projection onto halfspace: x - max(0, a^Tx-b)/||a||² * a".to_string(),
        }
    }
    /// Variational definition of the proximal map for this function.
    pub fn proximal_point_formula(&self) -> String {
        let f = &self.function_name;
        format!("prox_{{λ{}}}(v) = argmin_x [{{{}}}/λ + (1/2)||x-v||²]", f, f)
    }
    /// Moreau identity splitting v between the prox of f and of f*.
    pub fn moreau_decomposition(&self) -> String {
        let f = &self.function_name;
        format!("Moreau: v = prox_{{λ{}}}(v) + λ·prox_{{(1/λ){}*}}(v/λ)", f, f)
    }
}
/// Korpelevich extragradient method state for monotone problems.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ExtragradientMethod {
    pub step_size: f64,
    pub operator: String,
    pub iterations: usize,
    pub history: Vec<f64>,
}
#[allow(dead_code)]
impl ExtragradientMethod {
    /// Fresh state with step size tau and a named operator F.
    pub fn new(step: f64, op: &str) -> Self {
        Self {
            step_size: step,
            operator: op.to_string(),
            iterations: 0,
            history: Vec::new(),
        }
    }
    /// The two projected half-steps of Korpelevich's scheme.
    pub fn korpelevich_step_description(&self) -> String {
        format!(
            "Extragradient (Korpelevich): y_k = P_C(x_k - τF(x_k)), x_{{k+1}} = P_C(x_k - τF(y_k)), τ={:.3}",
            self.step_size
        )
    }
    /// Classical step-size condition tau < 1/L for an L-Lipschitz operator.
    pub fn convergence_condition(&self, lipschitz_l: f64) -> bool {
        self.step_size < 1.0 / lipschitz_l
    }
    /// Records one iteration's residual.
    pub fn do_step(&mut self, residual: f64) {
        self.history.push(residual);
        self.iterations += 1;
    }
}
/// Mirror descent configuration (step size plus Bregman geometry).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct MirrorDescent {
    pub step_size: f64,
    pub bregman_divergence: String,
    pub num_iterations: usize,
}
impl MirrorDescent {
    /// Entropic mirror descent (exponentiated gradient).
    #[allow(dead_code)]
    pub fn with_entropy(eta: f64, iters: usize) -> Self {
        Self {
            step_size: eta,
            bregman_divergence: "KL divergence (entropic MD = Exponentiated Gradient)".to_string(),
            num_iterations: iters,
        }
    }
    /// O(L*D/sqrt(T)) bound for L-Lipschitz objectives over a diameter-D set.
    #[allow(dead_code)]
    pub fn convergence_rate(&self, diameter: f64, lipschitz: f64) -> f64 {
        let root_t = (self.num_iterations as f64).sqrt();
        lipschitz * diameter / root_t
    }
}
/// A convex function paired with its Fenchel conjugate, plus regularity flags.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ConvexConjugate {
    pub function_name: String,
    pub domain: String,
    pub conjugate_name: String,
    pub conjugate_domain: String,
    pub is_proper: bool,
    pub is_closed: bool,
}
#[allow(dead_code)]
impl ConvexConjugate {
    /// Generic pair, assumed proper and closed.
    pub fn new(name: &str, domain: &str, conj_name: &str, conj_domain: &str) -> Self {
        Self {
            function_name: name.to_string(),
            domain: domain.to_string(),
            conjugate_name: conj_name.to_string(),
            conjugate_domain: conj_domain.to_string(),
            is_proper: true,
            is_closed: true,
        }
    }
    /// (||x||²/2)* = ||y||²/2 — the squared norm is self-conjugate.
    pub fn quadratic_conjugate() -> Self {
        Self::new("||x||²/2", "R^n", "||y||²/2", "R^n")
    }
    /// The conjugate of an indicator δ_C is the support function h_C.
    pub fn indicator_conjugate(set: &str) -> Self {
        Self::new(
            &format!("δ_{{{}}}", set),
            set,
            &format!("h_{{{}}}", set),
            "R^n",
        )
    }
    /// f** = f exactly for proper, closed (convex) functions.
    pub fn biconjugate_equals_f(&self) -> bool {
        self.is_proper && self.is_closed
    }
    /// Statement of the Fenchel-Moreau theorem for this function.
    pub fn fenchel_moreau_theorem(&self) -> String {
        let f = &self.function_name;
        if self.biconjugate_equals_f() {
            format!("Fenchel-Moreau: ({})** = {} (proper, closed, convex)", f, f)
        } else {
            format!("({})** = convex closure of {}", f, f)
        }
    }
    /// Young-Fenchel inequality with its equality (subgradient) case.
    pub fn young_fenchel_inequality(&self) -> String {
        format!(
            "Young-Fenchel: {}(x) + {}(y) ≥ ⟨x,y⟩ with equality at x = ∂{}(y)",
            self.function_name, self.conjugate_name, self.conjugate_name
        )
    }
}
/// Searches for a separating hyperplane between two finite point sets by
/// refining an initial guess with perceptron-style margin updates.
pub struct SeparatingHyperplaneFinder {
    /// Learning rate for the margin-violation updates.
    pub lr: f64,
    /// Maximum number of refinement sweeps over both point sets.
    pub max_iter: usize,
    /// Stop refining once the total margin violation drops below this.
    pub tol: f64,
}
impl SeparatingHyperplaneFinder {
    /// Creates a finder with the given learning rate, sweep cap and tolerance.
    pub fn new(lr: f64, max_iter: usize, tol: f64) -> Self {
        Self { lr, max_iter, tol }
    }
    /// Tries to separate `a_points` (nonnegative side) from `b_points`
    /// (nonpositive side). Returns a unit-normal `Hyperplane` on success,
    /// `None` when either set is empty or no separator can be certified.
    pub fn find(&self, a_points: &[Vec<f64>], b_points: &[Vec<f64>]) -> Option<Hyperplane> {
        if a_points.is_empty() || b_points.is_empty() {
            return None;
        }
        // Initial guess from the helper in super::functions (contract not
        // visible here; presumably a direct geometric construction — verify).
        let initial = compute_separating_hyperplane(a_points, b_points)?;
        let n = initial.normal.len();
        let mut w = initial.normal.clone();
        let mut b = initial.offset;
        for _ in 0..self.max_iter {
            // Accumulated update direction over all margin violations this sweep.
            let mut dw = vec![0.0; n];
            let mut db = 0.0_f64;
            let mut total_violation = 0.0_f64;
            // A points should satisfy margin >= 1; violators pull w toward x.
            for x in a_points {
                let margin: f64 = w.iter().zip(x.iter()).map(|(wi, xi)| wi * xi).sum::<f64>() - b;
                if margin < 1.0 {
                    for i in 0..n {
                        dw[i] += x[i];
                    }
                    db -= 1.0;
                    total_violation += (1.0 - margin).abs();
                }
            }
            // B points should satisfy margin <= -1; violators push w away from x.
            for x in b_points {
                let margin: f64 = w.iter().zip(x.iter()).map(|(wi, xi)| wi * xi).sum::<f64>() - b;
                if margin > -1.0 {
                    for i in 0..n {
                        dw[i] -= x[i];
                    }
                    db += 1.0;
                    total_violation += (1.0 + margin).abs();
                }
            }
            if total_violation < self.tol {
                break;
            }
            // Gradient-style update on (w, b).
            for i in 0..n {
                w[i] += self.lr * dw[i];
            }
            b += self.lr * db;
        }
        // Normalize to a unit normal; a near-zero normal means the search collapsed.
        let norm: f64 = w.iter().map(|wi| wi * wi).sum::<f64>().sqrt();
        if norm < 1e-12 {
            return None;
        }
        let unit_w: Vec<f64> = w.iter().map(|wi| wi / norm).collect();
        let unit_b = b / norm;
        let hp = Hyperplane::new(unit_w, unit_b);
        if hp.separates(a_points, b_points) {
            Some(hp)
        } else {
            // Refinement failed the separation check: fall back to the helper's
            // hyperplane, and only accept it if it actually separates.
            let fb = compute_separating_hyperplane(a_points, b_points)?;
            if fb.separates(a_points, b_points) {
                Some(fb)
            } else {
                None
            }
        }
    }
}
/// Epigraph of a named function.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Epigraph {
    pub function_name: String,
}
impl Epigraph {
    /// Wraps the function name.
    #[allow(dead_code)]
    pub fn new(f: &str) -> Self {
        let function_name = f.to_string();
        Self { function_name }
    }
    /// Set-builder description: the points on or above the graph of f.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let f = &self.function_name;
        format!("epi({}) = {{(x, t) : {}(x) <= t}}", f, f)
    }
    /// f is convex iff its epigraph is a convex set.
    #[allow(dead_code)]
    pub fn f_convex_iff_epi_convex(&self) -> bool {
        true
    }
}
/// Symbolic Fenchel conjugate f* of a named function f.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FenchelConjugate {
    pub function_name: String,
    pub conjugate_name: String,
}
impl FenchelConjugate {
    /// Names the conjugate "f*" after the function "f".
    #[allow(dead_code)]
    pub fn new(f: &str) -> Self {
        Self {
            function_name: f.to_string(),
            conjugate_name: format!("{}*", f),
        }
    }
    /// Supremum definition of the conjugate.
    #[allow(dead_code)]
    pub fn definition(&self) -> String {
        let f = &self.function_name;
        format!("{}*(y) = sup_x <x, y> - {}(x)", f, f)
    }
    /// The biconjugate is the closed convex hull of f.
    #[allow(dead_code)]
    pub fn biconjugate_is_convex_hull(&self) -> String {
        let f = &self.function_name;
        format!("{}** = closed convex hull of {}", f, f)
    }
    /// Fenchel's inequality relating f and f*.
    #[allow(dead_code)]
    pub fn fenchel_inequality(&self) -> String {
        let f = &self.function_name;
        format!("<x, y> <= {}(x) + {}*(y) for all x, y", f, f)
    }
}
/// Variational inequality VI(F, C) with monotonicity metadata.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct VariationalInequality {
    pub operator_name: String,
    pub constraint_set: String,
    pub is_monotone: bool,
    pub is_strongly_monotone: bool,
    pub strong_monotonicity: f64,
}
#[allow(dead_code)]
impl VariationalInequality {
    /// A (possibly) monotone VI with no strong-monotonicity modulus.
    pub fn new(op: &str, set: &str, monotone: bool) -> Self {
        Self {
            operator_name: op.to_string(),
            constraint_set: set.to_string(),
            is_monotone: monotone,
            is_strongly_monotone: false,
            strong_monotonicity: 0.0,
        }
    }
    /// A strongly monotone VI with modulus `alpha` (implies monotone).
    pub fn strongly_monotone(op: &str, set: &str, alpha: f64) -> Self {
        let mut vi = Self::new(op, set, true);
        vi.is_strongly_monotone = true;
        vi.strong_monotonicity = alpha;
        vi
    }
    /// The standard VI statement: find x* in C with <F(x*), x - x*> >= 0 on C.
    pub fn vi_formulation(&self) -> String {
        format!(
            "Find x* ∈ {} s.t. ⟨F(x*), x - x*⟩ ≥ 0 ∀x ∈ {} (F = {})",
            self.constraint_set, self.constraint_set, self.operator_name
        )
    }
    /// Existence statement depending on monotonicity.
    pub fn stampacchia_existence(&self) -> String {
        match self.is_monotone {
            true => "Stampacchia theorem: monotone + coercive → VI has solution".to_string(),
            false => "No monotonicity: existence not guaranteed by Stampacchia".to_string(),
        }
    }
    /// Strong monotonicity gives uniqueness of the solution.
    pub fn unique_solution_exists(&self) -> bool {
        self.is_strongly_monotone
    }
}
1119#[derive(Debug, Clone)]
1121pub struct SubgradientMethod {
1122 pub schedule: StepSchedule,
1124 pub max_iter: usize,
1126 pub track_best: bool,
1128}
1129impl SubgradientMethod {
1130 pub fn new(schedule: StepSchedule, max_iter: usize) -> Self {
1132 Self {
1133 schedule,
1134 max_iter,
1135 track_best: true,
1136 }
1137 }
1138 pub fn run(&self, f: &ConvexFunction, x0: &[f64]) -> (Vec<f64>, Vec<f64>) {
1141 let n = x0.len();
1142 let mut x = x0.to_vec();
1143 let mut best_x = x.clone();
1144 let mut best_val = f.eval(&x);
1145 let mut history = Vec::with_capacity(self.max_iter);
1146 for k in 0..self.max_iter {
1147 let val = f.eval(&x);
1148 history.push(val);
1149 if val < best_val {
1150 best_val = val;
1151 best_x = x.clone();
1152 }
1153 let g = f.gradient(&x);
1154 let g_norm_sq: f64 = g.iter().map(|gi| gi * gi).sum();
1155 if g_norm_sq < 1e-30 {
1156 break;
1157 }
1158 let eta = match self.schedule {
1159 StepSchedule::Constant(eta) => eta,
1160 StepSchedule::DiminishingSqrt(eta) => eta / ((k + 1) as f64).sqrt(),
1161 StepSchedule::Polyak { f_star } => ((val - f_star).max(0.0)) / g_norm_sq,
1162 };
1163 let mut new_x = vec![0.0; n];
1164 for i in 0..n {
1165 new_x[i] = x[i] - eta * g[i];
1166 }
1167 x = new_x;
1168 }
1169 let best = if self.track_best { best_x } else { x };
1170 (best, history)
1171 }
1172}