1use super::functions::*;
6use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
7
/// Scaled-form ADMM solver parameters: penalty, iteration budget, tolerance.
#[allow(dead_code)]
pub struct ADMMSolver {
    /// Augmented-Lagrangian penalty parameter ρ.
    pub rho: f64,
    /// Maximum number of outer ADMM iterations.
    pub max_iter: usize,
    /// Tolerance applied to both primal and dual residuals.
    pub tol: f64,
}
#[allow(dead_code)]
impl ADMMSolver {
    /// Builds a solver from the penalty ρ, iteration cap and tolerance.
    pub fn new(rho: f64, max_iter: usize, tol: f64) -> Self {
        Self { rho, max_iter, tol }
    }
    /// Linear contraction factor 1 - 1/(1 + λ/ρ) for the lasso model
    /// (the problem size `_n` does not enter this simple estimate).
    pub fn lasso_convergence_rate(&self, lambda: f64, _n: usize) -> f64 {
        let denom = 1.0 + lambda / self.rho;
        1.0 - 1.0 / denom
    }
    /// Dual ascent step y ← y + ρ·r, where r is the primal residual.
    pub fn dual_update(&self, y: f64, primal_residual: f64) -> f64 {
        let ascent = self.rho * primal_residual;
        y + ascent
    }
    /// True once both residuals fall below the configured tolerance.
    pub fn stopping_criteria(&self, primal_res: f64, dual_res: f64) -> bool {
        primal_res < self.tol && dual_res < self.tol
    }
    /// O(1/k) ergodic bound at iteration k (k clamped to at least 1);
    /// `_initial_gap` is kept for interface compatibility but unused.
    pub fn convergence_bound_at(&self, k: usize, _initial_gap: f64) -> f64 {
        let steps = k.max(1) as f64;
        1.0 / steps
    }
}
/// The infimal convolution f □ g of two (named) functions.
pub struct InfConvolution {
    /// Name of the first operand f.
    pub f: String,
    /// Name of the second operand g.
    pub g: String,
}
impl InfConvolution {
    /// Builds the pair from anything convertible into `String`.
    pub fn new(f: impl Into<String>, g: impl Into<String>) -> Self {
        let (f, g) = (f.into(), g.into());
        Self { f, g }
    }
    /// The infimal convolution of two convex functions is convex.
    pub fn is_convex_if_both_convex(&self) -> bool {
        true
    }
    /// Textbook statement: epi(f □ g) is the Minkowski sum of the epigraphs.
    pub fn epigraph_sum(&self) -> String {
        format!(
            "Epigraph sum: epi({} □ {}) = epi({}) + epi({}) (Minkowski sum of epigraphs). \
             This is why the infimal convolution is also called the epigraph sum.",
            self.f, self.g, self.f, self.g
        )
    }
}
/// Fenchel–Rockafellar duality pair for two named functions f and g.
pub struct FenchelDuality {
    /// Name of the primal function f.
    pub f: String,
    /// Name of the composed function g.
    pub g: String,
}
impl FenchelDuality {
    /// Stores the two function names.
    pub fn new(f: impl Into<String>, g: impl Into<String>) -> Self {
        let (f, g) = (f.into(), g.into());
        Self { f, g }
    }
    /// Strong duality is asserted to hold for this pair.
    pub fn strong_duality_holds(&self) -> bool {
        true
    }
    /// Statement of the Fenchel–Rockafellar strong-duality theorem with its
    /// interiority qualification condition.
    pub fn optimal_condition(&self) -> String {
        format!(
            "Fenchel-Rockafellar strong duality: If dom({}) ∩ int(dom({})) ≠ ∅, then \
             inf_x({}(x) + {}(Ax)) = max_y(-{}*(-A*y) - {}*(y)), \
             and the gap is zero.",
            self.f, self.g, self.f, self.g, self.f, self.g
        )
    }
}
/// Regularity metadata about a convex function, used to reason about
/// subdifferential calculus rules and first-order convergence rates.
#[allow(dead_code)]
pub struct ConvexSubdifferential {
    /// Human-readable name of the function.
    pub name: String,
    /// Whether the function is differentiable.
    pub is_differentiable: bool,
    /// Strong-convexity modulus μ (0.0 means not strongly convex).
    pub strong_convexity_modulus: f64,
    /// Whether a Lipschitz constant is known.
    pub is_lipschitz: bool,
    /// The Lipschitz constant L (0.0 when unknown).
    pub lipschitz_constant: f64,
}
#[allow(dead_code)]
impl ConvexSubdifferential {
    /// Starts from a plain convex function with no extra regularity flags.
    pub fn new(name: &str) -> Self {
        ConvexSubdifferential {
            name: name.to_string(),
            is_differentiable: false,
            strong_convexity_modulus: 0.0,
            is_lipschitz: false,
            lipschitz_constant: 0.0,
        }
    }
    /// Builder: mark the function as differentiable.
    pub fn with_differentiability(mut self) -> Self {
        self.is_differentiable = true;
        self
    }
    /// Builder: record the strong-convexity modulus μ.
    pub fn with_strong_convexity(mut self, mu: f64) -> Self {
        self.strong_convexity_modulus = mu;
        self
    }
    /// Builder: record a Lipschitz constant.
    pub fn with_lipschitz(mut self, l: f64) -> Self {
        self.is_lipschitz = true;
        self.lipschitz_constant = l;
        self
    }
    /// Sum-rule qualification used here: at least one summand differentiable.
    pub fn sum_rule_holds(&self, other: &ConvexSubdifferential) -> bool {
        self.is_differentiable || other.is_differentiable
    }
    /// Chain-rule qualification used here: the function is Lipschitz.
    pub fn chain_rule_holds(&self) -> bool {
        self.is_lipschitz
    }
    /// Linear rate 1 - μ/L for gradient descent; `None` unless both μ and L
    /// are known to be positive.
    pub fn gradient_descent_rate(&self) -> Option<f64> {
        let mu = self.strong_convexity_modulus;
        let l = self.lipschitz_constant;
        (mu > 0.0 && l > 0.0).then(|| 1.0 - mu / l)
    }
    /// Proximal-point rate after k steps: geometric (1 - λμ)^k with
    /// λ = 1/(2·max(L, 1)) under strong convexity, otherwise the 1/k bound.
    pub fn proximal_convergence_rate(&self, k: usize) -> f64 {
        let mu = self.strong_convexity_modulus;
        if mu > 0.0 {
            let lambda = 1.0 / (2.0 * self.lipschitz_constant.max(1.0));
            (1.0 - lambda * mu).powi(k as i32).abs()
        } else {
            1.0 / k.max(1) as f64
        }
    }
}
/// The epigraph epi(f) = {(x, α) : f(x) ≤ α} of a named function.
pub struct Epigraph {
    /// Name of the underlying function f.
    pub function: String,
}
impl Epigraph {
    /// Stores the function name.
    pub fn new(function: impl Into<String>) -> Self {
        let function = function.into();
        Self { function }
    }
    /// Closedness of the epigraph characterises lower semicontinuity.
    pub fn is_closed_iff_lsc(&self) -> String {
        format!(
            "Theorem: epi({}) = {{(x,α) | {}(x) ≤ α}} is a closed set in X×ℝ \
             if and only if {} is lower semicontinuous.",
            self.function, self.function, self.function
        )
    }
    /// Convexity of the epigraph characterises convexity of the function.
    pub fn convex_iff_fn_convex(&self) -> String {
        format!(
            "Theorem: epi({}) is a convex set iff {} is a convex function \
             (Jensen's inequality characterisation).",
            self.function, self.function
        )
    }
}
/// Classical proximal-point method x_{k+1} = prox_{λf}(x_k), with the inner
/// prox subproblem solved approximately by gradient descent on the
/// regularised objective.
pub struct ProximalPointSolver {
    /// Proximal parameter λ (also scales the inner descent step).
    pub lambda: f64,
    /// Maximum number of outer proximal iterations.
    pub max_iter: usize,
    /// Outer stopping tolerance on ‖x_{k+1} - x_k‖.
    pub tol: f64,
}
impl ProximalPointSolver {
    /// Bundles λ, the outer iteration cap and the stopping tolerance.
    pub fn new(lambda: f64, max_iter: usize, tol: f64) -> Self {
        Self {
            lambda,
            max_iter,
            tol,
        }
    }
    /// Approximates prox_{λf}(v) = argmin_x f(x) + ‖x - v‖²/(2λ) with at most
    /// 500 gradient-descent sweeps, using a finite-difference (Clarke)
    /// gradient of f. Note: `x` is updated coordinate-by-coordinate inside a
    /// sweep while `grad_f` is held fixed from the sweep's start.
    pub fn prox_step(&self, f: impl Fn(&[f64]) -> f64 + Copy, v: &[f64]) -> Vec<f64> {
        let n = v.len();
        let mut x = v.to_vec();
        // Inner step size is tied to λ; 1% of λ keeps the descent stable.
        let inner_step = self.lambda * 0.01;
        let inner_iters = 500;
        for _ in 0..inner_iters {
            let grad_f = clarke_gradient_approx(f, &x, 1e-6);
            let mut moved = false;
            for i in 0..n {
                // Gradient of f(x) + ‖x - v‖²/(2λ) in coordinate i.
                let total_grad = grad_f[i] + (x[i] - v[i]) / self.lambda;
                let new_xi = x[i] - inner_step * total_grad;
                if (new_xi - x[i]).abs() > 1e-14 {
                    moved = true;
                }
                x[i] = new_xi;
            }
            // Fixed point of the inner iteration reached: stop early.
            if !moved {
                break;
            }
        }
        x
    }
    /// Runs the outer proximal-point iteration from `x0` and returns every
    /// iterate (including `x0` itself), stopping once consecutive iterates
    /// are closer than `tol` or after `max_iter` outer steps.
    pub fn solve(&self, f: impl Fn(&[f64]) -> f64 + Copy, x0: &[f64]) -> Vec<Vec<f64>> {
        let mut iterates = vec![x0.to_vec()];
        let mut x = x0.to_vec();
        for _ in 0..self.max_iter {
            let x_new = self.prox_step(f, &x);
            // Euclidean distance between consecutive iterates.
            let diff: f64 = x
                .iter()
                .zip(x_new.iter())
                .map(|(a, b)| (a - b).powi(2))
                .sum::<f64>()
                .sqrt();
            iterates.push(x_new.clone());
            x = x_new;
            if diff < self.tol {
                break;
            }
        }
        iterates
    }
    /// True when the last two recorded iterates lie within `tol` of each
    /// other; false when fewer than two iterates are available.
    pub fn has_converged(&self, iterates: &[Vec<f64>]) -> bool {
        if iterates.len() < 2 {
            return false;
        }
        let last = iterates
            .last()
            .expect("iterates has at least 2 elements: checked by early return");
        let prev = &iterates[iterates.len() - 2];
        let diff: f64 = last
            .iter()
            .zip(prev.iter())
            .map(|(a, b)| (a - b).powi(2))
            .sum::<f64>()
            .sqrt();
        diff < self.tol
    }
}
#[allow(dead_code)]
/// Function classes whose proximal operators have closed forms; consumed by
/// `ProximalOperator::apply_scalar` / `apply_vector`.
#[derive(Debug, Clone, PartialEq)]
pub enum ProxFnType {
    /// λ‖x‖₁ — prox is componentwise soft-thresholding.
    L1Norm,
    /// λ‖x‖₂² — prox rescales each component by 1/(1 + 2λ).
    L2NormSquared,
    /// Indicator of the non-negative orthant — prox clamps at zero.
    NonNegativeOrtHant,
    /// Indicator of the Euclidean ball of the given radius — prox is radial
    /// projection onto the ball (an interval clamp in the scalar case).
    L2Ball { radius: f64 },
}
/// The proximal mapping prox_{λf} of a named function f.
pub struct ProximalMapping {
    /// Name of the function f.
    pub f: String,
    /// Proximal parameter λ.
    pub lambda: f64,
}
impl ProximalMapping {
    /// Stores the function name and the parameter λ.
    pub fn new(f: impl Into<String>, lambda: f64) -> Self {
        let f = f.into();
        Self { f, lambda }
    }
    /// Moreau's defining variational problem for prox_{λf}(x).
    pub fn prox_point(&self) -> String {
        format!(
            "prox_{{λ{}}}(x) = argmin_y {{ {}(y) + ||y - x||²/(2·{:.4}) }}. \
             Moreau (1962): this is always uniquely defined for proper lsc convex f.",
            self.f, self.f, self.lambda
        )
    }
    /// Firm-nonexpansiveness inequality satisfied by every proximal mapping.
    pub fn firm_nonexpansive(&self) -> String {
        format!(
            "The proximal mapping prox_{{λ·{}}} is firmly nonexpansive: \
             ||prox(x) - prox(y)||² ≤ ⟨prox(x)-prox(y), x-y⟩ for all x,y.",
            self.f
        )
    }
}
/// A sequence of functions fₙ : ℝᵈ → ℝ used to approximate epi-limits and a
/// Γ-liminf numerically on a one-dimensional grid around a base point.
pub struct FunctionSequence {
    // Index n in the vector plays the role of the sequence index.
    functions: Vec<Box<dyn Fn(&[f64]) -> f64 + Send + Sync>>,
}
impl FunctionSequence {
    /// Wraps the given boxed functions.
    pub fn new(functions: Vec<Box<dyn Fn(&[f64]) -> f64 + Send + Sync>>) -> Self {
        Self { functions }
    }
    /// Number of functions in the sequence.
    pub fn len(&self) -> usize {
        self.functions.len()
    }
    /// True when the sequence holds no functions.
    pub fn is_empty(&self) -> bool {
        self.functions.is_empty()
    }
    /// Evaluates fₙ(x). Panics if `n >= self.len()`.
    pub fn eval(&self, n: usize, x: &[f64]) -> f64 {
        self.functions[n](x)
    }
    /// Lower epi-limit proxy: infimum over grid perturbations y of x (first
    /// coordinate only) of inf_n fₙ(y). Returns +∞ for an empty sequence.
    pub fn epi_liminf(&self, x: &[f64], radius: f64, grid_steps: usize) -> f64 {
        let n_fns = self.functions.len();
        if n_fns == 0 {
            return f64::INFINITY;
        }
        let mut result = f64::INFINITY;
        // Grid spacing over [x₀ - radius, x₀ + radius]; a degenerate
        // grid_steps of 0 falls back to a spacing of `radius`.
        let step = if grid_steps > 0 {
            2.0 * radius / grid_steps as f64
        } else {
            radius
        };
        let dim = x.len();
        // In 1-D both endpoints are included (grid_steps + 1 nodes).
        let perturb_count = if dim == 1 { grid_steps + 1 } else { grid_steps };
        for k in 0..perturb_count {
            let mut y = x.to_vec();
            if dim >= 1 {
                y[0] = x[0] - radius + k as f64 * step;
            }
            // Inner infimum over the whole sequence at this grid node.
            let mut liminf_n = f64::INFINITY;
            for fn_n in &self.functions {
                let val = fn_n(&y);
                if val < liminf_n {
                    liminf_n = val;
                }
            }
            if liminf_n < result {
                result = liminf_n;
            }
        }
        result
    }
    /// Upper epi-limit proxy: supremum over the same grid of sup_n fₙ(y).
    /// Returns -∞ for an empty sequence.
    pub fn epi_limsup(&self, x: &[f64], radius: f64, grid_steps: usize) -> f64 {
        let n_fns = self.functions.len();
        if n_fns == 0 {
            return f64::NEG_INFINITY;
        }
        let step = if grid_steps > 0 {
            2.0 * radius / grid_steps as f64
        } else {
            radius
        };
        let dim = x.len();
        let mut result = f64::NEG_INFINITY;
        let perturb_count = if dim == 1 { grid_steps + 1 } else { grid_steps };
        for k in 0..perturb_count {
            let mut y = x.to_vec();
            if dim >= 1 {
                y[0] = x[0] - radius + k as f64 * step;
            }
            // Inner supremum over the whole sequence at this grid node.
            let mut limsup_n = f64::NEG_INFINITY;
            for fn_n in &self.functions {
                let val = fn_n(&y);
                if val > limsup_n {
                    limsup_n = val;
                }
            }
            if limsup_n > result {
                result = limsup_n;
            }
        }
        result
    }
    /// Γ-liminf proxy: sup over localisation radii of inf over n and over a
    /// 21-node grid near x (first coordinate) of fₙ(y). Returns -∞ for empty
    /// `radii`, and +∞ when the sequence itself is empty.
    pub fn gamma_liminf(&self, x: &[f64], radii: &[f64]) -> f64 {
        let mut best = f64::NEG_INFINITY;
        for &r in radii {
            let steps = 20;
            let step = 2.0 * r / steps as f64;
            let mut inf_over_n = f64::INFINITY;
            for fn_n in &self.functions {
                let mut local_inf = f64::INFINITY;
                for k in 0..=steps {
                    let mut y = x.to_vec();
                    if !y.is_empty() {
                        y[0] = x[0] - r + k as f64 * step;
                    }
                    let val = fn_n(&y);
                    if val < local_inf {
                        local_inf = val;
                    }
                }
                // The original wrapped this in a redundant
                // `n_idx == 0 || local_inf < inf_over_n` guard; the
                // comparison alone is equivalent (inf_over_n starts at +∞).
                if local_inf < inf_over_n {
                    inf_over_n = local_inf;
                }
            }
            if inf_over_n > best {
                best = inf_over_n;
            }
        }
        best
    }
}
/// A set described by sampled boundary points, probed for prox-regularity
/// (unique projections) within a tube of the given radius.
#[derive(Debug, Clone)]
pub struct ProxRegularSet {
    /// Discrete sample of the set's boundary.
    pub boundary_points: Vec<Vec<f64>>,
    /// Radius of the tube inside which projections should be unique.
    pub radius: f64,
}
impl ProxRegularSet {
    /// Stores the boundary sample and the prox-regularity radius.
    pub fn new(boundary_points: Vec<Vec<f64>>, radius: f64) -> Self {
        Self {
            boundary_points,
            radius,
        }
    }
    /// Euclidean distance between two points (zip-truncated to the shorter).
    fn euclid(a: &[f64], b: &[f64]) -> f64 {
        a.iter()
            .zip(b.iter())
            .map(|(p, q)| (p - q).powi(2))
            .sum::<f64>()
            .sqrt()
    }
    /// Nearest boundary sample to `x`; returns `x` itself when the boundary
    /// sample is empty.
    pub fn project(&self, x: &[f64]) -> Vec<f64> {
        let mut nearest = x.to_vec();
        let mut nearest_dist = f64::INFINITY;
        for candidate in &self.boundary_points {
            let d = Self::euclid(x, candidate);
            if d < nearest_dist {
                nearest_dist = d;
                nearest = candidate.clone();
            }
        }
        nearest
    }
    /// True when the two smallest sample distances differ by more than 1e-9
    /// (or when fewer than two samples exist).
    pub fn has_unique_projection(&self, x: &[f64]) -> bool {
        let mut dists: Vec<f64> = self
            .boundary_points
            .iter()
            .map(|p| Self::euclid(x, p))
            .collect();
        dists.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        match dists.as_slice() {
            [d0, d1, ..] => (d0 - d1).abs() > 1e-9,
            _ => true,
        }
    }
    /// Every test point closer to the sampled set than `radius` must have a
    /// unique projection; otherwise the set is declared not prox-regular.
    pub fn check_prox_regular(&self, test_points: &[Vec<f64>]) -> bool {
        test_points.iter().all(|x| {
            let dist_to_set = self
                .boundary_points
                .iter()
                .map(|p| Self::euclid(x, p))
                .fold(f64::INFINITY, f64::min);
            dist_to_set >= self.radius || self.has_unique_projection(x)
        })
    }
}
/// Variational inequality VI(F, C): find x* ∈ C with ⟨F(x*), y - x*⟩ ≥ 0
/// for all y ∈ C.
pub struct VariationalInequality {
    /// Name of the operator F.
    pub operator: String,
    /// Name of the constraint set C.
    pub domain: String,
}
impl VariationalInequality {
    /// Stores the operator and domain labels.
    pub fn new(operator: impl Into<String>, domain: impl Into<String>) -> Self {
        let operator = operator.into();
        let domain = domain.into();
        Self { operator, domain }
    }
    /// Equivalence of the Stampacchia and Minty formulations for monotone
    /// hemicontinuous operators.
    pub fn minty_stampacchia(&self) -> String {
        format!(
            "Minty-Stampacchia (1960/1964): For the VI with F='{}' on C='{}': \
             if F is monotone and hemicontinuous, then x* satisfies the VI iff \
             ⟨F(y), y-x*⟩ ≥ 0 for all y ∈ C (Minty formulation).",
            self.operator, self.domain
        )
    }
    /// Hartman–Stampacchia existence result, with the coercivity variant for
    /// unbounded domains.
    pub fn existence_condition(&self) -> String {
        format!(
            "Existence (Hartman-Stampacchia 1966): If C='{}' is compact convex and \
             F='{}' is continuous, then the VI has at least one solution. \
             For unbounded C, a coercivity condition ⟨F(x),x-x₀⟩/||x||→∞ suffices.",
            self.domain, self.operator
        )
    }
}
/// The convex subdifferential ∂f considered at one fixed point.
pub struct Subdifferential {
    /// The point x at which ∂f(x) is evaluated.
    pub point: Vec<f64>,
}
impl Subdifferential {
    /// Fixes the evaluation point.
    pub fn new(point: Vec<f64>) -> Self {
        Self { point }
    }
    /// For a convex function, ∂f(x) is nonempty at interior points of dom(f).
    pub fn is_nonempty_at_interior(&self) -> bool {
        true
    }
    /// Fermat's rule stated at the stored point (coordinates shown to 3 d.p.).
    pub fn optimality_condition(&self) -> String {
        let coords = self
            .point
            .iter()
            .map(|v| format!("{:.3}", v))
            .collect::<Vec<String>>()
            .join(", ");
        format!(
            "Fermat's rule: x = ({}) is a minimiser of f iff 0 ∈ ∂f(x). \
             For a smooth convex f this reduces to ∇f(x) = 0.",
            coords
        )
    }
}
/// Finite-difference approximation of Fréchet and limiting (Mordukhovich)
/// subdifferentials, built on the Clarke-gradient helper.
pub struct MordukhovichSubdiffApprox {
    /// Finite-difference step passed to the gradient approximation.
    pub h: f64,
    /// Number of random perturbation directions sampled per call.
    pub num_dirs: usize,
    /// Radius for accepting nearby subgradients; also the stationarity cutoff.
    pub tolerance: f64,
}
impl MordukhovichSubdiffApprox {
    /// Bundles the step size, sample count and tolerance.
    pub fn new(h: f64, num_dirs: usize, tolerance: f64) -> Self {
        Self {
            h,
            num_dirs,
            tolerance,
        }
    }
    /// Fréchet (regular) subgradient estimate at `x`, delegated to the shared
    /// Clarke-gradient finite-difference routine with step `self.h`.
    pub fn frechet_subgradient(&self, f: impl Fn(&[f64]) -> f64, x: &[f64]) -> Vec<f64> {
        clarke_gradient_approx(f, x, self.h)
    }
    /// Approximates the limiting subdifferential at `x` as the collection of
    /// Fréchet subgradients at x itself plus those taken at random points
    /// landing within `tolerance` of x. Deterministic: the LCG is seeded
    /// with 42, so repeated calls return identical samples.
    pub fn limiting_subdiff_approx(
        &self,
        f: impl Fn(&[f64]) -> f64 + Copy,
        x: &[f64],
    ) -> Vec<Vec<f64>> {
        let n = x.len();
        let mut result = Vec::new();
        result.push(self.frechet_subgradient(f, x));
        let mut lcg = crate::random_matrix_theory::Lcg::new(42);
        for _ in 0..self.num_dirs {
            let mut y = x.to_vec();
            for yi in y.iter_mut() {
                // Uniform perturbation in ±tolerance per coordinate.
                *yi += (lcg.next_f64() - 0.5) * 2.0 * self.tolerance;
            }
            let g = self.frechet_subgradient(f, &y);
            let dist: f64 = (0..n).map(|i| (y[i] - x[i]).powi(2)).sum::<f64>().sqrt();
            // Only keep subgradients sampled inside the tolerance ball; the
            // boxed perturbation can land outside it.
            if dist < self.tolerance {
                result.push(g);
            }
        }
        result
    }
    /// True when the Fréchet subgradient norm at `x` is below `tolerance`.
    pub fn is_stationary(&self, f: impl Fn(&[f64]) -> f64 + Copy, x: &[f64]) -> bool {
        let g = self.frechet_subgradient(f, x);
        let norm: f64 = g.iter().map(|gi| gi * gi).sum::<f64>().sqrt();
        norm < self.tolerance
    }
}
/// A finite set-valued map: each sampled domain point carries a value set.
pub struct SetValuedMap {
    /// Sampled domain points.
    pub domain: Vec<f64>,
    /// Value set associated with each domain point.
    pub values: Vec<Vec<f64>>,
}
impl SetValuedMap {
    /// Pairs the domain samples with their value sets.
    pub fn new(domain: Vec<f64>, values: Vec<Vec<f64>>) -> Self {
        Self { domain, values }
    }
    /// Proxy used here for upper semicontinuity: no value set is empty
    /// (vacuously true for an empty map).
    pub fn upper_semicontinuous(&self) -> bool {
        self.values.iter().all(|v| !v.is_empty())
    }
    /// True when there is at least one value set and none of them is empty.
    pub fn closed_values(&self) -> bool {
        if self.values.is_empty() {
            return false;
        }
        self.values.iter().all(|v| !v.is_empty())
    }
}
/// The Moreau envelope e_{λf}, a smooth approximation of a named function f.
pub struct MoreauEnvelope {
    /// Name of the enveloped function f.
    pub f: String,
    /// Smoothing parameter λ.
    pub lambda: f64,
}
impl MoreauEnvelope {
    /// Stores the function name and smoothing parameter.
    pub fn new(f: impl Into<String>, lambda: f64) -> Self {
        let f = f.into();
        Self { f, lambda }
    }
    /// The Moreau envelope of a proper lsc convex function is C¹.
    pub fn is_smooth(&self) -> bool {
        true
    }
    /// Gradient identity linking the envelope to the proximal mapping.
    pub fn gradient_formula(&self) -> String {
        format!(
            "∇(e_{{λ{}}})(x) = (x - prox_{{λ{}}}(x)) / {:.4}. \
             This gradient is (1/λ)-Lipschitz, making e_{{λf}} a smooth approximation of {}.",
            self.f, self.f, self.lambda, self.f
        )
    }
}
/// Endpoints and sampling resolution for a mountain-pass geometry check
/// along the straight segment [a, b].
#[derive(Debug, Clone)]
pub struct MountainPassConfig {
    /// First endpoint of the path.
    pub a: Vec<f64>,
    /// Second endpoint of the path.
    pub b: Vec<f64>,
    /// Highest sampled value along [a, b]; 0.0 until `estimate_pass_level`
    /// has been called.
    pub estimated_pass_level: f64,
    /// Number of segments the path is divided into.
    pub path_samples: usize,
}
impl MountainPassConfig {
    /// Stores the endpoints and resolution. The endpoint vectors are moved
    /// in directly (the original cloned its own already-owned arguments).
    pub fn new(a: Vec<f64>, b: Vec<f64>, path_samples: usize) -> Self {
        Self {
            a,
            b,
            estimated_pass_level: 0.0,
            path_samples,
        }
    }
    /// Samples f at path_samples + 1 equispaced points on [a, b], records the
    /// maximum as the pass level and returns it. A degenerate
    /// `path_samples == 0` now evaluates f at `a` only (previously the
    /// division 0/0 produced NaN coordinates).
    pub fn estimate_pass_level(&mut self, f: &impl Fn(&[f64]) -> f64) -> f64 {
        let n = self.a.len();
        // max(1) guards the division when path_samples == 0.
        let segments = self.path_samples.max(1) as f64;
        let mut max_val = f64::NEG_INFINITY;
        for k in 0..=self.path_samples {
            let t = k as f64 / segments;
            let x: Vec<f64> = (0..n)
                .map(|i| (1.0 - t) * self.a[i] + t * self.b[i])
                .collect();
            let val = f(&x);
            if val > max_val {
                max_val = val;
            }
        }
        self.estimated_pass_level = max_val;
        max_val
    }
    /// True when both endpoints lie strictly below the recorded pass level.
    /// Call `estimate_pass_level` first; before that the level is still 0.0.
    pub fn has_mountain_pass_geometry(&self, f: &impl Fn(&[f64]) -> f64) -> bool {
        let fa = f(&self.a);
        let fb = f(&self.b);
        fa < self.estimated_pass_level && fb < self.estimated_pass_level
    }
}
/// A convex function tagged with its domain plus the properness and
/// lower-semicontinuity flags that unlock the stronger duality statements.
pub struct ConvexFunction {
    /// Description of the domain.
    pub domain: String,
    /// Whether the function is proper (finite somewhere, never -∞).
    pub is_proper: bool,
    /// Whether the function is lower semicontinuous.
    pub is_lsc: bool,
}
impl ConvexFunction {
    /// Stores the domain label and regularity flags.
    pub fn new(domain: impl Into<String>, is_proper: bool, is_lsc: bool) -> Self {
        let domain = domain.into();
        Self {
            domain,
            is_proper,
            is_lsc,
        }
    }
    /// Describes ∂f, using the stronger Fenchel–Moreau statement when the
    /// function is both proper and lsc.
    pub fn subdifferential(&self) -> String {
        let regular = self.is_proper && self.is_lsc;
        if regular {
            format!(
                "For a proper lsc convex function on '{}': the subdifferential ∂f(x) is \
                 nonempty on the interior of dom(f), and f = f** (Fenchel-Moreau theorem).",
                self.domain
            )
        } else {
            format!(
                "For a convex function on '{}': subdifferential ∂f(x) = {{ v | ∀y, f(y) ≥ f(x) + ⟨v,y-x⟩ }}.",
                self.domain
            )
        }
    }
    /// Definition of the Fenchel conjugate and the biconjugation identity.
    pub fn conjugate_fn(&self) -> String {
        format!(
            "Fenchel conjugate f*(v) = sup_{{x ∈ {}}} (⟨v,x⟩ - f(x)). \
             For proper lsc convex f: f** = f (Fenchel-Moreau, 1949).",
            self.domain
        )
    }
}
770pub struct EkelandPrinciple {
773 pub epsilon: f64,
775 pub max_iter: usize,
777 pub step: f64,
779}
780impl EkelandPrinciple {
781 pub fn new(epsilon: f64, max_iter: usize) -> Self {
783 Self {
784 epsilon,
785 max_iter,
786 step: epsilon * 0.1,
787 }
788 }
789 pub fn find_minimiser(&self, f: impl Fn(&[f64]) -> f64, x0: &[f64]) -> Vec<f64> {
791 ekeland_approximate_minimiser(f, x0, self.epsilon, self.max_iter)
792 }
793 pub fn verify_ekeland_condition(
795 &self,
796 f: impl Fn(&[f64]) -> f64,
797 x_eps: &[f64],
798 sample_points: &[Vec<f64>],
799 ) -> bool {
800 let f_eps = f(x_eps);
801 for x in sample_points {
802 let fx = f(x);
803 let dist: f64 = x_eps
804 .iter()
805 .zip(x.iter())
806 .map(|(a, b)| (a - b).powi(2))
807 .sum::<f64>()
808 .sqrt();
809 if fx + self.epsilon * dist < f_eps - 1e-12 {
810 return false;
811 }
812 }
813 true
814 }
815 pub fn near_minimality_gap(&self, f: impl Fn(&[f64]) -> f64, x_eps: &[f64], x0: &[f64]) -> f64 {
817 let f_eps = f(x_eps);
818 let f_x0 = f(x0);
819 f_x0 - f_eps
820 }
821}
822#[allow(dead_code)]
824pub struct ProximalOperator {
825 pub lambda: f64,
827 pub fn_type: ProxFnType,
829}
830#[allow(dead_code)]
831impl ProximalOperator {
832 pub fn new(lambda: f64, fn_type: ProxFnType) -> Self {
834 ProximalOperator { lambda, fn_type }
835 }
836 pub fn apply_scalar(&self, v: f64) -> f64 {
838 match &self.fn_type {
839 ProxFnType::L1Norm => {
840 let threshold = self.lambda;
841 v.signum() * (v.abs() - threshold).max(0.0)
842 }
843 ProxFnType::L2NormSquared => v / (1.0 + 2.0 * self.lambda),
844 ProxFnType::NonNegativeOrtHant => v.max(0.0),
845 ProxFnType::L2Ball { radius } => v.clamp(-radius, *radius),
846 }
847 }
848 pub fn apply_vector(&self, v: &[f64]) -> Vec<f64> {
850 match &self.fn_type {
851 ProxFnType::L2Ball { radius } => {
852 let norm: f64 = v.iter().map(|&x| x * x).sum::<f64>().sqrt();
853 if norm <= *radius {
854 v.to_vec()
855 } else {
856 v.iter().map(|&x| x * radius / norm).collect()
857 }
858 }
859 _ => v.iter().map(|&vi| self.apply_scalar(vi)).collect(),
860 }
861 }
862 pub fn moreau_decomposition(&self, v: f64) -> (f64, f64) {
865 let p = self.apply_scalar(v);
866 let dual = v - p;
867 (p, dual)
868 }
869}
/// Numerical probes around metric regularity of a vector-valued map F near a
/// reference pair (x0, y0), plus two static first-order qualification checks.
pub struct MetricRegularityChecker {
    /// Radius of the random perturbations applied around x0.
    pub radius: f64,
    /// Number of random perturbations sampled by `estimate_modulus`.
    pub num_tests: usize,
    /// Tolerance for treating f(s) ≈ y0 (approximate membership in F⁻¹(y0)).
    pub tol: f64,
}
impl MetricRegularityChecker {
    /// Bundles the sampling radius, sample count and tolerance.
    pub fn new(radius: f64, num_tests: usize, tol: f64) -> Self {
        Self {
            radius,
            num_tests,
            tol,
        }
    }
    /// Estimates the regularity modulus κ in d(x, F⁻¹(y0)) ≤ κ·d(y0, F(x))
    /// by sampling perturbations of x0 (deterministic LCG, seed 7) and
    /// approximating F⁻¹(y0) by the members of `domain_samples` that map
    /// within `tol` of y0. Returns the largest observed ratio, or 0.0 when
    /// no sample was informative.
    pub fn estimate_modulus(
        &self,
        f: impl Fn(&[f64]) -> Vec<f64>,
        x0: &[f64],
        y0: &[f64],
        domain_samples: &[Vec<f64>],
    ) -> f64 {
        let mut max_ratio = 0.0_f64;
        let mut lcg = crate::random_matrix_theory::Lcg::new(7);
        for _ in 0..self.num_tests {
            // Uniform perturbation of x0 inside the ±radius box.
            let x_pert: Vec<f64> = x0
                .iter()
                .map(|&xi| xi + (lcg.next_f64() - 0.5) * 2.0 * self.radius)
                .collect();
            let fx_pert = f(&x_pert);
            // d(y0, F(x_pert)).
            let d_y_fx: f64 = y0
                .iter()
                .zip(fx_pert.iter())
                .map(|(a, b)| (a - b).powi(2))
                .sum::<f64>()
                .sqrt();
            // Skip near-exact hits to avoid dividing by ~0.
            if d_y_fx < 1e-14 {
                continue;
            }
            // d(x_pert, F⁻¹(y0)), with F⁻¹(y0) approximated by the samples
            // whose image lies within `tol` of y0.
            let d_x_finv = domain_samples
                .iter()
                .filter(|s| {
                    let fs = f(s);
                    let d: f64 = y0
                        .iter()
                        .zip(fs.iter())
                        .map(|(a, b)| (a - b).powi(2))
                        .sum::<f64>()
                        .sqrt();
                    d < self.tol
                })
                .map(|s| {
                    x_pert
                        .iter()
                        .zip(s.iter())
                        .map(|(a, b)| (a - b).powi(2))
                        .sum::<f64>()
                        .sqrt()
                })
                .fold(f64::INFINITY, f64::min);
            // An infinite distance means no sample qualified as a preimage.
            if d_x_finv.is_finite() {
                let ratio = d_x_finv / d_y_fx;
                if ratio > max_ratio {
                    max_ratio = ratio;
                }
            }
        }
        max_ratio
    }
    /// LICQ-flavoured check at `x`: every constraint active to within `h`
    /// must have a nonzero finite-difference gradient, and no two active
    /// gradients may be (anti)parallel. Vacuously true with no constraints
    /// or no active constraints.
    pub fn check_constraint_qualification(
        constraints: &[impl Fn(&[f64]) -> f64],
        x: &[f64],
        h: f64,
    ) -> bool {
        if constraints.is_empty() {
            return true;
        }
        let mut grads: Vec<Vec<f64>> = Vec::new();
        for c in constraints {
            let cx = c(x);
            // |c(x)| < h marks the constraint as active at x.
            if cx.abs() < h {
                let g = clarke_gradient_approx(|pt| c(pt), x, h);
                grads.push(g);
            }
        }
        if grads.is_empty() {
            return true;
        }
        for i in 0..grads.len() {
            for j in (i + 1)..grads.len() {
                let ni: f64 = grads[i].iter().map(|v| v * v).sum::<f64>().sqrt();
                let nj: f64 = grads[j].iter().map(|v| v * v).sum::<f64>().sqrt();
                // A vanishing active gradient already breaks the
                // qualification.
                if ni < 1e-12 || nj < 1e-12 {
                    return false;
                }
                let dot: f64 = grads[i]
                    .iter()
                    .zip(grads[j].iter())
                    .map(|(a, b)| a * b)
                    .sum();
                // |cos angle| ≈ 1 ⇒ the pair is linearly dependent.
                let cos = (dot / (ni * nj)).abs();
                if cos > 1.0 - 1e-6 {
                    return false;
                }
            }
        }
        true
    }
    /// Samples the open segment between x and y and verifies the
    /// quasiconvexity inequality f((1-t)x + ty) ≤ max(f(x), f(y)) at each
    /// interior sample, with a 1e-12 slack.
    pub fn check_quasiconvex(
        f: impl Fn(&[f64]) -> f64,
        x: &[f64],
        y: &[f64],
        num_samples: usize,
    ) -> bool {
        let max_val = f(x).max(f(y));
        for k in 1..num_samples {
            let t = k as f64 / num_samples as f64;
            let z: Vec<f64> = x
                .iter()
                .zip(y.iter())
                .map(|(xi, yi)| (1.0 - t) * xi + t * yi)
                .collect();
            if f(&z) > max_val + 1e-12 {
                return false;
            }
        }
        true
    }
}