Skip to main content

oxilean_std/variational_analysis/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::functions::*;
6use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
7
/// ADMM solver for problems of the form: min f(x) + g(z) s.t. Ax + Bz = c.
#[allow(dead_code)]
pub struct ADMMSolver {
    /// Penalty parameter ρ > 0.
    pub rho: f64,
    /// Maximum iterations.
    pub max_iter: usize,
    /// Tolerance.
    pub tol: f64,
}
#[allow(dead_code)]
impl ADMMSolver {
    /// Build an ADMM solver from its penalty, iteration budget and tolerance.
    pub fn new(rho: f64, max_iter: usize, tol: f64) -> Self {
        Self { rho, max_iter, tol }
    }
    /// ADMM for LASSO: min (1/2)||Ax-b||^2 + λ||x||_1.
    /// Simplified: returns the number of iterations to convergence estimate.
    pub fn lasso_convergence_rate(&self, lambda: f64, _n: usize) -> f64 {
        let denom = 1.0 + lambda / self.rho;
        1.0 - 1.0 / denom
    }
    /// Dual update: y_{k+1} = y_k + ρ(Ax_{k+1} + Bz_{k+1} - c).
    /// Simplified for scalar: returns updated dual variable.
    pub fn dual_update(&self, y: f64, primal_residual: f64) -> f64 {
        let ascent = self.rho * primal_residual;
        y + ascent
    }
    /// Stopping criteria: both the primal and dual residuals must fall below tol.
    /// primal: ||Ax + Bz - c||, dual: ||ρA^T B(z - z_old)||.
    pub fn stopping_criteria(&self, primal_res: f64, dual_res: f64) -> bool {
        dual_res < self.tol && primal_res < self.tol
    }
    /// Convergence bound: ADMM converges at O(1/k) for general convex problems.
    /// `k` is clamped to at least 1 so the bound is defined at k = 0.
    pub fn convergence_bound_at(&self, k: usize, _initial_gap: f64) -> f64 {
        (k.max(1) as f64).recip()
    }
}
/// Infimal convolution (epi-sum) of two functions f and g.
pub struct InfConvolution {
    /// Name or formula for f.
    pub f: String,
    /// Name or formula for g.
    pub g: String,
}
impl InfConvolution {
    /// Build an infimal convolution descriptor from the two function labels.
    pub fn new(f: impl Into<String>, g: impl Into<String>) -> Self {
        let (f, g) = (f.into(), g.into());
        Self { f, g }
    }
    /// Infimal convolution of two convex functions is convex.
    pub fn is_convex_if_both_convex(&self) -> bool {
        true
    }
    /// Epigraph characterisation: epi(f □ g) = epi(f) + epi(g) (Minkowski sum).
    pub fn epigraph_sum(&self) -> String {
        format!(
            "Epigraph sum: epi({0} □ {1}) = epi({0}) + epi({1}) (Minkowski sum of epigraphs). \
             This is why the infimal convolution is also called the epigraph sum.",
            self.f, self.g
        )
    }
}
/// Fenchel duality between convex functions f and g.
pub struct FenchelDuality {
    /// Name or formula for the primal function f.
    pub f: String,
    /// Name or formula for the perturbation g.
    pub g: String,
}
impl FenchelDuality {
    /// Build a Fenchel duality descriptor from the primal and perturbation labels.
    pub fn new(f: impl Into<String>, g: impl Into<String>) -> Self {
        let (f, g) = (f.into(), g.into());
        Self { f, g }
    }
    /// Returns whether strong duality holds (zero duality gap).
    pub fn strong_duality_holds(&self) -> bool {
        true
    }
    /// Optimality condition for Fenchel duality.
    pub fn optimal_condition(&self) -> String {
        format!(
            "Fenchel-Rockafellar strong duality: If dom({0}) ∩ int(dom({1})) ≠ ∅, then \
             inf_x({0}(x) + {1}(Ax)) = max_y(-{0}*(-A*y) - {1}*(y)), \
             and the gap is zero.",
            self.f, self.g
        )
    }
}
/// Represents a convex function via its subdifferential properties.
#[allow(dead_code)]
pub struct ConvexSubdifferential {
    /// Name/description of the function.
    pub name: String,
    /// Whether the function is differentiable.
    pub is_differentiable: bool,
    /// Strong convexity modulus μ (> 0 if strongly convex, 0 otherwise).
    pub strong_convexity_modulus: f64,
    /// Whether the function is Lipschitz continuous.
    pub is_lipschitz: bool,
    /// Lipschitz constant L (meaningful only when `is_lipschitz`).
    pub lipschitz_constant: f64,
}
#[allow(dead_code)]
impl ConvexSubdifferential {
    /// Create a descriptor with all properties unset (not differentiable,
    /// μ = 0, not Lipschitz).
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            is_differentiable: false,
            strong_convexity_modulus: 0.0,
            is_lipschitz: false,
            lipschitz_constant: 0.0,
        }
    }
    /// Mark the function as differentiable (builder style).
    pub fn with_differentiability(mut self) -> Self {
        self.is_differentiable = true;
        self
    }
    /// Set the strong convexity modulus μ (builder style).
    pub fn with_strong_convexity(mut self, mu: f64) -> Self {
        self.strong_convexity_modulus = mu;
        self
    }
    /// Mark the function as L-Lipschitz with the given constant (builder style).
    pub fn with_lipschitz(mut self, l: f64) -> Self {
        self.is_lipschitz = true;
        self.lipschitz_constant = l;
        self
    }
    /// Subdifferential sum rule: ∂(f + g)(x) ⊇ ∂f(x) + ∂g(x).
    /// Equality holds when the regularity condition (here: one side
    /// differentiable) is satisfied.
    pub fn sum_rule_holds(&self, other: &ConvexSubdifferential) -> bool {
        self.is_differentiable || other.is_differentiable
    }
    /// Chain rule: ∂(f ∘ A)(x) = A^T ∂f(Ax) under constraint qualification.
    pub fn chain_rule_holds(&self) -> bool {
        self.is_lipschitz
    }
    /// Fenchel conjugate: optimal convergence rate for gradient descent.
    /// With μ-strong convexity and L-smoothness: rate = 1 - μ/L.
    /// Returns `None` unless both μ > 0 and L > 0 are known.
    pub fn gradient_descent_rate(&self) -> Option<f64> {
        match (self.strong_convexity_modulus, self.lipschitz_constant) {
            (mu, l) if mu > 0.0 && l > 0.0 => Some(1.0 - mu / l),
            _ => None,
        }
    }
    /// Proximal point algorithm convergence: 1/k rate for convex, linear for strongly convex.
    pub fn proximal_convergence_rate(&self, k: usize) -> f64 {
        let mu = self.strong_convexity_modulus;
        if !(mu > 0.0) {
            // merely convex: sublinear O(1/k) rate (k clamped to avoid 1/0)
            return 1.0 / k.max(1) as f64;
        }
        // strongly convex: linear rate (1 - λμ)^k with λ = 1/(2 max(L, 1))
        let lambda = 1.0 / (2.0 * self.lipschitz_constant.max(1.0));
        (1.0 - lambda * mu).powi(k as i32).abs()
    }
}
/// The epigraph of a function f: X → ℝ∪{+∞}.
pub struct Epigraph {
    /// Name or formula of the function.
    pub function: String,
}
impl Epigraph {
    /// Wrap a function label into an epigraph descriptor.
    pub fn new(function: impl Into<String>) -> Self {
        let function = function.into();
        Self { function }
    }
    /// Epigraph is closed iff the function is lower semicontinuous (lsc).
    pub fn is_closed_iff_lsc(&self) -> String {
        format!(
            "Theorem: epi({0}) = {{(x,α) | {0}(x) ≤ α}} is a closed set in X×ℝ \
             if and only if {0} is lower semicontinuous.",
            self.function
        )
    }
    /// Epigraph is convex iff the function is convex.
    pub fn convex_iff_fn_convex(&self) -> String {
        format!(
            "Theorem: epi({0}) is a convex set iff {0} is a convex function \
             (Jensen's inequality characterisation).",
            self.function
        )
    }
}
202/// Proximal point algorithm solver for minimising a proper lower-semicontinuous function.
203pub struct ProximalPointSolver {
204    /// Regularisation parameter λ > 0.
205    pub lambda: f64,
206    /// Maximum number of iterations.
207    pub max_iter: usize,
208    /// Convergence tolerance.
209    pub tol: f64,
210}
211impl ProximalPointSolver {
212    /// Create a new proximal point solver.
213    pub fn new(lambda: f64, max_iter: usize, tol: f64) -> Self {
214        Self {
215            lambda,
216            max_iter,
217            tol,
218        }
219    }
220    /// Approximate the proximal operator prox_{λf}(v) via gradient descent on f(x) + ‖x-v‖²/(2λ).
221    pub fn prox_step(&self, f: impl Fn(&[f64]) -> f64 + Copy, v: &[f64]) -> Vec<f64> {
222        let n = v.len();
223        let mut x = v.to_vec();
224        let inner_step = self.lambda * 0.01;
225        let inner_iters = 500;
226        for _ in 0..inner_iters {
227            let grad_f = clarke_gradient_approx(f, &x, 1e-6);
228            let mut moved = false;
229            for i in 0..n {
230                let total_grad = grad_f[i] + (x[i] - v[i]) / self.lambda;
231                let new_xi = x[i] - inner_step * total_grad;
232                if (new_xi - x[i]).abs() > 1e-14 {
233                    moved = true;
234                }
235                x[i] = new_xi;
236            }
237            if !moved {
238                break;
239            }
240        }
241        x
242    }
243    /// Run the proximal point algorithm starting from x0.
244    /// Returns the sequence of iterates.
245    pub fn solve(&self, f: impl Fn(&[f64]) -> f64 + Copy, x0: &[f64]) -> Vec<Vec<f64>> {
246        let mut iterates = vec![x0.to_vec()];
247        let mut x = x0.to_vec();
248        for _ in 0..self.max_iter {
249            let x_new = self.prox_step(f, &x);
250            let diff: f64 = x
251                .iter()
252                .zip(x_new.iter())
253                .map(|(a, b)| (a - b).powi(2))
254                .sum::<f64>()
255                .sqrt();
256            iterates.push(x_new.clone());
257            x = x_new;
258            if diff < self.tol {
259                break;
260            }
261        }
262        iterates
263    }
264    /// Check convergence: last step size is smaller than tolerance.
265    pub fn has_converged(&self, iterates: &[Vec<f64>]) -> bool {
266        if iterates.len() < 2 {
267            return false;
268        }
269        let last = iterates
270            .last()
271            .expect("iterates has at least 2 elements: checked by early return");
272        let prev = &iterates[iterates.len() - 2];
273        let diff: f64 = last
274            .iter()
275            .zip(prev.iter())
276            .map(|(a, b)| (a - b).powi(2))
277            .sum::<f64>()
278            .sqrt();
279        diff < self.tol
280    }
281}
/// The function whose proximal operator is being applied; each variant has a
/// closed-form prox.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq)]
pub enum ProxFnType {
    /// L1 norm: f(x) = ||x||_1 → prox is soft thresholding.
    L1Norm,
    /// Squared L2 norm: f(x) = ||x||_2^2 → prox is shrinkage.
    L2NormSquared,
    /// Indicator of {x : x ≥ 0}: f(x) = 0 if x≥0, ∞ else → prox is projection.
    NonNegativeOrtHant,
    /// Indicator of the L2 ball of radius r: ||x||_2 ≤ r → prox projects onto the ball.
    L2Ball { radius: f64 },
}
/// Proximal mapping of a function f with parameter λ > 0.
pub struct ProximalMapping {
    /// Name or formula of the function f.
    pub f: String,
    /// Regularisation parameter λ > 0.
    pub lambda: f64,
}
impl ProximalMapping {
    /// Build a proximal mapping descriptor from the function label and λ.
    pub fn new(f: impl Into<String>, lambda: f64) -> Self {
        let f = f.into();
        Self { f, lambda }
    }
    /// Compute the proximal point (conceptually): prox_{λf}(x) = argmin_y {f(y) + |y-x|²/(2λ)}.
    pub fn prox_point(&self) -> String {
        format!(
            "prox_{{λ{0}}}(x) = argmin_y {{ {0}(y) + ||y - x||²/(2·{1:.4}) }}. \
             Moreau (1962): this is always uniquely defined for proper lsc convex f.",
            self.f, self.lambda
        )
    }
    /// Firm nonexpansiveness of the proximal mapping.
    pub fn firm_nonexpansive(&self) -> String {
        format!(
            "The proximal mapping prox_{{λ·{0}}} is firmly nonexpansive: \
             ||prox(x) - prox(y)||² ≤ ⟨prox(x)-prox(y), x-y⟩ for all x,y.",
            self.f
        )
    }
}
/// A sequence of functions f_n represented as closures, with grid-based
/// approximations of epigraphical limit operations.
pub struct FunctionSequence {
    /// The functions f_n, stored by index.
    functions: Vec<Box<dyn Fn(&[f64]) -> f64 + Send + Sync>>,
}
impl FunctionSequence {
    /// Construct from a vector of boxed functions.
    pub fn new(functions: Vec<Box<dyn Fn(&[f64]) -> f64 + Send + Sync>>) -> Self {
        Self { functions }
    }
    /// Length of the sequence.
    pub fn len(&self) -> usize {
        self.functions.len()
    }
    /// Is the sequence empty?
    pub fn is_empty(&self) -> bool {
        self.functions.is_empty()
    }
    /// Evaluate f_n at x.
    ///
    /// # Panics
    /// Panics if `n >= self.len()`.
    pub fn eval(&self, n: usize, x: &[f64]) -> f64 {
        self.functions[n](x)
    }
    /// min_n f_n(y) for a fixed y (∞ if the sequence is empty).
    fn min_over_n(&self, y: &[f64]) -> f64 {
        self.functions
            .iter()
            .map(|f| f(y))
            .fold(f64::INFINITY, f64::min)
    }
    /// max_n f_n(y) for a fixed y (-∞ if the sequence is empty).
    fn max_over_n(&self, y: &[f64]) -> f64 {
        self.functions
            .iter()
            .map(|f| f(y))
            .fold(f64::NEG_INFINITY, f64::max)
    }
    /// Compute epi-liminf at x using a discrete grid of perturbation vectors.
    /// epi-liminf_n f_n(x) = liminf_{y→x} liminf_n f_n(y); the inner liminf is
    /// approximated by the minimum over the finite sequence.
    pub fn epi_liminf(&self, x: &[f64], radius: f64, grid_steps: usize) -> f64 {
        if self.functions.is_empty() {
            return f64::INFINITY;
        }
        let step = if grid_steps > 0 {
            2.0 * radius / grid_steps as f64
        } else {
            radius
        };
        let dim = x.len();
        // NOTE(review): only the first coordinate is perturbed, and for dim != 1
        // the grid has `grid_steps` points (not grid_steps + 1) — kept as-is.
        let perturb_count = if dim == 1 { grid_steps + 1 } else { grid_steps };
        let mut result = f64::INFINITY;
        for k in 0..perturb_count {
            let mut y = x.to_vec();
            if dim >= 1 {
                y[0] = x[0] - radius + k as f64 * step;
            }
            result = result.min(self.min_over_n(&y));
        }
        result
    }
    /// Compute epi-limsup at x (mirror of `epi_liminf` with max in place of min).
    pub fn epi_limsup(&self, x: &[f64], radius: f64, grid_steps: usize) -> f64 {
        if self.functions.is_empty() {
            return f64::NEG_INFINITY;
        }
        let step = if grid_steps > 0 {
            2.0 * radius / grid_steps as f64
        } else {
            radius
        };
        let dim = x.len();
        // Same grid convention as `epi_liminf`.
        let perturb_count = if dim == 1 { grid_steps + 1 } else { grid_steps };
        let mut result = f64::NEG_INFINITY;
        for k in 0..perturb_count {
            let mut y = x.to_vec();
            if dim >= 1 {
                y[0] = x[0] - radius + k as f64 * step;
            }
            result = result.max(self.max_over_n(&y));
        }
        result
    }
    /// Approximate Γ-liminf of the sequence at x.
    /// Γ-liminf_n f_n(x) = sup_{U∋x} liminf_n inf_{y∈U} f_n(y); the liminf over
    /// n is approximated by the minimum over the finite sequence.
    /// (Fix: the original had a redundant `n_idx == 0 || …` guard around the
    /// inner comparison; it is a plain running minimum.)
    pub fn gamma_liminf(&self, x: &[f64], radii: &[f64]) -> f64 {
        let mut best = f64::NEG_INFINITY;
        for &r in radii {
            // 21-point grid on [x0 - r, x0 + r] along the first coordinate
            let steps = 20usize;
            let step = 2.0 * r / steps as f64;
            let mut inf_over_n = f64::INFINITY;
            for f_n in &self.functions {
                let mut local_inf = f64::INFINITY;
                for k in 0..=steps {
                    let mut y = x.to_vec();
                    if !y.is_empty() {
                        y[0] = x[0] - r + k as f64 * step;
                    }
                    local_inf = local_inf.min(f_n(&y));
                }
                inf_over_n = inf_over_n.min(local_inf);
            }
            // supremum over neighbourhood radii
            best = best.max(inf_over_n);
        }
        best
    }
}
/// Checks approximate r-prox-regularity of a finite point set
/// by verifying that the projection is unique in an r-tube.
#[derive(Debug, Clone)]
pub struct ProxRegularSet {
    /// Points representing the boundary of the set.
    pub boundary_points: Vec<Vec<f64>>,
    /// Prox-regularity radius.
    pub radius: f64,
}
impl ProxRegularSet {
    /// Construct a new prox-regular set.
    pub fn new(boundary_points: Vec<Vec<f64>>, radius: f64) -> Self {
        Self { boundary_points, radius }
    }
    /// Euclidean distance between two points.
    fn dist(a: &[f64], b: &[f64]) -> f64 {
        a.iter()
            .zip(b.iter())
            .map(|(u, v)| (u - v).powi(2))
            .sum::<f64>()
            .sqrt()
    }
    /// Compute projection onto the boundary point set.
    /// Ties keep the earliest point; an empty set returns x itself.
    pub fn project(&self, x: &[f64]) -> Vec<f64> {
        let mut best_dist = f64::INFINITY;
        let mut best = x.to_vec();
        for candidate in &self.boundary_points {
            let d = Self::dist(x, candidate);
            if d < best_dist {
                best_dist = d;
                best = candidate.clone();
            }
        }
        best
    }
    /// Check if the projection from x is unique (only one nearest neighbour).
    pub fn has_unique_projection(&self, x: &[f64]) -> bool {
        let mut dists: Vec<f64> = self
            .boundary_points
            .iter()
            .map(|p| Self::dist(x, p))
            .collect();
        dists.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        match dists.as_slice() {
            // unique iff the two closest distances are clearly separated
            [first, second, ..] => (first - second).abs() > 1e-9,
            _ => true,
        }
    }
    /// Check prox-regularity: all points within the r-tube have unique projections.
    pub fn check_prox_regular(&self, test_points: &[Vec<f64>]) -> bool {
        test_points.iter().all(|x| {
            let dist_to_set = self
                .boundary_points
                .iter()
                .map(|p| Self::dist(x, p))
                .fold(f64::INFINITY, f64::min);
            // points outside the tube are unconstrained
            !(dist_to_set < self.radius) || self.has_unique_projection(x)
        })
    }
}
/// A variational inequality: find x ∈ C such that ⟨F(x), y-x⟩ ≥ 0 for all y ∈ C.
pub struct VariationalInequality {
    /// Name or description of the operator F.
    pub operator: String,
    /// Domain/constraint set C.
    pub domain: String,
}
impl VariationalInequality {
    /// Build a VI descriptor from the operator and domain labels.
    pub fn new(operator: impl Into<String>, domain: impl Into<String>) -> Self {
        let (operator, domain) = (operator.into(), domain.into());
        Self { operator, domain }
    }
    /// Minty-Stampacchia theorem: existence of solutions.
    pub fn minty_stampacchia(&self) -> String {
        format!(
            "Minty-Stampacchia (1960/1964): For the VI with F='{0}' on C='{1}': \
             if F is monotone and hemicontinuous, then x* satisfies the VI iff \
             ⟨F(y), y-x*⟩ ≥ 0 for all y ∈ C (Minty formulation).",
            self.operator, self.domain
        )
    }
    /// Existence condition for the variational inequality.
    pub fn existence_condition(&self) -> String {
        format!(
            "Existence (Hartman-Stampacchia 1966): If C='{0}' is compact convex and \
             F='{1}' is continuous, then the VI has at least one solution. \
             For unbounded C, a coercivity condition ⟨F(x),x-x₀⟩/||x||→∞ suffices.",
            self.domain, self.operator
        )
    }
}
/// Subdifferential of a convex or nonsmooth function at a point.
pub struct Subdifferential {
    /// The point at which the subdifferential is evaluated.
    pub point: Vec<f64>,
}
impl Subdifferential {
    /// Create a new Subdifferential at the given point.
    pub fn new(point: Vec<f64>) -> Self {
        Self { point }
    }
    /// Returns whether the subdifferential is nonempty at interior points.
    pub fn is_nonempty_at_interior(&self) -> bool {
        true
    }
    /// Optimality condition: 0 ∈ ∂f(x) iff x is a minimiser.
    pub fn optimality_condition(&self) -> String {
        let coords = self
            .point
            .iter()
            .map(|c| format!("{:.3}", c))
            .collect::<Vec<_>>()
            .join(", ");
        format!(
            "Fermat's rule: x = ({}) is a minimiser of f iff 0 ∈ ∂f(x). \
             For a smooth convex f this reduces to ∇f(x) = 0.",
            coords
        )
    }
}
580/// Approximate computation of the Mordukhovich (limiting) subdifferential
581/// via a finite-horizon sequence of Fréchet subgradients.
582pub struct MordukhovichSubdiffApprox {
583    /// Finite-difference step for Fréchet subgradient computation.
584    pub h: f64,
585    /// Number of perturbation directions to sample.
586    pub num_dirs: usize,
587    /// Regularisation for approximate subdifferential.
588    pub tolerance: f64,
589}
590impl MordukhovichSubdiffApprox {
591    /// Create a new approximation with given parameters.
592    pub fn new(h: f64, num_dirs: usize, tolerance: f64) -> Self {
593        Self {
594            h,
595            num_dirs,
596            tolerance,
597        }
598    }
599    /// Compute an approximate Fréchet subgradient at x via central differences.
600    pub fn frechet_subgradient(&self, f: impl Fn(&[f64]) -> f64, x: &[f64]) -> Vec<f64> {
601        clarke_gradient_approx(f, x, self.h)
602    }
603    /// Compute approximate limiting subdifferential by sampling nearby Fréchet subgradients.
604    /// Returns a finite set of approximate subgradients at perturbed points near x.
605    pub fn limiting_subdiff_approx(
606        &self,
607        f: impl Fn(&[f64]) -> f64 + Copy,
608        x: &[f64],
609    ) -> Vec<Vec<f64>> {
610        let n = x.len();
611        let mut result = Vec::new();
612        result.push(self.frechet_subgradient(f, x));
613        let mut lcg = crate::random_matrix_theory::Lcg::new(42);
614        for _ in 0..self.num_dirs {
615            let mut y = x.to_vec();
616            for yi in y.iter_mut() {
617                *yi += (lcg.next_f64() - 0.5) * 2.0 * self.tolerance;
618            }
619            let g = self.frechet_subgradient(f, &y);
620            let dist: f64 = (0..n).map(|i| (y[i] - x[i]).powi(2)).sum::<f64>().sqrt();
621            if dist < self.tolerance {
622                result.push(g);
623            }
624        }
625        result
626    }
627    /// Check whether zero is approximately in the subdifferential (stationarity condition).
628    pub fn is_stationary(&self, f: impl Fn(&[f64]) -> f64 + Copy, x: &[f64]) -> bool {
629        let g = self.frechet_subgradient(f, x);
630        let norm: f64 = g.iter().map(|gi| gi * gi).sum::<f64>().sqrt();
631        norm < self.tolerance
632    }
633}
/// A set-valued map (multifunction) F: X ⇒ Y.
pub struct SetValuedMap {
    /// The domain points.
    pub domain: Vec<f64>,
    /// The values (each a subset of Y, represented as a Vec).
    pub values: Vec<Vec<f64>>,
}
impl SetValuedMap {
    /// Create a new SetValuedMap.
    pub fn new(domain: Vec<f64>, values: Vec<Vec<f64>>) -> Self {
        Self { domain, values }
    }
    /// Returns true if the map appears upper semicontinuous at each domain point
    /// (every value set is nonempty; vacuously true for an empty map).
    pub fn upper_semicontinuous(&self) -> bool {
        self.values.iter().all(|v| !v.is_empty())
    }
    /// Returns whether all value sets are closed (represented as non-empty),
    /// requiring at least one value set to exist.
    pub fn closed_values(&self) -> bool {
        match self.values.as_slice() {
            [] => false,
            vs => vs.iter().all(|v| !v.is_empty()),
        }
    }
}
/// Moreau envelope (Moreau-Yosida regularisation) of f with parameter λ.
pub struct MoreauEnvelope {
    /// Name or formula of the original function f.
    pub f: String,
    /// Regularisation parameter λ > 0.
    pub lambda: f64,
}
impl MoreauEnvelope {
    /// Build a Moreau envelope descriptor from the function label and λ.
    pub fn new(f: impl Into<String>, lambda: f64) -> Self {
        let f = f.into();
        Self { f, lambda }
    }
    /// Returns whether the Moreau envelope is smooth (always C^{1,1}).
    pub fn is_smooth(&self) -> bool {
        true
    }
    /// Gradient formula for the Moreau envelope.
    pub fn gradient_formula(&self) -> String {
        format!(
            "∇(e_{{λ{0}}})(x) = (x - prox_{{λ{0}}}(x)) / {1:.4}. \
             This gradient is (1/λ)-Lipschitz, making e_{{λf}} a smooth approximation of {0}.",
            self.f, self.lambda
        )
    }
}
/// Configuration for mountain pass detection.
#[derive(Debug, Clone)]
pub struct MountainPassConfig {
    /// First base point a (distinct from b).
    pub a: Vec<f64>,
    /// Second base point b.
    pub b: Vec<f64>,
    /// Estimated mountain pass level c = inf_{γ} max_{t} f(γ(t)).
    pub estimated_pass_level: f64,
    /// Number of sample points on path.
    pub path_samples: usize,
}
impl MountainPassConfig {
    /// Create a new mountain pass configuration; the pass level starts at 0.0
    /// until `estimate_pass_level` is called.
    /// (Fix: the base points are moved in directly instead of being cloned.)
    pub fn new(a: Vec<f64>, b: Vec<f64>, path_samples: usize) -> Self {
        Self {
            a,
            b,
            estimated_pass_level: 0.0,
            path_samples,
        }
    }
    /// Estimate the mountain pass level via the straight-line path γ(t) = (1-t)a + tb,
    /// storing and returning max_t f(γ(t)) over the sampled grid.
    ///
    /// Fix: `path_samples == 0` previously produced t = 0/0 = NaN and returned
    /// -∞; it is now clamped to 1 sample segment (evaluating f at a and b).
    pub fn estimate_pass_level(&mut self, f: &impl Fn(&[f64]) -> f64) -> f64 {
        let samples = self.path_samples.max(1);
        let dim = self.a.len();
        let mut max_val = f64::NEG_INFINITY;
        for k in 0..=samples {
            let t = k as f64 / samples as f64;
            let point: Vec<f64> = (0..dim)
                .map(|i| (1.0 - t) * self.a[i] + t * self.b[i])
                .collect();
            max_val = max_val.max(f(&point));
        }
        self.estimated_pass_level = max_val;
        max_val
    }
    /// Check mountain pass geometry: f(a) < c and f(b) < c, where c is the
    /// stored pass level (which, by construction, some path point achieves).
    pub fn has_mountain_pass_geometry(&self, f: &impl Fn(&[f64]) -> f64) -> bool {
        f(&self.a) < self.estimated_pass_level && f(&self.b) < self.estimated_pass_level
    }
}
/// A convex function with domain and lower-semicontinuity properties.
pub struct ConvexFunction {
    /// Domain description (e.g., "R^n", "[0,1]").
    pub domain: String,
    /// Whether the function is proper (not identically +∞, never -∞).
    pub is_proper: bool,
    /// Whether the function is lower-semicontinuous (closed).
    pub is_lsc: bool,
}
impl ConvexFunction {
    /// Create a new ConvexFunction.
    pub fn new(domain: impl Into<String>, is_proper: bool, is_lsc: bool) -> Self {
        let domain = domain.into();
        Self { domain, is_proper, is_lsc }
    }
    /// Returns a description of the subdifferential of this function.
    pub fn subdifferential(&self) -> String {
        match (self.is_proper, self.is_lsc) {
            (true, true) => format!(
                "For a proper lsc convex function on '{}': the subdifferential ∂f(x) is \
                 nonempty on the interior of dom(f), and f = f** (Fenchel-Moreau theorem).",
                self.domain
            ),
            _ => format!(
                "For a convex function on '{}': subdifferential ∂f(x) = {{ v | ∀y, f(y) ≥ f(x) + ⟨v,y-x⟩ }}.",
                self.domain
            ),
        }
    }
    /// Returns a description of the conjugate (Fenchel dual) function.
    pub fn conjugate_fn(&self) -> String {
        format!(
            "Fenchel conjugate f*(v) = sup_{{x ∈ {}}} (⟨v,x⟩ - f(x)). \
             For proper lsc convex f: f** = f (Fenchel-Moreau, 1949).",
            self.domain
        )
    }
}
770/// Constructive implementation of Ekeland's variational principle.
771/// Finds xε such that f(xε) ≤ f(x₀) - ε·d(x₀, xε) and 0 ∈ ∂_{ε}f(xε).
772pub struct EkelandPrinciple {
773    /// Regularisation parameter ε > 0.
774    pub epsilon: f64,
775    /// Maximum iterations for approximate minimiser search.
776    pub max_iter: usize,
777    /// Step size for descent.
778    pub step: f64,
779}
780impl EkelandPrinciple {
781    /// Create a new EkelandPrinciple solver.
782    pub fn new(epsilon: f64, max_iter: usize) -> Self {
783        Self {
784            epsilon,
785            max_iter,
786            step: epsilon * 0.1,
787        }
788    }
789    /// Find an approximate Ekeland minimiser starting from x₀.
790    pub fn find_minimiser(&self, f: impl Fn(&[f64]) -> f64, x0: &[f64]) -> Vec<f64> {
791        ekeland_approximate_minimiser(f, x0, self.epsilon, self.max_iter)
792    }
793    /// Verify the Ekeland condition: f(xε) ≤ f(x) + ε·‖x - xε‖ for all x in sample set.
794    pub fn verify_ekeland_condition(
795        &self,
796        f: impl Fn(&[f64]) -> f64,
797        x_eps: &[f64],
798        sample_points: &[Vec<f64>],
799    ) -> bool {
800        let f_eps = f(x_eps);
801        for x in sample_points {
802            let fx = f(x);
803            let dist: f64 = x_eps
804                .iter()
805                .zip(x.iter())
806                .map(|(a, b)| (a - b).powi(2))
807                .sum::<f64>()
808                .sqrt();
809            if fx + self.epsilon * dist < f_eps - 1e-12 {
810                return false;
811            }
812        }
813        true
814    }
815    /// Estimate the near-minimality: how close f(xε) is to the true infimum (approximated).
816    pub fn near_minimality_gap(&self, f: impl Fn(&[f64]) -> f64, x_eps: &[f64], x0: &[f64]) -> f64 {
817        let f_eps = f(x_eps);
818        let f_x0 = f(x0);
819        f_x0 - f_eps
820    }
821}
822/// Represents the proximal operator prox_{λf}(v) = argmin_x f(x) + ||x-v||^2/(2λ).
823#[allow(dead_code)]
824pub struct ProximalOperator {
825    /// Regularization parameter λ > 0.
826    pub lambda: f64,
827    /// Type of function: L1, L2, indicator.
828    pub fn_type: ProxFnType,
829}
830#[allow(dead_code)]
831impl ProximalOperator {
832    /// Create a new proximal operator.
833    pub fn new(lambda: f64, fn_type: ProxFnType) -> Self {
834        ProximalOperator { lambda, fn_type }
835    }
836    /// Apply the proximal operator to a scalar v.
837    pub fn apply_scalar(&self, v: f64) -> f64 {
838        match &self.fn_type {
839            ProxFnType::L1Norm => {
840                let threshold = self.lambda;
841                v.signum() * (v.abs() - threshold).max(0.0)
842            }
843            ProxFnType::L2NormSquared => v / (1.0 + 2.0 * self.lambda),
844            ProxFnType::NonNegativeOrtHant => v.max(0.0),
845            ProxFnType::L2Ball { radius } => v.clamp(-radius, *radius),
846        }
847    }
848    /// Apply the proximal operator component-wise to a vector v.
849    pub fn apply_vector(&self, v: &[f64]) -> Vec<f64> {
850        match &self.fn_type {
851            ProxFnType::L2Ball { radius } => {
852                let norm: f64 = v.iter().map(|&x| x * x).sum::<f64>().sqrt();
853                if norm <= *radius {
854                    v.to_vec()
855                } else {
856                    v.iter().map(|&x| x * radius / norm).collect()
857                }
858            }
859            _ => v.iter().map(|&vi| self.apply_scalar(vi)).collect(),
860        }
861    }
862    /// Moreau decomposition: prox_{λf}(v) + λ prox_{f*/λ}(v/λ) = v.
863    /// Returns both prox_{λf}(v) and the dual part.
864    pub fn moreau_decomposition(&self, v: f64) -> (f64, f64) {
865        let p = self.apply_scalar(v);
866        let dual = v - p;
867        (p, dual)
868    }
869}
/// Checks metric regularity properties of a mapping at a point.
///
/// Probes the mapping numerically with random perturbations around the
/// reference point rather than computing anything in closed form.
pub struct MetricRegularityChecker {
    /// Perturbation radius for numerical testing (per-coordinate half-width).
    pub radius: f64,
    /// Number of test perturbations.
    pub num_tests: usize,
    /// Tolerance for regularity bound checks (also used as the preimage cutoff).
    pub tol: f64,
}
879impl MetricRegularityChecker {
880    /// Create a new checker.
881    pub fn new(radius: f64, num_tests: usize, tol: f64) -> Self {
882        Self {
883            radius,
884            num_tests,
885            tol,
886        }
887    }
888    /// Estimate the metric regularity modulus κ of F at (x₀, y₀):
889    /// κ ≈ sup_{(x,y) near (x₀,y₀)} d(x, F⁻¹(y)) / d(y, F(x)).
890    ///
891    /// Here F is represented as `f: &[f64] -> Vec<f64>` and F⁻¹(y) is approximated
892    /// by the preimage computed via nearest-point search.
893    pub fn estimate_modulus(
894        &self,
895        f: impl Fn(&[f64]) -> Vec<f64>,
896        x0: &[f64],
897        y0: &[f64],
898        domain_samples: &[Vec<f64>],
899    ) -> f64 {
900        let mut max_ratio = 0.0_f64;
901        let mut lcg = crate::random_matrix_theory::Lcg::new(7);
902        for _ in 0..self.num_tests {
903            let x_pert: Vec<f64> = x0
904                .iter()
905                .map(|&xi| xi + (lcg.next_f64() - 0.5) * 2.0 * self.radius)
906                .collect();
907            let fx_pert = f(&x_pert);
908            let d_y_fx: f64 = y0
909                .iter()
910                .zip(fx_pert.iter())
911                .map(|(a, b)| (a - b).powi(2))
912                .sum::<f64>()
913                .sqrt();
914            if d_y_fx < 1e-14 {
915                continue;
916            }
917            let d_x_finv = domain_samples
918                .iter()
919                .filter(|s| {
920                    let fs = f(s);
921                    let d: f64 = y0
922                        .iter()
923                        .zip(fs.iter())
924                        .map(|(a, b)| (a - b).powi(2))
925                        .sum::<f64>()
926                        .sqrt();
927                    d < self.tol
928                })
929                .map(|s| {
930                    x_pert
931                        .iter()
932                        .zip(s.iter())
933                        .map(|(a, b)| (a - b).powi(2))
934                        .sum::<f64>()
935                        .sqrt()
936                })
937                .fold(f64::INFINITY, f64::min);
938            if d_x_finv.is_finite() {
939                let ratio = d_x_finv / d_y_fx;
940                if ratio > max_ratio {
941                    max_ratio = ratio;
942                }
943            }
944        }
945        max_ratio
946    }
947    /// Check whether a constraint qualification (MFCQ-like) holds by verifying
948    /// that the constraint gradients at x span a "rich" direction set.
949    pub fn check_constraint_qualification(
950        constraints: &[impl Fn(&[f64]) -> f64],
951        x: &[f64],
952        h: f64,
953    ) -> bool {
954        if constraints.is_empty() {
955            return true;
956        }
957        let mut grads: Vec<Vec<f64>> = Vec::new();
958        for c in constraints {
959            let cx = c(x);
960            if cx.abs() < h {
961                let g = clarke_gradient_approx(|pt| c(pt), x, h);
962                grads.push(g);
963            }
964        }
965        if grads.is_empty() {
966            return true;
967        }
968        for i in 0..grads.len() {
969            for j in (i + 1)..grads.len() {
970                let ni: f64 = grads[i].iter().map(|v| v * v).sum::<f64>().sqrt();
971                let nj: f64 = grads[j].iter().map(|v| v * v).sum::<f64>().sqrt();
972                if ni < 1e-12 || nj < 1e-12 {
973                    return false;
974                }
975                let dot: f64 = grads[i]
976                    .iter()
977                    .zip(grads[j].iter())
978                    .map(|(a, b)| a * b)
979                    .sum();
980                let cos = (dot / (ni * nj)).abs();
981                if cos > 1.0 - 1e-6 {
982                    return false;
983                }
984            }
985        }
986        true
987    }
988    /// Check whether a set of points satisfies a quasiconvexity condition:
989    /// f on the line segment [x,y] is ≤ max(f(x), f(y)) for sample points.
990    pub fn check_quasiconvex(
991        f: impl Fn(&[f64]) -> f64,
992        x: &[f64],
993        y: &[f64],
994        num_samples: usize,
995    ) -> bool {
996        let max_val = f(x).max(f(y));
997        for k in 1..num_samples {
998            let t = k as f64 / num_samples as f64;
999            let z: Vec<f64> = x
1000                .iter()
1001                .zip(y.iter())
1002                .map(|(xi, yi)| (1.0 - t) * xi + t * yi)
1003                .collect();
1004            if f(&z) > max_val + 1e-12 {
1005                return false;
1006            }
1007        }
1008        true
1009    }
1010}