amari_optimization/constrained.rs

//! # Constrained Optimization
//!
//! This module implements constrained optimization algorithms for problems with
//! equality, inequality, and bound constraints.
//!
//! ## Mathematical Background
//!
//! Constrained optimization solves problems of the form:
//!
//! ```text
//! minimize f(x)
//! subject to:
//!   g_i(x) ≤ 0,  i = 1, ..., m  (inequality constraints)
//!   h_j(x) = 0,  j = 1, ..., p  (equality constraints)
//!   x ∈ X                       (bound constraints)
//! ```
//!
//! ## Key Algorithms
//!
//! - **Penalty Methods**: Transform constrained problems into unconstrained ones
//! - **Barrier Methods**: Use barrier functions to enforce inequality constraints
//! - **Lagrange Multipliers**: Direct handling of KKT conditions
//! - **Augmented Lagrangian**: Combine penalties with Lagrange multipliers
//! - **Sequential Quadratic Programming (SQP)**: Newton-like methods for constraints
//!
//! ## KKT Conditions
//!
//! At a local minimum x* (under standard constraint qualifications), the
//! Karush-Kuhn-Tucker conditions hold:
//! ```text
//! ∇f(x*) + Σᵢ λᵢ ∇gᵢ(x*) + Σⱼ μⱼ ∇hⱼ(x*) = 0
//! gᵢ(x*) ≤ 0,  λᵢ ≥ 0,  λᵢ gᵢ(x*) = 0  (complementary slackness)
//! hⱼ(x*) = 0
//! ```
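//!
//! ## Example
//!
//! A minimal usage sketch, assuming a user type `MyProblem` (hypothetical, shown
//! only for illustration) that implements [`ConstrainedObjective`]; the phantom
//! type parameters mirror those used in the tests at the bottom of this module.
//!
//! ```ignore
//! use crate::phantom::{Constrained, Euclidean, NonConvex, SingleObjective};
//!
//! let optimizer =
//!     ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::AugmentedLagrangian);
//! let problem: OptimizationProblem<2, Constrained, SingleObjective, NonConvex, Euclidean> =
//!     OptimizationProblem::new();
//!
//! // `MyProblem` is any `ConstrainedObjective<f64>` implementation.
//! let result = optimizer
//!     .optimize(&problem, &MyProblem, vec![0.0, 0.0])
//!     .expect("optimization failed");
//! println!("x* = {:?}, f(x*) = {}", result.solution, result.objective_value);
//! ```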

use crate::phantom::{Constrained, OptimizationProblem};
use crate::OptimizationResult;

use num_traits::Float;
use std::marker::PhantomData;

/// Configuration for constrained optimization algorithms
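///
/// # Example
///
/// A minimal sketch of overriding a few fields while keeping the remaining
/// defaults (the values shown are illustrative, not recommendations):
///
/// ```ignore
/// let config = ConstrainedConfig::<f64> {
///     max_iterations: 200,
///     constraint_tolerance: 1e-6,
///     ..ConstrainedConfig::default()
/// };
/// ```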
#[derive(Clone, Debug)]
pub struct ConstrainedConfig<T: Float> {
    /// Maximum number of outer iterations
    pub max_iterations: usize,
    /// Maximum number of inner iterations (for penalty/barrier methods)
    pub max_inner_iterations: usize,
    /// Tolerance for constraint violations
    pub constraint_tolerance: T,
    /// Tolerance for optimality conditions
    pub optimality_tolerance: T,
    /// Initial penalty parameter
    pub initial_penalty: T,
    /// Penalty growth factor
    pub penalty_growth: T,
    /// Initial barrier parameter
    pub initial_barrier: T,
    /// Barrier reduction factor
    pub barrier_reduction: T,
    /// Line search tolerance
    pub line_search_tolerance: T,
    /// Enable feasibility restoration
    pub enable_feasibility_restoration: bool,
}

impl<T: Float> Default for ConstrainedConfig<T> {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            max_inner_iterations: 500, // Modest inner-iteration budget per subproblem
            constraint_tolerance: T::from(1e-4).unwrap(), // Relatively loose constraint tolerance
            optimality_tolerance: T::from(1e-4).unwrap(), // Relatively loose optimality tolerance
            initial_penalty: T::from(1.0).unwrap(),
            penalty_growth: T::from(10.0).unwrap(),
            initial_barrier: T::from(0.1).unwrap(), // Small initial barrier parameter
            barrier_reduction: T::from(0.5).unwrap(), // Gradual barrier reduction
            line_search_tolerance: T::from(1e-4).unwrap(),
            enable_feasibility_restoration: true,
        }
    }
}

/// Results from constrained optimization
#[derive(Clone, Debug)]
pub struct ConstrainedResult<T: Float> {
    /// Optimal solution
    pub solution: Vec<T>,
    /// Optimal objective value
    pub objective_value: T,
    /// Lagrange multipliers for inequality constraints
    pub lambda: Vec<T>,
    /// Lagrange multipliers for equality constraints
    pub mu: Vec<T>,
    /// Final constraint violations
    pub constraint_violations: Vec<T>,
    /// Number of iterations performed
    pub iterations: usize,
    /// Convergence status
    pub converged: bool,
    /// KKT error (measure of optimality)
    pub kkt_error: T,
}

/// Trait for constrained objective functions
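///
/// # Example
///
/// A minimal sketch of an implementation for the one-dimensional problem
/// "minimize (x - 2)² subject to x ≤ 1", i.e. g(x) = x - 1 ≤ 0. The type name
/// `HalfLine` is hypothetical and used only for illustration:
///
/// ```ignore
/// struct HalfLine;
///
/// impl ConstrainedObjective<f64> for HalfLine {
///     fn evaluate(&self, x: &[f64]) -> f64 { (x[0] - 2.0).powi(2) }
///     fn gradient(&self, x: &[f64]) -> Vec<f64> { vec![2.0 * (x[0] - 2.0)] }
///     fn inequality_constraints(&self, x: &[f64]) -> Vec<f64> { vec![x[0] - 1.0] }
///     fn equality_constraints(&self, _x: &[f64]) -> Vec<f64> { vec![] }
///     fn inequality_jacobian(&self, _x: &[f64]) -> Vec<Vec<f64>> { vec![vec![1.0]] }
///     fn equality_jacobian(&self, _x: &[f64]) -> Vec<Vec<f64>> { vec![] }
///     fn variable_bounds(&self) -> Vec<(f64, f64)> { vec![(-10.0, 10.0)] }
///     fn num_inequality_constraints(&self) -> usize { 1 }
///     fn num_equality_constraints(&self) -> usize { 0 }
///     fn num_variables(&self) -> usize { 1 }
/// }
/// ```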
pub trait ConstrainedObjective<T: Float> {
    /// Evaluate the objective function
    fn evaluate(&self, x: &[T]) -> T;

    /// Compute the gradient of the objective function
    fn gradient(&self, x: &[T]) -> Vec<T>;

    /// Compute the Hessian of the objective function (optional)
    fn hessian(&self, _x: &[T]) -> Option<Vec<Vec<T>>> {
        None
    }

    /// Evaluate inequality constraints g(x) ≤ 0
    fn inequality_constraints(&self, x: &[T]) -> Vec<T>;

    /// Evaluate equality constraints h(x) = 0
    fn equality_constraints(&self, x: &[T]) -> Vec<T>;

    /// Jacobian of inequality constraints
    fn inequality_jacobian(&self, x: &[T]) -> Vec<Vec<T>>;

    /// Jacobian of equality constraints
    fn equality_jacobian(&self, x: &[T]) -> Vec<Vec<T>>;

    /// Get variable bounds (lower, upper)
    fn variable_bounds(&self) -> Vec<(T, T)>;

    /// Number of inequality constraints
    fn num_inequality_constraints(&self) -> usize;

    /// Number of equality constraints
    fn num_equality_constraints(&self) -> usize;

    /// Number of variables
    fn num_variables(&self) -> usize;
}

/// Penalty method types
#[derive(Clone, Copy, Debug)]
pub enum PenaltyMethod {
    /// Exterior penalty method
    Exterior,
    /// Interior penalty method (barrier)
    Interior,
    /// Augmented Lagrangian method
    AugmentedLagrangian,
}

/// Constrained optimization solver
#[derive(Clone, Debug)]
pub struct ConstrainedOptimizer<T: Float> {
    config: ConstrainedConfig<T>,
    method: PenaltyMethod,
    _phantom: PhantomData<T>,
}

impl<T: Float> ConstrainedOptimizer<T> {
    /// Create a new constrained optimizer
    pub fn new(config: ConstrainedConfig<T>, method: PenaltyMethod) -> Self {
        Self {
            config,
            method,
            _phantom: PhantomData,
        }
    }

    /// Create optimizer with default configuration
    pub fn with_default_config(method: PenaltyMethod) -> Self {
        Self::new(ConstrainedConfig::default(), method)
    }

    /// Solve constrained optimization problem
    pub fn optimize<
        const DIM: usize,
        O: crate::phantom::ObjectiveState,
        V: crate::phantom::ConvexityState,
        M: crate::phantom::ManifoldState,
    >(
        &self,
        _problem: &OptimizationProblem<DIM, Constrained, O, V, M>,
        objective: &impl ConstrainedObjective<T>,
        initial_point: Vec<T>,
    ) -> OptimizationResult<ConstrainedResult<T>> {
        match self.method {
            PenaltyMethod::Exterior => self.exterior_penalty_method(objective, initial_point),
            PenaltyMethod::Interior => self.interior_penalty_method(objective, initial_point),
            PenaltyMethod::AugmentedLagrangian => {
                self.augmented_lagrangian_method(objective, initial_point)
            }
        }
    }

    /// Exterior penalty method
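    ///
    /// Minimizes the penalized objective
    /// `f(x) + ρ · (Σᵢ max(0, gᵢ(x))² + Σⱼ hⱼ(x)²)`,
    /// multiplying ρ by `penalty_growth` after each outer iteration, until the
    /// constraint violation and the change in the objective fall below their
    /// respective tolerances.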
    fn exterior_penalty_method(
        &self,
        objective: &impl ConstrainedObjective<T>,
        mut x: Vec<T>,
    ) -> OptimizationResult<ConstrainedResult<T>> {
        let mut penalty_param = self.config.initial_penalty;
        let mut best_objective = T::infinity();

        for iteration in 0..self.config.max_iterations {
            // Define penalty function
            let penalty_objective = |vars: &[T]| -> T {
                let obj = objective.evaluate(vars);
                let mut penalty = T::zero();

                // Inequality constraints penalty: max(0, g(x))²
                let ineq_constraints = objective.inequality_constraints(vars);
                for &g in &ineq_constraints {
                    if g > T::zero() {
                        penalty = penalty + g * g;
                    }
                }

                // Equality constraints penalty: h(x)²
                let eq_constraints = objective.equality_constraints(vars);
                for &h in &eq_constraints {
                    penalty = penalty + h * h;
                }

                obj + penalty_param * penalty
            };

            // Solve unconstrained subproblem
            x = self.solve_unconstrained_subproblem(&penalty_objective, x)?;

            // Check convergence
            let current_objective = objective.evaluate(&x);
            let constraint_violation = self.compute_constraint_violation(objective, &x);

            if constraint_violation < self.config.constraint_tolerance {
                if (current_objective - best_objective).abs() < self.config.optimality_tolerance {
                    // Converged
                    let lambda = self.estimate_lagrange_multipliers(objective, &x, penalty_param);
                    let kkt_error = self.compute_kkt_error(objective, &x, &lambda.0, &lambda.1);

                    return Ok(ConstrainedResult {
                        solution: x.clone(),
                        objective_value: current_objective,
                        lambda: lambda.0,
                        mu: lambda.1,
                        constraint_violations: self.get_constraint_violations(objective, &x),
                        iterations: iteration + 1,
                        converged: true,
                        kkt_error,
                    });
                }
                best_objective = current_objective;
            }

            // Update penalty parameter
            penalty_param = penalty_param * self.config.penalty_growth;
        }

        Err(crate::OptimizationError::ConvergenceFailure {
            iterations: self.config.max_iterations,
        })
    }

    /// Interior penalty method (barrier method)
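    ///
    /// Minimizes the barrier objective `f(x) - μ · Σᵢ ln(-gᵢ(x))` over strictly
    /// feasible iterates, multiplying μ by `barrier_reduction` after each outer
    /// iteration. Trial points that violate an inequality constraint receive a
    /// large finite penalty in place of the undefined logarithmic term.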
    fn interior_penalty_method(
        &self,
        objective: &impl ConstrainedObjective<T>,
        mut x: Vec<T>,
    ) -> OptimizationResult<ConstrainedResult<T>> {
        // First, find a feasible starting point
        if !self.is_feasible(objective, &x) {
            x = self.find_feasible_point(objective, x)?;
        }

        let mut barrier_param = self.config.initial_barrier;

        for iteration in 0..self.config.max_iterations {
            // Define barrier function
            let barrier_objective = |vars: &[T]| -> T {
                let obj = objective.evaluate(vars);
                let mut barrier = T::zero();

                // Logarithmic barrier for inequality constraints: -log(-g(x))
                let ineq_constraints = objective.inequality_constraints(vars);
                for &g in &ineq_constraints {
                    if g >= T::zero() {
                        // Infeasible: apply a large finite penalty instead of an infinite barrier value
                        let penalty = T::from(1000.0).unwrap() * (T::one() + g);
                        return obj + penalty;
                    }
                    // Use -log(-g) for feasible points
                    barrier = barrier - (-g).ln();
                }

                obj + barrier_param * barrier
            };

            // Solve unconstrained subproblem
            x = self.solve_unconstrained_subproblem(&barrier_objective, x)?;

            // Check convergence
            let constraint_violation = self.compute_constraint_violation(objective, &x);

            if constraint_violation < self.config.constraint_tolerance {
                let current_objective = objective.evaluate(&x);
                let lambda = self.estimate_lagrange_multipliers(objective, &x, barrier_param);
                let kkt_error = self.compute_kkt_error(objective, &x, &lambda.0, &lambda.1);

                if kkt_error < self.config.optimality_tolerance {
                    return Ok(ConstrainedResult {
                        solution: x.clone(),
                        objective_value: current_objective,
                        lambda: lambda.0,
                        mu: lambda.1,
                        constraint_violations: self.get_constraint_violations(objective, &x),
                        iterations: iteration + 1,
                        converged: true,
                        kkt_error,
                    });
                }
            }

            // Update barrier parameter
            barrier_param = barrier_param * self.config.barrier_reduction;
        }

        Err(crate::OptimizationError::ConvergenceFailure {
            iterations: self.config.max_iterations,
        })
    }

    /// Augmented Lagrangian method
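    ///
    /// Minimizes the augmented Lagrangian
    /// `f(x) + Σᵢ [λᵢ gᵢ(x) + (ρ/2) max(0, gᵢ(x))²] + Σⱼ [μⱼ hⱼ(x) + (ρ/2) hⱼ(x)²]`,
    /// then updates the multipliers via `λᵢ ← max(0, λᵢ + ρ gᵢ(x))` and
    /// `μⱼ ← μⱼ + ρ hⱼ(x)`, increasing ρ only while the constraint violation
    /// remains significant.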
    fn augmented_lagrangian_method(
        &self,
        objective: &impl ConstrainedObjective<T>,
        mut x: Vec<T>,
    ) -> OptimizationResult<ConstrainedResult<T>> {
        let n_ineq = objective.num_inequality_constraints();
        let n_eq = objective.num_equality_constraints();

        let mut lambda = vec![T::zero(); n_ineq]; // Inequality multipliers
        let mut mu = vec![T::zero(); n_eq]; // Equality multipliers
        let mut penalty_param = self.config.initial_penalty;

        for iteration in 0..self.config.max_iterations {
            // Define augmented Lagrangian
            let aug_lag_objective = |vars: &[T]| -> T {
                let obj = objective.evaluate(vars);
                let ineq_constraints = objective.inequality_constraints(vars);
                let eq_constraints = objective.equality_constraints(vars);

                let mut augmented = obj;

                // Inequality constraints: λᵢ gᵢ(x) + (ρ/2) max(0, gᵢ(x))²
                for (i, &g) in ineq_constraints.iter().enumerate() {
                    let max_g = if g > T::zero() { g } else { T::zero() };
                    augmented = augmented
                        + lambda[i] * g
                        + penalty_param / T::from(2.0).unwrap() * max_g * max_g;
                }

                // Equality constraints: μⱼ hⱼ(x) + (ρ/2) hⱼ(x)²
                for (j, &h) in eq_constraints.iter().enumerate() {
                    augmented =
                        augmented + mu[j] * h + penalty_param / T::from(2.0).unwrap() * h * h;
                }

                augmented
            };

            // Solve unconstrained subproblem
            x = self.solve_unconstrained_subproblem(&aug_lag_objective, x)?;

            // Update multipliers
            let ineq_constraints = objective.inequality_constraints(&x);
            let eq_constraints = objective.equality_constraints(&x);

            // Update inequality multipliers: λᵢ ← max(0, λᵢ + ρ gᵢ(x))
            for (i, &g) in ineq_constraints.iter().enumerate() {
                lambda[i] = (lambda[i] + penalty_param * g).max(T::zero());
            }

            // Update equality multipliers: μⱼ ← μⱼ + ρ hⱼ(x)
            for (j, &h) in eq_constraints.iter().enumerate() {
                mu[j] = mu[j] + penalty_param * h;
            }

            // Check convergence
            let constraint_violation = self.compute_constraint_violation(objective, &x);
            let kkt_error = self.compute_kkt_error(objective, &x, &lambda, &mu);

            if constraint_violation < self.config.constraint_tolerance
                && kkt_error < self.config.optimality_tolerance
            {
                return Ok(ConstrainedResult {
                    solution: x.clone(),
                    objective_value: objective.evaluate(&x),
                    lambda,
                    mu,
                    constraint_violations: self.get_constraint_violations(objective, &x),
                    iterations: iteration + 1,
                    converged: true,
                    kkt_error,
                });
            }

            // Update penalty parameter if needed
            if constraint_violation > self.config.constraint_tolerance * T::from(0.1).unwrap() {
                penalty_param = penalty_param * self.config.penalty_growth;
            }
        }

        Err(crate::OptimizationError::ConvergenceFailure {
            iterations: self.config.max_iterations,
        })
    }

    /// Solve unconstrained subproblem using gradient descent
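    ///
    /// Performs plain gradient descent with central-difference numerical
    /// gradients and a backtracking line search, running for at most
    /// `max_inner_iterations` steps or until the gradient norm drops below
    /// `optimality_tolerance`.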
    fn solve_unconstrained_subproblem(
        &self,
        objective: &impl Fn(&[T]) -> T,
        mut x: Vec<T>,
    ) -> OptimizationResult<Vec<T>> {
        let learning_rate = T::from(0.01).unwrap();
        let eps = T::from(1e-8).unwrap();

        for _ in 0..self.config.max_inner_iterations {
            // Compute numerical gradient
            let gradient = self.numerical_gradient(objective, &x, eps);
            let grad_norm = self.vector_norm(&gradient);

            if grad_norm < self.config.optimality_tolerance {
                break;
            }

            // Gradient descent step with line search
            let step_size = self.line_search(objective, &x, &gradient, learning_rate);
            for (i, &grad) in gradient.iter().enumerate() {
                x[i] = x[i] - step_size * grad;
            }
        }

        Ok(x)
    }

    /// Numerical gradient computation
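    ///
    /// Central differences: `∂f/∂xᵢ ≈ (f(x + ε eᵢ) - f(x - ε eᵢ)) / (2ε)`.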
    fn numerical_gradient(&self, f: &impl Fn(&[T]) -> T, x: &[T], eps: T) -> Vec<T> {
        let mut gradient = vec![T::zero(); x.len()];

        for i in 0..x.len() {
            let mut x_plus = x.to_vec();
            let mut x_minus = x.to_vec();

            x_plus[i] = x_plus[i] + eps;
            x_minus[i] = x_minus[i] - eps;

            gradient[i] = (f(&x_plus) - f(&x_minus)) / (T::from(2.0).unwrap() * eps);
        }

        gradient
    }

    /// Simple line search
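    ///
    /// Backtracking search along `-direction`: the step is halved until the
    /// objective decreases, falling back to 1% of the initial step if no
    /// decrease is found within 20 halvings.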
    fn line_search(&self, f: &impl Fn(&[T]) -> T, x: &[T], direction: &[T], initial_step: T) -> T {
        let mut step = initial_step;
        let current_value = f(x);

        for _ in 0..20 {
            let mut x_trial = x.to_vec();
            for (i, &dir) in direction.iter().enumerate() {
                x_trial[i] = x_trial[i] - step * dir;
            }

            if f(&x_trial) < current_value {
                return step;
            }

            step = step * T::from(0.5).unwrap();
        }

        initial_step * T::from(0.01).unwrap()
    }

    /// Check if point is feasible
    fn is_feasible(&self, objective: &impl ConstrainedObjective<T>, x: &[T]) -> bool {
        let ineq_constraints = objective.inequality_constraints(x);
        let eq_constraints = objective.equality_constraints(x);

        let ineq_feasible = ineq_constraints.iter().all(|&g| g <= T::zero());
        let eq_feasible = eq_constraints
            .iter()
            .all(|&h| h.abs() < self.config.constraint_tolerance);

        ineq_feasible && eq_feasible
    }

    /// Find a feasible starting point using a simple feasibility-restoration heuristic
    fn find_feasible_point(
        &self,
        objective: &impl ConstrainedObjective<T>,
        mut x: Vec<T>,
    ) -> OptimizationResult<Vec<T>> {
        // First project to bounds
        let bounds = objective.variable_bounds();
        for (i, &(lower, upper)) in bounds.iter().enumerate() {
            x[i] = x[i].max(lower).min(upper);
        }

        // Simple feasibility restoration: move towards constraint center
        for _iteration in 0..50 {
            let ineq_constraints = objective.inequality_constraints(&x);

            // If already feasible, return
            if ineq_constraints.iter().all(|&g| g <= T::zero()) {
                return Ok(x);
            }

            // Move towards satisfying constraints
            let mut adjustment = vec![T::zero(); x.len()];
            let mut adjustment_made = false;

            for (i, &g) in ineq_constraints.iter().enumerate() {
                if g > T::zero() {
                    // This constraint is violated, try to reduce violation
                    let jacobian = objective.inequality_jacobian(&x);
                    if i < jacobian.len() {
                        let step_size = T::from(0.1).unwrap() * g;
                        for (j, &grad_g_ij) in jacobian[i].iter().enumerate() {
                            if j < adjustment.len() {
                                adjustment[j] = adjustment[j] - step_size * grad_g_ij;
                                adjustment_made = true;
                            }
                        }
                    }
                }
            }

            if !adjustment_made {
                // Try a simple heuristic: move towards origin or bounds center
                let center: Vec<T> = bounds
                    .iter()
                    .map(|(lower, upper)| (*lower + *upper) / T::from(2.0).unwrap())
                    .collect();

                for (i, (&center_i, &x_i)) in center.iter().zip(x.iter()).enumerate() {
                    adjustment[i] = (center_i - x_i) * T::from(0.1).unwrap();
                }
            }

            // Apply adjustment
            for (i, adj) in adjustment.iter().enumerate() {
                x[i] = x[i] + *adj;
                // Keep within bounds
                let (lower, upper) = bounds[i];
                x[i] = x[i].max(lower).min(upper);
            }
        }

        // If we can't find a strictly feasible point, return a point that's close to feasible
        // This allows the barrier method to at least start
        Ok(x)
    }

    /// Compute constraint violation
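    ///
    /// Returns `sqrt(Σᵢ max(0, gᵢ(x))² + Σⱼ hⱼ(x)²)`, the 2-norm of the
    /// combined violation vector.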
    fn compute_constraint_violation(&self, objective: &impl ConstrainedObjective<T>, x: &[T]) -> T {
        let ineq_constraints = objective.inequality_constraints(x);
        let eq_constraints = objective.equality_constraints(x);

        let mut violation = T::zero();

        for &g in &ineq_constraints {
            if g > T::zero() {
                violation = violation + g * g;
            }
        }

        for &h in &eq_constraints {
            violation = violation + h * h;
        }

        violation.sqrt()
    }

    /// Get constraint violations as vector
    fn get_constraint_violations(
        &self,
        objective: &impl ConstrainedObjective<T>,
        x: &[T],
    ) -> Vec<T> {
        let mut violations = objective.inequality_constraints(x);
        violations.extend(objective.equality_constraints(x));
        violations
    }

    /// Estimate Lagrange multipliers
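    ///
    /// Uses the crude penalty-based estimates `λᵢ ≈ ρ · max(0, gᵢ(x))` and
    /// `μⱼ ≈ ρ · hⱼ(x)`, where ρ is the current penalty (or barrier) parameter.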
    fn estimate_lagrange_multipliers(
        &self,
        objective: &impl ConstrainedObjective<T>,
        x: &[T],
        penalty: T,
    ) -> (Vec<T>, Vec<T>) {
        let ineq_constraints = objective.inequality_constraints(x);
        let eq_constraints = objective.equality_constraints(x);

        // Simple estimation based on penalty parameter
        let lambda: Vec<T> = ineq_constraints
            .iter()
            .map(|&g| {
                if g > T::zero() {
                    penalty * g
                } else {
                    T::zero()
                }
            })
            .collect();

        let mu: Vec<T> = eq_constraints.iter().map(|&h| penalty * h).collect();

        (lambda, mu)
    }

    /// Compute KKT error
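    ///
    /// Returns `‖∇f(x) + Σᵢ λᵢ ∇gᵢ(x) + Σⱼ μⱼ ∇hⱼ(x)‖`, the stationarity
    /// residual of the KKT conditions (constraint feasibility is measured
    /// separately).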
    fn compute_kkt_error(
        &self,
        objective: &impl ConstrainedObjective<T>,
        x: &[T],
        lambda: &[T],
        mu: &[T],
    ) -> T {
        let obj_grad = objective.gradient(x);
        let ineq_jac = objective.inequality_jacobian(x);
        let eq_jac = objective.equality_jacobian(x);

        // Compute ∇L = ∇f + Σλᵢ∇gᵢ + Σμⱼ∇hⱼ
        let mut lagrangian_grad = obj_grad;

        for (i, lambda_i) in lambda.iter().enumerate() {
            for (j, &grad_g_ij) in ineq_jac[i].iter().enumerate() {
                lagrangian_grad[j] = lagrangian_grad[j] + *lambda_i * grad_g_ij;
            }
        }

        for (i, mu_i) in mu.iter().enumerate() {
            for (j, &grad_h_ij) in eq_jac[i].iter().enumerate() {
                lagrangian_grad[j] = lagrangian_grad[j] + *mu_i * grad_h_ij;
            }
        }

        self.vector_norm(&lagrangian_grad)
    }

    /// Compute vector norm
    fn vector_norm(&self, v: &[T]) -> T {
        v.iter()
            .map(|&x| x * x)
            .fold(T::zero(), |acc, x| acc + x)
            .sqrt()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Simple test problem: minimize (x-1)² + (y-1)² subject to x + y ≤ 1
    struct SimpleConstrained;

    impl ConstrainedObjective<f64> for SimpleConstrained {
        fn evaluate(&self, x: &[f64]) -> f64 {
            (x[0] - 1.0).powi(2) + (x[1] - 1.0).powi(2)
        }

        fn gradient(&self, x: &[f64]) -> Vec<f64> {
            vec![2.0 * (x[0] - 1.0), 2.0 * (x[1] - 1.0)]
        }

        fn inequality_constraints(&self, x: &[f64]) -> Vec<f64> {
            vec![x[0] + x[1] - 1.0] // x + y ≤ 1
        }

        fn equality_constraints(&self, _x: &[f64]) -> Vec<f64> {
            vec![] // No equality constraints
        }

        fn inequality_jacobian(&self, _x: &[f64]) -> Vec<Vec<f64>> {
            vec![vec![1.0, 1.0]] // ∇(x + y - 1) = [1, 1]
        }

        fn equality_jacobian(&self, _x: &[f64]) -> Vec<Vec<f64>> {
            vec![] // No equality constraints
        }

        fn variable_bounds(&self) -> Vec<(f64, f64)> {
            vec![(-10.0, 10.0), (-10.0, 10.0)]
        }

        fn num_inequality_constraints(&self) -> usize {
            1
        }

        fn num_equality_constraints(&self) -> usize {
            0
        }

        fn num_variables(&self) -> usize {
            2
        }
    }

    #[test]
    fn test_constrained_optimizer_creation() {
        let config = ConstrainedConfig::<f64>::default();
        let _optimizer = ConstrainedOptimizer::new(config, PenaltyMethod::Exterior);

        let _default_optimizer =
            ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::AugmentedLagrangian);
    }

    #[test]
    fn test_exterior_penalty_method() {
        let problem = SimpleConstrained;
        let optimizer = ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::Exterior);

        use crate::phantom::{Euclidean, NonConvex, SingleObjective};
        let opt_problem: OptimizationProblem<
            2,
            Constrained,
            SingleObjective,
            NonConvex,
            Euclidean,
        > = OptimizationProblem::new();

        let initial_point = vec![0.0, 0.0];
        let result = optimizer.optimize(&opt_problem, &problem, initial_point);

        assert!(result.is_ok());
        let solution = result.unwrap();

        // Solution should be near (0.5, 0.5) which satisfies x + y = 1
        assert!(solution.solution[0] + solution.solution[1] <= 1.1); // Allow some tolerance
        assert!(solution.iterations > 0);
    }

    #[test]
    fn test_augmented_lagrangian_method() {
        let problem = SimpleConstrained;
        let optimizer =
            ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::AugmentedLagrangian);

        use crate::phantom::{Euclidean, NonConvex, SingleObjective};
        let opt_problem: OptimizationProblem<
            2,
            Constrained,
            SingleObjective,
            NonConvex,
            Euclidean,
        > = OptimizationProblem::new();

        let initial_point = vec![0.0, 0.0];
        let result = optimizer.optimize(&opt_problem, &problem, initial_point);

        assert!(result.is_ok());
        let solution = result.unwrap();

        // Check constraint satisfaction
        assert!(solution.solution[0] + solution.solution[1] <= 1.1);
        assert!(solution.iterations > 0);
    }

    #[test]
    fn test_constraint_violation_computation() {
        let problem = SimpleConstrained;
        let optimizer = ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::Exterior);

        let feasible_point = vec![0.25, 0.25]; // Satisfies x + y ≤ 1
        let infeasible_point = vec![1.0, 1.0]; // Violates x + y ≤ 1

        let violation_feasible = optimizer.compute_constraint_violation(&problem, &feasible_point);
        let violation_infeasible =
            optimizer.compute_constraint_violation(&problem, &infeasible_point);

        assert!(violation_feasible < 0.1); // Should be small or zero
        assert!(violation_infeasible > 0.5); // Should be significant
    }

    #[test]
    fn test_lagrange_multiplier_estimation() {
        let problem = SimpleConstrained;
        let optimizer = ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::Exterior);

        let point = vec![0.5, 0.5]; // On the constraint boundary
        let (lambda, mu) = optimizer.estimate_lagrange_multipliers(&problem, &point, 1.0);

        assert_eq!(lambda.len(), 1); // One inequality constraint
        assert_eq!(mu.len(), 0); // No equality constraints
    }

    #[test]
    fn test_feasibility_check() {
        let problem = SimpleConstrained;
        let optimizer = ConstrainedOptimizer::<f64>::with_default_config(PenaltyMethod::Interior);

        let feasible_point = vec![0.25, 0.25];
        let infeasible_point = vec![1.0, 1.0];

        assert!(optimizer.is_feasible(&problem, &feasible_point));
        assert!(!optimizer.is_feasible(&problem, &infeasible_point));
    }
}