// non_convex_opt/lib.rs — crate root.

1use nalgebra::{allocator::Allocator, DefaultAllocator, Dim, OMatrix, OVector, U1};
2
3pub mod algorithms;
4pub mod utils;
5use crate::utils::config::{AlgConf, Config, OptConf};
6
7use crate::algorithms::{
8    adam::adam_opt::Adam, cem::cem_opt::CEM, cma_es::cma_es_opt::CMAES,
9    continuous_genetic::cga::CGA, differential_evolution::de::DE, grasp::grasp_opt::GRASP,
10    limited_memory_bfgs::lbfgs::LBFGS, multi_swarm::mspo::MSPO, nelder_mead::nm::NelderMead,
11    parallel_tempering::pt::PT, sg_ascent::sga::SGAscent,
12    simulated_annealing::sa::SimulatedAnnealing, tabu_search::tabu::TabuSearch, tpe::tpe_opt::TPE,
13};
14
15use crate::utils::opt_prob::{
16    BooleanConstraintFunction, FloatNumber as FloatNum, ObjectiveFunction, OptProb,
17    OptimizationAlgorithm, State,
18};
19
/// Final outcome of an optimization run.
///
/// Type parameters: `T` is the scalar (floating-point) type, `N` the
/// population-size dimension, and `D` the decision-variable dimension.
pub struct Result<T, N, D>
where
    T: FloatNum,
    D: Dim,
    N: Dim,
    DefaultAllocator: Allocator<D> + Allocator<N, D> + Allocator<N>,
{
    /// Best decision vector found over the whole run.
    pub best_x: OVector<T, D>,
    /// Objective value attained at `best_x`.
    pub best_f: T,
    /// Population at the final iteration (one individual per row,
    /// matching how `NonConvexOpt::new` seeds single-point algorithms
    /// from `init_pop.row(0)`).
    pub final_pop: OMatrix<T, N, D>,
    /// Fitness value of each individual in `final_pop`.
    pub final_fitness: OVector<T, N>,
    /// Per-individual boolean constraint results
    /// (presumably `true` = feasible — confirm against `OptProb`).
    pub final_constraints: OVector<bool, N>,
    /// Iteration count at which the run converged/terminated.
    pub convergence_iter: usize,
}
34
/// Unified driver wrapping one boxed optimization algorithm together with
/// the convergence bookkeeping shared by all algorithms.
///
/// NOTE(review): the `Send + Sync` bounds on the nalgebra storage types are
/// presumably required so the boxed algorithm can run its internals in
/// parallel — confirm against the `OptimizationAlgorithm` implementations.
pub struct NonConvexOpt<T, N, D>
where
    T: FloatNum,
    N: Dim,
    D: Dim,
    OVector<bool, N>: Send + Sync,
    OVector<bool, D>: Send + Sync,
    OMatrix<bool, U1, N>: Send + Sync,
    OVector<T, D>: Send + Sync,
    OVector<T, N>: Send + Sync,
    OMatrix<T, D, D>: Send + Sync,
    OMatrix<T, N, D>: Send + Sync,
    OMatrix<T, U1, D>: Send + Sync,
    DefaultAllocator: Allocator<D>
        + Allocator<N>
        + Allocator<N, D>
        + Allocator<D, D>
        + Allocator<U1, D>
        + Allocator<U1, N>,
{
    /// Concrete algorithm selected from the configuration in `new`.
    pub alg: Box<dyn OptimizationAlgorithm<T, N, D>>,
    /// Optimizer-level settings (tolerances, `max_iter`, stagnation window).
    pub conf: OptConf,
    /// Set once `check_convergence` fires; `step` becomes a no-op afterwards.
    pub converged: bool,
    // Sliding history of the best fitness after each step; used for
    // stagnation detection and trimmed in `step` to bound memory.
    best_fitness_history: Vec<T>,
}
60
61impl<T, N, D> NonConvexOpt<T, N, D>
62where
63    T: FloatNum + nalgebra::RealField + std::iter::Sum,
64    N: Dim,
65    D: Dim + nalgebra::DimSub<nalgebra::Const<1>>,
66    OVector<bool, N>: Send + Sync,
67    OVector<bool, D>: Send + Sync,
68    OMatrix<bool, U1, N>: Send + Sync,
69    OVector<T, D>: Send + Sync,
70    OVector<T, N>: Send + Sync,
71    OMatrix<T, D, D>: Send + Sync,
72    OMatrix<T, N, D>: Send + Sync,
73    OMatrix<T, U1, D>: Send + Sync,
74    DefaultAllocator: Allocator<D>
75        + Allocator<N>
76        + Allocator<N, D>
77        + Allocator<D, D>
78        + Allocator<U1, D>
79        + Allocator<U1, N>
80        + Allocator<<D as nalgebra::DimSub<nalgebra::Const<1>>>::Output>,
81{
82    pub fn new<
83        F: ObjectiveFunction<T, D> + 'static,
84        G: BooleanConstraintFunction<T, D> + 'static,
85    >(
86        conf: Config,
87        init_pop: OMatrix<T, N, D>,
88        obj_f: F,
89        constr_f: Option<G>,
90        seed: u64,
91    ) -> Self {
92        let opt_prob = OptProb::new(
93            Box::new(obj_f),
94            match constr_f {
95                Some(constr_f) => Some(Box::new(constr_f)),
96                None => None,
97            },
98        );
99
100        let alg: Box<dyn OptimizationAlgorithm<T, N, D>> = match conf.alg_conf {
101            AlgConf::CGA(cga_conf) => Box::new(CGA::new(
102                cga_conf,
103                init_pop,
104                opt_prob,
105                conf.opt_conf.max_iter,
106                seed,
107            )),
108            AlgConf::PT(pt_conf) => Box::new(PT::new(
109                pt_conf,
110                init_pop,
111                opt_prob,
112                conf.opt_conf.max_iter,
113                seed,
114            )),
115            AlgConf::TS(ts_conf) => Box::new(TabuSearch::new(
116                ts_conf,
117                init_pop.row(0).into_owned(),
118                opt_prob,
119                seed,
120            )),
121            AlgConf::Adam(adam_conf) => {
122                Box::new(Adam::new(adam_conf, init_pop.row(0).into_owned(), opt_prob))
123            }
124            AlgConf::GRASP(grasp_conf) => Box::new(GRASP::new(
125                grasp_conf,
126                init_pop.row(0).into_owned(),
127                opt_prob,
128                seed,
129            )),
130            AlgConf::SGA(sga_conf) => Box::new(SGAscent::new(
131                sga_conf,
132                init_pop.row(0).into_owned(),
133                opt_prob,
134                seed,
135            )),
136            AlgConf::NM(nm_conf) => Box::new(NelderMead::new(nm_conf, init_pop, opt_prob, seed)),
137            AlgConf::LBFGS(lbfgs_conf) => Box::new(LBFGS::new(
138                lbfgs_conf,
139                init_pop.row(0).into_owned(),
140                opt_prob,
141            )),
142            AlgConf::MSPO(mspo_conf) => Box::new(MSPO::new(
143                mspo_conf,
144                init_pop,
145                opt_prob,
146                conf.opt_conf.max_iter,
147                seed,
148            )),
149            AlgConf::SA(sa_conf) => Box::new(SimulatedAnnealing::new(
150                sa_conf,
151                init_pop.row(0).into_owned(),
152                opt_prob,
153                conf.opt_conf.stagnation_window,
154                seed,
155            )),
156            AlgConf::DE(de_conf) => Box::new(DE::new(de_conf, init_pop, opt_prob, seed)),
157            AlgConf::CMAES(cma_es_conf) => {
158                Box::new(CMAES::new(cma_es_conf, init_pop, opt_prob, seed))
159            }
160            AlgConf::TPE(tpe_conf) => Box::new(TPE::new(
161                tpe_conf,
162                init_pop,
163                opt_prob,
164                conf.opt_conf.stagnation_window,
165                seed,
166            )),
167            AlgConf::CEM(cem_conf) => Box::new(CEM::new(
168                cem_conf,
169                init_pop,
170                opt_prob,
171                conf.opt_conf.stagnation_window,
172                seed,
173            )),
174        };
175
176        Self {
177            alg,
178            conf: conf.opt_conf,
179            converged: false,
180            best_fitness_history: Vec::new(),
181        }
182    }
183
184    fn check_convergence(&self, current_best: T, previous_best: T) -> bool {
185        let atol = T::from_f64(self.conf.atol).unwrap();
186        let rtol = T::from_f64(self.conf.rtol).unwrap();
187        let min_iter_for_rtol =
188            (self.conf.max_iter as f64 * self.conf.rtol_max_iter_fraction).floor() as usize;
189
190        let improvement = current_best - previous_best;
191        let abs_improvement = num_traits::Float::abs(improvement);
192
193        let abs_converged = abs_improvement < atol && self.alg.state().iter > min_iter_for_rtol;
194
195        let rel_converged = if num_traits::Float::abs(current_best) > T::from_f64(1e-10).unwrap() {
196            abs_improvement / num_traits::Float::abs(current_best) <= rtol
197        } else {
198            abs_improvement <= atol
199        };
200
201        // Check for stagnation: no significant improvement over a window of iterations
202        let stagnation_converged = if self.best_fitness_history.len() >= self.conf.stagnation_window
203            && self.alg.state().iter > min_iter_for_rtol
204        {
205            let window_start = self.best_fitness_history.len() - self.conf.stagnation_window;
206            let oldest_in_window = self.best_fitness_history[window_start];
207            let stagnation_improvement = current_best - oldest_in_window;
208            let abs_stagnation_improvement = num_traits::Float::abs(stagnation_improvement);
209
210            abs_stagnation_improvement < atol
211                || (num_traits::Float::abs(current_best) > T::from_f64(1e-10).unwrap()
212                    && abs_stagnation_improvement / num_traits::Float::abs(current_best) <= rtol)
213        } else {
214            false
215        };
216
217        let converged = abs_converged
218            || (rel_converged && self.alg.state().iter > min_iter_for_rtol)
219            || stagnation_converged;
220
221        if converged {
222            let reason = if abs_converged {
223                "absolute tolerance"
224            } else if rel_converged && self.alg.state().iter > min_iter_for_rtol {
225                "relative tolerance"
226            } else if stagnation_converged {
227                "stagnation"
228            } else {
229                "unknown"
230            };
231
232            println!(
233                "Converged in {} iterations due to {} (improvement: {:.2e})",
234                self.alg.state().iter,
235                reason,
236                improvement.to_f64().unwrap_or(0.0)
237            );
238        }
239
240        converged
241    }
242
243    pub fn step(&mut self) {
244        if self.converged {
245            return;
246        }
247
248        let previous_best_fitness = self.alg.state().best_f;
249        self.alg.step();
250        let current_best_fitness = self.alg.state().best_f;
251        self.best_fitness_history.push(current_best_fitness);
252
253        // Avoid unbounded memory growth
254        let max_history = self.conf.stagnation_window * 2;
255        if self.best_fitness_history.len() > max_history {
256            let excess = self.best_fitness_history.len() - max_history;
257            self.best_fitness_history.drain(0..excess);
258        }
259
260        self.converged = self.check_convergence(current_best_fitness, previous_best_fitness);
261    }
262
263    pub fn run(&mut self) -> &State<T, N, D> {
264        while !self.converged && self.alg.state().iter < self.conf.max_iter {
265            self.step();
266        }
267        self.alg.state()
268    }
269
270    pub fn get_best_individual(&self) -> OVector<T, D> {
271        self.alg.state().best_x.clone()
272    }
273
274    pub fn get_population(&self) -> OMatrix<T, N, D> {
275        self.alg.state().pop.clone()
276    }
277
278    pub fn get_pt_replica_populations(&self) -> Option<Vec<OMatrix<T, N, D>>> {
279        self.alg.get_replica_populations()
280    }
281
282    pub fn get_pt_replica_temperatures(&self) -> Option<Vec<T>> {
283        self.alg.get_replica_temperatures()
284    }
285}