math_audio_differential_evolution/
mod.rs

1//! Differential Evolution optimization library.
2//!
3//! This crate provides a Rust implementation of the Differential Evolution (DE)
4//! algorithm, a population-based stochastic optimizer for continuous optimization
5//! problems. The implementation is inspired by SciPy's differential_evolution.
6//!
7//! # Features
8//!
9//! - Multiple mutation strategies (Best1, Rand1, CurrentToBest1, etc.)
10//! - Binomial and exponential crossover
11//! - Adaptive parameter control
12//! - Parallel population evaluation
13//! - Constraint handling via penalty methods
14//! - Latin Hypercube initialization
15//!
16//! # Example
17//!
18//! ```rust
19//! use math_audio_differential_evolution::{differential_evolution, DEConfigBuilder};
20//!
21//! // Minimize the sphere function: f(x) = sum(x_i^2)
22//! let bounds = vec![(-5.0, 5.0), (-5.0, 5.0)];
23//! let config = DEConfigBuilder::new()
24//!     .maxiter(100)
25//!     .seed(42)
26//!     .build()
27//!     .expect("invalid config");
28//!
29//! let result = differential_evolution(
30//!     &|x| x.iter().map(|&xi| xi * xi).sum(),
31//!     &bounds,
32//!     config,
33//! ).expect("optimization should succeed");
34//!
35//! assert!(result.fun < 1e-6);
36//! ```
37#![doc = include_str!("../README.md")]
38#![doc = include_str!("../REFERENCES.md")]
39#![warn(missing_docs)]
40
41pub mod error;
42pub use error::{DEError, Result};
43
44use std::fmt;
45use std::str::FromStr;
46use std::sync::Arc;
47
48use ndarray::{Array1, Array2, Zip};
49use rand::rngs::StdRng;
50use rand::{Rng, SeedableRng};
51use std::time::Instant;
52
53/// Linear penalty stacking utilities for combining multiple constraints.
54pub mod stack_linear_penalty;
55
56/// Integer variable handling for mixed-integer optimization.
57pub mod apply_integrality;
58/// Wrapper Local Search (WLS) application for local refinement.
59pub mod apply_wls;
60
61/// Utilities for selecting distinct random indices from a population.
62pub mod distinct_indices;
63/// Latin Hypercube Sampling initialization strategy.
64pub mod init_latin_hypercube;
65/// Random uniform initialization strategy.
66pub mod init_random;
67
68/// Adaptive mutation strategy with dynamic parameter control.
69pub mod mutant_adaptive;
70/// Best/1 mutation strategy: uses best individual plus one difference vector.
71pub mod mutant_best1;
72/// Best/2 mutation strategy: uses best individual plus two difference vectors.
73pub mod mutant_best2;
74/// Current-to-best/1 mutation: blends current with best individual.
75pub mod mutant_current_to_best1;
76/// Rand/1 mutation strategy: uses random individual plus one difference vector.
77pub mod mutant_rand1;
78/// Rand/2 mutation strategy: uses random individual plus two difference vectors.
79pub mod mutant_rand2;
80/// Rand-to-best/1 mutation: blends random with best individual.
81pub mod mutant_rand_to_best1;
82
83/// Binomial (uniform) crossover implementation.
84pub mod crossover_binomial;
85/// Exponential crossover implementation.
86pub mod crossover_exponential;
87
88/// Comprehensive tests for DE strategies and features.
89#[cfg(test)]
90mod de_tests;
91/// Main differential evolution algorithm implementation.
92pub mod differential_evolution;
93/// Registry of standard test functions for benchmarking.
94pub mod function_registry;
95/// Internal helper functions for DE implementation.
96pub mod impl_helpers;
97/// Metadata-driven optimization examples and tests.
98pub mod metadata;
99/// Parallel population evaluation support.
100pub mod parallel_eval;
101/// Optimization recording for analysis and debugging.
102pub mod recorder;
103/// Recorded optimization wrapper for testing.
104pub mod run_recorded;
105pub use differential_evolution::differential_evolution;
106pub use parallel_eval::ParallelConfig;
107pub use recorder::{OptimizationRecord, OptimizationRecorder};
108pub use run_recorded::run_recorded_differential_evolution;
109
// Type aliases to reduce complexity
/// Scalar constraint function: maps a candidate vector to a single constraint
/// value. `Arc + Send + Sync` so it can be cloned into per-component closures
/// and shared across evaluation threads.
pub type ScalarConstraintFn = Arc<dyn Fn(&Array1<f64>) -> f64 + Send + Sync>;
/// Vector constraint function: maps a candidate vector to one value per
/// constraint component (used by `NonlinearConstraintHelper`).
pub type VectorConstraintFn = Arc<dyn Fn(&Array1<f64>) -> Array1<f64> + Send + Sync>;
/// Penalty entry: (constraint function, penalty weight).
pub type PenaltyTuple = (ScalarConstraintFn, f64);
/// Per-iteration callback: receives progress info and may stop the run early
/// by returning [`CallbackAction::Stop`].
pub type CallbackFn = Box<dyn FnMut(&DEIntermediate) -> CallbackAction>;
119
120pub(crate) fn argmin(v: &Array1<f64>) -> (usize, f64) {
121    let mut best_i = 0usize;
122    let mut best_v = v[0];
123    for (i, &val) in v.iter().enumerate() {
124        if val < best_v {
125            best_v = val;
126            best_i = i;
127        }
128    }
129    (best_i, best_v)
130}
131
/// Differential Evolution mutation/crossover strategy.
///
/// Strategy names follow the pattern `{mutation}{n}{crossover}`:
/// - `mutation`: how the base vector is chosen (Best, Rand, CurrentToBest, RandToBest, Adaptive)
/// - `n`: how many difference vectors are added (1 or 2)
/// - `crossover`: Bin = binomial, Exp = exponential
#[derive(Debug, Clone, Copy)]
pub enum Strategy {
    /// Best/1/Bin: Best individual + 1 difference vector, binomial crossover
    Best1Bin,
    /// Best/1/Exp: Best individual + 1 difference vector, exponential crossover
    Best1Exp,
    /// Rand/1/Bin: Random individual + 1 difference vector, binomial crossover
    Rand1Bin,
    /// Rand/1/Exp: Random individual + 1 difference vector, exponential crossover
    Rand1Exp,
    /// Rand/2/Bin: Random individual + 2 difference vectors, binomial crossover
    Rand2Bin,
    /// Rand/2/Exp: Random individual + 2 difference vectors, exponential crossover
    Rand2Exp,
    /// Current-to-best/1/Bin: Blend of current and best + 1 diff, binomial crossover
    CurrentToBest1Bin,
    /// Current-to-best/1/Exp: Blend of current and best + 1 diff, exponential crossover
    CurrentToBest1Exp,
    /// Best/2/Bin: Best individual + 2 difference vectors, binomial crossover
    Best2Bin,
    /// Best/2/Exp: Best individual + 2 difference vectors, exponential crossover
    Best2Exp,
    /// Rand-to-best/1/Bin: Blend of random and best + 1 diff, binomial crossover
    RandToBest1Bin,
    /// Rand-to-best/1/Exp: Blend of random and best + 1 diff, exponential crossover
    RandToBest1Exp,
    /// Adaptive mutation with binomial crossover (SAM approach)
    AdaptiveBin,
    /// Adaptive mutation with exponential crossover
    AdaptiveExp,
}

impl FromStr for Strategy {
    type Err = String;

    /// Parses a strategy name case-insensitively. Hyphenated and underscored
    /// spellings are accepted for the "to-best" and adaptive families, and a
    /// bare mutation name (e.g. `"rand1"`) defaults to binomial crossover.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Exhaustive alias table: lowercase spelling -> strategy. The set of
        // accepted spellings is exactly the set the match-based parser took.
        const ALIASES: &[(&str, Strategy)] = &[
            ("best1bin", Strategy::Best1Bin),
            ("best1", Strategy::Best1Bin),
            ("best1exp", Strategy::Best1Exp),
            ("rand1bin", Strategy::Rand1Bin),
            ("rand1", Strategy::Rand1Bin),
            ("rand1exp", Strategy::Rand1Exp),
            ("rand2bin", Strategy::Rand2Bin),
            ("rand2", Strategy::Rand2Bin),
            ("rand2exp", Strategy::Rand2Exp),
            ("currenttobest1bin", Strategy::CurrentToBest1Bin),
            ("current-to-best1bin", Strategy::CurrentToBest1Bin),
            ("current_to_best1bin", Strategy::CurrentToBest1Bin),
            ("currenttobest1exp", Strategy::CurrentToBest1Exp),
            ("current-to-best1exp", Strategy::CurrentToBest1Exp),
            ("current_to_best1exp", Strategy::CurrentToBest1Exp),
            ("best2bin", Strategy::Best2Bin),
            ("best2", Strategy::Best2Bin),
            ("best2exp", Strategy::Best2Exp),
            ("randtobest1bin", Strategy::RandToBest1Bin),
            ("rand-to-best1bin", Strategy::RandToBest1Bin),
            ("rand_to_best1bin", Strategy::RandToBest1Bin),
            ("randtobest1exp", Strategy::RandToBest1Exp),
            ("rand-to-best1exp", Strategy::RandToBest1Exp),
            ("rand_to_best1exp", Strategy::RandToBest1Exp),
            ("adaptivebin", Strategy::AdaptiveBin),
            ("adaptive-bin", Strategy::AdaptiveBin),
            ("adaptive_bin", Strategy::AdaptiveBin),
            ("adaptive", Strategy::AdaptiveBin),
            ("adaptiveexp", Strategy::AdaptiveExp),
            ("adaptive-exp", Strategy::AdaptiveExp),
            ("adaptive_exp", Strategy::AdaptiveExp),
        ];

        let key = s.to_lowercase();
        ALIASES
            .iter()
            .find(|(alias, _)| *alias == key)
            .map(|&(_, strategy)| strategy)
            .ok_or_else(|| format!("unknown strategy: {}", s))
    }
}
203
/// Crossover type: how trial-vector components are mixed with the target.
#[derive(Debug, Clone, Copy, Default)]
pub enum Crossover {
    /// Binomial (uniform) crossover: each component swaps independently (the default).
    #[default]
    Binomial,
    /// Exponential crossover: a contiguous run of components is swapped.
    Exponential,
}
213
/// Mutation setting: either a fixed factor, a uniform range (dithering), or adaptive.
#[derive(Debug, Clone, Copy)]
pub enum Mutation {
    /// Fixed mutation factor F in [0, 2).
    Factor(f64),
    /// Dithering range [min, max) with 0 <= min < max <= 2; a fresh F is
    /// drawn uniformly from the range for each generation.
    Range {
        /// Minimum mutation factor.
        min: f64,
        /// Maximum mutation factor.
        max: f64,
    },
    /// Adaptive mutation factor using Cauchy distribution.
    Adaptive {
        /// Initial mutation factor before adaptation.
        initial_f: f64,
    },
}

impl Default for Mutation {
    /// Default is full-range dithering: F drawn uniformly from [0, 2).
    fn default() -> Self {
        // A dead statement `let _ = Mutation::Factor(0.8);` (leftover from an
        // earlier default) was removed; the returned value is unchanged.
        Mutation::Range { min: 0.0, max: 2.0 }
    }
}
239
240impl Mutation {
241    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
242        match *self {
243            Mutation::Factor(f) => f,
244            Mutation::Range { min, max } => rng.random_range(min..max),
245            Mutation::Adaptive { initial_f } => initial_f, // Will be overridden by adaptive logic
246        }
247    }
248
249    /// Sample from Cauchy distribution for adaptive mutation (F parameter)
250    #[allow(dead_code)]
251    fn sample_cauchy<R: Rng + ?Sized>(&self, f_m: f64, _scale: f64, rng: &mut R) -> f64 {
252        // Simplified version using normal random for now
253        let perturbation = (rng.random::<f64>() - 0.5) * 0.2; // Small perturbation
254        (f_m + perturbation).clamp(0.0, 2.0) // Clamp to valid range
255    }
256}
257
/// Initialization scheme for the population.
#[derive(Debug, Clone, Copy, Default)]
pub enum Init {
    /// Latin Hypercube Sampling for better space coverage (the default).
    #[default]
    LatinHypercube,
    /// Uniform random initialization within the bounds.
    Random,
}
267
/// Whether best updates during a generation (we use Deferred only).
#[derive(Debug, Clone, Copy, Default)]
pub enum Updating {
    /// Deferred update: the best member is refreshed only after all trial
    /// vectors of a generation have been evaluated.
    #[default]
    Deferred,
}
275
/// Linear penalty specification: lb <= A x <= ub (component-wise).
///
/// Violations of either bound contribute to the objective scaled by `weight`.
#[derive(Debug, Clone)]
pub struct LinearPenalty {
    /// Constraint matrix A (m x n).
    pub a: Array2<f64>,
    /// Lower bounds vector (m elements, one per row of A).
    pub lb: Array1<f64>,
    /// Upper bounds vector (m elements, one per row of A).
    pub ub: Array1<f64>,
    /// Penalty weight for constraint violations.
    pub weight: f64,
}
288
/// SciPy-like linear constraint helper: lb <= A x <= ub.
///
/// Unlike [`LinearPenalty`], this carries no weight; the weight is supplied
/// when the helper is applied to a config via `apply_to`.
#[derive(Debug, Clone)]
pub struct LinearConstraintHelper {
    /// Constraint matrix A (m x n).
    pub a: Array2<f64>,
    /// Lower bounds vector (m elements).
    pub lb: Array1<f64>,
    /// Upper bounds vector (m elements).
    pub ub: Array1<f64>,
}
299
300impl LinearConstraintHelper {
301    /// Apply helper by merging into DEConfig.linear_penalty (stacking rows if already present)
302    pub fn apply_to(&self, cfg: &mut DEConfig, weight: f64) {
303        use stack_linear_penalty::stack_linear_penalty;
304
305        let new_lp = LinearPenalty {
306            a: self.a.clone(),
307            lb: self.lb.clone(),
308            ub: self.ub.clone(),
309            weight,
310        };
311        match &mut cfg.linear_penalty {
312            Some(existing) => stack_linear_penalty(existing, &new_lp),
313            None => cfg.linear_penalty = Some(new_lp),
314        }
315    }
316}
317
/// SciPy-like nonlinear constraint helper: vector-valued fun(x) with lb <= fun(x) <= ub.
///
/// Applied to a config via `apply_to`, which expands each component into
/// penalty closures.
#[derive(Clone)]
pub struct NonlinearConstraintHelper {
    /// Vector-valued constraint function (one output per constraint component).
    pub fun: VectorConstraintFn,
    /// Lower bounds for each constraint component.
    pub lb: Array1<f64>,
    /// Upper bounds for each constraint component.
    pub ub: Array1<f64>,
}
328
329impl NonlinearConstraintHelper {
330    /// Apply helper by emitting penalty closures per component.
331    /// lb <= f_i(x) <= ub becomes two inequalities: f_i(x)-ub <= 0 and lb - f_i(x) <= 0.
332    /// If lb==ub, emit an equality penalty for f_i(x)-lb.
333    pub fn apply_to(&self, cfg: &mut DEConfig, weight_ineq: f64, weight_eq: f64) {
334        let f = self.fun.clone();
335        let lb = self.lb.clone();
336        let ub = self.ub.clone();
337        let m = lb.len().min(ub.len());
338        for i in 0..m {
339            let l = lb[i];
340            let u = ub[i];
341            let tol = 1e-12 * (l.abs() + u.abs()).max(1.0);
342            if (u - l).abs() <= tol {
343                let fi = f.clone();
344                cfg.penalty_eq.push((
345                    Arc::new(move |x: &Array1<f64>| {
346                        let y = (fi)(x);
347                        y[i] - l
348                    }),
349                    weight_eq,
350                ));
351            } else {
352                let fi_u = f.clone();
353                cfg.penalty_ineq.push((
354                    Arc::new(move |x: &Array1<f64>| {
355                        let y = (fi_u)(x);
356                        y[i] - u
357                    }),
358                    weight_ineq,
359                ));
360                let fi_l = f.clone();
361                cfg.penalty_ineq.push((
362                    Arc::new(move |x: &Array1<f64>| {
363                        let y = (fi_l)(x);
364                        l - y[i]
365                    }),
366                    weight_ineq,
367                ));
368            }
369        }
370    }
371}
372
/// Structures for tracking adaptive parameters
///
/// Per-run mutable state for the self-adaptive parameter scheme; created from
/// an [`AdaptiveConfig`] and updated once per generation.
#[derive(Debug, Clone)]
struct AdaptiveState {
    /// Current F_m parameter for Cauchy distribution (mutation)
    f_m: f64,
    /// Current CR_m parameter for Gaussian distribution (crossover)
    cr_m: f64,
    /// Successful F values from this generation (cleared after each update)
    successful_f: Vec<f64>,
    /// Successful CR values from this generation (cleared after each update)
    successful_cr: Vec<f64>,
    /// Current linearly decreasing weight for adaptive mutation
    current_w: f64,
}
387
388impl AdaptiveState {
389    fn new(config: &AdaptiveConfig) -> Self {
390        Self {
391            f_m: config.f_m,
392            cr_m: config.cr_m,
393            successful_f: Vec::new(),
394            successful_cr: Vec::new(),
395            current_w: config.w_max, // Start with maximum weight
396        }
397    }
398
399    /// Update adaptive parameters based on successful trials
400    fn update(&mut self, config: &AdaptiveConfig, iter: usize, max_iter: usize) {
401        // Update linearly decreasing weight (Equation 19 from the paper)
402        let iter_ratio = iter as f64 / max_iter as f64;
403        self.current_w = config.w_max - (config.w_max - config.w_min) * iter_ratio;
404
405        // Update F_m using Lehmer mean of successful F values
406        if !self.successful_f.is_empty() {
407            let mean_f = self.compute_lehmer_mean(&self.successful_f);
408            self.f_m = (1.0 - config.w_f) * self.f_m + config.w_f * mean_f;
409            self.f_m = self.f_m.clamp(0.2, 1.2);
410        }
411
412        // Update CR_m using arithmetic mean of successful CR values
413        if !self.successful_cr.is_empty() {
414            let mean_cr = self.compute_arithmetic_mean(&self.successful_cr);
415            self.cr_m = (1.0 - config.w_cr) * self.cr_m + config.w_cr * mean_cr;
416            self.cr_m = self.cr_m.clamp(0.1, 0.9);
417        }
418
419        // Clear successful values for next generation
420        self.successful_f.clear();
421        self.successful_cr.clear();
422    }
423
424    /// Compute Lehmer mean for successful F values (p=2)
425    fn compute_lehmer_mean(&self, values: &[f64]) -> f64 {
426        if values.is_empty() {
427            return 0.5; // Default fallback
428        }
429
430        let sum_sq: f64 = values.iter().map(|&x| x * x).sum();
431        let sum: f64 = values.iter().sum();
432
433        if sum > 0.0 {
434            sum_sq / sum
435        } else {
436            0.5 // Fallback if sum is zero
437        }
438    }
439
440    /// Compute arithmetic mean for successful CR values
441    fn compute_arithmetic_mean(&self, values: &[f64]) -> f64 {
442        if values.is_empty() {
443            return 0.5; // Default fallback
444        }
445        values.iter().sum::<f64>() / values.len() as f64
446    }
447
448    /// Record successful parameter values
449    fn record_success(&mut self, f_val: f64, cr_val: f64) {
450        self.successful_f.push(f_val);
451        self.successful_cr.push(cr_val);
452    }
453
454    /// Sample adaptive F parameter using conservative normal distribution
455    fn sample_f<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
456        let u1: f64 = rng.random::<f64>().max(1e-15);
457        let u2: f64 = rng.random::<f64>();
458
459        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
460        let sample = self.f_m + 0.05 * normal;
461
462        sample.clamp(0.3, 1.0)
463    }
464
465    /// Sample adaptive CR parameter using conservative Gaussian distribution
466    fn sample_cr<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
467        let u1: f64 = rng.random::<f64>().max(1e-15);
468        let u2: f64 = rng.random::<f64>();
469
470        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
471        let sample = self.cr_m + 0.05 * normal;
472
473        sample.clamp(0.1, 0.9)
474    }
475}
476
/// Adaptive differential evolution configuration.
///
/// Groups the knobs for the self-adaptive mutation scheme and the optional
/// Wrapper Local Search (WLS) refinement; both features are off by default.
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Enable adaptive mutation strategy
    pub adaptive_mutation: bool,
    /// Enable Wrapper Local Search (WLS)
    pub wls_enabled: bool,
    /// Maximum weight for adaptive mutation (w_max)
    pub w_max: f64,
    /// Minimum weight for adaptive mutation (w_min)
    pub w_min: f64,
    /// Weight factor for F parameter adaptation (between 0.8 and 1.0)
    pub w_f: f64,
    /// Weight factor for CR parameter adaptation (between 0.9 and 1.0)
    pub w_cr: f64,
    /// Initial location parameter for Cauchy distribution (F_m)
    pub f_m: f64,
    /// Initial location parameter for Gaussian distribution (CR_m)
    pub cr_m: f64,
    /// WLS probability (what fraction of population to apply WLS to)
    pub wls_prob: f64,
    /// WLS Cauchy scale parameter
    pub wls_scale: f64,
}

impl Default for AdaptiveConfig {
    /// Both adaptive features disabled; weights/distribution parameters at
    /// their standard starting values.
    fn default() -> Self {
        AdaptiveConfig {
            adaptive_mutation: false,
            wls_enabled: false,
            w_max: 0.9,
            w_min: 0.1,
            w_f: 0.9,
            w_cr: 0.9,
            f_m: 0.5,
            cr_m: 0.6,
            wls_prob: 0.1,
            wls_scale: 0.1,
        }
    }
}
518
/// Polishing configuration using NLopt local optimizer within bounds.
///
/// When enabled, a local optimizer refines the best DE solution after the
/// population search finishes.
#[derive(Debug, Clone)]
pub struct PolishConfig {
    /// Whether polishing is enabled.
    pub enabled: bool,
    /// Local optimizer algorithm name (e.g., "neldermead", "sbplx", "cobyla").
    pub algo: String,
    /// Maximum function evaluations for polishing (e.g., 200*n).
    pub maxeval: usize,
}
529
/// Configuration for the Differential Evolution optimizer.
///
/// This struct holds all parameters controlling the DE algorithm behavior,
/// including population size, mutation/crossover settings, constraints, and
/// convergence criteria. Build one with [`DEConfigBuilder`] (which validates
/// `popsize`) or start from `DEConfig::default()`.
pub struct DEConfig {
    /// Maximum number of generations (iterations).
    pub maxiter: usize,
    /// Population size multiplier (total NP = popsize * n_params_free).
    pub popsize: usize,
    /// Relative tolerance for convergence (population energy std dev).
    pub tol: f64,
    /// Absolute tolerance for convergence on best fitness.
    pub atol: f64,
    /// Mutation factor setting (fixed, dithered range, or adaptive).
    pub mutation: Mutation,
    /// Crossover probability CR in [0, 1].
    pub recombination: f64,
    /// Mutation/crossover strategy.
    pub strategy: Strategy,
    /// Crossover type (binomial or exponential).
    pub crossover: Crossover,
    /// Population initialization scheme.
    pub init: Init,
    /// Update timing (deferred).
    pub updating: Updating,
    /// Optional random seed for reproducibility.
    pub seed: Option<u64>,
    /// Optional integrality mask; true => variable is integer-constrained.
    pub integrality: Option<Vec<bool>>,
    /// Optional initial guess used to replace the best member after init.
    pub x0: Option<Array1<f64>>,
    /// Print objective best at each iteration.
    pub disp: bool,
    /// Optional per-iteration callback (may stop early by returning
    /// [`CallbackAction::Stop`]).
    pub callback: Option<CallbackFn>,
    /// Penalty-based inequality constraints: fc(x) <= 0.
    pub penalty_ineq: Vec<PenaltyTuple>,
    /// Penalty-based equality constraints: h(x) = 0.
    pub penalty_eq: Vec<PenaltyTuple>,
    /// Optional linear constraints treated by penalty: lb <= A x <= ub.
    pub linear_penalty: Option<LinearPenalty>,
    /// Polishing configuration (optional local refinement after DE).
    pub polish: Option<PolishConfig>,
    /// Adaptive differential evolution configuration.
    pub adaptive: AdaptiveConfig,
    /// Parallel evaluation configuration.
    pub parallel: parallel_eval::ParallelConfig,
}
579
580impl Default for DEConfig {
581    fn default() -> Self {
582        Self {
583            maxiter: 1000,
584            popsize: 15,
585            tol: 1e-2,
586            atol: 0.0,
587            mutation: Mutation::default(),
588            recombination: 0.7,
589            strategy: Strategy::Best1Bin,
590            crossover: Crossover::default(),
591            init: Init::default(),
592            updating: Updating::default(),
593            seed: None,
594            integrality: None,
595            x0: None,
596            disp: false,
597            callback: None,
598            penalty_ineq: Vec::new(),
599            penalty_eq: Vec::new(),
600            linear_penalty: None,
601            polish: None,
602            adaptive: AdaptiveConfig::default(),
603            parallel: parallel_eval::ParallelConfig::default(),
604        }
605    }
606}
607
/// Fluent builder for `DEConfig` for ergonomic configuration.
///
/// # Example
///
/// ```rust
/// use math_audio_differential_evolution::{DEConfigBuilder, Strategy, Mutation};
///
/// let config = DEConfigBuilder::new()
///     .maxiter(500)
///     .popsize(20)
///     .strategy(Strategy::Best1Bin)
///     .mutation(Mutation::Factor(0.8))
///     .recombination(0.9)
///     .seed(42)
///     .build()
///     .expect("popsize >= 4, so the config is valid");
/// ```
pub struct DEConfigBuilder {
    // Configuration being assembled; validated and returned by `build()`.
    cfg: DEConfig,
}
impl Default for DEConfigBuilder {
    fn default() -> Self {
        Self::new()
    }
}
632
impl DEConfigBuilder {
    /// Creates a new builder with default configuration.
    pub fn new() -> Self {
        Self {
            cfg: DEConfig::default(),
        }
    }
    /// Sets the maximum number of iterations.
    pub fn maxiter(mut self, v: usize) -> Self {
        self.cfg.maxiter = v;
        self
    }
    /// Sets the population size multiplier.
    ///
    /// This setter never panics and accepts any value; values below 4 are
    /// rejected later by [`build`](Self::build), which returns
    /// `DEError::PopulationTooSmall`, since DE mutation strategies need at
    /// least 4 distinct individuals.
    pub fn popsize(mut self, v: usize) -> Self {
        self.cfg.popsize = v;
        self
    }
    /// Sets the relative convergence tolerance.
    pub fn tol(mut self, v: f64) -> Self {
        self.cfg.tol = v;
        self
    }
    /// Sets the absolute convergence tolerance.
    pub fn atol(mut self, v: f64) -> Self {
        self.cfg.atol = v;
        self
    }
    /// Sets the mutation factor configuration.
    pub fn mutation(mut self, v: Mutation) -> Self {
        self.cfg.mutation = v;
        self
    }
    /// Sets the crossover probability (CR).
    pub fn recombination(mut self, v: f64) -> Self {
        self.cfg.recombination = v;
        self
    }
    /// Sets the mutation/crossover strategy.
    pub fn strategy(mut self, v: Strategy) -> Self {
        self.cfg.strategy = v;
        self
    }
    /// Sets the crossover type.
    pub fn crossover(mut self, v: Crossover) -> Self {
        self.cfg.crossover = v;
        self
    }
    /// Sets the population initialization scheme.
    pub fn init(mut self, v: Init) -> Self {
        self.cfg.init = v;
        self
    }
    /// Sets the random seed for reproducibility.
    pub fn seed(mut self, v: u64) -> Self {
        self.cfg.seed = Some(v);
        self
    }
    /// Sets the integrality mask for mixed-integer optimization.
    pub fn integrality(mut self, v: Vec<bool>) -> Self {
        self.cfg.integrality = Some(v);
        self
    }
    /// Sets an initial guess to seed the population.
    pub fn x0(mut self, v: Array1<f64>) -> Self {
        self.cfg.x0 = Some(v);
        self
    }
    /// Enables/disables progress display.
    pub fn disp(mut self, v: bool) -> Self {
        self.cfg.disp = v;
        self
    }
    /// Sets a per-iteration callback function.
    pub fn callback(mut self, cb: Box<dyn FnMut(&DEIntermediate) -> CallbackAction>) -> Self {
        self.cfg.callback = Some(cb);
        self
    }
    /// Adds an inequality constraint penalty function (fc(x) <= 0) with weight `w`.
    pub fn add_penalty_ineq<FN>(mut self, f: FN, w: f64) -> Self
    where
        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
    {
        self.cfg.penalty_ineq.push((Arc::new(f), w));
        self
    }
    /// Adds an equality constraint penalty function (h(x) = 0) with weight `w`.
    pub fn add_penalty_eq<FN>(mut self, f: FN, w: f64) -> Self
    where
        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
    {
        self.cfg.penalty_eq.push((Arc::new(f), w));
        self
    }
    /// Sets a linear constraint penalty.
    ///
    /// Note: this replaces any previously configured linear penalty; use
    /// `LinearConstraintHelper::apply_to` to stack constraints instead.
    pub fn linear_penalty(mut self, lp: LinearPenalty) -> Self {
        self.cfg.linear_penalty = Some(lp);
        self
    }
    /// Sets the polishing configuration.
    pub fn polish(mut self, pol: PolishConfig) -> Self {
        self.cfg.polish = Some(pol);
        self
    }
    /// Sets the adaptive DE configuration.
    pub fn adaptive(mut self, adaptive: AdaptiveConfig) -> Self {
        self.cfg.adaptive = adaptive;
        self
    }
    /// Enables/disables adaptive mutation.
    pub fn enable_adaptive_mutation(mut self, enable: bool) -> Self {
        self.cfg.adaptive.adaptive_mutation = enable;
        self
    }
    /// Enables/disables Wrapper Local Search.
    pub fn enable_wls(mut self, enable: bool) -> Self {
        self.cfg.adaptive.wls_enabled = enable;
        self
    }
    /// Sets the adaptive weight bounds.
    pub fn adaptive_weights(mut self, w_max: f64, w_min: f64) -> Self {
        self.cfg.adaptive.w_max = w_max;
        self.cfg.adaptive.w_min = w_min;
        self
    }
    /// Sets the parallel evaluation configuration.
    pub fn parallel(mut self, parallel: parallel_eval::ParallelConfig) -> Self {
        self.cfg.parallel = parallel;
        self
    }
    /// Enables/disables parallel evaluation.
    pub fn enable_parallel(mut self, enable: bool) -> Self {
        self.cfg.parallel.enabled = enable;
        self
    }
    /// Sets the number of parallel threads.
    pub fn parallel_threads(mut self, num_threads: usize) -> Self {
        self.cfg.parallel.num_threads = Some(num_threads);
        self
    }
    /// Builds and returns the configuration.
    ///
    /// # Errors
    ///
    /// Returns `DEError::PopulationTooSmall` if `popsize < 4`.
    pub fn build(self) -> error::Result<DEConfig> {
        if self.cfg.popsize < 4 {
            return Err(DEError::PopulationTooSmall {
                pop_size: self.cfg.popsize,
            });
        }
        Ok(self.cfg)
    }
}
791
/// Result/report of a DE optimization run.
///
/// Contains the optimal solution, convergence status, and statistics, plus
/// the final population for post-hoc analysis.
#[derive(Clone)]
pub struct DEReport {
    /// The optimal solution vector.
    pub x: Array1<f64>,
    /// The objective function value at the optimal solution.
    pub fun: f64,
    /// Whether the optimization converged successfully.
    pub success: bool,
    /// Human-readable status message.
    pub message: String,
    /// Number of iterations (generations) performed.
    pub nit: usize,
    /// Number of function evaluations performed.
    pub nfev: usize,
    /// Final population matrix (NP x n).
    pub population: Array2<f64>,
    /// Fitness values for each population member (parallel to `population` rows).
    pub population_energies: Array1<f64>,
}
814
815impl fmt::Debug for DEReport {
816    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
817        f.debug_struct("DEReport")
818            .field("x", &format!("len={}", self.x.len()))
819            .field("fun", &self.fun)
820            .field("success", &self.success)
821            .field("message", &self.message)
822            .field("nit", &self.nit)
823            .field("nfev", &self.nfev)
824            .field(
825                "population",
826                &format!("{}x{}", self.population.nrows(), self.population.ncols()),
827            )
828            .field(
829                "population_energies",
830                &format!("len={}", self.population_energies.len()),
831            )
832            .finish()
833    }
834}
835
/// Information passed to callback after each generation.
pub struct DEIntermediate {
    /// Current best solution vector.
    pub x: Array1<f64>,
    /// Current best objective value.
    pub fun: f64,
    /// Convergence measure (population fitness std dev).
    pub convergence: f64,
    /// Current iteration (generation) number.
    pub iter: usize,
}
847
/// Action returned by callback to control optimization flow.
pub enum CallbackAction {
    /// Continue optimization normally.
    Continue,
    /// Stop optimization early after the current generation.
    Stop,
}
855
/// Differential Evolution optimizer.
///
/// A population-based stochastic optimizer for continuous functions.
/// Use [`DifferentialEvolution::new`] to create an instance, configure
/// with [`config_mut`](Self::config_mut), then call [`solve`](Self::solve).
pub struct DifferentialEvolution<'a, F>
where
    F: Fn(&Array1<f64>) -> f64 + Sync,
{
    // Objective function to minimize (borrowed for the optimizer's lifetime).
    func: &'a F,
    // Per-dimension lower bounds.
    lower: Array1<f64>,
    // Per-dimension upper bounds (validated >= lower in `new`).
    upper: Array1<f64>,
    // Algorithm configuration; starts at `DEConfig::default()`.
    config: DEConfig,
}
870
871impl<'a, F> DifferentialEvolution<'a, F>
872where
873    F: Fn(&Array1<f64>) -> f64 + Sync,
874{
875    /// Creates a new DE optimizer with objective `func` and bounds [lower, upper].
876    ///
877    /// # Errors
878    ///
879    /// Returns `DEError::BoundsMismatch` if `lower` and `upper` have different lengths.
880    /// Returns `DEError::InvalidBounds` if any lower bound exceeds its corresponding upper bound.
881    pub fn new(func: &'a F, lower: Array1<f64>, upper: Array1<f64>) -> Result<Self> {
882        if lower.len() != upper.len() {
883            return Err(DEError::BoundsMismatch {
884                lower_len: lower.len(),
885                upper_len: upper.len(),
886            });
887        }
888
889        // Validate that lower <= upper for all dimensions
890        for i in 0..lower.len() {
891            if lower[i] > upper[i] {
892                return Err(DEError::InvalidBounds {
893                    index: i,
894                    lower: lower[i],
895                    upper: upper[i],
896                });
897            }
898        }
899
900        Ok(Self {
901            func,
902            lower,
903            upper,
904            config: DEConfig::default(),
905        })
906    }
907
908    /// Mutable access to configuration
909    pub fn config_mut(&mut self) -> &mut DEConfig {
910        &mut self.config
911    }
912
913    /// Run the optimization and return a report
914    pub fn solve(&mut self) -> DEReport {
915        use apply_integrality::apply_integrality;
916        use apply_wls::apply_wls;
917        use crossover_binomial::binomial_crossover;
918        use crossover_exponential::exponential_crossover;
919        use init_latin_hypercube::init_latin_hypercube;
920        use init_random::init_random;
921        use mutant_adaptive::mutant_adaptive;
922        use mutant_best1::mutant_best1;
923        use mutant_best2::mutant_best2;
924        use mutant_current_to_best1::mutant_current_to_best1;
925        use mutant_rand_to_best1::mutant_rand_to_best1;
926        use mutant_rand1::mutant_rand1;
927        use mutant_rand2::mutant_rand2;
928        use parallel_eval::evaluate_trials_parallel;
929        use std::sync::Arc;
930
931        let n = self.lower.len();
932
933        // Identify fixed (equal-bounds) and free variables
934        let mut is_free: Vec<bool> = Vec::with_capacity(n);
935        for i in 0..n {
936            is_free.push((self.upper[i] - self.lower[i]).abs() > 0.0);
937        }
938        let n_free = is_free.iter().filter(|&&b| b).count();
939        let _n_equal = n - n_free;
940        if n_free == 0 {
941            // All fixed; just evaluate x = lower
942            let x_fixed = self.lower.clone();
943            let mut x_eval = x_fixed.clone();
944            if let Some(mask) = &self.config.integrality {
945                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
946            }
947            let f = (self.func)(&x_eval);
948            return DEReport {
949                x: x_eval,
950                fun: f,
951                success: true,
952                message: "All variables fixed by bounds".into(),
953                nit: 0,
954                nfev: 1,
955                population: Array2::zeros((1, n)),
956                population_energies: Array1::from(vec![f]),
957            };
958        }
959
960        let npop = self.config.popsize * n_free;
961        let _bounds_span = &self.upper - &self.lower;
962
963        if self.config.disp {
964            eprintln!(
965                "DE Init: {} dimensions ({} free), population={}, maxiter={}",
966                n, n_free, npop, self.config.maxiter
967            );
968            eprintln!(
969                "  Strategy: {:?}, Mutation: {:?}, Crossover: CR={:.3}",
970                self.config.strategy, self.config.mutation, self.config.recombination
971            );
972            eprintln!(
973                "  Tolerances: tol={:.2e}, atol={:.2e}",
974                self.config.tol, self.config.atol
975            );
976        }
977
978        // Timing toggle via env var
979        let timing_enabled = std::env::var("AUTOEQ_DE_TIMING")
980            .map(|v| v != "0")
981            .unwrap_or(false);
982
983        // Configure global rayon thread pool once if requested
984        if let Some(n) = self.config.parallel.num_threads {
985            // Ignore error if global pool already set
986            let _ = rayon::ThreadPoolBuilder::new()
987                .num_threads(n)
988                .build_global();
989        }
990
991        // RNG
992        let mut rng: StdRng = match self.config.seed {
993            Some(s) => StdRng::seed_from_u64(s),
994            None => {
995                let mut thread_rng = rand::rng();
996                StdRng::from_rng(&mut thread_rng)
997            }
998        };
999
1000        // Initialize population in [lower, upper]
1001        let mut pop = match self.config.init {
1002            Init::LatinHypercube => {
1003                if self.config.disp {
1004                    eprintln!("  Using Latin Hypercube initialization");
1005                }
1006                init_latin_hypercube(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
1007            }
1008            Init::Random => {
1009                if self.config.disp {
1010                    eprintln!("  Using Random initialization");
1011                }
1012                init_random(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
1013            }
1014        };
1015
1016        // Evaluate energies (objective + penalties)
1017        let mut nfev: usize = 0;
1018        if self.config.disp {
1019            eprintln!("  Evaluating initial population of {} individuals...", npop);
1020        }
1021
1022        // Prepare population for evaluation (apply integrality constraints)
1023        let mut eval_pop = pop.clone();
1024        let t_integrality0 = Instant::now();
1025        if let Some(mask) = &self.config.integrality {
1026            for i in 0..npop {
1027                let mut row = eval_pop.row_mut(i);
1028                let mut x_eval = row.to_owned();
1029                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
1030                row.assign(&x_eval);
1031            }
1032        }
1033        let t_integrality = t_integrality0.elapsed();
1034
1035        // Build thread-safe energy function that includes penalties
1036        let func_ref = self.func;
1037        let penalty_ineq_vec: Vec<PenaltyTuple> = self
1038            .config
1039            .penalty_ineq
1040            .iter()
1041            .map(|(f, w)| (f.clone(), *w))
1042            .collect();
1043        let penalty_eq_vec: Vec<PenaltyTuple> = self
1044            .config
1045            .penalty_eq
1046            .iter()
1047            .map(|(f, w)| (f.clone(), *w))
1048            .collect();
1049        let linear_penalty = self.config.linear_penalty.clone();
1050
1051        let energy_fn = Arc::new(move |x: &Array1<f64>| -> f64 {
1052            let base = (func_ref)(x);
1053            let mut p = 0.0;
1054            for (f, w) in &penalty_ineq_vec {
1055                let v = f(x);
1056                let viol = v.max(0.0);
1057                p += w * viol * viol;
1058            }
1059            for (h, w) in &penalty_eq_vec {
1060                let v = h(x);
1061                p += w * v * v;
1062            }
1063            if let Some(ref lp) = linear_penalty {
1064                let ax = lp.a.dot(&x.view());
1065                Zip::from(&ax)
1066                    .and(&lp.lb)
1067                    .and(&lp.ub)
1068                    .for_each(|&v, &lo, &hi| {
1069                        if v < lo {
1070                            let d = lo - v;
1071                            p += lp.weight * d * d;
1072                        } else if v > hi {
1073                            let d = v - hi;
1074                            p += lp.weight * d * d;
1075                        }
1076                    });
1077            }
1078            base + p
1079        });
1080
1081        let t_eval0 = Instant::now();
1082        let mut energies = parallel_eval::evaluate_population_parallel(
1083            &eval_pop,
1084            energy_fn.clone(),
1085            &self.config.parallel,
1086        );
1087        let t_eval_init = t_eval0.elapsed();
1088        nfev += npop;
1089        if timing_enabled {
1090            eprintln!(
1091                "TIMING init: integrality={:.3} ms, eval={:.3} ms",
1092                t_integrality.as_secs_f64() * 1e3,
1093                t_eval_init.as_secs_f64() * 1e3
1094            );
1095        }
1096
1097        // Report initial population statistics
1098        let pop_mean = energies.mean().unwrap_or(0.0);
1099        let pop_std = energies.std(0.0);
1100        if self.config.disp {
1101            eprintln!(
1102                "  Initial population: mean={:.6e}, std={:.6e}",
1103                pop_mean, pop_std
1104            );
1105        }
1106
1107        // If x0 provided, override the best member
1108        if let Some(x0) = &self.config.x0 {
1109            let mut x0c = x0.clone();
1110            // Clip to bounds using ndarray
1111            for i in 0..x0c.len() {
1112                x0c[i] = x0c[i].clamp(self.lower[i], self.upper[i]);
1113            }
1114            if let Some(mask) = &self.config.integrality {
1115                apply_integrality(&mut x0c, mask, &self.lower, &self.upper);
1116            }
1117            let f0 = self.energy(&x0c);
1118            nfev += 1;
1119            // find current best
1120            let (best_idx, _best_f) = argmin(&energies);
1121            pop.row_mut(best_idx).assign(&x0c.view());
1122            energies[best_idx] = f0;
1123        }
1124
1125        let (mut best_idx, mut best_f) = argmin(&energies);
1126        let mut best_x = pop.row(best_idx).to_owned();
1127
1128        if self.config.disp {
1129            eprintln!(
1130                "  Initial best: fitness={:.6e} at index {}",
1131                best_f, best_idx
1132            );
1133            let param_summary: Vec<String> = (0..best_x.len() / 3)
1134                .map(|i| {
1135                    let freq = 10f64.powf(best_x[i * 3]);
1136                    let q = best_x[i * 3 + 1];
1137                    let gain = best_x[i * 3 + 2];
1138                    format!("f{:.0}Hz/Q{:.2}/G{:.2}dB", freq, q, gain)
1139                })
1140                .collect();
1141            eprintln!("  Initial best params: [{}]", param_summary.join(", "));
1142        }
1143
1144        if self.config.disp {
1145            eprintln!("DE iter {:4}  best_f={:.6e}", 0, best_f);
1146        }
1147
1148        // Initialize adaptive state if adaptive strategies are enabled
1149        let mut adaptive_state = if matches!(
1150            self.config.strategy,
1151            Strategy::AdaptiveBin | Strategy::AdaptiveExp
1152        ) || self.config.adaptive.adaptive_mutation
1153        {
1154            Some(AdaptiveState::new(&self.config.adaptive))
1155        } else {
1156            None
1157        };
1158
1159        // Main loop
1160        let mut success = false;
1161        let mut message = String::new();
1162        let mut nit = 0;
1163        let mut accepted_trials;
1164        let mut improvement_count;
1165
1166        let mut t_build_tot = std::time::Duration::ZERO;
1167        let mut t_eval_tot = std::time::Duration::ZERO;
1168        let mut t_select_tot = std::time::Duration::ZERO;
1169        let mut t_iter_tot = std::time::Duration::ZERO;
1170
1171        for iter in 1..=self.config.maxiter {
1172            nit = iter;
1173            accepted_trials = 0;
1174            improvement_count = 0;
1175
1176            let iter_start = Instant::now();
1177
1178            // Pre-sort indices for adaptive strategies to avoid re-sorting in the loop
1179            let sorted_indices = if matches!(
1180                self.config.strategy,
1181                Strategy::AdaptiveBin | Strategy::AdaptiveExp
1182            ) {
1183                let mut indices: Vec<usize> = (0..npop).collect();
1184                indices.sort_by(|&a, &b| {
1185                    energies[a]
1186                        .partial_cmp(&energies[b])
1187                        .unwrap_or(std::cmp::Ordering::Equal)
1188                });
1189                indices
1190            } else {
1191                Vec::new() // Not needed for other strategies
1192            };
1193
1194            // Generate all trials first, then evaluate in parallel
1195            let t_build0 = Instant::now();
1196
1197            // Parallelize trial generation using rayon
1198            use rayon::prelude::*;
1199            let (trials, trial_params): (Vec<_>, Vec<_>) = (0..npop)
1200                .into_par_iter()
1201                .map(|i| {
1202                    // Create thread-local RNG from base seed + iteration + individual index
1203                    let mut local_rng: StdRng = if let Some(base_seed) = self.config.seed {
1204                        StdRng::seed_from_u64(
1205                            base_seed
1206                                .wrapping_add((iter as u64) << 32)
1207                                .wrapping_add(i as u64),
1208                        )
1209                    } else {
1210                        // Use thread_rng for unseeded runs
1211                        let mut thread_rng = rand::rng();
1212                        StdRng::from_rng(&mut thread_rng)
1213                    };
1214
1215                    // Sample mutation factor and crossover rate (adaptive or fixed)
1216                    let (f, cr) = if let Some(ref adaptive) = adaptive_state {
1217                        // Use adaptive parameter sampling
1218                        let adaptive_f = adaptive.sample_f(&mut local_rng);
1219                        let adaptive_cr = adaptive.sample_cr(&mut local_rng);
1220                        (adaptive_f, adaptive_cr)
1221                    } else {
1222                        // Use fixed or dithered parameters
1223                        (
1224                            self.config.mutation.sample(&mut local_rng),
1225                            self.config.recombination,
1226                        )
1227                    };
1228
1229                    // Generate mutant and apply crossover based on strategy
1230                    let (mutant, cross) = match self.config.strategy {
1231                        Strategy::Best1Bin => (
1232                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
1233                            Crossover::Binomial,
1234                        ),
1235                        Strategy::Best1Exp => (
1236                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
1237                            Crossover::Exponential,
1238                        ),
1239                        Strategy::Rand1Bin => (
1240                            mutant_rand1(i, &pop, f, &mut local_rng),
1241                            Crossover::Binomial,
1242                        ),
1243                        Strategy::Rand1Exp => (
1244                            mutant_rand1(i, &pop, f, &mut local_rng),
1245                            Crossover::Exponential,
1246                        ),
1247                        Strategy::Rand2Bin => (
1248                            mutant_rand2(i, &pop, f, &mut local_rng),
1249                            Crossover::Binomial,
1250                        ),
1251                        Strategy::Rand2Exp => (
1252                            mutant_rand2(i, &pop, f, &mut local_rng),
1253                            Crossover::Exponential,
1254                        ),
1255                        Strategy::CurrentToBest1Bin => (
1256                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
1257                            Crossover::Binomial,
1258                        ),
1259                        Strategy::CurrentToBest1Exp => (
1260                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
1261                            Crossover::Exponential,
1262                        ),
1263                        Strategy::Best2Bin => (
1264                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
1265                            Crossover::Binomial,
1266                        ),
1267                        Strategy::Best2Exp => (
1268                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
1269                            Crossover::Exponential,
1270                        ),
1271                        Strategy::RandToBest1Bin => (
1272                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
1273                            Crossover::Binomial,
1274                        ),
1275                        Strategy::RandToBest1Exp => (
1276                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
1277                            Crossover::Exponential,
1278                        ),
1279                        Strategy::AdaptiveBin => {
1280                            if let Some(ref adaptive) = adaptive_state {
1281                                (
1282                                    mutant_adaptive(
1283                                        i,
1284                                        &pop,
1285                                        &sorted_indices,
1286                                        adaptive.current_w,
1287                                        f,
1288                                        &mut local_rng,
1289                                    ),
1290                                    Crossover::Binomial,
1291                                )
1292                            } else {
1293                                // Fallback to rand1 if adaptive state not available
1294                                (
1295                                    mutant_rand1(i, &pop, f, &mut local_rng),
1296                                    Crossover::Binomial,
1297                                )
1298                            }
1299                        }
1300                        Strategy::AdaptiveExp => {
1301                            if let Some(ref adaptive) = adaptive_state {
1302                                (
1303                                    mutant_adaptive(
1304                                        i,
1305                                        &pop,
1306                                        &sorted_indices,
1307                                        adaptive.current_w,
1308                                        f,
1309                                        &mut local_rng,
1310                                    ),
1311                                    Crossover::Exponential,
1312                                )
1313                            } else {
1314                                // Fallback to rand1 if adaptive state not available
1315                                (
1316                                    mutant_rand1(i, &pop, f, &mut local_rng),
1317                                    Crossover::Exponential,
1318                                )
1319                            }
1320                        }
1321                    };
1322
1323                    // If strategy didn't dictate crossover, fallback to config
1324                    let crossover = cross;
1325                    let trial = match crossover {
1326                        Crossover::Binomial => {
1327                            binomial_crossover(&pop.row(i).to_owned(), &mutant, cr, &mut local_rng)
1328                        }
1329                        Crossover::Exponential => exponential_crossover(
1330                            &pop.row(i).to_owned(),
1331                            &mutant,
1332                            cr,
1333                            &mut local_rng,
1334                        ),
1335                    };
1336
1337                    // Apply WLS if enabled
1338                    let wls_trial = if self.config.adaptive.wls_enabled
1339                        && local_rng.random::<f64>() < self.config.adaptive.wls_prob
1340                    {
1341                        apply_wls(
1342                            &trial,
1343                            &self.lower,
1344                            &self.upper,
1345                            self.config.adaptive.wls_scale,
1346                            &mut local_rng,
1347                        )
1348                    } else {
1349                        trial.clone()
1350                    };
1351
1352                    // Clip to bounds using vectorized operation
1353                    let mut trial_clipped = wls_trial;
1354                    Zip::from(&mut trial_clipped)
1355                        .and(&self.lower)
1356                        .and(&self.upper)
1357                        .for_each(|x, lo, hi| *x = x.clamp(*lo, *hi));
1358
1359                    // Apply integrality if provided
1360                    if let Some(mask) = &self.config.integrality {
1361                        apply_integrality(&mut trial_clipped, mask, &self.lower, &self.upper);
1362                    }
1363
1364                    // Return trial and parameters
1365                    (trial_clipped, (f, cr))
1366                })
1367                .unzip();
1368
1369            let t_build = t_build0.elapsed();
1370            let t_eval0 = Instant::now();
1371            let trial_energies =
1372                evaluate_trials_parallel(&trials, energy_fn.clone(), &self.config.parallel);
1373            let t_eval = t_eval0.elapsed();
1374            nfev += npop;
1375
1376            let t_select0 = Instant::now();
1377            // Selection phase: update population based on trial results
1378            for (i, (trial, trial_energy)) in
1379                trials.into_iter().zip(trial_energies.iter()).enumerate()
1380            {
1381                let (f, cr) = trial_params[i];
1382
1383                // Selection: replace if better
1384                if *trial_energy <= energies[i] {
1385                    pop.row_mut(i).assign(&trial.view());
1386                    energies[i] = *trial_energy;
1387                    accepted_trials += 1;
1388
1389                    // Update adaptive parameters if improvement
1390                    if let Some(ref mut adaptive) = adaptive_state {
1391                        adaptive.record_success(f, cr);
1392                    }
1393
1394                    // Track if this is an improvement over the current best
1395                    if *trial_energy < best_f {
1396                        improvement_count += 1;
1397                    }
1398                }
1399            }
1400            let t_select = t_select0.elapsed();
1401
1402            t_build_tot += t_build;
1403            t_eval_tot += t_eval;
1404            t_select_tot += t_select;
1405            let iter_dur = iter_start.elapsed();
1406            t_iter_tot += iter_dur;
1407
1408            if timing_enabled && (iter <= 5 || iter % 10 == 0) {
1409                eprintln!(
1410                    "TIMING iter {:4}: build={:.3} ms, eval={:.3} ms, select={:.3} ms, total={:.3} ms",
1411                    iter,
1412                    t_build.as_secs_f64() * 1e3,
1413                    t_eval.as_secs_f64() * 1e3,
1414                    t_select.as_secs_f64() * 1e3,
1415                    iter_dur.as_secs_f64() * 1e3,
1416                );
1417            }
1418
1419            // Update adaptive parameters after each generation
1420            if let Some(ref mut adaptive) = adaptive_state {
1421                adaptive.update(&self.config.adaptive, iter, self.config.maxiter);
1422            }
1423
1424            // Update best solution after generation
1425            let (new_best_idx, new_best_f) = argmin(&energies);
1426            if new_best_f < best_f {
1427                best_idx = new_best_idx;
1428                best_f = new_best_f;
1429                best_x = pop.row(best_idx).to_owned();
1430            }
1431
1432            // Convergence check
1433            let pop_mean = energies.mean().unwrap_or(0.0);
1434            let pop_std = energies.std(0.0);
1435            let convergence_threshold = self.config.atol + self.config.tol * pop_mean.abs();
1436
1437            if self.config.disp {
1438                eprintln!(
1439                    "DE iter {:4}  best_f={:.6e}  std={:.3e}  accepted={}/{}, improved={}",
1440                    iter, best_f, pop_std, accepted_trials, npop, improvement_count
1441                );
1442            }
1443
1444            // Callback
1445            if let Some(ref mut cb) = self.config.callback {
1446                let intermediate = DEIntermediate {
1447                    x: best_x.clone(),
1448                    fun: best_f,
1449                    convergence: pop_std,
1450                    iter,
1451                };
1452                match cb(&intermediate) {
1453                    CallbackAction::Stop => {
1454                        success = true;
1455                        message = "Optimization stopped by callback".to_string();
1456                        break;
1457                    }
1458                    CallbackAction::Continue => {}
1459                }
1460            }
1461
1462            if pop_std <= convergence_threshold {
1463                success = true;
1464                message = format!(
1465                    "Converged: std(pop_f)={:.3e} <= threshold={:.3e}",
1466                    pop_std, convergence_threshold
1467                );
1468                break;
1469            }
1470        }
1471
1472        if !success {
1473            message = format!("Maximum iterations reached: {}", self.config.maxiter);
1474        }
1475
1476        if self.config.disp {
1477            eprintln!("DE finished: {}", message);
1478        }
1479
1480        // Polish if configured
1481        let (final_x, final_f, polish_nfev) = if let Some(ref polish_cfg) = self.config.polish {
1482            if polish_cfg.enabled {
1483                self.polish(&best_x)
1484            } else {
1485                (best_x.clone(), best_f, 0)
1486            }
1487        } else {
1488            (best_x.clone(), best_f, 0)
1489        };
1490
1491        if timing_enabled {
1492            eprintln!(
1493                "TIMING total: build={:.3} s, eval={:.3} s, select={:.3} s, iter_total={:.3} s",
1494                t_build_tot.as_secs_f64(),
1495                t_eval_tot.as_secs_f64(),
1496                t_select_tot.as_secs_f64(),
1497                t_iter_tot.as_secs_f64()
1498            );
1499        }
1500
1501        self.finish_report(
1502            pop,
1503            energies,
1504            final_x,
1505            final_f,
1506            success,
1507            message,
1508            nit,
1509            nfev + polish_nfev,
1510        )
1511    }
1512}
1513
#[cfg(test)]
mod strategy_tests {
    use super::*;

    /// Parsing well-known strategy names must yield the matching variants.
    #[test]
    fn test_parse_strategy_variants() {
        assert!(matches!(
            "best1exp".parse::<Strategy>(),
            Ok(Strategy::Best1Exp)
        ));
        assert!(matches!(
            "rand1bin".parse::<Strategy>(),
            Ok(Strategy::Rand1Bin)
        ));
        assert!(matches!(
            "randtobest1exp".parse::<Strategy>(),
            Ok(Strategy::RandToBest1Exp)
        ));
    }
}
1533}