// autoeq_de/mod.rs

1//! Differential Evolution optimization library.
2//!
3//! This crate provides a Rust implementation of the Differential Evolution (DE)
4//! algorithm, a population-based stochastic optimizer for continuous optimization
5//! problems. The implementation is inspired by SciPy's differential_evolution.
6//!
7//! # Features
8//!
9//! - Multiple mutation strategies (Best1, Rand1, CurrentToBest1, etc.)
10//! - Binomial and exponential crossover
11//! - Adaptive parameter control
12//! - Parallel population evaluation
13//! - Constraint handling via penalty methods
14//! - Latin Hypercube initialization
15//!
16//! # Example
17//!
18//! ```rust
19//! use autoeq_de::{differential_evolution, DEConfigBuilder};
20//!
21//! // Minimize the sphere function: f(x) = sum(x_i^2)
22//! let bounds = vec![(-5.0, 5.0), (-5.0, 5.0)];
23//! let config = DEConfigBuilder::new()
24//!     .maxiter(100)
25//!     .seed(42)
26//!     .build();
27//!
28//! let result = differential_evolution(
29//!     &|x| x.iter().map(|&xi| xi * xi).sum(),
30//!     &bounds,
31//!     config,
32//! ).expect("optimization should succeed");
33//!
34//! assert!(result.fun < 1e-6);
35//! ```
36#![doc = include_str!("../README.md")]
37#![doc = include_str!("../REFERENCES.md")]
38#![warn(missing_docs)]
39
40pub mod error;
41pub use error::{DEError, Result};
42
43use std::fmt;
44use std::str::FromStr;
45use std::sync::Arc;
46
47use ndarray::{Array1, Array2};
48use rand::rngs::StdRng;
49use rand::{Rng, SeedableRng};
50use std::time::Instant;
51
52/// Linear penalty stacking utilities for combining multiple constraints.
53pub mod stack_linear_penalty;
54
55/// Integer variable handling for mixed-integer optimization.
56pub mod apply_integrality;
57/// Wrapper Local Search (WLS) application for local refinement.
58pub mod apply_wls;
59
60/// Utilities for selecting distinct random indices from a population.
61pub mod distinct_indices;
62/// Latin Hypercube Sampling initialization strategy.
63pub mod init_latin_hypercube;
64/// Random uniform initialization strategy.
65pub mod init_random;
66
67/// Adaptive mutation strategy with dynamic parameter control.
68pub mod mutant_adaptive;
69/// Best/1 mutation strategy: uses best individual plus one difference vector.
70pub mod mutant_best1;
71/// Best/2 mutation strategy: uses best individual plus two difference vectors.
72pub mod mutant_best2;
73/// Current-to-best/1 mutation: blends current with best individual.
74pub mod mutant_current_to_best1;
75/// Rand/1 mutation strategy: uses random individual plus one difference vector.
76pub mod mutant_rand1;
77/// Rand/2 mutation strategy: uses random individual plus two difference vectors.
78pub mod mutant_rand2;
79/// Rand-to-best/1 mutation: blends random with best individual.
80pub mod mutant_rand_to_best1;
81
82/// Binomial (uniform) crossover implementation.
83pub mod crossover_binomial;
84/// Exponential crossover implementation.
85pub mod crossover_exponential;
86
87/// Main differential evolution algorithm implementation.
88pub mod differential_evolution;
89/// Registry of standard test functions for benchmarking.
90pub mod function_registry;
91/// Internal helper functions for DE implementation.
92pub mod impl_helpers;
93/// Metadata-driven optimization examples and tests.
94pub mod metadata;
95/// Parallel population evaluation support.
96pub mod parallel_eval;
97/// Optimization recording for analysis and debugging.
98pub mod recorder;
99/// Recorded optimization wrapper for testing.
100pub mod run_recorded;
101pub use differential_evolution::differential_evolution;
102pub use parallel_eval::ParallelConfig;
103pub use recorder::{OptimizationRecord, OptimizationRecorder};
104pub use run_recorded::run_recorded_differential_evolution;
105
106// Type aliases to reduce complexity
107/// Scalar constraint function type
108pub type ScalarConstraintFn = Arc<dyn Fn(&Array1<f64>) -> f64 + Send + Sync>;
109/// Vector constraint function type
110pub type VectorConstraintFn = Arc<dyn Fn(&Array1<f64>) -> Array1<f64> + Send + Sync>;
111/// Penalty tuple type (function, weight)
112pub type PenaltyTuple = (ScalarConstraintFn, f64);
113/// Callback function type
114pub type CallbackFn = Box<dyn FnMut(&DEIntermediate) -> CallbackAction>;
115
116pub(crate) fn argmin(v: &Array1<f64>) -> (usize, f64) {
117    let mut best_i = 0usize;
118    let mut best_v = v[0];
119    for (i, &val) in v.iter().enumerate() {
120        if val < best_v {
121            best_v = val;
122            best_i = i;
123        }
124    }
125    (best_i, best_v)
126}
127
/// Differential Evolution mutation/crossover strategy.
///
/// The strategy name follows the pattern `{mutation}{n}{crossover}` where:
/// - `mutation`: Base vector selection (Best, Rand, CurrentToBest, RandToBest, Adaptive)
/// - `n`: Number of difference vectors (1 or 2)
/// - `crossover`: Crossover type (Bin = binomial, Exp = exponential)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Strategy {
    /// Best/1/Bin: Best individual + 1 difference vector, binomial crossover
    Best1Bin,
    /// Best/1/Exp: Best individual + 1 difference vector, exponential crossover
    Best1Exp,
    /// Rand/1/Bin: Random individual + 1 difference vector, binomial crossover
    Rand1Bin,
    /// Rand/1/Exp: Random individual + 1 difference vector, exponential crossover
    Rand1Exp,
    /// Rand/2/Bin: Random individual + 2 difference vectors, binomial crossover
    Rand2Bin,
    /// Rand/2/Exp: Random individual + 2 difference vectors, exponential crossover
    Rand2Exp,
    /// Current-to-best/1/Bin: Blend of current and best + 1 diff, binomial crossover
    CurrentToBest1Bin,
    /// Current-to-best/1/Exp: Blend of current and best + 1 diff, exponential crossover
    CurrentToBest1Exp,
    /// Best/2/Bin: Best individual + 2 difference vectors, binomial crossover
    Best2Bin,
    /// Best/2/Exp: Best individual + 2 difference vectors, exponential crossover
    Best2Exp,
    /// Rand-to-best/1/Bin: Blend of random and best + 1 diff, binomial crossover
    RandToBest1Bin,
    /// Rand-to-best/1/Exp: Blend of random and best + 1 diff, exponential crossover
    RandToBest1Exp,
    /// Adaptive mutation with binomial crossover (SAM approach)
    AdaptiveBin,
    /// Adaptive mutation with exponential crossover
    AdaptiveExp,
}
165
166impl FromStr for Strategy {
167    type Err = String;
168    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
169        let t = s.to_lowercase();
170        match t.as_str() {
171            "best1bin" | "best1" => Ok(Strategy::Best1Bin),
172            "best1exp" => Ok(Strategy::Best1Exp),
173            "rand1bin" | "rand1" => Ok(Strategy::Rand1Bin),
174            "rand1exp" => Ok(Strategy::Rand1Exp),
175            "rand2bin" | "rand2" => Ok(Strategy::Rand2Bin),
176            "rand2exp" => Ok(Strategy::Rand2Exp),
177            "currenttobest1bin" | "current-to-best1bin" | "current_to_best1bin" => {
178                Ok(Strategy::CurrentToBest1Bin)
179            }
180            "currenttobest1exp" | "current-to-best1exp" | "current_to_best1exp" => {
181                Ok(Strategy::CurrentToBest1Exp)
182            }
183            "best2bin" | "best2" => Ok(Strategy::Best2Bin),
184            "best2exp" => Ok(Strategy::Best2Exp),
185            "randtobest1bin" | "rand-to-best1bin" | "rand_to_best1bin" => {
186                Ok(Strategy::RandToBest1Bin)
187            }
188            "randtobest1exp" | "rand-to-best1exp" | "rand_to_best1exp" => {
189                Ok(Strategy::RandToBest1Exp)
190            }
191            "adaptivebin" | "adaptive-bin" | "adaptive_bin" | "adaptive" => {
192                Ok(Strategy::AdaptiveBin)
193            }
194            "adaptiveexp" | "adaptive-exp" | "adaptive_exp" => Ok(Strategy::AdaptiveExp),
195            _ => Err(format!("unknown strategy: {}", s)),
196        }
197    }
198}
199
/// Crossover type used to mix the mutant vector with the target vector.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum Crossover {
    /// Binomial (uniform) crossover: each gene is taken independently.
    #[default]
    Binomial,
    /// Exponential crossover: a contiguous run of genes is taken.
    Exponential,
}
209
/// Mutation setting: either a fixed factor, a uniform range (dithering), or adaptive.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Mutation {
    /// Fixed mutation factor F in [0, 2).
    Factor(f64),
    /// Dithering range [min, max) with 0 <= min < max <= 2.
    Range {
        /// Minimum mutation factor.
        min: f64,
        /// Maximum mutation factor.
        max: f64,
    },
    /// Adaptive mutation factor using Cauchy distribution.
    Adaptive {
        /// Initial mutation factor before adaptation.
        initial_f: f64,
    },
}
228
229impl Default for Mutation {
230    fn default() -> Self {
231        let _ = Mutation::Factor(0.8);
232        Mutation::Range { min: 0.0, max: 2.0 }
233    }
234}
235
236impl Mutation {
237    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
238        match *self {
239            Mutation::Factor(f) => f,
240            Mutation::Range { min, max } => rng.random_range(min..max),
241            Mutation::Adaptive { initial_f } => initial_f, // Will be overridden by adaptive logic
242        }
243    }
244
245    /// Sample from Cauchy distribution for adaptive mutation (F parameter)
246    #[allow(dead_code)]
247    fn sample_cauchy<R: Rng + ?Sized>(&self, f_m: f64, _scale: f64, rng: &mut R) -> f64 {
248        // Simplified version using normal random for now
249        let perturbation = (rng.random::<f64>() - 0.5) * 0.2; // Small perturbation
250        (f_m + perturbation).clamp(0.0, 2.0) // Clamp to valid range
251    }
252}
253
/// Initialization scheme for the population.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum Init {
    /// Latin Hypercube Sampling for better space coverage.
    #[default]
    LatinHypercube,
    /// Uniform random initialization.
    Random,
}
263
/// Whether best updates during a generation (we use Deferred only).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum Updating {
    /// Deferred update: best is updated after all trials are evaluated.
    #[default]
    Deferred,
}
271
/// Linear penalty specification: lb <= A x <= ub (component-wise).
///
/// Violations of either bound contribute a weighted penalty to the
/// objective rather than being enforced exactly.
#[derive(Debug, Clone)]
pub struct LinearPenalty {
    /// Constraint matrix A (m x n), one row per linear constraint.
    pub a: Array2<f64>,
    /// Lower bounds vector (m elements).
    pub lb: Array1<f64>,
    /// Upper bounds vector (m elements).
    pub ub: Array1<f64>,
    /// Penalty weight for constraint violations.
    pub weight: f64,
}
284
/// SciPy-like linear constraint helper: lb <= A x <= ub.
///
/// Convert into a penalty on a config with [`LinearConstraintHelper::apply_to`].
#[derive(Debug, Clone)]
pub struct LinearConstraintHelper {
    /// Constraint matrix A (m x n).
    pub a: Array2<f64>,
    /// Lower bounds vector (m elements).
    pub lb: Array1<f64>,
    /// Upper bounds vector (m elements).
    pub ub: Array1<f64>,
}
295
296impl LinearConstraintHelper {
297    /// Apply helper by merging into DEConfig.linear_penalty (stacking rows if already present)
298    pub fn apply_to(&self, cfg: &mut DEConfig, weight: f64) {
299        use stack_linear_penalty::stack_linear_penalty;
300
301        let new_lp = LinearPenalty {
302            a: self.a.clone(),
303            lb: self.lb.clone(),
304            ub: self.ub.clone(),
305            weight,
306        };
307        match &mut cfg.linear_penalty {
308            Some(existing) => stack_linear_penalty(existing, &new_lp),
309            None => cfg.linear_penalty = Some(new_lp),
310        }
311    }
312}
313
/// SciPy-like nonlinear constraint helper: vector-valued fun(x) with lb <= fun(x) <= ub.
///
/// Convert into penalty closures on a config with
/// [`NonlinearConstraintHelper::apply_to`].
#[derive(Clone)]
pub struct NonlinearConstraintHelper {
    /// Vector-valued constraint function.
    pub fun: VectorConstraintFn,
    /// Lower bounds for each constraint component.
    pub lb: Array1<f64>,
    /// Upper bounds for each constraint component.
    pub ub: Array1<f64>,
}
324
325impl NonlinearConstraintHelper {
326    /// Apply helper by emitting penalty closures per component.
327    /// lb <= f_i(x) <= ub becomes two inequalities: f_i(x)-ub <= 0 and lb - f_i(x) <= 0.
328    /// If lb==ub, emit an equality penalty for f_i(x)-lb.
329    pub fn apply_to(&self, cfg: &mut DEConfig, weight_ineq: f64, weight_eq: f64) {
330        let f = self.fun.clone();
331        let lb = self.lb.clone();
332        let ub = self.ub.clone();
333        let m = lb.len().min(ub.len());
334        for i in 0..m {
335            let l = lb[i];
336            let u = ub[i];
337            if (u - l).abs() < 1e-18 {
338                let fi = f.clone();
339                cfg.penalty_eq.push((
340                    Arc::new(move |x: &Array1<f64>| {
341                        let y = (fi)(x);
342                        y[i] - l
343                    }),
344                    weight_eq,
345                ));
346            } else {
347                let fi_u = f.clone();
348                cfg.penalty_ineq.push((
349                    Arc::new(move |x: &Array1<f64>| {
350                        let y = (fi_u)(x);
351                        y[i] - u
352                    }),
353                    weight_ineq,
354                ));
355                let fi_l = f.clone();
356                cfg.penalty_ineq.push((
357                    Arc::new(move |x: &Array1<f64>| {
358                        let y = (fi_l)(x);
359                        l - y[i]
360                    }),
361                    weight_ineq,
362                ));
363            }
364        }
365    }
366}
367
/// Mutable per-run state for adaptive parameter control.
///
/// Tracks the running location parameters of the F (Cauchy) and CR
/// (Gaussian) sampling distributions plus the parameter values that
/// produced successful trials in the current generation.
#[derive(Debug, Clone)]
struct AdaptiveState {
    /// Current F_m parameter for Cauchy distribution (mutation)
    f_m: f64,
    /// Current CR_m parameter for Gaussian distribution (crossover)
    cr_m: f64,
    /// Successful F values from this generation
    successful_f: Vec<f64>,
    /// Successful CR values from this generation
    successful_cr: Vec<f64>,
    /// Current linearly decreasing weight for adaptive mutation
    current_w: f64,
}
382
383impl AdaptiveState {
384    fn new(config: &AdaptiveConfig) -> Self {
385        Self {
386            f_m: config.f_m,
387            cr_m: config.cr_m,
388            successful_f: Vec::new(),
389            successful_cr: Vec::new(),
390            current_w: config.w_max, // Start with maximum weight
391        }
392    }
393
394    /// Update adaptive parameters based on successful trials
395    fn update(&mut self, config: &AdaptiveConfig, iter: usize, max_iter: usize) {
396        // Update linearly decreasing weight (Equation 19 from the paper)
397        let iter_ratio = iter as f64 / max_iter as f64;
398        self.current_w = config.w_max - (config.w_max - config.w_min) * iter_ratio;
399
400        // Update F_m using Lehmer mean of successful F values
401        if !self.successful_f.is_empty() {
402            let mean_f = self.compute_lehmer_mean(&self.successful_f);
403            self.f_m = (1.0 - config.w_f) * self.f_m + config.w_f * mean_f;
404            self.f_m = self.f_m.clamp(0.2, 1.2);
405        }
406
407        // Update CR_m using arithmetic mean of successful CR values
408        if !self.successful_cr.is_empty() {
409            let mean_cr = self.compute_arithmetic_mean(&self.successful_cr);
410            self.cr_m = (1.0 - config.w_cr) * self.cr_m + config.w_cr * mean_cr;
411            self.cr_m = self.cr_m.clamp(0.1, 0.9);
412        }
413
414        // Clear successful values for next generation
415        self.successful_f.clear();
416        self.successful_cr.clear();
417    }
418
419    /// Compute Lehmer mean for successful F values (p=2)
420    fn compute_lehmer_mean(&self, values: &[f64]) -> f64 {
421        if values.is_empty() {
422            return 0.5; // Default fallback
423        }
424
425        let sum_sq: f64 = values.iter().map(|&x| x * x).sum();
426        let sum: f64 = values.iter().sum();
427
428        if sum > 0.0 {
429            sum_sq / sum
430        } else {
431            0.5 // Fallback if sum is zero
432        }
433    }
434
435    /// Compute arithmetic mean for successful CR values
436    fn compute_arithmetic_mean(&self, values: &[f64]) -> f64 {
437        if values.is_empty() {
438            return 0.5; // Default fallback
439        }
440        values.iter().sum::<f64>() / values.len() as f64
441    }
442
443    /// Record successful parameter values
444    fn record_success(&mut self, f_val: f64, cr_val: f64) {
445        self.successful_f.push(f_val);
446        self.successful_cr.push(cr_val);
447    }
448
449    /// Sample adaptive F parameter using conservative normal distribution
450    fn sample_f<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
451        // Use simple normal distribution with smaller variance for stability
452        let u1: f64 = rng.random();
453        let u2: f64 = rng.random();
454
455        // Box-Muller transform with smaller standard deviation
456        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
457        let sample = self.f_m + 0.05 * normal; // Reduced variance from 0.1 to 0.05
458
459        // Clamp to conservative bounds
460        sample.clamp(0.3, 1.0)
461    }
462
463    /// Sample adaptive CR parameter using conservative Gaussian distribution
464    fn sample_cr<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
465        // Use normal distribution with smaller variance for stability
466        let u1: f64 = rng.random();
467        let u2: f64 = rng.random();
468
469        // Box-Muller transform with smaller standard deviation
470        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
471        let sample = self.cr_m + 0.05 * normal; // Reduced variance from 0.1 to 0.05
472
473        sample.clamp(0.1, 0.9)
474    }
475}
476
/// Adaptive differential evolution configuration.
#[derive(Debug, Clone, PartialEq)]
pub struct AdaptiveConfig {
    /// Enable adaptive mutation strategy
    pub adaptive_mutation: bool,
    /// Enable Wrapper Local Search (WLS)
    pub wls_enabled: bool,
    /// Maximum weight for adaptive mutation (w_max)
    pub w_max: f64,
    /// Minimum weight for adaptive mutation (w_min)
    pub w_min: f64,
    /// Weight factor for F parameter adaptation (between 0.8 and 1.0)
    pub w_f: f64,
    /// Weight factor for CR parameter adaptation (between 0.9 and 1.0)
    pub w_cr: f64,
    /// Initial location parameter for Cauchy distribution (F_m)
    pub f_m: f64,
    /// Initial location parameter for Gaussian distribution (CR_m)
    pub cr_m: f64,
    /// WLS probability (what fraction of population to apply WLS to)
    pub wls_prob: f64,
    /// WLS Cauchy scale parameter
    pub wls_scale: f64,
}
501
impl Default for AdaptiveConfig {
    /// Conservative defaults: both adaptive mutation and WLS are disabled;
    /// the remaining values only take effect once those flags are enabled.
    fn default() -> Self {
        Self {
            adaptive_mutation: false,
            wls_enabled: false,
            w_max: 0.9,
            w_min: 0.1,
            w_f: 0.9,
            w_cr: 0.9,
            f_m: 0.5,
            cr_m: 0.6,
            wls_prob: 0.1,
            wls_scale: 0.1,
        }
    }
}
518
/// Polishing configuration using NLopt local optimizer within bounds.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PolishConfig {
    /// Whether polishing is enabled.
    pub enabled: bool,
    /// Local optimizer algorithm name (e.g., "neldermead", "sbplx", "cobyla").
    pub algo: String,
    /// Maximum function evaluations for polishing (e.g., 200*n).
    pub maxeval: usize,
}
529
/// Configuration for the Differential Evolution optimizer.
///
/// This struct holds all parameters controlling the DE algorithm behavior,
/// including population size, mutation/crossover settings, constraints, and
/// convergence criteria. Build it fluently with [`DEConfigBuilder`] or start
/// from [`DEConfig::default`] and set fields directly.
pub struct DEConfig {
    /// Maximum number of generations (iterations).
    pub maxiter: usize,
    /// Population size multiplier (total NP = popsize * n_params_free).
    pub popsize: usize,
    /// Relative tolerance for convergence (population energy std dev).
    pub tol: f64,
    /// Absolute tolerance for convergence on best fitness.
    pub atol: f64,
    /// Mutation factor setting (fixed, dithering range, or adaptive).
    pub mutation: Mutation,
    /// Crossover probability CR in [0, 1].
    pub recombination: f64,
    /// Mutation/crossover strategy.
    pub strategy: Strategy,
    /// Crossover type (binomial or exponential).
    pub crossover: Crossover,
    /// Population initialization scheme.
    pub init: Init,
    /// Update timing (deferred).
    pub updating: Updating,
    /// Optional random seed for reproducibility.
    pub seed: Option<u64>,
    /// Optional integrality mask; true => variable is integer-constrained.
    pub integrality: Option<Vec<bool>>,
    /// Optional initial guess used to replace the best member after init.
    pub x0: Option<Array1<f64>>,
    /// Print objective best at each iteration.
    pub disp: bool,
    /// Optional per-iteration callback (may stop early).
    pub callback: Option<CallbackFn>,
    /// Penalty-based inequality constraints: fc(x) <= 0.
    pub penalty_ineq: Vec<PenaltyTuple>,
    /// Penalty-based equality constraints: h(x) = 0.
    pub penalty_eq: Vec<PenaltyTuple>,
    /// Optional linear constraints treated by penalty: lb <= A x <= ub.
    pub linear_penalty: Option<LinearPenalty>,
    /// Polishing configuration (optional).
    pub polish: Option<PolishConfig>,
    /// Adaptive differential evolution configuration.
    pub adaptive: AdaptiveConfig,
    /// Parallel evaluation configuration.
    pub parallel: parallel_eval::ParallelConfig,
}
579
impl Default for DEConfig {
    /// Defaults broadly mirror SciPy's `differential_evolution`
    /// (popsize 15, tol 1e-2, recombination 0.7, best1bin strategy);
    /// no constraints, no polishing, no adaptation, no parallelism.
    fn default() -> Self {
        Self {
            maxiter: 1000,
            popsize: 15,
            tol: 1e-2,
            atol: 0.0,
            mutation: Mutation::default(),
            recombination: 0.7,
            strategy: Strategy::Best1Bin,
            crossover: Crossover::default(),
            init: Init::default(),
            updating: Updating::default(),
            seed: None,
            integrality: None,
            x0: None,
            disp: false,
            callback: None,
            penalty_ineq: Vec::new(),
            penalty_eq: Vec::new(),
            linear_penalty: None,
            polish: None,
            adaptive: AdaptiveConfig::default(),
            parallel: parallel_eval::ParallelConfig::default(),
        }
    }
}
607
/// Fluent builder for `DEConfig` for ergonomic configuration.
///
/// Each setter consumes and returns the builder; finish with `build()`.
///
/// # Example
///
/// ```rust
/// use autoeq_de::{DEConfigBuilder, Strategy, Mutation};
///
/// let config = DEConfigBuilder::new()
///     .maxiter(500)
///     .popsize(20)
///     .strategy(Strategy::Best1Bin)
///     .mutation(Mutation::Factor(0.8))
///     .recombination(0.9)
///     .seed(42)
///     .build();
/// ```
pub struct DEConfigBuilder {
    // Configuration being assembled; handed out by `build()`.
    cfg: DEConfig,
}
627impl Default for DEConfigBuilder {
628    fn default() -> Self {
629        Self::new()
630    }
631}
632
impl DEConfigBuilder {
    /// Creates a new builder starting from `DEConfig::default()`.
    pub fn new() -> Self {
        Self {
            cfg: DEConfig::default(),
        }
    }
    /// Sets the maximum number of generations (iterations).
    pub fn maxiter(mut self, v: usize) -> Self {
        self.cfg.maxiter = v;
        self
    }
    /// Sets the population size multiplier (total NP = popsize * n_params_free).
    pub fn popsize(mut self, v: usize) -> Self {
        self.cfg.popsize = v;
        self
    }
    /// Sets the relative convergence tolerance (population energy std dev).
    pub fn tol(mut self, v: f64) -> Self {
        self.cfg.tol = v;
        self
    }
    /// Sets the absolute convergence tolerance on the best fitness.
    pub fn atol(mut self, v: f64) -> Self {
        self.cfg.atol = v;
        self
    }
    /// Sets the mutation factor configuration (fixed, dithering, or adaptive).
    pub fn mutation(mut self, v: Mutation) -> Self {
        self.cfg.mutation = v;
        self
    }
    /// Sets the crossover probability CR, expected in [0, 1].
    pub fn recombination(mut self, v: f64) -> Self {
        self.cfg.recombination = v;
        self
    }
    /// Sets the mutation/crossover strategy.
    pub fn strategy(mut self, v: Strategy) -> Self {
        self.cfg.strategy = v;
        self
    }
    /// Sets the crossover type (binomial or exponential).
    pub fn crossover(mut self, v: Crossover) -> Self {
        self.cfg.crossover = v;
        self
    }
    /// Sets the population initialization scheme.
    pub fn init(mut self, v: Init) -> Self {
        self.cfg.init = v;
        self
    }
    /// Sets the random seed for reproducibility.
    pub fn seed(mut self, v: u64) -> Self {
        self.cfg.seed = Some(v);
        self
    }
    /// Sets the integrality mask for mixed-integer optimization
    /// (true => that variable is integer-constrained).
    pub fn integrality(mut self, v: Vec<bool>) -> Self {
        self.cfg.integrality = Some(v);
        self
    }
    /// Sets an initial guess used to replace the best member after init.
    pub fn x0(mut self, v: Array1<f64>) -> Self {
        self.cfg.x0 = Some(v);
        self
    }
    /// Enables/disables per-iteration progress display.
    pub fn disp(mut self, v: bool) -> Self {
        self.cfg.disp = v;
        self
    }
    /// Sets a per-iteration callback; it may stop the run early by
    /// returning `CallbackAction::Stop`.
    pub fn callback(mut self, cb: Box<dyn FnMut(&DEIntermediate) -> CallbackAction>) -> Self {
        self.cfg.callback = Some(cb);
        self
    }
    /// Adds an inequality constraint penalty function f with weight w,
    /// interpreted as f(x) <= 0.
    pub fn add_penalty_ineq<FN>(mut self, f: FN, w: f64) -> Self
    where
        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
    {
        self.cfg.penalty_ineq.push((Arc::new(f), w));
        self
    }
    /// Adds an equality constraint penalty function h with weight w,
    /// interpreted as h(x) = 0.
    pub fn add_penalty_eq<FN>(mut self, f: FN, w: f64) -> Self
    where
        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
    {
        self.cfg.penalty_eq.push((Arc::new(f), w));
        self
    }
    /// Sets a linear constraint penalty (replaces any existing one).
    pub fn linear_penalty(mut self, lp: LinearPenalty) -> Self {
        self.cfg.linear_penalty = Some(lp);
        self
    }
    /// Sets the polishing configuration.
    pub fn polish(mut self, pol: PolishConfig) -> Self {
        self.cfg.polish = Some(pol);
        self
    }
    /// Sets the full adaptive DE configuration.
    pub fn adaptive(mut self, adaptive: AdaptiveConfig) -> Self {
        self.cfg.adaptive = adaptive;
        self
    }
    /// Enables/disables adaptive mutation.
    pub fn enable_adaptive_mutation(mut self, enable: bool) -> Self {
        self.cfg.adaptive.adaptive_mutation = enable;
        self
    }
    /// Enables/disables Wrapper Local Search.
    pub fn enable_wls(mut self, enable: bool) -> Self {
        self.cfg.adaptive.wls_enabled = enable;
        self
    }
    /// Sets the adaptive weight bounds (w_max first, then w_min).
    pub fn adaptive_weights(mut self, w_max: f64, w_min: f64) -> Self {
        self.cfg.adaptive.w_max = w_max;
        self.cfg.adaptive.w_min = w_min;
        self
    }
    /// Sets the parallel evaluation configuration.
    pub fn parallel(mut self, parallel: parallel_eval::ParallelConfig) -> Self {
        self.cfg.parallel = parallel;
        self
    }
    /// Enables/disables parallel evaluation.
    pub fn enable_parallel(mut self, enable: bool) -> Self {
        self.cfg.parallel.enabled = enable;
        self
    }
    /// Sets the number of parallel threads.
    pub fn parallel_threads(mut self, num_threads: usize) -> Self {
        self.cfg.parallel.num_threads = Some(num_threads);
        self
    }
    /// Builds and returns the finished configuration.
    pub fn build(self) -> DEConfig {
        self.cfg
    }
}
777
/// Result/report of a DE optimization run.
///
/// Contains the optimal solution, convergence status, evaluation counts,
/// and the final population for inspection.
#[derive(Clone)]
pub struct DEReport {
    /// The optimal solution vector.
    pub x: Array1<f64>,
    /// The objective function value at the optimal solution.
    pub fun: f64,
    /// Whether the optimization converged successfully.
    pub success: bool,
    /// Human-readable status message.
    pub message: String,
    /// Number of iterations (generations) performed.
    pub nit: usize,
    /// Number of function evaluations performed.
    pub nfev: usize,
    /// Final population matrix (NP x n).
    pub population: Array2<f64>,
    /// Fitness values for each population member.
    pub population_energies: Array1<f64>,
}
800
801impl fmt::Debug for DEReport {
802    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
803        f.debug_struct("DEReport")
804            .field("x", &format!("len={}", self.x.len()))
805            .field("fun", &self.fun)
806            .field("success", &self.success)
807            .field("message", &self.message)
808            .field("nit", &self.nit)
809            .field("nfev", &self.nfev)
810            .field(
811                "population",
812                &format!("{}x{}", self.population.nrows(), self.population.ncols()),
813            )
814            .field(
815                "population_energies",
816                &format!("len={}", self.population_energies.len()),
817            )
818            .finish()
819    }
820}
821
/// Information passed to the per-iteration callback after each generation.
pub struct DEIntermediate {
    /// Current best solution vector.
    pub x: Array1<f64>,
    /// Current best objective value.
    pub fun: f64,
    /// Convergence measure (population fitness std dev).
    pub convergence: f64,
    /// Current iteration (generation) number.
    pub iter: usize,
}
833
/// Action returned by the callback to control optimization flow.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CallbackAction {
    /// Continue optimization.
    Continue,
    /// Stop optimization early.
    Stop,
}
841
/// Differential Evolution optimizer.
///
/// A population-based stochastic optimizer for continuous functions.
/// Use [`DifferentialEvolution::new`] to create an instance, configure
/// with [`config_mut`](Self::config_mut), then call [`solve`](Self::solve).
pub struct DifferentialEvolution<'a, F>
where
    F: Fn(&Array1<f64>) -> f64 + Sync,
{
    // Objective function to minimize.
    func: &'a F,
    // Per-dimension lower bounds.
    lower: Array1<f64>,
    // Per-dimension upper bounds.
    upper: Array1<f64>,
    // Algorithm configuration (see `config_mut`).
    config: DEConfig,
}
856
857impl<'a, F> DifferentialEvolution<'a, F>
858where
859    F: Fn(&Array1<f64>) -> f64 + Sync,
860{
861    /// Creates a new DE optimizer with objective `func` and bounds [lower, upper].
862    ///
863    /// # Errors
864    ///
865    /// Returns `DEError::BoundsMismatch` if `lower` and `upper` have different lengths.
866    /// Returns `DEError::InvalidBounds` if any lower bound exceeds its corresponding upper bound.
867    pub fn new(func: &'a F, lower: Array1<f64>, upper: Array1<f64>) -> Result<Self> {
868        if lower.len() != upper.len() {
869            return Err(DEError::BoundsMismatch {
870                lower_len: lower.len(),
871                upper_len: upper.len(),
872            });
873        }
874
875        // Validate that lower <= upper for all dimensions
876        for i in 0..lower.len() {
877            if lower[i] > upper[i] {
878                return Err(DEError::InvalidBounds {
879                    index: i,
880                    lower: lower[i],
881                    upper: upper[i],
882                });
883            }
884        }
885
886        Ok(Self {
887            func,
888            lower,
889            upper,
890            config: DEConfig::default(),
891        })
892    }
893
894    /// Mutable access to configuration
895    pub fn config_mut(&mut self) -> &mut DEConfig {
896        &mut self.config
897    }
898
899    /// Run the optimization and return a report
900    pub fn solve(&mut self) -> DEReport {
901        use apply_integrality::apply_integrality;
902        use apply_wls::apply_wls;
903        use crossover_binomial::binomial_crossover;
904        use crossover_exponential::exponential_crossover;
905        use init_latin_hypercube::init_latin_hypercube;
906        use init_random::init_random;
907        use mutant_adaptive::mutant_adaptive;
908        use mutant_best1::mutant_best1;
909        use mutant_best2::mutant_best2;
910        use mutant_current_to_best1::mutant_current_to_best1;
911        use mutant_rand_to_best1::mutant_rand_to_best1;
912        use mutant_rand1::mutant_rand1;
913        use mutant_rand2::mutant_rand2;
914        use parallel_eval::evaluate_trials_parallel;
915        use std::sync::Arc;
916
917        let n = self.lower.len();
918
919        // Identify fixed (equal-bounds) and free variables
920        let mut is_free: Vec<bool> = Vec::with_capacity(n);
921        for i in 0..n {
922            is_free.push((self.upper[i] - self.lower[i]).abs() > 0.0);
923        }
924        let n_free = is_free.iter().filter(|&&b| b).count();
925        let _n_equal = n - n_free;
926        if n_free == 0 {
927            // All fixed; just evaluate x = lower
928            let x_fixed = self.lower.clone();
929            let mut x_eval = x_fixed.clone();
930            if let Some(mask) = &self.config.integrality {
931                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
932            }
933            let f = (self.func)(&x_eval);
934            return DEReport {
935                x: x_eval,
936                fun: f,
937                success: true,
938                message: "All variables fixed by bounds".into(),
939                nit: 0,
940                nfev: 1,
941                population: Array2::zeros((1, n)),
942                population_energies: Array1::from(vec![f]),
943            };
944        }
945
946        let npop = self.config.popsize * n_free;
947        let _bounds_span = &self.upper - &self.lower;
948
949        if self.config.disp {
950            eprintln!(
951                "DE Init: {} dimensions ({} free), population={}, maxiter={}",
952                n, n_free, npop, self.config.maxiter
953            );
954            eprintln!(
955                "  Strategy: {:?}, Mutation: {:?}, Crossover: CR={:.3}",
956                self.config.strategy, self.config.mutation, self.config.recombination
957            );
958            eprintln!(
959                "  Tolerances: tol={:.2e}, atol={:.2e}",
960                self.config.tol, self.config.atol
961            );
962        }
963
964        // Timing toggle via env var
965        let timing_enabled = std::env::var("AUTOEQ_DE_TIMING")
966            .map(|v| v != "0")
967            .unwrap_or(false);
968
969        // Configure global rayon thread pool once if requested
970        if let Some(n) = self.config.parallel.num_threads {
971            // Ignore error if global pool already set
972            let _ = rayon::ThreadPoolBuilder::new()
973                .num_threads(n)
974                .build_global();
975        }
976
977        // RNG
978        let mut rng: StdRng = match self.config.seed {
979            Some(s) => StdRng::seed_from_u64(s),
980            None => {
981                let mut thread_rng = rand::rng();
982                StdRng::from_rng(&mut thread_rng)
983            }
984        };
985
986        // Initialize population in [lower, upper]
987        let mut pop = match self.config.init {
988            Init::LatinHypercube => {
989                if self.config.disp {
990                    eprintln!("  Using Latin Hypercube initialization");
991                }
992                init_latin_hypercube(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
993            }
994            Init::Random => {
995                if self.config.disp {
996                    eprintln!("  Using Random initialization");
997                }
998                init_random(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
999            }
1000        };
1001
1002        // Evaluate energies (objective + penalties)
1003        let mut nfev: usize = 0;
1004        if self.config.disp {
1005            eprintln!("  Evaluating initial population of {} individuals...", npop);
1006        }
1007
1008        // Prepare population for evaluation (apply integrality constraints)
1009        let mut eval_pop = pop.clone();
1010        let t_integrality0 = Instant::now();
1011        if let Some(mask) = &self.config.integrality {
1012            for i in 0..npop {
1013                let mut row = eval_pop.row_mut(i);
1014                let mut x_eval = row.to_owned();
1015                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
1016                row.assign(&x_eval);
1017            }
1018        }
1019        let t_integrality = t_integrality0.elapsed();
1020
1021        // Build thread-safe energy function that includes penalties
1022        let func_ref = self.func;
1023        let penalty_ineq_vec: Vec<PenaltyTuple> = self
1024            .config
1025            .penalty_ineq
1026            .iter()
1027            .map(|(f, w)| (f.clone(), *w))
1028            .collect();
1029        let penalty_eq_vec: Vec<PenaltyTuple> = self
1030            .config
1031            .penalty_eq
1032            .iter()
1033            .map(|(f, w)| (f.clone(), *w))
1034            .collect();
1035        let linear_penalty = self.config.linear_penalty.clone();
1036
1037        let energy_fn = Arc::new(move |x: &Array1<f64>| -> f64 {
1038            let base = (func_ref)(x);
1039            let mut p = 0.0;
1040            for (f, w) in &penalty_ineq_vec {
1041                let v = f(x);
1042                let viol = v.max(0.0);
1043                p += w * viol * viol;
1044            }
1045            for (h, w) in &penalty_eq_vec {
1046                let v = h(x);
1047                p += w * v * v;
1048            }
1049            if let Some(ref lp) = linear_penalty {
1050                let ax = lp.a.dot(&x.view());
1051                for i in 0..ax.len() {
1052                    let v = ax[i];
1053                    let lo = lp.lb[i];
1054                    let hi = lp.ub[i];
1055                    if v < lo {
1056                        let d = lo - v;
1057                        p += lp.weight * d * d;
1058                    }
1059                    if v > hi {
1060                        let d = v - hi;
1061                        p += lp.weight * d * d;
1062                    }
1063                }
1064            }
1065            base + p
1066        });
1067
1068        let t_eval0 = Instant::now();
1069        let mut energies = parallel_eval::evaluate_population_parallel(
1070            &eval_pop,
1071            energy_fn.clone(),
1072            &self.config.parallel,
1073        );
1074        let t_eval_init = t_eval0.elapsed();
1075        nfev += npop;
1076        if timing_enabled {
1077            eprintln!(
1078                "TIMING init: integrality={:.3} ms, eval={:.3} ms",
1079                t_integrality.as_secs_f64() * 1e3,
1080                t_eval_init.as_secs_f64() * 1e3
1081            );
1082        }
1083
1084        // Report initial population statistics
1085        let pop_mean = energies.mean().unwrap_or(0.0);
1086        let pop_std = energies.std(0.0);
1087        if self.config.disp {
1088            eprintln!(
1089                "  Initial population: mean={:.6e}, std={:.6e}",
1090                pop_mean, pop_std
1091            );
1092        }
1093
1094        // If x0 provided, override the best member
1095        if let Some(x0) = &self.config.x0 {
1096            let mut x0c = x0.clone();
1097            // Clip to bounds using ndarray
1098            for i in 0..x0c.len() {
1099                x0c[i] = x0c[i].clamp(self.lower[i], self.upper[i]);
1100            }
1101            if let Some(mask) = &self.config.integrality {
1102                apply_integrality(&mut x0c, mask, &self.lower, &self.upper);
1103            }
1104            let f0 = self.energy(&x0c);
1105            nfev += 1;
1106            // find current best
1107            let (best_idx, _best_f) = argmin(&energies);
1108            pop.row_mut(best_idx).assign(&x0c.view());
1109            energies[best_idx] = f0;
1110        }
1111
1112        let (mut best_idx, mut best_f) = argmin(&energies);
1113        let mut best_x = pop.row(best_idx).to_owned();
1114
1115        if self.config.disp {
1116            eprintln!(
1117                "  Initial best: fitness={:.6e} at index {}",
1118                best_f, best_idx
1119            );
1120            let param_summary: Vec<String> = (0..best_x.len() / 3)
1121                .map(|i| {
1122                    let freq = 10f64.powf(best_x[i * 3]);
1123                    let q = best_x[i * 3 + 1];
1124                    let gain = best_x[i * 3 + 2];
1125                    format!("f{:.0}Hz/Q{:.2}/G{:.2}dB", freq, q, gain)
1126                })
1127                .collect();
1128            eprintln!("  Initial best params: [{}]", param_summary.join(", "));
1129        }
1130
1131        if self.config.disp {
1132            eprintln!("DE iter {:4}  best_f={:.6e}", 0, best_f);
1133        }
1134
1135        // Initialize adaptive state if adaptive strategies are enabled
1136        let mut adaptive_state = if matches!(
1137            self.config.strategy,
1138            Strategy::AdaptiveBin | Strategy::AdaptiveExp
1139        ) || self.config.adaptive.adaptive_mutation
1140        {
1141            Some(AdaptiveState::new(&self.config.adaptive))
1142        } else {
1143            None
1144        };
1145
1146        // Main loop
1147        let mut success = false;
1148        let mut message = String::new();
1149        let mut nit = 0;
1150        let mut accepted_trials;
1151        let mut improvement_count;
1152
1153        let mut t_build_tot = std::time::Duration::ZERO;
1154        let mut t_eval_tot = std::time::Duration::ZERO;
1155        let mut t_select_tot = std::time::Duration::ZERO;
1156        let mut t_iter_tot = std::time::Duration::ZERO;
1157
1158        for iter in 1..=self.config.maxiter {
1159            nit = iter;
1160            accepted_trials = 0;
1161            improvement_count = 0;
1162
1163            let iter_start = Instant::now();
1164
1165            // Pre-sort indices for adaptive strategies to avoid re-sorting in the loop
1166            let sorted_indices = if matches!(
1167                self.config.strategy,
1168                Strategy::AdaptiveBin | Strategy::AdaptiveExp
1169            ) {
1170                let mut indices: Vec<usize> = (0..npop).collect();
1171                indices.sort_by(|&a, &b| {
1172                    energies[a]
1173                        .partial_cmp(&energies[b])
1174                        .unwrap_or(std::cmp::Ordering::Equal)
1175                });
1176                indices
1177            } else {
1178                Vec::new() // Not needed for other strategies
1179            };
1180
1181            // Generate all trials first, then evaluate in parallel
1182            let t_build0 = Instant::now();
1183
1184            // Parallelize trial generation using rayon
1185            use rayon::prelude::*;
1186            let (trials, trial_params): (Vec<_>, Vec<_>) = (0..npop)
1187                .into_par_iter()
1188                .map(|i| {
1189                    // Create thread-local RNG from base seed + iteration + individual index
1190                    let mut local_rng: StdRng = if let Some(base_seed) = self.config.seed {
1191                        StdRng::seed_from_u64(
1192                            base_seed
1193                                .wrapping_add((iter as u64) << 32)
1194                                .wrapping_add(i as u64),
1195                        )
1196                    } else {
1197                        // Use thread_rng for unseeded runs
1198                        let mut thread_rng = rand::rng();
1199                        StdRng::from_rng(&mut thread_rng)
1200                    };
1201
1202                    // Sample mutation factor and crossover rate (adaptive or fixed)
1203                    let (f, cr) = if let Some(ref adaptive) = adaptive_state {
1204                        // Use adaptive parameter sampling
1205                        let adaptive_f = adaptive.sample_f(&mut local_rng);
1206                        let adaptive_cr = adaptive.sample_cr(&mut local_rng);
1207                        (adaptive_f, adaptive_cr)
1208                    } else {
1209                        // Use fixed or dithered parameters
1210                        (
1211                            self.config.mutation.sample(&mut local_rng),
1212                            self.config.recombination,
1213                        )
1214                    };
1215
1216                    // Generate mutant and apply crossover based on strategy
1217                    let (mutant, cross) = match self.config.strategy {
1218                        Strategy::Best1Bin => (
1219                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
1220                            Crossover::Binomial,
1221                        ),
1222                        Strategy::Best1Exp => (
1223                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
1224                            Crossover::Exponential,
1225                        ),
1226                        Strategy::Rand1Bin => (
1227                            mutant_rand1(i, &pop, f, &mut local_rng),
1228                            Crossover::Binomial,
1229                        ),
1230                        Strategy::Rand1Exp => (
1231                            mutant_rand1(i, &pop, f, &mut local_rng),
1232                            Crossover::Exponential,
1233                        ),
1234                        Strategy::Rand2Bin => (
1235                            mutant_rand2(i, &pop, f, &mut local_rng),
1236                            Crossover::Binomial,
1237                        ),
1238                        Strategy::Rand2Exp => (
1239                            mutant_rand2(i, &pop, f, &mut local_rng),
1240                            Crossover::Exponential,
1241                        ),
1242                        Strategy::CurrentToBest1Bin => (
1243                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
1244                            Crossover::Binomial,
1245                        ),
1246                        Strategy::CurrentToBest1Exp => (
1247                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
1248                            Crossover::Exponential,
1249                        ),
1250                        Strategy::Best2Bin => (
1251                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
1252                            Crossover::Binomial,
1253                        ),
1254                        Strategy::Best2Exp => (
1255                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
1256                            Crossover::Exponential,
1257                        ),
1258                        Strategy::RandToBest1Bin => (
1259                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
1260                            Crossover::Binomial,
1261                        ),
1262                        Strategy::RandToBest1Exp => (
1263                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
1264                            Crossover::Exponential,
1265                        ),
1266                        Strategy::AdaptiveBin => {
1267                            if let Some(ref adaptive) = adaptive_state {
1268                                (
1269                                    mutant_adaptive(
1270                                        i,
1271                                        &pop,
1272                                        &sorted_indices,
1273                                        adaptive.current_w,
1274                                        f,
1275                                        &mut local_rng,
1276                                    ),
1277                                    Crossover::Binomial,
1278                                )
1279                            } else {
1280                                // Fallback to rand1 if adaptive state not available
1281                                (
1282                                    mutant_rand1(i, &pop, f, &mut local_rng),
1283                                    Crossover::Binomial,
1284                                )
1285                            }
1286                        }
1287                        Strategy::AdaptiveExp => {
1288                            if let Some(ref adaptive) = adaptive_state {
1289                                (
1290                                    mutant_adaptive(
1291                                        i,
1292                                        &pop,
1293                                        &sorted_indices,
1294                                        adaptive.current_w,
1295                                        f,
1296                                        &mut local_rng,
1297                                    ),
1298                                    Crossover::Exponential,
1299                                )
1300                            } else {
1301                                // Fallback to rand1 if adaptive state not available
1302                                (
1303                                    mutant_rand1(i, &pop, f, &mut local_rng),
1304                                    Crossover::Exponential,
1305                                )
1306                            }
1307                        }
1308                    };
1309
1310                    // If strategy didn't dictate crossover, fallback to config
1311                    let crossover = cross;
1312                    let trial = match crossover {
1313                        Crossover::Binomial => {
1314                            binomial_crossover(&pop.row(i).to_owned(), &mutant, cr, &mut local_rng)
1315                        }
1316                        Crossover::Exponential => exponential_crossover(
1317                            &pop.row(i).to_owned(),
1318                            &mutant,
1319                            cr,
1320                            &mut local_rng,
1321                        ),
1322                    };
1323
1324                    // Apply WLS if enabled
1325                    let wls_trial = if self.config.adaptive.wls_enabled
1326                        && local_rng.random::<f64>() < self.config.adaptive.wls_prob
1327                    {
1328                        apply_wls(
1329                            &trial,
1330                            &self.lower,
1331                            &self.upper,
1332                            self.config.adaptive.wls_scale,
1333                            &mut local_rng,
1334                        )
1335                    } else {
1336                        trial.clone()
1337                    };
1338
1339                    // Clip to bounds using ndarray
1340                    let mut trial_clipped = wls_trial;
1341                    for j in 0..trial_clipped.len() {
1342                        trial_clipped[j] = trial_clipped[j].clamp(self.lower[j], self.upper[j]);
1343                    }
1344
1345                    // Apply integrality if provided
1346                    if let Some(mask) = &self.config.integrality {
1347                        apply_integrality(&mut trial_clipped, mask, &self.lower, &self.upper);
1348                    }
1349
1350                    // Return trial and parameters
1351                    (trial_clipped, (f, cr))
1352                })
1353                .unzip();
1354
1355            let t_build = t_build0.elapsed();
1356            let t_eval0 = Instant::now();
1357            let trial_energies =
1358                evaluate_trials_parallel(&trials, energy_fn.clone(), &self.config.parallel);
1359            let t_eval = t_eval0.elapsed();
1360            nfev += npop;
1361
1362            let t_select0 = Instant::now();
1363            // Selection phase: update population based on trial results
1364            for (i, (trial, trial_energy)) in
1365                trials.into_iter().zip(trial_energies.iter()).enumerate()
1366            {
1367                let (f, cr) = trial_params[i];
1368
1369                // Selection: replace if better
1370                if *trial_energy <= energies[i] {
1371                    pop.row_mut(i).assign(&trial.view());
1372                    energies[i] = *trial_energy;
1373                    accepted_trials += 1;
1374
1375                    // Update adaptive parameters if improvement
1376                    if let Some(ref mut adaptive) = adaptive_state {
1377                        adaptive.record_success(f, cr);
1378                    }
1379
1380                    // Track if this is an improvement over the current best
1381                    if *trial_energy < best_f {
1382                        improvement_count += 1;
1383                    }
1384                }
1385            }
1386            let t_select = t_select0.elapsed();
1387
1388            t_build_tot += t_build;
1389            t_eval_tot += t_eval;
1390            t_select_tot += t_select;
1391            let iter_dur = iter_start.elapsed();
1392            t_iter_tot += iter_dur;
1393
1394            if timing_enabled && (iter <= 5 || iter % 10 == 0) {
1395                eprintln!(
1396                    "TIMING iter {:4}: build={:.3} ms, eval={:.3} ms, select={:.3} ms, total={:.3} ms",
1397                    iter,
1398                    t_build.as_secs_f64() * 1e3,
1399                    t_eval.as_secs_f64() * 1e3,
1400                    t_select.as_secs_f64() * 1e3,
1401                    iter_dur.as_secs_f64() * 1e3,
1402                );
1403            }
1404
1405            // Update adaptive parameters after each generation
1406            if let Some(ref mut adaptive) = adaptive_state {
1407                adaptive.update(&self.config.adaptive, iter, self.config.maxiter);
1408            }
1409
1410            // Update best solution after generation
1411            let (new_best_idx, new_best_f) = argmin(&energies);
1412            if new_best_f < best_f {
1413                best_idx = new_best_idx;
1414                best_f = new_best_f;
1415                best_x = pop.row(best_idx).to_owned();
1416            }
1417
1418            // Convergence check
1419            let pop_mean = energies.mean().unwrap_or(0.0);
1420            let pop_std = energies.std(0.0);
1421            let convergence_threshold = self.config.atol + self.config.tol * pop_mean.abs();
1422
1423            if self.config.disp {
1424                eprintln!(
1425                    "DE iter {:4}  best_f={:.6e}  std={:.3e}  accepted={}/{}, improved={}",
1426                    iter, best_f, pop_std, accepted_trials, npop, improvement_count
1427                );
1428            }
1429
1430            // Callback
1431            if let Some(ref mut cb) = self.config.callback {
1432                let intermediate = DEIntermediate {
1433                    x: best_x.clone(),
1434                    fun: best_f,
1435                    convergence: pop_std,
1436                    iter,
1437                };
1438                match cb(&intermediate) {
1439                    CallbackAction::Stop => {
1440                        success = true;
1441                        message = "Optimization stopped by callback".to_string();
1442                        break;
1443                    }
1444                    CallbackAction::Continue => {}
1445                }
1446            }
1447
1448            if pop_std <= convergence_threshold {
1449                success = true;
1450                message = format!(
1451                    "Converged: std(pop_f)={:.3e} <= threshold={:.3e}",
1452                    pop_std, convergence_threshold
1453                );
1454                break;
1455            }
1456        }
1457
1458        if !success {
1459            message = format!("Maximum iterations reached: {}", self.config.maxiter);
1460        }
1461
1462        if self.config.disp {
1463            eprintln!("DE finished: {}", message);
1464        }
1465
1466        // Polish if configured
1467        let (final_x, final_f, polish_nfev) = if let Some(ref polish_cfg) = self.config.polish {
1468            if polish_cfg.enabled {
1469                self.polish(&best_x)
1470            } else {
1471                (best_x.clone(), best_f, 0)
1472            }
1473        } else {
1474            (best_x.clone(), best_f, 0)
1475        };
1476
1477        if timing_enabled {
1478            eprintln!(
1479                "TIMING total: build={:.3} s, eval={:.3} s, select={:.3} s, iter_total={:.3} s",
1480                t_build_tot.as_secs_f64(),
1481                t_eval_tot.as_secs_f64(),
1482                t_select_tot.as_secs_f64(),
1483                t_iter_tot.as_secs_f64()
1484            );
1485        }
1486
1487        self.finish_report(
1488            pop,
1489            energies,
1490            final_x,
1491            final_f,
1492            success,
1493            message,
1494            nit,
1495            nfev + polish_nfev,
1496        )
1497    }
1498}
1499
#[cfg(test)]
mod strategy_tests {
    use super::*;

    /// Parsing canonical lowercase strategy names via `FromStr` yields
    /// the corresponding `Strategy` variants.
    #[test]
    fn test_parse_strategy_variants() {
        let best1exp = "best1exp".parse::<Strategy>().unwrap();
        assert!(matches!(best1exp, Strategy::Best1Exp));

        let rand1bin = "rand1bin".parse::<Strategy>().unwrap();
        assert!(matches!(rand1bin, Strategy::Rand1Bin));

        let randtobest1exp = "randtobest1exp".parse::<Strategy>().unwrap();
        assert!(matches!(randtobest1exp, Strategy::RandToBest1Exp));
    }
}
1519}