// math_audio_optimisation/mod.rs
//! Differential Evolution optimization library.
//!
//! This crate provides a Rust implementation of the Differential Evolution (DE)
//! algorithm, a population-based stochastic optimizer for continuous optimization
//! problems. The implementation is inspired by SciPy's differential_evolution.
//!
//! # Features
//!
//! - Multiple mutation strategies (Best1, Rand1, CurrentToBest1, etc.)
//! - Binomial and exponential crossover
//! - Adaptive parameter control (SHADE-style)
//! - **L-SHADE**: Linear population size reduction for faster convergence
//! - **External Archive**: Maintains diversity by storing discarded solutions
//! - **Current-to-pbest/1**: SHADE mutation strategy for balanced exploration
//! - Parallel population evaluation
//! - Constraint handling via penalty methods
//! - Latin Hypercube initialization
//!
//! # Example
//!
//! ```rust
//! use math_audio_optimisation::{differential_evolution, DEConfigBuilder};
//!
//! // Minimize the sphere function: f(x) = sum(x_i^2)
//! let bounds = vec![(-5.0, 5.0), (-5.0, 5.0)];
//! let config = DEConfigBuilder::new()
//!     .maxiter(100)
//!     .seed(42)
//!     .build()
//!     .expect("invalid config");
//!
//! let result = differential_evolution(
//!     &|x| x.iter().map(|&xi| xi * xi).sum(),
//!     &bounds,
//!     config,
//! ).expect("optimization should succeed");
//!
//! assert!(result.fun < 1e-6);
//! ```
//!
//! # L-SHADE Example
//!
//! ```rust
//! use math_audio_optimisation::{differential_evolution, DEConfigBuilder, Strategy, LShadeConfig};
//!
//! let bounds = vec![(-5.0, 5.0), (-5.0, 5.0)];
//! let lshade_config = LShadeConfig {
//!     np_init: 18,
//!     np_final: 4,
//!     p: 0.11,
//!     arc_rate: 2.1,
//!     ..Default::default()
//! };
//!
//! let config = DEConfigBuilder::new()
//!     .maxiter(100)
//!     .strategy(Strategy::LShadeBin)
//!     .lshade(lshade_config)
//!     .seed(42)
//!     .build()
//!     .expect("invalid config");
//!
//! let result = differential_evolution(
//!     &|x| x.iter().map(|&xi| xi * xi).sum(),
//!     &bounds,
//!     config,
//! ).expect("optimization should succeed");
//!
//! assert!(result.fun < 1e-6);
//! ```
#![doc = include_str!("../README.md")]
#![doc = include_str!("../REFERENCES.md")]
#![warn(missing_docs)]

75pub mod error;
76pub mod levenberg_marquardt;
77pub use error::{DEError, Result};
78pub use levenberg_marquardt::{
79    levenberg_marquardt, LMCallbackAction, LMConfig, LMConfigBuilder, LMError, LMIntermediate,
80    LMReport, LMResult,
81};
82
83use std::fmt;
84use std::str::FromStr;
85use std::sync::Arc;
86use std::sync::RwLock;
87
88use ndarray::{Array1, Array2, Zip};
89use rand::rngs::StdRng;
90use rand::{Rng, SeedableRng};
91use std::time::Instant;
92
93/// Linear penalty stacking utilities for combining multiple constraints.
94pub mod stack_linear_penalty;
95
96/// Integer variable handling for mixed-integer optimization.
97pub mod apply_integrality;
98/// Wrapper Local Search (WLS) application for local refinement.
99pub mod apply_wls;
100
101/// Utilities for selecting distinct random indices from a population.
102pub mod distinct_indices;
103/// External archive for L-SHADE algorithm.
104pub mod external_archive;
105/// Latin Hypercube Sampling initialization strategy.
106pub mod init_latin_hypercube;
107/// Random uniform initialization strategy.
108pub mod init_random;
109/// L-SHADE configuration.
110pub mod lshade;
111
112/// Adaptive mutation strategy with dynamic parameter control.
113pub mod mutant_adaptive;
114/// Best/1 mutation strategy: uses best individual plus one difference vector.
115pub mod mutant_best1;
116/// Best/2 mutation strategy: uses best individual plus two difference vectors.
117pub mod mutant_best2;
118/// Current-to-best/1 mutation: blends current with best individual.
119pub mod mutant_current_to_best1;
120/// Current-to-pbest/1 mutation: blends current with p-best individual (SHADE).
121pub mod mutant_current_to_pbest1;
122/// Rand/1 mutation strategy: uses random individual plus one difference vector.
123pub mod mutant_rand1;
124/// Rand/2 mutation strategy: uses random individual plus two difference vectors.
125pub mod mutant_rand2;
126/// Rand-to-best/1 mutation: blends random with best individual.
127pub mod mutant_rand_to_best1;
128
129/// Binomial (uniform) crossover implementation.
130pub mod crossover_binomial;
131/// Exponential crossover implementation.
132pub mod crossover_exponential;
133
134/// Comprehensive tests for DE strategies and features.
135#[cfg(test)]
136mod de_tests;
137/// Main differential evolution algorithm implementation.
138pub mod differential_evolution;
139/// Registry of standard test functions for benchmarking.
140pub mod function_registry;
141/// Internal helper functions for DE implementation.
142pub mod impl_helpers;
143/// Metadata-driven optimization examples and tests.
144pub mod metadata;
145/// Parallel population evaluation support.
146pub mod parallel_eval;
147/// Optimization recording for analysis and debugging.
148pub mod recorder;
149/// Recorded optimization wrapper for testing.
150pub mod run_recorded;
151pub use differential_evolution::differential_evolution;
152pub use external_archive::ExternalArchive;
153pub use lshade::LShadeConfig;
154pub use parallel_eval::ParallelConfig;
155pub use recorder::{OptimizationRecord, OptimizationRecorder};
156pub use run_recorded::run_recorded_differential_evolution;
157
158// Type aliases to reduce complexity
159/// Scalar constraint function type
160pub type ScalarConstraintFn = Arc<dyn Fn(&Array1<f64>) -> f64 + Send + Sync>;
161/// Vector constraint function type
162pub type VectorConstraintFn = Arc<dyn Fn(&Array1<f64>) -> Array1<f64> + Send + Sync>;
163/// Penalty tuple type (function, weight)
164pub type PenaltyTuple = (ScalarConstraintFn, f64);
165/// Callback function type
166pub type CallbackFn = Box<dyn FnMut(&DEIntermediate) -> CallbackAction>;
167
168pub(crate) fn argmin(v: &Array1<f64>) -> (usize, f64) {
169    let mut best_i = 0usize;
170    let mut best_v = v[0];
171    for (i, &val) in v.iter().enumerate() {
172        if val < best_v {
173            best_v = val;
174            best_i = i;
175        }
176    }
177    (best_i, best_v)
178}
179
/// Mutation/crossover strategy for Differential Evolution.
///
/// Names follow the `{mutation}{n}{crossover}` convention:
/// - `mutation`: how the base vector is chosen (Best, Rand, CurrentToBest,
///   RandToBest, Adaptive, LShade)
/// - `n`: how many difference vectors are added (1 or 2)
/// - `crossover`: `Bin` for binomial, `Exp` for exponential
#[derive(Debug, Clone, Copy)]
pub enum Strategy {
    /// Best/1 mutation with binomial crossover.
    Best1Bin,
    /// Best/1 mutation with exponential crossover.
    Best1Exp,
    /// Rand/1 mutation with binomial crossover.
    Rand1Bin,
    /// Rand/1 mutation with exponential crossover.
    Rand1Exp,
    /// Rand/2 mutation with binomial crossover.
    Rand2Bin,
    /// Rand/2 mutation with exponential crossover.
    Rand2Exp,
    /// Current-to-best/1 mutation with binomial crossover.
    CurrentToBest1Bin,
    /// Current-to-best/1 mutation with exponential crossover.
    CurrentToBest1Exp,
    /// Best/2 mutation with binomial crossover.
    Best2Bin,
    /// Best/2 mutation with exponential crossover.
    Best2Exp,
    /// Rand-to-best/1 mutation with binomial crossover.
    RandToBest1Bin,
    /// Rand-to-best/1 mutation with exponential crossover.
    RandToBest1Exp,
    /// Adaptive (SAM-style) mutation with binomial crossover.
    AdaptiveBin,
    /// Adaptive mutation with exponential crossover.
    AdaptiveExp,
    /// L-SHADE (current-to-pbest/1, archive, shrinking population) with binomial crossover.
    LShadeBin,
    /// L-SHADE (current-to-pbest/1, archive, shrinking population) with exponential crossover.
    LShadeExp,
}

222impl FromStr for Strategy {
223    type Err = String;
224    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
225        let t = s.to_lowercase();
226        match t.as_str() {
227            "best1bin" | "best1" => Ok(Strategy::Best1Bin),
228            "best1exp" => Ok(Strategy::Best1Exp),
229            "rand1bin" | "rand1" => Ok(Strategy::Rand1Bin),
230            "rand1exp" => Ok(Strategy::Rand1Exp),
231            "rand2bin" | "rand2" => Ok(Strategy::Rand2Bin),
232            "rand2exp" => Ok(Strategy::Rand2Exp),
233            "currenttobest1bin" | "current-to-best1bin" | "current_to_best1bin" => {
234                Ok(Strategy::CurrentToBest1Bin)
235            }
236            "currenttobest1exp" | "current-to-best1exp" | "current_to_best1exp" => {
237                Ok(Strategy::CurrentToBest1Exp)
238            }
239            "best2bin" | "best2" => Ok(Strategy::Best2Bin),
240            "best2exp" => Ok(Strategy::Best2Exp),
241            "randtobest1bin" | "rand-to-best1bin" | "rand_to_best1bin" => {
242                Ok(Strategy::RandToBest1Bin)
243            }
244            "randtobest1exp" | "rand-to-best1exp" | "rand_to_best1exp" => {
245                Ok(Strategy::RandToBest1Exp)
246            }
247            "adaptivebin" | "adaptive-bin" | "adaptive_bin" | "adaptive" => {
248                Ok(Strategy::AdaptiveBin)
249            }
250            "adaptiveexp" | "adaptive-exp" | "adaptive_exp" => Ok(Strategy::AdaptiveExp),
251            "lshadebin" | "lshade-bin" | "lshade_bin" | "lshade" => Ok(Strategy::LShadeBin),
252            "lshadeexp" | "lshade-exp" | "lshade_exp" => Ok(Strategy::LShadeExp),
253            _ => Err(format!("unknown strategy: {}", s)),
254        }
255    }
256}
257
/// Which crossover operator combines the mutant with the parent.
#[derive(Debug, Clone, Copy, Default)]
pub enum Crossover {
    /// Binomial (uniform) crossover: each gene crosses independently.
    #[default]
    Binomial,
    /// Exponential crossover: a contiguous run of genes crosses.
    Exponential,
}

/// How the mutation factor F is chosen each generation.
#[derive(Debug, Clone, Copy)]
pub enum Mutation {
    /// A single fixed factor F in [0, 2).
    Factor(f64),
    /// Dithering: F drawn uniformly from [min, max), 0 <= min < max <= 2.
    Range {
        /// Lower end of the dithering interval.
        min: f64,
        /// Upper end of the dithering interval.
        max: f64,
    },
    /// Adaptive control; F evolves from a starting value during the run.
    Adaptive {
        /// Starting mutation factor before any adaptation occurs.
        initial_f: f64,
    },
}

287impl Default for Mutation {
288    fn default() -> Self {
289        let _ = Mutation::Factor(0.8);
290        Mutation::Range { min: 0.0, max: 2.0 }
291    }
292}
293
294impl Mutation {
295    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
296        match *self {
297            Mutation::Factor(f) => f,
298            Mutation::Range { min, max } => rng.random_range(min..max),
299            Mutation::Adaptive { initial_f } => initial_f, // Will be overridden by adaptive logic
300        }
301    }
302
303    /// Sample from Cauchy distribution for adaptive mutation (F parameter)
304    #[allow(dead_code)]
305    fn sample_cauchy<R: Rng + ?Sized>(&self, f_m: f64, _scale: f64, rng: &mut R) -> f64 {
306        // Simplified version using normal random for now
307        let perturbation = (rng.random::<f64>() - 0.5) * 0.2; // Small perturbation
308        (f_m + perturbation).clamp(0.0, 2.0) // Clamp to valid range
309    }
310}
311
/// How the initial population is generated.
#[derive(Debug, Clone, Copy, Default)]
pub enum Init {
    /// Latin Hypercube Sampling: stratified coverage of the search space.
    #[default]
    LatinHypercube,
    /// Plain uniform random sampling within the bounds.
    Random,
}

/// When the best individual is refreshed within a generation
/// (only deferred updating is implemented).
#[derive(Debug, Clone, Copy, Default)]
pub enum Updating {
    /// The best is recomputed only after every trial has been evaluated.
    #[default]
    Deferred,
}

330/// Linear penalty specification: lb <= A x <= ub (component-wise).
331#[derive(Debug, Clone)]
332pub struct LinearPenalty {
333    /// Constraint matrix A (m x n).
334    pub a: Array2<f64>,
335    /// Lower bounds vector (m elements).
336    pub lb: Array1<f64>,
337    /// Upper bounds vector (m elements).
338    pub ub: Array1<f64>,
339    /// Penalty weight for constraint violations.
340    pub weight: f64,
341}
342
343/// SciPy-like linear constraint helper: lb <= A x <= ub.
344#[derive(Debug, Clone)]
345pub struct LinearConstraintHelper {
346    /// Constraint matrix A (m x n).
347    pub a: Array2<f64>,
348    /// Lower bounds vector (m elements).
349    pub lb: Array1<f64>,
350    /// Upper bounds vector (m elements).
351    pub ub: Array1<f64>,
352}
353
354impl LinearConstraintHelper {
355    /// Apply helper by merging into DEConfig.linear_penalty (stacking rows if already present)
356    pub fn apply_to(&self, cfg: &mut DEConfig, weight: f64) {
357        use stack_linear_penalty::stack_linear_penalty;
358
359        let new_lp = LinearPenalty {
360            a: self.a.clone(),
361            lb: self.lb.clone(),
362            ub: self.ub.clone(),
363            weight,
364        };
365        match &mut cfg.linear_penalty {
366            Some(existing) => stack_linear_penalty(existing, &new_lp),
367            None => cfg.linear_penalty = Some(new_lp),
368        }
369    }
370}
371
372/// SciPy-like nonlinear constraint helper: vector-valued fun(x) with lb <= fun(x) <= ub.
373#[derive(Clone)]
374pub struct NonlinearConstraintHelper {
375    /// Vector-valued constraint function.
376    pub fun: VectorConstraintFn,
377    /// Lower bounds for each constraint component.
378    pub lb: Array1<f64>,
379    /// Upper bounds for each constraint component.
380    pub ub: Array1<f64>,
381}
382
383impl NonlinearConstraintHelper {
384    /// Apply helper by emitting penalty closures per component.
385    /// lb <= f_i(x) <= ub becomes two inequalities: f_i(x)-ub <= 0 and lb - f_i(x) <= 0.
386    /// If lb==ub, emit an equality penalty for f_i(x)-lb.
387    pub fn apply_to(&self, cfg: &mut DEConfig, weight_ineq: f64, weight_eq: f64) {
388        let f = self.fun.clone();
389        let lb = self.lb.clone();
390        let ub = self.ub.clone();
391        let m = lb.len().min(ub.len());
392        for i in 0..m {
393            let l = lb[i];
394            let u = ub[i];
395            let tol = 1e-12 * (l.abs() + u.abs()).max(1.0);
396            if (u - l).abs() <= tol {
397                let fi = f.clone();
398                cfg.penalty_eq.push((
399                    Arc::new(move |x: &Array1<f64>| {
400                        let y = (fi)(x);
401                        y[i] - l
402                    }),
403                    weight_eq,
404                ));
405            } else {
406                let fi_u = f.clone();
407                cfg.penalty_ineq.push((
408                    Arc::new(move |x: &Array1<f64>| {
409                        let y = (fi_u)(x);
410                        y[i] - u
411                    }),
412                    weight_ineq,
413                ));
414                let fi_l = f.clone();
415                cfg.penalty_ineq.push((
416                    Arc::new(move |x: &Array1<f64>| {
417                        let y = (fi_l)(x);
418                        l - y[i]
419                    }),
420                    weight_ineq,
421                ));
422            }
423        }
424    }
425}
426
/// Mutable state tracked by the adaptive parameter controller.
#[derive(Debug, Clone)]
struct AdaptiveState {
    /// Location parameter F_m of the mutation-factor distribution.
    f_m: f64,
    /// Location parameter CR_m of the crossover-rate distribution.
    cr_m: f64,
    /// F values that produced an improvement this generation.
    successful_f: Vec<f64>,
    /// CR values that produced an improvement this generation.
    successful_cr: Vec<f64>,
    /// Linearly decreasing weight used by adaptive mutation.
    current_w: f64,
}

442impl AdaptiveState {
443    fn new(config: &AdaptiveConfig) -> Self {
444        Self {
445            f_m: config.f_m,
446            cr_m: config.cr_m,
447            successful_f: Vec::new(),
448            successful_cr: Vec::new(),
449            current_w: config.w_max, // Start with maximum weight
450        }
451    }
452
453    /// Update adaptive parameters based on successful trials
454    fn update(&mut self, config: &AdaptiveConfig, iter: usize, max_iter: usize) {
455        // Update linearly decreasing weight (Equation 19 from the paper)
456        let iter_ratio = iter as f64 / max_iter as f64;
457        self.current_w = config.w_max - (config.w_max - config.w_min) * iter_ratio;
458
459        // Update F_m using Lehmer mean of successful F values
460        if !self.successful_f.is_empty() {
461            let mean_f = self.compute_lehmer_mean(&self.successful_f);
462            self.f_m = (1.0 - config.w_f) * self.f_m + config.w_f * mean_f;
463            self.f_m = self.f_m.clamp(0.2, 1.2);
464        }
465
466        // Update CR_m using arithmetic mean of successful CR values
467        if !self.successful_cr.is_empty() {
468            let mean_cr = self.compute_arithmetic_mean(&self.successful_cr);
469            self.cr_m = (1.0 - config.w_cr) * self.cr_m + config.w_cr * mean_cr;
470            self.cr_m = self.cr_m.clamp(0.1, 0.9);
471        }
472
473        // Clear successful values for next generation
474        self.successful_f.clear();
475        self.successful_cr.clear();
476    }
477
478    /// Compute Lehmer mean for successful F values (p=2)
479    fn compute_lehmer_mean(&self, values: &[f64]) -> f64 {
480        if values.is_empty() {
481            return 0.5; // Default fallback
482        }
483
484        let sum_sq: f64 = values.iter().map(|&x| x * x).sum();
485        let sum: f64 = values.iter().sum();
486
487        if sum > 0.0 {
488            sum_sq / sum
489        } else {
490            0.5 // Fallback if sum is zero
491        }
492    }
493
494    /// Compute arithmetic mean for successful CR values
495    fn compute_arithmetic_mean(&self, values: &[f64]) -> f64 {
496        if values.is_empty() {
497            return 0.5; // Default fallback
498        }
499        values.iter().sum::<f64>() / values.len() as f64
500    }
501
502    /// Record successful parameter values
503    fn record_success(&mut self, f_val: f64, cr_val: f64) {
504        self.successful_f.push(f_val);
505        self.successful_cr.push(cr_val);
506    }
507
508    /// Sample adaptive F parameter using conservative normal distribution
509    fn sample_f<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
510        let u1: f64 = rng.random::<f64>().max(1e-15);
511        let u2: f64 = rng.random::<f64>();
512
513        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
514        let sample = self.f_m + 0.05 * normal;
515
516        sample.clamp(0.3, 1.0)
517    }
518
519    /// Sample adaptive CR parameter using conservative Gaussian distribution
520    fn sample_cr<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
521        let u1: f64 = rng.random::<f64>().max(1e-15);
522        let u2: f64 = rng.random::<f64>();
523
524        let normal = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
525        let sample = self.cr_m + 0.05 * normal;
526
527        sample.clamp(0.1, 0.9)
528    }
529}
530
/// Settings for adaptive differential evolution.
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Turns the adaptive mutation strategy on or off.
    pub adaptive_mutation: bool,
    /// Turns Wrapper Local Search (WLS) on or off.
    pub wls_enabled: bool,
    /// Upper bound w_max of the adaptive mutation weight.
    pub w_max: f64,
    /// Lower bound w_min of the adaptive mutation weight.
    pub w_min: f64,
    /// Adaptation rate for F (typically between 0.8 and 1.0).
    pub w_f: f64,
    /// Adaptation rate for CR (typically between 0.9 and 1.0).
    pub w_cr: f64,
    /// Initial location parameter F_m of the mutation distribution.
    pub f_m: f64,
    /// Initial location parameter CR_m of the crossover distribution.
    pub cr_m: f64,
    /// Fraction of the population that WLS is applied to.
    pub wls_prob: f64,
    /// Scale of the Cauchy perturbation used by WLS.
    pub wls_scale: f64,
}

556impl Default for AdaptiveConfig {
557    fn default() -> Self {
558        Self {
559            adaptive_mutation: false,
560            wls_enabled: false,
561            w_max: 0.9,
562            w_min: 0.1,
563            w_f: 0.9,
564            w_cr: 0.9,
565            f_m: 0.5,
566            cr_m: 0.6,
567            wls_prob: 0.1,
568            wls_scale: 0.1,
569        }
570    }
571}
572
/// Settings for polishing the best solution with an NLopt local optimizer.
#[derive(Debug, Clone)]
pub struct PolishConfig {
    /// Turns polishing on or off.
    pub enabled: bool,
    /// Name of the local algorithm (e.g. "neldermead", "sbplx", "cobyla").
    pub algo: String,
    /// Budget of function evaluations for the polish step (e.g. 200*n).
    pub maxeval: usize,
}

584/// Configuration for the Differential Evolution optimizer.
585///
586/// This struct holds all parameters controlling the DE algorithm behavior,
587/// including population size, mutation/crossover settings, constraints, and
588/// convergence criteria.
589pub struct DEConfig {
590    /// Maximum number of generations (iterations).
591    pub maxiter: usize,
592    /// Population size multiplier (total NP = popsize * n_params_free).
593    pub popsize: usize,
594    /// Relative tolerance for convergence (population energy std dev).
595    pub tol: f64,
596    /// Absolute tolerance for convergence on best fitness.
597    pub atol: f64,
598    /// Mutation factor setting.
599    pub mutation: Mutation,
600    /// Crossover probability CR in [0, 1].
601    pub recombination: f64,
602    /// Mutation/crossover strategy.
603    pub strategy: Strategy,
604    /// Crossover type (binomial or exponential).
605    pub crossover: Crossover,
606    /// Population initialization scheme.
607    pub init: Init,
608    /// Update timing (deferred).
609    pub updating: Updating,
610    /// Optional random seed for reproducibility.
611    pub seed: Option<u64>,
612    /// Optional integrality mask; true => variable is integer-constrained.
613    pub integrality: Option<Vec<bool>>,
614    /// Optional initial guess used to replace the best member after init.
615    pub x0: Option<Array1<f64>>,
616    /// Print objective best at each iteration.
617    pub disp: bool,
618    /// Optional per-iteration callback (may stop early).
619    pub callback: Option<CallbackFn>,
620    /// Penalty-based inequality constraints: fc(x) <= 0.
621    pub penalty_ineq: Vec<PenaltyTuple>,
622    /// Penalty-based equality constraints: h(x) = 0.
623    pub penalty_eq: Vec<PenaltyTuple>,
624    /// Optional linear constraints treated by penalty: lb <= A x <= ub.
625    pub linear_penalty: Option<LinearPenalty>,
626    /// Polishing configuration (optional).
627    pub polish: Option<PolishConfig>,
628    /// Adaptive differential evolution configuration.
629    pub adaptive: AdaptiveConfig,
630    /// Parallel evaluation configuration.
631    pub parallel: parallel_eval::ParallelConfig,
632    /// L-SHADE configuration for population reduction and pbest selection.
633    pub lshade: lshade::LShadeConfig,
634}
635
636impl Default for DEConfig {
637    fn default() -> Self {
638        Self {
639            maxiter: 1000,
640            popsize: 15,
641            tol: 1e-2,
642            atol: 0.0,
643            mutation: Mutation::default(),
644            recombination: 0.7,
645            strategy: Strategy::Best1Bin,
646            crossover: Crossover::default(),
647            init: Init::default(),
648            updating: Updating::default(),
649            seed: None,
650            integrality: None,
651            x0: None,
652            disp: false,
653            callback: None,
654            penalty_ineq: Vec::new(),
655            penalty_eq: Vec::new(),
656            linear_penalty: None,
657            polish: None,
658            adaptive: AdaptiveConfig::default(),
659            parallel: parallel_eval::ParallelConfig::default(),
660            lshade: lshade::LShadeConfig::default(),
661        }
662    }
663}
664
665/// Fluent builder for `DEConfig` for ergonomic configuration.
666///
667/// # Example
668///
669/// ```rust
670/// use math_audio_optimisation::{DEConfigBuilder, Strategy, Mutation};
671///
672/// let config = DEConfigBuilder::new()
673///     .maxiter(500)
674///     .popsize(20)
675///     .strategy(Strategy::Best1Bin)
676///     .mutation(Mutation::Factor(0.8))
677///     .recombination(0.9)
678///     .seed(42)
679///     .build();
680/// ```
681pub struct DEConfigBuilder {
682    cfg: DEConfig,
683}
684impl Default for DEConfigBuilder {
685    fn default() -> Self {
686        Self::new()
687    }
688}
689
690impl DEConfigBuilder {
691    /// Creates a new builder with default configuration.
692    pub fn new() -> Self {
693        Self {
694            cfg: DEConfig::default(),
695        }
696    }
697    /// Sets the maximum number of iterations.
698    pub fn maxiter(mut self, v: usize) -> Self {
699        self.cfg.maxiter = v;
700        self
701    }
702    /// Sets the population size multiplier.
703    ///
704    /// # Panics
705    ///
706    /// Panics if `v < 4` since DE requires at least 4 individuals for
707    /// rand/1 and rand/2 mutation strategies.
708    pub fn popsize(mut self, v: usize) -> Self {
709        self.cfg.popsize = v;
710        self
711    }
712    /// Sets the relative convergence tolerance.
713    pub fn tol(mut self, v: f64) -> Self {
714        self.cfg.tol = v;
715        self
716    }
717    /// Sets the absolute convergence tolerance.
718    pub fn atol(mut self, v: f64) -> Self {
719        self.cfg.atol = v;
720        self
721    }
722    /// Sets the mutation factor configuration.
723    pub fn mutation(mut self, v: Mutation) -> Self {
724        self.cfg.mutation = v;
725        self
726    }
727    /// Sets the crossover probability (CR).
728    pub fn recombination(mut self, v: f64) -> Self {
729        self.cfg.recombination = v;
730        self
731    }
732    /// Sets the mutation/crossover strategy.
733    pub fn strategy(mut self, v: Strategy) -> Self {
734        self.cfg.strategy = v;
735        self
736    }
737    /// Sets the crossover type.
738    pub fn crossover(mut self, v: Crossover) -> Self {
739        self.cfg.crossover = v;
740        self
741    }
742    /// Sets the population initialization scheme.
743    pub fn init(mut self, v: Init) -> Self {
744        self.cfg.init = v;
745        self
746    }
747    /// Sets the random seed for reproducibility.
748    pub fn seed(mut self, v: u64) -> Self {
749        self.cfg.seed = Some(v);
750        self
751    }
752    /// Sets the integrality mask for mixed-integer optimization.
753    pub fn integrality(mut self, v: Vec<bool>) -> Self {
754        self.cfg.integrality = Some(v);
755        self
756    }
757    /// Sets an initial guess to seed the population.
758    pub fn x0(mut self, v: Array1<f64>) -> Self {
759        self.cfg.x0 = Some(v);
760        self
761    }
762    /// Enables/disables progress display.
763    pub fn disp(mut self, v: bool) -> Self {
764        self.cfg.disp = v;
765        self
766    }
767    /// Sets a per-iteration callback function.
768    pub fn callback(mut self, cb: Box<dyn FnMut(&DEIntermediate) -> CallbackAction>) -> Self {
769        self.cfg.callback = Some(cb);
770        self
771    }
772    /// Adds an inequality constraint penalty function.
773    pub fn add_penalty_ineq<FN>(mut self, f: FN, w: f64) -> Self
774    where
775        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
776    {
777        self.cfg.penalty_ineq.push((Arc::new(f), w));
778        self
779    }
780    /// Adds an equality constraint penalty function.
781    pub fn add_penalty_eq<FN>(mut self, f: FN, w: f64) -> Self
782    where
783        FN: Fn(&Array1<f64>) -> f64 + Send + Sync + 'static,
784    {
785        self.cfg.penalty_eq.push((Arc::new(f), w));
786        self
787    }
788    /// Sets a linear constraint penalty.
789    pub fn linear_penalty(mut self, lp: LinearPenalty) -> Self {
790        self.cfg.linear_penalty = Some(lp);
791        self
792    }
793    /// Sets the polishing configuration.
794    pub fn polish(mut self, pol: PolishConfig) -> Self {
795        self.cfg.polish = Some(pol);
796        self
797    }
798    /// Sets the adaptive DE configuration.
799    pub fn adaptive(mut self, adaptive: AdaptiveConfig) -> Self {
800        self.cfg.adaptive = adaptive;
801        self
802    }
803    /// Enables/disables adaptive mutation.
804    pub fn enable_adaptive_mutation(mut self, enable: bool) -> Self {
805        self.cfg.adaptive.adaptive_mutation = enable;
806        self
807    }
808    /// Enables/disables Wrapper Local Search.
809    pub fn enable_wls(mut self, enable: bool) -> Self {
810        self.cfg.adaptive.wls_enabled = enable;
811        self
812    }
813    /// Sets the adaptive weight bounds.
814    pub fn adaptive_weights(mut self, w_max: f64, w_min: f64) -> Self {
815        self.cfg.adaptive.w_max = w_max;
816        self.cfg.adaptive.w_min = w_min;
817        self
818    }
819    /// Sets the parallel evaluation configuration.
820    pub fn parallel(mut self, parallel: parallel_eval::ParallelConfig) -> Self {
821        self.cfg.parallel = parallel;
822        self
823    }
824    /// Enables/disables parallel evaluation.
825    pub fn enable_parallel(mut self, enable: bool) -> Self {
826        self.cfg.parallel.enabled = enable;
827        self
828    }
829    /// Sets the number of parallel threads.
830    pub fn parallel_threads(mut self, num_threads: usize) -> Self {
831        self.cfg.parallel.num_threads = Some(num_threads);
832        self
833    }
834    /// Sets the L-SHADE configuration.
835    pub fn lshade(mut self, lshade: lshade::LShadeConfig) -> Self {
836        self.cfg.lshade = lshade;
837        self
838    }
839    /// Builds and returns the configuration.
840    ///
841    /// # Errors
842    ///
843    /// Returns `DEError::PopulationTooSmall` if `popsize < 4`.
844    pub fn build(self) -> error::Result<DEConfig> {
845        if self.cfg.popsize < 4 {
846            return Err(DEError::PopulationTooSmall {
847                pop_size: self.cfg.popsize,
848            });
849        }
850        Ok(self.cfg)
851    }
852}
853
/// Result/report of a DE optimization run.
///
/// Contains the optimal solution, convergence status, and statistics.
/// Returned by [`DifferentialEvolution::solve`]. `Debug` is implemented
/// manually so large arrays are summarized rather than dumped in full.
#[derive(Clone)]
pub struct DEReport {
    /// The optimal solution vector.
    pub x: Array1<f64>,
    /// The objective function value at the optimal solution.
    pub fun: f64,
    /// Whether the optimization converged successfully.
    pub success: bool,
    /// Human-readable status message.
    pub message: String,
    /// Number of iterations (generations) performed.
    pub nit: usize,
    /// Number of function evaluations performed.
    pub nfev: usize,
    /// Final population matrix (NP x n).
    pub population: Array2<f64>,
    /// Fitness values for each population member.
    pub population_energies: Array1<f64>,
}
876
877impl fmt::Debug for DEReport {
878    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
879        f.debug_struct("DEReport")
880            .field("x", &format!("len={}", self.x.len()))
881            .field("fun", &self.fun)
882            .field("success", &self.success)
883            .field("message", &self.message)
884            .field("nit", &self.nit)
885            .field("nfev", &self.nfev)
886            .field(
887                "population",
888                &format!("{}x{}", self.population.nrows(), self.population.ncols()),
889            )
890            .field(
891                "population_energies",
892                &format!("len={}", self.population_energies.len()),
893            )
894            .finish()
895    }
896}
897
/// Information passed to callback after each generation.
///
/// A snapshot of the optimizer state; see [`CallbackAction`] for how the
/// callback's return value controls the run.
pub struct DEIntermediate {
    /// Current best solution vector.
    pub x: Array1<f64>,
    /// Current best objective value.
    pub fun: f64,
    /// Convergence measure (population fitness std dev).
    pub convergence: f64,
    /// Current iteration number (1-based).
    pub iter: usize,
}
909
/// Action returned by callback to control optimization flow.
///
/// Derives the standard traits so callers can log, copy, and compare
/// actions; the enum is a plain pair of unit variants, so `Copy` is free.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CallbackAction {
    /// Continue optimization.
    Continue,
    /// Stop optimization early.
    Stop,
}
917
/// Differential Evolution optimizer.
///
/// A population-based stochastic optimizer for continuous functions.
/// Use [`DifferentialEvolution::new`] to create an instance, configure
/// with [`config_mut`](Self::config_mut), then call [`solve`](Self::solve).
pub struct DifferentialEvolution<'a, F>
where
    F: Fn(&Array1<f64>) -> f64 + Sync,
{
    // Objective to minimize; `Sync` is required so it can be shared across
    // rayon worker threads during parallel evaluation.
    func: &'a F,
    // Per-dimension lower bounds (same length as `upper`).
    lower: Array1<f64>,
    // Per-dimension upper bounds.
    upper: Array1<f64>,
    // Run settings; defaults applied in `new`, editable via `config_mut`.
    config: DEConfig,
}
932
impl<'a, F> DifferentialEvolution<'a, F>
where
    F: Fn(&Array1<f64>) -> f64 + Sync,
{
    /// Creates a new DE optimizer with objective `func` and bounds [lower, upper].
    ///
    /// Equal lower/upper bounds are allowed: such variables are treated as
    /// fixed by [`solve`](Self::solve).
    ///
    /// # Errors
    ///
    /// Returns `DEError::BoundsMismatch` if `lower` and `upper` have different lengths.
    /// Returns `DEError::InvalidBounds` if any lower bound exceeds its corresponding upper bound.
    pub fn new(func: &'a F, lower: Array1<f64>, upper: Array1<f64>) -> Result<Self> {
        if lower.len() != upper.len() {
            return Err(DEError::BoundsMismatch {
                lower_len: lower.len(),
                upper_len: upper.len(),
            });
        }

        // Validate that lower <= upper for all dimensions
        for i in 0..lower.len() {
            if lower[i] > upper[i] {
                return Err(DEError::InvalidBounds {
                    index: i,
                    lower: lower[i],
                    upper: upper[i],
                });
            }
        }

        Ok(Self {
            func,
            lower,
            upper,
            config: DEConfig::default(),
        })
    }

    /// Mutable access to configuration
    ///
    /// Use this to adjust strategy, tolerances, penalties, callbacks, etc.
    /// before calling [`solve`](Self::solve).
    pub fn config_mut(&mut self) -> &mut DEConfig {
        &mut self.config
    }

    /// Run the optimization and return a report
    ///
    /// Initializes the population, then iterates mutation / crossover /
    /// selection for up to `maxiter` generations, evaluating trials in
    /// parallel. Stops early when the population fitness std dev falls to
    /// `atol + tol * |mean|` or when the callback returns
    /// [`CallbackAction::Stop`]. Setting the `AUTOEQ_DE_TIMING` env var to
    /// anything but "0" prints per-phase timing to stderr.
    pub fn solve(&mut self) -> DEReport {
        use apply_integrality::apply_integrality;
        use apply_wls::apply_wls;
        use crossover_binomial::binomial_crossover;
        use crossover_exponential::exponential_crossover;
        use init_latin_hypercube::init_latin_hypercube;
        use init_random::init_random;
        use mutant_adaptive::mutant_adaptive;
        use mutant_best1::mutant_best1;
        use mutant_best2::mutant_best2;
        use mutant_current_to_best1::mutant_current_to_best1;
        use mutant_current_to_pbest1;
        use mutant_rand_to_best1::mutant_rand_to_best1;
        use mutant_rand1::mutant_rand1;
        use mutant_rand2::mutant_rand2;
        use parallel_eval::evaluate_trials_parallel;
        use std::sync::Arc;

        let n = self.lower.len();

        // Identify fixed (equal-bounds) and free variables
        let mut is_free: Vec<bool> = Vec::with_capacity(n);
        for i in 0..n {
            is_free.push((self.upper[i] - self.lower[i]).abs() > 0.0);
        }
        let n_free = is_free.iter().filter(|&&b| b).count();
        let _n_equal = n - n_free;
        if n_free == 0 {
            // All fixed; just evaluate x = lower
            let x_fixed = self.lower.clone();
            let mut x_eval = x_fixed.clone();
            if let Some(mask) = &self.config.integrality {
                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
            }
            let f = (self.func)(&x_eval);
            return DEReport {
                x: x_eval,
                fun: f,
                success: true,
                message: "All variables fixed by bounds".into(),
                nit: 0,
                nfev: 1,
                population: Array2::zeros((1, n)),
                population_energies: Array1::from(vec![f]),
            };
        }

        // `popsize` acts as a multiplier: total population = popsize * free dims.
        let npop = self.config.popsize * n_free;
        let _bounds_span = &self.upper - &self.lower;

        if self.config.disp {
            eprintln!(
                "DE Init: {} dimensions ({} free), population={}, maxiter={}",
                n, n_free, npop, self.config.maxiter
            );
            eprintln!(
                "  Strategy: {:?}, Mutation: {:?}, Crossover: CR={:.3}",
                self.config.strategy, self.config.mutation, self.config.recombination
            );
            eprintln!(
                "  Tolerances: tol={:.2e}, atol={:.2e}",
                self.config.tol, self.config.atol
            );
        }

        // Timing toggle via env var
        let timing_enabled = std::env::var("AUTOEQ_DE_TIMING")
            .map(|v| v != "0")
            .unwrap_or(false);

        // Configure global rayon thread pool once if requested
        if let Some(n) = self.config.parallel.num_threads {
            // Ignore error if global pool already set
            let _ = rayon::ThreadPoolBuilder::new()
                .num_threads(n)
                .build_global();
        }

        // RNG: seeded for reproducible runs, otherwise derived from the thread RNG
        let mut rng: StdRng = match self.config.seed {
            Some(s) => StdRng::seed_from_u64(s),
            None => {
                let mut thread_rng = rand::rng();
                StdRng::from_rng(&mut thread_rng)
            }
        };

        // Initialize population in [lower, upper]
        let mut pop = match self.config.init {
            Init::LatinHypercube => {
                if self.config.disp {
                    eprintln!("  Using Latin Hypercube initialization");
                }
                init_latin_hypercube(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
            }
            Init::Random => {
                if self.config.disp {
                    eprintln!("  Using Random initialization");
                }
                init_random(n, npop, &self.lower, &self.upper, &is_free, &mut rng)
            }
        };

        // Evaluate energies (objective + penalties)
        let mut nfev: usize = 0;
        if self.config.disp {
            eprintln!("  Evaluating initial population of {} individuals...", npop);
        }

        // Prepare population for evaluation (apply integrality constraints)
        let mut eval_pop = pop.clone();
        let t_integrality0 = Instant::now();
        if let Some(mask) = &self.config.integrality {
            for i in 0..npop {
                let mut row = eval_pop.row_mut(i);
                let mut x_eval = row.to_owned();
                apply_integrality(&mut x_eval, mask, &self.lower, &self.upper);
                row.assign(&x_eval);
            }
        }
        let t_integrality = t_integrality0.elapsed();

        // Build thread-safe energy function that includes penalties.
        // Penalty closures are cloned out of the config so the energy closure
        // owns its data and can be shared across threads via Arc.
        let func_ref = self.func;
        let penalty_ineq_vec: Vec<PenaltyTuple> = self
            .config
            .penalty_ineq
            .iter()
            .map(|(f, w)| (f.clone(), *w))
            .collect();
        let penalty_eq_vec: Vec<PenaltyTuple> = self
            .config
            .penalty_eq
            .iter()
            .map(|(f, w)| (f.clone(), *w))
            .collect();
        let linear_penalty = self.config.linear_penalty.clone();

        let energy_fn = Arc::new(move |x: &Array1<f64>| -> f64 {
            let base = (func_ref)(x);
            let mut p = 0.0;
            // Inequality constraints: quadratic penalty on positive violation g(x) > 0.
            for (f, w) in &penalty_ineq_vec {
                let v = f(x);
                let viol = v.max(0.0);
                p += w * viol * viol;
            }
            // Equality constraints: quadratic penalty on any deviation h(x) != 0.
            for (h, w) in &penalty_eq_vec {
                let v = h(x);
                p += w * v * v;
            }
            // Linear constraints lb <= A x <= ub: quadratic penalty outside the box.
            if let Some(ref lp) = linear_penalty {
                let ax = lp.a.dot(&x.view());
                Zip::from(&ax)
                    .and(&lp.lb)
                    .and(&lp.ub)
                    .for_each(|&v, &lo, &hi| {
                        if v < lo {
                            let d = lo - v;
                            p += lp.weight * d * d;
                        } else if v > hi {
                            let d = v - hi;
                            p += lp.weight * d * d;
                        }
                    });
            }
            base + p
        });

        let t_eval0 = Instant::now();
        let mut energies = parallel_eval::evaluate_population_parallel(
            &eval_pop,
            energy_fn.clone(),
            &self.config.parallel,
        );
        let t_eval_init = t_eval0.elapsed();
        nfev += npop;
        if timing_enabled {
            eprintln!(
                "TIMING init: integrality={:.3} ms, eval={:.3} ms",
                t_integrality.as_secs_f64() * 1e3,
                t_eval_init.as_secs_f64() * 1e3
            );
        }

        // Report initial population statistics
        let pop_mean = energies.mean().unwrap_or(0.0);
        let pop_std = energies.std(0.0);
        if self.config.disp {
            eprintln!(
                "  Initial population: mean={:.6e}, std={:.6e}",
                pop_mean, pop_std
            );
        }

        // If x0 provided, override the best member
        // NOTE(review): this overwrites the current BEST member even when f0 is
        // worse than it, which can discard the best initial sample — consider
        // replacing the worst member instead; confirm intended.
        if let Some(x0) = &self.config.x0 {
            let mut x0c = x0.clone();
            // Clip to bounds using ndarray
            for i in 0..x0c.len() {
                x0c[i] = x0c[i].clamp(self.lower[i], self.upper[i]);
            }
            if let Some(mask) = &self.config.integrality {
                apply_integrality(&mut x0c, mask, &self.lower, &self.upper);
            }
            let f0 = self.energy(&x0c);
            nfev += 1;
            // find current best
            let (best_idx, _best_f) = argmin(&energies);
            pop.row_mut(best_idx).assign(&x0c.view());
            energies[best_idx] = f0;
        }

        let (mut best_idx, mut best_f) = argmin(&energies);
        let mut best_x = pop.row(best_idx).to_owned();

        if self.config.disp {
            eprintln!(
                "  Initial best: fitness={:.6e} at index {}",
                best_f, best_idx
            );
            // NOTE(review): this log assumes parameters come in
            // (log10-frequency, Q, gain) triplets — audio-EQ specific. Any
            // trailing dims beyond a multiple of 3 are simply not printed.
            let param_summary: Vec<String> = (0..best_x.len() / 3)
                .map(|i| {
                    let freq = 10f64.powf(best_x[i * 3]);
                    let q = best_x[i * 3 + 1];
                    let gain = best_x[i * 3 + 2];
                    format!("f{:.0}Hz/Q{:.2}/G{:.2}dB", freq, q, gain)
                })
                .collect();
            eprintln!("  Initial best params: [{}]", param_summary.join(", "));
        }

        if self.config.disp {
            eprintln!("DE iter {:4}  best_f={:.6e}", 0, best_f);
        }

        // Initialize adaptive state if adaptive strategies are enabled
        let mut adaptive_state = if matches!(
            self.config.strategy,
            Strategy::AdaptiveBin | Strategy::AdaptiveExp
        ) || self.config.adaptive.adaptive_mutation
        {
            Some(AdaptiveState::new(&self.config.adaptive))
        } else {
            None
        };

        // Initialize external archive for L-SHADE strategies.
        // RwLock: many trial-generation threads read it, selection writes it.
        let external_archive: Option<Arc<RwLock<external_archive::ExternalArchive>>> = if matches!(
            self.config.strategy,
            Strategy::LShadeBin | Strategy::LShadeExp
        ) {
            Some(Arc::new(RwLock::new(
                external_archive::ExternalArchive::with_population_size(
                    npop,
                    self.config.lshade.arc_rate,
                ),
            )))
        } else {
            None
        };

        // Main loop
        let mut success = false;
        let mut message = String::new();
        let mut nit = 0;
        let mut accepted_trials;
        let mut improvement_count;

        let mut t_build_tot = std::time::Duration::ZERO;
        let mut t_eval_tot = std::time::Duration::ZERO;
        let mut t_select_tot = std::time::Duration::ZERO;
        let mut t_iter_tot = std::time::Duration::ZERO;

        for iter in 1..=self.config.maxiter {
            nit = iter;
            accepted_trials = 0;
            improvement_count = 0;

            let iter_start = Instant::now();

            // Pre-sort indices for adaptive/L-SHADE strategies to avoid re-sorting in the loop
            let sorted_indices = if matches!(
                self.config.strategy,
                Strategy::AdaptiveBin
                    | Strategy::AdaptiveExp
                    | Strategy::LShadeBin
                    | Strategy::LShadeExp
            ) {
                let mut indices: Vec<usize> = (0..npop).collect();
                indices.sort_by(|&a, &b| {
                    energies[a]
                        .partial_cmp(&energies[b])
                        .unwrap_or(std::cmp::Ordering::Equal)
                });
                indices
            } else {
                Vec::new() // Not needed for other strategies
            };

            // Generate all trials first, then evaluate in parallel
            let t_build0 = Instant::now();

            // Parallelize trial generation using rayon
            use rayon::prelude::*;
            let (trials, trial_params): (Vec<_>, Vec<_>) = (0..npop)
                .into_par_iter()
                .map(|i| {
                    // Create thread-local RNG from base seed + iteration + individual index
                    // so seeded runs stay reproducible regardless of thread scheduling.
                    let mut local_rng: StdRng = if let Some(base_seed) = self.config.seed {
                        StdRng::seed_from_u64(
                            base_seed
                                .wrapping_add((iter as u64) << 32)
                                .wrapping_add(i as u64),
                        )
                    } else {
                        // Use thread_rng for unseeded runs
                        let mut thread_rng = rand::rng();
                        StdRng::from_rng(&mut thread_rng)
                    };

                    // Sample mutation factor and crossover rate (adaptive or fixed)
                    let (f, cr) = if let Some(ref adaptive) = adaptive_state {
                        // Use adaptive parameter sampling
                        let adaptive_f = adaptive.sample_f(&mut local_rng);
                        let adaptive_cr = adaptive.sample_cr(&mut local_rng);
                        (adaptive_f, adaptive_cr)
                    } else {
                        // Use fixed or dithered parameters
                        (
                            self.config.mutation.sample(&mut local_rng),
                            self.config.recombination,
                        )
                    };

                    // Generate mutant and apply crossover based on strategy
                    let (mutant, cross) = match self.config.strategy {
                        Strategy::Best1Bin => (
                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::Best1Exp => (
                            mutant_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::Rand1Bin => (
                            mutant_rand1(i, &pop, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::Rand1Exp => (
                            mutant_rand1(i, &pop, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::Rand2Bin => (
                            mutant_rand2(i, &pop, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::Rand2Exp => (
                            mutant_rand2(i, &pop, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::CurrentToBest1Bin => (
                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::CurrentToBest1Exp => (
                            mutant_current_to_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::Best2Bin => (
                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::Best2Exp => (
                            mutant_best2(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::RandToBest1Bin => (
                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Binomial,
                        ),
                        Strategy::RandToBest1Exp => (
                            mutant_rand_to_best1(i, &pop, best_idx, f, &mut local_rng),
                            Crossover::Exponential,
                        ),
                        Strategy::AdaptiveBin => {
                            if let Some(ref adaptive) = adaptive_state {
                                (
                                    mutant_adaptive(
                                        i,
                                        &pop,
                                        &sorted_indices,
                                        adaptive.current_w,
                                        f,
                                        &mut local_rng,
                                    ),
                                    Crossover::Binomial,
                                )
                            } else {
                                // Fallback to rand1 if adaptive state not available
                                (
                                    mutant_rand1(i, &pop, f, &mut local_rng),
                                    Crossover::Binomial,
                                )
                            }
                        }
                        Strategy::AdaptiveExp => {
                            if let Some(ref adaptive) = adaptive_state {
                                (
                                    mutant_adaptive(
                                        i,
                                        &pop,
                                        &sorted_indices,
                                        adaptive.current_w,
                                        f,
                                        &mut local_rng,
                                    ),
                                    Crossover::Exponential,
                                )
                            } else {
                                // Fallback to rand1 if adaptive state not available
                                (
                                    mutant_rand1(i, &pop, f, &mut local_rng),
                                    Crossover::Exponential,
                                )
                            }
                        }
                        Strategy::LShadeBin => {
                            let pbest_size = mutant_current_to_pbest1::compute_pbest_size(
                                self.config.lshade.p,
                                pop.nrows(),
                            );
                            // Read lock only; a poisoned/failed lock degrades to no archive.
                            let archive_ref = external_archive.as_ref().and_then(|a| a.read().ok());
                            (
                                mutant_current_to_pbest1::mutant_current_to_pbest1(
                                    i,
                                    &pop,
                                    &sorted_indices,
                                    pbest_size,
                                    archive_ref.as_deref(),
                                    f,
                                    &mut local_rng,
                                ),
                                Crossover::Binomial,
                            )
                        }
                        Strategy::LShadeExp => {
                            let pbest_size = mutant_current_to_pbest1::compute_pbest_size(
                                self.config.lshade.p,
                                pop.nrows(),
                            );
                            let archive_ref = external_archive.as_ref().and_then(|a| a.read().ok());
                            (
                                mutant_current_to_pbest1::mutant_current_to_pbest1(
                                    i,
                                    &pop,
                                    &sorted_indices,
                                    pbest_size,
                                    archive_ref.as_deref(),
                                    f,
                                    &mut local_rng,
                                ),
                                Crossover::Exponential,
                            )
                        }
                    };

                    // Every strategy arm above fixes the crossover kind, so use it directly.
                    let crossover = cross;
                    let trial = match crossover {
                        Crossover::Binomial => {
                            binomial_crossover(&pop.row(i).to_owned(), &mutant, cr, &mut local_rng)
                        }
                        Crossover::Exponential => exponential_crossover(
                            &pop.row(i).to_owned(),
                            &mutant,
                            cr,
                            &mut local_rng,
                        ),
                    };

                    // Apply WLS if enabled
                    // NOTE(review): the else-branch `trial.clone()` looks redundant —
                    // `trial` is not used afterwards and could be moved instead.
                    let wls_trial = if self.config.adaptive.wls_enabled
                        && local_rng.random::<f64>() < self.config.adaptive.wls_prob
                    {
                        apply_wls(
                            &trial,
                            &self.lower,
                            &self.upper,
                            self.config.adaptive.wls_scale,
                            &mut local_rng,
                        )
                    } else {
                        trial.clone()
                    };

                    // Clip to bounds using vectorized operation
                    let mut trial_clipped = wls_trial;
                    Zip::from(&mut trial_clipped)
                        .and(&self.lower)
                        .and(&self.upper)
                        .for_each(|x, lo, hi| *x = x.clamp(*lo, *hi));

                    // Apply integrality if provided
                    if let Some(mask) = &self.config.integrality {
                        apply_integrality(&mut trial_clipped, mask, &self.lower, &self.upper);
                    }

                    // Return trial and parameters
                    (trial_clipped, (f, cr))
                })
                .unzip();

            let t_build = t_build0.elapsed();
            let t_eval0 = Instant::now();
            let trial_energies =
                evaluate_trials_parallel(&trials, energy_fn.clone(), &self.config.parallel);
            let t_eval = t_eval0.elapsed();
            nfev += npop;

            let t_select0 = Instant::now();
            // Selection phase: update population based on trial results
            for (i, (trial, trial_energy)) in
                trials.into_iter().zip(trial_energies.iter()).enumerate()
            {
                let (f, cr) = trial_params[i];

                // Selection: replace if better (ties accepted, standard DE)
                if *trial_energy <= energies[i] {
                    // For L-SHADE: add replaced solution to archive
                    // (only on strict improvement, not on ties)
                    if let Some(ref archive) = external_archive
                        && *trial_energy < energies[i]
                        && let Ok(mut arch) = archive.write()
                    {
                        arch.add(pop.row(i).to_owned());
                    }
                    pop.row_mut(i).assign(&trial.view());
                    energies[i] = *trial_energy;
                    accepted_trials += 1;

                    // Update adaptive parameters if improvement
                    if let Some(ref mut adaptive) = adaptive_state {
                        adaptive.record_success(f, cr);
                    }

                    // Track if this is an improvement over the current best
                    if *trial_energy < best_f {
                        improvement_count += 1;
                    }
                }
            }
            let t_select = t_select0.elapsed();

            t_build_tot += t_build;
            t_eval_tot += t_eval;
            t_select_tot += t_select;
            let iter_dur = iter_start.elapsed();
            t_iter_tot += iter_dur;

            if timing_enabled && (iter <= 5 || iter % 10 == 0) {
                eprintln!(
                    "TIMING iter {:4}: build={:.3} ms, eval={:.3} ms, select={:.3} ms, total={:.3} ms",
                    iter,
                    t_build.as_secs_f64() * 1e3,
                    t_eval.as_secs_f64() * 1e3,
                    t_select.as_secs_f64() * 1e3,
                    iter_dur.as_secs_f64() * 1e3,
                );
            }

            // Update adaptive parameters after each generation
            if let Some(ref mut adaptive) = adaptive_state {
                adaptive.update(&self.config.adaptive, iter, self.config.maxiter);
            }

            // Update best solution after generation
            let (new_best_idx, new_best_f) = argmin(&energies);
            if new_best_f < best_f {
                best_idx = new_best_idx;
                best_f = new_best_f;
                best_x = pop.row(best_idx).to_owned();
            }

            // Convergence check
            let pop_mean = energies.mean().unwrap_or(0.0);
            let pop_std = energies.std(0.0);
            let convergence_threshold = self.config.atol + self.config.tol * pop_mean.abs();

            if self.config.disp {
                eprintln!(
                    "DE iter {:4}  best_f={:.6e}  std={:.3e}  accepted={}/{}, improved={}",
                    iter, best_f, pop_std, accepted_trials, npop, improvement_count
                );
            }

            // Callback
            if let Some(ref mut cb) = self.config.callback {
                let intermediate = DEIntermediate {
                    x: best_x.clone(),
                    fun: best_f,
                    convergence: pop_std,
                    iter,
                };
                match cb(&intermediate) {
                    CallbackAction::Stop => {
                        // An early stop requested by the caller counts as success.
                        success = true;
                        message = "Optimization stopped by callback".to_string();
                        break;
                    }
                    CallbackAction::Continue => {}
                }
            }

            if pop_std <= convergence_threshold {
                success = true;
                message = format!(
                    "Converged: std(pop_f)={:.3e} <= threshold={:.3e}",
                    pop_std, convergence_threshold
                );
                break;
            }
        }

        if !success {
            message = format!("Maximum iterations reached: {}", self.config.maxiter);
        }

        if self.config.disp {
            eprintln!("DE finished: {}", message);
        }

        // Polish if configured
        let (final_x, final_f, polish_nfev) = if let Some(ref polish_cfg) = self.config.polish {
            if polish_cfg.enabled {
                self.polish(&best_x)
            } else {
                (best_x.clone(), best_f, 0)
            }
        } else {
            (best_x.clone(), best_f, 0)
        };

        if timing_enabled {
            eprintln!(
                "TIMING total: build={:.3} s, eval={:.3} s, select={:.3} s, iter_total={:.3} s",
                t_build_tot.as_secs_f64(),
                t_eval_tot.as_secs_f64(),
                t_select_tot.as_secs_f64(),
                t_iter_tot.as_secs_f64()
            );
        }

        self.finish_report(
            pop,
            energies,
            final_x,
            final_f,
            success,
            message,
            nit,
            nfev + polish_nfev,
        )
    }
}
1639
#[cfg(test)]
mod strategy_tests {
    use super::*;

    /// Spot-checks `FromStr` parsing of strategy names: one plain `Exp`
    /// variant, one plain `Bin` variant, and one compound name.
    #[test]
    fn test_parse_strategy_variants() {
        assert!(matches!(
            "best1exp".parse::<Strategy>().unwrap(),
            Strategy::Best1Exp
        ));
        assert!(matches!(
            "rand1bin".parse::<Strategy>().unwrap(),
            Strategy::Rand1Bin
        ));
        assert!(matches!(
            "randtobest1exp".parse::<Strategy>().unwrap(),
            Strategy::RandToBest1Exp
        ));
    }
}