autoeq 0.4.24

Automatic equalization for speakers, headphones and rooms!
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
//! AutoEQ - A library for audio equalization and filter optimization
//!
//! Copyright (C) 2025-2026 Pierre Aubert pierre(at)spinorama(dot)org
//!
//! This program is free software: you can redistribute it and/or modify
//! it under the terms of the GNU General Public License as published by
//! the Free Software Foundation, either version 3 of the License, or
//! (at your option) any later version.
//!
//! This program is distributed in the hope that it will be useful,
//! but WITHOUT ANY WARRANTY; without even the implied warranty of
//! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//! GNU General Public License for more details.
//!
//! You should have received a copy of the GNU General Public License
//! along with this program.  If not, see <https://www.gnu.org/licenses/>.

use super::cli::PeqModel;
use super::constraints::{viol_ceiling_from_spl, viol_min_gain_from_xs, viol_spacing_from_xs};
use super::loss::{
    DriversLossData, HeadphoneLossData, LossType, SpeakerLossData, drivers_flat_loss, flat_loss,
    flat_loss_asymmetric, headphone_loss, speaker_score_loss,
};
use super::optim_de::{optimize_filters_autoeq, optimize_filters_autoeq_with_callback};
use super::optim_mh::{optimize_filters_mh, optimize_filters_mh_with_callback};
#[cfg(feature = "nlopt")]
use super::optim_nlopt::optimize_filters_nlopt;
use super::x2peq::x2spl;
use crate::Curve;
use ndarray::Array1;
#[cfg(feature = "nlopt")]
use nlopt::Algorithm;

pub mod pareto;

/// Algorithm metadata structure
///
/// Describes one optimization algorithm available through this crate:
/// which backend library provides it, whether it is a global or local
/// optimizer, and what kinds of constraints it can enforce natively.
#[derive(Debug, Clone)]
pub struct AlgorithmInfo {
    /// Algorithm name with library prefix (e.g., "nlopt:isres", "mh:de", "autoeq:de")
    pub name: &'static str,
    /// Library providing this algorithm (e.g., "NLOPT", "Metaheuristics", "AutoEQ")
    pub library: &'static str,
    /// Classification as global or local optimizer
    pub algorithm_type: AlgorithmType,
    /// Whether the algorithm supports linear constraint handling
    pub supports_linear_constraints: bool,
    /// Whether the algorithm supports nonlinear constraint handling
    /// (algorithms without it rely on penalty terms — see `PenaltyMode`)
    pub supports_nonlinear_constraints: bool,
}

/// Algorithm classification
///
/// Used by `AlgorithmInfo` to describe an optimizer's search behavior.
#[derive(Debug, Clone, PartialEq)]
pub enum AlgorithmType {
    /// Global optimization algorithm - explores entire solution space, good for finding global optimum
    Global,
    /// Local optimization algorithm - refines solution from starting point, fast but may get trapped in local optimum
    Local,
}

/// Get all available algorithms with their metadata
pub fn get_all_algorithms() -> Vec<AlgorithmInfo> {
    let algorithms = vec![
        #[cfg(feature = "nlopt")]
        // NLOPT algorithms - Global with nonlinear constraint support
        AlgorithmInfo {
            name: "nlopt:isres",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: true,
            supports_nonlinear_constraints: true,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:ags",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: true,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:origdirect",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: true,
        },
        #[cfg(feature = "nlopt")]
        // NLOPT algorithms - Global without nonlinear constraint support
        AlgorithmInfo {
            name: "nlopt:crs2lm",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:direct",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:directl",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:gmlsl",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:gmlsllds",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:sbplx",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Local,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:slsqp",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Local,
            supports_linear_constraints: true,
            supports_nonlinear_constraints: true,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:stogo",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:stogorand",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        // NLOPT algorithms - Local
        AlgorithmInfo {
            name: "nlopt:bobyqa",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Local,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:cobyla",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Local,
            supports_linear_constraints: true,
            supports_nonlinear_constraints: true,
        },
        #[cfg(feature = "nlopt")]
        AlgorithmInfo {
            name: "nlopt:neldermead",
            library: "NLOPT",
            algorithm_type: AlgorithmType::Local,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        // Metaheuristics algorithms (all global, no constraint support)
        AlgorithmInfo {
            name: "mh:de",
            library: "Metaheuristics",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        AlgorithmInfo {
            name: "mh:pso",
            library: "Metaheuristics",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        AlgorithmInfo {
            name: "mh:rga",
            library: "Metaheuristics",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        AlgorithmInfo {
            name: "mh:tlbo",
            library: "Metaheuristics",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        AlgorithmInfo {
            name: "mh:firefly",
            library: "Metaheuristics",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: false,
            supports_nonlinear_constraints: false,
        },
        AlgorithmInfo {
            name: "autoeq:de",
            library: "AutoEQ",
            algorithm_type: AlgorithmType::Global,
            supports_linear_constraints: true,
            supports_nonlinear_constraints: true,
        },
    ];
    algorithms
}

/// Find algorithm info by name (supports both prefixed and unprefixed names for backward compatibility)
///
/// Matching is ASCII case-insensitive. A fully qualified name
/// (e.g. "nlopt:cobyla") is tried first; if nothing matches, the bare
/// suffix (e.g. "cobyla") is matched against each catalog entry's
/// unprefixed name. Returns `None` when no entry matches.
pub fn find_algorithm_info(name: &str) -> Option<AlgorithmInfo> {
    let algorithms = get_all_algorithms();

    // First try exact (case-insensitive) match on the fully-qualified name.
    if let Some(algo) = algorithms
        .iter()
        .find(|a| a.name.eq_ignore_ascii_case(name))
    {
        return Some(algo.clone());
    }

    // Then try without prefix for backward compatibility.
    // `eq_ignore_ascii_case` already folds ASCII case, so the previous
    // `name.to_lowercase()` allocation was redundant (all catalog names
    // are ASCII).
    algorithms
        .iter()
        .find(|a| {
            a.name
                .split(':')
                .nth(1)
                .is_some_and(|suffix| suffix.eq_ignore_ascii_case(name))
        })
        .cloned()
}

/// Data structure for holding objective function parameters
///
/// This struct contains all the data needed to compute the objective function
/// for filter optimization. One instance describes a single measurement
/// curve; multi-measurement runs attach additional curves via `multi_objective`.
#[derive(Debug, Clone)]
pub struct ObjectiveData {
    /// Frequency points (Hz) at which responses are evaluated
    pub freqs: Array1<f64>,
    /// Target SPL curve in dB, aligned index-for-index with `freqs`
    pub target: Array1<f64>,
    /// Measured deviation from the target in dB, aligned with `freqs`;
    /// loss functions compare the candidate PEQ response against this curve
    pub deviation: Array1<f64>,
    /// Sample rate in Hz
    pub srate: f64,
    #[allow(dead_code)]
    /// Minimum spacing between filters in octaves
    /// (allow(dead_code): not read by the fitness code in this module —
    /// presumably consumed by constraint helpers elsewhere; verify)
    pub min_spacing_oct: f64,
    /// Weight for spacing penalty term (scaled by `configure_penalties`)
    pub spacing_weight: f64,
    /// Maximum allowed dB level
    pub max_db: f64,
    /// Minimum absolute gain for filters
    pub min_db: f64,
    /// Minimum frequency in Hz for loss function evaluation
    pub min_freq: f64,
    /// Maximum frequency in Hz for loss function evaluation
    pub max_freq: f64,
    /// PEQ model that defines the filter structure (and thus the layout of
    /// the optimizer's parameter vector)
    pub peq_model: PeqModel,
    /// Type of loss function to use
    pub loss_type: LossType,
    /// Optional score data for SpeakerScore loss type
    pub speaker_score_data: Option<SpeakerLossData>,
    /// Optional score data for HeadphoneScore loss type
    pub headphone_score_data: Option<HeadphoneLossData>,
    /// Input curve for headphone loss (optional)
    pub input_curve: Option<Curve>,
    /// Optional data for multi-driver crossover optimization
    /// (required when `loss_type` is DriversFlat or MultiSubFlat)
    pub drivers_data: Option<DriversLossData>,
    /// Fixed crossover frequencies in Hz (when not optimizing frequencies)
    pub fixed_crossover_freqs: Option<Vec<f64>>,
    /// Penalty weight for the ceiling constraint.
    ///
    /// The three `penalty_w_*` weights are used when the optimizer does not
    /// support nonlinear constraints. If zero, penalties are disabled and
    /// true constraints (if any) are used.
    pub penalty_w_ceiling: f64,
    /// Penalty for spacing constraint
    pub penalty_w_spacing: f64,
    /// Penalty for min gain constraint
    pub penalty_w_mingain: f64,
    /// Integrality constraints - true for integer parameters, false for continuous
    pub integrality: Option<Vec<bool>>,
    /// Multi-objective data for multi-measurement optimization.
    /// When `Some`, `compute_base_fitness` delegates to `compute_multi_objective_fitness`.
    pub multi_objective: Option<MultiObjectiveData>,
    /// Whether to smooth the error before computing the loss
    pub smooth: bool,
    /// Smoothing resolution as 1/N octave
    pub smooth_n: usize,
    /// Frequency-dependent maximum boost envelope for per-filter gain clamping.
    /// Each entry is (frequency_hz, max_boost_db). Interpolated in log-frequency.
    /// When Some, positive filter gains are clamped before loss evaluation.
    pub max_boost_envelope: Option<Vec<(f64, f64)>>,
    /// CDT-aware minimum cut envelope: limits how deep the optimizer can cut
    /// at frequencies where the ear generates Cubic Distortion Tones.
    /// Each entry is (frequency_hz, max_cut_db) where max_cut_db is negative.
    /// When Some, negative filter gains are clamped before loss evaluation.
    pub min_cut_envelope: Option<Vec<(f64, f64)>>,
}

/// Data for multi-objective optimization across multiple measurements
///
/// The same filter parameter vector is evaluated against each measurement
/// curve and the per-curve losses are combined according to `strategy`.
#[derive(Debug, Clone)]
pub struct MultiObjectiveData {
    /// One ObjectiveData per measurement curve
    pub objectives: Vec<ObjectiveData>,
    /// Strategy for combining per-measurement losses
    pub strategy: crate::roomeq::MultiMeasurementStrategy,
    /// Normalized weights (len == objectives.len()), used by WeightedSum
    pub weights: Vec<f64>,
    /// Lambda for VariancePenalized strategy (weights the variance of the
    /// per-curve losses added on top of their mean)
    pub variance_lambda: f64,
}

/// Penalty configuration mode for optimizers.
///
/// Optimizers differ in how constraints are enforced: some handle them
/// natively, others need constraint violations folded into the fitness
/// as penalty terms. This enum selects the penalty weights accordingly.
///
/// # Penalty Scale Rationale
///
/// - **Disabled**: Optimizers with native constraint support (DE) - penalties are handled by the optimizer
/// - **Standard**: Traditional optimizers (NLOPT algorithms like COBYLA, Nelder-Mead) - use 1e4 scale
/// - **Pso**: Particle Swarm Optimization - uses 5e2 scale because PSO needs more exploration space
///
/// Larger weights push candidates away from violating regions more
/// aggressively, but also restrict how freely the solution space is explored.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PenaltyMode {
    /// Disable all penalties (use native constraints)
    Disabled,
    /// Standard penalty weights for most optimizers
    Standard,
    /// Moderate penalties for PSO (needs more exploration)
    Pso,
}

impl PenaltyMode {
    /// Ceiling penalty weight - penalizes exceeding the target response ceiling
    pub const fn ceiling_weight(&self) -> f64 {
        match *self {
            Self::Standard => 1e4,
            Self::Pso => 5e2,
            Self::Disabled => 0.0,
        }
    }

    /// Minimum gain penalty weight - penalizes going below minimum gain
    pub const fn mingain_weight(&self) -> f64 {
        match *self {
            Self::Standard => 1e3,
            Self::Pso => 50.0,
            Self::Disabled => 0.0,
        }
    }
}

impl ObjectiveData {
    /// Configure penalty weights based on the optimizer's requirements.
    ///
    /// Call this before optimization to set appropriate penalty weights.
    /// Use `PenaltyMode::Disabled` when the optimizer supports native constraints.
    pub fn configure_penalties(&mut self, mode: PenaltyMode) {
        // The spacing penalty scales the user-configured spacing weight by a
        // per-mode factor (disabled mode zeroes it out entirely). Negative
        // configured weights are treated as zero.
        let spacing_scale = match mode {
            PenaltyMode::Standard => 1e3,
            PenaltyMode::Pso => 5e2,
            PenaltyMode::Disabled => 0.0,
        };
        self.penalty_w_spacing = spacing_scale * self.spacing_weight.max(0.0);
        self.penalty_w_ceiling = mode.ceiling_weight();
        self.penalty_w_mingain = mode.mingain_weight();
    }
}

/// Backend category resolved from an algorithm name.
///
/// Produced by `parse_algorithm_name`; carries the backend-specific
/// identifier needed to dispatch the optimization run.
#[derive(Debug, Clone)]
pub enum AlgorithmCategory {
    /// NLOPT library algorithm with specific algorithm type
    #[cfg(feature = "nlopt")]
    Nlopt(Algorithm),
    /// Metaheuristics library algorithm with algorithm name
    Metaheuristics(String),
    /// AutoEQ custom algorithm with algorithm name
    AutoEQ(String),
}

/// Parse algorithm name and return category with normalized name
///
/// Resolves `name` (with or without a library prefix) through
/// `find_algorithm_info`, then maps the normalized name onto the backend
/// that implements it. Returns `None` for unknown algorithms.
pub fn parse_algorithm_name(name: &str) -> Option<AlgorithmCategory> {
    let info = find_algorithm_info(name)?;
    let normalized = info.name;

    #[cfg(feature = "nlopt")]
    if let Some(nlopt_name) = normalized.strip_prefix("nlopt:") {
        let nlopt_algo = match nlopt_name {
            "bobyqa" => Algorithm::Bobyqa,
            "cobyla" => Algorithm::Cobyla,
            "neldermead" => Algorithm::Neldermead,
            "isres" => Algorithm::Isres,
            "ags" => Algorithm::Ags,
            "origdirect" => Algorithm::OrigDirect,
            "crs2lm" => Algorithm::Crs2Lm,
            "direct" => Algorithm::Direct,
            "directl" => Algorithm::DirectL,
            "gmlsl" => Algorithm::GMlsl,
            "gmlsllds" => Algorithm::GMlslLds,
            "sbplx" => Algorithm::Sbplx,
            "slsqp" => Algorithm::Slsqp,
            "stogo" => Algorithm::StoGo,
            "stogorand" => Algorithm::StoGoRand,
            // Defensive fallback; the catalog should never yield other names.
            _ => Algorithm::Isres,
        };
        return Some(AlgorithmCategory::Nlopt(nlopt_algo));
    }

    if let Some(mh_name) = normalized.strip_prefix("mh:") {
        return Some(AlgorithmCategory::Metaheuristics(mh_name.to_string()));
    }
    if let Some(autoeq_name) = normalized.strip_prefix("autoeq:") {
        return Some(AlgorithmCategory::AutoEQ(autoeq_name.to_string()));
    }

    None
}

/// Compute multi-objective fitness across multiple measurement curves.
///
/// Each objective shares the same filter parameters `x` but evaluates against
/// a different measurement curve. The per-curve losses are combined according
/// to the configured strategy.
fn compute_multi_objective_fitness(x: &[f64], mo: &MultiObjectiveData) -> f64 {
    use crate::roomeq::MultiMeasurementStrategy;

    let per_curve: Vec<f64> = mo
        .objectives
        .iter()
        .map(|objective| compute_base_fitness_single(x, objective))
        .collect();

    match mo.strategy {
        MultiMeasurementStrategy::Average => {
            // Defensive path: average mode normally optimizes a pre-averaged
            // curve and should not reach here; fall back to a plain mean.
            per_curve.iter().sum::<f64>() / per_curve.len() as f64
        }
        MultiMeasurementStrategy::WeightedSum => per_curve
            .iter()
            .zip(mo.weights.iter())
            .map(|(loss, weight)| loss * weight)
            .sum(),
        MultiMeasurementStrategy::Minimax => {
            // Worst (largest) per-curve loss.
            per_curve.iter().copied().fold(f64::NEG_INFINITY, f64::max)
        }
        MultiMeasurementStrategy::VariancePenalized => {
            let count = per_curve.len() as f64;
            let mean = per_curve.iter().sum::<f64>() / count;
            let variance = per_curve
                .iter()
                .map(|loss| (loss - mean).powi(2))
                .sum::<f64>()
                / count;
            mean + mo.variance_lambda * variance
        }
        MultiMeasurementStrategy::SpatialRobustness => {
            // SpatialRobustness optimizes a single RMS-averaged curve and
            // never takes the multi-objective path.
            unreachable!("SpatialRobustness strategy should not use multi-objective loss path")
        }
    }
}

/// Clamp positive filter gains in the parameter vector using a frequency-dependent envelope.
///
/// For every filter whose gain is a boost (positive), the gain is limited to
/// the envelope value interpolated at the filter's center frequency. Cuts are
/// left untouched. Returns a new owned parameter vector.
pub fn clamp_gains_to_envelope(x: &[f64], envelope: &[(f64, f64)], peq_model: PeqModel) -> Vec<f64> {
    use crate::param_utils;
    let mut out = x.to_vec();
    let ppf = param_utils::params_per_filter(peq_model);
    for idx in 0..param_utils::num_filters(x, peq_model) {
        let filter = param_utils::get_filter_params(x, idx, peq_model);
        if filter.gain <= 0.0 {
            continue; // cuts are not constrained by the boost envelope
        }
        // Center frequency is stored as log10(Hz) in the parameter vector.
        let center_hz = 10f64.powf(filter.freq);
        let limit = interpolate_boost_envelope(envelope, center_hz);
        if filter.gain > limit {
            // Gain is the last parameter within each filter's group.
            out[idx * ppf + (ppf - 1)] = limit;
        }
    }
    out
}

/// Interpolate a frequency-dependent envelope in log-frequency space.
///
/// `envelope` is a list of `(frequency_hz, value_db)` points in ascending
/// frequency order. Queries below the first point or above the last clamp to
/// the endpoint values; an empty envelope imposes no limit (`f64::INFINITY`).
///
/// Fix: a zero-width segment (two adjacent points with the same frequency)
/// previously divided by `ln(f1) - ln(f0) == 0` and returned NaN; it now
/// returns the segment's first value.
fn interpolate_boost_envelope(envelope: &[(f64, f64)], freq_hz: f64) -> f64 {
    if envelope.is_empty() {
        return f64::INFINITY;
    }
    let (first_f, first_db) = envelope[0];
    if freq_hz <= first_f {
        return first_db;
    }
    let (last_f, last_db) = envelope[envelope.len() - 1];
    if freq_hz >= last_f {
        return last_db;
    }
    for pair in envelope.windows(2) {
        let (f0, db0) = pair[0];
        let (f1, db1) = pair[1];
        if freq_hz >= f0 && freq_hz <= f1 {
            // Guard against duplicate frequencies: interpolating across a
            // zero-width segment would divide by zero and yield NaN.
            if f1 <= f0 {
                return db0;
            }
            let t = (freq_hz.ln() - f0.ln()) / (f1.ln() - f0.ln());
            return db0 + t * (db1 - db0);
        }
    }
    // Unreachable for sorted envelopes (the endpoint guards above cover it),
    // kept as a safe fallback.
    last_db
}

/// Clamp negative filter gains (cuts) to a frequency-dependent minimum.
///
/// Counterpart of `clamp_gains_to_envelope` for cuts: a filter whose gain is
/// more negative than the envelope's limit at its center frequency is raised
/// to that limit. Used for CDT protection — prevents over-cutting at
/// frequencies where the ear generates distortion tones.
pub fn clamp_cuts_to_envelope(x: &[f64], envelope: &[(f64, f64)], peq_model: PeqModel) -> Vec<f64> {
    use crate::param_utils;
    let mut out = x.to_vec();
    let ppf = param_utils::params_per_filter(peq_model);
    for idx in 0..param_utils::num_filters(x, peq_model) {
        let filter = param_utils::get_filter_params(x, idx, peq_model);
        if filter.gain >= 0.0 {
            continue; // boosts are handled by the boost envelope, not here
        }
        // Center frequency is stored as log10(Hz) in the parameter vector.
        let center_hz = 10f64.powf(filter.freq);
        // Envelope values are negative dB: the deepest cut allowed.
        let limit = interpolate_boost_envelope(envelope, center_hz);
        if filter.gain < limit {
            // Gain is the last parameter within each filter's group.
            out[idx * ppf + (ppf - 1)] = limit;
        }
    }
    out
}

/// Compute the base fitness for a single ObjectiveData (no multi-objective delegation).
/// This is the inner implementation that does not check `multi_objective`.
///
/// `x` is the flat optimizer parameter vector; its layout depends on
/// `data.loss_type`: per-filter PEQ parameters for the flat/score losses,
/// or gains/delays(/crossovers) for the driver-level losses.
/// Returns the scalar loss to minimize (`f64::INFINITY` when required
/// auxiliary data is missing).
fn compute_base_fitness_single(x: &[f64], data: &ObjectiveData) -> f64 {
    // Clamp gains to envelopes before evaluation (boost limits + CDT cut limits).
    // These two locals are declared in the enclosing scope so the borrows
    // created inside the block below (`&clamped_boost` / `&clamped_cut`)
    // live long enough to rebind `x`.
    let clamped_boost;
    let clamped_cut;
    let x = {
        // Driver-level losses use a gains/delays/xover parameter layout, not
        // per-filter PEQ parameters, so gain clamping does not apply to them.
        let skip = matches!(data.loss_type, LossType::DriversFlat | LossType::MultiSubFlat);
        let x = if !skip && let Some(ref env) = data.max_boost_envelope {
            clamped_boost = clamp_gains_to_envelope(x, env, data.peq_model);
            &clamped_boost
        } else {
            x
        };
        if !skip && let Some(ref env) = data.min_cut_envelope {
            clamped_cut = clamp_cuts_to_envelope(x, env, data.peq_model);
            &clamped_cut
        } else {
            x
        }
    };

    match data.loss_type {
        LossType::DriversFlat => {
            // Multi-driver crossover optimization.
            // Parameter layout: [gains(N), delays(N)] and, when crossover
            // frequencies are being optimized, [xovers] appended as log10(Hz).
            if let Some(ref drivers_data) = data.drivers_data {
                let n_drivers = drivers_data.drivers.len();
                let gains = &x[0..n_drivers];
                let delays = &x[n_drivers..2 * n_drivers];
                let xover_freqs: Vec<f64> = if let Some(ref fixed) = data.fixed_crossover_freqs {
                    fixed.clone()
                } else {
                    // Convert the optimized log10 frequencies back to Hz.
                    let xover_freqs_log10 = &x[2 * n_drivers..];
                    xover_freqs_log10
                        .iter()
                        .map(|f| 10.0_f64.powf(*f))
                        .collect()
                };
                drivers_flat_loss(
                    drivers_data,
                    gains,
                    &xover_freqs,
                    Some(delays),
                    data.srate,
                    data.min_freq,
                    data.max_freq,
                )
            } else {
                log::error!("drivers-flat loss requested but driver data is missing");
                f64::INFINITY
            }
        }
        LossType::MultiSubFlat => {
            // Multi-subwoofer alignment: only gains and delays are optimized,
            // no crossover frequencies. Layout: [gains(N), delays(N)].
            if let Some(ref drivers_data) = data.drivers_data {
                let n_drivers = drivers_data.drivers.len();
                let gains = &x[0..n_drivers];
                let delays = &x[n_drivers..2 * n_drivers];
                crate::loss::multisub_flat_loss(
                    drivers_data,
                    gains,
                    delays,
                    data.srate,
                    data.min_freq,
                    data.max_freq,
                )
            } else {
                log::error!("multi-sub-flat loss requested but driver data is missing");
                f64::INFINITY
            }
        }
        LossType::HeadphoneFlat | LossType::SpeakerFlat => {
            // Residual error after applying the candidate PEQ, optionally
            // smoothed to 1/N octave before scoring flatness.
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            if data.smooth {
                let curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error,
                    phase: None,
                };
                let smoothed = crate::read::smooth_one_over_n_octave(&curve, data.smooth_n);
                flat_loss(&data.freqs, &smoothed.spl, data.min_freq, data.max_freq)
            } else {
                flat_loss(&data.freqs, &error, data.min_freq, data.max_freq)
            }
        }
        LossType::SpeakerFlatAsymmetric => {
            // Same residual computation as the flat losses, but scored with an
            // asymmetric penalty (see `flat_loss_asymmetric`).
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            if data.smooth {
                let curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error,
                    phase: None,
                };
                let smoothed = crate::read::smooth_one_over_n_octave(&curve, data.smooth_n);
                flat_loss_asymmetric(&data.freqs, &smoothed.spl, data.min_freq, data.max_freq)
            } else {
                flat_loss_asymmetric(&data.freqs, &error, data.min_freq, data.max_freq)
            }
        }
        LossType::SpeakerScore => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            if let Some(ref sd) = data.speaker_score_data {
                let error = &peq_spl - &data.deviation;
                let s = speaker_score_loss(sd, &data.freqs, &peq_spl);
                let p = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq) / 3.0;
                // SpeakerScore fitness: minimize (100 - score + flatness/3)
                // - 100.0: reference ceiling for Harman speaker score (typical range 0-100)
                // - /3.0: reduces flatness weight to ~25% vs score (empirically tuned)
                100.0 - s + p
            } else {
                log::error!("speaker score loss requested but score data is missing");
                f64::INFINITY
            }
        }
        LossType::HeadphoneScore => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            if let Some(ref _hd) = data.headphone_score_data {
                // NOTE(review): error sign is inverted relative to the flat
                // losses — presumably `headphone_loss` expects target-minus-
                // response; confirm against its definition.
                let error = &data.deviation - &peq_spl;
                let error_curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error.clone(),
                    phase: None,
                };
                let s = headphone_loss(&error_curve);
                let p = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq);
                // 1000.0 ceiling and *20.0 flatness weight appear empirically
                // tuned (cf. the SpeakerScore shaping above) — TODO confirm.
                1000.0 - s + p * 20.0
            } else {
                log::error!("headphone score loss requested but headphone data is missing");
                f64::INFINITY
            }
        }
        LossType::Epa => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            let flatness = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq);
            let freqs_vec: Vec<f64> = data.freqs.iter().copied().collect();
            // The corrected SPL = target + deviation (measurement) + peq correction
            let corrected_spl: Vec<f64> = data
                .freqs
                .iter()
                .enumerate()
                .map(|(i, _)| data.target[i] + data.deviation[i] + peq_spl[i])
                .collect();
            // EPA scoring uses its default configuration; the flatness term is
            // folded into the EPA loss rather than added separately here.
            let epa_config = crate::epa::score::EpaConfig::default();
            crate::epa::score::epa_loss(&freqs_vec, &corrected_spl, &epa_config, flatness)
        }
    }
}

/// Compute the base fitness value (without penalties) for given parameters
///
/// This is the unified fitness function used by both NLOPT and metaheuristics optimizers.
/// If `multi_objective` is set, delegates to multi-objective fitness computation.
///
/// # Arguments
/// * `x` - Parameter vector. Layout depends on the loss type: PEQ losses use
///   [freq, Q, gain] triplets (freq in log10 space), while `DriversFlat` and
///   `MultiSubFlat` use [gains(N), delays(N), (xovers(N-1))].
/// * `data` - Objective data (frequency grid, deviation curve, loss type, bounds, ...)
///
/// # Returns
/// Scalar loss to minimize. Returns `f64::INFINITY` (after logging an error)
/// when a loss type requires auxiliary data that is not present in `data`.
///
/// NOTE(review): the error sign convention differs between branches —
/// most use `peq_spl - deviation`, while `HeadphoneScore` uses
/// `deviation - peq_spl`. `flat_loss` appears to be used symmetrically, but
/// confirm the intended convention before unifying.
pub fn compute_base_fitness(x: &[f64], data: &ObjectiveData) -> f64 {
    // If multi-objective data is present, delegate to multi-objective fitness
    // (the single loss_type below is ignored in that mode).
    if let Some(ref mo) = data.multi_objective {
        return compute_multi_objective_fitness(x, mo);
    }

    match data.loss_type {
        LossType::DriversFlat => {
            // Multi-driver crossover optimization
            if let Some(ref drivers_data) = data.drivers_data {
                let n_drivers = drivers_data.drivers.len();
                // Parameter layout depends on whether frequencies are fixed:
                // - Fixed freqs: [gains(N), delays(N)]
                // - Optimizing freqs: [gains(N), delays(N), xovers(N-1)]
                let gains = &x[0..n_drivers];
                let delays = &x[n_drivers..2 * n_drivers];

                // Use fixed frequencies if provided, otherwise extract from parameter vector
                let xover_freqs: Vec<f64> = if let Some(ref fixed) = data.fixed_crossover_freqs {
                    fixed.clone()
                } else {
                    // Crossover frequencies are optimized in log10(Hz); convert back to Hz.
                    let xover_freqs_log10 = &x[2 * n_drivers..];
                    xover_freqs_log10
                        .iter()
                        .map(|f| 10.0_f64.powf(*f))
                        .collect()
                };

                drivers_flat_loss(
                    drivers_data,
                    gains,
                    &xover_freqs,
                    Some(delays),
                    data.srate,
                    data.min_freq,
                    data.max_freq,
                )
            } else {
                log::error!("drivers-flat loss requested but driver data is missing");
                f64::INFINITY
            }
        }
        LossType::MultiSubFlat => {
            // Multiple-subwoofer optimization: only gains and delays are optimized
            // (no crossover frequencies in the parameter vector).
            if let Some(ref drivers_data) = data.drivers_data {
                let n_drivers = drivers_data.drivers.len();
                let gains = &x[0..n_drivers];
                let delays = &x[n_drivers..2 * n_drivers];

                crate::loss::multisub_flat_loss(
                    drivers_data,
                    gains,
                    delays,
                    data.srate,
                    data.min_freq,
                    data.max_freq,
                )
            } else {
                log::error!("multi-sub-flat loss requested but driver data is missing");
                f64::INFINITY
            }
        }
        LossType::HeadphoneFlat | LossType::SpeakerFlat => {
            // Flatness of the residual error (PEQ response minus target deviation),
            // optionally smoothed over 1/N-octave bands before scoring.
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            if data.smooth {
                let curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error,
                    phase: None,
                };
                let smoothed = crate::read::smooth_one_over_n_octave(&curve, data.smooth_n);
                flat_loss(&data.freqs, &smoothed.spl, data.min_freq, data.max_freq)
            } else {
                flat_loss(&data.freqs, &error, data.min_freq, data.max_freq)
            }
        }
        LossType::SpeakerFlatAsymmetric => {
            // Same as SpeakerFlat but scored with an asymmetric flatness loss
            // (positive and negative deviations weighted differently).
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            if data.smooth {
                let curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error,
                    phase: None,
                };
                let smoothed = crate::read::smooth_one_over_n_octave(&curve, data.smooth_n);
                flat_loss_asymmetric(&data.freqs, &smoothed.spl, data.min_freq, data.max_freq)
            } else {
                flat_loss_asymmetric(&data.freqs, &error, data.min_freq, data.max_freq)
            }
        }
        LossType::SpeakerScore => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            if let Some(ref sd) = data.speaker_score_data {
                let error = &peq_spl - &data.deviation;
                let s = speaker_score_loss(sd, &data.freqs, &peq_spl);
                let p = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq) / 3.0;
                // SpeakerScore fitness: minimize (100 - score + flatness/3)
                // - 100.0: reference ceiling for Harman speaker score (typical range 0-100)
                // - /3.0: reduces flatness weight to ~25% vs score (empirically tuned)
                100.0 - s + p
            } else {
                log::error!("speaker score loss requested but score data is missing");
                f64::INFINITY
            }
        }
        LossType::HeadphoneScore => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            // `headphone_score_data` is only checked for presence; the loss itself is
            // computed from the residual curve below.
            if let Some(ref _hd) = data.headphone_score_data {
                // Compute remaining deviation: target - (input + peq) = deviation - peq
                // where deviation = target - input
                let error = &data.deviation - &peq_spl;

                // Use headphone_loss on the remaining deviation
                let error_curve = Curve {
                    freq: data.freqs.clone(),
                    spl: error.clone(),
                    phase: None,
                };
                let s = headphone_loss(&error_curve);
                let p = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq);
                // HeadphoneScore fitness: minimize (1000 - score + flatness*20)
                // - 1000.0: reference ceiling for Olive preference score (max ~114.49)
                // - *20.0: amplifies flatness term (headphone score has small dynamic range)
                1000.0 - s + p * 20.0
            } else {
                log::error!("headphone score loss requested but headphone data is missing");
                f64::INFINITY
            }
        }
        LossType::Epa => {
            let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
            let error = &peq_spl - &data.deviation;
            let flatness = flat_loss(&data.freqs, &error, data.min_freq, data.max_freq);
            let freqs_vec: Vec<f64> = data.freqs.iter().copied().collect();
            // The corrected SPL = target + deviation (measurement) + peq correction
            let corrected_spl: Vec<f64> = data
                .freqs
                .iter()
                .enumerate()
                .map(|(i, _)| data.target[i] + data.deviation[i] + peq_spl[i])
                .collect();
            let epa_config = crate::epa::score::EpaConfig::default();
            crate::epa::score::epa_loss(&freqs_vec, &corrected_spl, &epa_config, flatness)
        }
    }
}

/// Compute objective function value including penalty terms for constraints
///
/// Non-mutating version used by optimizers that don't require `&mut` data
/// (e.g., metaheuristics). Avoids cloning ObjectiveData on every evaluation.
///
/// # Returns
/// Base fitness plus each enabled penalty weight times the squared violation.
pub fn compute_fitness_penalties_ref(x: &[f64], data: &ObjectiveData) -> f64 {
    let base = compute_base_fitness(x, data);

    // The penalty helpers assume the parameter vector is laid out as PEQ
    // freq/Q/gain triplets. DriversFlat and MultiSubFlat use a different layout
    // (gains/delays/crossovers), so applying them there would misinterpret the
    // values — return the base fitness untouched for those loss types.
    if matches!(
        data.loss_type,
        LossType::DriversFlat | LossType::MultiSubFlat
    ) {
        return base;
    }

    // When a penalty weight is > 0, fold the corresponding squared violation into
    // the fitness so optimizers without nonlinear constraints still respect limits.
    let mut total = base;

    // Ceiling penalty: combined PEQ response must stay below max_db.
    if data.penalty_w_ceiling > 0.0 {
        let peq_spl = x2spl(&data.freqs, x, data.srate, data.peq_model);
        let v = viol_ceiling_from_spl(&peq_spl, data.max_db, data.peq_model);
        total += data.penalty_w_ceiling * v * v;
    }

    // Spacing penalty: adjacent filter centers must be at least min_spacing_oct apart.
    if data.penalty_w_spacing > 0.0 {
        let v = viol_spacing_from_xs(x, data.peq_model, data.min_spacing_oct);
        total += data.penalty_w_spacing * v * v;
    }

    // Minimum-gain penalty: filters should not have gains below min_db
    // (only meaningful when a positive min_db is configured).
    if data.penalty_w_mingain > 0.0 && data.min_db > 0.0 {
        let v = viol_min_gain_from_xs(x, data.peq_model, data.min_db);
        total += data.penalty_w_mingain * v * v;
    }

    total
}

/// Compute objective function value including penalty terms for constraints
///
/// NLOPT-compatible wrapper that takes `&mut ObjectiveData` (required by NLOPT's callback
/// signature). Delegates to `compute_fitness_penalties_ref`.
///
/// # Arguments
/// * `x` - Parameter vector
/// * `_gradient` - Gradient output slot (never populated; present only to match
///   NLOPT's callback signature)
/// * `data` - Objective data containing penalty weights and parameters
///
/// # Returns
/// Base fitness value plus weighted penalty terms
pub fn compute_fitness_penalties(
    x: &[f64],
    _gradient: Option<&mut [f64]>,
    data: &mut ObjectiveData,
) -> f64 {
    // Reborrow immutably — the computation never mutates the objective data.
    compute_fitness_penalties_ref(x, &*data)
}

/// Optimize filter parameters using global optimization algorithms
///
/// # Arguments
/// * `x` - Initial parameter vector to optimize (modified in place)
/// * `lower_bounds` - Lower bounds for each parameter
/// * `upper_bounds` - Upper bounds for each parameter
/// * `objective_data` - Data structure containing optimization parameters
/// * `cli_args` - CLI arguments containing algorithm, population, maxeval, and other parameters
///
/// # Returns
/// * Result containing (status, optimal value) or (error, value)
///
/// # Details
/// Convenience wrapper around [`optimize_filters_with_algo_override`] that always
/// uses the algorithm selected in `cli_args`. The parameter vector is organized
/// as [freq, Q, gain] triplets for each filter.
pub fn optimize_filters(
    x: &mut [f64],
    lower_bounds: &[f64],
    upper_bounds: &[f64],
    objective_data: ObjectiveData,
    cli_args: &crate::cli::Args,
) -> Result<(String, f64), (String, f64)> {
    // No override: dispatch on the CLI-selected algorithm.
    optimize_filters_with_algo_override(x, lower_bounds, upper_bounds, objective_data, cli_args, None)
}

/// Optimize filter parameters with optional algorithm override
///
/// # Arguments
/// * `x` - Initial parameter vector to optimize (modified in place)
/// * `lower_bounds` - Lower bounds for each parameter
/// * `upper_bounds` - Upper bounds for each parameter
/// * `objective_data` - Data structure containing optimization parameters
/// * `cli_args` - CLI arguments containing algorithm, population, maxeval, and other parameters
/// * `algo_override` - Optional algorithm override (e.g., for local refinement)
///
/// # Returns
/// * Result containing (status, optimal value) or (error, value)
pub fn optimize_filters_with_algo_override(
    x: &mut [f64],
    lower_bounds: &[f64],
    upper_bounds: &[f64],
    objective_data: ObjectiveData,
    cli_args: &crate::cli::Args,
    algo_override: Option<&str>,
) -> Result<(String, f64), (String, f64)> {
    // An explicit override (e.g. a local refinement pass) wins over the CLI choice.
    let algo_name = algo_override.unwrap_or(&cli_args.algo);

    // Route the request to the backend that implements the named algorithm.
    match parse_algorithm_name(algo_name) {
        #[cfg(feature = "nlopt")]
        Some(AlgorithmCategory::Nlopt(nlopt_algo)) => optimize_filters_nlopt(
            x,
            lower_bounds,
            upper_bounds,
            objective_data,
            nlopt_algo,
            cli_args.population,
            cli_args.maxeval,
        ),
        Some(AlgorithmCategory::Metaheuristics(mh_name)) => optimize_filters_mh(
            x,
            lower_bounds,
            upper_bounds,
            objective_data,
            &mh_name,
            cli_args.population,
            cli_args.maxeval,
        ),
        Some(AlgorithmCategory::AutoEQ(autoeq_name)) => optimize_filters_autoeq(
            x,
            lower_bounds,
            upper_bounds,
            objective_data,
            &autoeq_name,
            cli_args,
        ),
        None => Err((format!("Unknown algorithm: {}", algo_name), f64::INFINITY)),
    }
}

/// Simple progress callback: (iteration, best_loss) -> continue/stop
///
/// Used to thread per-iteration optimizer progress through the room EQ call chain.
/// The returned `crate::de::CallbackAction` presumably signals whether the
/// optimizer should continue or stop early — confirm against the DE backend.
pub type OptimProgressCallback = Box<dyn FnMut(usize, f64) -> crate::de::CallbackAction + Send>;

/// Optimize filter parameters with a progress callback for per-iteration updates.
///
/// Adapts the simple `(iteration, loss)` callback into the library-specific
/// intermediate types expected by the DE and metaheuristics backends. NLopt has
/// no iteration-callback support, so the callback is ignored for NLopt algorithms.
#[allow(clippy::too_many_arguments)]
pub fn optimize_filters_with_callback(
    x: &mut [f64],
    lower_bounds: &[f64],
    upper_bounds: &[f64],
    objective_data: ObjectiveData,
    cli_args: &crate::cli::Args,
    mut callback: OptimProgressCallback,
) -> Result<(String, f64), (String, f64)> {
    match parse_algorithm_name(&cli_args.algo) {
        #[cfg(feature = "nlopt")]
        Some(AlgorithmCategory::Nlopt(nlopt_algo)) => {
            // NLopt does not support iteration callbacks — run without one
            optimize_filters_nlopt(
                x,
                lower_bounds,
                upper_bounds,
                objective_data,
                nlopt_algo,
                cli_args.population,
                cli_args.maxeval,
            )
        }
        Some(AlgorithmCategory::Metaheuristics(mh_name)) => {
            // Wrap the plain (iter, loss) callback in the MH intermediate type.
            let adapter: Box<
                dyn FnMut(&super::optim_mh::MHIntermediate) -> crate::de::CallbackAction + Send,
            > = Box::new(move |im| callback(im.iter, im.fun));
            optimize_filters_mh_with_callback(
                x,
                lower_bounds,
                upper_bounds,
                objective_data,
                &mh_name,
                cli_args.population,
                cli_args.maxeval,
                adapter,
            )
        }
        Some(AlgorithmCategory::AutoEQ(autoeq_name)) => {
            // Wrap the plain (iter, loss) callback in the DE intermediate type.
            let adapter: Box<
                dyn FnMut(&crate::de::DEIntermediate) -> crate::de::CallbackAction + Send,
            > = Box::new(move |im| callback(im.iter, im.fun));
            optimize_filters_autoeq_with_callback(
                x,
                lower_bounds,
                upper_bounds,
                objective_data,
                &autoeq_name,
                cli_args,
                adapter,
            )
        }
        None => Err((
            format!("Unknown algorithm: {}", cli_args.algo),
            f64::INFINITY,
        )),
    }
}

/// Extract sorted center frequencies from parameter vector and compute adjacent spacings in octaves.
///
/// # Arguments
/// * `x` - Parameter vector; each filter stores its frequency in log10(Hz)
/// * `peq_model` - PEQ model describing the parameter layout
///
/// # Returns
/// `(freqs, spacings)` where `freqs` are the center frequencies in Hz sorted
/// ascending, and `spacings[i]` is the distance in octaves between
/// `freqs[i]` and `freqs[i + 1]` (empty when there are fewer than 2 filters).
pub fn compute_sorted_freqs_and_adjacent_octave_spacings(
    x: &[f64],
    peq_model: PeqModel,
) -> (Vec<f64>, Vec<f64>) {
    let n = crate::param_utils::num_filters(x, peq_model);
    // Frequencies are stored as log10(Hz); convert back to Hz.
    let mut freqs: Vec<f64> = (0..n)
        .map(|i| 10f64.powf(crate::param_utils::get_filter_params(x, i, peq_model).freq))
        .collect();
    // NaN-tolerant ascending sort (NaNs compare as equal rather than panicking).
    freqs.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    // `windows(2)` yields nothing for slices shorter than 2, so no explicit
    // length guard is needed. The 1e-9 floor guards against log2 of zero/negative
    // values; `.abs()` is defensive against NaN-perturbed ordering.
    let spacings: Vec<f64> = freqs
        .windows(2)
        .map(|w| (w[1].max(1e-9) / w[0].max(1e-9)).log2().abs())
        .collect();
    (freqs, spacings)
}

#[cfg(test)]
mod spacing_diag_tests {
    use super::compute_sorted_freqs_and_adjacent_octave_spacings;
    use crate::cli::PeqModel;

    #[test]
    fn adjacent_octave_spacings_basic() {
        // Three peaking filters at 100/200/400 Hz; per-filter layout is
        // [log10(freq), Q, gain], so each is exactly one octave from the next.
        let params: Vec<f64> = [100.0f64, 200.0, 400.0]
            .iter()
            .flat_map(|f| [f.log10(), 1.0, 0.0])
            .collect();
        let (_freqs, spacings) =
            compute_sorted_freqs_and_adjacent_octave_spacings(&params, PeqModel::Pk);
        assert_eq!(spacings.len(), 2);
        for s in &spacings {
            assert!((s - 1.0).abs() < 1e-12);
        }
    }
}
}