1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
// Metaheuristics-specific optimization code
use super::optim::{ObjectiveData, PenaltyMode, compute_fitness_penalties_ref};
use super::optim_callback::{ProgressTracker, format_param_summary};
use ndarray::Array1;
#[allow(unused_imports)]
use metaheuristics_nature as mh;
#[allow(unused_imports)]
use mh::methods::{De as MhDe, Fa as MhFa, Pso as MhPso, Rga as MhRga, Tlbo as MhTlbo};
#[allow(unused_imports)]
use mh::{Bounded as MhBounded, Fitness as MhFitness, ObjFunc as MhObjFunc, Solver as MhSolver};
/// Information passed to callback after each generation.
///
/// Similar to DEIntermediate but for metaheuristics optimizers.
pub struct MHIntermediate {
    /// Current best solution vector (one entry per optimized parameter).
    pub x: Array1<f64>,
    /// Current best (penalized) fitness value; lower is better.
    pub fun: f64,
    /// Current iteration (generation) number; first report is iteration 1.
    pub iter: usize,
}
/// Callback action - shared with DE module for consistency
pub use crate::de::CallbackAction;
// ---------------- Metaheuristics objective and utilities ----------------
use std::sync::{Arc, Mutex};
/// Objective function wrapper for metaheuristics optimizers.
///
/// Implements the `metaheuristics-nature` `ObjFunc` and `Bounded` traits so
/// a solver can evaluate candidates and query parameter bounds. `Clone` is
/// derived because the solver may need its own copies of the objective.
#[derive(Clone)]
pub struct MHObjective {
    /// Objective data containing target curves and loss parameters.
    pub data: ObjectiveData,
    /// Parameter bounds as [min, max] pairs, one per parameter.
    pub bounds: Vec<[f64; 2]>,
    /// Optional callback state for tracking progress, shared with the
    /// solver's reporting task via `Arc<Mutex<..>>`.
    pub callback_state: Option<Arc<Mutex<CallbackState>>>,
}
/// State tracked across fitness evaluations for callback reporting.
///
/// Written by `MHObjective::fitness` on every evaluation and read by the
/// solver's task closure to decide when to invoke the user callback.
pub struct CallbackState {
    /// Best fitness value found so far (initialized to `f64::INFINITY`).
    pub best_fitness: f64,
    /// Parameters corresponding to best fitness (empty until the first
    /// improving evaluation).
    pub best_params: Vec<f64>,
    /// Total number of fitness evaluations performed.
    pub eval_count: usize,
    /// Evaluation count at the last callback report; used to throttle
    /// callback invocations to roughly every N evaluations.
    pub last_report_eval: usize,
}
impl MhBounded for MHObjective {
    /// Expose the per-parameter `[min, max]` bounds to the solver.
    fn bound(&self) -> &[[f64; 2]] {
        &self.bounds
    }
}
impl MhObjFunc for MHObjective {
    type Ys = f64;

    /// Evaluate the penalized fitness of candidate `xs`.
    ///
    /// When callback state is attached, also bump the evaluation counter
    /// and record the candidate if it improves on the best seen so far.
    fn fitness(&self, xs: &[f64]) -> Self::Ys {
        let value = compute_fitness_penalties_ref(xs, &self.data);
        if let Some(state_arc) = self.callback_state.as_ref() {
            // A poisoned mutex is silently skipped: progress tracking is
            // best-effort and must not abort the optimization.
            if let Ok(mut state) = state_arc.lock() {
                state.eval_count += 1;
                if value < state.best_fitness {
                    state.best_fitness = value;
                    state.best_params = xs.to_vec();
                }
            }
        }
        value
    }
}
/// Build the default progress-printing callback for metaheuristics runs.
///
/// The returned closure logs the current fitness when the run starts
/// stalling, at stall intervals of 25, or every 10th iteration, and dumps
/// a parameter summary every 50 iterations. It always asks the solver to
/// continue.
pub fn create_mh_callback(
    algo_name: &str,
) -> Box<dyn FnMut(&MHIntermediate) -> CallbackAction + Send> {
    let label = algo_name.to_string();
    let mut progress = ProgressTracker::default();
    Box::new(move |state: &MHIntermediate| -> CallbackAction {
        let (delta_msg, _) = progress.update(state.fun);
        // Stall checks come first and keep their short-circuit order, since
        // the tracker is stateful; the periodic check is a pure predicate.
        let stalled = progress.just_started_stalling() || progress.stall_at_interval(25);
        let periodic = state.iter.is_multiple_of(10);
        if stalled || periodic {
            crate::qa_println!(
                "{} iter {:4} fitness={:.6e} {}",
                label,
                state.iter,
                state.fun,
                delta_msg
            );
        }
        if state.iter > 0 && state.iter.is_multiple_of(50) {
            // Array1 built from a Vec is contiguous, so as_slice() succeeds.
            let summary = format_param_summary(state.x.as_slice().unwrap(), 3);
            crate::qa_println!(" --> Best params: {}", summary);
        }
        CallbackAction::Continue
    })
}
/// Optimize filter parameters using metaheuristics algorithms.
///
/// Thin convenience wrapper: installs the default terminal progress
/// callback and delegates to `optimize_filters_mh_with_callback`.
pub fn optimize_filters_mh(
    x: &mut [f64],
    lower_bounds: &[f64],
    upper_bounds: &[f64],
    objective_data: ObjectiveData,
    mh_name: &str,
    population: usize,
    maxeval: usize,
) -> Result<(String, f64), (String, f64)> {
    // Label each progress line with the chosen algorithm.
    let label = format!("mh::{}", mh_name);
    let progress_callback = create_mh_callback(&label);
    optimize_filters_mh_with_callback(
        x,
        lower_bounds,
        upper_bounds,
        objective_data,
        mh_name,
        population,
        maxeval,
        progress_callback,
    )
}
/// Optimize filter parameters using metaheuristics algorithms with callback support
///
/// Runs the solver selected by `mh_name` over the box-constrained parameter
/// space, invoking `callback` roughly every 100 fitness evaluations with the
/// best-so-far state, and writes the best solution back into `x`.
///
/// # Arguments
/// * `x` - in/out parameter vector; overwritten with the best solution found.
/// * `lower_bounds` / `upper_bounds` - per-parameter bounds; must each have
///   the same length as `x`.
/// * `objective_data` - target curves and loss parameters (cloned so penalty
///   configuration does not mutate the caller's copy).
/// * `mh_name` - algorithm selector: "de", "pso", "rga", "tlbo",
///   "fa"/"firefly"; any other value falls back to DE.
/// * `population` - population size (clamped to at least 1).
/// * `maxeval` - approximate total evaluation budget, converted to a
///   generation count as ceil(maxeval / population).
/// * `callback` - progress callback; returning `CallbackAction::Stop`
///   terminates the run early.
///
/// # Returns
/// Always `Ok((label, best_fitness))`; the `Err` arm exists for signature
/// compatibility with the other optimizer backends.
///
/// # Panics
/// Panics if either bounds slice does not match `x.len()`.
#[allow(clippy::too_many_arguments)]
pub fn optimize_filters_mh_with_callback(
    x: &mut [f64],
    lower_bounds: &[f64],
    upper_bounds: &[f64],
    objective_data: ObjectiveData,
    mh_name: &str,
    population: usize,
    maxeval: usize,
    mut callback: Box<dyn FnMut(&MHIntermediate) -> CallbackAction + Send>,
) -> Result<(String, f64), (String, f64)> {
    let num_params = x.len();
    // Build bounds for metaheuristics (as pairs)
    assert_eq!(lower_bounds.len(), num_params);
    assert_eq!(upper_bounds.len(), num_params);
    let mut bounds: Vec<[f64; 2]> = Vec::with_capacity(num_params);
    for i in 0..num_params {
        bounds.push([lower_bounds[i], upper_bounds[i]]);
    }
    // Create objective with penalties (metaheuristics don't support native constraints)
    let mut penalty_data = objective_data.clone();
    // PSO gets its own penalty scaling; all other algorithms use the standard mode.
    let penalty_mode = if mh_name == "pso" {
        PenaltyMode::Pso
    } else {
        PenaltyMode::Standard
    };
    penalty_data.configure_penalties(penalty_mode);
    // Create callback state, shared between the objective (writer) and the
    // solver task closure (reader) below.
    let callback_state = Arc::new(Mutex::new(CallbackState {
        best_fitness: f64::INFINITY,
        best_params: vec![],
        eval_count: 0,
        last_report_eval: 0,
    }));
    // Clone for the task closure
    let callback_state_task = Arc::clone(&callback_state);
    // Simple objective function wrapper for metaheuristics
    let mh_obj = MHObjective {
        data: penalty_data,
        bounds,
        callback_state: Some(Arc::clone(&callback_state)),
    };
    // Choose algorithm configuration
    // Use boxed builder to allow runtime selection with unified type
    let builder = match mh_name {
        "de" => MhSolver::build_boxed(MhDe::default(), mh_obj),
        "pso" => {
            // Tuned PSO parameters for this implementation
            // This PSO uses: v = velocity*x + cognition*r1*(pbest-x) + social*r2*(gbest-x)
            // where v becomes the new position (not standard PSO)
            // Balance exploration and exploitation
            let pso_tuned = MhPso::default()
                .cognition(1.0) // Equal personal best influence
                .social(1.5) // Stronger global best attraction
                .velocity(0.9); // Moderate inertia for gradual convergence
            MhSolver::build_boxed(pso_tuned, mh_obj)
        }
        "rga" => {
            // RGA works well for constrained optimization with default parameters
            // Note: RGA benefits from larger populations (recommended: 100+)
            MhSolver::build_boxed(MhRga::default(), mh_obj)
        }
        "tlbo" => MhSolver::build_boxed(MhTlbo, mh_obj),
        "fa" | "firefly" => {
            // Firefly works well for constrained optimization
            // alpha: randomization parameter (exploration)
            // beta_min: minimum attractiveness (exploitation)
            // gamma: light absorption coefficient (distance sensitivity)
            let fa_tuned = MhFa::default()
                .alpha(0.5) // Reduced randomization for more focused search
                .beta_min(1.0) // Keep default attractiveness
                .gamma(0.01); // Keep default absorption
            MhSolver::build_boxed(fa_tuned, mh_obj)
        }
        // Unknown names silently fall back to DE.
        _ => MhSolver::build_boxed(MhDe::default(), mh_obj),
    };
    // Estimate generations from maxeval and population
    let pop = population.max(1);
    let gens = (maxeval.max(pop)).div_ceil(pop); // ceil(maxeval/pop)
    // Track iteration count
    let mut current_iter = 0_usize;
    let report_interval = 100; // Report every N evaluations
    // seed(0): fixed seed for reproducible runs. The task closure returns
    // `true` to stop the solver and `false` to keep iterating.
    let solver = builder
        .seed(0)
        .pop_num(pop)
        .task(move |_ctx| {
            current_iter += 1;
            // Report progress periodically
            if let Ok(mut state) = callback_state_task.lock() {
                let evals_since_last = state.eval_count.saturating_sub(state.last_report_eval);
                if evals_since_last >= report_interval {
                    // Create intermediate state for callback
                    let x_array = Array1::from(state.best_params.clone());
                    let intermediate = MHIntermediate {
                        x: x_array,
                        fun: state.best_fitness,
                        iter: current_iter,
                    };
                    // Call the callback
                    let action = callback(&intermediate);
                    state.last_report_eval = state.eval_count;
                    // Check if user wants to stop
                    if matches!(action, CallbackAction::Stop) {
                        return true; // Signal to stop optimization
                    }
                }
            }
            // Continue until max generations
            current_iter >= gens
        })
        .solve();
    // Write back the best parameters
    // NOTE(review): a length mismatch is silently skipped; this is defensive,
    // since the solver's dimension should always equal x.len() here.
    let best_xs = solver.as_best_xs();
    if best_xs.len() == x.len() {
        x.copy_from_slice(best_xs);
    }
    let best_val = *solver.as_best_fit();
    Ok((format!("Metaheuristics({})", mh_name), best_val))
}