use super::config::*;
use crate::DeviceResult;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::random::prelude::*;
use scirs2_core::SliceRandomExt;
use std::collections::HashMap;
use std::time::Duration;

#[cfg(not(feature = "scirs2"))]
use super::fallback_scirs2;
#[cfg(feature = "scirs2")]
use super::scirs2_optimize;

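/// A variational optimization problem: a parametric ansatz circuit paired with
/// an objective function, plus optional parameter bounds and constraints.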
pub struct OptimizationProblem {
    pub ansatz: super::circuits::ParametricCircuit,
    pub objective_function: Box<dyn super::objectives::ObjectiveFunction>,
    pub bounds: Option<Vec<(f64, f64)>>,
    pub constraints: Vec<OptimizationConstraint>,
}

impl OptimizationProblem {
    pub fn new(
        ansatz: super::circuits::ParametricCircuit,
        objective_function: Box<dyn super::objectives::ObjectiveFunction>,
    ) -> Self {
        Self {
            ansatz,
            objective_function,
            bounds: None,
            constraints: Vec::new(),
        }
    }

    #[must_use]
    pub fn with_bounds(mut self, bounds: Vec<(f64, f64)>) -> Self {
        self.bounds = Some(bounds);
        self
    }

    #[must_use]
    pub fn with_constraint(mut self, constraint: OptimizationConstraint) -> Self {
        self.constraints.push(constraint);
        self
    }

    pub fn evaluate_objective(&self, parameters: &Array1<f64>) -> DeviceResult<f64> {
        // Placeholder objective (a quadratic bowl with its minimum at zero). A
        // full implementation would bind `parameters` to the ansatz and evaluate
        // the stored objective function on the resulting circuit.
        Ok(parameters.iter().map(|&x| x.powi(2)).sum::<f64>())
    }

    pub fn compute_gradient(&self, parameters: &Array1<f64>) -> DeviceResult<Array1<f64>> {
        // Parameter-shift rule: for standard Pauli-rotation gates,
        // df/dθ_i = [f(θ + (π/2)e_i) - f(θ - (π/2)e_i)] / 2.
        let n_params = parameters.len();
        let mut gradient = Array1::zeros(n_params);
        let shift = std::f64::consts::PI / 2.0;

        for i in 0..n_params {
            let mut params_plus = parameters.clone();
            let mut params_minus = parameters.clone();

            params_plus[i] += shift;
            params_minus[i] -= shift;

            let f_plus = self.evaluate_objective(&params_plus)?;
            let f_minus = self.evaluate_objective(&params_minus)?;

            gradient[i] = (f_plus - f_minus) / 2.0;
        }

        Ok(gradient)
    }

    pub fn check_constraints(&self, parameters: &Array1<f64>) -> bool {
        self.constraints
            .iter()
            .all(|constraint| constraint.is_satisfied(parameters))
    }
}

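/// A constraint on the parameter vector. `Equality` and `Inequality` are
/// checked against the sum of the parameters; `Bounds` is applied element-wise.
///
/// ```ignore
/// // Accept parameter vectors whose components sum to 1.0 (within 1e-6):
/// let c = OptimizationConstraint::equality(1.0, 1e-6);
/// assert!(c.is_satisfied(&Array1::from(vec![0.5, 0.5])));
/// ```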
#[derive(Debug, Clone)]
pub struct OptimizationConstraint {
    pub constraint_type: ConstraintType,
    pub bounds: Vec<f64>,
    pub tolerance: f64,
}

impl OptimizationConstraint {
    pub fn equality(target: f64, tolerance: f64) -> Self {
        Self {
            constraint_type: ConstraintType::Equality,
            bounds: vec![target],
            tolerance,
        }
    }

    pub fn inequality(upper_bound: f64) -> Self {
        Self {
            constraint_type: ConstraintType::Inequality,
            bounds: vec![upper_bound],
            tolerance: 0.0,
        }
    }

    pub fn bounds(lower: f64, upper: f64) -> Self {
        Self {
            constraint_type: ConstraintType::Bounds,
            bounds: vec![lower, upper],
            tolerance: 0.0,
        }
    }

    pub fn is_satisfied(&self, parameters: &Array1<f64>) -> bool {
        match self.constraint_type {
            ConstraintType::Equality => {
                if self.bounds.is_empty() {
                    return true;
                }
                let target = self.bounds[0];
                let value = parameters.sum();
                (value - target).abs() <= self.tolerance
            }
            ConstraintType::Inequality => {
                if self.bounds.is_empty() {
                    return true;
                }
                let upper = self.bounds[0];
                let value = parameters.sum();
                value <= upper + self.tolerance
            }
            ConstraintType::Bounds => {
                if self.bounds.len() < 2 {
                    return true;
                }
                let lower = self.bounds[0];
                let upper = self.bounds[1];
                parameters.iter().all(|&x| x >= lower && x <= upper)
            }
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConstraintType {
    Equality,
    Inequality,
    Bounds,
}

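/// The outcome of a single optimization run: the best parameters and value
/// found, convergence bookkeeping, and a comparison across the optimizers used.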
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    pub optimal_parameters: Array1<f64>,
    pub optimal_value: f64,
    pub success: bool,
    pub num_iterations: usize,
    pub num_function_evaluations: usize,
    pub message: String,
    pub optimization_time: Duration,
    pub trajectory: OptimizationTrajectory,
    pub num_restarts: usize,
    pub optimizer_comparison: OptimizerComparison,
}

impl OptimizationResult {
    pub fn new(optimal_parameters: Array1<f64>, optimal_value: f64, success: bool) -> Self {
        Self {
            optimal_parameters,
            optimal_value,
            success,
            num_iterations: 0,
            num_function_evaluations: 0,
            message: String::new(),
            optimization_time: Duration::from_secs(0),
            trajectory: OptimizationTrajectory::new(),
            num_restarts: 0,
            optimizer_comparison: OptimizerComparison::new(),
        }
    }

    #[must_use]
    pub const fn with_iterations(mut self, iterations: usize, evaluations: usize) -> Self {
        self.num_iterations = iterations;
        self.num_function_evaluations = evaluations;
        self
    }

    #[must_use]
    pub const fn with_timing(mut self, duration: Duration) -> Self {
        self.optimization_time = duration;
        self
    }

    #[must_use]
    pub fn with_trajectory(mut self, trajectory: OptimizationTrajectory) -> Self {
        self.trajectory = trajectory;
        self
    }

    pub const fn is_converged(&self) -> bool {
        self.success && self.trajectory.convergence_indicators.objective_convergence
    }

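    /// Relative improvement of the objective over the run: |f_0 - f*| / |f_0|,
    /// where f_0 is the first recorded objective value and f* the final one.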
    pub fn convergence_rate(&self) -> f64 {
        if self.trajectory.objective_history.len() < 2 {
            return 0.0;
        }

        let initial = self.trajectory.objective_history[0];
        let final_val = self.optimal_value;

        if initial == 0.0 {
            0.0
        } else {
            (initial - final_val).abs() / initial.abs()
        }
    }
}

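/// Records per-optimizer performance and maintains a ranking by best objective
/// value (ascending, since lower is better).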
#[derive(Debug, Clone)]
pub struct OptimizerComparison {
    pub optimizer_results: HashMap<String, OptimizerPerformance>,
    pub best_optimizer: Option<String>,
    pub ranking: Vec<String>,
}

impl Default for OptimizerComparison {
    fn default() -> Self {
        Self::new()
    }
}

impl OptimizerComparison {
    pub fn new() -> Self {
        Self {
            optimizer_results: HashMap::new(),
            best_optimizer: None,
            ranking: Vec::new(),
        }
    }

    pub fn add_result(&mut self, optimizer_name: String, performance: OptimizerPerformance) {
        let best_value = performance.best_value;
        self.optimizer_results
            .insert(optimizer_name.clone(), performance);

        // Update the incumbent best optimizer if this result improves on it.
        if let Some(ref current_best) = self.best_optimizer {
            let current_best_value = self.optimizer_results[current_best].best_value;
            if best_value < current_best_value {
                self.best_optimizer = Some(optimizer_name);
            }
        } else {
            self.best_optimizer = Some(optimizer_name);
        }

        self.update_ranking();
    }

    fn update_ranking(&mut self) {
        let mut optimizers: Vec<(String, f64)> = self
            .optimizer_results
            .iter()
            .map(|(name, perf)| (name.clone(), perf.best_value))
            .collect();

        // Sort ascending by best value; NaN comparisons fall back to Equal.
        optimizers.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));

        self.ranking = optimizers.into_iter().map(|(name, _)| name).collect();
    }
}

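/// Summary statistics for a single optimizer's run, used when ranking
/// optimizers against each other.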
#[derive(Debug, Clone)]
pub struct OptimizerPerformance {
    pub best_value: f64,
    pub convergence_iterations: usize,
    pub total_evaluations: usize,
    pub execution_time: Duration,
    pub success_rate: f64,
    pub robustness_score: f64,
}

impl OptimizerPerformance {
    pub const fn new(best_value: f64) -> Self {
        Self {
            best_value,
            convergence_iterations: 0,
            total_evaluations: 0,
            execution_time: Duration::from_secs(0),
            success_rate: 0.0,
            robustness_score: 0.0,
        }
    }
}

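/// Drives the overall VQA optimization: generates initial parameters according
/// to the configured strategy, then runs the primary optimizer and any
/// configured fallbacks, keeping the best result.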
pub struct OptimizationStrategy {
    pub config: VQAOptimizationConfig,
}

impl OptimizationStrategy {
    pub const fn new(config: VQAOptimizationConfig) -> Self {
        Self { config }
    }

    pub fn generate_initial_parameters(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        match self.config.multi_start_config.initial_point_strategy {
            InitialPointStrategy::Random => self.generate_random_initial(num_params),
            InitialPointStrategy::LatinHypercube => self.generate_latin_hypercube(num_params),
            InitialPointStrategy::Sobol => self.generate_sobol_sequence(num_params),
            InitialPointStrategy::Grid => self.generate_grid_points(num_params),
            InitialPointStrategy::PreviousBest => self.generate_from_previous_best(num_params),
            InitialPointStrategy::AdaptiveSampling => self.generate_adaptive_sample(num_params),
        }
    }

    fn generate_random_initial(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        let mut rng = thread_rng();

        // Sample each parameter uniformly from [-π, π).
        let params = Array1::from_shape_fn(num_params, |_| {
            rng.gen_range(-std::f64::consts::PI..std::f64::consts::PI)
        });

        Ok(params)
    }

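    // Latin hypercube sampling: split [0, 1) into `num_params` equal strata,
    // draw one jittered point per stratum (with the stratum order shuffled),
    // then map each point affinely onto [-π, π).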
    fn generate_latin_hypercube(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        let mut rng = thread_rng();

        let mut indices: Vec<usize> = (0..num_params).collect();
        indices.shuffle(&mut rng);

        let params = Array1::from_shape_fn(num_params, |i| {
            let segment = indices[i] as f64 / num_params as f64;
            let offset = rng.gen::<f64>() / num_params as f64;
            let uniform_sample = segment + offset;

            (2.0 * std::f64::consts::PI).mul_add(uniform_sample, -std::f64::consts::PI)
        });

        Ok(params)
    }

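    // Note: this is a jittered stratified sequence standing in for a true Sobol
    // low-discrepancy sequence, which would need a dedicated generator.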
    fn generate_sobol_sequence(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        let mut rng = thread_rng();

        let params = Array1::from_shape_fn(num_params, |i| {
            let sobol_val = (i as f64 + 0.5) / num_params as f64;
            let jittered = rng.gen::<f64>().mul_add(0.1, sobol_val) - 0.05;
            (2.0 * std::f64::consts::PI).mul_add(jittered.clamp(0.0, 1.0), -std::f64::consts::PI)
        });

        Ok(params)
    }

    fn generate_grid_points(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        // Coarse per-dimension grid resolution; for typical parameter counts
        // this yields a one- or two-point grid per dimension.
        let grid_size = (num_params as f64).powf(1.0 / num_params as f64).ceil() as usize;

        let params = Array1::from_shape_fn(num_params, |i| {
            let grid_pos = i % grid_size;
            let grid_val = grid_pos as f64 / (grid_size - 1).max(1) as f64;
            (2.0 * std::f64::consts::PI).mul_add(grid_val, -std::f64::consts::PI)
        });

        Ok(params)
    }

    fn generate_from_previous_best(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        // No warm-start history is stored here yet, so fall back to uniform
        // random sampling over [-π, π).
        let mut rng = thread_rng();

        let params = Array1::from_shape_fn(num_params, |_| {
            rng.gen_range(-std::f64::consts::PI..std::f64::consts::PI)
        });

        Ok(params)
    }

    fn generate_adaptive_sample(&self, num_params: usize) -> DeviceResult<Array1<f64>> {
        // Adaptive sampling currently delegates to Latin hypercube sampling.
        self.generate_latin_hypercube(num_params)
    }

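    /// Runs the primary optimizer and then every configured fallback on the same
    /// initial point, recording per-optimizer performance and returning the best
    /// result found.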
    pub fn execute_optimization(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        let mut comparison = OptimizerComparison::new();

        let primary_result =
            self.run_single_optimizer(&self.config.primary_optimizer, problem, initial_params)?;

        let primary_performance = OptimizerPerformance::new(primary_result.optimal_value);
        comparison.add_result(
            self.config.primary_optimizer.to_string(),
            primary_performance,
        );

        let mut best_result = primary_result;

        for fallback_optimizer in &self.config.fallback_optimizers {
            let fallback_result =
                self.run_single_optimizer(fallback_optimizer, problem, initial_params)?;

            let fallback_performance = OptimizerPerformance::new(fallback_result.optimal_value);
            comparison.add_result(fallback_optimizer.to_string(), fallback_performance);

            if fallback_result.optimal_value < best_result.optimal_value {
                best_result = fallback_result;
            }
        }

        best_result.optimizer_comparison = comparison;
        Ok(best_result)
    }

    fn run_single_optimizer(
        &self,
        optimizer: &VQAOptimizer,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        let start_time = std::time::Instant::now();

        // Optimizers without a dedicated implementation are routed to the
        // generic gradient-descent fallback.
        let result = match optimizer {
            VQAOptimizer::LBFGSB => self.run_lbfgsb(problem, initial_params),
            VQAOptimizer::COBYLA => self.run_cobyla(problem, initial_params),
            VQAOptimizer::NelderMead => self.run_nelder_mead(problem, initial_params),
            VQAOptimizer::DifferentialEvolution => {
                self.run_differential_evolution(problem, initial_params)
            }
            _ => self.run_fallback_optimizer(problem, initial_params),
        }?;

        let duration = start_time.elapsed();
        Ok(result.with_timing(duration))
    }

    fn run_lbfgsb(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        #[cfg(feature = "scirs2")]
        {
            use super::scirs2_optimize::{minimize, unconstrained::Method};

            let objective = |x: &scirs2_core::ndarray::ArrayView1<f64>| -> f64 {
                let owned_array = Array1::from_vec(x.to_vec());
                problem
                    .evaluate_objective(&owned_array)
                    .unwrap_or(f64::INFINITY)
            };

            // Translate the problem bounds (if any) into scirs2 bounds.
            let bounds = problem.bounds.as_ref().map(|bounds| {
                use super::scirs2_optimize::unconstrained::Bounds;
                let bound_pairs: Vec<(Option<f64>, Option<f64>)> = bounds
                    .iter()
                    .map(|(low, high)| (Some(*low), Some(*high)))
                    .collect();
                Bounds::new(&bound_pairs)
            });
            let options = Some(scirs2_optimize::prelude::Options {
                bounds,
                max_iter: self.config.max_iterations,
                ftol: self.config.convergence_tolerance,
                ..Default::default()
            });

            let params_slice = initial_params.as_slice().ok_or_else(|| {
                crate::DeviceError::ExecutionFailed(
                    "Failed to get contiguous slice from parameters".into(),
                )
            })?;
            let result =
                minimize(objective, params_slice, Method::LBFGSB, options).map_err(|e| {
                    crate::DeviceError::OptimizationError(format!("L-BFGS-B failed: {e}"))
                })?;

            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(vec![result.fun]);
            trajectory.convergence_indicators.objective_convergence = result.success;

            Ok(
                OptimizationResult::new(result.x, result.fun, result.success)
                    .with_iterations(result.nit, result.nfev)
                    .with_trajectory(trajectory),
            )
        }

        #[cfg(not(feature = "scirs2"))]
        {
            // Fallback: plain gradient descent with a decaying step size.
            let mut params = initial_params.clone();
            let mut value = problem.evaluate_objective(&params)?;
            let learning_rate = 0.01;
            let max_iterations = self.config.max_iterations;
            let mut history = Vec::new();

            for iteration in 0..max_iterations {
                history.push(value);

                let gradient = problem.compute_gradient(&params)?;
                let gradient_norm = gradient.iter().map(|&x| x * x).sum::<f64>().sqrt();

                if gradient_norm < self.config.convergence_tolerance {
                    break;
                }

                let step_size = learning_rate / (1.0 + iteration as f64 * 0.01);
                params = &params - &(&gradient * step_size);
                value = problem.evaluate_objective(&params)?;

                if value < self.config.convergence_tolerance {
                    break;
                }
            }

            let num_iterations = history.len();
            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(history);
            trajectory.convergence_indicators.objective_convergence =
                value < self.config.convergence_tolerance;

            Ok(OptimizationResult::new(params, value, true)
                .with_iterations(num_iterations, num_iterations)
                .with_trajectory(trajectory))
        }
    }

    fn run_cobyla(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        #[cfg(feature = "scirs2")]
        {
            use super::scirs2_optimize::{minimize, unconstrained::Method};

            // COBYLA itself is not wired up here; Nelder-Mead serves as a
            // derivative-free stand-in.
            let objective = |x: &scirs2_core::ndarray::ArrayView1<f64>| -> f64 {
                let owned_array = Array1::from_vec(x.to_vec());
                problem
                    .evaluate_objective(&owned_array)
                    .unwrap_or(f64::INFINITY)
            };

            let params_slice = initial_params.as_slice().ok_or_else(|| {
                crate::DeviceError::ExecutionFailed(
                    "Failed to get contiguous slice from parameters".into(),
                )
            })?;
            let result =
                minimize(objective, params_slice, Method::NelderMead, None).map_err(|e| {
                    crate::DeviceError::OptimizationError(format!("Optimization failed: {e}"))
                })?;

            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(vec![result.fun]);
            trajectory.convergence_indicators.objective_convergence = result.success;

            Ok(
                OptimizationResult::new(result.x, result.fun, result.success)
                    .with_iterations(result.nit, result.nfev)
                    .with_trajectory(trajectory),
            )
        }

        #[cfg(not(feature = "scirs2"))]
        {
            // Fallback: a crude random-walk search with a penalty for violated
            // constraints, standing in for COBYLA.
            let mut params = initial_params.clone();
            let mut value = problem.evaluate_objective(&params)?;
            let mut history = Vec::new();
            let penalty_weight = 1000.0;
            let mut rng = thread_rng();

            for _ in 0..self.config.max_iterations {
                history.push(value);

                // Penalize constraint violations.
                let mut penalty = 0.0;
                for constraint in &problem.constraints {
                    if !constraint.is_satisfied(&params) {
                        penalty += penalty_weight;
                    }
                }

                let penalized_value = value + penalty;

                if penalized_value < self.config.convergence_tolerance {
                    break;
                }

                // Randomly perturb every parameter.
                for param in params.iter_mut() {
                    *param += rng.gen_range(-0.1..0.1);
                }

                // Clamp back into bounds where they are given.
                if let Some(ref bounds) = problem.bounds {
                    for (i, param) in params.iter_mut().enumerate() {
                        if i < bounds.len() {
                            *param = param.clamp(bounds[i].0, bounds[i].1);
                        }
                    }
                }

                value = problem.evaluate_objective(&params)?;
            }

            let num_iterations = history.len();
            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(history);
            trajectory.convergence_indicators.objective_convergence =
                value < self.config.convergence_tolerance;

            Ok(OptimizationResult::new(params, value, true)
                .with_iterations(num_iterations, num_iterations)
                .with_trajectory(trajectory))
        }
    }

    fn run_nelder_mead(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        // Placeholder: evaluates the initial point only, without iterating.
        let optimal_value = problem.evaluate_objective(initial_params)?;
        Ok(OptimizationResult::new(
            initial_params.clone(),
            optimal_value,
            true,
        ))
    }

    fn run_differential_evolution(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        #[cfg(feature = "scirs2")]
        {
            use super::scirs2_optimize::{
                differential_evolution, global::DifferentialEvolutionOptions,
            };

            let objective = |x: &ArrayView1<f64>| -> f64 {
                let array1 = x.to_owned();
                problem.evaluate_objective(&array1).unwrap_or(f64::INFINITY)
            };

            // Default to [-π, π] per parameter when no bounds are given.
            let bounds = if let Some(ref bounds) = problem.bounds {
                bounds.clone()
            } else {
                vec![(-std::f64::consts::PI, std::f64::consts::PI); initial_params.len()]
            };

            let de_params = DifferentialEvolutionOptions {
                popsize: (initial_params.len() * 15).max(30),
                mutation: (0.5, 1.0),
                recombination: 0.7,
                seed: None,
                atol: self.config.convergence_tolerance,
                tol: self.config.convergence_tolerance,
                maxiter: self.config.max_iterations,
                polish: true,
                init: "latinhypercube".to_string(),
                updating: "immediate".to_string(),
                x0: Some(initial_params.clone()),
                parallel: None,
            };

            let result = differential_evolution(objective, bounds, Some(de_params), None)
                .map_err(|e| {
                    crate::DeviceError::OptimizationError(format!(
                        "Differential Evolution failed: {e}"
                    ))
                })?;

            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(vec![result.fun]);
            trajectory.convergence_indicators.objective_convergence = result.success;

            Ok(
                OptimizationResult::new(result.x, result.fun, result.success)
                    .with_iterations(result.nit, result.nfev)
                    .with_trajectory(trajectory),
            )
        }

        #[cfg(not(feature = "scirs2"))]
        {
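            // Classic DE/rand/1/bin: for each member, build a mutant from three
            // distinct others (a + F*(b - c)), cross it binomially with the
            // current member, and keep the trial only if it improves.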
            let num_params = initial_params.len();
            let population_size = (num_params * 10).max(20);
            let mutation_factor = 0.8;
            let crossover_prob = 0.7;

            let mut rng = thread_rng();
            let mut population = Vec::new();
            let mut fitness = Vec::new();

            let bounds = if let Some(ref bounds) = problem.bounds {
                bounds.clone()
            } else {
                vec![(-std::f64::consts::PI, std::f64::consts::PI); num_params]
            };

            // Random initial population within bounds.
            for _ in 0..population_size {
                let individual =
                    Array1::from_shape_fn(num_params, |i| rng.gen_range(bounds[i].0..bounds[i].1));
                let fit = problem.evaluate_objective(&individual)?;
                population.push(individual);
                fitness.push(fit);
            }

            let mut best_idx = 0;
            let mut best_fitness = fitness[0];
            for (i, &fit) in fitness.iter().enumerate() {
                if fit < best_fitness {
                    best_fitness = fit;
                    best_idx = i;
                }
            }

            let mut history = vec![best_fitness];

            for _generation in 0..self.config.max_iterations {
                for i in 0..population_size {
                    // Pick three distinct members other than `i`.
                    let mut indices: Vec<usize> =
                        (0..population_size).filter(|&x| x != i).collect();
                    indices.shuffle(&mut rng);
                    let (a, b, c) = (indices[0], indices[1], indices[2]);

                    // Mutation: a + F * (b - c), clamped back into bounds.
                    let mut mutant =
                        &population[a] + &((&population[b] - &population[c]) * mutation_factor);

                    for (j, param) in mutant.iter_mut().enumerate() {
                        *param = param.clamp(bounds[j].0, bounds[j].1);
                    }

                    // Binomial crossover with one guaranteed mutant component.
                    let mut trial = population[i].clone();
                    let crossover_point = rng.gen_range(0..num_params);
                    for j in 0..num_params {
                        if rng.gen::<f64>() < crossover_prob || j == crossover_point {
                            trial[j] = mutant[j];
                        }
                    }

                    // Greedy selection: keep the trial only if it improves.
                    let trial_fitness = problem.evaluate_objective(&trial)?;
                    if trial_fitness < fitness[i] {
                        population[i] = trial;
                        fitness[i] = trial_fitness;

                        if trial_fitness < best_fitness {
                            best_fitness = trial_fitness;
                            best_idx = i;
                        }
                    }
                }

                history.push(best_fitness);

                if best_fitness < self.config.convergence_tolerance {
                    break;
                }
            }

            let mut trajectory = OptimizationTrajectory::new();
            trajectory.objective_history = Array1::from(history);
            trajectory.convergence_indicators.objective_convergence =
                best_fitness < self.config.convergence_tolerance;

            Ok(
                OptimizationResult::new(population[best_idx].clone(), best_fitness, true)
                    .with_iterations(
                        trajectory.objective_history.len(),
                        trajectory.objective_history.len() * population_size,
                    )
                    .with_trajectory(trajectory),
            )
        }
    }

    fn run_fallback_optimizer(
        &self,
        problem: &OptimizationProblem,
        initial_params: &Array1<f64>,
    ) -> DeviceResult<OptimizationResult> {
        // Generic fallback: fixed-step gradient descent with a hard-coded
        // iteration cap.
        let mut params = initial_params.clone();
        let mut value = problem.evaluate_objective(&params)?;
        let learning_rate = 0.01;
        let max_iterations = 100;

        for _ in 0..max_iterations {
            let gradient = problem.compute_gradient(&params)?;
            params = &params - &(&gradient * learning_rate);
            let new_value = problem.evaluate_objective(&params)?;

            // Keep `value` in sync with `params` before testing convergence, so
            // the returned value always corresponds to the returned parameters.
            let delta = (value - new_value).abs();
            value = new_value;

            if delta < self.config.convergence_tolerance {
                break;
            }
        }

        Ok(OptimizationResult::new(params, value, true))
    }
}