use super::*;
use crate::{DeviceError, DeviceResult, QuantumDevice};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

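/// Interface shared by all parameter optimizers in this module.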
pub trait QuantumOptimizer: Send + Sync {
    /// Run the optimization loop starting from `initial_parameters`.
    fn optimize(
        &mut self,
        initial_parameters: Vec<f64>,
        objective_function: Box<dyn ObjectiveFunction + Send + Sync>,
    ) -> DeviceResult<OptimizationResult>;

    /// Return the optimizer configuration.
    fn config(&self) -> &OptimizerConfig;

    /// Reset all internal optimizer state.
    fn reset(&mut self);
}

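/// Objective function minimized by a `QuantumOptimizer`; gradients are optional.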
pub trait ObjectiveFunction: Send + Sync {
    /// Evaluate the objective at the given parameter vector.
    fn evaluate(&self, parameters: &[f64]) -> DeviceResult<f64>;

    /// Analytic gradient, or `None` if only function values are available.
    fn gradient(&self, parameters: &[f64]) -> DeviceResult<Option<Vec<f64>>>;

    /// Descriptive metadata about the objective.
    fn metadata(&self) -> HashMap<String, String>;
}

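/// Configuration for both gradient-based and gradient-free optimizers.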
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizerConfig {
    pub optimizer_type: OptimizerType,
    pub max_iterations: usize,
    pub tolerance: f64,
    pub learning_rate: f64,
    pub momentum: Option<f64>,
    pub adaptive_learning_rate: bool,
    pub bounds: Option<(f64, f64)>,
    pub noise_resilience: bool,
    pub convergence_window: usize,
}

impl Default for OptimizerConfig {
    fn default() -> Self {
        Self {
            optimizer_type: OptimizerType::Adam,
            max_iterations: 1000,
            tolerance: 1e-6,
            learning_rate: 0.01,
            momentum: Some(0.9),
            adaptive_learning_rate: true,
            bounds: None,
            noise_resilience: true,
            convergence_window: 10,
        }
    }
}

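/// Outcome of a completed optimization run.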
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationResult {
    pub optimal_parameters: Vec<f64>,
    pub optimal_value: f64,
    pub iterations: usize,
    pub converged: bool,
    pub function_evaluations: usize,
    pub gradient_evaluations: usize,
    pub optimization_history: Vec<OptimizationStep>,
    pub final_gradient_norm: Option<f64>,
    pub execution_time: std::time::Duration,
}

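/// Record of a single optimization iteration.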
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationStep {
    pub iteration: usize,
    pub parameters: Vec<f64>,
    pub objective_value: f64,
    pub gradient_norm: Option<f64>,
    pub learning_rate: f64,
    pub step_size: f64,
}

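/// Gradient-based optimizer supporting Adam and momentum SGD updates.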
pub struct GradientBasedOptimizer {
    config: OptimizerConfig,
    device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
    state: OptimizerState,
}

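/// Mutable state tracked by `GradientBasedOptimizer` across iterations.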
#[derive(Debug, Clone)]
struct OptimizerState {
    iteration: usize,
    momentum_buffer: Vec<f64>,
    velocity: Vec<f64>,
    squared_gradients: Vec<f64>,
    learning_rate: f64,
    convergence_history: Vec<f64>,
}

impl GradientBasedOptimizer {
    pub fn new(
        device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
        config: OptimizerConfig,
    ) -> Self {
        let state = OptimizerState {
            iteration: 0,
            momentum_buffer: Vec::new(),
            velocity: Vec::new(),
            squared_gradients: Vec::new(),
            learning_rate: config.learning_rate,
            convergence_history: Vec::new(),
        };

        Self {
            config,
            device,
            state,
        }
    }

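    /// Adam update: exponential moving averages of the gradient and its square are
    /// bias-corrected and used to scale a per-parameter step of size `learning_rate`.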
    fn update_parameters_adam(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
    ) -> DeviceResult<Vec<f64>> {
        if self.state.velocity.is_empty() {
            self.state.velocity = vec![0.0; parameters.len()];
            self.state.squared_gradients = vec![0.0; parameters.len()];
        }

        let beta1 = 0.9;
        let beta2 = 0.999;
        let epsilon = 1e-8;

        let mut updated_params = parameters.to_vec();

        for i in 0..parameters.len() {
            self.state.velocity[i] = beta1 * self.state.velocity[i] + (1.0 - beta1) * gradients[i];

            self.state.squared_gradients[i] = beta2 * self.state.squared_gradients[i]
                + (1.0 - beta2) * gradients[i] * gradients[i];

            let m_hat =
                self.state.velocity[i] / (1.0 - beta1.powi(self.state.iteration as i32 + 1));

            let v_hat = self.state.squared_gradients[i]
                / (1.0 - beta2.powi(self.state.iteration as i32 + 1));

            updated_params[i] -= self.state.learning_rate * m_hat / (v_hat.sqrt() + epsilon);

            if let Some((min_bound, max_bound)) = self.config.bounds {
                updated_params[i] = updated_params[i].clamp(min_bound, max_bound);
            }
        }

        Ok(updated_params)
    }

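    /// SGD update with classical momentum: each parameter moves by an accumulated
    /// momentum buffer of past gradient steps.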
    fn update_parameters_sgd(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
    ) -> DeviceResult<Vec<f64>> {
        if self.state.momentum_buffer.is_empty() {
            self.state.momentum_buffer = vec![0.0; parameters.len()];
        }

        let momentum = self.config.momentum.unwrap_or(0.0);
        let mut updated_params = parameters.to_vec();

        for i in 0..parameters.len() {
            self.state.momentum_buffer[i] =
                momentum * self.state.momentum_buffer[i] - self.state.learning_rate * gradients[i];

            updated_params[i] += self.state.momentum_buffer[i];

            if let Some((min_bound, max_bound)) = self.config.bounds {
                updated_params[i] = updated_params[i].clamp(min_bound, max_bound);
            }
        }

        Ok(updated_params)
    }

    fn update_learning_rate(&mut self, gradient_norm: f64) {
        if self.config.adaptive_learning_rate {
            if gradient_norm > 1.0 {
                self.state.learning_rate *= 0.95;
            } else if gradient_norm < 0.1 {
                self.state.learning_rate *= 1.05;
            }

            self.state.learning_rate = self.state.learning_rate.clamp(1e-6, 1.0);
        }
    }

    fn check_convergence(&mut self, objective_value: f64) -> bool {
        self.state.convergence_history.push(objective_value);

        if self.state.convergence_history.len() < self.config.convergence_window {
            return false;
        }

        if self.state.convergence_history.len() > self.config.convergence_window {
            self.state.convergence_history.remove(0);
        }

        let recent_values = &self.state.convergence_history;
        let max_val = recent_values
            .iter()
            .fold(f64::NEG_INFINITY, |a, &b| a.max(b));
        let min_val = recent_values.iter().fold(f64::INFINITY, |a, &b| a.min(b));

        (max_val - min_val).abs() < self.config.tolerance
    }
}

impl QuantumOptimizer for GradientBasedOptimizer {
    fn optimize(
        &mut self,
        initial_parameters: Vec<f64>,
        objective_function: Box<dyn ObjectiveFunction + Send + Sync>,
    ) -> DeviceResult<OptimizationResult> {
        let start_time = std::time::Instant::now();
        let mut parameters = initial_parameters;
        let mut optimization_history = Vec::new();
        let mut function_evaluations = 0;
        let mut gradient_evaluations = 0;
        let mut best_value = f64::INFINITY;
        let mut best_parameters = parameters.clone();

        for iteration in 0..self.config.max_iterations {
            self.state.iteration = iteration;

            let objective_value = objective_function.evaluate(&parameters)?;
            function_evaluations += 1;

            if objective_value < best_value {
                best_value = objective_value;
                best_parameters.clone_from(&parameters);
            }

            let gradients = objective_function.gradient(&parameters)?;

            let (gradient_norm, final_gradient_norm) = if let Some(grad) = gradients {
                gradient_evaluations += 1;
                let norm = grad.iter().map(|g| g * g).sum::<f64>().sqrt();

                parameters = match self.config.optimizer_type {
                    OptimizerType::Adam => self.update_parameters_adam(&parameters, &grad)?,
                    OptimizerType::GradientDescent => {
                        self.update_parameters_sgd(&parameters, &grad)?
                    }
                    _ => {
                        return Err(DeviceError::InvalidInput(format!(
                            "Optimizer type {:?} not supported in gradient-based optimizer",
                            self.config.optimizer_type
                        )))
                    }
                };

                self.update_learning_rate(norm);

                (Some(norm), Some(norm))
            } else {
                (None, None)
            };

            let step_size = if let Some(norm) = gradient_norm {
                self.state.learning_rate * norm
            } else {
                0.0
            };

            optimization_history.push(OptimizationStep {
                iteration,
                parameters: parameters.clone(),
                objective_value,
                gradient_norm,
                learning_rate: self.state.learning_rate,
                step_size,
            });

            if self.check_convergence(objective_value) {
                return Ok(OptimizationResult {
                    optimal_parameters: best_parameters,
                    optimal_value: best_value,
                    iterations: iteration + 1,
                    converged: true,
                    function_evaluations,
                    gradient_evaluations,
                    optimization_history,
                    final_gradient_norm,
                    execution_time: start_time.elapsed(),
                });
            }
        }

        Ok(OptimizationResult {
            optimal_parameters: best_parameters,
            optimal_value: best_value,
            iterations: self.config.max_iterations,
            converged: false,
            function_evaluations,
            gradient_evaluations,
            optimization_history: optimization_history.clone(),
            final_gradient_norm: optimization_history
                .last()
                .and_then(|step| step.gradient_norm),
            execution_time: start_time.elapsed(),
        })
    }

    fn config(&self) -> &OptimizerConfig {
        &self.config
    }

    fn reset(&mut self) {
        self.state = OptimizerState {
            iteration: 0,
            momentum_buffer: Vec::new(),
            velocity: Vec::new(),
            squared_gradients: Vec::new(),
            learning_rate: self.config.learning_rate,
            convergence_history: Vec::new(),
        };
    }
}

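/// Gradient-free optimizer based on a simple evolutionary strategy with elitism,
/// tournament selection, uniform crossover, and bounded random mutation.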
pub struct GradientFreeOptimizer {
    config: OptimizerConfig,
    device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
    population: Vec<Individual>,
    generation: usize,
}

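/// A candidate parameter vector and its cached objective value.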
#[derive(Debug, Clone)]
struct Individual {
    parameters: Vec<f64>,
    fitness: Option<f64>,
}

impl GradientFreeOptimizer {
    pub fn new(
        device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
        config: OptimizerConfig,
    ) -> Self {
        Self {
            config,
            device,
            population: Vec::new(),
            generation: 0,
        }
    }

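    /// Seed the population with uniformly random parameter vectors, drawn from the
    /// configured bounds if present, otherwise from [0, 2π).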
    fn initialize_population(&mut self, parameter_count: usize, population_size: usize) {
        self.population.clear();

        for _ in 0..population_size {
            let parameters = if let Some((min_bound, max_bound)) = self.config.bounds {
                (0..parameter_count)
                    .map(|_| fastrand::f64().mul_add(max_bound - min_bound, min_bound))
                    .collect()
            } else {
                (0..parameter_count)
                    .map(|_| fastrand::f64() * 2.0 * std::f64::consts::PI)
                    .collect()
            };

            self.population.push(Individual {
                parameters,
                fitness: None,
            });
        }
    }

    fn evaluate_population(
        &mut self,
        objective_function: &dyn ObjectiveFunction,
    ) -> DeviceResult<()> {
        for individual in &mut self.population {
            if individual.fitness.is_none() {
                individual.fitness = Some(objective_function.evaluate(&individual.parameters)?);
            }
        }
        Ok(())
    }

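    /// Build the next generation: the best quarter of the population is kept as-is,
    /// and the remainder is filled with mutated crossover offspring of tournament winners.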
    fn evolve_population(&mut self) -> DeviceResult<()> {
        self.population.sort_by(|a, b| {
            let fitness_a = a.fitness.unwrap_or(f64::INFINITY);
            let fitness_b = b.fitness.unwrap_or(f64::INFINITY);
            fitness_a
                .partial_cmp(&fitness_b)
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        let elite_count = self.population.len() / 4;
        let new_population_size = self.population.len();

        let mut new_population = self.population[..elite_count].to_vec();

        while new_population.len() < new_population_size {
            let parent1 = self.tournament_selection(3)?;
            let parent2 = self.tournament_selection(3)?;

            let mut offspring = self.crossover(&parent1, &parent2)?;
            self.mutate(&mut offspring)?;

            new_population.push(offspring);
        }

        self.population = new_population;
        Ok(())
    }

    fn tournament_selection(&self, tournament_size: usize) -> DeviceResult<Individual> {
        let mut best_individual = None;
        let mut best_fitness = f64::INFINITY;

        for _ in 0..tournament_size {
            let idx = fastrand::usize(0..self.population.len());
            let individual = &self.population[idx];
            let fitness = individual.fitness.unwrap_or(f64::INFINITY);

            if fitness < best_fitness {
                best_fitness = fitness;
                best_individual = Some(individual.clone());
            }
        }

        best_individual
            .ok_or_else(|| DeviceError::InvalidInput("Tournament selection failed".to_string()))
    }

    fn crossover(&self, parent1: &Individual, parent2: &Individual) -> DeviceResult<Individual> {
        let parameter_count = parent1.parameters.len();
        let mut offspring_params = Vec::with_capacity(parameter_count);

        for i in 0..parameter_count {
            let param = if fastrand::bool() {
                parent1.parameters[i]
            } else {
                parent2.parameters[i]
            };
            offspring_params.push(param);
        }

        Ok(Individual {
            parameters: offspring_params,
            fitness: None,
        })
    }

    fn mutate(&self, individual: &mut Individual) -> DeviceResult<()> {
        let mutation_rate = 0.1;
        let mutation_strength = 0.1;

        for param in &mut individual.parameters {
            if fastrand::f64() < mutation_rate {
                let mutation = (fastrand::f64() - 0.5) * 2.0 * mutation_strength;
                *param += mutation;

                if let Some((min_bound, max_bound)) = self.config.bounds {
                    *param = param.clamp(min_bound, max_bound);
                }
            }
        }

        // Invalidate the cached fitness so the mutated individual is re-evaluated.
        individual.fitness = None;
        Ok(())
    }

    fn get_best_individual(&self) -> Option<&Individual> {
        self.population.iter().min_by(|a, b| {
            let fitness_a = a.fitness.unwrap_or(f64::INFINITY);
            let fitness_b = b.fitness.unwrap_or(f64::INFINITY);
            fitness_a
                .partial_cmp(&fitness_b)
                .unwrap_or(std::cmp::Ordering::Equal)
        })
    }
}

impl QuantumOptimizer for GradientFreeOptimizer {
    fn optimize(
        &mut self,
        initial_parameters: Vec<f64>,
        objective_function: Box<dyn ObjectiveFunction + Send + Sync>,
    ) -> DeviceResult<OptimizationResult> {
        let start_time = std::time::Instant::now();
        let population_size = 20;
        let parameter_count = initial_parameters.len();
        let mut optimization_history = Vec::new();
        let mut function_evaluations = 0;

        // Reserve one population slot for the caller-provided starting point.
        self.initialize_population(parameter_count, population_size - 1);
        self.population.push(Individual {
            parameters: initial_parameters,
            fitness: None,
        });

        for generation in 0..self.config.max_iterations {
            self.generation = generation;

            self.evaluate_population(objective_function.as_ref())?;
            function_evaluations += self.population.len();

            let best_individual = self.get_best_individual().ok_or_else(|| {
                DeviceError::InvalidInput("No valid individuals in population".to_string())
            })?;

            let best_fitness = best_individual.fitness.unwrap_or(f64::INFINITY);

            optimization_history.push(OptimizationStep {
                iteration: generation,
                parameters: best_individual.parameters.clone(),
                objective_value: best_fitness,
                gradient_norm: None,
                // Learning rate and step size are not meaningful for the evolutionary strategy.
                learning_rate: 0.0,
                step_size: 0.0,
            });

            // Check for convergence over the best values of the last 10 generations.
            if generation > 10 {
                let recent_best: Vec<f64> = optimization_history
                    .iter()
                    .rev()
                    .take(10)
                    .map(|step| step.objective_value)
                    .collect();

                let max_recent = recent_best.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
                let min_recent = recent_best.iter().fold(f64::INFINITY, |a, &b| a.min(b));

                if (max_recent - min_recent).abs() < self.config.tolerance {
                    return Ok(OptimizationResult {
                        optimal_parameters: best_individual.parameters.clone(),
                        optimal_value: best_fitness,
                        iterations: generation + 1,
                        converged: true,
                        function_evaluations,
                        gradient_evaluations: 0,
                        optimization_history,
                        final_gradient_norm: None,
                        execution_time: start_time.elapsed(),
                    });
                }
            }

            if generation < self.config.max_iterations - 1 {
                self.evolve_population()?;
            }
        }

        let best_individual = self.get_best_individual().ok_or_else(|| {
            DeviceError::InvalidInput("No valid individuals in final population".to_string())
        })?;

        Ok(OptimizationResult {
            optimal_parameters: best_individual.parameters.clone(),
            optimal_value: best_individual.fitness.unwrap_or(f64::INFINITY),
            iterations: self.config.max_iterations,
            converged: false,
            function_evaluations,
            gradient_evaluations: 0,
            optimization_history,
            final_gradient_norm: None,
            execution_time: start_time.elapsed(),
        })
    }

    fn config(&self) -> &OptimizerConfig {
        &self.config
    }

    fn reset(&mut self) {
        self.population.clear();
        self.generation = 0;
    }
}

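/// VQE objective: estimates the expectation value of a Hamiltonian for the state
/// prepared by a parameterized ansatz on the configured device.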
pub struct VQEObjectiveFunction {
    hamiltonian: super::variational_algorithms::Hamiltonian,
    ansatz: Box<dyn super::variational_algorithms::VariationalAnsatz>,
    device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
    shots: usize,
}

impl VQEObjectiveFunction {
    pub fn new(
        hamiltonian: super::variational_algorithms::Hamiltonian,
        ansatz: Box<dyn super::variational_algorithms::VariationalAnsatz>,
        device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
        shots: usize,
    ) -> Self {
        Self {
            hamiltonian,
            ansatz,
            device,
            shots,
        }
    }
}

impl ObjectiveFunction for VQEObjectiveFunction {
    fn evaluate(&self, parameters: &[f64]) -> DeviceResult<f64> {
        // Build a blocking runtime so the async device call can be driven from this sync API.
        let rt = tokio::runtime::Runtime::new().map_err(|e| {
            DeviceError::ExecutionFailed(format!("Failed to create tokio runtime: {e}"))
        })?;
        rt.block_on(async {
            let circuit = self.ansatz.build_circuit(parameters)?;
            let device = self.device.read().await;
            let result = Self::execute_circuit_helper(&*device, &circuit, self.shots).await?;

            // Accumulate the energy as a weighted sum of Pauli-term expectation values.
            let mut energy = 0.0;
            let total_shots = result.shots as f64;

            for term in &self.hamiltonian.terms {
                let mut term_expectation = 0.0;

                for (bitstring, count) in &result.counts {
                    let probability = *count as f64 / total_shots;
                    let mut eigenvalue = term.coefficient;

                    for (qubit_idx, pauli_op) in &term.paulis {
                        if let Some(bit_char) = bitstring.chars().nth(*qubit_idx) {
                            let bit_value = if bit_char == '1' { -1.0 } else { 1.0 };

                            match pauli_op {
                                super::variational_algorithms::PauliOperator::Z => {
                                    eigenvalue *= bit_value;
                                }
                                _ => {
                                    // Identity leaves the eigenvalue unchanged; X and Y terms
                                    // would need a basis rotation before measurement and are
                                    // ignored by this simplified estimator.
                                }
                            }
                        }
                    }

                    term_expectation += probability * eigenvalue;
                }

                energy += term_expectation;
            }

            Ok(energy)
        })
    }

    fn gradient(&self, _parameters: &[f64]) -> DeviceResult<Option<Vec<f64>>> {
        // Analytic gradients are not implemented for this objective.
        Ok(None)
    }

    fn metadata(&self) -> HashMap<String, String> {
        let mut metadata = HashMap::new();
        metadata.insert("objective_type".to_string(), "VQE".to_string());
        metadata.insert(
            "hamiltonian_terms".to_string(),
            self.hamiltonian.terms.len().to_string(),
        );
        metadata.insert("shots".to_string(), self.shots.to_string());
        metadata
    }
}

impl VQEObjectiveFunction {
    async fn execute_circuit_helper(
        device: &(dyn QuantumDevice + Send + Sync),
        circuit: &ParameterizedQuantumCircuit,
        shots: usize,
    ) -> DeviceResult<CircuitResult> {
        // Placeholder execution: a real implementation would submit `circuit` to `device`.
        // Here the counts are fabricated as an even split between the all-zeros and
        // all-ones bitstrings.
        let mut counts = std::collections::HashMap::new();
        counts.insert("0".repeat(circuit.num_qubits()), shots / 2);
        counts.insert("1".repeat(circuit.num_qubits()), shots / 2);

        Ok(CircuitResult {
            counts,
            shots,
            metadata: std::collections::HashMap::new(),
        })
    }
}

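/// Convenience constructor for a boxed gradient-based optimizer; unspecified settings
/// come from `OptimizerConfig::default()`.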
pub fn create_gradient_optimizer(
    device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
    optimizer_type: OptimizerType,
    learning_rate: f64,
) -> Box<dyn QuantumOptimizer> {
    let config = OptimizerConfig {
        optimizer_type,
        learning_rate,
        ..Default::default()
    };

    Box::new(GradientBasedOptimizer::new(device, config))
}

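/// Convenience constructor for a boxed gradient-free (evolutionary) optimizer that runs
/// for at most `max_iterations` generations.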
pub fn create_gradient_free_optimizer(
    device: Arc<RwLock<dyn QuantumDevice + Send + Sync>>,
    max_iterations: usize,
) -> Box<dyn QuantumOptimizer> {
    let config = OptimizerConfig {
        // The optimizer type is not consulted by the evolutionary strategy.
        optimizer_type: OptimizerType::GradientDescent,
        max_iterations,
        ..Default::default()
    };

    Box::new(GradientFreeOptimizer::new(device, config))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::create_mock_quantum_device;

    /// Simple quadratic bowl centered at `target`, used to exercise the optimizers.
    struct QuadraticObjective {
        target: Vec<f64>,
    }

    impl ObjectiveFunction for QuadraticObjective {
        fn evaluate(&self, parameters: &[f64]) -> DeviceResult<f64> {
            let mut sum = 0.0;
            for (i, &param) in parameters.iter().enumerate() {
                let target = self.target.get(i).unwrap_or(&0.0);
                sum += (param - target).powi(2);
            }
            Ok(sum)
        }

        fn gradient(&self, parameters: &[f64]) -> DeviceResult<Option<Vec<f64>>> {
            let mut grad = Vec::new();
            for (i, &param) in parameters.iter().enumerate() {
                let target = self.target.get(i).unwrap_or(&0.0);
                grad.push(2.0 * (param - target));
            }
            Ok(Some(grad))
        }

        fn metadata(&self) -> HashMap<String, String> {
            let mut metadata = HashMap::new();
            metadata.insert("objective_type".to_string(), "quadratic".to_string());
            metadata
        }
    }

    #[test]
    fn test_gradient_based_optimizer() {
        let device = create_mock_quantum_device();
        let config = OptimizerConfig {
            learning_rate: 0.3,
            max_iterations: 500,
            tolerance: 1e-6,
            ..Default::default()
        };
        let mut optimizer = GradientBasedOptimizer::new(device, config);

        let objective = Box::new(QuadraticObjective {
            target: vec![1.0, 2.0, 3.0],
        });

        let initial_params = vec![0.0, 0.0, 0.0];
        let result = optimizer
            .optimize(initial_params, objective)
            .expect("Gradient-based optimization should succeed with quadratic objective");

        // The optimizer should make substantial progress toward the minimum at `target`.
        assert!(result.optimal_value < 1.0);
        assert!(result.function_evaluations > 0);
        assert!(result.gradient_evaluations > 0);
    }

    #[test]
    fn test_gradient_free_optimizer() {
        let device = create_mock_quantum_device();
        let config = OptimizerConfig {
            max_iterations: 50,
            tolerance: 1e-3,
            ..Default::default()
        };
        let mut optimizer = GradientFreeOptimizer::new(device, config);

        let objective = Box::new(QuadraticObjective {
            target: vec![1.0, 2.0],
        });

        let initial_params = vec![0.0, 0.0];
        let result = optimizer
            .optimize(initial_params, objective)
            .expect("Gradient-free optimization should succeed with quadratic objective");

        // The evolutionary search is stochastic, so only a loose bound is checked.
        assert!(result.optimal_value < 5.0);
        assert!(result.function_evaluations > 0);
        // No gradients are ever evaluated by the gradient-free optimizer.
        assert_eq!(result.gradient_evaluations, 0);
    }

    #[test]
    fn test_optimization_result() {
        let result = OptimizationResult {
            optimal_parameters: vec![1.0, 2.0],
            optimal_value: 0.5,
            iterations: 100,
            converged: true,
            function_evaluations: 200,
            gradient_evaluations: 100,
            optimization_history: vec![],
            final_gradient_norm: Some(1e-6),
            execution_time: std::time::Duration::from_millis(500),
        };

        assert_eq!(result.optimal_parameters.len(), 2);
        assert_eq!(result.optimal_value, 0.5);
        assert!(result.converged);
    }

    #[test]
    fn test_optimizer_config() {
        let config = OptimizerConfig {
            optimizer_type: OptimizerType::Adam,
            learning_rate: 0.001,
            max_iterations: 500,
            ..Default::default()
        };

        assert_eq!(config.optimizer_type, OptimizerType::Adam);
        assert_eq!(config.learning_rate, 0.001);
        assert_eq!(config.max_iterations, 500);
    }
}