use std::path::Path;

use scirs2_core::error_context;
use scirs2_core::ndarray::{Array1, ArrayView1};

use crate::distributed::{DistributedConfig, DistributedOptimizationContext, MPIInterface};
use crate::error::{ScirsError, ScirsResult};
use crate::gpu::{
    acceleration::{AccelerationConfig, AccelerationManager},
    GpuOptimizationConfig, GpuOptimizationContext,
};
use crate::result::OptimizeResults;
use crate::self_tuning::{
    AdaptationStrategy, ParameterValue, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
use crate::visualization::{
    tracking::TrajectoryTracker, OptimizationVisualizer, VisualizationConfig,
};

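/// Configuration for the unified optimizer, aggregating the optional
/// subsystems (distributed execution, self-tuning, GPU acceleration, and
/// visualization) together with the core convergence settings.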
#[derive(Clone)]
pub struct UnifiedOptimizationConfig {
    pub use_distributed: bool,
    pub distributed_config: Option<DistributedConfig>,

    pub use_self_tuning: bool,
    pub self_tuning_config: Option<SelfTuningConfig>,

    pub use_gpu: bool,
    pub gpu_config: Option<GpuOptimizationConfig>,
    pub acceleration_config: Option<AccelerationConfig>,

    pub enable_visualization: bool,
    pub visualization_config: Option<VisualizationConfig>,

    pub output_directory: Option<String>,

    pub max_nit: usize,
    pub function_tolerance: f64,
    pub gradient_tolerance: f64,
}

impl Default for UnifiedOptimizationConfig {
    fn default() -> Self {
        Self {
            use_distributed: false,
            distributed_config: None,
            use_self_tuning: true,
            self_tuning_config: Some(SelfTuningConfig::default()),
            use_gpu: false,
            gpu_config: None,
            acceleration_config: None,
            enable_visualization: true,
            visualization_config: Some(VisualizationConfig::default()),
            output_directory: Some("optimization_output".to_string()),
            max_nit: 1000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }
}

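/// Orchestrates gradient-based optimization with optional distributed
/// execution, self-tuning, GPU acceleration, and trajectory visualization.
/// Each subsystem is activated through [`UnifiedOptimizationConfig`].
///
/// A minimal usage sketch (not compiled as a doctest; assumes the `MockMPI`
/// helper referenced in the tests below, and an objective without an analytic
/// gradient, so finite differences are used):
///
/// ```ignore
/// // Inside a function returning ScirsResult<()>.
/// use scirs2_core::ndarray::{array, Array1, ArrayView1};
///
/// let config = presets::interactive_config();
/// let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
///     UnifiedOptimizer::new(config, None)?;
///
/// let sphere = |x: &ArrayView1<f64>| x.iter().map(|&v| v * v).sum::<f64>();
/// let results = optimizer.optimize(
///     sphere,
///     None::<fn(&ArrayView1<f64>) -> Array1<f64>>,
///     &array![1.0, -2.0],
///     None,
/// )?;
/// results.print_summary();
/// ```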
pub struct UnifiedOptimizer<M: MPIInterface> {
    config: UnifiedOptimizationConfig,

    distributed_context: Option<DistributedOptimizationContext<M>>,
    self_tuning_optimizer: Option<SelfTuningOptimizer>,
    gpu_context: Option<GpuOptimizationContext>,
    acceleration_manager: Option<AccelerationManager>,
    visualizer: Option<OptimizationVisualizer>,
    trajectory_tracker: Option<TrajectoryTracker>,

    previous_function_value: Option<f64>,
}

impl<M: MPIInterface> UnifiedOptimizer<M> {
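    /// Builds an optimizer from `config`, initializing only the subsystems
    /// that are enabled. Returns an error if distributed optimization is
    /// requested without both an MPI interface and a distributed config.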
    pub fn new(config: UnifiedOptimizationConfig, mpi: Option<M>) -> ScirsResult<Self> {
        let distributed_context = if config.use_distributed {
            if let (Some(mpi_interface), Some(dist_config)) = (mpi, &config.distributed_config) {
                Some(DistributedOptimizationContext::new(
                    mpi_interface,
                    dist_config.clone(),
                ))
            } else {
                return Err(ScirsError::InvalidInput(error_context!(
                    "MPI interface and distributed config required for distributed optimization"
                )));
            }
        } else {
            None
        };

        let self_tuning_optimizer = if config.use_self_tuning {
            let tuning_config = config.self_tuning_config.clone().unwrap_or_default();
            Some(SelfTuningOptimizer::new(tuning_config))
        } else {
            None
        };

        let (gpu_context, acceleration_manager) = if config.use_gpu {
            let gpu_config = config.gpu_config.clone().unwrap_or_default();
            let gpu_ctx = GpuOptimizationContext::new(gpu_config)?;

            let accel_config = config.acceleration_config.clone().unwrap_or_default();
            let accel_mgr = AccelerationManager::new(accel_config);

            (Some(gpu_ctx), Some(accel_mgr))
        } else {
            (None, None)
        };

        let (visualizer, trajectory_tracker) = if config.enable_visualization {
            let vis_config = config.visualization_config.clone().unwrap_or_default();
            let vis = OptimizationVisualizer::with_config(vis_config);
            let tracker = TrajectoryTracker::new();
            (Some(vis), Some(tracker))
        } else {
            (None, None)
        };

        Ok(Self {
            config,
            distributed_context,
            self_tuning_optimizer,
            gpu_context,
            acceleration_manager,
            visualizer,
            trajectory_tracker,
            previous_function_value: None,
        })
    }

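    /// Registers a parameter with the self-tuning optimizer, if one is
    /// enabled; otherwise this is a no-op.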
    pub fn register_tunable_parameter<T>(
        &mut self,
        name: &str,
        param: TunableParameter<T>,
    ) -> ScirsResult<()>
    where
        T: Clone + PartialOrd + std::fmt::Debug + Send + Sync + 'static,
    {
        if let Some(ref mut tuner) = self.self_tuning_optimizer {
            tuner.register_parameter(name, param)?;
        }
        Ok(())
    }

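    /// Runs the main optimization loop: each iteration evaluates the gradient
    /// (analytic if `gradient` is given, central finite differences
    /// otherwise), checks the gradient-norm stopping criterion, lets the
    /// self-tuner adapt parameters, computes a descent direction, performs a
    /// line search, projects onto `bounds`, and records the trajectory.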
    pub fn optimize<F, G>(
        &mut self,
        function: F,
        gradient: Option<G>,
        initial_guess: &Array1<f64>,
        bounds: Option<&[(Option<f64>, Option<f64>)]>,
    ) -> ScirsResult<UnifiedOptimizationResults>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
        G: Fn(&ArrayView1<f64>) -> Array1<f64> + Clone + Send + Sync,
    {
        let mut current_x = initial_guess.clone();
        let mut current_f = function(&current_x.view());
        let mut iteration = 0;
        let mut function_evaluations = 1;
        let mut gradient_evaluations = 0;
        // Set when a convergence criterion fires, as opposed to exhausting
        // the iteration budget.
        let mut converged = false;

        if let Some(ref mut tracker) = self.trajectory_tracker {
            tracker.record(iteration, &current_x.view(), current_f);
        }

        let start_time = std::time::Instant::now();

        while iteration < self.config.max_nit {
            iteration += 1;

            let current_grad = if let Some(ref grad_fn) = gradient {
                gradient_evaluations += 1;
                grad_fn(&current_x.view())
            } else {
                self.compute_numerical_gradient(&function, &current_x)?
            };

            let grad_norm = current_grad.iter().map(|&g| g * g).sum::<f64>().sqrt();

            if grad_norm < self.config.gradient_tolerance {
                converged = true;
                break;
            }

            // Let the self-tuner react to the observed relative improvement;
            // the tuner is borrowed in an inner scope so `self` is free again
            // when the tuned parameters are applied below.
            let tuning_params = {
                if let Some(ref mut tuner) = self.self_tuning_optimizer {
                    let improvement = if iteration > 1 {
                        let prev_f = self.previous_function_value.unwrap_or(current_f);
                        if prev_f.abs() > 1e-14 {
                            (prev_f - current_f) / prev_f.abs()
                        } else {
                            prev_f - current_f
                        }
                    } else {
                        0.0
                    };

                    let params_changed = tuner.update_parameters(
                        iteration,
                        current_f,
                        Some(grad_norm),
                        improvement,
                    )?;

                    if params_changed {
                        Some(tuner.get_parameters().clone())
                    } else {
                        None
                    }
                } else {
                    None
                }
            };

            if let Some(params) = tuning_params {
                self.apply_tuned_parameters(&params)?;
            }

            let search_direction = if self.config.use_gpu {
                self.compute_gpu_search_direction(&current_grad)?
            } else {
                self.compute_cpu_search_direction(&current_grad)?
            };

            let step_size = if self.distributed_context.is_some() {
                // Temporarily take the context out of `self` so its mutable
                // borrow does not conflict with the `&mut self` method call.
                let current_x_copy = current_x.clone();
                let search_direction_copy = search_direction.clone();
                let mut dist_ctx = self.distributed_context.take();
                let result = if let Some(ref mut ctx) = dist_ctx {
                    self.distributed_line_search(
                        ctx,
                        &function,
                        &current_x_copy,
                        &search_direction_copy,
                    )
                } else {
                    unreachable!()
                };
                self.distributed_context = dist_ctx;
                result?
            } else {
                self.standard_line_search(&function, &current_x, &search_direction)?
            };

            // Take the step and project back into the feasible box.
            for i in 0..current_x.len() {
                current_x[i] += step_size * search_direction[i];
            }

            if let Some(bounds) = bounds {
                self.apply_bounds(&mut current_x, bounds);
            }

            current_f = function(&current_x.view());
            function_evaluations += 1;

            if let Some(ref mut tracker) = self.trajectory_tracker {
                tracker.record(iteration, &current_x.view(), current_f);
                tracker.record_gradient_norm(grad_norm);
                tracker.record_step_size(step_size);
            }

            // Stop when the absolute or relative improvement in the function
            // value falls below the configured tolerance.
            if let Some(prev_f) = self.previous_function_value {
                let abs_improvement = (prev_f - current_f).abs();
                let rel_improvement = if prev_f.abs() > 1e-14 {
                    abs_improvement / prev_f.abs()
                } else {
                    abs_improvement
                };

                if abs_improvement < self.config.function_tolerance
                    || rel_improvement < self.config.function_tolerance
                {
                    converged = true;
                    break;
                }
            }

            self.previous_function_value = Some(current_f);
        }

        let total_time = start_time.elapsed().as_secs_f64();

        // `converged` distinguishes early termination from iteration
        // exhaustion, even when convergence occurs on the final iteration.
        let success = converged;
        let message = if success {
            "Optimization completed successfully".to_string()
        } else {
            "Maximum iterations reached".to_string()
        };

        let visualization_paths = if let (Some(ref visualizer), Some(ref tracker)) =
            (&self.visualizer, &self.trajectory_tracker)
        {
            self.generate_visualization(visualizer, tracker.trajectory())?
        } else {
            Vec::new()
        };

        let performance_report = self.generate_performance_report(
            total_time,
            function_evaluations,
            gradient_evaluations,
        )?;

        Ok(UnifiedOptimizationResults {
            base_result: OptimizeResults::<f64> {
                x: current_x,
                fun: current_f,
                success,
                message,
                nit: iteration,
                nfev: function_evaluations,
                njev: gradient_evaluations,
                ..OptimizeResults::<f64>::default()
            },
            visualization_paths,
            performance_report,
            self_tuning_report: self
                .self_tuning_optimizer
                .as_ref()
                .map(|t| t.generate_report()),
            distributed_stats: self.distributed_context.as_ref().map(|d| d.stats().clone()),
        })
    }

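    /// Approximates the gradient with central finite differences,
    /// `g[i] ~= (f(x + h*e_i) - f(x - h*e_i)) / (2*h)` with `h = 1e-8`,
    /// delegating to the GPU context when one is available.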
    fn compute_numerical_gradient<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
    ) -> ScirsResult<Array1<f64>>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        let n = x.len();
        let mut gradient = Array1::zeros(n);
        let h = 1e-8;

        if self.config.use_gpu {
            if let Some(ref gpu_ctx) = self.gpu_context {
                return gpu_ctx.compute_gradient_finite_diff(function, x, h);
            }
        }

        for i in 0..n {
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[i] += h;
            x_minus[i] -= h;

            gradient[i] = (function(&x_plus.view()) - function(&x_minus.view())) / (2.0 * h);
        }

        Ok(gradient)
    }

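    /// Computes a search direction on the GPU, falling back to steepest
    /// descent (the negated gradient) when no GPU context is available.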
    fn compute_gpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        if let Some(ref gpu_ctx) = self.gpu_context {
            gpu_ctx.compute_search_direction(gradient)
        } else {
            Ok(-gradient.clone())
        }
    }

    fn compute_cpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        // Steepest descent: follow the negated gradient.
        Ok(-gradient.clone())
    }

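    /// Line search under distributed execution. Currently a placeholder that
    /// delegates to the standard backtracking search; a parallel version
    /// could evaluate the candidate step sizes across ranks.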
    fn distributed_line_search<F>(
        &mut self,
        _dist_ctx: &mut DistributedOptimizationContext<M>,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
    {
        // Candidate steps that a parallel implementation would evaluate
        // across ranks; unused until that path is implemented.
        let _step_sizes = Array1::from(vec![0.001, 0.01, 0.1, 1.0, 10.0]);

        self.standard_line_search(function, x, direction)
    }

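    /// Backtracking line search enforcing the Armijo sufficient-decrease
    /// condition `f(x + a*d) <= f(x) + c1 * a * (grad . d)` with `c1 = 1e-4`
    /// and backtracking factor `rho = 0.5`.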
    fn standard_line_search<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        let mut alpha = 1.0;
        let c1 = 1e-4;
        let rho = 0.5;

        let f0 = function(&x.view());
        // -||d||^2 equals grad . d only for steepest descent (d = -grad),
        // which is the only direction the current direction routines produce.
        let grad_dot_dir = -direction.iter().map(|&d| d * d).sum::<f64>();

        for _ in 0..20 {
            let mut x_new = x.clone();
            for i in 0..x.len() {
                x_new[i] += alpha * direction[i];
            }

            let f_new = function(&x_new.view());

            if f_new <= f0 + c1 * alpha * grad_dot_dir {
                return Ok(alpha);
            }

            alpha *= rho;
        }

        Ok(alpha)
    }

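    /// Clamps each coordinate into its `[lower, upper]` box constraint.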
    fn apply_bounds(&self, x: &mut Array1<f64>, bounds: &[(Option<f64>, Option<f64>)]) {
        for (i, xi) in x.iter_mut().enumerate() {
            if i < bounds.len() {
                if let Some(lower) = bounds[i].0 {
                    if *xi < lower {
                        *xi = lower;
                    }
                }
                if let Some(upper) = bounds[i].1 {
                    if *xi > upper {
                        *xi = upper;
                    }
                }
            }
        }
    }

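    /// Maps tuned parameter values onto the underlying algorithm; unknown
    /// parameter names are ignored.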
    fn apply_tuned_parameters(
        &mut self,
        parameters: &std::collections::HashMap<String, ParameterValue>,
    ) -> ScirsResult<()> {
        for (name, value) in parameters {
            match name.as_str() {
                "learning_rate" | "step_size" => {
                    if let Some(f_val) = value.as_f64() {
                        self.update_algorithm_parameter("step_size", f_val)?;
                    }
                }
                "tolerance" => {
                    if let Some(f_val) = value.as_f64() {
                        self.update_algorithm_parameter("tolerance", f_val)?;
                    }
                }
                _ => {}
            }
        }
        Ok(())
    }

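    /// Hook for pushing a tuned value into the concrete algorithm state;
    /// currently a stub that accepts and discards the update.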
    fn update_algorithm_parameter(&mut self, _name: &str, _value: f64) -> ScirsResult<()> {
        Ok(())
    }

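    /// Writes convergence and (for 2-D problems) trajectory plots plus an
    /// HTML summary report into the configured output directory, returning
    /// the paths of the generated files.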
    fn generate_visualization(
        &self,
        visualizer: &OptimizationVisualizer,
        trajectory: &crate::visualization::OptimizationTrajectory,
    ) -> ScirsResult<Vec<String>> {
        let mut paths = Vec::new();

        if let Some(ref output_dir) = self.config.output_directory {
            let output_path = Path::new(output_dir);
            std::fs::create_dir_all(output_path)?;

            let convergence_path = output_path.join("convergence.svg");
            visualizer.plot_convergence(trajectory, &convergence_path)?;
            paths.push(convergence_path.to_string_lossy().to_string());

            if !trajectory.parameters.is_empty() && trajectory.parameters[0].len() == 2 {
                let trajectory_path = output_path.join("trajectory.svg");
                visualizer.plot_parameter_trajectory(trajectory, &trajectory_path)?;
                paths.push(trajectory_path.to_string_lossy().to_string());
            }

            visualizer.create_optimization_report(trajectory, output_path)?;
            paths.push(
                output_path
                    .join("summary.html")
                    .to_string_lossy()
                    .to_string(),
            );
        }

        Ok(paths)
    }

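    /// Assembles a plain-text report of timing and evaluation counts,
    /// appending distributed and GPU sections when those subsystems ran.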
    fn generate_performance_report(
        &self,
        total_time: f64,
        function_evaluations: usize,
        gradient_evaluations: usize,
    ) -> ScirsResult<String> {
        let mut report = String::from("Unified Optimization Performance Report\n");
        report.push_str("=========================================\n\n");

        report.push_str(&format!("Total Time: {:.3}s\n", total_time));
        report.push_str(&format!("Function Evaluations: {}\n", function_evaluations));
        report.push_str(&format!("Gradient Evaluations: {}\n", gradient_evaluations));

        if total_time > 0.0 {
            report.push_str(&format!(
                "Function Evaluations per Second: {:.2}\n",
                function_evaluations as f64 / total_time
            ));
        }

        if let Some(ref dist_ctx) = self.distributed_context {
            report.push_str("\nDistributed Performance:\n");
            report.push_str(&dist_ctx.stats().generate_report());
        }

        if self.acceleration_manager.is_some() {
            report.push_str("\nGPU Acceleration Performance:\n");
            report.push_str("GPU acceleration metrics available\n");
        }

        Ok(report)
    }
}

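/// Results of a unified optimization run: the base optimizer output plus the
/// artifacts produced by the optional subsystems.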
#[derive(Debug, Clone)]
pub struct UnifiedOptimizationResults {
    pub base_result: OptimizeResults<f64>,
    pub visualization_paths: Vec<String>,
    pub performance_report: String,
    pub self_tuning_report: Option<String>,
    pub distributed_stats: Option<crate::distributed::DistributedStats>,
}

impl UnifiedOptimizationResults {
    pub fn x(&self) -> &Array1<f64> {
        &self.base_result.x
    }

    pub fn fun(&self) -> f64 {
        self.base_result.fun
    }

    pub fn success(&self) -> bool {
        self.base_result.success
    }

    pub fn message(&self) -> &str {
        &self.base_result.message
    }

    pub fn iterations(&self) -> usize {
        self.base_result.nit
    }

    pub fn nit(&self) -> usize {
        self.base_result.nit
    }

    pub fn print_summary(&self) {
        println!("Unified Optimization Results");
        println!("============================");
        println!("Success: {}", self.success());
        println!("Final function value: {:.6e}", self.fun());
        println!("Iterations: {}", self.nit());
        println!("Function evaluations: {}", self.base_result.nfev);

        if self.base_result.njev > 0 {
            println!("Gradient evaluations: {}", self.base_result.njev);
        }

        if !self.visualization_paths.is_empty() {
            println!("\nGenerated visualizations:");
            for path in &self.visualization_paths {
                println!("  {}", path);
            }
        }

        if let Some(ref self_tuning) = self.self_tuning_report {
            println!("\nSelf-Tuning Report:");
            println!("{}", self_tuning);
        }

        if let Some(ref dist_stats) = self.distributed_stats {
            println!("\nDistributed Performance:");
            println!("{}", dist_stats.generate_report());
        }

        println!("\nPerformance Report:");
        println!("{}", self.performance_report);
    }
}

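/// Ready-made configurations for common deployment scenarios.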
pub mod presets {
    use super::*;

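    /// Distributed + GPU configuration with hybrid self-tuning, intended for
    /// multi-node runs. The process count is currently unused.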
    pub fn distributed_gpu_config(_num_processes: usize) -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: true,
            distributed_config: Some(crate::distributed::DistributedConfig {
                distribution_strategy: crate::distributed::DistributionStrategy::DataParallel,
                load_balancing: crate::distributed::LoadBalancingConfig::default(),
                communication: crate::distributed::CommunicationConfig::default(),
                fault_tolerance: crate::distributed::FaultToleranceConfig::default(),
            }),
            use_gpu: true,
            gpu_config: Some(GpuOptimizationConfig::default()),
            acceleration_config: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuning_config: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::Hybrid,
                update_frequency: 25,
                learning_rate: 0.1,
                memory_window: 100,
                use_bayesian_tuning: true,
                exploration_factor: 0.15,
            }),
            enable_visualization: true,
            visualization_config: Some(VisualizationConfig::default()),
            output_directory: Some("distributed_gpu_optimization".to_string()),
            max_nit: 2000,
            function_tolerance: 1e-8,
            gradient_tolerance: 1e-8,
        }
    }

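    /// Single-node GPU configuration for large problems: conservative
    /// performance-based self-tuning and a generous iteration budget.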
    pub fn large_scale_config() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributed_config: None,
            use_gpu: true,
            gpu_config: Some(GpuOptimizationConfig::default()),
            acceleration_config: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuning_config: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::PerformanceBased,
                update_frequency: 50,
                learning_rate: 0.05,
                memory_window: 200,
                use_bayesian_tuning: false,
                exploration_factor: 0.1,
            }),
            enable_visualization: true,
            visualization_config: Some(VisualizationConfig::default()),
            output_directory: Some("large_scale_optimization".to_string()),
            max_nit: 5000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }

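    /// Lightweight CPU-only configuration for interactive sessions: fast
    /// convergence-based tuning, HTML visualization output, and loose
    /// tolerances.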
    pub fn interactive_config() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributed_config: None,
            use_gpu: false,
            gpu_config: None,
            acceleration_config: None,
            use_self_tuning: true,
            self_tuning_config: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::ConvergenceBased,
                update_frequency: 10,
                learning_rate: 0.2,
                memory_window: 50,
                use_bayesian_tuning: true,
                exploration_factor: 0.2,
            }),
            enable_visualization: true,
            visualization_config: Some(VisualizationConfig {
                format: crate::visualization::OutputFormat::Html,
                width: 1200,
                height: 800,
                title: Some("Interactive Optimization".to_string()),
                show_grid: true,
                log_scale_y: false,
                color_scheme: crate::visualization::ColorScheme::Scientific,
                show_legend: true,
                custom_style: None,
            }),
            output_directory: Some("interactive_optimization".to_string()),
            max_nit: 500,
            function_tolerance: 1e-4,
            gradient_tolerance: 1e-4,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_unified_config_creation() {
        let config = UnifiedOptimizationConfig::default();
        assert!(!config.use_distributed);
        assert!(config.use_self_tuning);
        assert!(!config.use_gpu);
        assert!(config.enable_visualization);
    }

    #[test]
    fn test_preset_configs() {
        let large_scale = presets::large_scale_config();
        assert!(large_scale.use_gpu);
        assert!(large_scale.use_self_tuning);
        assert!(!large_scale.use_distributed);

        let interactive = presets::interactive_config();
        assert!(!interactive.use_gpu);
        assert!(interactive.use_self_tuning);
        assert!(interactive.enable_visualization);
    }

    #[test]
    fn test_optimizer_construction() {
        // Construction with the default config must succeed without MPI.
        let config = UnifiedOptimizationConfig::default();
        let optimizer: Result<UnifiedOptimizer<crate::distributed::MockMPI>, _> =
            UnifiedOptimizer::new(config, None);
        assert!(optimizer.is_ok());
    }

    #[test]
    fn test_rosenbrock_optimization() {
        let config = presets::interactive_config();

        let rosenbrock = |x: &ArrayView1<f64>| -> f64 {
            let x0 = x[0];
            let x1 = x[1];
            (1.0 - x0).powi(2) + 100.0 * (x1 - x0.powi(2)).powi(2)
        };

        let initial_guess = array![-1.0, 1.0];
        // Silence unused-variable warnings until a full optimize() run is
        // exercised here; this test only checks the setup path.
        let _ = (rosenbrock, initial_guess);

        let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
            UnifiedOptimizer::new(config, None).unwrap();

        optimizer
            .register_tunable_parameter("step_size", TunableParameter::new(0.01, 0.001, 0.1))
            .unwrap();
    }
}