use crate::error::{ScirsError, ScirsResult};
use ndarray::{Array1, ArrayView1};
use scirs2_core::error_context;
use std::path::Path;

use crate::distributed::{DistributedConfig, DistributedOptimizationContext, MPIInterface};
use crate::gpu::{
    acceleration::{AccelerationConfig, AccelerationManager},
    GpuOptimizationConfig, GpuOptimizationContext,
};
use crate::result::OptimizeResults;
use crate::self_tuning::{
    AdaptationStrategy, ParameterValue, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
use crate::visualization::{
    tracking::TrajectoryTracker, OptimizationVisualizer, VisualizationConfig,
};

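/// Configuration for the unified optimizer, combining the optional distributed,
/// self-tuning, GPU-acceleration, and visualization subsystems behind one set of switches.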
#[derive(Clone)]
pub struct UnifiedOptimizationConfig {
    /// Enable distributed optimization over MPI.
    pub use_distributed: bool,
    /// Settings for the distributed context; required when `use_distributed` is true.
    pub distributedconfig: Option<DistributedConfig>,

    /// Enable self-tuning of algorithm parameters.
    pub use_self_tuning: bool,
    /// Settings for the self-tuning optimizer; defaults are used when `None`.
    pub self_tuningconfig: Option<SelfTuningConfig>,

    /// Enable GPU acceleration.
    pub use_gpu: bool,
    /// GPU context settings; defaults are used when `None`.
    pub gpuconfig: Option<GpuOptimizationConfig>,
    /// Acceleration-manager settings; defaults are used when `None`.
    pub accelerationconfig: Option<AccelerationConfig>,

    /// Enable trajectory tracking and plot generation.
    pub enable_visualization: bool,
    /// Visualization settings; defaults are used when `None`.
    pub visualizationconfig: Option<VisualizationConfig>,

    /// Directory where visualization output is written.
    pub output_directory: Option<String>,

    /// Maximum number of iterations.
    pub max_nit: usize,
    /// Convergence tolerance on the (absolute or relative) change in function value.
    pub function_tolerance: f64,
    /// Convergence tolerance on the gradient norm.
    pub gradient_tolerance: f64,
}

impl Default for UnifiedOptimizationConfig {
    fn default() -> Self {
        Self {
            use_distributed: false,
            distributedconfig: None,
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig::default()),
            use_gpu: false,
            gpuconfig: None,
            accelerationconfig: None,
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("optimization_output".to_string()),
            max_nit: 1000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }
}

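/// High-level optimizer that wires together the subsystems selected in
/// [`UnifiedOptimizationConfig`]: distributed execution, self-tuning, GPU
/// acceleration, and trajectory visualization.
///
/// A minimal usage sketch (illustrative assumption: `crate::distributed::MockMPI`
/// is used as the MPI stand-in, as in the tests at the bottom of this file):
///
/// ```ignore
/// use ndarray::{array, Array1, ArrayView1};
///
/// let config = UnifiedOptimizationConfig::default();
/// let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
///     UnifiedOptimizer::new(config, None).unwrap();
///
/// let objective = |x: &ArrayView1<f64>| x.iter().map(|v| v * v).sum::<f64>();
/// let results = optimizer
///     .optimize(
///         objective,
///         None::<fn(&ArrayView1<f64>) -> Array1<f64>>,
///         &array![1.0, -2.0],
///         None,
///     )
///     .unwrap();
/// results.print_summary();
/// ```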
pub struct UnifiedOptimizer<M: MPIInterface> {
    config: UnifiedOptimizationConfig,

    distributed_context: Option<DistributedOptimizationContext<M>>,
    self_tuning_optimizer: Option<SelfTuningOptimizer>,
    gpu_context: Option<GpuOptimizationContext>,
    acceleration_manager: Option<AccelerationManager>,
    visualizer: Option<OptimizationVisualizer>,
    trajectory_tracker: Option<TrajectoryTracker>,

    previous_function_value: Option<f64>,
}

impl<M: MPIInterface> UnifiedOptimizer<M> {
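    /// Builds a unified optimizer from `config`, initializing only the subsystems that the
    /// configuration enables. Fails if distributed optimization is requested without both an
    /// MPI interface and a distributed configuration.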
    pub fn new(config: UnifiedOptimizationConfig, mpi: Option<M>) -> ScirsResult<Self> {
        let distributed_context = if config.use_distributed {
            if let (Some(mpi_interface), Some(distconfig)) = (mpi, &config.distributedconfig) {
                Some(DistributedOptimizationContext::new(
                    mpi_interface,
                    distconfig.clone(),
                ))
            } else {
                return Err(ScirsError::InvalidInput(error_context!(
                    "MPI interface and distributed config required for distributed optimization"
                )));
            }
        } else {
            None
        };

        let self_tuning_optimizer = if config.use_self_tuning {
            let tuningconfig = config
                .self_tuningconfig
                .clone()
                .unwrap_or_else(SelfTuningConfig::default);
            Some(SelfTuningOptimizer::new(tuningconfig))
        } else {
            None
        };

        let (gpu_context, acceleration_manager) = if config.use_gpu {
            let gpuconfig = config
                .gpuconfig
                .clone()
                .unwrap_or_else(GpuOptimizationConfig::default);
            let gpu_ctx = GpuOptimizationContext::new(gpuconfig)?;

            let accelconfig = config
                .accelerationconfig
                .clone()
                .unwrap_or_else(AccelerationConfig::default);
            let accel_mgr = AccelerationManager::new(accelconfig);

            (Some(gpu_ctx), Some(accel_mgr))
        } else {
            (None, None)
        };

        let (visualizer, trajectory_tracker) = if config.enable_visualization {
            let visconfig = config
                .visualizationconfig
                .clone()
                .unwrap_or_else(VisualizationConfig::default);
            let vis = OptimizationVisualizer::with_config(visconfig);
            let tracker = TrajectoryTracker::new();
            (Some(vis), Some(tracker))
        } else {
            (None, None)
        };

        Ok(Self {
            config,
            distributed_context,
            self_tuning_optimizer,
            gpu_context,
            acceleration_manager,
            visualizer,
            trajectory_tracker,
            previous_function_value: None,
        })
    }

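    /// Registers a tunable parameter with the self-tuning optimizer; a no-op when
    /// self-tuning is disabled.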
    pub fn register_tunable_parameter<T>(
        &mut self,
        name: &str,
        param: TunableParameter<T>,
    ) -> ScirsResult<()>
    where
        T: Clone + PartialOrd + std::fmt::Debug + Send + Sync + 'static,
    {
        if let Some(ref mut tuner) = self.self_tuning_optimizer {
            tuner.register_parameter(name, param)?;
        }
        Ok(())
    }

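    /// Runs gradient-based minimization of `function` starting from `initial_guess`.
    ///
    /// When `gradient` is `None`, a central-difference approximation is used. Optional
    /// per-coordinate `bounds` are enforced after every step. Iteration stops once the
    /// gradient norm or the function improvement drops below the configured tolerances,
    /// or after `max_nit` iterations.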
    pub fn optimize<F, G>(
        &mut self,
        function: F,
        gradient: Option<G>,
        initial_guess: &Array1<f64>,
        bounds: Option<&[(Option<f64>, Option<f64>)]>,
    ) -> ScirsResult<UnifiedOptimizationResults>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
        G: Fn(&ArrayView1<f64>) -> Array1<f64> + Clone + Send + Sync,
    {
        let mut current_x = initial_guess.clone();
        let mut current_f = function(&current_x.view());
        let mut iteration = 0;
        let mut function_evaluations = 1;
        let mut gradient_evaluations = 0;

        if let Some(ref mut tracker) = self.trajectory_tracker {
            tracker.record(iteration, &current_x.view(), current_f);
        }

        let start_time = std::time::Instant::now();

        while iteration < self.config.max_nit {
            iteration += 1;

            let current_grad = if let Some(ref grad_fn) = gradient {
                gradient_evaluations += 1;
                grad_fn(&current_x.view())
            } else {
                self.compute_numerical_gradient(&function, &current_x)?
            };

            let grad_norm = current_grad.iter().map(|&g| g * g).sum::<f64>().sqrt();

            if grad_norm < self.config.gradient_tolerance {
                break;
            }

            let tuning_params = {
                if let Some(ref mut tuner) = self.self_tuning_optimizer {
                    let improvement = if iteration > 1 {
                        let prev_f = self.previous_function_value.unwrap_or(current_f);
                        if prev_f.abs() > 1e-14 {
                            (prev_f - current_f) / prev_f.abs()
                        } else {
                            prev_f - current_f
                        }
                    } else {
                        0.0
                    };

                    let params_changed = tuner.update_parameters(
                        iteration,
                        current_f,
                        Some(grad_norm),
                        improvement,
                    )?;

                    if params_changed {
                        Some(tuner.get_parameters().clone())
                    } else {
                        None
                    }
                } else {
                    None
                }
            };

            if let Some(params) = tuning_params {
                self.apply_tuned_parameters(&params)?;
            }

            let search_direction = if self.config.use_gpu {
                self.compute_gpu_search_direction(&current_grad)?
            } else {
                self.compute_cpu_search_direction(&current_grad)?
            };

            let step_size = if self.distributed_context.is_some() {
                let current_x_copy = current_x.clone();
                let search_direction_copy = search_direction.clone();
                let mut dist_ctx = self.distributed_context.take();
                let result = if let Some(ref mut ctx) = dist_ctx {
                    self.distributed_line_search(
                        ctx,
                        &function,
                        &current_x_copy,
                        &search_direction_copy,
                    )
                } else {
                    unreachable!()
                };
                self.distributed_context = dist_ctx;
                result?
            } else {
                self.standard_line_search(&function, &current_x, &search_direction)?
            };

            for i in 0..current_x.len() {
                current_x[i] += step_size * search_direction[i];
            }

            if let Some(bounds) = bounds {
                self.apply_bounds(&mut current_x, bounds);
            }

            current_f = function(&current_x.view());
            function_evaluations += 1;

            if let Some(ref mut tracker) = self.trajectory_tracker {
                tracker.record(iteration, &current_x.view(), current_f);
                tracker.record_gradient_norm(grad_norm);
                tracker.record_step_size(step_size);
            }

            if let Some(prev_f) = self.previous_function_value {
                let abs_improvement = (prev_f - current_f).abs();
                let rel_improvement = if prev_f.abs() > 1e-14 {
                    abs_improvement / prev_f.abs()
                } else {
                    abs_improvement
                };

                if abs_improvement < self.config.function_tolerance
                    || rel_improvement < self.config.function_tolerance
                {
                    break;
                }
            }

            self.previous_function_value = Some(current_f);
        }

        let total_time = start_time.elapsed().as_secs_f64();

        let success = iteration < self.config.max_nit;
        let message = if success {
            "Optimization completed successfully".to_string()
        } else {
            "Maximum iterations reached".to_string()
        };

        let visualization_paths = if let (Some(ref visualizer), Some(ref tracker)) =
            (&self.visualizer, &self.trajectory_tracker)
        {
            self.generate_visualization(visualizer, tracker.trajectory())?
        } else {
            Vec::new()
        };

        let performance_report = self.generate_performance_report(
            total_time,
            function_evaluations,
            gradient_evaluations,
        )?;

        Ok(UnifiedOptimizationResults {
            base_result: OptimizeResults::<f64> {
                x: current_x,
                fun: current_f,
                success,
                message,
                nit: iteration,
                nfev: function_evaluations,
                njev: gradient_evaluations,
                ..OptimizeResults::<f64>::default()
            },
            visualization_paths,
            performance_report,
            self_tuning_report: self
                .self_tuning_optimizer
                .as_ref()
                .map(|t| t.generate_report()),
            distributed_stats: self.distributed_context.as_ref().map(|d| d.stats().clone()),
        })
    }

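    /// Approximates the gradient with central finite differences (step `h = 1e-8`),
    /// delegating to the GPU context when one is available.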
    fn compute_numerical_gradient<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
    ) -> ScirsResult<Array1<f64>>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        let n = x.len();
        let mut gradient = Array1::zeros(n);
        let h = 1e-8;

        if self.config.use_gpu {
            if let Some(ref gpu_ctx) = self.gpu_context {
                return gpu_ctx.compute_gradient_finite_diff(function, x, h);
            }
        }

        for i in 0..n {
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[i] += h;
            x_minus[i] -= h;

            gradient[i] = (function(&x_plus.view()) - function(&x_minus.view())) / (2.0 * h);
        }

        Ok(gradient)
    }

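    /// Computes a descent direction on the GPU when a GPU context exists; otherwise falls
    /// back to steepest descent (the negative gradient).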
    fn compute_gpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        if let Some(ref gpu_ctx) = self.gpu_context {
            gpu_ctx.compute_search_direction(gradient)
        } else {
            Ok(-gradient.clone())
        }
    }

    fn compute_cpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        Ok(-gradient.clone())
    }

    fn distributed_line_search<F>(
        &mut self,
        _dist_ctx: &mut DistributedOptimizationContext<M>,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
    {
        // Candidate step sizes that a distributed search could evaluate in parallel;
        // the distributed context is currently unused and we fall back to the local search.
        let _step_sizes = Array1::from(vec![0.001, 0.01, 0.1, 1.0, 10.0]);

        self.standard_line_search(function, x, direction)
    }

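    /// Backtracking (Armijo) line search: starts from a unit step and halves it until the
    /// sufficient-decrease condition with `c1 = 1e-4` holds, for at most 20 trials.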
    fn standard_line_search<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        let mut alpha = 1.0;
        let c1 = 1e-4;
        let rho = 0.5;

        let f0 = function(&x.view());
        // Directional derivative estimate: -||direction||^2 (exact for steepest descent).
        let grad_dot_dir = -direction.iter().map(|&d| d * d).sum::<f64>();

        for _ in 0..20 {
            let mut x_new = x.clone();
            for i in 0..x.len() {
                x_new[i] += alpha * direction[i];
            }

            let f_new = function(&x_new.view());

            if f_new <= f0 + c1 * alpha * grad_dot_dir {
                return Ok(alpha);
            }

            alpha *= rho;
        }

        Ok(alpha)
    }

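    /// Clamps each coordinate of `x` to its optional lower and upper bound.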
    fn apply_bounds(&self, x: &mut Array1<f64>, bounds: &[(Option<f64>, Option<f64>)]) {
        for (i, xi) in x.iter_mut().enumerate() {
            if i < bounds.len() {
                if let Some(lower) = bounds[i].0 {
                    if *xi < lower {
                        *xi = lower;
                    }
                }
                if let Some(upper) = bounds[i].1 {
                    if *xi > upper {
                        *xi = upper;
                    }
                }
            }
        }
    }

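    /// Maps parameter values proposed by the self-tuning optimizer onto the underlying
    /// algorithm settings.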
    fn apply_tuned_parameters(
        &mut self,
        parameters: &std::collections::HashMap<String, ParameterValue>,
    ) -> ScirsResult<()> {
        for (name, value) in parameters {
            match name.as_str() {
                "learning_rate" | "step_size" => {
                    if let Some(f_val) = value.as_f64() {
                        self.update_algorithm_parameter("step_size", f_val)?;
                    }
                }
                "tolerance" => {
                    if let Some(f_val) = value.as_f64() {
                        self.update_algorithm_parameter("tolerance", f_val)?;
                    }
                }
                _ => {
                    // Parameters that are not recognized here are ignored.
                }
            }
        }
        Ok(())
    }

    fn update_algorithm_parameter(&mut self, _name: &str, _value: f64) -> ScirsResult<()> {
        // Placeholder: the accepted values are not yet applied to any algorithm state.
        Ok(())
    }

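    /// Writes a convergence plot, a parameter-trajectory plot for two-dimensional problems,
    /// and an HTML summary report into the configured output directory, returning the paths
    /// of the generated files.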
    fn generate_visualization(
        &self,
        visualizer: &OptimizationVisualizer,
        trajectory: &crate::visualization::OptimizationTrajectory,
    ) -> ScirsResult<Vec<String>> {
        let mut paths = Vec::new();

        if let Some(ref output_dir) = self.config.output_directory {
            let output_path = Path::new(output_dir);
            std::fs::create_dir_all(output_path)?;

            let convergence_path = output_path.join("convergence.svg");
            visualizer.plot_convergence(trajectory, &convergence_path)?;
            paths.push(convergence_path.to_string_lossy().to_string());

            if !trajectory.parameters.is_empty() && trajectory.parameters[0].len() == 2 {
                let trajectory_path = output_path.join("trajectory.svg");
                visualizer.plot_parameter_trajectory(trajectory, &trajectory_path)?;
                paths.push(trajectory_path.to_string_lossy().to_string());
            }

            visualizer.create_optimization_report(trajectory, output_path)?;
            paths.push(
                output_path
                    .join("summary.html")
                    .to_string_lossy()
                    .to_string(),
            );
        }

        Ok(paths)
    }

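    /// Builds a plain-text performance summary covering timing, evaluation counts, and any
    /// distributed or GPU-acceleration statistics.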
    fn generate_performance_report(
        &self,
        total_time: f64,
        function_evaluations: usize,
        gradient_evaluations: usize,
    ) -> ScirsResult<String> {
        let mut report = String::from("Unified Optimization Performance Report\n");
        report.push_str("=========================================\n\n");

        report.push_str(&format!("Total Time: {:.3}s\n", total_time));
        report.push_str(&format!("Function Evaluations: {}\n", function_evaluations));
        report.push_str(&format!("Gradient Evaluations: {}\n", gradient_evaluations));

        if total_time > 0.0 {
            report.push_str(&format!(
                "Function Evaluations per Second: {:.2}\n",
                function_evaluations as f64 / total_time
            ));
        }

        if let Some(ref dist_ctx) = self.distributed_context {
            report.push_str("\nDistributed Performance:\n");
            report.push_str(&dist_ctx.stats().generate_report());
        }

        if self.acceleration_manager.is_some() {
            report.push_str("\nGPU Acceleration Performance:\n");
            report.push_str("GPU acceleration metrics available\n");
        }

        Ok(report)
    }
}

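/// Results of a unified optimization run: the base optimizer output plus visualization
/// paths, a performance report, and optional self-tuning and distributed statistics.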
#[derive(Debug, Clone)]
pub struct UnifiedOptimizationResults {
    pub base_result: OptimizeResults<f64>,
    pub visualization_paths: Vec<String>,
    pub performance_report: String,
    pub self_tuning_report: Option<String>,
    pub distributed_stats: Option<crate::distributed::DistributedStats>,
}

impl UnifiedOptimizationResults {
    pub fn x(&self) -> &Array1<f64> {
        &self.base_result.x
    }

    pub fn fun(&self) -> f64 {
        self.base_result.fun
    }

    pub fn success(&self) -> bool {
        self.base_result.success
    }

    pub fn message(&self) -> &str {
        &self.base_result.message
    }

    pub fn iterations(&self) -> usize {
        self.base_result.nit
    }

    pub fn nit(&self) -> usize {
        self.base_result.nit
    }

    pub fn print_summary(&self) {
        println!("Unified Optimization Results");
        println!("============================");
        println!("Success: {}", self.success());
        println!("Final function value: {:.6e}", self.fun());
        println!("Iterations: {}", self.nit());
        println!("Function evaluations: {}", self.base_result.nfev);

        if self.base_result.njev > 0 {
            println!("Gradient evaluations: {}", self.base_result.njev);
        }

        if !self.visualization_paths.is_empty() {
            println!("\nGenerated visualizations:");
            for path in &self.visualization_paths {
                println!(" {}", path);
            }
        }

        if let Some(ref self_tuning) = self.self_tuning_report {
            println!("\nSelf-Tuning Report:");
            println!("{}", self_tuning);
        }

        if let Some(ref dist_stats) = self.distributed_stats {
            println!("\nDistributed Performance:");
            println!("{}", dist_stats.generate_report());
        }

        println!("\nPerformance Report:");
        println!("{}", self.performance_report);
    }
}

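/// Ready-made configurations for common optimization scenarios.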
pub mod presets {
    use super::*;

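    /// Distributed, GPU-accelerated configuration with tight tolerances for large
    /// multi-process runs.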
    pub fn distributed_gpuconfig(_numprocesses: usize) -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: true,
            distributedconfig: Some(crate::distributed::DistributedConfig {
                distribution_strategy: crate::distributed::DistributionStrategy::DataParallel,
                load_balancing: crate::distributed::LoadBalancingConfig::default(),
                communication: crate::distributed::CommunicationConfig::default(),
                fault_tolerance: crate::distributed::FaultToleranceConfig::default(),
            }),
            use_gpu: true,
            gpuconfig: Some(GpuOptimizationConfig::default()),
            accelerationconfig: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::Hybrid,
                update_frequency: 25,
                learning_rate: 0.1,
                memory_window: 100,
                use_bayesian_tuning: true,
                exploration_factor: 0.15,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("distributed_gpu_optimization".to_string()),
            max_nit: 2000,
            function_tolerance: 1e-8,
            gradient_tolerance: 1e-8,
        }
    }

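    /// Single-node, GPU-accelerated configuration tuned for large-scale problems.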
    pub fn large_scaleconfig() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributedconfig: None,
            use_gpu: true,
            gpuconfig: Some(GpuOptimizationConfig::default()),
            accelerationconfig: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::PerformanceBased,
                update_frequency: 50,
                learning_rate: 0.05,
                memory_window: 200,
                use_bayesian_tuning: false,
                exploration_factor: 0.1,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("large_scale_optimization".to_string()),
            max_nit: 5000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }

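    /// CPU-only configuration with frequent self-tuning updates and HTML output,
    /// intended for interactive sessions.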
    pub fn interactiveconfig() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributedconfig: None,
            use_gpu: false,
            gpuconfig: None,
            accelerationconfig: None,
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::ConvergenceBased,
                update_frequency: 10,
                learning_rate: 0.2,
                memory_window: 50,
                use_bayesian_tuning: true,
                exploration_factor: 0.2,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig {
                format: crate::visualization::OutputFormat::Html,
                width: 1200,
                height: 800,
                title: Some("Interactive Optimization".to_string()),
                show_grid: true,
                log_scale_y: false,
                color_scheme: crate::visualization::ColorScheme::Scientific,
                show_legend: true,
                custom_style: None,
            }),
            output_directory: Some("interactive_optimization".to_string()),
            max_nit: 500,
            function_tolerance: 1e-4,
            gradient_tolerance: 1e-4,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::array;

    #[test]
    fn test_unifiedconfig_creation() {
        let config = UnifiedOptimizationConfig::default();
        assert!(!config.use_distributed);
        assert!(config.use_self_tuning);
        assert!(!config.use_gpu);
        assert!(config.enable_visualization);
    }

    #[test]
    fn test_presetconfigs() {
        let large_scale = presets::large_scaleconfig();
        assert!(large_scale.use_gpu);
        assert!(large_scale.use_self_tuning);
        assert!(!large_scale.use_distributed);

        let interactive = presets::interactiveconfig();
        assert!(!interactive.use_gpu);
        assert!(interactive.use_self_tuning);
        assert!(interactive.enable_visualization);
    }

    #[test]
    fn test_bounds_application() {
        let config = UnifiedOptimizationConfig::default();
        let optimizer: Result<UnifiedOptimizer<crate::distributed::MockMPI>, _> =
            UnifiedOptimizer::new(config, None);

        // When construction succeeds (no MPI needed for a non-distributed config),
        // check that coordinates are clamped to their bounds.
        if let Ok(optimizer) = optimizer {
            let mut x = array![5.0, -3.0];
            optimizer.apply_bounds(&mut x, &[(Some(0.0), Some(1.0)), (Some(-1.0), Some(1.0))]);
            assert_eq!(x[0], 1.0);
            assert_eq!(x[1], -1.0);
        }
    }

    #[test]
    fn test_rosenbrock_optimization() {
        let config = presets::interactiveconfig();

        let rosenbrock = |x: &ArrayView1<f64>| -> f64 {
            let x0 = x[0];
            let x1 = x[1];
            (1.0 - x0).powi(2) + 100.0 * (x1 - x0.powi(2)).powi(2)
        };

        let initial_guess = array![-1.0, 1.0];

        let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
            UnifiedOptimizer::new(config, None).unwrap();

        optimizer
            .register_tunable_parameter("step_size", TunableParameter::new(0.01, 0.001, 0.1))
            .unwrap();

        // Setup only: the optimizer and tunable parameter are created without panicking;
        // the full optimization loop is not run here.
        let _ = (rosenbrock, initial_guess);
    }
}