scirs2_optimize/
unified_pipeline.rs

//! Unified optimization pipeline combining all advanced features
//!
//! This module provides a high-level interface that integrates distributed optimization,
//! self-tuning, GPU acceleration, and visualization into a single cohesive pipeline.
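//!
//! A minimal usage sketch is shown below (marked `ignore`; the crate paths and exact
//! method signatures are assumptions based on this module and may differ slightly).
//! It uses the `MockMPI` placeholder from `crate::distributed`, as in the tests:
//!
//! ```ignore
//! use scirs2_core::ndarray::{array, Array1, ArrayView1};
//! use scirs2_optimize::unified_pipeline::{UnifiedOptimizationConfig, UnifiedOptimizer};
//!
//! // Default configuration: self-tuning and visualization enabled, GPU and MPI disabled.
//! let config = UnifiedOptimizationConfig::default();
//! let mut optimizer: UnifiedOptimizer<scirs2_optimize::distributed::MockMPI> =
//!     UnifiedOptimizer::new(config, None)?;
//!
//! // Minimize a simple quadratic; no analytic gradient, so finite differences are used.
//! let objective = |x: &ArrayView1<f64>| x.iter().map(|v| v * v).sum::<f64>();
//! let results = optimizer.optimize(
//!     objective,
//!     None::<fn(&ArrayView1<f64>) -> Array1<f64>>,
//!     &array![1.0, -2.0],
//!     None,
//! )?;
//! results.print_summary();
//! ```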

use crate::error::{ScirsError, ScirsResult};
use scirs2_core::error_context;
use scirs2_core::ndarray::{Array1, ArrayView1};
use std::path::Path;
// Unused import: std::sync::Arc

use crate::distributed::{DistributedConfig, DistributedOptimizationContext, MPIInterface};
use crate::gpu::{
    acceleration::{AccelerationConfig, AccelerationManager},
    GpuOptimizationConfig, GpuOptimizationContext,
};
use crate::result::OptimizeResults;
use crate::self_tuning::{
    AdaptationStrategy, ParameterValue, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
use crate::visualization::{
    tracking::TrajectoryTracker, OptimizationVisualizer, VisualizationConfig,
};
/// Comprehensive optimization pipeline configuration
#[derive(Clone)]
pub struct UnifiedOptimizationConfig {
    /// Enable distributed optimization
    pub use_distributed: bool,
    /// Distributed optimization settings
    pub distributedconfig: Option<DistributedConfig>,

    /// Enable self-tuning parameter adaptation
    pub use_self_tuning: bool,
    /// Self-tuning configuration
    pub self_tuningconfig: Option<SelfTuningConfig>,

    /// Enable GPU acceleration
    pub use_gpu: bool,
    /// GPU acceleration settings
    pub gpuconfig: Option<GpuOptimizationConfig>,
    /// Acceleration manager configuration
    pub accelerationconfig: Option<AccelerationConfig>,

    /// Enable optimization visualization
    pub enable_visualization: bool,
    /// Visualization settings
    pub visualizationconfig: Option<VisualizationConfig>,

    /// Output directory for results and visualization
    pub output_directory: Option<String>,

    /// Maximum number of iterations
    pub max_nit: usize,
    /// Function tolerance
    pub function_tolerance: f64,
    /// Gradient tolerance
    pub gradient_tolerance: f64,
}

impl Default for UnifiedOptimizationConfig {
    fn default() -> Self {
        Self {
            use_distributed: false,
            distributedconfig: None,
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig::default()),
            use_gpu: false,
            gpuconfig: None,
            accelerationconfig: None,
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("optimization_output".to_string()),
            max_nit: 1000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }
}

/// Unified optimization pipeline
pub struct UnifiedOptimizer<M: MPIInterface> {
    config: UnifiedOptimizationConfig,

    // Optional components based on configuration
    distributed_context: Option<DistributedOptimizationContext<M>>,
    self_tuning_optimizer: Option<SelfTuningOptimizer>,
    gpu_context: Option<GpuOptimizationContext>,
    acceleration_manager: Option<AccelerationManager>,
    visualizer: Option<OptimizationVisualizer>,
    trajectory_tracker: Option<TrajectoryTracker>,

    // Internal state for convergence checking
    previous_function_value: Option<f64>,
}

impl<M: MPIInterface> UnifiedOptimizer<M> {
    /// Create a new unified optimizer
    pub fn new(config: UnifiedOptimizationConfig, mpi: Option<M>) -> ScirsResult<Self> {
        let distributed_context = if config.use_distributed {
            if let (Some(mpi_interface), Some(distconfig)) = (mpi, &config.distributedconfig) {
                Some(DistributedOptimizationContext::new(
                    mpi_interface,
                    distconfig.clone(),
                ))
            } else {
                return Err(ScirsError::InvalidInput(error_context!(
                    "MPI interface and distributed config required for distributed optimization"
                )));
            }
        } else {
            None
        };

        let self_tuning_optimizer = if config.use_self_tuning {
            let tuningconfig = config.self_tuningconfig.clone().unwrap_or_default();
            Some(SelfTuningOptimizer::new(tuningconfig))
        } else {
            None
        };

        let (gpu_context, acceleration_manager) = if config.use_gpu {
            let gpuconfig = config.gpuconfig.clone().unwrap_or_default();
            let gpu_ctx = GpuOptimizationContext::new(gpuconfig)?;

            let accelconfig = config.accelerationconfig.clone().unwrap_or_default();
            let accel_mgr = AccelerationManager::new(accelconfig);

            (Some(gpu_ctx), Some(accel_mgr))
        } else {
            (None, None)
        };

        let (visualizer, trajectory_tracker) = if config.enable_visualization {
            let visconfig = config.visualizationconfig.clone().unwrap_or_default();
            let vis = OptimizationVisualizer::with_config(visconfig);
            let tracker = TrajectoryTracker::new();
            (Some(vis), Some(tracker))
        } else {
            (None, None)
        };

        Ok(Self {
            config,
            distributed_context,
            self_tuning_optimizer,
            gpu_context,
            acceleration_manager,
            visualizer,
            trajectory_tracker,
            previous_function_value: None,
        })
    }

    /// Register tunable parameters for self-tuning optimization
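    ///
    /// Registration is a no-op when self-tuning is disabled in the configuration.
    /// A hedged sketch follows (marked `ignore`); the `TunableParameter::new(initial, min, max)`
    /// constructor mirrors the call used in this module's tests and may differ from the
    /// actual API:
    ///
    /// ```ignore
    /// optimizer.register_tunable_parameter(
    ///     "step_size",
    ///     TunableParameter::new(0.01, 0.001, 0.1),
    /// )?;
    /// ```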
    pub fn register_tunable_parameter<T>(
        &mut self,
        name: &str,
        param: TunableParameter<T>,
    ) -> ScirsResult<()>
    where
        T: Clone + PartialOrd + std::fmt::Debug + Send + Sync + 'static,
    {
        if let Some(ref mut tuner) = self.self_tuning_optimizer {
            tuner.register_parameter(name, param)?;
        }
        Ok(())
    }

    /// Optimize a function using the unified pipeline
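    ///
    /// # Arguments
    ///
    /// * `function` - objective to minimize, called as `f(&x)`.
    /// * `gradient` - optional analytic gradient; when `None`, a central-difference
    ///   approximation is used (see `compute_numerical_gradient`).
    /// * `initial_guess` - starting point of the search.
    /// * `bounds` - optional per-coordinate `(lower, upper)` bounds; a `None` on either
    ///   side leaves that side unconstrained.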
    pub fn optimize<F, G>(
        &mut self,
        function: F,
        gradient: Option<G>,
        initial_guess: &Array1<f64>,
        bounds: Option<&[(Option<f64>, Option<f64>)]>,
    ) -> ScirsResult<UnifiedOptimizationResults>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
        G: Fn(&ArrayView1<f64>) -> Array1<f64> + Clone + Send + Sync,
    {
        // Initialize optimization state
        let mut current_x = initial_guess.clone();
        let mut current_f = function(&current_x.view());
        let mut iteration = 0;
        let mut function_evaluations = 1;
        let mut gradient_evaluations = 0;

        // Initialize tracking
        if let Some(ref mut tracker) = self.trajectory_tracker {
            tracker.record(iteration, &current_x.view(), current_f);
        }

        let start_time = std::time::Instant::now();

        // Main optimization loop
        while iteration < self.config.max_nit {
            iteration += 1;

            // Compute gradient
            let current_grad = if let Some(ref grad_fn) = gradient {
                gradient_evaluations += 1;
                grad_fn(&current_x.view())
            } else {
                // Use numerical differentiation
                self.compute_numerical_gradient(&function, &current_x)?
            };

            let grad_norm = current_grad.iter().map(|&g| g * g).sum::<f64>().sqrt();

            // Check convergence
            if grad_norm < self.config.gradient_tolerance {
                break;
            }

            // Self-tuning parameter adaptation
            let tuning_params = {
                if let Some(ref mut tuner) = self.self_tuning_optimizer {
                    let improvement = if iteration > 1 {
                        // Compute relative improvement in function value
                        let prev_f = self.previous_function_value.unwrap_or(current_f);
                        if prev_f.abs() > 1e-14 {
                            (prev_f - current_f) / prev_f.abs()
                        } else {
                            prev_f - current_f
                        }
                    } else {
                        0.0
                    };

                    let params_changed = tuner.update_parameters(
                        iteration,
                        current_f,
                        Some(grad_norm),
                        improvement,
                    )?;

                    if params_changed {
                        // Store parameters to apply later (clone to avoid borrow issues)
                        Some(tuner.get_parameters().clone())
                    } else {
                        None
                    }
                } else {
                    None
                }
            };

            // Apply tuning parameters if available (mutable reference to tuner is now dropped)
            if let Some(params) = tuning_params {
                self.apply_tuned_parameters(&params)?;
            }

            // GPU-accelerated computation if enabled
            let search_direction = if self.config.use_gpu {
                self.compute_gpu_search_direction(&current_grad)?
            } else {
                self.compute_cpu_search_direction(&current_grad)?
            };

            // Distributed evaluation for line search if enabled
            let step_size = if self.distributed_context.is_some() {
                let current_x_copy = current_x.clone();
                let search_direction_copy = search_direction.clone();
                // Extract distributed context temporarily to avoid borrowing conflicts
                let mut dist_ctx = self.distributed_context.take();
                let result = if let Some(ref mut ctx) = dist_ctx {
                    self.distributed_line_search(
                        ctx,
                        &function,
                        &current_x_copy,
                        &search_direction_copy,
                    )
                } else {
                    unreachable!()
                };
                // Restore the distributed context
                self.distributed_context = dist_ctx;
                result?
            } else {
                self.standard_line_search(&function, &current_x, &search_direction)?
            };

            // Update position
            for i in 0..current_x.len() {
                current_x[i] += step_size * search_direction[i];
            }

            // Apply bounds if specified
            if let Some(bounds) = bounds {
                self.apply_bounds(&mut current_x, bounds);
            }

            // Evaluate new function value
            current_f = function(&current_x.view());
            function_evaluations += 1;

            // Record trajectory
            if let Some(ref mut tracker) = self.trajectory_tracker {
                tracker.record(iteration, &current_x.view(), current_f);
                tracker.record_gradient_norm(grad_norm);
                tracker.record_step_size(step_size);
            }

            // Check function tolerance convergence
            if let Some(prev_f) = self.previous_function_value {
                let abs_improvement = (prev_f - current_f).abs();
                let rel_improvement = if prev_f.abs() > 1e-14 {
                    abs_improvement / prev_f.abs()
                } else {
                    abs_improvement
                };

                // Check both absolute and relative improvements
                if abs_improvement < self.config.function_tolerance
                    || rel_improvement < self.config.function_tolerance
                {
                    break;
                }
            }

            // Update previous function value for next iteration
            self.previous_function_value = Some(current_f);
        }

        let total_time = start_time.elapsed().as_secs_f64();

        // Generate results
        let success = iteration < self.config.max_nit;
        let message = if success {
            "Optimization completed successfully".to_string()
        } else {
            "Maximum iterations reached".to_string()
        };

        // Create visualization if enabled
        let visualization_paths = if let (Some(ref visualizer), Some(ref tracker)) =
            (&self.visualizer, &self.trajectory_tracker)
        {
            self.generate_visualization(visualizer, tracker.trajectory())?
        } else {
            Vec::new()
        };

        // Generate performance report
        let performance_report = self.generate_performance_report(
            total_time,
            function_evaluations,
            gradient_evaluations,
        )?;

        Ok(UnifiedOptimizationResults {
            base_result: OptimizeResults::<f64> {
                x: current_x,
                fun: current_f,
                success,
                message,
                nit: iteration,
                nfev: function_evaluations,
                njev: gradient_evaluations,
                ..OptimizeResults::<f64>::default()
            },
            visualization_paths,
            performance_report,
            self_tuning_report: self
                .self_tuning_optimizer
                .as_ref()
                .map(|t| t.generate_report()),
            distributed_stats: self.distributed_context.as_ref().map(|d| d.stats().clone()),
        })
    }

    /// Compute numerical gradient using finite differences
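    ///
    /// Uses a central difference with step size `h = 1e-8`:
    /// `g[i] ~= (f(x + h*e_i) - f(x - h*e_i)) / (2 * h)`.
    /// When GPU acceleration is enabled and a GPU context is present, the GPU path is used instead.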
    fn compute_numerical_gradient<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
    ) -> ScirsResult<Array1<f64>>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        let n = x.len();
        let mut gradient = Array1::zeros(n);
        let h = 1e-8;

        if self.config.use_gpu {
            // Use GPU-accelerated finite differences if available
            if let Some(ref gpu_ctx) = self.gpu_context {
                return gpu_ctx.compute_gradient_finite_diff(function, x, h);
            }
        }

        // CPU finite differences
        for i in 0..n {
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[i] += h;
            x_minus[i] -= h;

            gradient[i] = (function(&x_plus.view()) - function(&x_minus.view())) / (2.0 * h);
        }

        Ok(gradient)
    }

    /// Compute search direction using GPU acceleration
    fn compute_gpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        if let Some(ref gpu_ctx) = self.gpu_context {
            // Use GPU-accelerated quasi-Newton or other advanced methods
            gpu_ctx.compute_search_direction(gradient)
        } else {
            // Fallback to simple steepest descent
            Ok(-gradient.clone())
        }
    }

    /// Compute search direction using CPU
    fn compute_cpu_search_direction(&self, gradient: &Array1<f64>) -> ScirsResult<Array1<f64>> {
        // Simple steepest descent for now
        Ok(-gradient.clone())
    }

    /// Perform distributed line search
    fn distributed_line_search<F>(
        &mut self,
        _dist_ctx: &mut DistributedOptimizationContext<M>,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64 + Clone + Send + Sync,
    {
        // Distribute line search evaluation across processes
        let _step_sizes = Array1::from(vec![0.001, 0.01, 0.1, 1.0, 10.0]);

        // For simplicity, return a basic step size
        // In a full implementation, this would distribute evaluations
        self.standard_line_search(function, x, direction)
    }

    /// Standard CPU line search
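    ///
    /// Backtracking (Armijo) line search: starting from `alpha = 1.0`, the step is shrunk
    /// by `rho = 0.5` until the sufficient-decrease condition
    /// `f(x + alpha * d) <= f(x) + c1 * alpha * (g . d)` holds with `c1 = 1e-4`, or until
    /// 20 halvings have been tried. Because the direction here is `d = -g`, the directional
    /// derivative `g . d` is computed as the negative squared norm of `d`.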
    fn standard_line_search<F>(
        &self,
        function: &F,
        x: &Array1<f64>,
        direction: &Array1<f64>,
    ) -> ScirsResult<f64>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        // Simple backtracking line search
        let mut alpha = 1.0;
        let c1 = 1e-4;
        let rho = 0.5;

        let f0 = function(&x.view());
        let grad_dot_dir = -direction.iter().map(|&d| d * d).sum::<f64>(); // Since direction = -gradient

        for _ in 0..20 {
            let mut x_new = x.clone();
            for i in 0..x.len() {
                x_new[i] += alpha * direction[i];
            }

            let f_new = function(&x_new.view());

            if f_new <= f0 + c1 * alpha * grad_dot_dir {
                return Ok(alpha);
            }

            alpha *= rho;
        }

        Ok(alpha)
    }

    /// Apply bounds constraints
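    ///
    /// Each coordinate is clamped into its `(lower, upper)` interval. Coordinates without a
    /// corresponding bounds entry, or with `None` on a side, are left unconstrained on that side.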
    fn apply_bounds(&self, x: &mut Array1<f64>, bounds: &[(Option<f64>, Option<f64>)]) {
        for (i, xi) in x.iter_mut().enumerate() {
            if i < bounds.len() {
                if let Some(lower) = bounds[i].0 {
                    if *xi < lower {
                        *xi = lower;
                    }
                }
                if let Some(upper) = bounds[i].1 {
                    if *xi > upper {
                        *xi = upper;
                    }
                }
            }
        }
    }

    /// Apply tuned parameters from self-tuning optimizer
    fn apply_tuned_parameters(
        &mut self,
        parameters: &std::collections::HashMap<String, ParameterValue>,
    ) -> ScirsResult<()> {
        for (name, value) in parameters {
            match name.as_str() {
                "learning_rate" | "step_size" => {
                    // Update step size or learning rate
                    if let Some(f_val) = value.as_f64() {
                        // Apply to algorithm-specific parameters
                        self.update_algorithm_parameter("step_size", f_val)?;
                    }
                }
                "tolerance" => {
                    if let Some(f_val) = value.as_f64() {
                        // Update convergence tolerance
                        self.update_algorithm_parameter("tolerance", f_val)?;
                    }
                }
                _ => {
                    // Handle other tunable parameters
                }
            }
        }
        Ok(())
    }

    /// Update specific algorithm parameters
    fn update_algorithm_parameter(&mut self, _name: &str, _value: f64) -> ScirsResult<()> {
        // Update internal algorithm parameters based on self-tuning
        // This would be algorithm-specific; currently a placeholder stub
        Ok(())
    }

    /// Generate visualization outputs
    fn generate_visualization(
        &self,
        visualizer: &OptimizationVisualizer,
        trajectory: &crate::visualization::OptimizationTrajectory,
    ) -> ScirsResult<Vec<String>> {
        let mut paths = Vec::new();

        if let Some(ref output_dir) = self.config.output_directory {
            let output_path = Path::new(output_dir);
            std::fs::create_dir_all(output_path)?;

            // Generate convergence plot
            let convergence_path = output_path.join("convergence.svg");
            visualizer.plot_convergence(trajectory, &convergence_path)?;
            paths.push(convergence_path.to_string_lossy().to_string());

            // Generate parameter trajectory if 2D
            if !trajectory.parameters.is_empty() && trajectory.parameters[0].len() == 2 {
                let trajectory_path = output_path.join("trajectory.svg");
                visualizer.plot_parameter_trajectory(trajectory, &trajectory_path)?;
                paths.push(trajectory_path.to_string_lossy().to_string());
            }

            // Generate comprehensive report
            visualizer.create_optimization_report(trajectory, output_path)?;
            paths.push(
                output_path
                    .join("summary.html")
                    .to_string_lossy()
                    .to_string(),
            );
        }

        Ok(paths)
    }

    /// Generate performance report
    fn generate_performance_report(
        &self,
        total_time: f64,
        function_evaluations: usize,
        gradient_evaluations: usize,
    ) -> ScirsResult<String> {
        let mut report = String::from("Unified Optimization Performance Report\n");
        report.push_str("=========================================\n\n");

        report.push_str(&format!("Total Time: {:.3}s\n", total_time));
        report.push_str(&format!("Function Evaluations: {}\n", function_evaluations));
        report.push_str(&format!("Gradient Evaluations: {}\n", gradient_evaluations));

        if total_time > 0.0 {
            report.push_str(&format!(
                "Function Evaluations per Second: {:.2}\n",
                function_evaluations as f64 / total_time
            ));
        }

        // Add distributed performance if available
        if let Some(ref dist_ctx) = self.distributed_context {
            report.push_str("\nDistributed Performance:\n");
            report.push_str(&dist_ctx.stats().generate_report());
        }

        // Add GPU performance if available
        if self.acceleration_manager.is_some() {
            report.push_str("\nGPU Acceleration Performance:\n");
            report.push_str("GPU acceleration metrics available\n");
            // Note: Performance reporting requires specific GPU optimizer instance
        }

        Ok(report)
    }
}

/// Results from unified optimization
#[derive(Debug, Clone)]
pub struct UnifiedOptimizationResults {
    /// Base optimization results
    pub base_result: OptimizeResults<f64>,
    /// Paths to generated visualization files
    pub visualization_paths: Vec<String>,
    /// Performance report
    pub performance_report: String,
    /// Self-tuning report if enabled
    pub self_tuning_report: Option<String>,
    /// Distributed statistics if enabled
    pub distributed_stats: Option<crate::distributed::DistributedStats>,
}

impl UnifiedOptimizationResults {
    /// Get the final optimized parameters
    pub fn x(&self) -> &Array1<f64> {
        &self.base_result.x
    }

    /// Get the final function value
    pub fn fun(&self) -> f64 {
        self.base_result.fun
    }

    /// Check if optimization was successful
    pub fn success(&self) -> bool {
        self.base_result.success
    }

    /// Get the optimization message
    pub fn message(&self) -> &str {
        &self.base_result.message
    }

    /// Get number of iterations performed
    pub fn iterations(&self) -> usize {
        self.base_result.nit
    }

    /// Get number of iterations performed (alias for iterations)
    pub fn nit(&self) -> usize {
        self.base_result.nit
    }

    /// Print comprehensive results summary
    pub fn print_summary(&self) {
        println!("Unified Optimization Results");
        println!("============================");
        println!("Success: {}", self.success());
        println!("Final function value: {:.6e}", self.fun());
        println!("Iterations: {}", self.nit());
        println!("Function evaluations: {}", self.base_result.nfev);

        if self.base_result.njev > 0 {
            println!("Gradient evaluations: {}", self.base_result.njev);
        }

        if !self.visualization_paths.is_empty() {
            println!("\nGenerated visualizations:");
            for path in &self.visualization_paths {
                println!("  {}", path);
            }
        }

        if let Some(ref self_tuning) = self.self_tuning_report {
            println!("\nSelf-Tuning Report:");
            println!("{}", self_tuning);
        }

        if let Some(ref dist_stats) = self.distributed_stats {
            println!("\nDistributed Performance:");
            println!("{}", dist_stats.generate_report());
        }

        println!("\nPerformance Report:");
        println!("{}", self.performance_report);
    }
}

/// Convenience functions for common optimization scenarios
pub mod presets {
    use super::*;

    /// Create configuration for high-performance distributed optimization
    pub fn distributed_gpuconfig(_numprocesses: usize) -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: true,
            distributedconfig: Some(crate::distributed::DistributedConfig {
                distribution_strategy: crate::distributed::DistributionStrategy::DataParallel,
                load_balancing: crate::distributed::LoadBalancingConfig::default(),
                communication: crate::distributed::CommunicationConfig::default(),
                fault_tolerance: crate::distributed::FaultToleranceConfig::default(),
            }),
            use_gpu: true,
            gpuconfig: Some(GpuOptimizationConfig::default()),
            accelerationconfig: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::Hybrid,
                update_frequency: 25,
                learning_rate: 0.1,
                memory_window: 100,
                use_bayesian_tuning: true,
                exploration_factor: 0.15,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("distributed_gpu_optimization".to_string()),
            max_nit: 2000,
            function_tolerance: 1e-8,
            gradient_tolerance: 1e-8,
        }
    }

    /// Create configuration for memory-efficient large-scale optimization
    pub fn large_scaleconfig() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributedconfig: None,
            use_gpu: true,
            gpuconfig: Some(GpuOptimizationConfig::default()),
            accelerationconfig: Some(AccelerationConfig::default()),
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::PerformanceBased,
                update_frequency: 50,
                learning_rate: 0.05,
                memory_window: 200,
                use_bayesian_tuning: false,
                exploration_factor: 0.1,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig::default()),
            output_directory: Some("large_scale_optimization".to_string()),
            max_nit: 5000,
            function_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
        }
    }

    /// Create configuration for interactive optimization with real-time visualization
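    ///
    /// A hedged sketch of wiring this preset into the optimizer (marked `ignore`; it uses the
    /// `MockMPI` placeholder type from the tests when no real MPI interface is supplied):
    ///
    /// ```ignore
    /// let config = presets::interactiveconfig();
    /// let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
    ///     UnifiedOptimizer::new(config, None)?;
    /// ```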
    pub fn interactiveconfig() -> UnifiedOptimizationConfig {
        UnifiedOptimizationConfig {
            use_distributed: false,
            distributedconfig: None,
            use_gpu: false,
            gpuconfig: None,
            accelerationconfig: None,
            use_self_tuning: true,
            self_tuningconfig: Some(SelfTuningConfig {
                adaptation_strategy: AdaptationStrategy::ConvergenceBased,
                update_frequency: 10,
                learning_rate: 0.2,
                memory_window: 50,
                use_bayesian_tuning: true,
                exploration_factor: 0.2,
            }),
            enable_visualization: true,
            visualizationconfig: Some(VisualizationConfig {
                format: crate::visualization::OutputFormat::Html,
                width: 1200,
                height: 800,
                title: Some("Interactive Optimization".to_string()),
                show_grid: true,
                log_scale_y: false,
                color_scheme: crate::visualization::ColorScheme::Scientific,
                show_legend: true,
                custom_style: None,
            }),
            output_directory: Some("interactive_optimization".to_string()),
            max_nit: 500,
            function_tolerance: 1e-4,
            gradient_tolerance: 1e-4,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_unifiedconfig_creation() {
        let config = UnifiedOptimizationConfig::default();
        assert!(!config.use_distributed);
        assert!(config.use_self_tuning);
        assert!(!config.use_gpu);
        assert!(config.enable_visualization);
    }

    #[test]
    fn test_presetconfigs() {
        let large_scale = presets::large_scaleconfig();
        assert!(large_scale.use_gpu);
        assert!(large_scale.use_self_tuning);
        assert!(!large_scale.use_distributed);

        let interactive = presets::interactiveconfig();
        assert!(!interactive.use_gpu);
        assert!(interactive.use_self_tuning);
        assert!(interactive.enable_visualization);
    }

    #[test]
    fn test_bounds_application() {
        // Test bounds constraint application
        let config = UnifiedOptimizationConfig::default();
        let _optimizer: Result<UnifiedOptimizer<crate::distributed::MockMPI>, _> =
            UnifiedOptimizer::new(config, None);

        // This would test the bounds application logic
        // Implementation depends on the specific test setup
    }

    #[test]
    fn test_rosenbrock_optimization() {
        // Test optimization on the Rosenbrock function
        let config = presets::interactiveconfig();

        // Underscore-prefixed: not yet used until the full optimization test is written
        let _rosenbrock = |x: &ArrayView1<f64>| -> f64 {
            let x0 = x[0];
            let x1 = x[1];
            (1.0 - x0).powi(2) + 100.0 * (x1 - x0.powi(2)).powi(2)
        };

        let _initial_guess = array![-1.0, 1.0];

        // Create optimizer without MPI for testing
        let mut optimizer: UnifiedOptimizer<crate::distributed::MockMPI> =
            UnifiedOptimizer::new(config, None).unwrap();

        // Register tunable parameters
        optimizer
            .register_tunable_parameter("step_size", TunableParameter::new(0.01, 0.001, 0.1))
            .unwrap();

        // This would run the actual optimization in a full test
        // For now, just test that the setup works (construction and registration succeeded)
    }
}
849}