#![allow(dead_code)]
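//! Sensitivity analysis of sampler, penalty, and formulation parameters.
//!
//! Supports one-at-a-time, Morris, Sobol, Latin hypercube, and full factorial
//! screening, with optional plotting helpers behind the `scirs` feature.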

#[cfg(feature = "dwave")]
use crate::compile::CompiledModel;
use crate::sampler::Sampler;
use scirs2_core::random::prelude::*;
use scirs2_core::SliceRandomExt;
use std::collections::HashMap;
use std::error::Error;

#[cfg(feature = "scirs")]
use crate::scirs_stub::{
    scirs2_optimization::bayesian::{BayesianOptimizer, GaussianProcess},
    scirs2_plot::{Bar, Heatmap, Line, Plot, Scatter},
    scirs2_statistics::descriptive::{mean, std_dev},
};

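/// A tunable input to the sensitivity analysis: a sampler hyperparameter,
/// a constraint penalty weight, a formulation parameter, or a discrete choice.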
#[derive(Debug, Clone)]
pub enum ParameterType {
    SamplerParameter {
        name: String,
        default_value: f64,
        min_value: f64,
        max_value: f64,
    },
    PenaltyWeight {
        constraint_name: String,
        default_weight: f64,
        min_weight: f64,
        max_weight: f64,
    },
    FormulationParameter {
        name: String,
        default_value: f64,
        variation_range: (f64, f64),
    },
    DiscreteChoice {
        name: String,
        options: Vec<String>,
        default_index: usize,
    },
}

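/// Experiment design used to explore the parameter space.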
#[derive(Debug, Clone)]
pub enum SensitivityMethod {
    OneAtATime {
        num_points: usize,
        include_interactions: bool,
    },
    Morris {
        num_trajectories: usize,
        num_levels: usize,
    },
    Sobol {
        num_samples: usize,
        compute_second_order: bool,
    },
    LatinHypercube { num_samples: usize },
    Factorial { levels_per_factor: usize },
}

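/// Aggregated results of a sensitivity analysis run.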
#[derive(Debug, Clone)]
pub struct SensitivityResults {
    pub sensitivities: HashMap<String, ParameterSensitivity>,
    pub main_effects: HashMap<String, f64>,
    pub interaction_effects: Option<HashMap<(String, String), f64>>,
    pub sobol_indices: Option<SobolIndices>,
    pub optimal_parameters: HashMap<String, f64>,
    pub robustness_metrics: RobustnessMetrics,
}

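/// Sensitivity statistics for a single parameter.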
#[derive(Debug, Clone)]
pub struct ParameterSensitivity {
    pub name: String,
    pub sensitivity_index: f64,
    pub objective_gradient: f64,
    pub constraint_impact: f64,
    pub optimal_value: f64,
    pub confidence_interval: (f64, f64),
    pub response_curve: Vec<(f64, f64)>,
}

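/// Sobol variance-decomposition indices.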
#[derive(Debug, Clone)]
pub struct SobolIndices {
    pub first_order: HashMap<String, f64>,
    pub total_indices: HashMap<String, f64>,
    pub second_order: Option<HashMap<(String, String), f64>>,
}

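/// Robustness of the objective across the evaluated configurations.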
#[derive(Debug, Clone)]
pub struct RobustnessMetrics {
    pub objective_cv: f64,
    pub constraint_satisfaction_prob: f64,
    pub worst_case_objective: f64,
    pub best_case_objective: f64,
    pub stability_regions: HashMap<String, (f64, f64)>,
}

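/// Drives a sensitivity analysis for a set of tunable parameters against a
/// given sampler.
///
/// A minimal construction sketch, mirroring the unit test at the bottom of
/// this file (the `temperature` parameter is illustrative only):
///
/// ```ignore
/// let analyzer = SensitivityAnalyzer::new(
///     SASampler::new(None),
///     vec![ParameterType::SamplerParameter {
///         name: "temperature".to_string(),
///         default_value: 1.0,
///         min_value: 0.1,
///         max_value: 10.0,
///     }],
///     SensitivityMethod::OneAtATime {
///         num_points: 5,
///         include_interactions: false,
///     },
/// )
/// .with_reads_per_eval(200);
/// ```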
pub struct SensitivityAnalyzer<S: Sampler> {
    sampler: S,
    parameters: Vec<ParameterType>,
    method: SensitivityMethod,
    num_reads_per_eval: usize,
    use_parallel: bool,
}

impl<S: Sampler> SensitivityAnalyzer<S> {
    pub const fn new(
        sampler: S,
        parameters: Vec<ParameterType>,
        method: SensitivityMethod,
    ) -> Self {
        Self {
            sampler,
            parameters,
            method,
            num_reads_per_eval: 100,
            use_parallel: true,
        }
    }

    pub const fn with_reads_per_eval(mut self, num_reads: usize) -> Self {
        self.num_reads_per_eval = num_reads;
        self
    }

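    /// Runs the configured sensitivity method against `problem` and returns
    /// the aggregated results (available only with the `dwave` feature).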
    #[cfg(feature = "dwave")]
    pub fn analyze(
        &mut self,
        problem: &CompiledModel,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        match &self.method {
            SensitivityMethod::OneAtATime {
                num_points,
                include_interactions,
            } => self.one_at_a_time_analysis(problem, *num_points, *include_interactions),
            SensitivityMethod::Morris {
                num_trajectories,
                num_levels,
            } => self.morris_analysis(problem, *num_trajectories, *num_levels),
            SensitivityMethod::Sobol {
                num_samples,
                compute_second_order,
            } => self.sobol_analysis(problem, *num_samples, *compute_second_order),
            SensitivityMethod::LatinHypercube { num_samples } => {
                self.latin_hypercube_analysis(problem, *num_samples)
            }
            SensitivityMethod::Factorial { levels_per_factor } => {
                self.factorial_analysis(problem, *levels_per_factor)
            }
        }
    }

    #[cfg(feature = "dwave")]
    fn one_at_a_time_analysis(
        &mut self,
        problem: &CompiledModel,
        num_points: usize,
        include_interactions: bool,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        let mut sensitivities = HashMap::new();
        let mut main_effects = HashMap::new();
        let mut response_data = HashMap::new();

        let baseline_params = self.get_default_parameters();
        let baseline_result = self.evaluate_configuration(problem, &baseline_params)?;
        let baseline_objective = baseline_result.best_objective;

        for param in self.parameters.clone() {
            let param_name = self.get_parameter_name(&param);
            let (min_val, max_val) = self.get_parameter_range(&param);

            let mut response_curve = Vec::new();
            let mut objectives = Vec::new();

            for i in 0..num_points {
                // Guard against a single-point sweep to avoid dividing by zero.
                let t = i as f64 / (num_points - 1).max(1) as f64;
                let value = min_val + t * (max_val - min_val);

                let mut params = baseline_params.clone();
                params.insert(param_name.clone(), value);

                let result = self.evaluate_configuration(problem, &params)?;
                response_curve.push((value, result.best_objective));
                objectives.push(result.best_objective);
            }

            let gradient = self.compute_gradient(&response_curve);
            let sensitivity_index = self.compute_sensitivity_index(&objectives, baseline_objective);
            let (optimal_value, _) = response_curve
                .iter()
                .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
                .copied()
                .unwrap_or_else(|| (baseline_params[&param_name], baseline_objective));

            sensitivities.insert(
                param_name.clone(),
                ParameterSensitivity {
                    name: param_name.clone(),
                    sensitivity_index,
                    objective_gradient: gradient,
                    constraint_impact: 0.0,
                    optimal_value,
                    confidence_interval: self.compute_confidence_interval(&objectives),
                    response_curve: response_curve.clone(),
                },
            );

            main_effects.insert(param_name.clone(), sensitivity_index);
            response_data.insert(param_name, response_curve);
        }

        let interaction_effects = if include_interactions {
            Some(self.compute_interaction_effects(problem, &baseline_params)?)
        } else {
            None
        };

        let robustness_metrics = self.compute_robustness_metrics(&response_data);

        Ok(SensitivityResults {
            sensitivities,
            main_effects,
            interaction_effects,
            sobol_indices: None,
            optimal_parameters: self.find_optimal_parameters(&response_data),
            robustness_metrics,
        })
    }

    #[cfg(feature = "dwave")]
    fn morris_analysis(
        &mut self,
        problem: &CompiledModel,
        num_trajectories: usize,
        num_levels: usize,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        let mut elementary_effects = HashMap::new();

        for _ in 0..num_trajectories {
            let trajectory = self.generate_morris_trajectory(num_levels)?;

            for i in 0..trajectory.len() - 1 {
                let params1 = &trajectory[i];
                let params2 = &trajectory[i + 1];

                let result1 = self.evaluate_configuration(problem, params1)?;
                let result2 = self.evaluate_configuration(problem, params2)?;

                for (key, &val1) in params1 {
                    if let Some(&val2) = params2.get(key) {
                        if (val1 - val2).abs() > 1e-10 {
                            let delta = val2 - val1;
                            let effect = (result2.best_objective - result1.best_objective) / delta;

                            elementary_effects
                                .entry(key.clone())
                                .or_insert_with(Vec::new)
                                .push(effect);
                        }
                    }
                }
            }
        }

        let mut sensitivities = HashMap::new();
        let mut main_effects = HashMap::new();

        for (param_name, effects) in elementary_effects {
            let mean_effect = effects.iter().sum::<f64>() / effects.len() as f64;
            let mean_abs_effect =
                effects.iter().map(|e| e.abs()).sum::<f64>() / effects.len() as f64;
            let std_effect = {
                let variance = effects
                    .iter()
                    .map(|e| (e - mean_effect).powi(2))
                    .sum::<f64>()
                    / effects.len() as f64;
                variance.sqrt()
            };

            sensitivities.insert(
                param_name.clone(),
                ParameterSensitivity {
                    name: param_name.clone(),
                    sensitivity_index: mean_abs_effect,
                    objective_gradient: mean_effect,
                    constraint_impact: 0.0,
                    optimal_value: 0.0,
                    confidence_interval: (
                        2.0f64.mul_add(-std_effect, mean_effect),
                        2.0f64.mul_add(std_effect, mean_effect),
                    ),
                    response_curve: Vec::new(),
                },
            );

            main_effects.insert(param_name, mean_abs_effect);
        }

        Ok(SensitivityResults {
            sensitivities,
            main_effects,
            interaction_effects: None,
            sobol_indices: None,
            optimal_parameters: HashMap::new(),
            robustness_metrics: RobustnessMetrics {
                objective_cv: 0.0,
                constraint_satisfaction_prob: 1.0,
                worst_case_objective: 0.0,
                best_case_objective: 0.0,
                stability_regions: HashMap::new(),
            },
        })
    }

    #[cfg(feature = "dwave")]
    fn sobol_analysis(
        &mut self,
        problem: &CompiledModel,
        num_samples: usize,
        compute_second_order: bool,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        let (sample_a, sample_b) = self.generate_sobol_samples(num_samples)?;

        let mut y_a = Vec::new();
        let mut y_b = Vec::new();

        for i in 0..num_samples {
            let result_a = self.evaluate_configuration(problem, &sample_a[i])?;
            let result_b = self.evaluate_configuration(problem, &sample_b[i])?;
            y_a.push(result_a.best_objective);
            y_b.push(result_b.best_objective);
        }

        let mut first_order = HashMap::new();
        let mut total_indices = HashMap::new();

        let var_total = variance(&y_a);

        for param in self.parameters.clone() {
            let param_name = self.get_parameter_name(&param);

            let mut y_ab_i = Vec::new();
            for i in 0..num_samples {
                let mut params_ab_i = sample_a[i].clone();
                params_ab_i.insert(param_name.clone(), sample_b[i][&param_name]);

                let result = self.evaluate_configuration(problem, &params_ab_i)?;
                y_ab_i.push(result.best_objective);
            }

            let s_i = self.compute_first_order_index(&y_a, &y_b, &y_ab_i, var_total);
            let st_i = self.compute_total_index(&y_a, &y_ab_i, var_total);

            first_order.insert(param_name.clone(), s_i);
            total_indices.insert(param_name, st_i);
        }

        let second_order = if compute_second_order {
            Some(self.compute_second_order_indices(
                problem, &sample_a, &sample_b, &y_a, &y_b, var_total,
            )?)
        } else {
            None
        };

        let mut sensitivities = HashMap::new();
        for param in self.parameters.clone() {
            let param_name = self.get_parameter_name(&param);
            sensitivities.insert(
                param_name.clone(),
                ParameterSensitivity {
                    name: param_name.clone(),
                    sensitivity_index: first_order[&param_name],
                    objective_gradient: 0.0,
                    constraint_impact: 0.0,
                    optimal_value: 0.0,
                    confidence_interval: (0.0, 0.0),
                    response_curve: Vec::new(),
                },
            );
        }

        Ok(SensitivityResults {
            sensitivities,
            main_effects: first_order.clone(),
            interaction_effects: None,
            sobol_indices: Some(SobolIndices {
                first_order,
                total_indices,
                second_order,
            }),
            optimal_parameters: HashMap::new(),
            robustness_metrics: self.compute_robustness_from_samples(&y_a, &y_b),
        })
    }

    #[cfg(feature = "dwave")]
    fn latin_hypercube_analysis(
        &mut self,
        problem: &CompiledModel,
        num_samples: usize,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        let samples = self.generate_latin_hypercube_samples(num_samples)?;

        let mut results = Vec::new();
        for sample in &samples {
            let result = self.evaluate_configuration(problem, sample)?;
            results.push((sample.clone(), result.best_objective));
        }

        let sensitivities = self.compute_regression_sensitivities(&results)?;

        let (optimal_params, _) = results
            .iter()
            .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
            .cloned()
            .ok_or("No results available for Latin hypercube analysis")?;

        Ok(SensitivityResults {
            sensitivities: sensitivities.clone(),
            main_effects: sensitivities
                .iter()
                .map(|(k, v)| (k.clone(), v.sensitivity_index))
                .collect(),
            interaction_effects: None,
            sobol_indices: None,
            optimal_parameters: optimal_params,
            robustness_metrics: self.compute_robustness_from_results(&results),
        })
    }

    #[cfg(feature = "dwave")]
    fn factorial_analysis(
        &mut self,
        problem: &CompiledModel,
        levels_per_factor: usize,
    ) -> Result<SensitivityResults, Box<dyn Error>> {
        let design = self.generate_factorial_design(levels_per_factor)?;

        let mut results = Vec::new();
        for config in &design {
            let result = self.evaluate_configuration(problem, config)?;
            results.push((config.clone(), result.best_objective));
        }

        let (main_effects, interaction_effects) =
            self.analyze_factorial_results(&results, levels_per_factor)?;

        let mut sensitivities = HashMap::new();
        for (param_name, effect) in &main_effects {
            sensitivities.insert(
                param_name.clone(),
                ParameterSensitivity {
                    name: param_name.clone(),
                    sensitivity_index: effect.abs(),
                    objective_gradient: *effect,
                    constraint_impact: 0.0,
                    optimal_value: 0.0,
                    confidence_interval: (0.0, 0.0),
                    response_curve: Vec::new(),
                },
            );
        }

        let (optimal_params, _) = results
            .iter()
            .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
            .cloned()
            .ok_or("No results available for factorial analysis")?;

        Ok(SensitivityResults {
            sensitivities,
            main_effects,
            interaction_effects: Some(interaction_effects),
            sobol_indices: None,
            optimal_parameters: optimal_params,
            robustness_metrics: self.compute_robustness_from_results(&results),
        })
    }

    #[cfg(feature = "dwave")]
    fn evaluate_configuration(
        &mut self,
        problem: &CompiledModel,
        params: &HashMap<String, f64>,
    ) -> Result<EvaluationResult, Box<dyn Error>> {
        self.apply_parameters(params)?;

        let mut qubo = problem.to_qubo();
        let qubo_tuple = (qubo.to_dense_matrix(), qubo.variable_map());
        let solutions = self
            .sampler
            .run_qubo(&qubo_tuple, self.num_reads_per_eval)?;

        let best_objective = solutions
            .iter()
            .map(|s| s.energy)
            .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .unwrap_or(f64::INFINITY);

        // Constraint checking is not performed here; violations are reported as zero.
        let constraint_violations = 0.0;

        Ok(EvaluationResult {
            best_objective,
            avg_objective: solutions.iter().map(|s| s.energy).sum::<f64>() / solutions.len() as f64,
            constraint_violations,
            num_feasible: solutions.len(),
        })
    }

    fn get_parameter_name(&self, param: &ParameterType) -> String {
        match param {
            ParameterType::SamplerParameter { name, .. } => name.clone(),
            ParameterType::PenaltyWeight {
                constraint_name, ..
            } => format!("penalty_{constraint_name}"),
            ParameterType::FormulationParameter { name, .. } => name.clone(),
            ParameterType::DiscreteChoice { name, .. } => name.clone(),
        }
    }

    fn get_parameter_range(&self, param: &ParameterType) -> (f64, f64) {
        match param {
            ParameterType::SamplerParameter {
                min_value,
                max_value,
                ..
            } => (*min_value, *max_value),
            ParameterType::PenaltyWeight {
                min_weight,
                max_weight,
                ..
            } => (*min_weight, *max_weight),
            ParameterType::FormulationParameter {
                variation_range, ..
            } => *variation_range,
            ParameterType::DiscreteChoice { options, .. } => (0.0, options.len() as f64 - 1.0),
        }
    }

    fn get_default_parameters(&self) -> HashMap<String, f64> {
        let mut params = HashMap::new();

        for param in self.parameters.clone() {
            let name = self.get_parameter_name(&param);
            let value = match param {
                ParameterType::SamplerParameter { default_value, .. } => default_value,
                ParameterType::PenaltyWeight { default_weight, .. } => default_weight,
                ParameterType::FormulationParameter { default_value, .. } => default_value,
                ParameterType::DiscreteChoice { default_index, .. } => default_index as f64,
            };
            params.insert(name, value);
        }

        params
    }

    fn apply_parameters(&self, _params: &HashMap<String, f64>) -> Result<(), Box<dyn Error>> {
        Ok(())
    }

    fn compute_gradient(&self, response_curve: &[(f64, f64)]) -> f64 {
        if response_curve.len() < 2 {
            return 0.0;
        }

        let n = response_curve.len();
        let dx = response_curve[n - 1].0 - response_curve[0].0;
        let dy = response_curve[n - 1].1 - response_curve[0].1;

        dy / dx
    }

    fn compute_sensitivity_index(&self, objectives: &[f64], baseline: f64) -> f64 {
        let max_diff = objectives
            .iter()
            .map(|&obj| (obj - baseline).abs())
            .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .unwrap_or(0.0);

        max_diff / baseline.abs().max(1.0)
    }

    fn compute_confidence_interval(&self, values: &[f64]) -> (f64, f64) {
        if values.is_empty() {
            return (0.0, 0.0);
        }

        let mean = values.iter().sum::<f64>() / values.len() as f64;
        let std = {
            let variance =
                values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / values.len() as f64;
            variance.sqrt()
        };

        (2.0f64.mul_add(-std, mean), 2.0f64.mul_add(std, mean))
    }

    #[cfg(feature = "dwave")]
    fn compute_interaction_effects(
        &self,
        _problem: &CompiledModel,
        _baseline_params: &HashMap<String, f64>,
    ) -> Result<HashMap<(String, String), f64>, Box<dyn Error>> {
        Ok(HashMap::new())
    }

    fn compute_robustness_metrics(
        &self,
        response_data: &HashMap<String, Vec<(f64, f64)>>,
    ) -> RobustnessMetrics {
        let all_objectives: Vec<f64> = response_data
            .values()
            .flat_map(|curve| curve.iter().map(|(_, obj)| *obj))
            .collect();

        let mean_obj = all_objectives.iter().sum::<f64>() / all_objectives.len() as f64;
        let std_obj = {
            let variance = all_objectives
                .iter()
                .map(|obj| (obj - mean_obj).powi(2))
                .sum::<f64>()
                / all_objectives.len() as f64;
            variance.sqrt()
        };

        RobustnessMetrics {
            objective_cv: std_obj / mean_obj.abs().max(1.0),
            constraint_satisfaction_prob: 1.0,
            worst_case_objective: all_objectives
                .iter()
                .copied()
                .fold(f64::NEG_INFINITY, f64::max),
            best_case_objective: all_objectives.iter().copied().fold(f64::INFINITY, f64::min),
            stability_regions: HashMap::new(),
        }
    }

    fn find_optimal_parameters(
        &self,
        response_data: &HashMap<String, Vec<(f64, f64)>>,
    ) -> HashMap<String, f64> {
        let mut optimal = HashMap::new();

        for (param_name, curve) in response_data {
            if let Some((opt_val, _)) = curve
                .iter()
                .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
            {
                optimal.insert(param_name.clone(), *opt_val);
            }
        }

        optimal
    }

    fn generate_morris_trajectory(
        &self,
        _num_levels: usize,
    ) -> Result<Vec<HashMap<String, f64>>, Box<dyn Error>> {
        // Placeholder: a full Morris design would step each parameter through
        // the requested levels; here only the default point is returned.
        Ok(vec![self.get_default_parameters()])
    }

    fn generate_sobol_samples(
        &self,
        num_samples: usize,
    ) -> Result<(Vec<HashMap<String, f64>>, Vec<HashMap<String, f64>>), Box<dyn Error>> {
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();

        let mut sample_a = Vec::new();
        let mut sample_b = Vec::new();

        for _ in 0..num_samples {
            let mut params_a = HashMap::new();
            let mut params_b = HashMap::new();

            for param in self.parameters.clone() {
                let name = self.get_parameter_name(&param);
                let (min_val, max_val) = self.get_parameter_range(&param);

                params_a.insert(name.clone(), rng.gen_range(min_val..max_val));
                params_b.insert(name, rng.gen_range(min_val..max_val));
            }

            sample_a.push(params_a);
            sample_b.push(params_b);
        }

        Ok((sample_a, sample_b))
    }

    fn compute_first_order_index(
        &self,
        y_a: &[f64],
        y_b: &[f64],
        y_ab_i: &[f64],
        var_total: f64,
    ) -> f64 {
        let n = y_a.len() as f64;
        let mean_product: f64 = y_a
            .iter()
            .zip(y_ab_i.iter())
            .map(|(a, ab)| a * ab)
            .sum::<f64>()
            / n;
        let mean_a = y_a.iter().sum::<f64>() / n;
        let mean_b = y_b.iter().sum::<f64>() / n;

        mean_a.mul_add(-mean_b, mean_product) / var_total
    }

    fn compute_total_index(&self, y_a: &[f64], y_ab_i: &[f64], var_total: f64) -> f64 {
        let n = y_a.len() as f64;
        let mean_sq_diff: f64 = y_a
            .iter()
            .zip(y_ab_i.iter())
            .map(|(a, ab)| (a - ab).powi(2))
            .sum::<f64>()
            / n;

        0.5 * mean_sq_diff / var_total
    }

    #[cfg(feature = "dwave")]
    fn compute_second_order_indices(
        &self,
        _problem: &CompiledModel,
        _sample_a: &[HashMap<String, f64>],
        _sample_b: &[HashMap<String, f64>],
        _y_a: &[f64],
        _y_b: &[f64],
        _var_total: f64,
    ) -> Result<HashMap<(String, String), f64>, Box<dyn Error>> {
        Ok(HashMap::new())
    }

    fn compute_robustness_from_samples(&self, y_a: &[f64], y_b: &[f64]) -> RobustnessMetrics {
        let all_y: Vec<f64> = y_a.iter().chain(y_b.iter()).copied().collect();

        let mean_y = all_y.iter().sum::<f64>() / all_y.len() as f64;
        let std_y = {
            let variance =
                all_y.iter().map(|y| (y - mean_y).powi(2)).sum::<f64>() / all_y.len() as f64;
            variance.sqrt()
        };

        RobustnessMetrics {
            objective_cv: std_y / mean_y.abs().max(1.0),
            constraint_satisfaction_prob: 1.0,
            worst_case_objective: all_y.iter().copied().fold(f64::NEG_INFINITY, f64::max),
            best_case_objective: all_y.iter().copied().fold(f64::INFINITY, f64::min),
            stability_regions: HashMap::new(),
        }
    }

    fn generate_latin_hypercube_samples(
        &self,
        num_samples: usize,
    ) -> Result<Vec<HashMap<String, f64>>, Box<dyn Error>> {
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();

        let mut samples = Vec::new();
        let n_params = self.parameters.len();

        let mut permutations: Vec<Vec<usize>> = Vec::new();
        for _ in 0..n_params {
            let mut perm: Vec<usize> = (0..num_samples).collect();
            perm.shuffle(&mut rng);
            permutations.push(perm);
        }

        for i in 0..num_samples {
            let mut sample = HashMap::new();

            for (j, param) in self.parameters.iter().enumerate() {
                let name = self.get_parameter_name(param);
                let (min_val, max_val) = self.get_parameter_range(param);

                let level = permutations[j][i];
                let value = ((level as f64 + rng.gen::<f64>()) / num_samples as f64)
                    .mul_add(max_val - min_val, min_val);

                sample.insert(name, value);
            }

            samples.push(sample);
        }

        Ok(samples)
    }

    fn compute_regression_sensitivities(
        &self,
        results: &[(HashMap<String, f64>, f64)],
    ) -> Result<HashMap<String, ParameterSensitivity>, Box<dyn Error>> {
        let mut sensitivities = HashMap::new();

        for param in self.parameters.clone() {
            let param_name = self.get_parameter_name(&param);

            let x: Vec<f64> = results
                .iter()
                .map(|(params, _)| params[&param_name])
                .collect();
            let y: Vec<f64> = results.iter().map(|(_, obj)| *obj).collect();

            let coeff = simple_linear_regression(&x, &y);

            sensitivities.insert(
                param_name.clone(),
                ParameterSensitivity {
                    name: param_name.clone(),
                    sensitivity_index: coeff.abs(),
                    objective_gradient: coeff,
                    constraint_impact: 0.0,
                    optimal_value: 0.0,
                    confidence_interval: (0.0, 0.0),
                    response_curve: Vec::new(),
                },
            );
        }

        Ok(sensitivities)
    }

    fn compute_robustness_from_results(
        &self,
        results: &[(HashMap<String, f64>, f64)],
    ) -> RobustnessMetrics {
        let objectives: Vec<f64> = results.iter().map(|(_, obj)| *obj).collect();

        let mean_obj = objectives.iter().sum::<f64>() / objectives.len() as f64;
        let std_obj = {
            let variance = objectives
                .iter()
                .map(|obj| (obj - mean_obj).powi(2))
                .sum::<f64>()
                / objectives.len() as f64;
            variance.sqrt()
        };

        RobustnessMetrics {
            objective_cv: std_obj / mean_obj.abs().max(1.0),
            constraint_satisfaction_prob: 1.0,
            worst_case_objective: objectives.iter().copied().fold(f64::NEG_INFINITY, f64::max),
            best_case_objective: objectives.iter().copied().fold(f64::INFINITY, f64::min),
            stability_regions: HashMap::new(),
        }
    }

    fn generate_factorial_design(
        &self,
        levels_per_factor: usize,
    ) -> Result<Vec<HashMap<String, f64>>, Box<dyn Error>> {
        let n_params = self.parameters.len();
        let total_combinations = levels_per_factor.pow(n_params as u32);

        let mut design = Vec::new();

        for i in 0..total_combinations {
            let mut config = HashMap::new();
            let mut idx = i;

            for param in self.parameters.clone() {
                let name = self.get_parameter_name(&param);
                let (min_val, max_val) = self.get_parameter_range(&param);

                let level = idx % levels_per_factor;
                idx /= levels_per_factor;

                let value = if levels_per_factor == 1 {
                    f64::midpoint(min_val, max_val)
                } else {
                    (level as f64 / (levels_per_factor - 1) as f64)
                        .mul_add(max_val - min_val, min_val)
                };

                config.insert(name, value);
            }

            design.push(config);
        }

        Ok(design)
    }

    fn analyze_factorial_results(
        &self,
        results: &[(HashMap<String, f64>, f64)],
        levels_per_factor: usize,
    ) -> Result<(HashMap<String, f64>, HashMap<(String, String), f64>), Box<dyn Error>> {
        let mut main_effects = HashMap::new();
        let mut interaction_effects = HashMap::new();

        for param in self.parameters.clone() {
            let param_name = self.get_parameter_name(&param);

            let mut level_sums = vec![0.0; levels_per_factor];
            let mut level_counts = vec![0; levels_per_factor];

            for (config, obj) in results {
                let value = config[&param_name];
                let (min_val, max_val) = self.get_parameter_range(&param);

                let level = if levels_per_factor == 1 {
                    0
                } else {
                    ((value - min_val) / (max_val - min_val) * (levels_per_factor - 1) as f64)
                        .round() as usize
                };

                if level < levels_per_factor {
                    level_sums[level] += obj;
                    level_counts[level] += 1;
                }
            }

            if levels_per_factor >= 2
                && level_counts[0] > 0
                && level_counts[levels_per_factor - 1] > 0
            {
                let low_mean = level_sums[0] / level_counts[0] as f64;
                let high_mean =
                    level_sums[levels_per_factor - 1] / level_counts[levels_per_factor - 1] as f64;
                main_effects.insert(param_name, high_mean - low_mean);
            } else {
                main_effects.insert(param_name, 0.0);
            }
        }

        for i in 0..self.parameters.len() {
            for j in i + 1..self.parameters.len() {
                let param1 = self.get_parameter_name(&self.parameters[i]);
                let param2 = self.get_parameter_name(&self.parameters[j]);

                // Interaction estimation is not implemented; record a zero placeholder.
                interaction_effects.insert((param1, param2), 0.0);
            }
        }

        Ok((main_effects, interaction_effects))
    }
}

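/// Summary of a single sampler run for one parameter configuration.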
struct EvaluationResult {
    best_objective: f64,
    avg_objective: f64,
    constraint_violations: f64,
    num_feasible: usize,
}

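/// Plotting helpers for sensitivity results; these are no-ops unless the
/// `scirs` feature is enabled.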
pub mod visualization {
    use super::*;

    pub fn plot_tornado_chart(
        results: &SensitivityResults,
        output_path: &str,
    ) -> Result<(), Box<dyn Error>> {
        #[cfg(feature = "scirs")]
        {
            let mut plot = Plot::new();

            let mut params: Vec<_> = results.main_effects.iter().collect();
            params.sort_by(|a, b| {
                b.1.abs()
                    .partial_cmp(&a.1.abs())
                    .unwrap_or(std::cmp::Ordering::Equal)
            });

            let names: Vec<String> = params.iter().map(|(k, _)| (*k).clone()).collect();
            let values: Vec<f64> = params.iter().map(|(_, v)| **v).collect();

            let bar = Bar::new(names, values).name("Sensitivity");

            plot.add_trace(bar);
            plot.set_title("Parameter Sensitivity (Tornado Chart)");
            plot.set_xlabel("Main Effect");
            plot.save(output_path)?;
        }

        Ok(())
    }

    pub fn plot_response_curves(
        results: &SensitivityResults,
        output_path: &str,
    ) -> Result<(), Box<dyn Error>> {
        #[cfg(feature = "scirs")]
        {
            let mut plot = Plot::new();

            for (param_name, sensitivity) in &results.sensitivities {
                if !sensitivity.response_curve.is_empty() {
                    let x: Vec<f64> = sensitivity.response_curve.iter().map(|(x, _)| *x).collect();
                    let y: Vec<f64> = sensitivity.response_curve.iter().map(|(_, y)| *y).collect();

                    let line = Line::new(x, y).name(param_name);

                    plot.add_trace(line);
                }
            }

            plot.set_title("Parameter Response Curves");
            plot.set_xlabel("Parameter Value");
            plot.set_ylabel("Objective");
            plot.save(output_path)?;
        }

        Ok(())
    }
}

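/// Sample variance of `values`, using the unbiased (n - 1) denominator.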
fn variance(values: &[f64]) -> f64 {
    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n - 1.0)
}

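/// Ordinary least-squares slope of `y` regressed on `x`.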
fn simple_linear_regression(x: &[f64], y: &[f64]) -> f64 {
    let n = x.len() as f64;
    let sum_x: f64 = x.iter().sum();
    let sum_y: f64 = y.iter().sum();
    let sum_xy: f64 = x.iter().zip(y.iter()).map(|(a, b)| a * b).sum();
    let sum_x2: f64 = x.iter().map(|a| a * a).sum();

    n.mul_add(sum_xy, -(sum_x * sum_y)) / n.mul_add(sum_x2, -(sum_x * sum_x))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::sampler::SASampler;

    #[test]
    fn test_one_at_a_time_analysis() {
        let sampler = SASampler::new(None);
        let parameters = vec![ParameterType::SamplerParameter {
            name: "temperature".to_string(),
            default_value: 1.0,
            min_value: 0.1,
            max_value: 10.0,
        }];

        let analyzer = SensitivityAnalyzer::new(
            sampler,
            parameters,
            SensitivityMethod::OneAtATime {
                num_points: 5,
                include_interactions: false,
            },
        );

        // Construction should succeed with the documented defaults.
        assert_eq!(analyzer.num_reads_per_eval, 100);
        assert_eq!(analyzer.parameters.len(), 1);
    }
}