quantrs2_anneal/meta_learning_optimization/multi_objective.rs

//! Multi-objective optimization for meta-learning

3use std::collections::{HashMap, VecDeque};
4use std::time::Instant;
5
6use super::config::{
7    ConstraintHandling, MultiObjectiveConfig, OptimizationObjective, ParetoFrontierConfig,
8    ScalarizationMethod,
9};
10use super::feature_extraction::{AlgorithmType, OptimizationConfiguration};
11
/// Multi-objective optimizer: scores candidate configurations against several
/// objectives, maintains a Pareto frontier of non-dominated solutions, and
/// delegates the final single-solution choice to a decision maker.
pub struct MultiObjectiveOptimizer {
    /// Configuration (objectives, scalarization, constraint handling, frontier limits)
    pub config: MultiObjectiveConfig,
    /// Archive of non-dominated solutions plus cached statistics
    pub pareto_frontier: ParetoFrontier,
    /// Scalarization methods; `scalarize` uses only the first entry
    pub scalarizers: Vec<Scalarizer>,
    /// Constraint handlers (currently placeholders returning zero penalty)
    pub constraint_handlers: Vec<ConstraintHandler>,
    /// Strategy-driven selector for picking one solution from the frontier
    pub decision_maker: DecisionMaker,
}
25
26impl MultiObjectiveOptimizer {
27    #[must_use]
28    pub fn new(config: MultiObjectiveConfig) -> Self {
29        Self {
30            pareto_frontier: ParetoFrontier::new(&config.pareto_config),
31            scalarizers: vec![Scalarizer::new(config.scalarization.clone())],
32            constraint_handlers: vec![ConstraintHandler::new(config.constraint_handling.clone())],
33            decision_maker: DecisionMaker::new(),
34            config,
35        }
36    }
37
38    pub fn optimize(
39        &mut self,
40        candidates: Vec<OptimizationConfiguration>,
41    ) -> Result<Vec<MultiObjectiveSolution>, String> {
42        let mut solutions = Vec::new();
43
44        for (i, candidate) in candidates.iter().enumerate() {
45            let objective_values = self.evaluate_objectives(candidate)?;
46
47            let solution = MultiObjectiveSolution {
48                id: format!("solution_{i}"),
49                objective_values,
50                decision_variables: candidate.clone(),
51                dominance_rank: 0,      // Will be calculated later
52                crowding_distance: 0.0, // Will be calculated later
53            };
54
55            solutions.push(solution);
56        }
57
58        // Update Pareto frontier
59        self.update_pareto_frontier(&mut solutions)?;
60
61        // Calculate dominance ranks and crowding distances
62        self.calculate_dominance_ranks(&mut solutions);
63        self.calculate_crowding_distances(&mut solutions);
64
65        Ok(solutions)
66    }
67
68    fn evaluate_objectives(&self, config: &OptimizationConfiguration) -> Result<Vec<f64>, String> {
69        let mut objective_values = Vec::new();
70
71        for objective in &self.config.objectives {
72            let value = match objective {
73                OptimizationObjective::SolutionQuality => {
74                    // Estimate solution quality based on algorithm type
75                    match config.algorithm {
76                        AlgorithmType::SimulatedAnnealing => 0.8,
77                        AlgorithmType::QuantumAnnealing => 0.9,
78                        AlgorithmType::TabuSearch => 0.7,
79                        _ => 0.6,
80                    }
81                }
82                OptimizationObjective::Runtime => {
83                    // Estimate runtime (lower is better, so we use negative)
84                    -config.resources.time.as_secs_f64() / 3600.0 // Convert to hours
85                }
86                OptimizationObjective::ResourceUsage => {
87                    // Estimate resource usage (lower is better)
88                    -(config.resources.memory as f64 / 1024.0 + config.resources.cpu)
89                }
90                OptimizationObjective::EnergyConsumption => {
91                    // Estimate energy consumption (lower is better)
92                    -(config.resources.cpu * config.resources.time.as_secs_f64() / 3600.0)
93                }
94                OptimizationObjective::Robustness => {
95                    // Estimate robustness based on algorithm characteristics
96                    match config.algorithm {
97                        AlgorithmType::QuantumAnnealing => 0.9,
98                        AlgorithmType::SimulatedAnnealing => 0.7,
99                        _ => 0.6,
100                    }
101                }
102                OptimizationObjective::Scalability => {
103                    // Estimate scalability
104                    match config.algorithm {
105                        AlgorithmType::TabuSearch => 0.9,
106                        AlgorithmType::QuantumAnnealing => 0.6,
107                        _ => 0.7,
108                    }
109                }
110                OptimizationObjective::Custom(_) => 0.5, // Default value
111            };
112
113            objective_values.push(value);
114        }
115
116        Ok(objective_values)
117    }
118
119    fn update_pareto_frontier(
120        &mut self,
121        solutions: &Vec<MultiObjectiveSolution>,
122    ) -> Result<(), String> {
123        let mut new_solutions = Vec::new();
124        let mut updated_solutions: Vec<MultiObjectiveSolution> = Vec::new();
125
126        // Check each solution for dominance
127        for solution in solutions {
128            let mut is_dominated = false;
129            let mut dominated_solutions = Vec::new();
130
131            // Compare with existing frontier solutions
132            for frontier_solution in &self.pareto_frontier.solutions {
133                if self.dominates(
134                    &frontier_solution.objective_values,
135                    &solution.objective_values,
136                ) {
137                    is_dominated = true;
138                    break;
139                } else if self.dominates(
140                    &solution.objective_values,
141                    &frontier_solution.objective_values,
142                ) {
143                    dominated_solutions.push(frontier_solution.id.clone());
144                }
145            }
146
147            if !is_dominated {
148                new_solutions.push(solution.clone());
149
150                // Remove dominated solutions from frontier
151                self.pareto_frontier
152                    .solutions
153                    .retain(|s| !dominated_solutions.contains(&s.id));
154            }
155        }
156
157        // Add new non-dominated solutions to frontier
158        self.pareto_frontier.solutions.extend(new_solutions);
159
160        // Limit frontier size
161        if self.pareto_frontier.solutions.len() > self.config.pareto_config.max_frontier_size {
162            self.prune_frontier()?;
163        }
164
165        // Update frontier statistics
166        self.update_frontier_statistics();
167
168        Ok(())
169    }
170
171    fn dominates(&self, obj1: &[f64], obj2: &[f64]) -> bool {
172        if obj1.len() != obj2.len() {
173            return false;
174        }
175
176        let mut at_least_one_better = false;
177        for (v1, v2) in obj1.iter().zip(obj2.iter()) {
178            if *v1 < *v2 - self.config.pareto_config.dominance_tolerance {
179                return false; // obj1 is worse in this objective
180            }
181            if *v1 > *v2 + self.config.pareto_config.dominance_tolerance {
182                at_least_one_better = true;
183            }
184        }
185
186        at_least_one_better
187    }
188
189    fn prune_frontier(&mut self) -> Result<(), String> {
190        // Use crowding distance to maintain diversity
191        self.calculate_crowding_distances_frontier();
192
193        // Sort by crowding distance (descending) and keep top solutions
194        self.pareto_frontier.solutions.sort_by(|a, b| {
195            b.crowding_distance
196                .partial_cmp(&a.crowding_distance)
197                .unwrap_or(std::cmp::Ordering::Equal)
198        });
199
200        self.pareto_frontier
201            .solutions
202            .truncate(self.config.pareto_config.max_frontier_size);
203
204        Ok(())
205    }
206
207    fn calculate_crowding_distances_frontier(&mut self) {
208        let num_solutions = self.pareto_frontier.solutions.len();
209        let num_objectives = if let Some(first) = self.pareto_frontier.solutions.first() {
210            first.objective_values.len()
211        } else {
212            return;
213        };
214
215        // Initialize crowding distances
216        for solution in &mut self.pareto_frontier.solutions {
217            solution.crowding_distance = 0.0;
218        }
219
220        // Calculate crowding distance for each objective
221        for obj_idx in 0..num_objectives {
222            // Sort by objective value
223            self.pareto_frontier.solutions.sort_by(|a, b| {
224                a.objective_values[obj_idx]
225                    .partial_cmp(&b.objective_values[obj_idx])
226                    .unwrap_or(std::cmp::Ordering::Equal)
227            });
228
229            // Set boundary solutions to infinite distance
230            if num_solutions > 2 {
231                self.pareto_frontier.solutions[0].crowding_distance = f64::INFINITY;
232                self.pareto_frontier.solutions[num_solutions - 1].crowding_distance = f64::INFINITY;
233
234                // Calculate distance for interior solutions
235                let obj_range = self.pareto_frontier.solutions[num_solutions - 1].objective_values
236                    [obj_idx]
237                    - self.pareto_frontier.solutions[0].objective_values[obj_idx];
238
239                if obj_range > 0.0 {
240                    for i in 1..num_solutions - 1 {
241                        let distance = (self.pareto_frontier.solutions[i + 1].objective_values
242                            [obj_idx]
243                            - self.pareto_frontier.solutions[i - 1].objective_values[obj_idx])
244                            / obj_range;
245                        self.pareto_frontier.solutions[i].crowding_distance += distance;
246                    }
247                }
248            }
249        }
250    }
251
252    fn calculate_dominance_ranks(&self, solutions: &mut Vec<MultiObjectiveSolution>) {
253        // Simple dominance ranking - all solutions get rank 1 for now
254        for solution in solutions.iter_mut() {
255            solution.dominance_rank = 1;
256        }
257    }
258
259    fn calculate_crowding_distances(&self, solutions: &mut Vec<MultiObjectiveSolution>) {
260        // Simple crowding distance calculation
261        for (i, solution) in solutions.iter_mut().enumerate() {
262            solution.crowding_distance = i as f64; // Simplified
263        }
264    }
265
266    fn update_frontier_statistics(&mut self) {
267        self.pareto_frontier.statistics.size = self.pareto_frontier.solutions.len();
268
269        // Calculate hypervolume (simplified)
270        self.pareto_frontier.statistics.hypervolume =
271            self.pareto_frontier.solutions.len() as f64 * 0.1;
272
273        // Calculate spread (simplified)
274        if self.pareto_frontier.solutions.len() > 1 {
275            self.pareto_frontier.statistics.spread = 1.0;
276        } else {
277            self.pareto_frontier.statistics.spread = 0.0;
278        }
279
280        // Update convergence and coverage
281        self.pareto_frontier.statistics.convergence = 0.8;
282        self.pareto_frontier.statistics.coverage = 0.9;
283    }
284
285    pub fn make_decision(
286        &mut self,
287        preferences: Option<UserPreferences>,
288    ) -> Result<MultiObjectiveSolution, String> {
289        if self.pareto_frontier.solutions.is_empty() {
290            return Err("No solutions in Pareto frontier".to_string());
291        }
292
293        self.decision_maker
294            .make_decision(&self.pareto_frontier.solutions, preferences)
295    }
296
297    pub fn scalarize(
298        &self,
299        solution: &MultiObjectiveSolution,
300        weights: &[f64],
301    ) -> Result<f64, String> {
302        if let Some(scalarizer) = self.scalarizers.first() {
303            scalarizer.scalarize(&solution.objective_values, weights)
304        } else {
305            Err("No scalarizer available".to_string())
306        }
307    }
308}
309
/// Pareto frontier: the archive of mutually non-dominated solutions.
#[derive(Debug)]
pub struct ParetoFrontier {
    /// Non-dominated solutions
    pub solutions: Vec<MultiObjectiveSolution>,
    /// Cached summary statistics (size, hypervolume proxy, spread, ...)
    pub statistics: FrontierStatistics,
    /// Update history — NOTE(review): never written by code in this module;
    /// confirm whether callers populate it or it is dead state.
    pub update_history: VecDeque<FrontierUpdate>,
}
320
321impl ParetoFrontier {
322    #[must_use]
323    pub const fn new(config: &ParetoFrontierConfig) -> Self {
324        Self {
325            solutions: Vec::new(),
326            statistics: FrontierStatistics {
327                size: 0,
328                hypervolume: 0.0,
329                spread: 0.0,
330                convergence: 0.0,
331                coverage: 0.0,
332            },
333            update_history: VecDeque::new(),
334        }
335    }
336}
337
/// A candidate configuration together with its multi-objective evaluation.
#[derive(Debug, Clone)]
pub struct MultiObjectiveSolution {
    /// Solution identifier (e.g. "solution_3")
    pub id: String,
    /// One score per configured objective; larger values are better
    pub objective_values: Vec<f64>,
    /// The optimization configuration this solution represents
    pub decision_variables: OptimizationConfiguration,
    /// Dominance rank (lower is better; 1 = non-dominated front)
    pub dominance_rank: usize,
    /// Crowding distance (diversity measure; larger = more isolated)
    pub crowding_distance: f64,
}
352
/// Summary statistics cached for a Pareto frontier.
///
/// NOTE(review): in this module these are filled with simplified placeholder
/// estimates (see `update_frontier_statistics`), not true indicator values.
#[derive(Debug, Clone)]
pub struct FrontierStatistics {
    /// Number of solutions on the frontier
    pub size: usize,
    /// Hypervolume indicator (placeholder proxy here)
    pub hypervolume: f64,
    /// Spread / diversity measure
    pub spread: f64,
    /// Convergence metric
    pub convergence: f64,
    /// Coverage metric
    pub coverage: f64,
}
367
/// Record of one change to the Pareto frontier.
///
/// NOTE(review): nothing in this module constructs `FrontierUpdate`; confirm
/// the intended writer before relying on the history.
#[derive(Debug, Clone)]
pub struct FrontierUpdate {
    /// When the update happened
    pub timestamp: Instant,
    /// IDs of solutions added
    pub solutions_added: Vec<String>,
    /// IDs of solutions removed
    pub solutions_removed: Vec<String>,
    /// Why the frontier changed
    pub reason: UpdateReason,
}
380
/// Reasons a Pareto frontier can change.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum UpdateReason {
    /// A new non-dominated solution was added
    NewNonDominated,
    /// Existing solutions were removed because a newcomer dominates them
    DominanceUpdate,
    /// Solutions were pruned to preserve diversity (crowding distance)
    CrowdingPruning,
    /// The configured maximum frontier size was reached
    SizeLimitReached,
}
393
/// Scalarization function: collapses an objective vector to a single score.
#[derive(Debug)]
pub struct Scalarizer {
    /// Scalarization method
    pub method: ScalarizationMethod,
    /// Reference point, required by `ReferencePoint` and optionally used by
    /// `WeightedTchebycheff` (origin assumed when absent)
    pub reference_point: Option<Vec<f64>>,
    /// Stored weights — NOTE(review): `scalarize` takes weights as an
    /// argument and never reads this field; confirm intended use.
    pub weights: Vec<f64>,
}
404
405impl Scalarizer {
406    #[must_use]
407    pub const fn new(method: ScalarizationMethod) -> Self {
408        Self {
409            method,
410            reference_point: None,
411            weights: Vec::new(),
412        }
413    }
414
415    pub fn scalarize(&self, objectives: &[f64], weights: &[f64]) -> Result<f64, String> {
416        if objectives.len() != weights.len() {
417            return Err("Objectives and weights length mismatch".to_string());
418        }
419
420        match &self.method {
421            ScalarizationMethod::WeightedSum => Ok(objectives
422                .iter()
423                .zip(weights.iter())
424                .map(|(obj, w)| obj * w)
425                .sum()),
426            ScalarizationMethod::WeightedTchebycheff => {
427                let default_reference = vec![0.0; objectives.len()];
428                let reference = self.reference_point.as_ref().unwrap_or(&default_reference);
429
430                let mut max_weighted_diff: f64 = 0.0;
431                for ((obj, ref_val), weight) in
432                    objectives.iter().zip(reference.iter()).zip(weights.iter())
433                {
434                    let weighted_diff = weight * (ref_val - obj).abs();
435                    max_weighted_diff = max_weighted_diff.max(weighted_diff);
436                }
437                Ok(max_weighted_diff)
438            }
439            ScalarizationMethod::AchievementScalarizing => {
440                // Simplified achievement scalarizing function
441                let weighted_sum: f64 = objectives
442                    .iter()
443                    .zip(weights.iter())
444                    .map(|(obj, w)| obj * w)
445                    .sum();
446                let max_objective: f64 = objectives.iter().fold(0.0_f64, |acc, &obj| acc.max(obj));
447                Ok(0.01f64.mul_add(max_objective, weighted_sum))
448            }
449            ScalarizationMethod::PenaltyBoundaryIntersection => {
450                // Simplified PBI
451                let weighted_sum: f64 = objectives
452                    .iter()
453                    .zip(weights.iter())
454                    .map(|(obj, w)| obj * w)
455                    .sum();
456                Ok(weighted_sum)
457            }
458            ScalarizationMethod::ReferencePoint => {
459                if let Some(ref_point) = &self.reference_point {
460                    let distance: f64 = objectives
461                        .iter()
462                        .zip(ref_point.iter())
463                        .map(|(obj, ref_val)| (obj - ref_val).powi(2))
464                        .sum::<f64>()
465                        .sqrt();
466                    Ok(-distance) // Negative because we want to minimize distance
467                } else {
468                    Err("Reference point not set for reference point method".to_string())
469                }
470            }
471        }
472    }
473}
474
/// Constraint handler: applies a constraint-handling strategy to solutions.
#[derive(Debug)]
pub struct ConstraintHandler {
    /// How constraint violations are folded into the objective(s)
    pub method: ConstraintHandling,
    /// Registered constraints — NOTE(review): never read by the visible
    /// placeholder `handle_constraints`; confirm intended use.
    pub constraints: Vec<Constraint>,
}
483
484impl ConstraintHandler {
485    #[must_use]
486    pub const fn new(method: ConstraintHandling) -> Self {
487        Self {
488            method,
489            constraints: Vec::new(),
490        }
491    }
492
493    pub const fn handle_constraints(
494        &self,
495        solution: &MultiObjectiveSolution,
496    ) -> Result<f64, String> {
497        // Simplified constraint handling - return penalty value
498        match self.method {
499            ConstraintHandling::PenaltyMethod => Ok(0.0), // No penalty for now
500            ConstraintHandling::BarrierMethod => Ok(0.0),
501            ConstraintHandling::LagrangianMethod => Ok(0.0),
502            ConstraintHandling::FeasibilityRules => Ok(0.0),
503            ConstraintHandling::MultiObjectiveConstraint => Ok(0.0),
504        }
505    }
506}
507
/// Definition of a single optimization constraint.
#[derive(Debug, Clone)]
pub struct Constraint {
    /// Constraint identifier
    pub id: String,
    /// Kind of constraint (equality, inequality, bound, custom)
    pub constraint_type: ConstraintType,
    /// Bounds or other parameters; meaning depends on `constraint_type`
    pub parameters: Vec<f64>,
    /// Allowed violation magnitude before the constraint counts as broken
    pub tolerance: f64,
}
520
/// Kinds of constraints a `Constraint` can express.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConstraintType {
    /// Equality constraint (g(x) = 0)
    Equality,
    /// Inequality constraint (g(x) <= 0)
    Inequality,
    /// Simple variable bound constraint
    Bound,
    /// Custom constraint identified by name
    Custom(String),
}
533
/// Decision maker: picks a single solution out of a Pareto-optimal set.
#[derive(Debug)]
pub struct DecisionMaker {
    /// Strategy used to rank and select solutions
    pub strategy: DecisionStrategy,
    /// Stored user preferences — NOTE(review): `make_decision` takes
    /// preferences as an argument and never reads this field; confirm.
    pub preferences: Option<UserPreferences>,
    /// Recent decisions, capped at 100 entries by `make_decision`
    pub decision_history: VecDeque<Decision>,
}
544
impl DecisionMaker {
    /// Create a decision maker that defaults to weighted-sum selection with
    /// no stored preferences and an empty decision history.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            strategy: DecisionStrategy::WeightedSum,
            preferences: None,
            decision_history: VecDeque::new(),
        }
    }

    /// Select one solution from `solutions` according to `self.strategy` and
    /// record the choice in the decision history (capped at 100 entries).
    ///
    /// `preferences` supplies objective weights for the weighted-sum
    /// strategy; when absent, equal weights are used. NOTE(review): if the
    /// preference weight vector is shorter than the objective vector, `zip`
    /// silently truncates — confirm callers pass matching lengths.
    ///
    /// # Errors
    /// Returns an error when `solutions` is empty or no solution can be
    /// selected.
    pub fn make_decision(
        &mut self,
        solutions: &[MultiObjectiveSolution],
        preferences: Option<UserPreferences>,
    ) -> Result<MultiObjectiveSolution, String> {
        if solutions.is_empty() {
            return Err("No solutions to choose from".to_string());
        }

        let selected_solution = match &self.strategy {
            DecisionStrategy::WeightedSum => {
                // Use equal weights if no preferences provided
                let weights = if let Some(ref prefs) = preferences {
                    prefs.objective_weights.clone()
                } else {
                    vec![
                        1.0 / solutions[0].objective_values.len() as f64;
                        solutions[0].objective_values.len()
                    ]
                };

                // Find solution with best (largest) weighted sum of objectives
                solutions
                    .iter()
                    .max_by(|a, b| {
                        let score_a: f64 = a
                            .objective_values
                            .iter()
                            .zip(weights.iter())
                            .map(|(obj, w)| obj * w)
                            .sum();
                        let score_b: f64 = b
                            .objective_values
                            .iter()
                            .zip(weights.iter())
                            .map(|(obj, w)| obj * w)
                            .sum();
                        score_a
                            .partial_cmp(&score_b)
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .ok_or("Failed to select solution")?
            }
            DecisionStrategy::Lexicographic => {
                // Simplified lexicographic ordering - only the first objective
                // is considered; tie-breaking by later objectives is not done
                solutions
                    .iter()
                    .max_by(|a, b| {
                        a.objective_values[0]
                            .partial_cmp(&b.objective_values[0])
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .ok_or("Failed to select solution")?
            }
            DecisionStrategy::InteractiveMethod => {
                // Simplified - just return first solution
                &solutions[0]
            }
            DecisionStrategy::GoalProgramming => {
                // Simplified goal programming - pick the solution with the
                // smallest Euclidean distance to the ideal point (1.0 per
                // objective)
                solutions
                    .iter()
                    .min_by(|a, b| {
                        let dist_a: f64 = a
                            .objective_values
                            .iter()
                            .map(|obj| (1.0 - obj).powi(2))
                            .sum::<f64>()
                            .sqrt();
                        let dist_b: f64 = b
                            .objective_values
                            .iter()
                            .map(|obj| (1.0 - obj).powi(2))
                            .sum::<f64>()
                            .sqrt();
                        dist_a
                            .partial_cmp(&dist_b)
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .ok_or("Failed to select solution")?
            }
            DecisionStrategy::TOPSIS => {
                // Simplified TOPSIS - uses crowding distance as a ranking proxy
                solutions
                    .iter()
                    .max_by(|a, b| {
                        a.crowding_distance
                            .partial_cmp(&b.crowding_distance)
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .ok_or("Failed to select solution")?
            }
        };

        // Record decision for auditability
        let decision = Decision {
            timestamp: Instant::now(),
            selected_solution_id: selected_solution.id.clone(),
            strategy_used: self.strategy.clone(),
            preferences_used: preferences,
            confidence: 0.8, // Fixed placeholder confidence
        };

        self.decision_history.push_back(decision);

        // Limit history size
        while self.decision_history.len() > 100 {
            self.decision_history.pop_front();
        }

        Ok(selected_solution.clone())
    }
}
668
/// Strategies for picking one solution from a Pareto-optimal set.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DecisionStrategy {
    /// Weighted sum of objective values (default)
    WeightedSum,
    /// Lexicographic ordering (simplified: first objective only here)
    Lexicographic,
    /// Interactive methods (simplified: first solution here)
    InteractiveMethod,
    /// Goal programming (distance to an ideal point)
    GoalProgramming,
    /// TOPSIS (simplified: crowding-distance proxy here)
    TOPSIS,
}
683
/// User preferences guiding multi-objective decision making.
#[derive(Debug, Clone)]
pub struct UserPreferences {
    /// Per-objective weights (used by the weighted-sum strategy)
    pub objective_weights: Vec<f64>,
    /// Preference functions per objective
    pub preference_functions: Vec<PreferenceFunction>,
    /// Aspiration levels (desired objective values)
    pub aspiration_levels: Vec<f64>,
    /// Reservation levels (minimum acceptable objective values)
    pub reservation_levels: Vec<f64>,
}
696
/// A preference function attached to one objective.
#[derive(Debug, Clone)]
pub struct PreferenceFunction {
    /// Shape of the preference function
    pub function_type: PreferenceFunctionType,
    /// Parameters; interpretation depends on `function_type`
    pub parameters: Vec<f64>,
    /// Index of the objective this function applies to
    pub objective_index: usize,
}
707
/// Shapes a preference function can take.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreferenceFunctionType {
    /// Linear preference
    Linear,
    /// Piecewise linear preference
    PiecewiseLinear,
    /// Exponential preference
    Exponential,
    /// Gaussian preference
    Gaussian,
    /// Step function preference
    Step,
}
722
/// Audit record of one decision made by a `DecisionMaker`.
#[derive(Debug, Clone)]
pub struct Decision {
    /// When the decision was made
    pub timestamp: Instant,
    /// ID of the solution that was selected
    pub selected_solution_id: String,
    /// Strategy in effect at decision time
    pub strategy_used: DecisionStrategy,
    /// Preferences supplied for this decision, if any
    pub preferences_used: Option<UserPreferences>,
    /// Decision confidence — currently a fixed placeholder (0.8)
    pub confidence: f64,
}
737
#[cfg(test)]
mod tests {
    use super::*;
    use crate::meta_learning_optimization::feature_extraction::ResourceAllocation;

    // Constructing with the default config should leave multi-objective
    // support enabled.
    #[test]
    fn test_multi_objective_optimizer_creation() {
        let config = MultiObjectiveConfig::default();
        let optimizer = MultiObjectiveOptimizer::new(config);
        assert!(optimizer.config.enable_multi_objective);
    }

    // A freshly created frontier starts empty.
    #[test]
    fn test_pareto_frontier() {
        let config = ParetoFrontierConfig::default();
        let frontier = ParetoFrontier::new(&config);
        assert_eq!(frontier.solutions.len(), 0);
    }

    // Weighted-sum scalarization is the dot product of objectives and weights.
    #[test]
    fn test_scalarizer() {
        let scalarizer = Scalarizer::new(ScalarizationMethod::WeightedSum);
        let objectives = vec![0.8, 0.6, 0.9];
        let weights = vec![0.5, 0.3, 0.2];

        let result = scalarizer.scalarize(&objectives, &weights);
        assert!(result.is_ok());

        let score = result.expect("scalarize should succeed");
        assert!((score - 0.76).abs() < 1e-10); // 0.8*0.5 + 0.6*0.3 + 0.9*0.2 = 0.76
    }

    // The default (weighted-sum) strategy should succeed on a one-element set.
    #[test]
    fn test_decision_maker() {
        let mut decision_maker = DecisionMaker::new();

        let solutions = vec![MultiObjectiveSolution {
            id: "sol1".to_string(),
            objective_values: vec![0.8, 0.6],
            decision_variables: OptimizationConfiguration {
                algorithm: AlgorithmType::SimulatedAnnealing,
                hyperparameters: HashMap::new(),
                architecture: None,
                resources: ResourceAllocation {
                    cpu: 1.0,
                    memory: 256,
                    gpu: 0.0,
                    time: std::time::Duration::from_secs(60),
                },
            },
            dominance_rank: 1,
            crowding_distance: 1.0,
        }];

        let result = decision_maker.make_decision(&solutions, None);
        assert!(result.is_ok());
    }
}