use std::collections::{HashMap, VecDeque};
use std::time::Instant;
use super::config::{
ConstraintHandling, MultiObjectiveConfig, OptimizationObjective, ParetoFrontierConfig,
ScalarizationMethod,
};
use super::feature_extraction::{AlgorithmType, OptimizationConfiguration};
/// Coordinates multi-objective optimization: evaluates candidate
/// configurations against the configured objectives, maintains the Pareto
/// frontier, and delegates scalarization, constraint handling, and final
/// selection to the respective components.
pub struct MultiObjectiveOptimizer {
    /// Static configuration: objectives, frontier limits, and method choices.
    pub config: MultiObjectiveConfig,
    /// Current set of mutually non-dominated solutions plus statistics.
    pub pareto_frontier: ParetoFrontier,
    /// Scalarization backends; the first entry is used by `scalarize`.
    pub scalarizers: Vec<Scalarizer>,
    /// Constraint-handling backends (currently stubs returning zero penalty).
    pub constraint_handlers: Vec<ConstraintHandler>,
    /// Strategy-driven selector used by `make_decision`.
    pub decision_maker: DecisionMaker,
}
impl MultiObjectiveOptimizer {
#[must_use]
pub fn new(config: MultiObjectiveConfig) -> Self {
Self {
pareto_frontier: ParetoFrontier::new(&config.pareto_config),
scalarizers: vec![Scalarizer::new(config.scalarization.clone())],
constraint_handlers: vec![ConstraintHandler::new(config.constraint_handling.clone())],
decision_maker: DecisionMaker::new(),
config,
}
}
pub fn optimize(
&mut self,
candidates: Vec<OptimizationConfiguration>,
) -> Result<Vec<MultiObjectiveSolution>, String> {
let mut solutions = Vec::new();
for (i, candidate) in candidates.iter().enumerate() {
let objective_values = self.evaluate_objectives(candidate)?;
let solution = MultiObjectiveSolution {
id: format!("solution_{i}"),
objective_values,
decision_variables: candidate.clone(),
dominance_rank: 0, crowding_distance: 0.0, };
solutions.push(solution);
}
self.update_pareto_frontier(&mut solutions)?;
self.calculate_dominance_ranks(&mut solutions);
self.calculate_crowding_distances(&mut solutions);
Ok(solutions)
}
fn evaluate_objectives(&self, config: &OptimizationConfiguration) -> Result<Vec<f64>, String> {
let mut objective_values = Vec::new();
for objective in &self.config.objectives {
let value = match objective {
OptimizationObjective::SolutionQuality => {
match config.algorithm {
AlgorithmType::SimulatedAnnealing => 0.8,
AlgorithmType::QuantumAnnealing => 0.9,
AlgorithmType::TabuSearch => 0.7,
_ => 0.6,
}
}
OptimizationObjective::Runtime => {
-config.resources.time.as_secs_f64() / 3600.0 }
OptimizationObjective::ResourceUsage => {
-(config.resources.memory as f64 / 1024.0 + config.resources.cpu)
}
OptimizationObjective::EnergyConsumption => {
-(config.resources.cpu * config.resources.time.as_secs_f64() / 3600.0)
}
OptimizationObjective::Robustness => {
match config.algorithm {
AlgorithmType::QuantumAnnealing => 0.9,
AlgorithmType::SimulatedAnnealing => 0.7,
_ => 0.6,
}
}
OptimizationObjective::Scalability => {
match config.algorithm {
AlgorithmType::TabuSearch => 0.9,
AlgorithmType::QuantumAnnealing => 0.6,
_ => 0.7,
}
}
OptimizationObjective::Custom(_) => 0.5, };
objective_values.push(value);
}
Ok(objective_values)
}
fn update_pareto_frontier(
&mut self,
solutions: &Vec<MultiObjectiveSolution>,
) -> Result<(), String> {
let mut new_solutions = Vec::new();
let mut updated_solutions: Vec<MultiObjectiveSolution> = Vec::new();
for solution in solutions {
let mut is_dominated = false;
let mut dominated_solutions = Vec::new();
for frontier_solution in &self.pareto_frontier.solutions {
if self.dominates(
&frontier_solution.objective_values,
&solution.objective_values,
) {
is_dominated = true;
break;
} else if self.dominates(
&solution.objective_values,
&frontier_solution.objective_values,
) {
dominated_solutions.push(frontier_solution.id.clone());
}
}
if !is_dominated {
new_solutions.push(solution.clone());
self.pareto_frontier
.solutions
.retain(|s| !dominated_solutions.contains(&s.id));
}
}
self.pareto_frontier.solutions.extend(new_solutions);
if self.pareto_frontier.solutions.len() > self.config.pareto_config.max_frontier_size {
self.prune_frontier()?;
}
self.update_frontier_statistics();
Ok(())
}
fn dominates(&self, obj1: &[f64], obj2: &[f64]) -> bool {
if obj1.len() != obj2.len() {
return false;
}
let mut at_least_one_better = false;
for (v1, v2) in obj1.iter().zip(obj2.iter()) {
if *v1 < *v2 - self.config.pareto_config.dominance_tolerance {
return false; }
if *v1 > *v2 + self.config.pareto_config.dominance_tolerance {
at_least_one_better = true;
}
}
at_least_one_better
}
fn prune_frontier(&mut self) -> Result<(), String> {
self.calculate_crowding_distances_frontier();
self.pareto_frontier.solutions.sort_by(|a, b| {
b.crowding_distance
.partial_cmp(&a.crowding_distance)
.unwrap_or(std::cmp::Ordering::Equal)
});
self.pareto_frontier
.solutions
.truncate(self.config.pareto_config.max_frontier_size);
Ok(())
}
fn calculate_crowding_distances_frontier(&mut self) {
let num_solutions = self.pareto_frontier.solutions.len();
let num_objectives = if let Some(first) = self.pareto_frontier.solutions.first() {
first.objective_values.len()
} else {
return;
};
for solution in &mut self.pareto_frontier.solutions {
solution.crowding_distance = 0.0;
}
for obj_idx in 0..num_objectives {
self.pareto_frontier.solutions.sort_by(|a, b| {
a.objective_values[obj_idx]
.partial_cmp(&b.objective_values[obj_idx])
.unwrap_or(std::cmp::Ordering::Equal)
});
if num_solutions > 2 {
self.pareto_frontier.solutions[0].crowding_distance = f64::INFINITY;
self.pareto_frontier.solutions[num_solutions - 1].crowding_distance = f64::INFINITY;
let obj_range = self.pareto_frontier.solutions[num_solutions - 1].objective_values
[obj_idx]
- self.pareto_frontier.solutions[0].objective_values[obj_idx];
if obj_range > 0.0 {
for i in 1..num_solutions - 1 {
let distance = (self.pareto_frontier.solutions[i + 1].objective_values
[obj_idx]
- self.pareto_frontier.solutions[i - 1].objective_values[obj_idx])
/ obj_range;
self.pareto_frontier.solutions[i].crowding_distance += distance;
}
}
}
}
}
fn calculate_dominance_ranks(&self, solutions: &mut Vec<MultiObjectiveSolution>) {
for solution in solutions.iter_mut() {
solution.dominance_rank = 1;
}
}
fn calculate_crowding_distances(&self, solutions: &mut Vec<MultiObjectiveSolution>) {
for (i, solution) in solutions.iter_mut().enumerate() {
solution.crowding_distance = i as f64; }
}
fn update_frontier_statistics(&mut self) {
self.pareto_frontier.statistics.size = self.pareto_frontier.solutions.len();
self.pareto_frontier.statistics.hypervolume =
self.pareto_frontier.solutions.len() as f64 * 0.1;
if self.pareto_frontier.solutions.len() > 1 {
self.pareto_frontier.statistics.spread = 1.0;
} else {
self.pareto_frontier.statistics.spread = 0.0;
}
self.pareto_frontier.statistics.convergence = 0.8;
self.pareto_frontier.statistics.coverage = 0.9;
}
pub fn make_decision(
&mut self,
preferences: Option<UserPreferences>,
) -> Result<MultiObjectiveSolution, String> {
if self.pareto_frontier.solutions.is_empty() {
return Err("No solutions in Pareto frontier".to_string());
}
self.decision_maker
.make_decision(&self.pareto_frontier.solutions, preferences)
}
pub fn scalarize(
&self,
solution: &MultiObjectiveSolution,
weights: &[f64],
) -> Result<f64, String> {
if let Some(scalarizer) = self.scalarizers.first() {
scalarizer.scalarize(&solution.objective_values, weights)
} else {
Err("No scalarizer available".to_string())
}
}
}
/// The set of mutually non-dominated solutions found so far, together with
/// summary statistics and a log of frontier changes.
#[derive(Debug)]
pub struct ParetoFrontier {
    /// Current non-dominated solutions.
    pub solutions: Vec<MultiObjectiveSolution>,
    /// Summary metrics; refreshed by the owning optimizer after updates.
    pub statistics: FrontierStatistics,
    /// History of frontier changes (not yet populated by this module).
    pub update_history: VecDeque<FrontierUpdate>,
}
impl ParetoFrontier {
    /// Creates an empty frontier with zeroed statistics.
    ///
    /// `_config` is currently unused (underscored to silence the warning);
    /// the frontier size limit is enforced by the owning optimizer, which
    /// reads the configuration directly.
    #[must_use]
    pub const fn new(_config: &ParetoFrontierConfig) -> Self {
        Self {
            solutions: Vec::new(),
            statistics: FrontierStatistics {
                size: 0,
                hypervolume: 0.0,
                spread: 0.0,
                convergence: 0.0,
                coverage: 0.0,
            },
            update_history: VecDeque::new(),
        }
    }
}
/// One candidate configuration together with its evaluated objective values
/// and the NSGA-II bookkeeping used for ranking and diversity.
#[derive(Debug, Clone)]
pub struct MultiObjectiveSolution {
    /// Stable identifier (e.g. "solution_0") used for frontier eviction.
    pub id: String,
    /// One score per configured objective; larger is better for all of them.
    pub objective_values: Vec<f64>,
    /// The configuration that produced these objective values.
    pub decision_variables: OptimizationConfiguration,
    /// Non-dominated sorting rank (1 = best front); 0 until computed.
    pub dominance_rank: usize,
    /// Crowding distance (diversity measure); larger = less crowded.
    pub crowding_distance: f64,
}
/// Summary metrics describing the current Pareto frontier.
///
/// Apart from `size`, these are heuristic placeholders populated by
/// `update_frontier_statistics`, not real quality indicators.
#[derive(Debug, Clone)]
pub struct FrontierStatistics {
    /// Number of solutions on the frontier.
    pub size: usize,
    /// Hypervolume indicator (placeholder: proportional to frontier size).
    pub hypervolume: f64,
    /// Spread/diversity measure (placeholder: 1.0 if more than one solution).
    pub spread: f64,
    /// Convergence measure (placeholder constant).
    pub convergence: f64,
    /// Coverage measure (placeholder constant).
    pub coverage: f64,
}
/// A single recorded change to the Pareto frontier.
///
/// NOTE(review): nothing in this module pushes entries into
/// `ParetoFrontier::update_history` yet; this type is forward-looking.
#[derive(Debug, Clone)]
pub struct FrontierUpdate {
    /// When the update happened.
    pub timestamp: Instant,
    /// Ids of solutions added by this update.
    pub solutions_added: Vec<String>,
    /// Ids of solutions evicted by this update.
    pub solutions_removed: Vec<String>,
    /// Why the frontier changed.
    pub reason: UpdateReason,
}
/// Cause of a Pareto frontier update.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum UpdateReason {
    /// A new non-dominated solution was added.
    NewNonDominated,
    /// Existing members were evicted because a new solution dominates them.
    DominanceUpdate,
    /// Members were removed to preserve diversity (crowding-based pruning).
    CrowdingPruning,
    /// The frontier exceeded its configured maximum size.
    SizeLimitReached,
}
/// Collapses a multi-objective value vector into a single scalar score.
#[derive(Debug)]
pub struct Scalarizer {
    /// Which scalarization formula to apply.
    pub method: ScalarizationMethod,
    /// Reference point for Tchebycheff / reference-point methods; `None`
    /// means Tchebycheff falls back to the origin and the reference-point
    /// method errors out.
    pub reference_point: Option<Vec<f64>>,
    /// Stored default weights (currently unused; `scalarize` takes weights
    /// explicitly).
    pub weights: Vec<f64>,
}
impl Scalarizer {
    /// Creates a scalarizer for `method` with no reference point and no
    /// stored weights.
    #[must_use]
    pub const fn new(method: ScalarizationMethod) -> Self {
        Self {
            method,
            reference_point: None,
            weights: Vec::new(),
        }
    }

    /// Collapses `objectives` into one score using this scalarizer's method.
    ///
    /// For the Tchebycheff and reference-point methods the configured
    /// `reference_point` is consulted; Tchebycheff falls back to the origin
    /// when none is set.
    ///
    /// # Errors
    /// Returns an error if `objectives` and `weights` differ in length, or if
    /// the reference-point method is used without a reference point.
    pub fn scalarize(&self, objectives: &[f64], weights: &[f64]) -> Result<f64, String> {
        if objectives.len() != weights.len() {
            return Err("Objectives and weights length mismatch".to_string());
        }
        match &self.method {
            ScalarizationMethod::WeightedSum => Ok(objectives
                .iter()
                .zip(weights.iter())
                .map(|(obj, w)| obj * w)
                .sum()),
            ScalarizationMethod::WeightedTchebycheff => {
                let default_reference = vec![0.0; objectives.len()];
                // NOTE(review): a stored reference point shorter than
                // `objectives` is silently truncated by the zip — confirm
                // callers always set a matching length.
                let reference = self.reference_point.as_ref().unwrap_or(&default_reference);
                let mut max_weighted_diff: f64 = 0.0;
                for ((obj, ref_val), weight) in
                    objectives.iter().zip(reference.iter()).zip(weights.iter())
                {
                    let weighted_diff = weight * (ref_val - obj).abs();
                    max_weighted_diff = max_weighted_diff.max(weighted_diff);
                }
                Ok(max_weighted_diff)
            }
            ScalarizationMethod::AchievementScalarizing => {
                let weighted_sum: f64 = objectives
                    .iter()
                    .zip(weights.iter())
                    .map(|(obj, w)| obj * w)
                    .sum();
                // Reduce from the first element so all-negative objective
                // vectors (e.g. negated runtime/energy) keep their true max;
                // folding from 0.0 silently clamped the max to zero.
                let max_objective: f64 =
                    objectives.iter().copied().reduce(f64::max).unwrap_or(0.0);
                Ok(0.01f64.mul_add(max_objective, weighted_sum))
            }
            ScalarizationMethod::PenaltyBoundaryIntersection => {
                // Simplified PBI: currently just the weighted sum (no
                // penalty/boundary terms implemented yet).
                let weighted_sum: f64 = objectives
                    .iter()
                    .zip(weights.iter())
                    .map(|(obj, w)| obj * w)
                    .sum();
                Ok(weighted_sum)
            }
            ScalarizationMethod::ReferencePoint => {
                if let Some(ref_point) = &self.reference_point {
                    // Negated Euclidean distance: closer to the reference
                    // point scores higher.
                    let distance: f64 = objectives
                        .iter()
                        .zip(ref_point.iter())
                        .map(|(obj, ref_val)| (obj - ref_val).powi(2))
                        .sum::<f64>()
                        .sqrt();
                    Ok(-distance)
                } else {
                    Err("Reference point not set for reference point method".to_string())
                }
            }
        }
    }
}
/// Applies a constraint-handling method to candidate solutions.
#[derive(Debug)]
pub struct ConstraintHandler {
    /// Which constraint-handling technique to use.
    pub method: ConstraintHandling,
    /// Registered constraints (currently never populated or evaluated).
    pub constraints: Vec<Constraint>,
}
impl ConstraintHandler {
    /// Creates a handler for `method` with no registered constraints.
    #[must_use]
    pub const fn new(method: ConstraintHandling) -> Self {
        Self {
            method,
            constraints: Vec::new(),
        }
    }

    /// Computes the constraint penalty for a solution.
    ///
    /// Every method is currently a stub returning a zero penalty, so the
    /// solution is ignored (hence the underscore) until real constraint
    /// evaluation is implemented. The match stays exhaustive so adding a
    /// `ConstraintHandling` variant is a compile error here.
    ///
    /// # Errors
    /// Currently infallible; the `Result` is kept for future implementations.
    pub const fn handle_constraints(
        &self,
        _solution: &MultiObjectiveSolution,
    ) -> Result<f64, String> {
        match self.method {
            ConstraintHandling::PenaltyMethod
            | ConstraintHandling::BarrierMethod
            | ConstraintHandling::LagrangianMethod
            | ConstraintHandling::FeasibilityRules
            | ConstraintHandling::MultiObjectiveConstraint => Ok(0.0),
        }
    }
}
/// A single optimization constraint definition.
///
/// NOTE(review): constraints are declared but never evaluated anywhere in
/// this module yet.
#[derive(Debug, Clone)]
pub struct Constraint {
    /// Unique identifier for the constraint.
    pub id: String,
    /// Kind of constraint (equality, inequality, bound, or custom).
    pub constraint_type: ConstraintType,
    /// Method-specific numeric parameters.
    pub parameters: Vec<f64>,
    /// Violation tolerance before the constraint is considered breached.
    pub tolerance: f64,
}
/// Category of an optimization constraint.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConstraintType {
    /// g(x) = 0 style constraint.
    Equality,
    /// g(x) <= 0 style constraint.
    Inequality,
    /// Simple variable bound.
    Bound,
    /// User-defined constraint identified by name.
    Custom(String),
}
/// Selects a single solution from a Pareto set according to a strategy,
/// keeping a bounded history of past decisions.
#[derive(Debug)]
pub struct DecisionMaker {
    /// Selection strategy; defaults to `WeightedSum`.
    pub strategy: DecisionStrategy,
    /// Stored preferences (currently unused; `make_decision` takes
    /// preferences per call).
    pub preferences: Option<UserPreferences>,
    /// Most recent decisions, capped at 100 entries.
    pub decision_history: VecDeque<Decision>,
}
impl DecisionMaker {
    /// Maximum number of past decisions retained in `decision_history`.
    const MAX_HISTORY: usize = 100;

    /// Creates a decision maker using the weighted-sum strategy, no stored
    /// preferences, and an empty decision history.
    #[must_use]
    pub const fn new() -> Self {
        Self {
            strategy: DecisionStrategy::WeightedSum,
            preferences: None,
            decision_history: VecDeque::new(),
        }
    }

    /// Picks one solution from `solutions` according to the configured
    /// strategy, records the decision, and returns a clone of the winner.
    ///
    /// When `preferences` is `None`, the weighted-sum strategy falls back to
    /// equal weights across objectives.
    ///
    /// # Errors
    /// Returns an error if `solutions` is empty or no solution can be ranked.
    pub fn make_decision(
        &mut self,
        solutions: &[MultiObjectiveSolution],
        preferences: Option<UserPreferences>,
    ) -> Result<MultiObjectiveSolution, String> {
        if solutions.is_empty() {
            return Err("No solutions to choose from".to_string());
        }
        let selected_solution = match &self.strategy {
            DecisionStrategy::WeightedSum => {
                // NOTE(review): if user weights and objective vectors differ
                // in length the zip below silently truncates — confirm
                // callers supply one weight per objective.
                let weights = if let Some(ref prefs) = preferences {
                    prefs.objective_weights.clone()
                } else {
                    vec![
                        1.0 / solutions[0].objective_values.len() as f64;
                        solutions[0].objective_values.len()
                    ]
                };
                solutions
                    .iter()
                    .max_by(|a, b| {
                        let score_a: f64 = a
                            .objective_values
                            .iter()
                            .zip(weights.iter())
                            .map(|(obj, w)| obj * w)
                            .sum();
                        let score_b: f64 = b
                            .objective_values
                            .iter()
                            .zip(weights.iter())
                            .map(|(obj, w)| obj * w)
                            .sum();
                        score_a
                            .partial_cmp(&score_b)
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .ok_or("Failed to select solution")?
            }
            // Simplified lexicographic: only the first objective is compared.
            DecisionStrategy::Lexicographic => solutions
                .iter()
                .max_by(|a, b| {
                    a.objective_values[0]
                        .partial_cmp(&b.objective_values[0])
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
                .ok_or("Failed to select solution")?,
            // No interactive channel yet: fall back to the first solution.
            DecisionStrategy::InteractiveMethod => &solutions[0],
            // Minimize Euclidean distance to the ideal point (1.0 on every
            // objective).
            DecisionStrategy::GoalProgramming => solutions
                .iter()
                .min_by(|a, b| {
                    let dist_a: f64 = a
                        .objective_values
                        .iter()
                        .map(|obj| (1.0 - obj).powi(2))
                        .sum::<f64>()
                        .sqrt();
                    let dist_b: f64 = b
                        .objective_values
                        .iter()
                        .map(|obj| (1.0 - obj).powi(2))
                        .sum::<f64>()
                        .sqrt();
                    dist_a
                        .partial_cmp(&dist_b)
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
                .ok_or("Failed to select solution")?,
            // Simplified TOPSIS: uses crowding distance as the ranking proxy.
            DecisionStrategy::TOPSIS => solutions
                .iter()
                .max_by(|a, b| {
                    a.crowding_distance
                        .partial_cmp(&b.crowding_distance)
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
                .ok_or("Failed to select solution")?,
        };
        let decision = Decision {
            timestamp: Instant::now(),
            selected_solution_id: selected_solution.id.clone(),
            strategy_used: self.strategy.clone(),
            preferences_used: preferences,
            confidence: 0.8,
        };
        self.decision_history.push_back(decision);
        // Keep the history bounded.
        while self.decision_history.len() > Self::MAX_HISTORY {
            self.decision_history.pop_front();
        }
        Ok(selected_solution.clone())
    }
}
/// Strategy used by `DecisionMaker` to pick one solution from a Pareto set.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DecisionStrategy {
    /// Maximize the weighted sum of objective values.
    WeightedSum,
    /// Rank by the first objective only (simplified lexicographic order).
    Lexicographic,
    /// Ask the user; currently falls back to the first solution.
    InteractiveMethod,
    /// Minimize distance to the ideal point (1.0 on every objective).
    GoalProgramming,
    /// TOPSIS-style ranking; currently proxied by crowding distance.
    TOPSIS,
}
/// User-supplied guidance for trading off objectives during decision making.
#[derive(Debug, Clone)]
pub struct UserPreferences {
    /// One weight per objective; used by the weighted-sum strategy.
    pub objective_weights: Vec<f64>,
    /// Per-objective preference shaping functions (not yet consulted).
    pub preference_functions: Vec<PreferenceFunction>,
    /// Desired objective levels (not yet consulted).
    pub aspiration_levels: Vec<f64>,
    /// Minimum acceptable objective levels (not yet consulted).
    pub reservation_levels: Vec<f64>,
}
/// Maps one objective's raw value to a preference score.
#[derive(Debug, Clone)]
pub struct PreferenceFunction {
    /// Shape of the mapping.
    pub function_type: PreferenceFunctionType,
    /// Shape-specific parameters.
    pub parameters: Vec<f64>,
    /// Index into the objective vector this function applies to.
    pub objective_index: usize,
}
/// Shape of a preference mapping over an objective value.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PreferenceFunctionType {
    /// Linearly increasing preference.
    Linear,
    /// Piecewise-linear preference with breakpoints in `parameters`.
    PiecewiseLinear,
    /// Exponentially shaped preference.
    Exponential,
    /// Gaussian (peak-at-target) preference.
    Gaussian,
    /// Step (threshold) preference.
    Step,
}
/// Record of one selection made by the `DecisionMaker`.
#[derive(Debug, Clone)]
pub struct Decision {
    /// When the decision was made.
    pub timestamp: Instant,
    /// Id of the chosen solution.
    pub selected_solution_id: String,
    /// Strategy that produced the choice.
    pub strategy_used: DecisionStrategy,
    /// Preferences in effect at decision time, if any.
    pub preferences_used: Option<UserPreferences>,
    /// Confidence score (currently a fixed 0.8 placeholder).
    pub confidence: f64,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::meta_learning_optimization::feature_extraction::ResourceAllocation;

    #[test]
    fn test_multi_objective_optimizer_creation() {
        // Constructing from the default config keeps the feature flag on.
        let optimizer = MultiObjectiveOptimizer::new(MultiObjectiveConfig::default());
        assert!(optimizer.config.enable_multi_objective);
    }

    #[test]
    fn test_pareto_frontier() {
        // A freshly created frontier holds no solutions.
        let frontier = ParetoFrontier::new(&ParetoFrontierConfig::default());
        assert_eq!(frontier.solutions.len(), 0);
    }

    #[test]
    fn test_scalarizer() {
        // Weighted sum: 0.8*0.5 + 0.6*0.3 + 0.9*0.2 = 0.76.
        let scalarizer = Scalarizer::new(ScalarizationMethod::WeightedSum);
        let result = scalarizer.scalarize(&[0.8, 0.6, 0.9], &[0.5, 0.3, 0.2]);
        assert!(result.is_ok());
        let score = result.expect("scalarize should succeed");
        assert!((score - 0.76).abs() < 1e-10);
    }

    #[test]
    fn test_decision_maker() {
        // A single-solution slice must always yield a decision.
        let resources = ResourceAllocation {
            cpu: 1.0,
            memory: 256,
            gpu: 0.0,
            time: std::time::Duration::from_secs(60),
        };
        let candidate = MultiObjectiveSolution {
            id: "sol1".to_string(),
            objective_values: vec![0.8, 0.6],
            decision_variables: OptimizationConfiguration {
                algorithm: AlgorithmType::SimulatedAnnealing,
                hyperparameters: HashMap::new(),
                architecture: None,
                resources,
            },
            dominance_rank: 1,
            crowding_distance: 1.0,
        };
        let mut decision_maker = DecisionMaker::new();
        let result = decision_maker.make_decision(&[candidate], None);
        assert!(result.is_ok());
    }
}