quantrs2_device/ml_optimization/optimization.rs

//! Optimization Strategy Configuration Types

use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Optimization strategy configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationStrategyConfig {
    /// Multi-objective optimization configuration
    pub multi_objective: MultiObjectiveConfig,
    /// Constraint handling configuration
    pub constraint_handling: ConstraintHandlingConfig,
    /// Search strategies
    pub search_strategies: Vec<SearchStrategy>,
    /// Exploration-exploitation balance
    pub exploration_exploitation: ExplorationExploitationConfig,
    /// Adaptive strategy configuration
    pub adaptive_strategies: AdaptiveStrategyConfig,
}

/// Multi-objective optimization configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultiObjectiveConfig {
    /// Enable multi-objective optimization
    pub enable_multi_objective: bool,
    /// Objectives and weights
    pub objectives: HashMap<String, f64>,
    /// Pareto optimization
    pub pareto_optimization: bool,
    /// Scalarization methods
    pub scalarization_methods: Vec<ScalarizationMethod>,
}
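
// Illustrative sketch only: one way a `MultiObjectiveConfig` might be assembled.
// The objective names ("fidelity", "execution_time") and the weights below are
// assumptions chosen for demonstration, not values defined elsewhere in this crate.
#[allow(dead_code)]
fn example_multi_objective_config() -> MultiObjectiveConfig {
    let mut objectives = HashMap::new();
    objectives.insert("fidelity".to_string(), 0.7);
    objectives.insert("execution_time".to_string(), 0.3);
    MultiObjectiveConfig {
        enable_multi_objective: true,
        objectives,
        pareto_optimization: true,
        scalarization_methods: vec![ScalarizationMethod::WeightedSum],
    }
}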

/// Scalarization methods
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ScalarizationMethod {
    WeightedSum,
    Chebyshev,
    AugmentedChebyshev,
    BoundaryIntersection,
    AchievementFunction,
}
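
// Illustrative sketch only: what two of the scalarization methods above compute for
// a slice of (objective value, weight) pairs. The ideal-point argument used by the
// Chebyshev form is an assumption for demonstration and not part of this module.
#[allow(dead_code)]
fn scalarize_example(
    method: &ScalarizationMethod,
    values_and_weights: &[(f64, f64)],
    ideal_point: &[f64],
) -> Option<f64> {
    match method {
        // Weighted sum: sum_i w_i * f_i(x)
        ScalarizationMethod::WeightedSum => {
            Some(values_and_weights.iter().map(|(f, w)| w * f).sum::<f64>())
        }
        // Chebyshev: max_i w_i * |f_i(x) - z_i*|, with z* an ideal/reference point
        ScalarizationMethod::Chebyshev => Some(
            values_and_weights
                .iter()
                .zip(ideal_point)
                .map(|((f, w), z)| w * (f - z).abs())
                .fold(0.0, f64::max),
        ),
        // The remaining variants are omitted from this sketch
        _ => None,
    }
}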

/// Constraint handling configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConstraintHandlingConfig {
    /// Constraint types
    pub constraint_types: Vec<ConstraintType>,
    /// Penalty methods
    pub penalty_methods: Vec<PenaltyMethod>,
    /// Constraint tolerance
    pub constraint_tolerance: f64,
    /// Feasibility preservation
    pub feasibility_preservation: bool,
}

/// Constraint types
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ConstraintType {
    Equality,
    Inequality,
    Box,
    Nonlinear,
    Integer,
}
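
// Illustrative sketch only: how a `ConstraintType` and a constraint tolerance (see
// `ConstraintHandlingConfig::constraint_tolerance`) might combine in a feasibility
// check. `violation` is assumed to be h(x) for an equality constraint h(x) = 0, or
// g(x) for an inequality constraint g(x) <= 0; this helper is not part of the
// crate's API.
#[allow(dead_code)]
fn is_feasible_example(constraint_type: &ConstraintType, violation: f64, tolerance: f64) -> bool {
    match constraint_type {
        // Equality h(x) = 0: satisfied when |h(x)| is within tolerance
        ConstraintType::Equality => violation.abs() <= tolerance,
        // Inequality g(x) <= 0: satisfied when g(x) does not exceed tolerance
        ConstraintType::Inequality => violation <= tolerance,
        // Box, nonlinear, and integer constraints need richer information than a
        // single scalar; this sketch just treats small violations as feasible
        _ => violation.abs() <= tolerance,
    }
}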

/// Penalty methods
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum PenaltyMethod {
    ExteriorPenalty,
    InteriorPenalty,
    AugmentedLagrangian,
    BarrierMethod,
    FilterMethod,
}
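
// Illustrative sketch only: the classic exterior-penalty transformation that
// `PenaltyMethod::ExteriorPenalty` conventionally denotes,
//     F(x) = f(x) + rho * sum_i max(0, g_i(x))^2,
// for inequality constraints g_i(x) <= 0. The helper below is an assumption for
// demonstration, not an API defined by this module.
#[allow(dead_code)]
fn exterior_penalty_example(objective: f64, inequality_values: &[f64], rho: f64) -> f64 {
    let penalty: f64 = inequality_values.iter().map(|g| g.max(0.0).powi(2)).sum();
    objective + rho * penalty
}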

/// Search strategies
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum SearchStrategy {
    GradientBased,
    EvolutionaryAlgorithm,
    SwarmIntelligence,
    SimulatedAnnealing,
    BayesianOptimization,
    ReinforcementLearning,
    HybridMethods,
}

/// Exploration-exploitation configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExplorationExploitationConfig {
    /// Initial exploration rate
    pub initial_exploration_rate: f64,
    /// Exploration decay
    pub exploration_decay: f64,
    /// Minimum exploration rate
    pub min_exploration_rate: f64,
    /// Exploitation threshold
    pub exploitation_threshold: f64,
    /// Adaptive balancing
    pub adaptive_balancing: bool,
}
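
// Illustrative sketch only: one common way the fields above combine into a decayed
// exploration rate, rate_t = max(min_rate, initial_rate * decay^t). Whether the
// crate applies the decay multiplicatively like this is an assumption made for
// demonstration.
#[allow(dead_code)]
fn decayed_exploration_rate_example(config: &ExplorationExploitationConfig, step: u32) -> f64 {
    let decayed = config.initial_exploration_rate * config.exploration_decay.powi(step as i32);
    decayed.max(config.min_exploration_rate)
}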

/// Adaptive strategy configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdaptiveStrategyConfig {
    /// Enable adaptive strategies
    pub enable_adaptive: bool,
    /// Strategy selection methods
    pub strategy_selection: Vec<StrategySelectionMethod>,
    /// Performance feedback
    pub performance_feedback: bool,
    /// Strategy mutation
    pub strategy_mutation: bool,
}

/// Strategy selection methods
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum StrategySelectionMethod {
    PerformanceBased,
    BanditAlgorithm,
    ReinforcementLearning,
    HeuristicRules,
    MachineLearning,
}
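
// Illustrative sketch only: a greedy, performance-based pick in the spirit of
// `StrategySelectionMethod::PerformanceBased`, choosing the candidate strategy with
// the best observed score. The (strategy, score) bookkeeping is an assumption for
// demonstration and not part of this module's API.
#[allow(dead_code)]
fn select_strategy_example(scored: &[(SearchStrategy, f64)]) -> Option<SearchStrategy> {
    scored
        .iter()
        .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
        .map(|(strategy, _)| strategy.clone())
}

// Illustrative sketch only: assembling a complete `OptimizationStrategyConfig` from
// the types defined above. Every concrete value is an assumption chosen for
// demonstration.
#[cfg(test)]
mod example_tests {
    use super::*;

    #[test]
    fn build_example_strategy_config() {
        let config = OptimizationStrategyConfig {
            multi_objective: MultiObjectiveConfig {
                enable_multi_objective: true,
                objectives: HashMap::from([("fidelity".to_string(), 1.0)]),
                pareto_optimization: false,
                scalarization_methods: vec![ScalarizationMethod::WeightedSum],
            },
            constraint_handling: ConstraintHandlingConfig {
                constraint_types: vec![ConstraintType::Inequality, ConstraintType::Box],
                penalty_methods: vec![PenaltyMethod::ExteriorPenalty],
                constraint_tolerance: 1e-6,
                feasibility_preservation: true,
            },
            search_strategies: vec![
                SearchStrategy::BayesianOptimization,
                SearchStrategy::SimulatedAnnealing,
            ],
            exploration_exploitation: ExplorationExploitationConfig {
                initial_exploration_rate: 1.0,
                exploration_decay: 0.95,
                min_exploration_rate: 0.05,
                exploitation_threshold: 0.8,
                adaptive_balancing: true,
            },
            adaptive_strategies: AdaptiveStrategyConfig {
                enable_adaptive: true,
                strategy_selection: vec![StrategySelectionMethod::PerformanceBased],
                performance_feedback: true,
                strategy_mutation: false,
            },
        };
        assert!(config.multi_objective.enable_multi_objective);
        assert_eq!(config.search_strategies.len(), 2);
    }
}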