quantrs2_anneal/meta_learning_optimization/meta_learning.rs

//! Core meta-learning optimization engine

use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

use crate::applications::{ApplicationError, ApplicationResult};
use crate::ising::{IsingModel, QuboModel};
use crate::simulator::{AnnealingParams, AnnealingResult, QuantumAnnealingSimulator};

use super::config::MetaLearningConfig;
use super::feature_extraction::{
    AlgorithmType, DistributionStats, ExperienceDatabase, FeatureExtractor,
    OptimizationConfiguration, OptimizationExperience, ProblemDomain, ProblemFeatures,
    ResourceAllocation,
};
use super::multi_objective::MultiObjectiveOptimizer;
use super::neural_architecture_search::NeuralArchitectureSearch;
use super::portfolio_management::{AlgorithmPortfolio, PerformanceRecord};
use super::transfer_learning::{
    DomainCharacteristics, SourceDomain, TransferLearner, TransferableModel,
};
use super::{AlternativeStrategy, MetaLearningStatistics, RecommendedStrategy};

/// Main meta-learning optimization engine
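///
/// A minimal usage sketch (illustrative only, not compiled as a doctest;
/// `problem` and `experience` are assumed to be constructed elsewhere):
///
/// ```ignore
/// let config = MetaLearningConfig::default();
/// let mut optimizer = MetaLearningOptimizer::new(config);
///
/// // Ask for a strategy for an Ising problem, then feed the outcome back.
/// let strategy = optimizer.recommend_strategy(&problem)?;
/// optimizer.record_experience(experience)?;
/// let stats = optimizer.get_statistics()?;
/// ```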
pub struct MetaLearningOptimizer {
    /// Configuration
    pub config: MetaLearningConfig,
    /// Experience database
    pub experience_db: Arc<RwLock<ExperienceDatabase>>,
    /// Feature extractor
    pub feature_extractor: Arc<Mutex<FeatureExtractor>>,
    /// Meta-learner
    pub meta_learner: Arc<Mutex<MetaLearner>>,
    /// Neural architecture search engine
    pub nas_engine: Arc<Mutex<NeuralArchitectureSearch>>,
    /// Algorithm portfolio manager
    pub portfolio_manager: Arc<Mutex<AlgorithmPortfolio>>,
    /// Multi-objective optimizer
    pub multi_objective_optimizer: Arc<Mutex<MultiObjectiveOptimizer>>,
    /// Transfer learning system
    pub transfer_learner: Arc<Mutex<TransferLearner>>,
}

impl MetaLearningOptimizer {
    #[must_use]
    pub fn new(config: MetaLearningConfig) -> Self {
        Self {
            experience_db: Arc::new(RwLock::new(ExperienceDatabase::new())),
            feature_extractor: Arc::new(Mutex::new(FeatureExtractor::new(
                config.feature_config.clone(),
            ))),
            meta_learner: Arc::new(Mutex::new(MetaLearner::new())),
            nas_engine: Arc::new(Mutex::new(NeuralArchitectureSearch::new(
                config.nas_config.clone(),
            ))),
            portfolio_manager: Arc::new(Mutex::new(AlgorithmPortfolio::new(
                config.portfolio_config.clone(),
            ))),
            multi_objective_optimizer: Arc::new(Mutex::new(MultiObjectiveOptimizer::new(
                config.multi_objective_config.clone(),
            ))),
            transfer_learner: Arc::new(Mutex::new(TransferLearner::new())),
            config,
        }
    }

    /// Recommend optimization strategy for a given problem
    pub fn recommend_strategy(
        &mut self,
        problem: &IsingModel,
    ) -> ApplicationResult<RecommendedStrategy> {
        // Extract problem features
        let features = {
            let mut extractor = self.feature_extractor.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock feature extractor".to_string())
            })?;
            extractor.extract_features(problem)?
        };

        // Find similar experiences
        let similar_experiences = {
            let db = self.experience_db.read().map_err(|_| {
                ApplicationError::ConfigurationError(
                    "Failed to lock experience database".to_string(),
                )
            })?;
            db.find_similar_experiences(&features, 10)?
        };

        // Apply transfer learning if enabled
        let transferred_knowledge = if self.config.enable_transfer_learning {
            let mut transfer_learner = self.transfer_learner.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock transfer learner".to_string())
            })?;
            let domain = self.infer_problem_domain(&features);
            transfer_learner
                .transfer_knowledge(&features, &domain)
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        // Get meta-learning recommendation
        let meta_recommendation = {
            let mut meta_learner = self.meta_learner.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock meta learner".to_string())
            })?;
            meta_learner.recommend_strategy(&features, &similar_experiences)?
        };

        // Select algorithm from portfolio
        let algorithm_id = {
            let mut portfolio = self.portfolio_manager.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock portfolio manager".to_string())
            })?;
            portfolio
                .select_algorithm(&features)
                .map_err(ApplicationError::ConfigurationError)?
        };

        // Get algorithm configuration
        let algorithm_config = {
            let portfolio = self.portfolio_manager.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock portfolio manager".to_string())
            })?;
            portfolio
                .algorithms
                .get(&algorithm_id)
                .map(|alg| alg.default_config.clone())
                .ok_or_else(|| {
                    ApplicationError::ConfigurationError(
                        "Algorithm not found in portfolio".to_string(),
                    )
                })?
        };

        // Apply neural architecture search if needed
        let optimized_config =
            if self.config.nas_config.enable_nas && algorithm_config.architecture.is_some() {
                let mut nas = self.nas_engine.lock().map_err(|_| {
                    ApplicationError::ConfigurationError("Failed to lock NAS engine".to_string())
                })?;
                if let Ok(architecture_candidate) = nas.search_architecture(&features) {
                    let mut config = algorithm_config;
                    config.architecture = Some(architecture_candidate.architecture);
                    config
                } else {
                    algorithm_config
                }
            } else {
                algorithm_config
            };

        // Apply multi-objective optimization if enabled
        let final_config = if self.config.multi_objective_config.enable_multi_objective {
            let mut mo_optimizer = self.multi_objective_optimizer.lock().map_err(|_| {
                ApplicationError::ConfigurationError(
                    "Failed to lock multi-objective optimizer".to_string(),
                )
            })?;
            let candidates = vec![optimized_config.clone()];
            if mo_optimizer.optimize(candidates).is_ok() {
                if let Ok(best_solution) = mo_optimizer.make_decision(None) {
                    best_solution.decision_variables
                } else {
                    optimized_config
                }
            } else {
                optimized_config
            }
        } else {
            optimized_config
        };

        // Combine recommendations and create final strategy
        let mut final_hyperparameters = meta_recommendation.hyperparameters.clone();

        // Merge with transferred knowledge
        for model in &transferred_knowledge {
            for (param_name, param_value) in &model.parameters {
                // Use transferred parameter if confidence is high
                if model.confidence > 0.7 {
                    final_hyperparameters.insert(param_name.clone(), *param_value);
                }
            }
        }

        // Merge with final configuration
        for (param_name, param_value) in &final_config.hyperparameters {
            final_hyperparameters.insert(param_name.clone(), *param_value);
        }

        let recommended_strategy = RecommendedStrategy {
            algorithm: algorithm_id,
            hyperparameters: final_hyperparameters,
            confidence: self
                .calculate_recommendation_confidence(&similar_experiences, &transferred_knowledge),
            expected_performance: meta_recommendation.expected_performance,
            alternatives: self.generate_alternative_strategies(&features)?,
        };

        Ok(recommended_strategy)
    }

    /// Record optimization experience for learning
    pub fn record_experience(
        &mut self,
        experience: OptimizationExperience,
    ) -> ApplicationResult<()> {
        // Add to experience database
        {
            let mut db = self.experience_db.write().map_err(|_| {
                ApplicationError::ConfigurationError(
                    "Failed to lock experience database".to_string(),
                )
            })?;
            db.add_experience(experience.clone());
        }

        // Update meta-learner
        {
            let mut meta_learner = self.meta_learner.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock meta learner".to_string())
            })?;
            meta_learner.add_training_episode(experience.clone())?;
        }

        // Update portfolio performance
        if let Some(algorithm_name) =
            self.algorithm_type_to_name(&experience.configuration.algorithm)
        {
            let mut portfolio = self.portfolio_manager.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock portfolio manager".to_string())
            })?;

            let performance_record = PerformanceRecord {
                timestamp: experience.timestamp,
                problem_features: experience.problem_features.clone(),
                performance: experience.results.quality_metrics.objective_value,
                resource_usage: experience.results.resource_usage.clone(),
                context: HashMap::new(),
            };

            portfolio.record_performance(&algorithm_name, performance_record);
            portfolio.update_composition();
        }

        // Update transfer learning
        if self.config.enable_transfer_learning {
            let mut transfer_learner = self.transfer_learner.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock transfer learner".to_string())
            })?;
            let domain_characteristics = self.create_domain_characteristics(&experience);
            let source_domain = SourceDomain {
                id: format!("domain_{:?}", experience.domain),
                characteristics: domain_characteristics,
                experiences: vec![experience],
                models: Vec::new(),
                last_updated: Instant::now(),
            };
            transfer_learner.add_source_domain(source_domain);
        }

        Ok(())
    }

    /// Get meta-learning statistics
    pub fn get_statistics(&self) -> ApplicationResult<MetaLearningStatistics> {
        let db_stats = {
            let db = self.experience_db.read().map_err(|_| {
                ApplicationError::ConfigurationError(
                    "Failed to lock experience database".to_string(),
                )
            })?;
            db.statistics.clone()
        };

        let transfer_success_rate = if self.config.enable_transfer_learning {
            let transfer_learner = self.transfer_learner.lock().map_err(|_| {
                ApplicationError::ConfigurationError("Failed to lock transfer learner".to_string())
            })?;
            transfer_learner.evaluate_transfer_success()
        } else {
            0.0
        };

        Ok(MetaLearningStatistics {
            total_episodes: db_stats.total_experiences,
            average_improvement: db_stats.avg_performance,
            transfer_success_rate,
            feature_extraction_time: Duration::from_millis(10), // Simplified
            model_training_time: Duration::from_millis(100),
            prediction_time: Duration::from_millis(5),
        })
    }

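    /// Infer the problem domain from extracted features (simple heuristic:
    /// any graph structure is treated as a graph problem, everything else as
    /// combinatorial).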
    const fn infer_problem_domain(&self, features: &ProblemFeatures) -> ProblemDomain {
        // Simple domain inference based on problem characteristics
        if features.graph_features.num_edges > 0 {
            ProblemDomain::Graph
        } else {
            ProblemDomain::Combinatorial
        }
    }

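    /// Combine experience-based and transfer-based confidence into a single
    /// score in [0, 1]. More similar past experiences and higher-confidence
    /// transferred models both raise the overall confidence.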
    fn calculate_recommendation_confidence(
        &self,
        similar_experiences: &[OptimizationExperience],
        transferred_knowledge: &[TransferableModel],
    ) -> f64 {
        let experience_confidence = if similar_experiences.is_empty() {
            0.3
        } else {
            0.3f64.mul_add((similar_experiences.len() as f64 / 10.0).min(1.0), 0.7)
        };

        let transfer_confidence = if transferred_knowledge.is_empty() {
            0.0
        } else {
            transferred_knowledge
                .iter()
                .map(|m| m.confidence)
                .sum::<f64>()
                / transferred_knowledge.len() as f64
        };

        (experience_confidence + transfer_confidence * 0.3).min(1.0)
    }

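    /// Suggest fallback strategies keyed to problem size, each with a rough
    /// relative-performance estimate.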
    fn generate_alternative_strategies(
        &self,
        features: &ProblemFeatures,
    ) -> ApplicationResult<Vec<AlternativeStrategy>> {
        let mut alternatives = Vec::new();

        // Generate alternatives based on problem size
        if features.size < 100 {
            alternatives.push(AlternativeStrategy {
                algorithm: "simulated_annealing".to_string(),
                relative_performance: 0.9,
            });
        } else if features.size < 500 {
            alternatives.push(AlternativeStrategy {
                algorithm: "quantum_annealing".to_string(),
                relative_performance: 0.95,
            });
        } else {
            alternatives.push(AlternativeStrategy {
                algorithm: "tabu_search".to_string(),
                relative_performance: 0.85,
            });
        }

        Ok(alternatives)
    }

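    /// Map an `AlgorithmType` to the string identifier used by the portfolio
    /// manager; returns `None` for algorithm types the portfolio does not track.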
    fn algorithm_type_to_name(&self, algorithm_type: &AlgorithmType) -> Option<String> {
        match algorithm_type {
            AlgorithmType::SimulatedAnnealing => Some("simulated_annealing".to_string()),
            AlgorithmType::QuantumAnnealing => Some("quantum_annealing".to_string()),
            AlgorithmType::TabuSearch => Some("tabu_search".to_string()),
            AlgorithmType::GeneticAlgorithm => Some("genetic_algorithm".to_string()),
            _ => None,
        }
    }

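    /// Build `DomainCharacteristics` for transfer learning from a single
    /// experience, using a rough ±20% spread around the observed objective
    /// value as the performance distribution.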
    fn create_domain_characteristics(
        &self,
        experience: &OptimizationExperience,
    ) -> DomainCharacteristics {
        DomainCharacteristics {
            domain: experience.domain.clone(),
            avg_problem_size: experience.problem_features.size as f64,
            avg_density: experience.problem_features.density,
            typical_algorithms: vec![experience.configuration.algorithm.clone()],
            performance_distribution: DistributionStats {
                mean: experience.results.quality_metrics.objective_value,
                std_dev: 0.1,
                min: experience.results.quality_metrics.objective_value * 0.8,
                max: experience.results.quality_metrics.objective_value * 1.2,
                skewness: 0.0,
                kurtosis: 3.0,
            },
            feature_importance: HashMap::new(),
        }
    }
}

/// Meta-learning system
pub struct MetaLearner {
    /// Learning algorithm
    pub algorithm: MetaLearningAlgorithm,
    /// Model parameters
    pub parameters: Vec<f64>,
    /// Training history
    pub training_history: VecDeque<TrainingEpisode>,
    /// Performance evaluator
    pub evaluator: PerformanceEvaluator,
}

impl MetaLearner {
    #[must_use]
    pub fn new() -> Self {
        Self {
            algorithm: MetaLearningAlgorithm::MAML,
            parameters: Vec::new(),
            training_history: VecDeque::new(),
            evaluator: PerformanceEvaluator {
                metrics: vec![
                    EvaluationMetric::MeanSquaredError,
                    EvaluationMetric::Accuracy,
                ],
                cv_strategy: CrossValidationStrategy::KFold(5),
                statistical_tests: vec![StatisticalTest::TTest],
            },
        }
    }

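    /// Recommend an optimization strategy from problem features and similar
    /// past experiences: the algorithm is chosen by problem size, and
    /// temperature hyperparameters are averaged over the provided experiences
    /// when available.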
    pub fn recommend_strategy(
        &mut self,
        features: &ProblemFeatures,
        experiences: &[OptimizationExperience],
    ) -> ApplicationResult<RecommendedStrategy> {
        // Simple strategy recommendation based on problem size
        let algorithm = if features.size < 100 {
            AlgorithmType::SimulatedAnnealing
        } else if features.size < 500 {
            AlgorithmType::QuantumAnnealing
        } else {
            AlgorithmType::TabuSearch
        };

        let mut hyperparameters = HashMap::new();

        // Set hyperparameters based on experiences
        if experiences.is_empty() {
            // Default hyperparameters
            hyperparameters.insert("initial_temperature".to_string(), 10.0);
            hyperparameters.insert("final_temperature".to_string(), 0.1);
        } else {
            // Average each temperature parameter over the experiences that
            // actually recorded it, falling back to the default otherwise.
            let avg_param = |name: &str, default: f64| -> f64 {
                let values: Vec<f64> = experiences
                    .iter()
                    .filter_map(|exp| exp.configuration.hyperparameters.get(name).copied())
                    .collect();
                if values.is_empty() {
                    default
                } else {
                    values.iter().sum::<f64>() / values.len() as f64
                }
            };

            let avg_initial_temp = avg_param("initial_temperature", 10.0);
            hyperparameters.insert("initial_temperature".to_string(), avg_initial_temp.max(1.0));

            let avg_final_temp = avg_param("final_temperature", 0.1);
            hyperparameters.insert("final_temperature".to_string(), avg_final_temp.max(0.01));
        }

        hyperparameters.insert(
            "num_sweeps".to_string(),
            (features.size as f64 * 10.0).min(10_000.0),
        );

        let configuration = OptimizationConfiguration {
            algorithm,
            hyperparameters,
            architecture: None,
            resources: ResourceAllocation {
                cpu: 1.0,
                memory: 512,
                gpu: 0.0,
                time: Duration::from_secs(60),
            },
        };

        let confidence = if experiences.len() >= 5 { 0.9 } else { 0.6 };

        Ok(RecommendedStrategy {
            algorithm: "meta_learner_recommendation".to_string(),
            hyperparameters: configuration.hyperparameters,
            confidence,
            expected_performance: 0.8,
            alternatives: Vec::new(),
        })
    }

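    /// Record an optimization experience as a single training episode,
    /// keeping at most the 1000 most recent episodes.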
    pub fn add_training_episode(
        &mut self,
        experience: OptimizationExperience,
    ) -> ApplicationResult<()> {
        let episode = TrainingEpisode {
            id: experience.id.clone(),
            support_set: vec![experience.clone()],
            query_set: vec![experience.clone()],
            loss: 1.0 - experience.results.quality_metrics.objective_value,
            accuracy: experience.results.quality_metrics.objective_value,
            timestamp: experience.timestamp,
        };

        self.training_history.push_back(episode);

        // Limit history size
        while self.training_history.len() > 1000 {
            self.training_history.pop_front();
        }

        Ok(())
    }
}

/// Meta-learning algorithms
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MetaLearningAlgorithm {
    /// Model-Agnostic Meta-Learning
    MAML,
    /// Prototypical Networks
    PrototypicalNetworks,
    /// Matching Networks
    MatchingNetworks,
    /// Relation Networks
    RelationNetworks,
    /// Memory-Augmented Networks
    MemoryAugmented,
    /// Gradient-Based Meta-Learning
    GradientBased,
}

/// Training episode
#[derive(Debug, Clone)]
pub struct TrainingEpisode {
    /// Episode identifier
    pub id: String,
    /// Support set
    pub support_set: Vec<OptimizationExperience>,
    /// Query set
    pub query_set: Vec<OptimizationExperience>,
    /// Loss achieved
    pub loss: f64,
    /// Accuracy achieved
    pub accuracy: f64,
    /// Timestamp
    pub timestamp: Instant,
}

/// Performance evaluator
#[derive(Debug)]
pub struct PerformanceEvaluator {
    /// Evaluation metrics
    pub metrics: Vec<EvaluationMetric>,
    /// Cross-validation strategy
    pub cv_strategy: CrossValidationStrategy,
    /// Statistical tests
    pub statistical_tests: Vec<StatisticalTest>,
}

/// Evaluation metrics
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EvaluationMetric {
    /// Mean squared error
    MeanSquaredError,
    /// Mean absolute error
    MeanAbsoluteError,
    /// R-squared
    RSquared,
    /// Accuracy
    Accuracy,
    /// Precision
    Precision,
    /// Recall
    Recall,
    /// F1 score
    F1Score,
    /// Custom metric
    Custom(String),
}

/// Cross-validation strategies
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CrossValidationStrategy {
    /// K-fold cross-validation
    KFold(usize),
    /// Leave-one-out
    LeaveOneOut,
    /// Time series split
    TimeSeriesSplit,
    /// Stratified K-fold
    StratifiedKFold(usize),
    /// Custom strategy
    Custom(String),
}

/// Statistical tests
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StatisticalTest {
    /// t-test
    TTest,
    /// Wilcoxon signed-rank test
    WilcoxonSignedRank,
    /// Mann-Whitney U test
    MannWhitneyU,
    /// Kolmogorov-Smirnov test
    KolmogorovSmirnov,
    /// Chi-square test
    ChiSquare,
}

/// Meta-optimization result
#[derive(Debug, Clone)]
pub struct MetaOptimizationResult {
    /// Recommended strategy
    pub strategy: RecommendedStrategy,
    /// Alternative strategies
    pub alternatives: Vec<AlternativeStrategy>,
    /// Confidence in recommendation
    pub confidence: f64,
    /// Expected performance gain
    pub expected_gain: f64,
    /// Reasoning for recommendation
    pub reasoning: String,
    /// Meta-learning statistics
    pub statistics: MetaLearningStatistics,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_meta_learning_optimizer_creation() {
        let config = MetaLearningConfig::default();
        let optimizer = MetaLearningOptimizer::new(config);
        assert!(optimizer.config.enable_transfer_learning);
    }

    #[test]
    fn test_meta_learner() {
        let meta_learner = MetaLearner::new();
        assert_eq!(meta_learner.algorithm, MetaLearningAlgorithm::MAML);
        assert_eq!(meta_learner.training_history.len(), 0);
    }

    #[test]
    fn test_training_episode() {
        let episode = TrainingEpisode {
            id: "test_episode".to_string(),
            support_set: Vec::new(),
            query_set: Vec::new(),
            loss: 0.5,
            accuracy: 0.8,
            timestamp: Instant::now(),
        };

        assert_eq!(episode.id, "test_episode");
        assert_eq!(episode.loss, 0.5);
        assert_eq!(episode.accuracy, 0.8);
    }
}