// quantrs2_anneal/advanced_meta_optimizer.rs

//! Advanced Meta-Learning Optimization System
//!
//! This module implements a meta-learning system that learns from
//! optimization history to improve future optimization runs. It includes:
//! - Optimization history analysis and pattern recognition
//! - Transfer learning between similar problems
//! - Adaptive strategy selection based on problem characteristics
//! - Performance prediction using learned models
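//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore`; the crate/module paths and the
//! `IsingModel` setters are assumed from the tests at the bottom of this file):
//!
//! ```ignore
//! use quantrs2_anneal::advanced_meta_optimizer::MetaLearningOptimizer;
//! use quantrs2_anneal::ising::IsingModel;
//!
//! let mut optimizer = MetaLearningOptimizer::new(1000, 42);
//! let mut model = IsingModel::new(8);
//! model.set_coupling(0, 1, -1.0).expect("should set coupling");
//!
//! // Pick a strategy for this problem and inspect the top predictions.
//! let strategy = optimizer.select_strategy(&model);
//! let top = optimizer.recommend_strategies(&model, 3);
//! ```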

use crate::ising::IsingModel;
use scirs2_core::ndarray::Array1;
use scirs2_core::random::prelude::*;
use scirs2_core::ChaCha8Rng;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};

// Re-export for convenience
pub use crate::ising::Coupling;

/// Problem features extracted for meta-learning
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProblemFeatures {
    /// Number of variables
    pub num_variables: usize,
    /// Problem density (ratio of non-zero couplings to possible pairs)
    pub density: f64,
    /// Mean absolute coupling strength
    pub coupling_mean: f64,
    /// Standard deviation of absolute coupling strengths
    pub coupling_std: f64,
    /// Maximum absolute coupling strength
    pub coupling_max: f64,
    /// Mean bias value
    pub bias_mean: f64,
    /// Standard deviation of bias values
    pub bias_std: f64,
    /// Average vertex degree of the coupling graph
    pub average_degree: f64,
    /// Maximum vertex degree of the coupling graph
    pub max_degree: usize,
    /// Estimated clustering coefficient of the coupling graph
    pub clustering_coefficient: f64,
    /// Estimated energy-barrier scale of the landscape
    pub estimated_barriers: f64,
    /// Fraction of frustrated (antiferromagnetic) couplings
    pub frustration_index: f64,
    /// Problem symmetry score in [0, 1]
    pub symmetry_score: f64,
}

/// Optimization run record for learning
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecord {
    /// Problem features
    pub features: ProblemFeatures,
    /// Strategy used
    pub strategy: OptimizationStrategy,
    /// Parameters used
    pub parameters: HashMap<String, f64>,
    /// Performance metrics
    pub best_energy: f64,
    pub convergence_time: Duration,
    pub iterations_to_converge: usize,
    pub success_rate: f64,
    /// Resource usage
    pub cpu_time: Duration,
    pub memory_peak_mb: f64,
    /// Timestamp (not serialized, defaults to now)
    #[serde(skip, default = "Instant::now")]
    pub timestamp: Instant,
}

/// Available optimization strategies
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum OptimizationStrategy {
    ClassicalAnnealing,
    QuantumAnnealing,
    PopulationAnnealing,
    CoherentIsingMachine,
    QuantumWalk,
    HybridQCML,
    AdaptiveSchedule,
    ReversedAnnealing,
}

/// Performance prediction model
#[derive(Debug, Clone)]
pub struct PerformancePredictor {
    /// Feature weights learned from history
    feature_weights: Array1<f64>,
    /// Strategy-specific adjustments
    strategy_adjustments: HashMap<OptimizationStrategy, f64>,
    /// Prediction confidence
    confidence: f64,
}

impl PerformancePredictor {
    /// Create a new predictor
    #[must_use]
    pub fn new(num_features: usize) -> Self {
        Self {
            feature_weights: Array1::zeros(num_features),
            strategy_adjustments: HashMap::new(),
            confidence: 0.0,
        }
    }

    /// Predict performance for given features and strategy
    #[must_use]
    pub fn predict(
        &self,
        features: &ProblemFeatures,
        strategy: OptimizationStrategy,
    ) -> PredictedPerformance {
        let feature_vec = self.features_to_vector(features);
        let base_score = feature_vec.dot(&self.feature_weights);

        let strategy_adj = self
            .strategy_adjustments
            .get(&strategy)
            .copied()
            .unwrap_or(0.0);

        // tanh squashes the linear score into (-1, 1)
        let predicted_quality = (base_score + strategy_adj).tanh();
        let predicted_time = self.estimate_time(features, strategy);

        PredictedPerformance {
            strategy,
            quality_score: predicted_quality,
            estimated_time: predicted_time,
            confidence: self.confidence,
        }
    }

    /// Convert features to vector for prediction
    fn features_to_vector(&self, features: &ProblemFeatures) -> Array1<f64> {
        Array1::from_vec(vec![
            features.num_variables as f64,
            features.density,
            features.coupling_mean,
            features.coupling_std,
            features.average_degree,
            features.clustering_coefficient,
            features.frustration_index,
            features.symmetry_score,
        ])
    }

    /// Estimate execution time based on problem size and strategy
    fn estimate_time(
        &self,
        features: &ProblemFeatures,
        strategy: OptimizationStrategy,
    ) -> Duration {
        let base_complexity = match strategy {
            OptimizationStrategy::ClassicalAnnealing => features.num_variables as f64,
            OptimizationStrategy::QuantumAnnealing => (features.num_variables as f64).powf(1.5),
            OptimizationStrategy::PopulationAnnealing => {
                features.num_variables as f64 * features.density
            }
            OptimizationStrategy::CoherentIsingMachine => (features.num_variables as f64).powi(2),
            OptimizationStrategy::QuantumWalk => {
                features.num_variables as f64 * features.average_degree
            }
            OptimizationStrategy::HybridQCML => features.num_variables as f64 * 1.5,
            OptimizationStrategy::AdaptiveSchedule => features.num_variables as f64 * 0.8,
            OptimizationStrategy::ReversedAnnealing => features.num_variables as f64 * 0.6,
        };

        // Scale by problem difficulty
        let difficulty_factor = 1.0 + features.frustration_index + (1.0 - features.symmetry_score);
        let estimated_ms = base_complexity * difficulty_factor * 10.0;

        Duration::from_millis(estimated_ms as u64)
    }

    /// Update predictor with new observation
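    ///
    /// Uses a single perceptron-style step: each weight moves by
    /// `learning_rate * (success_rate - predicted_quality) * feature_i`, the
    /// per-strategy adjustment moves by `learning_rate * error * 0.1`, and
    /// confidence tracks an exponential moving average of `1 - |error|`.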
    pub fn update(&mut self, record: &OptimizationRecord, learning_rate: f64) {
        let feature_vec = self.features_to_vector(&record.features);

        // Simple gradient update (could be replaced with more sophisticated learning)
        let predicted = self.predict(&record.features, record.strategy);
        let error = record.success_rate - predicted.quality_score;

        for i in 0..self.feature_weights.len() {
            self.feature_weights[i] += learning_rate * error * feature_vec[i];
        }

        // Update strategy adjustment (smaller step than the feature weights)
        let current_adj = self
            .strategy_adjustments
            .entry(record.strategy)
            .or_insert(0.0);
        *current_adj += learning_rate * error * 0.1;

        // Exponential moving average of prediction accuracy: 0.9 * old + 0.1 * (1 - |error|)
        self.confidence = 0.9f64.mul_add(self.confidence, 0.1 * (1.0 - error.abs()));
    }
}

/// Predicted performance metrics
#[derive(Debug, Clone)]
pub struct PredictedPerformance {
    pub strategy: OptimizationStrategy,
    pub quality_score: f64,
    pub estimated_time: Duration,
    pub confidence: f64,
}

/// Transfer learning engine for cross-problem knowledge transfer
#[derive(Debug, Clone)]
pub struct TransferLearningEngine {
    /// Source domain knowledge
    source_records: Vec<OptimizationRecord>,
    /// Similarity metrics between problems
    similarity_cache: HashMap<(usize, usize), f64>,
    /// Transfer weights
    transfer_weights: Vec<f64>,
}

impl TransferLearningEngine {
    /// Create new transfer learning engine
    #[must_use]
    pub fn new() -> Self {
        Self {
            source_records: Vec::new(),
            similarity_cache: HashMap::new(),
            transfer_weights: Vec::new(),
        }
    }

    /// Add source domain knowledge
    pub fn add_source_knowledge(&mut self, records: Vec<OptimizationRecord>) {
        self.source_records.extend(records);
        self.similarity_cache.clear(); // Invalidate cache
    }

    /// Compute similarity between two problems
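    ///
    /// Problems are compared by squared differences in relative size, density,
    /// frustration, and symmetry; the resulting distance `d` is mapped to a
    /// similarity with a Gaussian kernel, `exp(-d^2 / (2 * 0.5^2))`, so
    /// identical problems score 1.0 and dissimilar ones decay toward 0.0.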
    #[must_use]
    pub fn compute_similarity(
        &self,
        features1: &ProblemFeatures,
        features2: &ProblemFeatures,
    ) -> f64 {
        // Euclidean distance over selected features (size difference is normalized)
        let diff_size = ((features1.num_variables as f64 - features2.num_variables as f64)
            / features1.num_variables.max(features2.num_variables) as f64)
            .powi(2);
        let diff_density = (features1.density - features2.density).powi(2);
        let diff_frustration = (features1.frustration_index - features2.frustration_index).powi(2);
        let diff_symmetry = (features1.symmetry_score - features2.symmetry_score).powi(2);

        let distance = (diff_size + diff_density + diff_frustration + diff_symmetry).sqrt();

        // Convert distance to similarity (Gaussian kernel)
        let bandwidth: f64 = 0.5;
        (-distance.powi(2) / (2.0 * bandwidth.powi(2))).exp()
    }

    /// Transfer knowledge to target problem
    pub fn transfer_knowledge(
        &self,
        target_features: &ProblemFeatures,
    ) -> Vec<(OptimizationStrategy, f64)> {
        let mut strategy_scores: HashMap<OptimizationStrategy, Vec<f64>> = HashMap::new();

        for record in &self.source_records {
            let similarity = self.compute_similarity(target_features, &record.features);

            // Only transfer from sufficiently similar problems
            if similarity > 0.3 {
                let weighted_score = record.success_rate * similarity;
                strategy_scores
                    .entry(record.strategy)
                    .or_default()
                    .push(weighted_score);
            }
        }

        // Aggregate scores for each strategy
        let mut recommendations: Vec<(OptimizationStrategy, f64)> = strategy_scores
            .into_iter()
            .map(|(strategy, scores)| {
                let avg_score = scores.iter().sum::<f64>() / scores.len() as f64;
                (strategy, avg_score)
            })
            .collect();

        // Sort by score descending
        recommendations.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        recommendations
    }
}

impl Default for TransferLearningEngine {
    fn default() -> Self {
        Self::new()
    }
}

/// Adaptive strategy selector
#[derive(Debug)]
pub struct AdaptiveStrategySelector {
    /// Performance predictor
    predictor: PerformancePredictor,
    /// Transfer learning engine
    transfer_engine: TransferLearningEngine,
    /// Strategy exploration rate
    exploration_rate: f64,
    /// Random number generator
    rng: ChaCha8Rng,
}

impl AdaptiveStrategySelector {
    /// Create new adaptive selector
    #[must_use]
    pub fn new(seed: u64) -> Self {
        Self {
            predictor: PerformancePredictor::new(8),
            transfer_engine: TransferLearningEngine::new(),
            exploration_rate: 0.1,
            rng: ChaCha8Rng::seed_from_u64(seed),
        }
    }

    /// Select best strategy for given problem
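    ///
    /// Follows an epsilon-greedy policy: with probability `exploration_rate`
    /// a random strategy is tried; otherwise the strategy with the highest
    /// predicted quality plus a transfer-learning bonus is chosen.
    ///
    /// # Panics
    ///
    /// Panics if `available_strategies` is empty.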
    pub fn select_strategy(
        &mut self,
        features: &ProblemFeatures,
        available_strategies: &[OptimizationStrategy],
    ) -> OptimizationStrategy {
        // Exploration: randomly select strategy
        if self.rng.gen::<f64>() < self.exploration_rate {
            return *available_strategies
                .get(self.rng.gen_range(0..available_strategies.len()))
                .expect("random index should be within bounds");
        }

        // Exploitation: use learned knowledge
        let mut best_strategy = available_strategies[0];
        let mut best_score = f64::NEG_INFINITY;

        for &strategy in available_strategies {
            let prediction = self.predictor.predict(features, strategy);
            let transfer_bonus = self.get_transfer_bonus(features, strategy);
            let total_score = 0.3f64.mul_add(transfer_bonus, prediction.quality_score);

            if total_score > best_score {
                best_score = total_score;
                best_strategy = strategy;
            }
        }

        best_strategy
    }

    /// Get transfer learning bonus for strategy
    fn get_transfer_bonus(
        &self,
        features: &ProblemFeatures,
        strategy: OptimizationStrategy,
    ) -> f64 {
        let recommendations = self.transfer_engine.transfer_knowledge(features);

        recommendations
            .iter()
            .find(|(s, _)| *s == strategy)
            .map_or(0.0, |(_, score)| *score)
    }

    /// Update selector with new observation
    pub fn update(&mut self, record: OptimizationRecord) {
        self.predictor.update(&record, 0.01);
        self.transfer_engine.add_source_knowledge(vec![record]);

        // Decay exploration rate
        self.exploration_rate *= 0.999;
    }

    /// Get performance prediction for strategy
    #[must_use]
    pub fn predict_performance(
        &self,
        features: &ProblemFeatures,
        strategy: OptimizationStrategy,
    ) -> PredictedPerformance {
        self.predictor.predict(features, strategy)
    }
}

/// Meta-learning optimizer that learns from optimization history
#[derive(Debug)]
pub struct MetaLearningOptimizer {
    /// Optimization history
    history: VecDeque<OptimizationRecord>,
    /// Maximum history size
    max_history: usize,
    /// Strategy selector
    selector: AdaptiveStrategySelector,
    /// Statistics
    pub total_optimizations: usize,
    pub average_success_rate: f64,
}

impl MetaLearningOptimizer {
    /// Create new meta-learning optimizer
    #[must_use]
    pub fn new(max_history: usize, seed: u64) -> Self {
        Self {
            history: VecDeque::new(),
            max_history,
            selector: AdaptiveStrategySelector::new(seed),
            total_optimizations: 0,
            average_success_rate: 0.0,
        }
    }

    /// Extract features from Ising model
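    ///
    /// A short sketch (marked `ignore`; uses the same `IsingModel` setters as
    /// the tests at the bottom of this file):
    ///
    /// ```ignore
    /// let optimizer = MetaLearningOptimizer::new(100, 42);
    /// let mut model = IsingModel::new(6);
    /// model.set_coupling(0, 1, -1.0).expect("should set coupling");
    /// model.set_bias(0, 0.5).expect("should set bias");
    ///
    /// let features = optimizer.extract_features(&model);
    /// assert_eq!(features.num_variables, 6);
    /// ```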
    pub fn extract_features(&self, model: &IsingModel) -> ProblemFeatures {
        let num_variables = model.num_qubits;

        // Calculate coupling statistics using public API
        let couplings = model.couplings();
        let mut coupling_values = Vec::new();
        let mut degrees = vec![0; num_variables];

        for coupling in &couplings {
            coupling_values.push(coupling.strength.abs());
            degrees[coupling.i] += 1;
            degrees[coupling.j] += 1;
        }

        // Density relative to the number of possible pairs (guarded so
        // problems with fewer than two variables do not divide by zero).
        let possible_pairs = num_variables * num_variables.saturating_sub(1) / 2;
        let density = if possible_pairs > 0 {
            couplings.len() as f64 / possible_pairs as f64
        } else {
            0.0
        };

        let coupling_mean = if coupling_values.is_empty() {
            0.0
        } else {
            coupling_values.iter().sum::<f64>() / coupling_values.len() as f64
        };

        let coupling_std = if coupling_values.len() > 1 {
            let variance = coupling_values
                .iter()
                .map(|v| (v - coupling_mean).powi(2))
                .sum::<f64>()
                / coupling_values.len() as f64;
            variance.sqrt()
        } else {
            0.0
        };

        let coupling_max = coupling_values.iter().copied().fold(0.0, f64::max);

        // Bias statistics using public API
        let biases = model.biases();
        let bias_values: Vec<f64> = biases.iter().map(|(_, b)| *b).collect();
        let bias_mean = if bias_values.is_empty() {
            0.0
        } else {
            bias_values.iter().sum::<f64>() / bias_values.len() as f64
        };

        let bias_std = if bias_values.len() > 1 {
            let variance = bias_values
                .iter()
                .map(|v| (v - bias_mean).powi(2))
                .sum::<f64>()
                / bias_values.len() as f64;
            variance.sqrt()
        } else {
            0.0
        };

        // Graph properties
        let average_degree = if degrees.is_empty() {
            0.0
        } else {
            degrees.iter().sum::<usize>() as f64 / degrees.len() as f64
        };

        let max_degree = degrees.iter().copied().max().unwrap_or(0);

        // Simple clustering coefficient estimate
        let clustering_coefficient = self.estimate_clustering(model);

        // Energy landscape properties (simplified)
        let estimated_barriers = coupling_std / (1.0 + density);
        let frustration_index = self.estimate_frustration(model);

        // Symmetry (simplified - based on bias uniformity)
        let symmetry_score = 1.0 - (bias_std / (1.0 + bias_mean.abs()));

        ProblemFeatures {
            num_variables,
            density,
            coupling_mean,
            coupling_std,
            coupling_max,
            bias_mean,
            bias_std,
            average_degree,
            max_degree,
            clustering_coefficient,
            estimated_barriers,
            frustration_index,
            symmetry_score: symmetry_score.clamp(0.0, 1.0),
        }
    }

    /// Estimate clustering coefficient
    fn estimate_clustering(&self, model: &IsingModel) -> f64 {
        // Simplified clustering coefficient calculation using public API
        let couplings = model.couplings();

        // Build adjacency map
        let mut adj: HashMap<usize, Vec<usize>> = HashMap::new();
        for coupling in &couplings {
            adj.entry(coupling.i).or_default().push(coupling.j);
            adj.entry(coupling.j).or_default().push(coupling.i);
        }

        let mut triangles = 0;
        let mut triples = 0;

        for i in 0..model.num_qubits {
            if let Some(neighbors) = adj.get(&i) {
                for k in 0..neighbors.len() {
                    for l in (k + 1)..neighbors.len() {
                        triples += 1;
                        let j1 = neighbors[k];
                        let j2 = neighbors[l];

                        // Check if j1 and j2 are connected
                        if let Some(j1_neighbors) = adj.get(&j1) {
                            if j1_neighbors.contains(&j2) {
                                triangles += 1;
                            }
                        }
                    }
                }
            }
        }

        if triples > 0 {
            f64::from(triangles) / f64::from(triples)
        } else {
            0.0
        }
    }

    /// Estimate frustration index (fraction of antiferromagnetic couplings)
    fn estimate_frustration(&self, model: &IsingModel) -> f64 {
        // Crude proxy: count antiferromagnetic (positive) couplings via the public API
        let couplings = model.couplings();
        let mut frustrated = 0;
        let total = couplings.len();

        for coupling in &couplings {
            if coupling.strength > 0.0 {
                // Antiferromagnetic
                frustrated += 1;
            }
        }

        if total > 0 {
            f64::from(frustrated) / total as f64
        } else {
            0.0
        }
    }

    /// Select best strategy for problem
    pub fn select_strategy(&mut self, model: &IsingModel) -> OptimizationStrategy {
        let features = self.extract_features(model);
        let available = vec![
            OptimizationStrategy::ClassicalAnnealing,
            OptimizationStrategy::QuantumAnnealing,
            OptimizationStrategy::PopulationAnnealing,
            OptimizationStrategy::AdaptiveSchedule,
        ];

        self.selector.select_strategy(&features, &available)
    }

    /// Record optimization result
    pub fn record_optimization(&mut self, record: OptimizationRecord) {
        self.total_optimizations += 1;

        // Update running average
        self.average_success_rate = self
            .average_success_rate
            .mul_add((self.total_optimizations - 1) as f64, record.success_rate)
            / self.total_optimizations as f64;

        // Update selector
        self.selector.update(record.clone());

        // Add to history
        self.history.push_back(record);

        // Limit history size
        if self.history.len() > self.max_history {
            self.history.pop_front();
        }
    }

    /// Get recommended strategies for problem
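    ///
    /// Returns the `top_k` strategies sorted by predicted quality. A short
    /// sketch (marked `ignore`; mirrors the test at the bottom of this file):
    ///
    /// ```ignore
    /// let mut optimizer = MetaLearningOptimizer::new(100, 42);
    /// let mut model = IsingModel::new(15);
    /// model.set_coupling(0, 1, -1.0).expect("should set coupling");
    ///
    /// let top = optimizer.recommend_strategies(&model, 3);
    /// assert_eq!(top.len(), 3);
    /// ```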
    pub fn recommend_strategies(
        &mut self,
        model: &IsingModel,
        top_k: usize,
    ) -> Vec<(OptimizationStrategy, PredictedPerformance)> {
        let features = self.extract_features(model);
        let strategies = vec![
            OptimizationStrategy::ClassicalAnnealing,
            OptimizationStrategy::QuantumAnnealing,
            OptimizationStrategy::PopulationAnnealing,
            OptimizationStrategy::CoherentIsingMachine,
            OptimizationStrategy::QuantumWalk,
            OptimizationStrategy::HybridQCML,
            OptimizationStrategy::AdaptiveSchedule,
            OptimizationStrategy::ReversedAnnealing,
        ];

        let mut recommendations: Vec<_> = strategies
            .iter()
            .map(|&strategy| {
                let prediction = self.selector.predict_performance(&features, strategy);
                (strategy, prediction)
            })
            .collect();

        // Sort by quality score
        recommendations.sort_by(|a, b| {
            b.1.quality_score
                .partial_cmp(&a.1.quality_score)
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        recommendations.into_iter().take(top_k).collect()
    }

    /// Get optimization statistics
    #[must_use]
    pub fn get_statistics(&self) -> MetaLearningStatistics {
        MetaLearningStatistics {
            total_optimizations: self.total_optimizations,
            average_success_rate: self.average_success_rate,
            history_size: self.history.len(),
            exploration_rate: self.selector.exploration_rate,
        }
    }
}

/// Meta-learning statistics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetaLearningStatistics {
    pub total_optimizations: usize,
    pub average_success_rate: f64,
    pub history_size: usize,
    pub exploration_rate: f64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_meta_learning_optimizer_creation() {
        let optimizer = MetaLearningOptimizer::new(100, 42);
        assert_eq!(optimizer.total_optimizations, 0);
        assert_eq!(optimizer.average_success_rate, 0.0);
    }

    #[test]
    fn test_feature_extraction() {
        let optimizer = MetaLearningOptimizer::new(100, 42);
        let mut model = IsingModel::new(5);
        model.set_coupling(0, 1, -1.0).expect("should set coupling");
        model.set_coupling(1, 2, -1.0).expect("should set coupling");
        model.set_bias(0, 0.5).expect("should set bias");

        let features = optimizer.extract_features(&model);
        assert_eq!(features.num_variables, 5);
        assert!(features.density > 0.0);
    }

    #[test]
    fn test_strategy_selection() {
        let mut optimizer = MetaLearningOptimizer::new(100, 42);
        let mut model = IsingModel::new(10);
        model.set_coupling(0, 1, -1.0).expect("should set coupling");

        let strategy = optimizer.select_strategy(&model);
        assert!(matches!(
            strategy,
            OptimizationStrategy::ClassicalAnnealing
                | OptimizationStrategy::QuantumAnnealing
                | OptimizationStrategy::PopulationAnnealing
                | OptimizationStrategy::AdaptiveSchedule
        ));
    }

    #[test]
    fn test_performance_predictor() {
        let predictor = PerformancePredictor::new(8);
        let features = ProblemFeatures {
            num_variables: 20,
            density: 0.3,
            coupling_mean: 1.0,
            coupling_std: 0.5,
            coupling_max: 2.0,
            bias_mean: 0.0,
            bias_std: 0.2,
            average_degree: 6.0,
            max_degree: 10,
            clustering_coefficient: 0.3,
            estimated_barriers: 0.5,
            frustration_index: 0.4,
            symmetry_score: 0.7,
        };

        let prediction = predictor.predict(&features, OptimizationStrategy::ClassicalAnnealing);
        assert!(prediction.quality_score.abs() <= 1.0);
        assert!(prediction.estimated_time.as_millis() > 0);
    }

    #[test]
    fn test_transfer_learning() {
        let mut engine = TransferLearningEngine::new();

        let features1 = ProblemFeatures {
            num_variables: 20,
            density: 0.3,
            coupling_mean: 1.0,
            coupling_std: 0.5,
            coupling_max: 2.0,
            bias_mean: 0.0,
            bias_std: 0.2,
            average_degree: 6.0,
            max_degree: 10,
            clustering_coefficient: 0.3,
            estimated_barriers: 0.5,
            frustration_index: 0.4,
            symmetry_score: 0.7,
        };

        let features2 = features1.clone();

        let similarity = engine.compute_similarity(&features1, &features2);
        assert!((similarity - 1.0).abs() < 0.01); // Should be very similar to itself
    }

    #[test]
    fn test_record_optimization() {
        let mut optimizer = MetaLearningOptimizer::new(100, 42);

        let features = ProblemFeatures {
            num_variables: 10,
            density: 0.2,
            coupling_mean: 1.0,
            coupling_std: 0.3,
            coupling_max: 1.5,
            bias_mean: 0.0,
            bias_std: 0.1,
            average_degree: 4.0,
            max_degree: 8,
            clustering_coefficient: 0.2,
            estimated_barriers: 0.3,
            frustration_index: 0.3,
            symmetry_score: 0.8,
        };

        let record = OptimizationRecord {
            features,
            strategy: OptimizationStrategy::ClassicalAnnealing,
            parameters: HashMap::new(),
            best_energy: -10.0,
            convergence_time: Duration::from_secs(1),
            iterations_to_converge: 100,
            success_rate: 0.95,
            cpu_time: Duration::from_secs(1),
            memory_peak_mb: 50.0,
            timestamp: Instant::now(),
        };

        optimizer.record_optimization(record);
        assert_eq!(optimizer.total_optimizations, 1);
        assert_eq!(optimizer.average_success_rate, 0.95);
    }

    #[test]
    fn test_recommend_strategies() {
        let mut optimizer = MetaLearningOptimizer::new(100, 42);
        let mut model = IsingModel::new(15);
        model.set_coupling(0, 1, -1.0).expect("should set coupling");
        model.set_coupling(1, 2, 1.0).expect("should set coupling");

        let recommendations = optimizer.recommend_strategies(&model, 3);
        assert_eq!(recommendations.len(), 3);

        // Should be sorted by quality
        if recommendations.len() >= 2 {
            assert!(recommendations[0].1.quality_score >= recommendations[1].1.quality_score);
        }
    }
}