scirs2_optimize/learned_optimizers/
few_shot_learning_enhancement.rs

//! Few-Shot Learning Enhancement for Optimization
//!
//! This module implements few-shot learning capabilities that allow optimizers
//! to quickly adapt to new optimization problems with minimal training data.
//! The system leverages meta-learning and rapid adaptation techniques.
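//!
//! # Example
//!
//! A minimal illustrative sketch of the intended flow, mirroring the unit
//! tests at the bottom of this file (marked `ignore` because it is not a
//! compiled doctest):
//!
//! ```ignore
//! // Build an optimizer with the default learned-optimization config.
//! let config = LearnedOptimizationConfig::default();
//! let mut optimizer = FewShotLearningOptimizer::new(config);
//!
//! // Optimize a simple quadratic starting from (2, 2); support examples
//! // could first be supplied via `few_shot_adapt`.
//! let objective = |x: &ArrayView1<f64>| x[0].powi(2) + x[1].powi(2);
//! let initial = Array1::from(vec![2.0, 2.0]);
//! let result = optimizer.optimize(objective, &initial.view()).unwrap();
//! assert!(result.fun >= 0.0);
//! ```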

use super::{
    ActivationType, LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState,
    OptimizationProblem, TrainingTask,
};
use crate::error::OptimizeResult;
use crate::result::OptimizeResults;
use scirs2_core::ndarray::{Array1, Array2, Array3, ArrayView1};
use scirs2_core::random::Rng;
use std::collections::{HashMap, VecDeque};

/// Few-Shot Learning Optimizer with Rapid Adaptation
#[derive(Debug, Clone)]
pub struct FewShotLearningOptimizer {
    /// Configuration
    config: LearnedOptimizationConfig,
    /// Meta-learner network
    meta_learner: MetaLearnerNetwork,
    /// Fast adaptation mechanism
    fast_adapter: FastAdaptationMechanism,
    /// Problem similarity matcher
    similarity_matcher: ProblemSimilarityMatcher,
    /// Experience memory
    experience_memory: ExperienceMemory,
    /// Meta-optimizer state
    meta_state: MetaOptimizerState,
    /// Adaptation statistics
    adaptation_stats: FewShotAdaptationStats,
    /// Current task context
    current_task_context: Option<TaskContext>,
}

/// Meta-learner network for few-shot optimization
#[derive(Debug, Clone)]
pub struct MetaLearnerNetwork {
    /// Feature extractor
    feature_extractor: FeatureExtractor,
    /// Context encoder
    context_encoder: ContextEncoder,
    /// Parameter generator
    parameter_generator: ParameterGenerator,
    /// Update network
    update_network: UpdateNetwork,
    /// Memory networks
    memory_networks: Vec<MemoryNetwork>,
}

/// Feature extractor for optimization problems
#[derive(Debug, Clone)]
pub struct FeatureExtractor {
    /// Convolutional layers for structured features
    conv_layers: Vec<ConvLayer>,
    /// Dense layers for feature processing
    dense_layers: Vec<DenseLayer>,
    /// Attention mechanism for feature selection
    attention_mechanism: FeatureAttention,
    /// Feature dimension
    feature_dim: usize,
}

/// Convolutional layer
#[derive(Debug, Clone)]
pub struct ConvLayer {
    /// Weights
    weights: Array3<f64>,
    /// Bias
    bias: Array1<f64>,
    /// Kernel size
    kernel_size: usize,
    /// Stride
    stride: usize,
    /// Activation
    activation: ActivationType,
}

/// Dense layer
#[derive(Debug, Clone)]
pub struct DenseLayer {
    /// Weights
    weights: Array2<f64>,
    /// Bias
    bias: Array1<f64>,
    /// Activation
    activation: ActivationType,
}

/// Feature attention mechanism
#[derive(Debug, Clone)]
pub struct FeatureAttention {
    /// Query weights
    query_weights: Array2<f64>,
    /// Key weights
    key_weights: Array2<f64>,
    /// Value weights
    value_weights: Array2<f64>,
    /// Attention scores
    attention_scores: Array1<f64>,
}

/// Context encoder for task understanding
#[derive(Debug, Clone)]
pub struct ContextEncoder {
    /// LSTM for sequential context
    lstm: LSTMCell,
    /// Embedding layer for discrete features
    embedding_layer: Array2<f64>,
    /// Context aggregation network
    aggregation_network: Array2<f64>,
    /// Context dimension
    context_dim: usize,
}

/// LSTM cell
#[derive(Debug, Clone)]
pub struct LSTMCell {
    /// Input gate weights
    w_i: Array2<f64>,
    /// Forget gate weights
    w_f: Array2<f64>,
    /// Cell gate weights
    w_c: Array2<f64>,
    /// Output gate weights
    w_o: Array2<f64>,
    /// Hidden state
    hidden_state: Array1<f64>,
    /// Cell state
    cell_state: Array1<f64>,
}

/// Parameter generator for optimization strategies
#[derive(Debug, Clone)]
pub struct ParameterGenerator {
    /// Generator network
    generator_network: Array2<f64>,
    /// Conditioning network
    conditioning_network: Array2<f64>,
    /// Output projection
    output_projection: Array2<f64>,
    /// Generated parameter dimension
    param_dim: usize,
}

/// Update network for parameter adaptation
#[derive(Debug, Clone)]
pub struct UpdateNetwork {
    /// Update computation network
    update_network: Array2<f64>,
    /// Meta-gradient network
    meta_gradient_network: Array2<f64>,
    /// Learning rate network
    lr_network: Array2<f64>,
    /// Update history
    update_history: VecDeque<Array1<f64>>,
}

/// Memory network for storing optimization patterns
#[derive(Debug, Clone)]
pub struct MemoryNetwork {
    /// Memory bank
    memory_bank: Array2<f64>,
    /// Memory keys
    memory_keys: Array2<f64>,
    /// Memory values
    memory_values: Array2<f64>,
    /// Access patterns
    access_patterns: Vec<Array1<f64>>,
    /// Memory size
    memory_size: usize,
}

/// Fast adaptation mechanism
#[derive(Debug, Clone)]
pub struct FastAdaptationMechanism {
    /// Gradient-based adaptation
    gradient_adapter: GradientBasedAdapter,
    /// Prototype-based adaptation
    prototype_adapter: PrototypeBasedAdapter,
    /// Model-agnostic meta-learning (MAML)
    maml_adapter: MAMLAdapter,
    /// Adaptation strategy selector
    strategy_selector: AdaptationStrategySelector,
}

/// Gradient-based adaptation
#[derive(Debug, Clone)]
pub struct GradientBasedAdapter {
    /// Meta-learning rate
    meta_lr: f64,
    /// Inner learning rate
    inner_lr: f64,
    /// Number of adaptation steps
    adaptation_steps: usize,
    /// Gradient accumulator
    gradient_accumulator: Array1<f64>,
}

/// Prototype-based adaptation
#[derive(Debug, Clone)]
pub struct PrototypeBasedAdapter {
    /// Prototype embeddings
    prototypes: Array2<f64>,
    /// Prototype labels
    prototype_labels: Vec<String>,
    /// Distance metric
    distance_metric: DistanceMetric,
    /// Adaptation weights
    adaptation_weights: Array1<f64>,
}

/// Distance metrics for prototype matching
#[derive(Debug, Clone)]
pub enum DistanceMetric {
    Euclidean,
    Cosine,
    Mahalanobis { covariance_inv: Array2<f64> },
    Learned { distance_network: Array2<f64> },
}

/// Model-Agnostic Meta-Learning adapter
#[derive(Debug, Clone)]
pub struct MAMLAdapter {
    /// Meta-parameters
    meta_parameters: Array1<f64>,
    /// Task-specific parameters
    task_parameters: HashMap<String, Array1<f64>>,
    /// Inner loop optimizer
    inner_optimizer: InnerLoopOptimizer,
    /// Meta-optimizer
    meta_optimizer: MetaOptimizer,
}

/// Inner loop optimizer for MAML
#[derive(Debug, Clone)]
pub struct InnerLoopOptimizer {
    /// Learning rate
    learning_rate: f64,
    /// Momentum
    momentum: f64,
    /// Velocity
    velocity: Array1<f64>,
}

/// Meta-optimizer for MAML
#[derive(Debug, Clone)]
pub struct MetaOptimizer {
    /// Meta-learning rate
    meta_learning_rate: f64,
    /// Meta-momentum
    meta_momentum: f64,
    /// Meta-velocity
    meta_velocity: Array1<f64>,
}

/// Strategy selector for adaptation methods
#[derive(Debug, Clone)]
pub struct AdaptationStrategySelector {
    /// Strategy scores
    strategy_scores: HashMap<String, f64>,
    /// Selection network
    selection_network: Array2<f64>,
    /// Current strategy
    current_strategy: AdaptationStrategy,
}

/// Types of adaptation strategies
#[derive(Debug, Clone)]
pub enum AdaptationStrategy {
    Gradient,
    Prototype,
    MAML,
    Hybrid { weights: Array1<f64> },
}

/// Problem similarity matcher
#[derive(Debug, Clone)]
pub struct ProblemSimilarityMatcher {
    /// Problem embeddings
    problem_embeddings: HashMap<String, Array1<f64>>,
    /// Similarity computation network
    similarity_network: Array2<f64>,
    /// Similarity threshold
    similarity_threshold: f64,
    /// Cached similarities
    similarity_cache: HashMap<(String, String), f64>,
}

/// Experience memory for few-shot learning
#[derive(Debug, Clone)]
pub struct ExperienceMemory {
    /// Support set examples
    support_set: Vec<SupportExample>,
    /// Query set examples
    query_set: Vec<QueryExample>,
    /// Memory capacity
    capacity: usize,
    /// Episodic memory
    episodic_memory: VecDeque<Episode>,
}

/// Support example for few-shot learning
#[derive(Debug, Clone)]
pub struct SupportExample {
    /// Problem encoding
    problem_encoding: Array1<f64>,
    /// Optimization trajectory
    trajectory: OptimizationTrajectory,
    /// Success indicator
    success: bool,
    /// Problem metadata
    metadata: HashMap<String, f64>,
}

/// Query example for evaluation
#[derive(Debug, Clone)]
pub struct QueryExample {
    /// Problem encoding
    problem_encoding: Array1<f64>,
    /// Target optimization strategy
    target_strategy: Array1<f64>,
    /// Expected performance
    expected_performance: f64,
}

/// Episode in episodic memory
#[derive(Debug, Clone)]
pub struct Episode {
    /// Task identifier
    task_id: String,
    /// Support examples
    support_examples: Vec<SupportExample>,
    /// Query examples
    query_examples: Vec<QueryExample>,
    /// Adaptation performance
    adaptation_performance: f64,
    /// Episode timestamp
    timestamp: usize,
}

/// Optimization trajectory
#[derive(Debug, Clone)]
pub struct OptimizationTrajectory {
    /// Parameter history
    parameter_history: Vec<Array1<f64>>,
    /// Objective history
    objective_history: Vec<f64>,
    /// Gradient history
    gradient_history: Vec<Array1<f64>>,
    /// Step size history
    step_size_history: Vec<f64>,
    /// Total steps
    total_steps: usize,
}

/// Few-shot adaptation statistics
#[derive(Debug, Clone)]
pub struct FewShotAdaptationStats {
    /// Adaptation speed (steps to convergence)
    adaptation_speed: f64,
    /// Transfer efficiency
    transfer_efficiency: f64,
    /// Number of support examples used
    support_examples_used: usize,
    /// Adaptation success rate
    adaptation_success_rate: f64,
    /// Meta-learning progress
    meta_learning_progress: f64,
}

/// Task context for current optimization
#[derive(Debug, Clone)]
pub struct TaskContext {
    /// Task description
    task_description: String,
    /// Problem characteristics
    problem_characteristics: Array1<f64>,
    /// Available support examples
    available_support: Vec<String>,
    /// Adaptation budget
    adaptation_budget: usize,
    /// Performance target
    performance_target: f64,
}

impl FewShotLearningOptimizer {
    /// Create new few-shot learning optimizer
    pub fn new(config: LearnedOptimizationConfig) -> Self {
        let feature_dim = config.hidden_size;
        let meta_learner = MetaLearnerNetwork::new(feature_dim);
        let fast_adapter = FastAdaptationMechanism::new(config.inner_learning_rate);
        let similarity_matcher = ProblemSimilarityMatcher::new(feature_dim);
        let experience_memory = ExperienceMemory::new(1000);

        Self {
            config,
            meta_learner,
            fast_adapter,
            similarity_matcher,
            experience_memory,
            meta_state: MetaOptimizerState {
                meta_params: Array1::zeros(feature_dim),
                network_weights: Array2::zeros((feature_dim, feature_dim)),
                performance_history: Vec::new(),
                adaptation_stats: super::AdaptationStatistics::default(),
                episode: 0,
            },
            adaptation_stats: FewShotAdaptationStats::default(),
            current_task_context: None,
        }
    }

    /// Perform few-shot adaptation to a new problem
    pub fn few_shot_adapt(
        &mut self,
        support_examples: &[SupportExample],
        target_problem: &OptimizationProblem,
    ) -> OptimizeResult<()> {
        // Extract features from support examples
        let support_features = self.extract_support_features(support_examples)?;

        // Find similar problems in memory
        let similar_problems = self.find_similar_problems(target_problem)?;

        // Select adaptation strategy
        let strategy = self.select_adaptation_strategy(&support_features, &similar_problems)?;

        // Perform adaptation based on selected strategy
        match strategy {
            AdaptationStrategy::Gradient => {
                self.gradient_based_adaptation(support_examples)?;
            }
            AdaptationStrategy::Prototype => {
                self.prototype_based_adaptation(support_examples)?;
            }
            AdaptationStrategy::MAML => {
                self.maml_adaptation(support_examples)?;
            }
            AdaptationStrategy::Hybrid { weights } => {
                self.hybrid_adaptation(support_examples, &weights)?;
            }
        }

        // Update adaptation statistics
        self.update_adaptation_stats(support_examples.len())?;

        Ok(())
    }

    /// Extract features from support examples
    fn extract_support_features(
        &self,
        support_examples: &[SupportExample],
    ) -> OptimizeResult<Array2<f64>> {
        let num_examples = support_examples.len();
        let feature_dim = self.meta_learner.feature_extractor.feature_dim;
        let mut features = Array2::zeros((num_examples, feature_dim));

        for (i, example) in support_examples.iter().enumerate() {
            let extracted_features = self
                .meta_learner
                .feature_extractor
                .extract(&example.problem_encoding)?;
            for j in 0..feature_dim.min(extracted_features.len()) {
                features[[i, j]] = extracted_features[j];
            }
        }

        Ok(features)
    }

    /// Find similar problems in experience memory
    fn find_similar_problems(
        &self,
        target_problem: &OptimizationProblem,
    ) -> OptimizeResult<Vec<String>> {
        let target_encoding = self.encode_problem_for_similarity(target_problem)?;
        let mut similarities = Vec::new();

        for (problem_id, problem_embedding) in &self.similarity_matcher.problem_embeddings {
            let similarity = self.compute_similarity(&target_encoding, problem_embedding)?;
            if similarity > self.similarity_matcher.similarity_threshold {
                similarities.push((problem_id.clone(), similarity));
            }
        }

        // Sort by similarity and return top matches
        similarities.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        Ok(similarities.into_iter().take(5).map(|(id, _)| id).collect())
    }

    /// Encode problem for similarity matching
    fn encode_problem_for_similarity(
        &self,
        problem: &OptimizationProblem,
    ) -> OptimizeResult<Array1<f64>> {
        let mut encoding = Array1::zeros(self.meta_learner.feature_extractor.feature_dim);

        // Basic encoding (in practice would be more sophisticated)
        encoding[0] = (problem.dimension as f64).ln();
        encoding[1] = (problem.max_evaluations as f64).ln();
        encoding[2] = problem.target_accuracy.ln().abs();

        // Encode problem class
        match problem.problem_class.as_str() {
            "quadratic" => encoding[3] = 1.0,
            "neural_network" => encoding[4] = 1.0,
            "sparse" => {
                encoding[5] = 1.0;
                encoding[6] = 1.0;
            }
            _ => {} // Default case for unknown problem classes
        }

        Ok(encoding)
    }

    /// Compute similarity between problem encodings
    fn compute_similarity(
        &self,
        encoding1: &Array1<f64>,
        encoding2: &Array1<f64>,
    ) -> OptimizeResult<f64> {
        // Cosine similarity
        let dot_product = encoding1
            .iter()
            .zip(encoding2.iter())
            .map(|(&a, &b)| a * b)
            .sum::<f64>();

        let norm1 = (encoding1.iter().map(|&x| x * x).sum::<f64>()).sqrt();
        let norm2 = (encoding2.iter().map(|&x| x * x).sum::<f64>()).sqrt();

        if norm1 > 0.0 && norm2 > 0.0 {
            Ok(dot_product / (norm1 * norm2))
        } else {
            Ok(0.0)
        }
    }

    /// Select adaptation strategy
    fn select_adaptation_strategy(
        &self,
        support_features: &Array2<f64>,
        similar_problems: &[String],
    ) -> OptimizeResult<AdaptationStrategy> {
        // Simple heuristic-based selection (in practice would use learned selector)
        let num_support = support_features.nrows();
        let num_similar = similar_problems.len();

        if num_support <= 2 {
            Ok(AdaptationStrategy::Prototype)
        } else if num_similar > 3 {
            Ok(AdaptationStrategy::MAML)
        } else if num_support > 10 {
            Ok(AdaptationStrategy::Gradient)
        } else {
            Ok(AdaptationStrategy::Hybrid {
                weights: Array1::from(vec![0.3, 0.4, 0.3]),
            })
        }
    }

    /// Gradient-based adaptation
    fn gradient_based_adaptation(
        &mut self,
        support_examples: &[SupportExample],
    ) -> OptimizeResult<()> {
        // First compute all meta-gradients
        let all_gradients: Result<Vec<_>, _> = support_examples
            .iter()
            .map(|example| self.compute_meta_gradients(example))
            .collect();
        let all_gradients = all_gradients?;

        // Now update the adapter with all computed gradients
        let adapter = &mut self.fast_adapter.gradient_adapter;
        for meta_gradients in all_gradients {
            // Update adaptation parameters
            for (i, &grad) in meta_gradients.iter().enumerate() {
                if i < adapter.gradient_accumulator.len() {
                    adapter.gradient_accumulator[i] += adapter.meta_lr * grad;
                }
            }
        }

        Ok(())
    }

    /// Prototype-based adaptation
    fn prototype_based_adaptation(
        &mut self,
        support_examples: &[SupportExample],
    ) -> OptimizeResult<()> {
        let adapter = &mut self.fast_adapter.prototype_adapter;

        // Update prototypes based on support examples
        for (i, example) in support_examples.iter().enumerate() {
            if i < adapter.prototypes.nrows() {
                for (j, &feature) in example.problem_encoding.iter().enumerate() {
                    if j < adapter.prototypes.ncols() {
                        adapter.prototypes[[i, j]] =
                            0.9 * adapter.prototypes[[i, j]] + 0.1 * feature;
                    }
                }
            }
        }

        Ok(())
    }

    /// MAML adaptation
    fn maml_adaptation(&mut self, support_examples: &[SupportExample]) -> OptimizeResult<()> {
        for example in support_examples {
            // Inner loop update
            let inner_gradients = self.compute_inner_gradients(example)?;
            self.apply_inner_update(&inner_gradients)?;

            // Meta-gradients computation would happen during meta-training
        }

        Ok(())
    }

    /// Hybrid adaptation combining multiple strategies
    fn hybrid_adaptation(
        &mut self,
        support_examples: &[SupportExample],
        weights: &Array1<f64>,
    ) -> OptimizeResult<()> {
        if weights.len() >= 3 {
            // Apply weighted combination of strategies
            if weights[0] > 0.0 {
                self.gradient_based_adaptation(support_examples)?;
            }
            if weights[1] > 0.0 {
                self.prototype_based_adaptation(support_examples)?;
            }
            if weights[2] > 0.0 {
                self.maml_adaptation(support_examples)?;
            }
        }

        Ok(())
    }

    /// Compute meta-gradients from example
    fn compute_meta_gradients(&self, example: &SupportExample) -> OptimizeResult<Array1<f64>> {
        // Simplified meta-gradient computation
        let mut gradients = Array1::zeros(self.meta_state.meta_params.len());

        // Use trajectory information to estimate gradients
        if !example.trajectory.objective_history.is_empty() {
            let improvement = example
                .trajectory
                .objective_history
                .first()
                .copied()
                .unwrap_or(0.0)
                - example
                    .trajectory
                    .objective_history
                    .last()
                    .copied()
                    .unwrap_or(0.0);

            // Simple gradient estimation based on improvement
            for i in 0..gradients.len() {
                gradients[i] =
                    improvement * (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.01;
            }
        }

        Ok(gradients)
    }

    /// Compute inner gradients for MAML
    fn compute_inner_gradients(&self, example: &SupportExample) -> OptimizeResult<Array1<f64>> {
        // Simplified inner gradient computation
        let mut gradients = Array1::zeros(self.meta_state.meta_params.len());

        if !example.trajectory.gradient_history.is_empty() {
            if let Some(last_gradient) = example.trajectory.gradient_history.last() {
                for (i, &grad) in last_gradient.iter().enumerate() {
                    if i < gradients.len() {
                        gradients[i] = grad * 0.1; // Scale down
                    }
                }
            }
        }

        Ok(gradients)
    }

    /// Apply inner loop update
    fn apply_inner_update(&mut self, gradients: &Array1<f64>) -> OptimizeResult<()> {
        let lr = self.fast_adapter.maml_adapter.inner_optimizer.learning_rate;

        for (i, &grad) in gradients.iter().enumerate() {
            if i < self.meta_state.meta_params.len() {
                self.meta_state.meta_params[i] -= lr * grad;
            }
        }

        Ok(())
    }

    /// Update adaptation statistics
    fn update_adaptation_stats(&mut self, num_support_examples: usize) -> OptimizeResult<()> {
        self.adaptation_stats.support_examples_used = num_support_examples;
        self.adaptation_stats.adaptation_speed = 1.0 / (num_support_examples as f64 + 1.0);
        self.adaptation_stats.transfer_efficiency = if num_support_examples > 0 {
            1.0 / num_support_examples as f64
        } else {
            0.0
        };

        Ok(())
    }

    /// Generate optimization strategy from few-shot adaptation
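    ///
    /// A minimal illustrative sketch (assuming `optimizer` is an adapted
    /// `FewShotLearningOptimizer` and `problem` is an `OptimizationProblem`
    /// built as in `optimize` below; shown as `ignore`, not a compiled doctest):
    ///
    /// ```ignore
    /// let strategy = optimizer.generate_optimization_strategy(&problem)?;
    /// if let StepSizeSchedule::Exponential { initial_step, decay_rate } = strategy.step_size_schedule {
    ///     println!("step size at iteration k: {initial_step} * {decay_rate}^k");
    /// }
    /// ```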
    pub fn generate_optimization_strategy(
        &self,
        problem: &OptimizationProblem,
    ) -> OptimizeResult<OptimizationStrategy> {
        // Extract problem features
        let problem_encoding = self.encode_problem_for_similarity(problem)?;

        // Generate strategy using meta-learner
        let strategy_params = self
            .meta_learner
            .parameter_generator
            .generate(&problem_encoding)?;

        Ok(OptimizationStrategy {
            step_size_schedule: self.generate_step_size_schedule(&strategy_params)?,
            direction_computation: DirectionComputation::GradientBased {
                momentum: strategy_params.get(0).copied().unwrap_or(0.9),
            },
            convergence_criteria: ConvergenceCriteria {
                tolerance: strategy_params.get(1).copied().unwrap_or(1e-6),
                max_nit: problem.max_evaluations,
            },
            adaptation_rate: strategy_params.get(2).copied().unwrap_or(0.01),
        })
    }

    /// Generate step size schedule
    fn generate_step_size_schedule(
        &self,
        strategy_params: &Array1<f64>,
    ) -> OptimizeResult<StepSizeSchedule> {
        let initial_step = strategy_params.get(3).copied().unwrap_or(0.01);
        let decay_rate = strategy_params.get(4).copied().unwrap_or(0.99);

        Ok(StepSizeSchedule::Exponential {
            initial_step,
            decay_rate,
        })
    }

    /// Get adaptation statistics
    pub fn get_adaptation_stats(&self) -> &FewShotAdaptationStats {
        &self.adaptation_stats
    }

    /// Add experience to memory
    pub fn add_experience(
        &mut self,
        problem: &OptimizationProblem,
        trajectory: OptimizationTrajectory,
    ) {
        let problem_encoding = self
            .encode_problem_for_similarity(problem)
            .unwrap_or_default();

        let support_example = SupportExample {
            problem_encoding,
            trajectory,
            success: true, // Determine based on trajectory
            metadata: HashMap::new(),
        };

        self.experience_memory.add_support_example(support_example);
    }
}

/// Generated optimization strategy
#[derive(Debug, Clone)]
pub struct OptimizationStrategy {
    /// Step size schedule
    pub step_size_schedule: StepSizeSchedule,
    /// Direction computation method
    pub direction_computation: DirectionComputation,
    /// Convergence criteria
    pub convergence_criteria: ConvergenceCriteria,
    /// Adaptation rate for online learning
    pub adaptation_rate: f64,
}

/// Step size schedule types
#[derive(Debug, Clone)]
pub enum StepSizeSchedule {
    Constant {
        step_size: f64,
    },
    Exponential {
        initial_step: f64,
        decay_rate: f64,
    },
    Polynomial {
        initial_step: f64,
        power: f64,
    },
    Adaptive {
        base_step: f64,
        adaptation_factor: f64,
    },
}

/// Direction computation methods
#[derive(Debug, Clone)]
pub enum DirectionComputation {
    GradientBased { momentum: f64 },
    QuasiNewton { method: String },
    TrustRegion { radius: f64 },
    Adaptive { method_weights: Array1<f64> },
}

/// Convergence criteria
#[derive(Debug, Clone)]
pub struct ConvergenceCriteria {
    /// Tolerance for convergence
    pub tolerance: f64,
    /// Maximum iterations
    pub max_nit: usize,
}

impl MetaLearnerNetwork {
    /// Create new meta-learner network
    pub fn new(feature_dim: usize) -> Self {
        Self {
            feature_extractor: FeatureExtractor::new(feature_dim),
            context_encoder: ContextEncoder::new(feature_dim),
            parameter_generator: ParameterGenerator::new(feature_dim),
            update_network: UpdateNetwork::new(feature_dim),
            memory_networks: vec![MemoryNetwork::new(feature_dim, 100)],
        }
    }
}

impl FeatureExtractor {
    /// Create new feature extractor
    pub fn new(feature_dim: usize) -> Self {
        Self {
            conv_layers: vec![],
            dense_layers: vec![
                DenseLayer::new(feature_dim, feature_dim * 2, ActivationType::ReLU),
                DenseLayer::new(feature_dim * 2, feature_dim, ActivationType::ReLU),
            ],
            attention_mechanism: FeatureAttention::new(feature_dim),
            feature_dim,
        }
    }

    /// Extract features from problem encoding
    pub fn extract(&self, problem_encoding: &Array1<f64>) -> OptimizeResult<Array1<f64>> {
        let mut features = problem_encoding.clone();

        // Pass through dense layers
        for layer in &self.dense_layers {
            features = layer.forward(&features.view())?;
        }

        // Apply attention
        features = self.attention_mechanism.apply(&features)?;

        Ok(features)
    }
}

impl DenseLayer {
    /// Create new dense layer
    pub fn new(input_size: usize, output_size: usize, activation: ActivationType) -> Self {
        let weights = Array2::from_shape_fn((output_size, input_size), |_| {
            (scirs2_core::random::rng().random::<f64>() - 0.5) * (2.0 / input_size as f64).sqrt()
        });
        let bias = Array1::zeros(output_size);

        Self {
            weights,
            bias,
            activation,
        }
    }

    /// Forward pass through dense layer
    pub fn forward(&self, input: &ArrayView1<f64>) -> OptimizeResult<Array1<f64>> {
        let mut output = Array1::zeros(self.bias.len());

        for i in 0..output.len() {
            for j in 0..input.len().min(self.weights.ncols()) {
                output[i] += self.weights[[i, j]] * input[j];
            }
            output[i] += self.bias[i];
            output[i] = self.activation.apply(output[i]);
        }

        Ok(output)
    }
}

impl FeatureAttention {
    /// Create new feature attention
    pub fn new(feature_dim: usize) -> Self {
        Self {
            query_weights: Array2::from_shape_fn((feature_dim, feature_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            key_weights: Array2::from_shape_fn((feature_dim, feature_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            value_weights: Array2::from_shape_fn((feature_dim, feature_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            attention_scores: Array1::zeros(feature_dim),
        }
    }

    /// Apply attention to features
    pub fn apply(&self, features: &Array1<f64>) -> OptimizeResult<Array1<f64>> {
        // Simplified self-attention
        let mut attended_features = Array1::zeros(features.len());

        for i in 0..attended_features.len() {
            let attention_weight = (i as f64 / features.len() as f64).exp(); // Simple attention
            attended_features[i] = attention_weight * features.get(i).copied().unwrap_or(0.0);
        }

        // Normalize
        let sum = attended_features.sum();
        if sum > 0.0 {
            attended_features /= sum;
        }

        Ok(attended_features)
    }
}

impl ContextEncoder {
    /// Create new context encoder
    pub fn new(context_dim: usize) -> Self {
        Self {
            lstm: LSTMCell::new(context_dim),
            embedding_layer: Array2::from_shape_fn((context_dim, 100), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            aggregation_network: Array2::from_shape_fn((context_dim, context_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            context_dim,
        }
    }
}

impl LSTMCell {
    /// Create new LSTM cell
    pub fn new(hidden_size: usize) -> Self {
        Self {
            w_i: Array2::from_shape_fn((hidden_size, hidden_size * 2), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            w_f: Array2::from_shape_fn((hidden_size, hidden_size * 2), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            w_c: Array2::from_shape_fn((hidden_size, hidden_size * 2), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            w_o: Array2::from_shape_fn((hidden_size, hidden_size * 2), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            hidden_state: Array1::zeros(hidden_size),
            cell_state: Array1::zeros(hidden_size),
        }
    }
}

impl ParameterGenerator {
    /// Create new parameter generator
    pub fn new(param_dim: usize) -> Self {
        Self {
            generator_network: Array2::from_shape_fn((param_dim, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            conditioning_network: Array2::from_shape_fn((param_dim, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            output_projection: Array2::from_shape_fn((param_dim, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            param_dim,
        }
    }

    /// Generate parameters from encoding
    pub fn generate(&self, encoding: &Array1<f64>) -> OptimizeResult<Array1<f64>> {
        let mut params: Array1<f64> = Array1::zeros(self.param_dim);

        // Simple generation (in practice would be more sophisticated)
        for i in 0..params.len() {
            for j in 0..encoding.len().min(self.generator_network.ncols()) {
                params[i] += self.generator_network[[i, j]] * encoding[j];
            }
            params[i] = params[i].tanh(); // Normalize to [-1, 1]
        }

        Ok(params)
    }
}

impl UpdateNetwork {
    /// Create new update network
    pub fn new(param_dim: usize) -> Self {
        Self {
            update_network: Array2::from_shape_fn((param_dim, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            meta_gradient_network: Array2::from_shape_fn((param_dim, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            lr_network: Array2::from_shape_fn((1, param_dim), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            update_history: VecDeque::with_capacity(100),
        }
    }
}

impl MemoryNetwork {
    /// Create new memory network
    pub fn new(feature_dim: usize, memory_size: usize) -> Self {
        Self {
            memory_bank: Array2::zeros((memory_size, feature_dim)),
            memory_keys: Array2::zeros((memory_size, feature_dim)),
            memory_values: Array2::zeros((memory_size, feature_dim)),
            access_patterns: Vec::new(),
            memory_size,
        }
    }
}

impl FastAdaptationMechanism {
    /// Create new fast adaptation mechanism
    pub fn new(inner_lr: f64) -> Self {
        Self {
            gradient_adapter: GradientBasedAdapter::new(inner_lr),
            prototype_adapter: PrototypeBasedAdapter::new(),
            maml_adapter: MAMLAdapter::new(),
            strategy_selector: AdaptationStrategySelector::new(),
        }
    }
}

impl GradientBasedAdapter {
    /// Create new gradient-based adapter
    pub fn new(inner_lr: f64) -> Self {
        Self {
            meta_lr: 0.001,
            inner_lr,
            adaptation_steps: 5,
            gradient_accumulator: Array1::zeros(100),
        }
    }
}

impl Default for PrototypeBasedAdapter {
    fn default() -> Self {
        Self::new()
    }
}

impl PrototypeBasedAdapter {
    /// Create new prototype-based adapter
    pub fn new() -> Self {
        Self {
            prototypes: Array2::zeros((10, 100)),
            prototype_labels: vec!["default".to_string(); 10],
            distance_metric: DistanceMetric::Euclidean,
            adaptation_weights: Array1::ones(10),
        }
    }
}

impl Default for MAMLAdapter {
    fn default() -> Self {
        Self::new()
    }
}

impl MAMLAdapter {
    /// Create new MAML adapter
    pub fn new() -> Self {
        Self {
            meta_parameters: Array1::zeros(100),
            task_parameters: HashMap::new(),
            inner_optimizer: InnerLoopOptimizer::new(),
            meta_optimizer: MetaOptimizer::new(),
        }
    }
}

impl Default for InnerLoopOptimizer {
    fn default() -> Self {
        Self::new()
    }
}

impl InnerLoopOptimizer {
    /// Create new inner loop optimizer
    pub fn new() -> Self {
        Self {
            learning_rate: 0.01,
            momentum: 0.9,
            velocity: Array1::zeros(100),
        }
    }
}

impl Default for MetaOptimizer {
    fn default() -> Self {
        Self::new()
    }
}

impl MetaOptimizer {
    /// Create new meta-optimizer
    pub fn new() -> Self {
        Self {
            meta_learning_rate: 0.001,
            meta_momentum: 0.9,
            meta_velocity: Array1::zeros(100),
        }
    }
}

impl Default for AdaptationStrategySelector {
    fn default() -> Self {
        Self::new()
    }
}

impl AdaptationStrategySelector {
    /// Create new strategy selector
    pub fn new() -> Self {
        let mut strategy_scores = HashMap::new();
        strategy_scores.insert("gradient".to_string(), 0.5);
        strategy_scores.insert("prototype".to_string(), 0.5);
        strategy_scores.insert("maml".to_string(), 0.5);

        Self {
            strategy_scores,
            selection_network: Array2::from_shape_fn((4, 10), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            current_strategy: AdaptationStrategy::Gradient,
        }
    }
}

impl ProblemSimilarityMatcher {
    /// Create new problem similarity matcher
    pub fn new(feature_dim: usize) -> Self {
        Self {
            problem_embeddings: HashMap::new(),
            similarity_network: Array2::from_shape_fn((1, feature_dim * 2), |_| {
                (scirs2_core::random::rng().random::<f64>() - 0.5) * 0.1
            }),
            similarity_threshold: 0.7,
            similarity_cache: HashMap::new(),
        }
    }
}

impl ExperienceMemory {
    /// Create new experience memory
    pub fn new(capacity: usize) -> Self {
        Self {
            support_set: Vec::new(),
            query_set: Vec::new(),
            capacity,
            episodic_memory: VecDeque::with_capacity(capacity),
        }
    }

    /// Add support example
    pub fn add_support_example(&mut self, example: SupportExample) {
        if self.support_set.len() >= self.capacity {
            self.support_set.remove(0);
        }
        self.support_set.push(example);
    }
}

impl Default for FewShotAdaptationStats {
    fn default() -> Self {
        Self {
            adaptation_speed: 0.0,
            transfer_efficiency: 0.0,
            support_examples_used: 0,
            adaptation_success_rate: 0.0,
            meta_learning_progress: 0.0,
        }
    }
}

impl LearnedOptimizer for FewShotLearningOptimizer {
    fn meta_train(&mut self, training_tasks: &[TrainingTask]) -> OptimizeResult<()> {
        for task in training_tasks {
            // Create support examples from task
            let support_examples = self.create_support_examples_from_task(task)?;

            // Perform few-shot adaptation
            self.few_shot_adapt(&support_examples, &task.problem)?;

            // Update meta-learner based on adaptation performance
            self.update_meta_learner(&support_examples)?;
        }

        Ok(())
    }

    fn adapt_to_problem(
        &mut self,
        problem: &OptimizationProblem,
        initial_params: &ArrayView1<f64>,
    ) -> OptimizeResult<()> {
        // Find similar problems for support examples
        let similar_problems = self.find_similar_problems(problem)?;

        // Create support examples from similar problems
        let support_examples = self.create_support_examples_from_similar(&similar_problems)?;

        // Perform adaptation
        self.few_shot_adapt(&support_examples, problem)?;

        Ok(())
    }

    fn optimize<F>(
        &mut self,
        objective: F,
        initial_params: &ArrayView1<f64>,
    ) -> OptimizeResult<OptimizeResults<f64>>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        // Create default problem for strategy generation
        let default_problem = OptimizationProblem {
            name: "few_shot".to_string(),
            dimension: initial_params.len(),
            problem_class: "general".to_string(),
            metadata: HashMap::new(),
            max_evaluations: 1000,
            target_accuracy: 1e-6,
        };

        // Generate optimization strategy
        let strategy = self.generate_optimization_strategy(&default_problem)?;

        // Apply strategy to optimize
        let mut current_params = initial_params.to_owned();
        let mut best_value = objective(initial_params);
        let mut iterations = 0;

        for iter in 0..strategy.convergence_criteria.max_nit {
            iterations = iter;

            // Compute step size based on schedule
            let step_size = match &strategy.step_size_schedule {
                StepSizeSchedule::Constant { step_size } => *step_size,
                StepSizeSchedule::Exponential {
                    initial_step,
                    decay_rate,
                } => initial_step * decay_rate.powi(iter as i32),
                StepSizeSchedule::Polynomial {
                    initial_step,
                    power,
                } => initial_step / (1.0 + iter as f64).powf(*power),
                StepSizeSchedule::Adaptive {
                    base_step,
                    adaptation_factor,
                } => base_step * (1.0 + adaptation_factor * iter as f64 / 100.0),
            };

            // Compute direction
            let direction = self.compute_direction(
                &objective,
                &current_params,
                &strategy.direction_computation,
            )?;

            // Update parameters
            for i in 0..current_params.len().min(direction.len()) {
                current_params[i] -= step_size * direction[i];
            }

            let current_value = objective(&current_params.view());
            let improvement = (best_value - current_value).abs();

            if current_value < best_value {
                best_value = current_value;
            }

            // Check convergence on the change relative to the previous best,
            // computed before `best_value` is updated so an improving step does
            // not trivially register as converged.
            if improvement < strategy.convergence_criteria.tolerance {
                break;
            }
        }

        Ok(OptimizeResults::<f64> {
            x: current_params,
            fun: best_value,
            success: true,
            nit: iterations,
            message: "Few-shot learning optimization completed".to_string(),
            ..OptimizeResults::default()
        })
    }

    fn get_state(&self) -> &MetaOptimizerState {
        &self.meta_state
    }

    fn reset(&mut self) {
        self.experience_memory = ExperienceMemory::new(1000);
        self.adaptation_stats = FewShotAdaptationStats::default();
        self.current_task_context = None;
    }
}

impl FewShotLearningOptimizer {
    fn create_support_examples_from_task(
        &self,
        task: &TrainingTask,
    ) -> OptimizeResult<Vec<SupportExample>> {
        // Simplified creation of support examples
        let problem_encoding = self.encode_problem_for_similarity(&task.problem)?;

        let trajectory = OptimizationTrajectory {
            parameter_history: vec![Array1::zeros(task.problem.dimension)],
            objective_history: vec![1.0],
            gradient_history: vec![Array1::zeros(task.problem.dimension)],
            step_size_history: vec![0.01],
            total_steps: 1,
        };

        Ok(vec![SupportExample {
            problem_encoding,
            trajectory,
            success: true,
            metadata: HashMap::new(),
        }])
    }

    fn update_meta_learner(&mut self, _support_examples: &[SupportExample]) -> OptimizeResult<()> {
        // Simplified meta-learner update
        self.meta_state.episode += 1;
        Ok(())
    }

    fn create_support_examples_from_similar(
        &self,
        _similar_problems: &[String],
    ) -> OptimizeResult<Vec<SupportExample>> {
        // Simplified creation from similar problems
        Ok(vec![])
    }

    fn compute_direction<F>(
        &self,
        objective: &F,
        params: &Array1<f64>,
        direction_method: &DirectionComputation,
    ) -> OptimizeResult<Array1<f64>>
    where
        F: Fn(&ArrayView1<f64>) -> f64,
    {
        // All direction methods currently fall back to the same forward
        // finite-difference gradient estimate.
        let _ = direction_method;

        let h = 1e-6;
        let f0 = objective(&params.view());
        let mut gradient = Array1::zeros(params.len());

        for i in 0..params.len() {
            let mut params_plus = params.clone();
            params_plus[i] += h;
            let f_plus = objective(&params_plus.view());
            gradient[i] = (f_plus - f0) / h;
        }

        Ok(gradient)
    }
}

/// Convenience function for few-shot learning optimization
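///
/// # Example
///
/// A minimal sketch mirroring `test_few_shot_optimization` below; an empty
/// support set is passed here, while a populated `SupportExample` would
/// normally come from previous optimization runs (`ignore`d, not a compiled
/// doctest):
///
/// ```ignore
/// let objective = |x: &ArrayView1<f64>| x[0].powi(2) + x[1].powi(2);
/// let initial = Array1::from(vec![2.0, 2.0]);
/// let result = few_shot_optimize(objective, &initial.view(), &[], None).unwrap();
/// assert!(result.fun >= 0.0);
/// ```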
#[allow(dead_code)]
pub fn few_shot_optimize<F>(
    objective: F,
    initial_params: &ArrayView1<f64>,
    support_examples: &[SupportExample],
    config: Option<LearnedOptimizationConfig>,
) -> super::Result<OptimizeResults<f64>>
where
    F: Fn(&ArrayView1<f64>) -> f64,
{
    let config = config.unwrap_or_default();
    let mut optimizer = FewShotLearningOptimizer::new(config);

    // Create default problem
    let problem = OptimizationProblem {
        name: "few_shot_target".to_string(),
        dimension: initial_params.len(),
        problem_class: "general".to_string(),
        metadata: HashMap::new(),
        max_evaluations: 1000,
        target_accuracy: 1e-6,
    };

    // Perform few-shot adaptation
    optimizer.few_shot_adapt(support_examples, &problem)?;

    // Optimize
    optimizer.optimize(objective, initial_params)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_few_shot_optimizer_creation() {
        let config = LearnedOptimizationConfig::default();
        let optimizer = FewShotLearningOptimizer::new(config);

        assert_eq!(optimizer.adaptation_stats.support_examples_used, 0);
    }

    #[test]
    fn test_feature_extractor() {
        let extractor = FeatureExtractor::new(32);
        let encoding = Array1::from(vec![1.0, 2.0, 3.0]);

        let features = extractor.extract(&encoding).unwrap();
        assert_eq!(features.len(), 32);
    }

    #[test]
    fn test_parameter_generator() {
        let generator = ParameterGenerator::new(16);
        let encoding = Array1::from(vec![0.5, -0.3, 0.8]);

        let params = generator.generate(&encoding).unwrap();
        assert_eq!(params.len(), 16);
        assert!(params.iter().all(|&x| x >= -1.0 && x <= 1.0));
    }

    #[test]
    fn test_similarity_computation() {
        let config = LearnedOptimizationConfig::default();
        let optimizer = FewShotLearningOptimizer::new(config);

        let encoding1 = Array1::from(vec![1.0, 0.0, 0.0]);
        let encoding2 = Array1::from(vec![0.0, 1.0, 0.0]);

        let similarity = optimizer
            .compute_similarity(&encoding1, &encoding2)
            .unwrap();
        assert!(similarity >= 0.0 && similarity <= 1.0);
    }

    #[test]
    fn test_few_shot_optimization() {
        let objective = |x: &ArrayView1<f64>| x[0].powi(2) + x[1].powi(2);
        let initial = Array1::from(vec![2.0, 2.0]);

        // Create a simple support example
        let support_example = SupportExample {
            problem_encoding: Array1::from(vec![1.0, 1.0, 0.0]),
            trajectory: OptimizationTrajectory {
                parameter_history: vec![Array1::from(vec![1.0, 1.0])],
                objective_history: vec![2.0, 1.0],
                gradient_history: vec![Array1::from(vec![0.5, 0.5])],
                step_size_history: vec![0.01],
                total_steps: 1,
            },
            success: true,
            metadata: HashMap::new(),
        };

        let result =
            few_shot_optimize(objective, &initial.view(), &[support_example], None).unwrap();

        assert!(result.fun >= 0.0);
        assert_eq!(result.x.len(), 2);
        assert!(result.success);
    }

    #[test]
    fn test_adaptation_strategy_selection() {
        let config = LearnedOptimizationConfig::default();
        let optimizer = FewShotLearningOptimizer::new(config);

        let support_features =
            Array2::from_shape_fn((2, 10), |_| scirs2_core::random::rng().random::<f64>());
        let similar_problems = vec!["problem1".to_string(), "problem2".to_string()];

        let strategy = optimizer
            .select_adaptation_strategy(&support_features, &similar_problems)
            .unwrap();

        match strategy {
            AdaptationStrategy::Prototype => {}
            AdaptationStrategy::MAML => {}
            AdaptationStrategy::Gradient => {}
            AdaptationStrategy::Hybrid { .. } => {}
        }
    }
}

#[allow(dead_code)]
pub fn placeholder() {
    // Placeholder function to prevent unused module warnings
}