scirs2_series/advanced_fusion_intelligence/evolution.rs

//! Evolution and Architecture Components for Advanced Fusion Intelligence
//!
//! This module contains the evolution- and neural-architecture-related structures
//! and implementations for the advanced fusion intelligence system, including
//! evolutionary algorithms, architecture evolution, and genetic operations.
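//!
//! # Example
//!
//! A minimal end-to-end sketch of the intended workflow (illustrative only: the
//! module path and the use of `f64` for the `Float` parameter are assumptions,
//! so the block is marked `ignore`):
//!
//! ```ignore
//! use crate::advanced_fusion_intelligence::evolution::{
//!     EvaluationFunction, EvolutionEngine, FitnessEvaluator, SelectionStrategy,
//! };
//!
//! // Population of 20 random architectures, tournament selection.
//! let mut engine: EvolutionEngine<f64> = EvolutionEngine::new(20, SelectionStrategy::Tournament);
//! let evaluator = FitnessEvaluator::new(EvaluationFunction::MultiObjective);
//!
//! // Run a few generations and inspect the best architecture found so far.
//! for _ in 0..10 {
//!     engine.evolve_generation(&evaluator).expect("evolution step failed");
//! }
//! let best = engine.get_best_individual();
//! ```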

use scirs2_core::numeric::{Float, FromPrimitive};
use scirs2_core::random::SeedableRng;
use std::fmt::Debug;

use crate::error::Result;

/// Engine for evolving neural architectures
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct EvolutionEngine<F: Float + Debug> {
    population: Vec<Architecture<F>>,
    selection_strategy: SelectionStrategy,
    mutation_rate: F,
    crossover_rate: F,
}

/// Neural network architecture configuration
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Architecture<F: Float + Debug> {
    layers: Vec<LayerConfig<F>>,
    connections: Vec<ConnectionConfig<F>>,
    fitness_score: F,
}

/// Configuration for individual network layer
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct LayerConfig<F: Float + Debug> {
    layer_type: LayerType,
    size: usize,
    activation: ActivationFunction,
    parameters: Vec<F>,
}

/// Types of neural network layers
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum LayerType {
    /// Fully connected dense layer
    Dense,
    /// Convolutional layer
    Convolutional,
    /// Recurrent neural network layer
    Recurrent,
    /// Attention mechanism layer
    Attention,
    /// Quantum computing layer
    Quantum,
    /// Long Short-Term Memory layer
    LSTM,
    /// Dropout regularization layer
    Dropout,
}

/// Activation functions for neural networks
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ActivationFunction {
    /// Rectified Linear Unit activation
    ReLU,
    /// Sigmoid activation function
    Sigmoid,
    /// Hyperbolic tangent activation
    Tanh,
    /// Gaussian Error Linear Unit
    GELU,
    /// Swish activation function
    Swish,
    /// Quantum activation function
    Quantum,
    /// Softmax activation function
    Softmax,
}

/// Configuration for layer connections
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ConnectionConfig<F: Float + Debug> {
    from_layer: usize,
    to_layer: usize,
    connection_type: ConnectionType,
    strength: F,
    weight: F,
}

/// Types of neural network connections
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ConnectionType {
    /// Feedforward connection
    Feedforward,
    /// Recurrent connection
    Recurrent,
    /// Skip connection
    Skip,
    /// Attention-based connection
    Attention,
    /// Quantum connection
    Quantum,
    /// Fully connected connection
    FullyConnected,
}

/// Strategies for evolutionary selection
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum SelectionStrategy {
    /// Tournament selection
    Tournament,
    /// Roulette wheel selection
    Roulette,
    /// Elite selection
    Elite,
    /// Rank-based selection
    RankBased,
}

/// Fitness evaluator for evolutionary algorithms
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FitnessEvaluator<F: Float + Debug> {
    evaluation_function: EvaluationFunction,
    weights: Vec<F>,
    normalization_strategy: NormalizationStrategy,
}

/// Evaluation function types
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum EvaluationFunction {
    /// Accuracy-based evaluation
    Accuracy,
    /// Latency-optimized evaluation
    LatencyOptimized,
    /// Memory-optimized evaluation
    MemoryOptimized,
    /// Multi-objective evaluation
    MultiObjective,
}

/// Normalization strategies
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum NormalizationStrategy {
    /// Min-max normalization
    MinMax,
    /// Z-score normalization
    ZScore,
    /// Robust normalization
    Robust,
    /// Quantile normalization
    Quantile,
}

/// Mutation operator for evolutionary algorithms
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct MutationOperator {
    mutation_type: MutationType,
    probability: f64,
    intensity: f64,
}

/// Types of mutations for evolutionary algorithms
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum MutationType {
    /// Parameter mutation
    ParameterMutation,
    /// Structural mutation
    StructuralMutation,
    /// Layer addition
    LayerAddition,
    /// Layer removal
    LayerRemoval,
    /// Connection mutation
    ConnectionMutation,
}

/// Crossover operator for evolutionary algorithms
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CrossoverOperator {
    crossover_type: CrossoverType,
    probability: f64,
}

/// Types of crossover operations
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum CrossoverType {
    /// Single point crossover
    SinglePoint,
    /// Two point crossover
    TwoPoint,
    /// Uniform crossover
    Uniform,
    /// Semantic crossover
    Semantic,
}

impl<F: Float + Debug + Clone + FromPrimitive> EvolutionEngine<F> {
    /// Create new evolution engine
    pub fn new(population_size: usize, selection_strategy: SelectionStrategy) -> Self {
        let mut population = Vec::with_capacity(population_size);

        // Initialize random population
        for _ in 0..population_size {
            let architecture = Architecture::random();
            population.push(architecture);
        }

        EvolutionEngine {
            population,
            selection_strategy,
            mutation_rate: F::from_f64(0.1).unwrap(),
            crossover_rate: F::from_f64(0.8).unwrap(),
        }
    }

    /// Evolve population for one generation
    pub fn evolve_generation(&mut self, fitness_evaluator: &FitnessEvaluator<F>) -> Result<()> {
        // 1. Evaluate fitness for all individuals
        self.evaluate_population(fitness_evaluator)?;

        // 2. Selection
        let selected = self.selection()?;

        // 3. Crossover and mutation
        let mut new_population = Vec::new();

        for i in (0..selected.len()).step_by(2) {
            let parent1 = &selected[i];
            let parent2 = if i + 1 < selected.len() {
                &selected[i + 1]
            } else {
                &selected[0]
            };

            // Crossover
            let (mut child1, mut child2) =
                if scirs2_core::random::random::<f64>() < self.crossover_rate.to_f64().unwrap() {
                    self.crossover(parent1, parent2)?
                } else {
                    (parent1.clone(), parent2.clone())
                };

            // Mutation
            if scirs2_core::random::random::<f64>() < self.mutation_rate.to_f64().unwrap() {
                self.mutate(&mut child1)?;
            }
            if scirs2_core::random::random::<f64>() < self.mutation_rate.to_f64().unwrap() {
                self.mutate(&mut child2)?;
            }

            new_population.push(child1);
            if new_population.len() < self.population.len() {
                new_population.push(child2);
            }
        }

        self.population = new_population;
        Ok(())
    }

    /// Evaluate fitness for entire population
    fn evaluate_population(&mut self, fitness_evaluator: &FitnessEvaluator<F>) -> Result<()> {
        for individual in &mut self.population {
            individual.fitness_score = fitness_evaluator.evaluate(individual)?;
        }
        Ok(())
    }

    /// Select parents for reproduction
    fn selection(&self) -> Result<Vec<Architecture<F>>> {
        match self.selection_strategy {
            SelectionStrategy::Tournament => self.tournament_selection(),
            SelectionStrategy::Roulette => self.roulette_wheel_selection(),
            SelectionStrategy::Elite => self.elite_selection(),
            SelectionStrategy::RankBased => self.rank_based_selection(),
        }
    }

    /// Tournament selection implementation
    fn tournament_selection(&self) -> Result<Vec<Architecture<F>>> {
        let tournament_size = 3;
        let mut selected = Vec::new();
        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);

        for _ in 0..self.population.len() {
            let mut tournament = Vec::new();

            // Select random individuals for tournament
            for _ in 0..tournament_size {
                let idx =
                    scirs2_core::random::Rng::random_range(&mut rng, 0..self.population.len());
                tournament.push(&self.population[idx]);
            }

            // Select best individual from tournament
            let winner = tournament
                .iter()
                .max_by(|a, b| a.fitness_score.partial_cmp(&b.fitness_score).unwrap())
                .unwrap();

            selected.push((*winner).clone());
        }

        Ok(selected)
    }

    /// Roulette wheel selection implementation
    fn roulette_wheel_selection(&self) -> Result<Vec<Architecture<F>>> {
        let total_fitness: F = self
            .population
            .iter()
            .map(|ind| ind.fitness_score)
            .fold(F::zero(), |acc, x| acc + x);

        if total_fitness == F::zero() {
            return Ok(self.population.clone());
        }

        let mut selected = Vec::new();

        for _ in 0..self.population.len() {
            let random_value =
                F::from_f64(scirs2_core::random::random::<f64>()).unwrap() * total_fitness;
            let mut cumulative_fitness = F::zero();

            for individual in &self.population {
                cumulative_fitness = cumulative_fitness + individual.fitness_score;
                if cumulative_fitness >= random_value {
                    selected.push(individual.clone());
                    break;
                }
            }
        }

        Ok(selected)
    }

    /// Elite selection implementation
    fn elite_selection(&self) -> Result<Vec<Architecture<F>>> {
        let mut sorted_population = self.population.clone();
        sorted_population.sort_by(|a, b| b.fitness_score.partial_cmp(&a.fitness_score).unwrap());

        // Select top 50% as elite (at least one individual, to avoid a zero divisor below)
        let elite_size = (self.population.len() / 2).max(1);
        let mut selected = Vec::new();

        // Add elite individuals twice to maintain population size
        for i in 0..self.population.len() {
            let idx = i % elite_size;
            selected.push(sorted_population[idx].clone());
        }

        Ok(selected)
    }

    /// Rank-based selection implementation
    fn rank_based_selection(&self) -> Result<Vec<Architecture<F>>> {
        let mut sorted_population = self.population.clone();
        sorted_population.sort_by(|a, b| a.fitness_score.partial_cmp(&b.fitness_score).unwrap());

        // Assign ranks (higher rank = better fitness)
        let mut selected = Vec::new();
        let total_ranks: usize = (1..=self.population.len()).sum();

        for _ in 0..self.population.len() {
            let random_value = scirs2_core::random::random::<f64>() * total_ranks as f64;
            let mut cumulative_rank = 0.0;

            for (rank, individual) in sorted_population.iter().enumerate() {
                cumulative_rank += (rank + 1) as f64;
                if cumulative_rank >= random_value {
                    selected.push(individual.clone());
                    break;
                }
            }
        }

        Ok(selected)
    }

    /// Crossover operation between two parents
    fn crossover(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
        let max_len = parent1.layers.len().min(parent2.layers.len());
        let crossover_point = if max_len > 0 {
            scirs2_core::random::Rng::random_range(&mut rng, 0..max_len)
        } else {
            0
        };

        let mut child1 = parent1.clone();
        let mut child2 = parent2.clone();

        // Single-point crossover on layers
        for i in crossover_point..child1.layers.len().min(child2.layers.len()) {
            let temp = child1.layers[i].clone();
            child1.layers[i] = child2.layers[i].clone();
            child2.layers[i] = temp;
        }

        // Reset fitness scores
        child1.fitness_score = F::zero();
        child2.fitness_score = F::zero();

        Ok((child1, child2))
    }

    /// Mutation operation on an individual
    fn mutate(&self, individual: &mut Architecture<F>) -> Result<()> {
        // Mutate layer parameters
        for layer in &mut individual.layers {
            for param in &mut layer.parameters {
                if scirs2_core::random::random::<f64>() < 0.1 {
                    let mutation_strength = F::from_f64(0.1).unwrap();
                    let random_factor =
                        F::from_f64(scirs2_core::random::random::<f64>() - 0.5).unwrap();
                    *param = *param + mutation_strength * random_factor;
                }
            }
        }

        // Mutate connection weights
        for connection in &mut individual.connections {
            if scirs2_core::random::random::<f64>() < 0.1 {
                let mutation_strength = F::from_f64(0.1).unwrap();
                let random_factor =
                    F::from_f64(scirs2_core::random::random::<f64>() - 0.5).unwrap();
                connection.weight = connection.weight + mutation_strength * random_factor;
            }
        }

        // Reset fitness score
        individual.fitness_score = F::zero();
        Ok(())
    }

    /// Get best individual from current population
    pub fn get_best_individual(&self) -> Option<&Architecture<F>> {
        self.population
            .iter()
            .max_by(|a, b| a.fitness_score.partial_cmp(&b.fitness_score).unwrap())
    }

    /// Get population fitness statistics as `(mean, max, min)`
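    ///
    /// A small usage sketch (assumes an `EvolutionEngine<f64>` named `engine`):
    ///
    /// ```ignore
    /// let (mean, max, min) = engine.get_population_stats();
    /// println!("fitness: mean={mean}, max={max}, min={min}");
    /// ```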
    pub fn get_population_stats(&self) -> (F, F, F) {
        if self.population.is_empty() {
            return (F::zero(), F::zero(), F::zero());
        }

        let fitness_values: Vec<F> = self
            .population
            .iter()
            .map(|ind| ind.fitness_score)
            .collect();

        let mean = fitness_values.iter().fold(F::zero(), |acc, &x| acc + x)
            / F::from_usize(fitness_values.len()).unwrap();

        let max_fitness =
            fitness_values
                .iter()
                .fold(F::neg_infinity(), |acc, &x| if x > acc { x } else { acc });

        let min_fitness = fitness_values
            .iter()
            .fold(F::infinity(), |acc, &x| if x < acc { x } else { acc });

        (mean, max_fitness, min_fitness)
    }
}

impl<F: Float + Debug + Clone + FromPrimitive> Architecture<F> {
    /// Create random architecture
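    ///
    /// A usage sketch (assumes `f64` satisfies this crate's `Float` bound):
    ///
    /// ```ignore
    /// let arch: Architecture<f64> = Architecture::random();
    /// assert!(arch.validate());
    /// let complexity = arch.calculate_complexity();
    /// ```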
    pub fn random() -> Self {
        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
        let num_layers = 3 + scirs2_core::random::Rng::random_range(&mut rng, 0..5); // 3-7 layers
        let mut layers = Vec::new();

        for _ in 0..num_layers {
            let layer = LayerConfig {
                layer_type: LayerType::Dense, // Simplified to Dense for now
                size: 32 + scirs2_core::random::Rng::random_range(&mut rng, 0..256), // 32-287 neurons
                activation: ActivationFunction::ReLU, // Simplified to ReLU
                parameters: vec![F::from_f64(scirs2_core::random::random::<f64>()).unwrap(); 4],
            };
            layers.push(layer);
        }

        let mut connections = Vec::new();
        // Create sequential connections
        for i in 0..num_layers - 1 {
            let connection = ConnectionConfig {
                from_layer: i,
                to_layer: i + 1,
                connection_type: ConnectionType::Feedforward,
                strength: F::from_f64(1.0).unwrap(),
                weight: F::from_f64(scirs2_core::random::random::<f64>()).unwrap(),
            };
            connections.push(connection);
        }

        Architecture {
            layers,
            connections,
            fitness_score: F::zero(),
        }
    }

    /// Calculate architecture complexity (total neurons plus number of connections)
    pub fn calculate_complexity(&self) -> F {
        let layer_complexity: usize = self.layers.iter().map(|layer| layer.size).sum();

        let connection_complexity = self.connections.len();

        F::from_usize(layer_complexity + connection_complexity).unwrap()
    }

    /// Validate architecture consistency
    pub fn validate(&self) -> bool {
        // Check that all connections reference valid layers
        for connection in &self.connections {
            if connection.from_layer >= self.layers.len()
                || connection.to_layer >= self.layers.len()
            {
                return false;
            }
        }

        // Check that layers have valid sizes
        for layer in &self.layers {
            if layer.size == 0 {
                return false;
            }
        }

        true
    }
}

impl<F: Float + Debug + Clone + FromPrimitive> FitnessEvaluator<F> {
    /// Create new fitness evaluator
    pub fn new(evaluation_function: EvaluationFunction) -> Self {
        FitnessEvaluator {
            evaluation_function,
            weights: vec![F::from_f64(1.0).unwrap(); 4], // Default weights
            normalization_strategy: NormalizationStrategy::MinMax,
        }
    }

    /// Evaluate fitness of an architecture
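    ///
    /// A usage sketch (assumes an `Architecture<f64>` named `arch`, e.g. from
    /// `Architecture::random()`):
    ///
    /// ```ignore
    /// let evaluator = FitnessEvaluator::new(EvaluationFunction::Accuracy);
    /// let fitness = evaluator.evaluate(&arch).expect("evaluation failed");
    /// assert!(fitness >= 0.0);
    /// ```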
    pub fn evaluate(&self, architecture: &Architecture<F>) -> Result<F> {
        match self.evaluation_function {
            EvaluationFunction::Accuracy => self.evaluate_accuracy(architecture),
            EvaluationFunction::LatencyOptimized => self.evaluate_latency(architecture),
            EvaluationFunction::MemoryOptimized => self.evaluate_memory(architecture),
            EvaluationFunction::MultiObjective => self.evaluate_multi_objective(architecture),
        }
    }

    /// Accuracy-based fitness evaluation
    fn evaluate_accuracy(&self, architecture: &Architecture<F>) -> Result<F> {
        // Simplified accuracy estimation based on architecture properties
        let complexity_penalty = architecture.calculate_complexity() / F::from_f64(1000.0).unwrap();
        let base_accuracy = F::from_f64(0.8).unwrap(); // Base accuracy

        // Bonus for deep networks (up to a point)
        let depth_bonus = if architecture.layers.len() > 10 {
            F::from_f64(0.05).unwrap()
        } else {
            F::from_usize(architecture.layers.len()).unwrap() / F::from_f64(100.0).unwrap()
        };

        let fitness = base_accuracy + depth_bonus - complexity_penalty * F::from_f64(0.1).unwrap();
        Ok(fitness.max(F::zero()))
    }

    /// Latency-optimized fitness evaluation
    fn evaluate_latency(&self, architecture: &Architecture<F>) -> Result<F> {
        // Lower complexity = better latency fitness
        let complexity = architecture.calculate_complexity();
        let max_complexity = F::from_f64(10000.0).unwrap();

        let latency_fitness = (max_complexity - complexity) / max_complexity;
        Ok(latency_fitness.max(F::zero()))
    }

    /// Memory-optimized fitness evaluation
    fn evaluate_memory(&self, architecture: &Architecture<F>) -> Result<F> {
        // Estimate memory usage based on layer sizes
        let memory_usage: F = architecture.layers.iter()
            .map(|layer| F::from_usize(layer.size * layer.size).unwrap()) // Approximate parameter count
            .fold(F::zero(), |acc, x| acc + x);

        let max_memory = F::from_f64(1000000.0).unwrap(); // 1M parameters
        let memory_fitness = (max_memory - memory_usage) / max_memory;

        Ok(memory_fitness.max(F::zero()))
    }

    /// Multi-objective fitness evaluation
    fn evaluate_multi_objective(&self, architecture: &Architecture<F>) -> Result<F> {
        let accuracy_score = self.evaluate_accuracy(architecture)?;
        let latency_score = self.evaluate_latency(architecture)?;
        let memory_score = self.evaluate_memory(architecture)?;

        // Weighted combination (fixed weights; `self.weights` is not consulted here)
        let accuracy_weight = F::from_f64(0.5).unwrap();
        let latency_weight = F::from_f64(0.3).unwrap();
        let memory_weight = F::from_f64(0.2).unwrap();

        let multi_objective_score = accuracy_score * accuracy_weight
            + latency_score * latency_weight
            + memory_score * memory_weight;

        Ok(multi_objective_score)
    }
}

impl MutationOperator {
    /// Create new mutation operator
    pub fn new(mutation_type: MutationType, probability: f64, intensity: f64) -> Self {
        MutationOperator {
            mutation_type,
            probability,
            intensity,
        }
    }

    /// Apply mutation to architecture
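    ///
    /// A usage sketch (assumes an `Architecture<f64>` named `arch`):
    ///
    /// ```ignore
    /// let op = MutationOperator::new(MutationType::ParameterMutation, 0.3, 0.1);
    /// op.apply(&mut arch).expect("mutation failed");
    /// ```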
    pub fn apply<F: Float + Debug + Clone + FromPrimitive>(
        &self,
        architecture: &mut Architecture<F>,
    ) -> Result<()> {
        if scirs2_core::random::random::<f64>() > self.probability {
            return Ok(());
        }

        match self.mutation_type {
            MutationType::ParameterMutation => self.mutate_parameters(architecture),
            MutationType::StructuralMutation => self.mutate_structure(architecture),
            MutationType::LayerAddition => self.add_layer(architecture),
            MutationType::LayerRemoval => self.remove_layer(architecture),
            MutationType::ConnectionMutation => self.mutate_connections(architecture),
        }
    }

    /// Mutate layer parameters
    fn mutate_parameters<F: Float + Debug + Clone + FromPrimitive>(
        &self,
        architecture: &mut Architecture<F>,
    ) -> Result<()> {
        for layer in &mut architecture.layers {
            for param in &mut layer.parameters {
                if scirs2_core::random::random::<f64>() < 0.1 {
                    let mutation =
                        F::from_f64(self.intensity * (scirs2_core::random::random::<f64>() - 0.5))
                            .unwrap();
                    *param = *param + mutation;
                }
            }
        }
        Ok(())
    }

    /// Mutate architecture structure
    fn mutate_structure<F: Float + Debug + Clone>(
        &self,
        _architecture: &mut Architecture<F>,
    ) -> Result<()> {
        // Placeholder: structural mutations are not implemented yet
        Ok(())
    }

    /// Add new layer to architecture
    fn add_layer<F: Float + Debug + Clone + FromPrimitive>(
        &self,
        architecture: &mut Architecture<F>,
    ) -> Result<()> {
        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
        let new_layer = LayerConfig {
            layer_type: LayerType::Dense,
            size: 32 + scirs2_core::random::Rng::random_range(&mut rng, 0..128),
            activation: ActivationFunction::ReLU,
            parameters: vec![F::from_f64(scirs2_core::random::random::<f64>()).unwrap(); 4],
        };

        architecture.layers.push(new_layer);
        Ok(())
    }

    /// Remove layer from architecture
    fn remove_layer<F: Float + Debug + Clone>(
        &self,
        architecture: &mut Architecture<F>,
    ) -> Result<()> {
        if architecture.layers.len() > 2 {
            // Keep at least 2 layers
            let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
            let remove_idx =
                scirs2_core::random::Rng::random_range(&mut rng, 0..architecture.layers.len());
            architecture.layers.remove(remove_idx);

            // Keep connections consistent: drop those touching the removed layer
            // and shift later layer indices down by one.
            architecture
                .connections
                .retain(|c| c.from_layer != remove_idx && c.to_layer != remove_idx);
            for connection in &mut architecture.connections {
                if connection.from_layer > remove_idx {
                    connection.from_layer -= 1;
                }
                if connection.to_layer > remove_idx {
                    connection.to_layer -= 1;
                }
            }
        }
        Ok(())
    }

    /// Mutate connections
    fn mutate_connections<F: Float + Debug + Clone + FromPrimitive>(
        &self,
        architecture: &mut Architecture<F>,
    ) -> Result<()> {
        for connection in &mut architecture.connections {
            if scirs2_core::random::random::<f64>() < 0.1 {
                let mutation =
                    F::from_f64(self.intensity * (scirs2_core::random::random::<f64>() - 0.5))
                        .unwrap();
                connection.weight = connection.weight + mutation;
            }
        }
        Ok(())
    }
}

impl CrossoverOperator {
    /// Create new crossover operator
    pub fn new(crossover_type: CrossoverType, probability: f64) -> Self {
        CrossoverOperator {
            crossover_type,
            probability,
        }
    }

    /// Apply crossover between two architectures
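    ///
    /// A usage sketch (assumes two `Architecture<f64>` parents `p1` and `p2`):
    ///
    /// ```ignore
    /// let op = CrossoverOperator::new(CrossoverType::Uniform, 0.9);
    /// let (child1, child2) = op.apply(&p1, &p2).expect("crossover failed");
    /// ```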
    pub fn apply<F: Float + Debug + Clone>(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        if scirs2_core::random::random::<f64>() > self.probability {
            return Ok((parent1.clone(), parent2.clone()));
        }

        match self.crossover_type {
            CrossoverType::SinglePoint => self.single_point_crossover(parent1, parent2),
            CrossoverType::TwoPoint => self.two_point_crossover(parent1, parent2),
            CrossoverType::Uniform => self.uniform_crossover(parent1, parent2),
            CrossoverType::Semantic => self.semantic_crossover(parent1, parent2),
        }
    }

    /// Single point crossover
    fn single_point_crossover<F: Float + Debug + Clone>(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
        let max_len = parent1.layers.len().min(parent2.layers.len());
        let crossover_point = if max_len > 0 {
            scirs2_core::random::Rng::random_range(&mut rng, 0..max_len)
        } else {
            0
        };

        let mut child1 = parent1.clone();
        let mut child2 = parent2.clone();

        // Swap layers after crossover point
        for i in crossover_point..child1.layers.len().min(child2.layers.len()) {
            let temp = child1.layers[i].clone();
            child1.layers[i] = child2.layers[i].clone();
            child2.layers[i] = temp;
        }

        Ok((child1, child2))
    }

    /// Two point crossover
    fn two_point_crossover<F: Float + Debug + Clone>(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        let len = parent1.layers.len().min(parent2.layers.len());
        if len < 2 {
            return Ok((parent1.clone(), parent2.clone()));
        }

        let mut rng = scirs2_core::random::rngs::StdRng::seed_from_u64(42);
        let point1 = scirs2_core::random::Rng::random_range(&mut rng, 0..len);
        let point2 = scirs2_core::random::Rng::random_range(&mut rng, 0..len);
        let (start, end) = if point1 < point2 {
            (point1, point2)
        } else {
            (point2, point1)
        };

        let mut child1 = parent1.clone();
        let mut child2 = parent2.clone();

        // Swap layers between crossover points
        for i in start..end {
            let temp = child1.layers[i].clone();
            child1.layers[i] = child2.layers[i].clone();
            child2.layers[i] = temp;
        }

        Ok((child1, child2))
    }

    /// Uniform crossover
    fn uniform_crossover<F: Float + Debug + Clone>(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        let mut child1 = parent1.clone();
        let mut child2 = parent2.clone();

        let len = child1.layers.len().min(child2.layers.len());

        // For each layer, randomly choose which parent to inherit from
        for i in 0..len {
            if scirs2_core::random::random::<bool>() {
                let temp = child1.layers[i].clone();
                child1.layers[i] = child2.layers[i].clone();
                child2.layers[i] = temp;
            }
        }

        Ok((child1, child2))
    }

    /// Semantic crossover (simplified)
    fn semantic_crossover<F: Float + Debug + Clone>(
        &self,
        parent1: &Architecture<F>,
        parent2: &Architecture<F>,
    ) -> Result<(Architecture<F>, Architecture<F>)> {
        // For now, implement as single-point crossover
        self.single_point_crossover(parent1, parent2)
    }
}
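
// A few smoke tests sketching the intended behaviour of this module. They are
// illustrative rather than exhaustive and assume that `f64` satisfies the
// `Float + FromPrimitive` bounds re-exported by `scirs2_core::numeric`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn random_architecture_is_valid() {
        let arch: Architecture<f64> = Architecture::random();
        assert!(arch.validate());
        assert!(arch.calculate_complexity() > 0.0);
    }

    #[test]
    fn evolving_one_generation_preserves_population_size() {
        let mut engine: EvolutionEngine<f64> =
            EvolutionEngine::new(8, SelectionStrategy::Tournament);
        let evaluator = FitnessEvaluator::new(EvaluationFunction::MultiObjective);

        engine
            .evolve_generation(&evaluator)
            .expect("evolution step failed");
        assert_eq!(engine.population.len(), 8);

        // Population statistics are returned as (mean, max, min).
        let (mean, max, min) = engine.get_population_stats();
        assert!(min <= mean && mean <= max);
    }
}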