scirs2_spatial/neuromorphic/algorithms/competitive_learning.rs

//! Competitive Learning Algorithms for Neuromorphic Spatial Processing
//!
//! This module implements advanced competitive learning algorithms including
//! winner-take-all dynamics, homeostatic plasticity, and multi-timescale adaptation
//! for spatial data clustering and pattern discovery.

use crate::error::{SpatialError, SpatialResult};
use ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use rand::Rng;
use std::collections::VecDeque;

/// Bio-inspired competitive learning for spatial clustering
///
/// This clusterer uses winner-take-all dynamics with lateral inhibition to discover
/// clusters in spatial data. Neurons compete for activation, with the winner
/// (the neuron with the strongest response) being updated while the others are inhibited.
///
/// # Features
/// - Winner-take-all competitive dynamics
/// - Lateral inhibition for neural competition
/// - Adaptive learning rates
/// - Neighborhood function for topological organization
/// - Distance-based neuron activation
///
/// # Example
/// ```rust
/// use ndarray::Array2;
/// use scirs2_spatial::neuromorphic::algorithms::CompetitiveNeuralClusterer;
///
/// let points = Array2::from_shape_vec((4, 2), vec![
///     0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0
/// ]).unwrap();
///
/// let mut clusterer = CompetitiveNeuralClusterer::new(2, 2);
/// let assignments = clusterer.fit(&points.view(), 100).unwrap();
/// println!("Cluster assignments: {:?}", assignments);
/// ```
#[derive(Debug, Clone)]
pub struct CompetitiveNeuralClusterer {
    /// Network neurons representing cluster centers
    neurons: Vec<Array1<f64>>,
    /// Learning rates for each neuron
    learning_rates: Vec<f64>,
    /// Lateral inhibition strengths
    inhibition_strengths: Array2<f64>,
    /// Winner-take-all threshold
    #[allow(dead_code)]
    wta_threshold: f64,
    /// Neighborhood function parameters
    neighborhood_sigma: f64,
}

impl CompetitiveNeuralClusterer {
    /// Create new competitive neural clusterer
    ///
    /// # Arguments
    /// * `num_clusters` - Number of clusters to discover
    /// * `input_dims` - Number of input dimensions
    ///
    /// # Returns
    /// A new `CompetitiveNeuralClusterer` with randomly initialized neurons
    pub fn new(num_clusters: usize, input_dims: usize) -> Self {
        let mut neurons = Vec::new();
        let mut learning_rates = Vec::new();
        let mut rng = rand::rng();

        // Initialize neurons with random weights
        for _ in 0..num_clusters {
            let weights = Array1::from_shape_fn(input_dims, |_| rng.random_range(0.0..1.0));
            neurons.push(weights);
            learning_rates.push(0.1);
        }

        // Initialize inhibition matrix: no self-inhibition, uniform lateral inhibition
        let inhibition_strengths =
            Array2::from_shape_fn((num_clusters, num_clusters), |(i, j)| {
                if i == j { 0.0 } else { 0.1 }
            });

        Self {
            neurons,
            learning_rates,
            inhibition_strengths,
            wta_threshold: 0.5,
            neighborhood_sigma: 1.0,
        }
    }

    /// Configure neighborhood parameters
    ///
    /// # Arguments
    /// * `sigma` - Neighborhood function width
    /// * `wta_threshold` - Winner-take-all threshold
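    ///
    /// # Example
    /// A minimal sketch of the builder (the parameter values are illustrative, not tuned):
    /// ```rust
    /// use scirs2_spatial::neuromorphic::algorithms::CompetitiveNeuralClusterer;
    ///
    /// let clusterer = CompetitiveNeuralClusterer::new(3, 2)
    ///     .with_competition_params(2.0, 0.6);
    /// ```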
    pub fn with_competition_params(mut self, sigma: f64, wta_threshold: f64) -> Self {
        self.neighborhood_sigma = sigma;
        self.wta_threshold = wta_threshold;
        self
    }

    /// Train competitive network on spatial data
    ///
    /// Applies competitive learning dynamics where neurons compete for activation
    /// and the winner adapts towards the input pattern while inhibiting neighbors.
    ///
    /// # Arguments
    /// * `points` - Input spatial points (n_points × n_dims)
    /// * `epochs` - Number of training epochs
    ///
    /// # Returns
    /// Cluster assignments for each input point
    pub fn fit(
        &mut self,
        points: &ArrayView2<'_, f64>,
        epochs: usize,
    ) -> SpatialResult<Array1<usize>> {
        let n_points = points.dim().0;
        let mut assignments = Array1::zeros(n_points);

        if n_points == 0 {
            return Ok(assignments);
        }

        for epoch in 0..epochs {
            // Adjust learning rate and neighborhood size
            let epoch_factor = 1.0 - (epoch as f64) / (epochs as f64);
            let current_sigma = self.neighborhood_sigma * epoch_factor;
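            // e.g. with 100 epochs, current_sigma anneals linearly from the full
            // neighborhood_sigma down to 1/100 of it on the final epoch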

            for (point_idx, point) in points.outer_iter().enumerate() {
                // Find winning neuron
                let winner = self.find_winner(&point.to_owned())?;
                assignments[point_idx] = winner;

                // Update winner and neighbors
                self.update_neurons(&point.to_owned(), winner, current_sigma, epoch_factor)?;

                // Apply lateral inhibition
                self.apply_lateral_inhibition(winner)?;
            }
        }

        Ok(assignments)
    }

    /// Find winning neuron using competitive dynamics
    ///
    /// Returns the index of the neuron whose weight vector is closest
    /// (in Euclidean distance) to the input pattern.
    fn find_winner(&self, input: &Array1<f64>) -> SpatialResult<usize> {
        let mut min_distance = f64::INFINITY;
        let mut winner = 0;

        for (i, neuron) in self.neurons.iter().enumerate() {
            // Calculate Euclidean distance
            let distance: f64 = input
                .iter()
                .zip(neuron.iter())
                .map(|(&a, &b)| (a - b).powi(2))
                .sum::<f64>()
                .sqrt();

            if distance < min_distance {
                min_distance = distance;
                winner = i;
            }
        }

        Ok(winner)
    }

    /// Update neuron weights using competitive learning
    ///
    /// Applies weight updates to the winner and its neighbors based on
    /// the neighborhood function and learning rate scheduling.
    fn update_neurons(
        &mut self,
        input: &Array1<f64>,
        winner: usize,
        sigma: f64,
        learning_factor: f64,
    ) -> SpatialResult<()> {
        for (i, neuron) in self.neurons.iter_mut().enumerate() {
            // Calculate neighborhood influence (Gaussian over index distance)
            let distance_to_winner = (i as i32 - winner as i32).abs() as f64;
            let neighborhood_influence =
                (-distance_to_winner.powi(2) / (2.0 * sigma.powi(2))).exp();
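            // e.g. with sigma = 1.0, an adjacent neuron (index distance 1)
            // receives exp(-0.5) ≈ 0.61 of the winner's update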

            // Update neuron weights
            let effective_learning_rate =
                self.learning_rates[i] * learning_factor * neighborhood_influence;

            for (weight, &input_val) in neuron.iter_mut().zip(input.iter()) {
                *weight += effective_learning_rate * (input_val - *weight);
            }
        }

        Ok(())
    }

    /// Apply lateral inhibition between neurons
    ///
    /// Strengthens inhibitory connections and adjusts learning rates
    /// based on competitive dynamics.
    fn apply_lateral_inhibition(&mut self, winner: usize) -> SpatialResult<()> {
        // Strengthen inhibitory connections from winner to others
        for i in 0..self.neurons.len() {
            if i != winner {
                self.inhibition_strengths[[winner, i]] += 0.001;
                self.inhibition_strengths[[winner, i]] =
                    self.inhibition_strengths[[winner, i]].min(0.5);

                // Reduce learning rate of inhibited neurons
                self.learning_rates[i] *= 0.99;
                self.learning_rates[i] = self.learning_rates[i].max(0.001);
            }
        }

        // Boost winner's learning rate slightly
        self.learning_rates[winner] *= 1.001;
        self.learning_rates[winner] = self.learning_rates[winner].min(0.2);

        Ok(())
    }

    /// Get cluster centers (neuron weights)
    ///
    /// # Returns
    /// Array containing the current neuron weight vectors as cluster centers
    pub fn get_cluster_centers(&self) -> Array2<f64> {
        let num_clusters = self.neurons.len();
        let input_dims = self.neurons[0].len();

        let mut centers = Array2::zeros((num_clusters, input_dims));
        for (i, neuron) in self.neurons.iter().enumerate() {
            centers.row_mut(i).assign(neuron);
        }

        centers
    }

    /// Get current learning rates
    pub fn learning_rates(&self) -> &[f64] {
        &self.learning_rates
    }

    /// Get inhibition strength matrix
    pub fn inhibition_strengths(&self) -> &Array2<f64> {
        &self.inhibition_strengths
    }

    /// Get number of clusters
    pub fn num_clusters(&self) -> usize {
        self.neurons.len()
    }

    /// Reset the clusterer to initial state
    pub fn reset(&mut self) {
        let mut rng = rand::rng();

        // Reinitialize neuron weights
        for neuron in &mut self.neurons {
            for weight in neuron.iter_mut() {
                *weight = rng.random_range(0.0..1.0);
            }
        }

        // Reset learning rates
        for rate in &mut self.learning_rates {
            *rate = 0.1;
        }

        // Reset inhibition strengths
        let num_clusters = self.neurons.len();
        for i in 0..num_clusters {
            for j in 0..num_clusters {
                self.inhibition_strengths[[i, j]] = if i == j { 0.0 } else { 0.1 };
            }
        }
    }
}

/// Advanced homeostatic plasticity for neuromorphic spatial learning
///
/// This clusterer implements homeostatic plasticity mechanisms that maintain
/// stable neural activity levels while adapting to input patterns. It includes
/// intrinsic plasticity, synaptic scaling, and multi-timescale adaptation.
///
/// # Features
/// - Homeostatic neurons with intrinsic plasticity
/// - Synaptic scaling for stability
/// - Multi-timescale adaptation (fast, medium, slow)
/// - Metaplasticity for adaptive learning rates
/// - Target firing rate maintenance
///
/// # Example
/// ```rust
/// use ndarray::Array2;
/// use scirs2_spatial::neuromorphic::algorithms::HomeostaticNeuralClusterer;
///
/// let points = Array2::from_shape_vec((4, 2), vec![
///     0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0
/// ]).unwrap();
///
/// let mut clusterer = HomeostaticNeuralClusterer::new(2, 2)
///     .with_homeostatic_params(0.1, 1000.0);
/// let assignments = clusterer.fit(&points.view(), 50).unwrap();
/// ```
#[derive(Debug, Clone)]
pub struct HomeostaticNeuralClusterer {
    /// Number of clusters (output neurons)
    num_clusters: usize,
    /// Input dimension
    input_dim: usize,
    /// Output neurons with homeostatic mechanisms
    output_neurons: Vec<HomeostaticNeuron>,
    /// Synaptic weights
    weights: Array2<f64>,
    /// Global inhibition strength
    #[allow(dead_code)]
    global_inhibition: f64,
    /// Learning rate adaptation parameters
    learning_rate_adaptation: LearningRateAdaptation,
    /// Metaplasticity parameters
    metaplasticity: MetaplasticityController,
    /// Multi-timescale adaptation
    multi_timescale: MultiTimescaleAdaptation,
}

impl HomeostaticNeuralClusterer {
    /// Create new homeostatic neural clusterer
    ///
    /// # Arguments
    /// * `num_clusters` - Number of clusters to discover
    /// * `input_dim` - Number of input dimensions
    ///
    /// # Returns
    /// A new `HomeostaticNeuralClusterer` with default parameters
    pub fn new(num_clusters: usize, input_dim: usize) -> Self {
        let mut output_neurons = Vec::new();
        for _ in 0..num_clusters {
            output_neurons.push(HomeostaticNeuron::new());
        }

        let weights = Array2::zeros((num_clusters, input_dim));

        let learning_rate_adaptation = LearningRateAdaptation::new(0.01);
        let metaplasticity = MetaplasticityController::new(num_clusters, input_dim);
        let multi_timescale = MultiTimescaleAdaptation::new();

        Self {
            num_clusters,
            input_dim,
            output_neurons,
            weights,
            global_inhibition: 0.1,
            learning_rate_adaptation,
            metaplasticity,
            multi_timescale,
        }
    }

    /// Configure homeostatic parameters
    ///
    /// # Arguments
    /// * `target_firing_rate` - Target firing rate for neurons
    /// * `homeostatic_tau` - Time constant for homeostatic adaptation (ms)
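    ///
    /// # Example
    /// A minimal sketch of the builder (the parameter values are illustrative, not tuned):
    /// ```rust
    /// use scirs2_spatial::neuromorphic::algorithms::HomeostaticNeuralClusterer;
    ///
    /// let clusterer = HomeostaticNeuralClusterer::new(4, 3)
    ///     .with_homeostatic_params(0.05, 500.0);
    /// ```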
    pub fn with_homeostatic_params(
        mut self,
        target_firing_rate: f64,
        homeostatic_tau: f64,
    ) -> Self {
        for neuron in &mut self.output_neurons {
            neuron.target_firing_rate = target_firing_rate;
            neuron.homeostatic_tau = homeostatic_tau;
        }
        self
    }

    /// Fit homeostatic clustering model
    ///
    /// Trains the network using homeostatic plasticity mechanisms to discover
    /// stable cluster representations while maintaining target activity levels.
    ///
    /// # Arguments
    /// * `points` - Input spatial points (n_points × n_dims)
    /// * `epochs` - Number of training epochs
    ///
    /// # Returns
    /// Cluster assignments for each input point
    pub fn fit(&mut self, points: &ArrayView2<f64>, epochs: usize) -> SpatialResult<Array1<usize>> {
        let (n_samples, n_features) = points.dim();

        if n_features != self.input_dim {
            return Err(SpatialError::InvalidInput(
                "Input dimension mismatch".to_string(),
            ));
        }

        if n_samples == 0 {
            return Ok(Array1::zeros(0));
        }

        // Initialize weights randomly
        self.initialize_weights()?;

        let mut assignments = Array1::zeros(n_samples);
        let current_time = 0.0;
        let dt = 1.0; // 1 ms time step, matching the neuron time constants (ms)

        for epoch in 0..epochs {
            let mut epoch_error = 0.0;

            for (sample_idx, sample) in points.outer_iter().enumerate() {
                // Forward pass with homeostatic mechanisms
                let (winner_idx, neuron_activities) = self.forward_pass_homeostatic(
                    &sample,
                    current_time + (epoch * n_samples + sample_idx) as f64 * dt,
                )?;

                assignments[sample_idx] = winner_idx;

                // Compute reconstruction error
                let reconstruction = self.weights.row(winner_idx);
                let error: f64 = sample
                    .iter()
                    .zip(reconstruction.iter())
                    .map(|(&x, &w)| (x - w).powi(2))
                    .sum();
                epoch_error += error;

                // Homeostatic learning update
                self.homeostatic_learning_update(
                    &sample,
                    winner_idx,
                    &neuron_activities,
                    error,
                    current_time + (epoch * n_samples + sample_idx) as f64 * dt,
                )?;
            }

            // Update learning rate based on performance
            self.learning_rate_adaptation
                .update_learning_rate(epoch_error / n_samples as f64);

            // Update multi-timescale adaptation
            self.multi_timescale
                .update(epoch_error / n_samples as f64, dt * n_samples as f64);

            // Homeostatic updates at end of epoch
            self.update_homeostatic_mechanisms(dt * n_samples as f64)?;
        }

        Ok(assignments)
    }

    /// Get cluster centers (weights)
    pub fn get_cluster_centers(&self) -> Array2<f64> {
        self.weights.clone()
    }

    /// Get number of clusters
    pub fn num_clusters(&self) -> usize {
        self.num_clusters
    }

    /// Get neuron firing rates
    pub fn neuron_firing_rates(&self) -> Vec<f64> {
        self.output_neurons
            .iter()
            .map(|neuron| neuron.actual_firing_rate)
            .collect()
    }

    /// Get current learning rate
    pub fn current_learning_rate(&self) -> f64 {
        self.learning_rate_adaptation.base_rate
    }

    /// Forward pass with homeostatic mechanisms
    ///
    /// Computes neural activities with homeostatic modulation and finds
    /// the winning neuron based on highest activity.
    fn forward_pass_homeostatic(
        &mut self,
        input: &ArrayView1<f64>,
        current_time: f64,
    ) -> SpatialResult<(usize, Array1<f64>)> {
        let mut activities = Array1::zeros(self.num_clusters);

        // Compute neural activities with homeostatic modulation
        for (neuron_idx, neuron) in self.output_neurons.iter_mut().enumerate() {
            let weights_row = self.weights.row(neuron_idx);

            // Compute dot product (synaptic input)
            let synaptic_input: f64 = input
                .iter()
                .zip(weights_row.iter())
                .map(|(&x, &w)| x * w)
                .sum();

            // Apply synaptic scaling
            let scaled_input = synaptic_input * neuron.synaptic_scaling;

            // Update membrane potential
            neuron.update_membrane_potential(scaled_input, current_time);

            // Apply intrinsic excitability modulation
            let modulated_potential = neuron.membrane_potential * neuron.intrinsic_excitability;
            activities[neuron_idx] = modulated_potential;
        }

        // Find winner (highest activity)
        let winner_idx = activities
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(i, _)| i)
            .unwrap_or(0);

        // Update firing rates
        self.output_neurons[winner_idx].record_spike(current_time);

        Ok((winner_idx, activities))
    }

    /// Initialize weights randomly
    fn initialize_weights(&mut self) -> SpatialResult<()> {
        let mut rng = rand::rng();

        for mut row in self.weights.outer_iter_mut() {
            for weight in row.iter_mut() {
                *weight = rng.random_range(0.0..1.0);
            }

            // Normalize weights
            let norm: f64 = row.iter().map(|&w| w * w).sum::<f64>().sqrt();
            if norm > 0.0 {
                for weight in row.iter_mut() {
                    *weight /= norm;
                }
            }
        }

        Ok(())
    }

    /// Homeostatic learning update
    ///
    /// Updates synaptic weights using homeostatic learning rules that
    /// incorporate metaplasticity and multi-timescale adaptation.
    fn homeostatic_learning_update(
        &mut self,
        input: &ArrayView1<f64>,
        winner_idx: usize,
        activities: &Array1<f64>,
        error: f64,
        current_time: f64,
    ) -> SpatialResult<()> {
        // Get current learning rate
        let learning_rate = self.learning_rate_adaptation.base_rate;

        // Apply metaplasticity
        let meta_modulation = self.metaplasticity.compute_modulation(winner_idx, error);

        // Apply multi-timescale adaptation
        let timescale_modulation = self.multi_timescale.get_adaptation_factor();

        // Combined learning rate
        let effective_learning_rate = learning_rate * meta_modulation * timescale_modulation;

        // Update winner weights (competitive learning with homeostatic modulation)
        let winner_neuron = &self.output_neurons[winner_idx];
        let homeostatic_factor = winner_neuron.get_homeostatic_factor();

        for (weight, &input_val) in self
            .weights
            .row_mut(winner_idx)
            .iter_mut()
            .zip(input.iter())
        {
            let weight_update =
                effective_learning_rate * homeostatic_factor * (input_val - *weight);
            *weight += weight_update;
        }

        // Update metaplasticity variables
        self.metaplasticity
            .update_metaplastic_variables(winner_idx, activities, current_time);

        Ok(())
    }

    /// Update homeostatic mechanisms
    fn update_homeostatic_mechanisms(&mut self, dt: f64) -> SpatialResult<()> {
        for neuron in &mut self.output_neurons {
            neuron.update_homeostatic_mechanisms(dt);
        }
        Ok(())
    }
}

/// Homeostatic neuron with intrinsic plasticity
///
/// This neuron implements multiple homeostatic mechanisms to maintain
/// stable activity levels, including intrinsic excitability adaptation
/// and synaptic scaling.
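///
/// # Example
/// A minimal sketch of the update cycle (times in ms; the import path
/// mirrors the clusterer examples above):
/// ```rust
/// use scirs2_spatial::neuromorphic::algorithms::HomeostaticNeuron;
///
/// let mut neuron = HomeostaticNeuron::new();
/// neuron.update_membrane_potential(1.0, 10.0);
/// neuron.record_spike(10.0);
/// neuron.update_homeostatic_mechanisms(1.0);
/// assert!(neuron.get_homeostatic_factor() > 0.0);
/// ```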
#[derive(Debug, Clone)]
pub struct HomeostaticNeuron {
    /// Current membrane potential
    pub membrane_potential: f64,
    /// Spike threshold (adaptive)
    #[allow(dead_code)]
    pub threshold: f64,
    /// Target firing rate
    pub target_firing_rate: f64,
    /// Actual firing rate (exponential moving average)
    pub actual_firing_rate: f64,
    /// Intrinsic excitability
    pub intrinsic_excitability: f64,
    /// Homeostatic time constant
    pub homeostatic_tau: f64,
    /// Spike history for rate computation
    pub spike_history: VecDeque<f64>,
    /// Synaptic scaling factor
    pub synaptic_scaling: f64,
    /// Membrane time constant
    pub membrane_tau: f64,
    /// Last spike time
    pub last_spike_time: f64,
}

impl HomeostaticNeuron {
    /// Create new homeostatic neuron
    pub fn new() -> Self {
        Self {
            membrane_potential: 0.0,
            threshold: 1.0,
            target_firing_rate: 0.1,
            actual_firing_rate: 0.0,
            intrinsic_excitability: 1.0,
            homeostatic_tau: 1000.0, // 1 second
            spike_history: VecDeque::new(),
            synaptic_scaling: 1.0,
            membrane_tau: 10.0, // 10ms
            last_spike_time: -1000.0,
        }
    }

    /// Update membrane potential
    pub fn update_membrane_potential(&mut self, input: f64, current_time: f64) {
        let dt = current_time - self.last_spike_time;

        // Leaky integrate-and-fire dynamics
        let decay = (-dt / self.membrane_tau).exp();
        self.membrane_potential = self.membrane_potential * decay + input;
    }

    /// Record spike and update firing rate
    pub fn record_spike(&mut self, current_time: f64) {
        self.spike_history.push_back(current_time);
        self.last_spike_time = current_time;

        // Keep only recent spikes (last 1 second)
        while let Some(&front_time) = self.spike_history.front() {
            if current_time - front_time > 1000.0 {
                self.spike_history.pop_front();
            } else {
                break;
            }
        }

        // Update actual firing rate (exponential moving average)
        let instantaneous_rate = self.spike_history.len() as f64 / 1000.0; // spikes per ms
        let alpha = 1.0 / self.homeostatic_tau;
        self.actual_firing_rate =
            (1.0 - alpha) * self.actual_firing_rate + alpha * instantaneous_rate;
    }

    /// Update homeostatic mechanisms
    pub fn update_homeostatic_mechanisms(&mut self, dt: f64) {
        // Intrinsic plasticity: adjust excitability to maintain target firing rate
        let rate_error = self.target_firing_rate - self.actual_firing_rate;
        let excitability_update = 0.001 * rate_error * dt;
        self.intrinsic_excitability += excitability_update;
        self.intrinsic_excitability = self.intrinsic_excitability.clamp(0.1, 10.0);

        // Synaptic scaling: global scaling of all synapses
        let scaling_rate = 0.0001;
        let scaling_update = scaling_rate * rate_error * dt;
        self.synaptic_scaling += scaling_update;
        self.synaptic_scaling = self.synaptic_scaling.clamp(0.1, 10.0);
    }

    /// Get homeostatic factor for learning modulation
    pub fn get_homeostatic_factor(&self) -> f64 {
        // Higher factor when firing rate is below target (need to strengthen synapses)
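        // e.g. firing exactly at target (ratio = 1) gives a neutral factor of 1.0,
        // while a silent neuron (ratio = 0) gets the maximum boost of 2.0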
        let rate_ratio = self.actual_firing_rate / self.target_firing_rate.max(1e-6);
        (2.0 / (1.0 + rate_ratio)).clamp(0.1, 10.0)
    }
}

/// Learning rate adaptation mechanisms
///
/// Adapts the learning rate based on performance history to optimize
/// learning dynamics during training.
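///
/// # Example
/// A minimal sketch of the adaptation loop (the error values are illustrative):
/// ```rust
/// use scirs2_spatial::neuromorphic::algorithms::LearningRateAdaptation;
///
/// let mut adaptation = LearningRateAdaptation::new(0.01);
/// for error in [1.0, 0.8, 0.6] {
///     adaptation.update_learning_rate(error);
/// }
/// assert!(adaptation.base_rate >= adaptation.min_rate);
/// ```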
#[derive(Debug, Clone)]
pub struct LearningRateAdaptation {
    /// Base learning rate
    pub base_rate: f64,
    /// Adaptation factor
    pub adaptation_factor: f64,
    /// Performance history
    pub performance_history: VecDeque<f64>,
    /// Adaptation threshold
    pub adaptation_threshold: f64,
    /// Maximum learning rate
    pub max_rate: f64,
    /// Minimum learning rate
    pub min_rate: f64,
}

impl LearningRateAdaptation {
    /// Create new learning rate adaptation
    pub fn new(base_rate: f64) -> Self {
        Self {
            base_rate,
            adaptation_factor: 0.1,
            performance_history: VecDeque::new(),
            adaptation_threshold: 0.1,
            max_rate: 0.1,
            min_rate: 1e-6,
        }
    }

    /// Update learning rate based on performance
    pub fn update_learning_rate(&mut self, current_error: f64) {
        self.performance_history.push_back(current_error);

        // Keep only recent errors
        if self.performance_history.len() > 100 {
            self.performance_history.pop_front();
        }

        // Adapt learning rate based on the error trend, once both ten-sample
        // comparison windows are fully populated
        if self.performance_history.len() >= 20 {
            let recent_avg = self.performance_history.iter().rev().take(10).sum::<f64>() / 10.0;
            let older_avg = self
                .performance_history
                .iter()
                .rev()
                .skip(10)
                .take(10)
                .sum::<f64>()
                / 10.0;

            let performance_ratio = if older_avg > 0.0 {
                recent_avg / older_avg
            } else {
                1.0
            };

            // Adapt learning rate
            if performance_ratio < 0.95 {
                // Performance improving - increase learning rate slightly
                self.base_rate *= 1.01;
            } else if performance_ratio > 1.05 {
                // Performance degrading - decrease learning rate
                self.base_rate *= 0.99;
            }

            // Apply bounds
            self.base_rate = self.base_rate.max(self.min_rate).min(self.max_rate);
        }
    }
}

/// Metaplasticity controller for flexible learning
///
/// Controls learning plasticity based on activity history to implement
/// metaplasticity: the plasticity of plasticity itself.
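///
/// # Example
/// A minimal sketch (the shapes and error value are illustrative):
/// ```rust
/// use scirs2_spatial::neuromorphic::algorithms::MetaplasticityController;
///
/// let controller = MetaplasticityController::new(2, 3);
/// let modulation = controller.compute_modulation(0, 0.5);
/// assert!(modulation > 0.0 && modulation.is_finite());
/// ```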
#[derive(Debug, Clone)]
pub struct MetaplasticityController {
    /// Metaplastic variables for each synapse
    metaplastic_variables: Array2<f64>,
    /// Metaplastic time constant
    meta_tau: f64,
    /// Plasticity threshold
    #[allow(dead_code)]
    plasticity_threshold: f64,
    /// LTP/LTD balance factor
    #[allow(dead_code)]
    ltp_ltd_balance: f64,
    /// Activity-dependent scaling
    #[allow(dead_code)]
    activity_scaling: f64,
}

impl MetaplasticityController {
    /// Create new metaplasticity controller
    pub fn new(num_clusters: usize, input_dim: usize) -> Self {
        Self {
            metaplastic_variables: Array2::ones((num_clusters, input_dim)),
            meta_tau: 10000.0, // 10 seconds
            plasticity_threshold: 0.5,
            ltp_ltd_balance: 1.0,
            activity_scaling: 1.0,
        }
    }

    /// Compute metaplastic modulation
    pub fn compute_modulation(&self, neuron_idx: usize, error: f64) -> f64 {
        let meta_var_avg = self
            .metaplastic_variables
            .row(neuron_idx)
            .mean()
            .unwrap_or(1.0);

        // Higher metaplastic variable means lower plasticity (harder to change)
        let modulation = 1.0 / (1.0 + meta_var_avg);

        // Scale by error magnitude
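        // e.g. with freshly initialized metaplastic variables (avg = 1.0) and
        // error = 0.5, this returns 0.5 * ln(1.5) ≈ 0.20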
        modulation * (1.0 + error.abs()).ln()
    }

    /// Update metaplastic variables
    pub fn update_metaplastic_variables(
        &mut self,
        winner_idx: usize,
        activities: &Array1<f64>,
        _current_time: f64,
    ) {
        let dt = 1.0; // Assume 1ms updates
        let decay_factor = (-dt / self.meta_tau).exp();

        // Update metaplastic variables for winner
        for meta_var in self.metaplastic_variables.row_mut(winner_idx).iter_mut() {
            *meta_var = *meta_var * decay_factor + (1.0 - decay_factor) * activities[winner_idx];
        }
    }
}

/// Multi-timescale adaptation for different learning phases
///
/// Implements adaptation mechanisms operating at different timescales,
/// from fast (milliseconds) through medium (seconds) to slow (minutes).
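///
/// # Example
/// A minimal sketch (the error and time step, in ms, are illustrative):
/// ```rust
/// use scirs2_spatial::neuromorphic::algorithms::MultiTimescaleAdaptation;
///
/// let mut adaptation = MultiTimescaleAdaptation::new();
/// adaptation.update(0.5, 1.0);
/// let factor = adaptation.get_adaptation_factor();
/// assert!(factor > 0.0 && factor.is_finite());
/// ```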
#[derive(Debug, Clone)]
pub struct MultiTimescaleAdaptation {
    /// Fast adaptation (millisecond timescale)
    fast_adaptation: AdaptationScale,
    /// Medium adaptation (second timescale)
    medium_adaptation: AdaptationScale,
    /// Slow adaptation (minute timescale)
    slow_adaptation: AdaptationScale,
    /// Current timescale weights
    timescale_weights: Array1<f64>,
}

impl MultiTimescaleAdaptation {
    /// Create new multi-timescale adaptation
    pub fn new() -> Self {
        Self {
            fast_adaptation: AdaptationScale::new(1.0, 1.0), // 1ms timescale
            medium_adaptation: AdaptationScale::new(1000.0, 0.5), // 1s timescale
            slow_adaptation: AdaptationScale::new(60000.0, 0.1), // 1min timescale
            timescale_weights: Array1::from(vec![0.5, 0.3, 0.2]),
        }
    }

    /// Update all adaptation scales
    pub fn update(&mut self, error: f64, dt: f64) {
        self.fast_adaptation.update(error, dt);
        self.medium_adaptation.update(error, dt);
        self.slow_adaptation.update(error, dt);
    }

    /// Get combined adaptation factor
    pub fn get_adaptation_factor(&self) -> f64 {
        let fast_factor = self.fast_adaptation.memory_trace;
        let medium_factor = self.medium_adaptation.memory_trace;
        let slow_factor = self.slow_adaptation.memory_trace;

        self.timescale_weights[0] * fast_factor
            + self.timescale_weights[1] * medium_factor
            + self.timescale_weights[2] * slow_factor
    }
}

/// Individual adaptation scale
///
/// Represents adaptation operating at a specific timescale with
/// exponential memory traces and decay dynamics.
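///
/// # Example
/// A minimal sketch (the time constant and error are illustrative):
/// ```rust
/// use scirs2_spatial::neuromorphic::algorithms::AdaptationScale;
///
/// let mut scale = AdaptationScale::new(10.0, 0.5);
/// scale.update(0.2, 1.0);
/// assert!(scale.memory_trace >= 0.0 && scale.memory_trace <= 2.0);
/// ```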
#[derive(Debug, Clone)]
pub struct AdaptationScale {
    /// Time constant for this scale
    time_constant: f64,
    /// Adaptation strength
    #[allow(dead_code)]
    adaptation_strength: f64,
    /// Memory trace
    pub memory_trace: f64,
    /// Decay factor
    #[allow(dead_code)]
    decay_factor: f64,
}

impl AdaptationScale {
    /// Create new adaptation scale
    pub fn new(time_constant: f64, adaptation_strength: f64) -> Self {
        Self {
            time_constant,
            adaptation_strength,
            memory_trace: 1.0,
            decay_factor: 0.999,
        }
    }

    /// Update this adaptation scale
    pub fn update(&mut self, error: f64, dt: f64) {
        let decay = (-dt / self.time_constant).exp();
        self.memory_trace = self.memory_trace * decay + (1.0 - decay) * (1.0 - error);
        self.memory_trace = self.memory_trace.clamp(0.0, 2.0);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::Array2;

    #[test]
    fn test_competitive_clusterer_creation() {
        let clusterer = CompetitiveNeuralClusterer::new(3, 2);
        assert_eq!(clusterer.num_clusters(), 3);
        assert_eq!(clusterer.learning_rates().len(), 3);
        assert_eq!(clusterer.inhibition_strengths().dim(), (3, 3));
    }

    #[test]
    fn test_competitive_clustering() {
        let points =
            Array2::from_shape_vec((4, 2), vec![0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]).unwrap();

        let mut clusterer = CompetitiveNeuralClusterer::new(2, 2);
        let result = clusterer.fit(&points.view(), 10);
        assert!(result.is_ok());

        let assignments = result.unwrap();
        assert_eq!(assignments.len(), 4);

        let centers = clusterer.get_cluster_centers();
        assert_eq!(centers.dim(), (2, 2));
    }

    #[test]
    fn test_homeostatic_clusterer_creation() {
        let clusterer = HomeostaticNeuralClusterer::new(3, 2);
        assert_eq!(clusterer.num_clusters(), 3);
        assert_eq!(clusterer.neuron_firing_rates().len(), 3);
    }

    #[test]
    fn test_homeostatic_clustering() {
        let points =
            Array2::from_shape_vec((4, 2), vec![0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0]).unwrap();

        let mut clusterer =
            HomeostaticNeuralClusterer::new(2, 2).with_homeostatic_params(0.1, 100.0);

        let result = clusterer.fit(&points.view(), 10);
        assert!(result.is_ok());

        let assignments = result.unwrap();
        assert_eq!(assignments.len(), 4);

        let centers = clusterer.get_cluster_centers();
        assert_eq!(centers.dim(), (2, 2));
    }

    #[test]
    fn test_homeostatic_neuron() {
        let mut neuron = HomeostaticNeuron::new();

        // Test membrane potential update
        neuron.update_membrane_potential(1.0, 10.0);
        assert!(neuron.membrane_potential > 0.0);

        // Test spike recording
        neuron.record_spike(10.0);
        assert_eq!(neuron.spike_history.len(), 1);

        // Test homeostatic mechanisms
        neuron.update_homeostatic_mechanisms(1.0);
        assert!(neuron.intrinsic_excitability > 0.0);
        assert!(neuron.synaptic_scaling > 0.0);
    }

    #[test]
    fn test_learning_rate_adaptation() {
        let mut adaptation = LearningRateAdaptation::new(0.01);

        // Simulate improving performance
        adaptation.update_learning_rate(1.0);
        adaptation.update_learning_rate(0.8);
        adaptation.update_learning_rate(0.6);

        // Rate should still be within bounds
        assert!(adaptation.base_rate >= adaptation.min_rate);
        assert!(adaptation.base_rate <= adaptation.max_rate);
    }

    #[test]
    fn test_metaplasticity_controller() {
        let controller = MetaplasticityController::new(2, 3);

        let modulation = controller.compute_modulation(0, 0.5);
        assert!(modulation > 0.0);
        assert!(modulation.is_finite());
    }

    #[test]
    fn test_multi_timescale_adaptation() {
        let mut adaptation = MultiTimescaleAdaptation::new();

        // Test update
        adaptation.update(0.5, 1.0);

        let factor = adaptation.get_adaptation_factor();
        assert!(factor > 0.0);
        assert!(factor.is_finite());
    }

    #[test]
    fn test_adaptation_scale() {
        let mut scale = AdaptationScale::new(10.0, 0.5);
        let initial_trace = scale.memory_trace;

        scale.update(0.2, 1.0);

        // Memory trace should have changed
        assert_ne!(scale.memory_trace, initial_trace);
        assert!(scale.memory_trace >= 0.0);
        assert!(scale.memory_trace <= 2.0);
    }

    #[test]
    fn test_empty_input() {
        let points = Array2::zeros((0, 2));

        let mut competitive = CompetitiveNeuralClusterer::new(2, 2);
        let result = competitive.fit(&points.view(), 10);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().len(), 0);

        let mut homeostatic = HomeostaticNeuralClusterer::new(2, 2);
        let result = homeostatic.fit(&points.view(), 10);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().len(), 0);
    }

    #[test]
    fn test_dimension_mismatch() {
        let points = Array2::zeros((4, 3)); // Wrong dimension
        let mut clusterer = HomeostaticNeuralClusterer::new(2, 2);

        let result = clusterer.fit(&points.view(), 10);
        assert!(result.is_err());
    }

    #[test]
    fn test_competitive_reset() {
        let mut clusterer = CompetitiveNeuralClusterer::new(2, 2);

        // Modify some parameters
        clusterer.learning_rates[0] = 0.5;

        // Reset should restore initial state
        clusterer.reset();
        assert_eq!(clusterer.learning_rates[0], 0.1);
    }
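
    #[test]
    fn test_homeostatic_factor_at_target_rate() {
        // Added sanity check: a neuron firing exactly at its target rate
        // should get a neutral learning modulation of 1.0 (= 2 / (1 + 1)).
        let mut neuron = HomeostaticNeuron::new();
        neuron.actual_firing_rate = neuron.target_firing_rate;
        assert!((neuron.get_homeostatic_factor() - 1.0).abs() < 1e-12);
    }

    #[test]
    fn test_competition_params_builder() {
        // Added sanity check: the builder should store the configured sigma
        // (the values here are illustrative, not tuned).
        let clusterer = CompetitiveNeuralClusterer::new(2, 2).with_competition_params(2.0, 0.6);
        assert_eq!(clusterer.neighborhood_sigma, 2.0);
    }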
}