avx_async/neuro.rs

//! Neural Network Module
//!
//! Lightweight neural network for runtime optimization

use std::sync::{Arc, Mutex};

/// Simple feedforward neural network
#[derive(Clone)]
pub struct NeuralNetwork {
    layers: Arc<Mutex<Vec<Layer>>>,
    learning_rate: f64,
}

struct Layer {
    weights: Vec<Vec<f64>>,
    biases: Vec<f64>,
    activations: Vec<f64>,
}

impl NeuralNetwork {
    /// Create a new neural network with specified layer sizes
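    ///
    /// `layer_sizes` gives the width of every layer, input first. A minimal
    /// usage sketch (the sizes and learning rate are illustrative, not values
    /// this module prescribes):
    ///
    /// ```ignore
    /// let net = NeuralNetwork::new(&[4, 8, 2], 0.01);
    /// let out = net.predict(&[0.1, 0.2, 0.3, 0.4]);
    /// assert_eq!(out.len(), 2);
    /// ```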
    pub fn new(layer_sizes: &[usize], learning_rate: f64) -> Self {
        let mut layers = Vec::new();

        for i in 0..layer_sizes.len() - 1 {
            let input_size = layer_sizes[i];
            let output_size = layer_sizes[i + 1];

            // Initialize weights with He initialization (scale = sqrt(2 / fan_in),
            // suited to ReLU), using a deterministic pseudo-random pattern.
            // The pattern depends on both the neuron and input indices so that
            // no two neurons start with identical weight rows.
            let scale = (2.0 / input_size as f64).sqrt();
            let weights: Vec<Vec<f64>> = (0..output_size)
                .map(|neuron| {
                    (0..input_size)
                        .map(|j| (((neuron * 7919 + j * 3571) as f64 % 1000.0) / 1000.0 - 0.5) * scale)
                        .collect()
                })
                .collect();

            let biases = vec![0.0; output_size];
            let activations = vec![0.0; output_size];

            layers.push(Layer { weights, biases, activations });
        }

        Self {
            layers: Arc::new(Mutex::new(layers)),
            learning_rate,
        }
    }

    /// Forward pass through the network
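    ///
    /// Applies `weights * input + bias` followed by ReLU at every layer and
    /// returns the final layer's activations. A usage sketch:
    ///
    /// ```ignore
    /// let net = NeuralNetwork::new(&[2, 3, 1], 0.01);
    /// let out = net.predict(&[1.0, -1.0]);
    /// assert_eq!(out.len(), 1);
    /// ```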
    pub fn predict(&self, inputs: &[f64]) -> Vec<f64> {
        let mut layers = self.layers.lock().unwrap();
        let mut current = inputs.to_vec();

        for layer in layers.iter_mut() {
            let mut next = Vec::with_capacity(layer.biases.len());

            for (neuron_idx, (weights, bias)) in layer.weights.iter().zip(layer.biases.iter()).enumerate() {
                let sum: f64 = weights.iter()
                    .zip(current.iter())
                    .map(|(w, x)| w * x)
                    .sum::<f64>() + bias;

                // ReLU activation; the result is cached so `train` can reuse it
                let activation = sum.max(0.0);
                next.push(activation);
                layer.activations[neuron_idx] = activation;
            }

            current = next;
        }

        current
    }

    /// Train the network with a single sample (online learning)
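    ///
    /// Returns the MSE loss for the sample. A sketch of an online training
    /// loop (the data and iteration count are illustrative):
    ///
    /// ```ignore
    /// let net = NeuralNetwork::new(&[2, 4, 1], 0.05);
    /// for _ in 0..100 {
    ///     let loss = net.train(&[0.0, 1.0], &[1.0]);
    ///     assert!(loss.is_finite());
    /// }
    /// ```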
    pub fn train(&self, inputs: &[f64], targets: &[f64]) -> f64 {
        // Forward pass (also caches each layer's activations)
        let outputs = self.predict(inputs);

        // Calculate loss (MSE)
        let loss: f64 = outputs.iter()
            .zip(targets.iter())
            .map(|(o, t)| (o - t).powi(2))
            .sum::<f64>() / outputs.len() as f64;

        // Backward pass (simplified gradient descent: only the output layer
        // is updated; hidden-layer weights are left unchanged)
        let mut layers = self.layers.lock().unwrap();

        // The output layer's inputs are the previous layer's activations,
        // or the raw inputs if the network has a single layer
        let prev_activations: Vec<f64> = if layers.len() >= 2 {
            layers[layers.len() - 2].activations.clone()
        } else {
            inputs.to_vec()
        };

        // Output layer error (dMSE/dOutput, up to a constant factor)
        let output_layer = layers.last_mut().unwrap();
        let output_errors: Vec<f64> = outputs.iter()
            .zip(targets.iter())
            .map(|(o, t)| o - t)
            .collect();

        // Update output layer weights and biases
        for (neuron_idx, error) in output_errors.iter().enumerate() {
            // ReLU derivative: gradient flows only through active neurons
            let delta = if output_layer.activations[neuron_idx] > 0.0 { *error } else { 0.0 };

            for (weight_idx, weight) in output_layer.weights[neuron_idx].iter_mut().enumerate() {
                // A missing input contributes nothing, so its gradient is zero
                let x = prev_activations.get(weight_idx).copied().unwrap_or(0.0);
                *weight -= self.learning_rate * delta * x;
            }
            output_layer.biases[neuron_idx] -= self.learning_rate * delta;
        }

        loss
    }

    /// Get network statistics
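    ///
    /// For example, a `&[4, 8, 2]` network has `4*8 + 8*2 = 48` weights and
    /// `8 + 2 = 10` biases, so `total_parameters` is 58:
    ///
    /// ```ignore
    /// let net = NeuralNetwork::new(&[4, 8, 2], 0.01);
    /// assert_eq!(net.stats().total_parameters, 58);
    /// ```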
    pub fn stats(&self) -> NetworkStats {
        let layers = self.layers.lock().unwrap();

        let total_weights: usize = layers.iter()
            .map(|l| l.weights.iter().map(|w| w.len()).sum::<usize>())
            .sum();

        let total_biases: usize = layers.iter()
            .map(|l| l.biases.len())
            .sum();

        NetworkStats {
            num_layers: layers.len(),
            total_parameters: total_weights + total_biases,
            learning_rate: self.learning_rate,
        }
    }

    /// Export network configuration
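    ///
    /// Each entry of `"layers"` is a layer's output size; the input size is
    /// implied by the previous entry. A network built with `&[4, 8, 2]`
    /// serializes roughly as:
    ///
    /// ```ignore
    /// {
    ///   "type": "feedforward",
    ///   "layers": [8, 2],
    ///   "learning_rate": 0.01,
    ///   "activation": "ReLU"
    /// }
    /// ```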
    pub fn to_json(&self) -> String {
        let layers = self.layers.lock().unwrap();

        // Output size of each layer (one weight row per output neuron)
        let layer_sizes: Vec<String> = layers.iter()
            .map(|l| l.weights.len().to_string())
            .collect();

        format!(
            r#"{{
  "type": "feedforward",
  "layers": [{}],
  "learning_rate": {},
  "activation": "ReLU"
}}"#,
            layer_sizes.join(", "),
            self.learning_rate
        )
    }
}

#[derive(Debug, Clone)]
pub struct NetworkStats {
    pub num_layers: usize,
    pub total_parameters: usize,
    pub learning_rate: f64,
}

impl std::fmt::Display for NetworkStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "NetworkStats[layers={}, params={}, lr={}]",
            self.num_layers, self.total_parameters, self.learning_rate
        )
    }
}

/// Recurrent Neural Network for time-series prediction
#[derive(Clone)]
#[allow(dead_code)]
pub struct RecurrentNetwork {
    hidden_size: usize,
    weights_ih: Arc<Mutex<Vec<Vec<f64>>>>,
    weights_hh: Arc<Mutex<Vec<Vec<f64>>>>,
    biases: Arc<Mutex<Vec<f64>>>,
    hidden_state: Arc<Mutex<Vec<f64>>>,
    learning_rate: f64,
}

impl RecurrentNetwork {
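    /// Create a new recurrent network.
    ///
    /// A minimal usage sketch (the sizes and learning rate are illustrative):
    ///
    /// ```ignore
    /// let rnn = RecurrentNetwork::new(4, 8, 0.01);
    /// let hidden = rnn.step(&[0.1, 0.2, 0.3, 0.4]);
    /// assert_eq!(hidden.len(), 8);
    /// ```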
    pub fn new(input_size: usize, hidden_size: usize, learning_rate: f64) -> Self {
        // He-style scaling based on the input fan-in
        let scale = (2.0 / input_size as f64).sqrt();

        // Deterministic pseudo-random initialization of the input-to-hidden
        // and hidden-to-hidden weight matrices
        let weights_ih: Vec<Vec<f64>> = (0..hidden_size)
            .map(|i| {
                (0..input_size)
                    .map(|j| (((i * 7919 + j * 3571) as f64 % 1000.0) / 1000.0 - 0.5) * scale)
                    .collect()
            })
            .collect();

        let weights_hh: Vec<Vec<f64>> = (0..hidden_size)
            .map(|i| {
                (0..hidden_size)
                    .map(|j| (((i * 5381 + j * 2791) as f64 % 1000.0) / 1000.0 - 0.5) * scale)
                    .collect()
            })
            .collect();

        Self {
            hidden_size,
            weights_ih: Arc::new(Mutex::new(weights_ih)),
            weights_hh: Arc::new(Mutex::new(weights_hh)),
            biases: Arc::new(Mutex::new(vec![0.0; hidden_size])),
            hidden_state: Arc::new(Mutex::new(vec![0.0; hidden_size])),
            learning_rate,
        }
    }

    /// Forward step with input
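    ///
    /// Updates and returns the hidden state. A usage sketch:
    ///
    /// ```ignore
    /// let rnn = RecurrentNetwork::new(2, 4, 0.01);
    /// let h1 = rnn.step(&[1.0, 0.0]);
    /// let h2 = rnn.step(&[0.0, 1.0]); // depends on h1 via the recurrence
    /// ```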
    pub fn step(&self, input: &[f64]) -> Vec<f64> {
        let weights_ih = self.weights_ih.lock().unwrap();
        let weights_hh = self.weights_hh.lock().unwrap();
        let biases = self.biases.lock().unwrap();
        let mut hidden = self.hidden_state.lock().unwrap();

        let mut new_hidden = Vec::with_capacity(self.hidden_size);

        for i in 0..self.hidden_size {
            let input_contrib: f64 = weights_ih[i].iter()
                .zip(input.iter())
                .map(|(w, x)| w * x)
                .sum();

            let hidden_contrib: f64 = weights_hh[i].iter()
                .zip(hidden.iter())
                .map(|(w, h)| w * h)
                .sum();

            // tanh keeps the hidden state bounded in (-1, 1)
            let activation = (input_contrib + hidden_contrib + biases[i]).tanh();
            new_hidden.push(activation);
        }

        *hidden = new_hidden.clone();
        new_hidden
    }

    /// Reset hidden state
    pub fn reset(&self) {
        let mut hidden = self.hidden_state.lock().unwrap();
        hidden.fill(0.0);
    }

    /// Predict next value in sequence
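    ///
    /// The sequence is consumed as a single input vector (`zip` truncates to
    /// the shorter of the sequence and each weight row), and the prediction
    /// is the mean of the new hidden activations:
    ///
    /// ```ignore
    /// let rnn = RecurrentNetwork::new(4, 8, 0.01);
    /// let next = rnn.predict_next(&[0.1, 0.2, 0.3, 0.4]);
    /// assert!(next.is_finite());
    /// ```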
    pub fn predict_next(&self, sequence: &[f64]) -> f64 {
        let output = self.step(sequence);
        output.iter().sum::<f64>() / output.len() as f64
    }
}
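
// A minimal smoke-test sketch for both networks. The asserted properties are
// assumptions read off the code above (ReLU outputs are non-negative, tanh
// outputs lie in (-1, 1)), not behavior specified elsewhere.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn feedforward_predict_and_train() {
        let net = NeuralNetwork::new(&[3, 5, 2], 0.01);

        let out = net.predict(&[0.5, -0.2, 0.8]);
        assert_eq!(out.len(), 2);
        assert!(out.iter().all(|&x| x >= 0.0)); // ReLU outputs are non-negative

        // Online training should return a finite, non-negative MSE
        let loss = net.train(&[0.5, -0.2, 0.8], &[1.0, 0.0]);
        assert!(loss.is_finite() && loss >= 0.0);
    }

    #[test]
    fn recurrent_step_and_reset() {
        let rnn = RecurrentNetwork::new(2, 4, 0.01);

        let h = rnn.step(&[1.0, 0.5]);
        assert_eq!(h.len(), 4);
        assert!(h.iter().all(|&x| x > -1.0 && x < 1.0)); // tanh range

        rnn.reset();
        let prediction = rnn.predict_next(&[0.3, 0.7]);
        assert!(prediction.is_finite());
    }
}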