synaptic_neural_wasm/lib.rs

//! Synaptic Neural WASM - WASM-optimized neural network engine
//!
//! This crate provides high-performance neural network operations optimized
//! for WebAssembly with SIMD acceleration support.
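//!
//! A minimal usage sketch (illustrative only; it assumes the crate is imported
//! as `synaptic_neural_wasm` and uses the native `forward` API defined below
//! rather than the WASM-facing `predict`):
//!
//! ```no_run
//! use synaptic_neural_wasm::{Activation, Layer, NeuralNetwork};
//! use ndarray::Array1;
//!
//! let mut network = NeuralNetwork::new();
//! network.add_layer(Layer::dense(4, 8));
//! network.add_layer(Layer::dense(8, 2).with_activation(Activation::Sigmoid));
//!
//! let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);
//! let output = network.forward(&input.view()).expect("forward pass failed");
//! assert_eq!(output.len(), 2);
//! ```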

use wasm_bindgen::prelude::*;
use serde::{Serialize, Deserialize};
use ndarray::{Array2, Array1, ArrayView2, ArrayView1};
use std::fmt::Debug;

/// Neural network errors
#[derive(Debug, thiserror::Error)]
pub enum NeuralError {
    #[error("Invalid dimensions: {0}")]
    InvalidDimensions(String),

    #[error("Computation error: {0}")]
    ComputationError(String),

    #[error("Serialization error: {0}")]
    SerializationError(String),
}

pub type Result<T> = std::result::Result<T, NeuralError>;

/// Activation functions
#[wasm_bindgen]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum Activation {
    ReLU,
    Sigmoid,
    Tanh,
    Linear,
}

/// Layer types
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum LayerType {
    Dense { input_size: usize, output_size: usize },
    Dropout { rate: f32 },
}

/// Neural network layer
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Layer {
    pub layer_type: LayerType,
    pub activation: Activation,
    pub weights: Option<Array2<f32>>,
    pub bias: Option<Array1<f32>>,
}

impl Layer {
    /// Create a dense layer
    pub fn dense(input_size: usize, output_size: usize) -> Self {
        let weights = Array2::from_shape_fn((output_size, input_size), |_| {
            rand::random::<f32>() * 0.1 - 0.05
        });
        let bias = Array1::zeros(output_size);

        Self {
            layer_type: LayerType::Dense { input_size, output_size },
            activation: Activation::ReLU,
            weights: Some(weights),
            bias: Some(bias),
        }
    }

    /// Set activation function
    pub fn with_activation(mut self, activation: Activation) -> Self {
        self.activation = activation;
        self
    }

    /// Forward pass through the layer
    pub fn forward(&self, input: &ArrayView1<f32>) -> Result<Array1<f32>> {
        match &self.layer_type {
            LayerType::Dense { .. } => {
                let weights = self.weights.as_ref()
                    .ok_or_else(|| NeuralError::ComputationError("No weights".to_string()))?;
                let bias = self.bias.as_ref()
                    .ok_or_else(|| NeuralError::ComputationError("No bias".to_string()))?;

                let output = weights.dot(input) + bias;
                Ok(self.apply_activation(&output))
            }
            LayerType::Dropout { rate: _ } => {
                // In inference mode, dropout is identity
                Ok(input.to_owned())
            }
        }
    }

    /// Apply activation function
    fn apply_activation(&self, x: &Array1<f32>) -> Array1<f32> {
        match self.activation {
            Activation::ReLU => x.mapv(|v| v.max(0.0)),
            Activation::Sigmoid => x.mapv(|v| 1.0 / (1.0 + (-v).exp())),
            Activation::Tanh => x.mapv(|v| v.tanh()),
            Activation::Linear => x.clone(),
        }
    }
}

/// Neural network
#[wasm_bindgen]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NeuralNetwork {
    layers: Vec<Layer>,
}

#[wasm_bindgen]
impl NeuralNetwork {
    /// Create a new neural network
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self {
            layers: Vec::new(),
        }
    }

    /// Get the number of layers
    pub fn layer_count(&self) -> usize {
        self.layers.len()
    }

    /// Predict with the network (returns JSON string for WASM compatibility)
    pub fn predict(&self, input: &[f32]) -> std::result::Result<String, JsValue> {
        if self.layers.is_empty() {
            return Err(JsValue::from_str("No layers in network"));
        }

        let mut current = Array1::from_vec(input.to_vec());

        for layer in &self.layers {
            current = layer.forward(&current.view())
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
        }

        let output: Vec<f32> = current.to_vec();
        serde_json::to_string(&output)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
}

impl NeuralNetwork {
    /// Add a layer to the network
    pub fn add_layer(&mut self, layer: Layer) {
        self.layers.push(layer);
    }

    /// Forward pass through the network
    pub fn forward(&self, input: &ArrayView1<f32>) -> Result<Array1<f32>> {
        let mut current = input.to_owned();

        for layer in &self.layers {
            current = layer.forward(&current.view())?;
        }

        Ok(current)
    }

    /// Save network to JSON
    pub fn to_json(&self) -> Result<String> {
        serde_json::to_string(self)
            .map_err(|e| NeuralError::SerializationError(e.to_string()))
    }

    /// Load network from JSON
    pub fn from_json(json: &str) -> Result<Self> {
        serde_json::from_str(json)
            .map_err(|e| NeuralError::SerializationError(e.to_string()))
    }
}

/// SIMD-accelerated operations
#[cfg(feature = "simd")]
pub mod simd {
    use super::*;

    /// SIMD dot product for f32
    pub fn dot_product_simd(a: &[f32], b: &[f32]) -> f32 {
        assert_eq!(a.len(), b.len());

        let mut sum = 0.0;
        let chunks = a.len() / 4;

        // Process 4 elements at a time
        for i in 0..chunks {
            let idx = i * 4;
            sum += a[idx] * b[idx];
            sum += a[idx + 1] * b[idx + 1];
            sum += a[idx + 2] * b[idx + 2];
            sum += a[idx + 3] * b[idx + 3];
        }

        // Handle remaining elements
        for i in (chunks * 4)..a.len() {
            sum += a[i] * b[i];
        }

        sum
    }

    /// SIMD matrix multiplication
    pub fn matmul_simd(a: &ArrayView2<f32>, b: &ArrayView2<f32>) -> Array2<f32> {
        let (m, k) = a.dim();
        let (k2, n) = b.dim();
        assert_eq!(k, k2);

        let mut result = Array2::zeros((m, n));

        for i in 0..m {
            // Rows and (especially) columns of an ArrayView2 are not guaranteed
            // to be contiguous in memory, so copy them into temporary buffers;
            // calling `as_slice().unwrap()` on a column view would panic.
            let a_row: Vec<f32> = a.row(i).to_vec();
            for j in 0..n {
                let b_col: Vec<f32> = b.column(j).to_vec();
                result[[i, j]] = dot_product_simd(&a_row, &b_col);
            }
        }

        result
    }
}

/// WebAssembly utilities
#[wasm_bindgen]
pub fn init_panic_hook() {
    // Set a panic hook for better error messages in the browser console.
    // This is a no-op unless the optional `console_error_panic_hook`
    // dependency and feature are enabled in Cargo.toml.
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
}

/// Performance utilities
#[wasm_bindgen]
pub struct Performance;

#[wasm_bindgen]
impl Performance {
    /// Get current timestamp in milliseconds
    pub fn now() -> f64 {
        web_sys::window()
            .and_then(|w| w.performance())
            .map(|p| p.now())
            .unwrap_or(0.0)
    }
}

/// Measure execution time
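///
/// A usage sketch (illustrative only; outside a browser `Performance::now`
/// falls back to 0.0, so the measured duration is only meaningful when
/// running as WASM in a web environment):
///
/// ```no_run
/// use synaptic_neural_wasm::measure_time;
///
/// let (sum, elapsed_ms) = measure_time(|| (0..1_000u64).sum::<u64>());
/// assert_eq!(sum, 499_500);
/// println!("took {elapsed_ms} ms");
/// ```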
pub fn measure_time<F: FnOnce() -> R, R>(f: F) -> (R, f64) {
    let start = Performance::now();
    let result = f();
    let elapsed = Performance::now() - start;
    (result, elapsed)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_layer_creation() {
        let layer = Layer::dense(10, 5);
        assert_eq!(layer.weights.as_ref().unwrap().dim(), (5, 10));
        assert_eq!(layer.bias.as_ref().unwrap().len(), 5);
    }

    #[test]
    fn test_activation_functions() {
        let input = Array1::from_vec(vec![-1.0, 0.0, 1.0]);
        let layer = Layer::dense(3, 3).with_activation(Activation::ReLU);

        let activated = layer.apply_activation(&input);
        assert_eq!(activated[0], 0.0); // ReLU(-1) = 0
        assert_eq!(activated[1], 0.0); // ReLU(0) = 0
        assert_eq!(activated[2], 1.0); // ReLU(1) = 1
    }
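
    // Sanity-check sketch: Layer::forward should honour the (output_size)
    // shape contract of a dense layer, and a Dropout layer should act as the
    // identity during inference.
    #[test]
    fn test_layer_forward_shapes() {
        let dense = Layer::dense(3, 2);
        let input = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let output = dense.forward(&input.view()).unwrap();
        assert_eq!(output.len(), 2);

        let dropout = Layer {
            layer_type: LayerType::Dropout { rate: 0.5 },
            activation: Activation::Linear,
            weights: None,
            bias: None,
        };
        // Dropout is the identity at inference time, so the input passes through unchanged.
        let passed = dropout.forward(&input.view()).unwrap();
        assert_eq!(passed.to_vec(), vec![1.0, 2.0, 3.0]);
    }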

    #[test]
    fn test_network_creation() {
        let mut network = NeuralNetwork::new();
        network.add_layer(Layer::dense(784, 128));
        network.add_layer(Layer::dense(128, 10));

        assert_eq!(network.layer_count(), 2);
    }
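
    // End-to-end sketch: forward pass through a small network plus a JSON
    // round-trip. Assumes ndarray's serde support is enabled, which the
    // Serialize/Deserialize derives on Layer already require.
    #[test]
    fn test_network_forward_and_json_roundtrip() {
        let mut network = NeuralNetwork::new();
        network.add_layer(Layer::dense(4, 3).with_activation(Activation::Tanh));
        network.add_layer(Layer::dense(3, 2).with_activation(Activation::Sigmoid));

        let input = Array1::from_vec(vec![0.5, -0.5, 0.25, -0.25]);
        let output = network.forward(&input.view()).unwrap();
        assert_eq!(output.len(), 2);
        // The final layer uses Sigmoid, so outputs must lie in (0, 1).
        assert!(output.iter().all(|&v| v > 0.0 && v < 1.0));

        let json = network.to_json().unwrap();
        let restored = NeuralNetwork::from_json(&json).unwrap();
        assert_eq!(restored.layer_count(), 2);
        // The restored network should reproduce the original outputs exactly.
        assert_eq!(restored.forward(&input.view()).unwrap().to_vec(), output.to_vec());
    }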

    #[cfg(feature = "simd")]
    #[test]
    fn test_simd_dot_product() {
        let a = vec![1.0, 2.0, 3.0, 4.0];
        let b = vec![5.0, 6.0, 7.0, 8.0];

        let result = simd::dot_product_simd(&a, &b);
        assert_eq!(result, 70.0); // 1*5 + 2*6 + 3*7 + 4*8
    }
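
    // Cross-check sketch: matmul_simd on a small 2x3 · 3x2 product, compared
    // against hand-computed values.
    #[cfg(feature = "simd")]
    #[test]
    fn test_simd_matmul() {
        use ndarray::array;

        let a = array![[1.0_f32, 2.0, 3.0], [4.0, 5.0, 6.0]];
        let b = array![[7.0_f32, 8.0], [9.0, 10.0], [11.0, 12.0]];

        let result = simd::matmul_simd(&a.view(), &b.view());
        assert_eq!(result.dim(), (2, 2));
        assert_eq!(result[[0, 0]], 58.0);  // 1*7 + 2*9 + 3*11
        assert_eq!(result[[0, 1]], 64.0);  // 1*8 + 2*10 + 3*12
        assert_eq!(result[[1, 0]], 139.0); // 4*7 + 5*9 + 6*11
        assert_eq!(result[[1, 1]], 154.0); // 4*8 + 5*10 + 6*12
    }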
}