// scirs2_neural/lib.rs
#![allow(deprecated)]
//! # SciRS2 Neural Networks
//!
//! **scirs2-neural** provides PyTorch-style neural network building blocks for Rust,
//! with automatic differentiation integration and production-ready training utilities.
//!
//! ## 🎯 Key Features
//!
//! - **Layer-based Architecture**: Modular neural network layers (Dense, Conv2D, LSTM, etc.)
//! - **Activation Functions**: Common activations (ReLU, Sigmoid, Tanh, GELU, etc.)
//! - **Loss Functions**: Classification and regression losses
//! - **Training Utilities**: Training loops, callbacks, and metrics
//! - **Autograd Integration**: Automatic differentiation via scirs2-autograd
//! - **Type Safety**: Compile-time shape and type checking where possible
//!
//! ## 📦 Module Overview
//!
//! | Module | Description |
//! |--------|-------------|
//! | [`activations_minimal`] | Activation functions (ReLU, Sigmoid, Tanh, GELU, etc.) |
//! | [`layers`] | Neural network layers (Dense, Conv2D, LSTM, Dropout, etc.) |
//! | [`losses`] | Loss functions (MSE, CrossEntropy, Focal, Contrastive, etc.) |
//! | [`training`] | Training loops and utilities |
//! | [`autograd`] | Automatic differentiation integration |
//! | [`error`] | Error types and handling |
//! | [`utils`] | Helper utilities |
//!
//! ## 🚀 Quick Start
//!
//! ### Installation
//!
//! Add to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! scirs2-neural = "0.1.0-rc.1"
//! ```
//!
//! ### Building a Simple Neural Network
//!
//! ```rust,ignore
//! # IGNORED: Waiting for 0.1.0 - API needs migration to scirs2_core abstractions
//! # Current implementation uses ndarray_rand::rand directly (POLICY violation)
//! # TODO: Migrate all layer APIs to use scirs2_core::random::Random
//! use scirs2_neural::prelude::*;
//! use scirs2_core::ndarray::Array2;
//!
//! fn main() -> Result<()> {
//!     let mut rng = scirs2_core::random::Random::seed(42);
//!
//!     // Build a 3-layer MLP for MNIST
//!     let mut model = Sequential::<f32>::new();
//!     model.add(Dense::new(784, 256, Some("relu"), &mut rng)?);
//!     model.add(Dropout::new(0.2, &mut rng)?);
//!     model.add(Dense::new(256, 128, Some("relu"), &mut rng)?);
//!     model.add(Dense::new(128, 10, None, &mut rng)?);
//!
//!     // Forward pass
//!     let input = Array2::<f32>::zeros((32, 784));
//!     // let output = model.forward(&input)?;
//!
//!     println!("Model created with {} layers", model.len());
//!     Ok(())
//! }
//! ```
//!
//! ### Using Individual Layers
//!
//! ```rust,ignore
//! # IGNORED: Waiting for 0.1.0 - API needs migration to scirs2_core abstractions
//! use scirs2_neural::prelude::*;
//! use scirs2_core::ndarray::Array2;
//!
//! fn main() -> Result<()> {
//!     let mut rng = scirs2_core::random::Random::seed(42);
//!
//!     // Dense layer
//!     let mut dense = Dense::new(10, 5, None, &mut rng)?;
//!     let input = Array2::<f32>::zeros((2, 10));
//!     // let output = dense.forward(&input)?;
//!
//!     // Activation functions
//!     let relu = ReLU::new();
//!     let sigmoid = Sigmoid::new();
//!     let tanh = Tanh::new();
//!     let gelu = GELU::new();
//!
//!     // Normalization layers
//!     let batch_norm = BatchNorm::new(5, 0.1, 1e-5, &mut rng)?;
//!     let layer_norm = LayerNorm::new(5, 1e-5, &mut rng)?;
//!
//!     // Regularization
//!     let dropout = Dropout::new(0.5, &mut rng)?;
//!
//!     Ok(())
//! }
//! ```
//!
//! ### Convolutional Networks
//!
//! ```rust,ignore
//! # IGNORED: Waiting for 0.1.0 - API needs migration to scirs2_core abstractions
//! use scirs2_neural::prelude::*;
//!
//! fn main() -> Result<()> {
//!     let mut rng = scirs2_core::random::Random::seed(42);
//!
//!     // Build a simple CNN
//!     let mut model = Sequential::<f32>::new();
//!
//!     // Conv layers (in_channels, out_channels, kernel_size, stride, activation)
//!     model.add(Conv2D::new(1, 32, (3, 3), (1, 1), Some("relu"))?);
//!     model.add(Conv2D::new(32, 64, (3, 3), (1, 1), Some("relu"))?);
//!
//!     // Classifier head (input assumed flattened to 64 * 28 * 28 features)
//!     model.add(Dense::new(64 * 28 * 28, 10, None, &mut rng)?);
//!
//!     Ok(())
//! }
//! ```
//!
//! ### Recurrent Networks (LSTM)
//!
//! ```rust,ignore
//! # IGNORED: Waiting for 0.1.0 - API needs migration to scirs2_core abstractions
//! use scirs2_neural::prelude::*;
//!
//! fn main() -> Result<()> {
//!     let mut rng = scirs2_core::random::Random::seed(42);
//!
//!     // Build an LSTM-based model
//!     let mut model = Sequential::<f32>::new();
//!
//!     // LSTM (input_size, hidden_size, rng)
//!     model.add(LSTM::new(100, 256, &mut rng)?);
//!     model.add(Dense::new(256, 10, None, &mut rng)?);
//!
//!     Ok(())
//! }
//! ```
//!
//! ### Loss Functions
//!
//! ```rust,ignore
//! # IGNORED: Consistent with other examples pending 0.1.0 API migration
//! use scirs2_neural::prelude::*;
//! use scirs2_core::ndarray::array;
//!
//! fn main() -> Result<()> {
//!     // Mean Squared Error (regression)
//!     let mse = MeanSquaredError::new();
//!
//!     // Cross Entropy (classification)
//!     let ce = CrossEntropyLoss::new(1e-7);
//!
//!     // Focal Loss (imbalanced classes)
//!     let focal = FocalLoss::new(2.0, None, 1e-7);
//!
//!     // Contrastive Loss (metric learning)
//!     let contrastive = ContrastiveLoss::new(1.0);
//!
//!     // Triplet Loss (metric learning)
//!     let triplet = TripletLoss::new(1.0);
//!
//!     // Compute loss
//!     let predictions = array![[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]];
//!     let targets = array![[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]];
//!     // let loss = mse.compute(&predictions.view(), &targets.view())?;
//!
//!     Ok(())
//! }
//! ```
//!
//! ### Training a Model
//!
//! ```rust,ignore
//! # IGNORED: Waiting for 0.1.0 - API needs migration to scirs2_core abstractions
//! use scirs2_neural::prelude::*;
//! use scirs2_neural::training::ValidationSettings;
//! use scirs2_core::ndarray::Array2;
//!
//! fn main() -> Result<()> {
//!     let mut rng = scirs2_core::random::Random::seed(42);
//!
//!     // Build model
//!     let mut model = Sequential::<f32>::new();
//!     model.add(Dense::new(784, 128, Some("relu"), &mut rng)?);
//!     model.add(Dense::new(128, 10, None, &mut rng)?);
//!
//!     // Training configuration
//!     let config = TrainingConfig {
//!         learning_rate: 0.001,
//!         batch_size: 32,
//!         epochs: 10,
//!         validation: Some(ValidationSettings {
//!             enabled: true,
//!             validation_split: 0.2,
//!             batch_size: 32,
//!             num_workers: 0,
//!         }),
//!         ..Default::default()
//!     };
//!
//!     // Create training session
//!     let session = TrainingSession::<f32>::new(config);
//!
//!     // Prepare data
//!     let x_train = Array2::<f32>::zeros((1000, 784));
//!     let y_train = Array2::<f32>::zeros((1000, 10));
//!
//!     // Train
//!     // session.fit(&x_train, &y_train)?;
//!
//!     Ok(())
//! }
//! ```
//!
//! ## 🧠 Available Layers
//!
//! ### Core Layers
//!
//! - **`Dense`**: Fully connected (linear) layer
//! - **`Conv2D`**: 2D convolutional layer
//! - **`LSTM`**: Long Short-Term Memory recurrent layer
//!
//! ### Activation Layers
//!
//! - **`ReLU`**: Rectified Linear Unit
//! - **`Sigmoid`**: Sigmoid activation
//! - **`Tanh`**: Hyperbolic tangent
//! - **`GELU`**: Gaussian Error Linear Unit
//! - **`Softmax`**: Softmax for classification
//!
//! ### Normalization Layers
//!
//! - **`BatchNorm`**: Batch normalization
//! - **`LayerNorm`**: Layer normalization
//!
//! ### Regularization Layers
//!
//! - **`Dropout`**: Random dropout for regularization
//!
//! ## 📊 Loss Functions
//!
//! ### Regression
//!
//! - **`MeanSquaredError`**: L2 loss for regression (see the sketch below)
//!
//! ### Classification
//!
//! - **`CrossEntropyLoss`**: Standard classification loss
//! - **`FocalLoss`**: For imbalanced classification
//!
//! ### Metric Learning
//!
//! - **`ContrastiveLoss`**: Pairwise similarity learning
//! - **`TripletLoss`**: Triplet-based metric learning
//!
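//! For orientation, the snippet below spells out what `MeanSquaredError`
//! computes using plain ndarray operations. It is an illustrative sketch of
//! the formula, not this crate's implementation:
//!
//! ```rust,ignore
//! use scirs2_core::ndarray::array;
//!
//! let predictions = array![[0.7_f32, 0.2, 0.1], [0.1, 0.8, 0.1]];
//! let targets = array![[1.0_f32, 0.0, 0.0], [0.0, 1.0, 0.0]];
//!
//! // MSE = mean over all elements of (prediction - target)^2
//! let mse = (&predictions - &targets).mapv(|d| d * d).mean().unwrap();
//! assert!(mse > 0.0);
//! ```
//!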
//! ## 🎨 Design Philosophy
//!
//! scirs2-neural follows PyTorch's design philosophy:
//!
//! - **Layer-based**: Composable building blocks
//! - **Explicit**: Clear forward/backward passes
//! - **Flexible**: Easy to extend with custom layers (see the sketch below)
//! - **Type-safe**: Leverage Rust's type system
//!
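//! The sketch below shows the general shape of a custom layer. It assumes a
//! `forward` method like the ones the built-in layers expose; the exact
//! `Layer` trait in this crate may differ, so treat it as illustrative and
//! see `examples/custom_layer.rs` for a complete version:
//!
//! ```rust,ignore
//! # IGNORED: illustrative sketch; the real `Layer` trait may differ
//! use scirs2_neural::prelude::*;
//! use scirs2_core::ndarray::Array2;
//!
//! /// Hypothetical layer that scales its input by a constant factor.
//! struct Scale {
//!     factor: f32,
//! }
//!
//! impl Scale {
//!     fn new(factor: f32) -> Self {
//!         Self { factor }
//!     }
//!
//!     // Assumed signature, mirroring the `forward` calls shown above.
//!     fn forward(&self, input: &Array2<f32>) -> Result<Array2<f32>> {
//!         Ok(input * self.factor)
//!     }
//! }
//! ```
//!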
//! ## 🔗 Integration with SciRS2 Ecosystem
//!
//! - **scirs2-autograd**: Automatic differentiation support
//! - **scirs2-linalg**: Matrix operations and decompositions
//! - **scirs2-metrics**: Model evaluation metrics
//! - **scirs2-datasets**: Sample datasets for training
//! - **scirs2-vision**: Computer vision utilities
//! - **scirs2-text**: Text processing for NLP models
//!
//! ## 🚀 Performance
//!
//! scirs2-neural provides multiple optimization paths:
//!
//! - **Pure Rust**: Fast, safe implementations
//! - **SIMD**: Vectorized operations where applicable
//! - **Parallel**: Multi-threaded training
//! - **GPU**: CUDA/Metal support (via scirs2-core)
//!
//! ## 📚 Comparison with PyTorch
//!
//! | Feature | PyTorch | scirs2-neural |
//! |---------|---------|---------------|
//! | Layer-based API | ✅ | ✅ |
//! | Autograd | ✅ | ✅ (via scirs2-autograd) |
//! | GPU Support | ✅ | ✅ (limited) |
//! | Dynamic Graphs | ✅ | ✅ |
//! | JIT Compilation | ✅ | ⚠️ (planned) |
//! | Production Deployment | ⚠️ | ✅ (native Rust) |
//! | Type Safety | ❌ | ✅ |
//!
//! ## 📜 Examples
//!
//! See the `examples/` directory for complete examples:
//!
//! - `mnist_mlp.rs` - Multi-layer perceptron for MNIST
//! - `cifar_cnn.rs` - Convolutional network for CIFAR-10
//! - `sentiment_lstm.rs` - LSTM for sentiment analysis
//! - `custom_layer.rs` - Creating custom layers
//!
//! ## 🔒 Version
//!
//! Current version: **0.1.0-rc.1** (released October 3, 2025)

pub mod activations_minimal;
pub mod autograd;
pub mod error;
// pub mod gpu; // Disabled in minimal version - has syntax errors
pub mod layers;
pub mod losses;
pub mod training;
pub mod utils;

pub use activations_minimal::{Activation, ReLU, Sigmoid, Softmax, Tanh, GELU};
pub use error::{Error, NeuralError, Result};
pub use layers::{BatchNorm, Conv2D, Dense, Dropout, Layer, LayerNorm, Sequential, LSTM};
pub use losses::{
    ContrastiveLoss, CrossEntropyLoss, FocalLoss, Loss, MeanSquaredError, TripletLoss,
};
pub use training::{TrainingConfig, TrainingSession};

/// Prelude module with core functionality
///
/// Import everything you need to get started:
///
/// ```rust
/// use scirs2_neural::prelude::*;
/// ```
pub mod prelude {
    pub use crate::{
        activations_minimal::{Activation, ReLU, Sigmoid, Softmax, Tanh, GELU},
        error::{Error, NeuralError, Result},
        layers::{BatchNorm, Conv2D, Dense, Dropout, Layer, LayerNorm, Sequential, LSTM},
        losses::{
            ContrastiveLoss, CrossEntropyLoss, FocalLoss, Loss, MeanSquaredError, TripletLoss,
        },
        training::{TrainingConfig, TrainingSession},
    };
}