// SPDX-License-Identifier: MIT OR Apache-2.0
//! Neural network module — MLP and CNN layers.
//!
//! Provides [`MLPClassifier`] and [`MLPRegressor`] with an sklearn-compatible
//! builder API, GPU-accelerated forward pass, and training history tracking.
//!
//! Also provides CNN building blocks: [`Conv2D`], [`MaxPool2D`], [`Flatten`],
//! and the [`Layer`] trait for composing custom architectures.
//!
//! # GPU Acceleration
//!
//! When a GPU compute backend is available, the **forward pass** is dispatched
//! to it for MLP networks whose `batch × max_layer_dim` exceeds the internal
//! `GPU_THRESHOLD` (4096). The **backward pass is always executed on the CPU**
//! regardless of backend availability — gradient computation has not been
//! ported to GPU yet. This means training speed is bounded by CPU backward-pass
//! throughput even when a GPU accelerates inference.
//!
//! # Example
//!
//! ```ignore
//! use scry_learn::prelude::*;
//!
//! let data = Dataset::from_csv("iris.csv", "species")?;
//! let (train, test) = train_test_split(&data, 0.2, 42);
//!
//! let mut clf = MLPClassifier::new()
//!     .hidden_layers(&[100, 50])
//!     .activation(Activation::Relu)
//!     .learning_rate(0.001)
//!     .seed(42);
//! clf.fit(&train)?;
//!
//! let preds = clf.predict(&test.features_row_major())?;
//! let acc = accuracy(&test.target, &preds);
//! println!("Accuracy: {acc:.2}%");
//!
//! // Inspect training history
//! let loss = clf.history().unwrap().epochs.last().unwrap().train_loss;
//! println!("Final loss: {loss:.4}");
//! ```
pub
pub
pub
pub use Activation;
pub use ;
pub use MLPClassifier;
pub use Conv2D;
pub use DropoutLayer;
pub use Flatten;
pub use ;
pub use MaxPool2D;
pub use MLPRegressor;
pub use ;