pub struct BasicTrainer<const N: usize, const O: usize> { /* private fields */ }
A simple struct for training neural networks against a DataSet.
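The const parameters tie the pieces together: a BasicTrainer<N, O> is created from a DataSet<N, O> and trains a NeuralNetwork<N, O>, so the number of inputs (N) and outputs (O) is checked at compile time. Below is a minimal sketch with the shapes written out; the 2-input, 1-output data is made up purely for illustration, and the crate's import path is not shown on this page, so it is left as a placeholder comment.

// use <crate>::{ActivationFunction, BasicTrainer, DataSet, NeuralNetwork};

fn main() {
    // a DataSet<2, 1>: every input has 2 values, every expected output has 1
    let data: DataSet<2, 1> = DataSet {
        inputs: vec![[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]],
        outputs: vec![[0.0], [1.0], [1.0], [0.0]],
    };

    // the network has to match that shape ...
    let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
        .add_layer(4, ActivationFunction::ReLU)
        .add_layer(1, ActivationFunction::Linear);

    // ... and so does the trainer, which is inferred here as BasicTrainer<2, 1>
    let trainer = BasicTrainer::new(data);
    trainer.train(&mut net, 100);
    // lower is better
    println!("total error: {}", trainer.get_total_error(&net));
}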
Implementations§
impl<const N: usize, const O: usize> BasicTrainer<N, O>
pub fn new(data: DataSet<N, O>) -> Self
Examples found in repository
examples/training.rs
fn main() {
    // create a neural network
    let mut net = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);

    // create training data
    let mut inputs = vec![];
    let mut outputs = vec![];
    for i in -50..50 {
        inputs.push([i as f32]);
        outputs.push([i as f32 * 3.0]);
    }
    let data = DataSet { inputs, outputs };

    let trainer = BasicTrainer::new(data);

    // train the model
    for _ in 0..10 {
        trainer.train(&mut net, 10);
        // lower is better
        println!("{}", trainer.get_total_error(&net));
    }

    // show that this actually works!
    println!("########");
    for i in -5..5 {
        println!("{}", &net.run(&[i as f32 + 0.5])[0]);
    }
}
More examples
examples/circle.rs
fn main() {
    // this network is completely overkill, but it does the job
    let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
        .add_layer(3, ActivationFunction::ReLU)
        .add_layer(3, ActivationFunction::ReLU)
        // this layer reduces everything to a single output
        .add_layer(1, ActivationFunction::Linear);

    let mut inputs = vec![];
    let mut outputs = vec![];
    for x in 0..=100 {
        for y in 0..=100 {
            inputs.push([x as f32, y as f32]);
            // we want this to be a classifier, so we ask it for a result greater than or less than zero
            outputs.push(if (x as f32).abs() + (y as f32).abs() < 30.0 {
                [1.0]
            } else {
                [-1.0]
            });
        }
    }

    let data = DataSet { inputs, outputs };

    let trainer = BasicTrainer::new(data);
    for _ in 0..50 {
        trainer.train(&mut net, 10);
        println!("{}", trainer.get_total_error(&net));
    }
}
pub fn train(&self, net: &mut NeuralNetwork<N, O>, iterations: usize)
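The trainer is borrowed immutably while the network is borrowed mutably, so a single trainer (and the data set it owns) can be reused to train and compare several candidate networks. How much work one unit of iterations corresponds to (a full pass over the data set or a single update step) is not documented on this page, so the counts in the sketch below are placeholders, and the import path is again left as a comment.

// use <crate>::{ActivationFunction, BasicTrainer, DataSet, NeuralNetwork};

fn main() {
    // a toy regression problem: learn f(x) = -x
    let inputs: Vec<[f32; 1]> = (-20..20).map(|i| [i as f32]).collect();
    let outputs: Vec<[f32; 1]> = (-20..20).map(|i| [-(i as f32)]).collect();
    let trainer = BasicTrainer::new(DataSet { inputs, outputs });

    // two candidate networks trained by the same trainer on the same data
    let mut small = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);
    let mut large = NeuralNetwork::new()
        .add_layer(4, ActivationFunction::ReLU)
        .add_layer(1, ActivationFunction::Linear);

    trainer.train(&mut small, 100);
    trainer.train(&mut large, 100);

    // lower is better
    println!("small: {}", trainer.get_total_error(&small));
    println!("large: {}", trainer.get_total_error(&large));
}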
Examples found in repository
examples/training.rs and examples/circle.rs (shown in full under new above) both call train in a loop, interleaving it with get_total_error to watch the error fall.
pub fn get_total_error(&self, net: &NeuralNetwork<N, O>) -> f32
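Judging by the signature and the examples, get_total_error measures the network's error over the trainer's data set; as the example comments put it, lower is better. The absolute scale depends on the data, so the value is most useful for comparing snapshots of the same network, for example as a stopping condition. A small sketch of that pattern, assuming trainer and a mutable net are set up as in the examples above; the round count and the 1% improvement cut-off are arbitrary choices for this sketch.

// assumes `trainer` and a mutable `net` with matching N and O already exist
let mut previous = trainer.get_total_error(&net);
for round in 0..100 {
    trainer.train(&mut net, 10);
    let current = trainer.get_total_error(&net);
    // stop once an extra round of training improves the error by less than 1%
    if current >= previous * 0.99 {
        println!("stopping after {} rounds, error {}", round + 1, current);
        break;
    }
    previous = current;
}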
Examples found in repository
examples/training.rs and examples/circle.rs (shown in full under new above) print get_total_error after every call to train; as the comments there note, lower is better.
Auto Trait Implementations§
impl<const N: usize, const O: usize> Freeze for BasicTrainer<N, O>
impl<const N: usize, const O: usize> RefUnwindSafe for BasicTrainer<N, O>
impl<const N: usize, const O: usize> Send for BasicTrainer<N, O>
impl<const N: usize, const O: usize> Sync for BasicTrainer<N, O>
impl<const N: usize, const O: usize> Unpin for BasicTrainer<N, O>
impl<const N: usize, const O: usize> UnwindSafe for BasicTrainer<N, O>
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.