pub struct MLP { /* private fields */ }
Implementations§
Source § impl MLP
impl MLP
Source · pub fn new(input_count: usize, output_counts: Vec<usize>) -> MLP
pub fn new(input_count: usize, output_counts: Vec<usize>) -> MLP
Examples found in repository?
examples/gradient_descent.rs (line 6)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source · pub fn forward(&self, xs: Vec<Value>) -> Vec<Value>
pub fn forward(&self, xs: Vec<Value>) -> Vec<Value>
Examples found in repository?
examples/gradient_descent.rs (line 21)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source · pub fn parameters(&self) -> Vec<Value>
pub fn parameters(&self) -> Vec<Value>
Examples found in repository?
examples/gradient_descent.rs (line 36)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Trait Implementations§
Auto Trait Implementations§
impl Freeze for MLP
impl !RefUnwindSafe for MLP
impl !Send for MLP
impl !Sync for MLP
impl Unpin for MLP
impl !UnwindSafe for MLP
Blanket Implementations§
Source § impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source · fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more