gradient_descent/gradient_descent.rs

use micrograd_rs::{Value, MLP};

fn main() {
    // A small multi-layer perceptron: 3 inputs, two hidden layers of 4 neurons, 1 output.
    let mlp = MLP::new(3, vec![4, 4, 1]);

    // Four training inputs...
    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    // ...and their target outputs.
    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass: run every input through the network and keep the single output.
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Sum-of-squared-errors loss against the ground-truth targets.
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Zero the gradients, backpropagate through the loss, then take a gradient-descent step.
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
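For reference, the quantity being minimized is the sum of squared errors over the four samples, and (assuming `adjust(factor)` nudges each parameter by `factor` times its accumulated gradient) each iteration amounts to a plain gradient-descent update with a learning rate of 0.05:

$$L = \sum_{i=1}^{4} (\hat{y}_i - y_i)^2, \qquad p \leftarrow p - 0.05\,\frac{\partial L}{\partial p}$$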