pub struct Value(/* private fields */);
Implementations
impl Value
pub fn from<T>(t: T) -> Value
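Constructs a new Value from a plain scalar such as an f64, creating a leaf node in the computation graph. A minimal sketch of typical use, assuming Value is in scope from this crate:

    let x = Value::from(2.0);
    assert_eq!(2.0, x.data());

    // Values built this way combine with the overloaded operators
    // used in the repository examples below (+, -, *), producing new graph nodes.
    let y = Value::from(3.0);
    let z = x * y;
    assert_eq!(6.0, z.data());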
Examples found in repository
examples/scalar_back_propagation.rs (line 6)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
More examples
examples/gradient_descent.rs (line 21)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
pub fn with_label(self, label: &str) -> Value
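Attaches a human-readable label to this Value and returns it, builder style. The repository examples label every node and then debug-print the output with println!("{:?}", o), so the label is presumably there to make that printed graph readable. A small sketch:

    let w = Value::from(-3.0).with_label("w1");
    // Labelling is purely descriptive; the stored data is unchanged.
    assert_eq!(-3.0, w.data());
    println!("{:?}", w); // Debug output, which should include the "w1" label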
Examples found in repository
examples/scalar_back_propagation.rs (line 6); the full listing appears under from above.
pub fn data(&self) -> f64
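Returns the scalar currently held by this node as an f64: the raw input for leaf Values and the forward-pass result for derived ones. In scalar_back_propagation.rs, for example, the output o = n.tanh() reports o.data() ≈ 0.7071. A small sketch:

    let a = Value::from(2.0);
    let b = Value::from(0.5);
    let c = a + b;
    // data() reads the forward-pass result without consuming the Value.
    assert_eq!(2.5, c.data());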
Examples found in repository
examples/scalar_back_propagation.rs (line 25) and examples/gradient_descent.rs (line 23); the full listings appear under from above.
pub fn gradient(&self) -> f64
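Returns the gradient accumulated on this node by the most recent backward() call, i.e. the derivative of the output with respect to this Value. In scalar_back_propagation.rs, x1 ends up with a gradient of -1.5 after o.backward(). A small sketch, assuming (as that example implies) that backward() seeds the output node's gradient with 1:

    let x = Value::from(2.0);
    // Keep a clone as a handle; the * operator consumes its operands,
    // but clones share the same underlying node and gradient.
    let y = x.clone() * Value::from(3.0); // y = 3x, so dy/dx = 3
    y.backward();
    assert_eq!(3.0, x.gradient());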
Examples found in repository
examples/scalar_back_propagation.rs (line 26); the full listing appears under from above.
pub fn clear_gradient(&self)
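Resets the gradient stored on this node (presumably back to zero) so that the next backward() pass starts from a clean slate instead of accumulating across iterations. This is why gradient_descent.rs clears every parameter before each backward pass. A sketch of the usual per-iteration ordering, using the mlp and loss from that example:

    // 1. clear stale gradients, 2. backpropagate, 3. take a descent step
    mlp.parameters().iter().for_each(|p| p.clear_gradient());
    loss.backward();
    mlp.parameters().iter().for_each(|p| p.adjust(-0.05));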
Examples found in repository
examples/gradient_descent.rs (line 36); the full listing appears under from above.
pub fn adjust(&self, factor: f64)
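Adjusts the stored data using the accumulated gradient, scaled by factor; with a negative factor this amounts to a gradient-descent step, which is how gradient_descent.rs uses it (p.adjust(-0.05)). The exact update rule is not documented here; a plausible reading is data += factor * gradient. A hedged sketch under that assumption:

    // Assumed update rule: data += factor * gradient (not confirmed by these docs).
    let w = Value::from(1.0);
    let loss = w.clone() * Value::from(2.0); // d(loss)/dw = 2
    loss.backward();
    w.adjust(-0.1); // would move w from 1.0 to roughly 0.8 under that assumption
    println!("w after step: {}", w.data());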
Examples found in repository
examples/gradient_descent.rs (line 40); the full listing appears under from above.
pub fn pow(&self, other: &Value) -> Value
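Raises this Value to the power given by other, producing a new differentiable node. gradient_descent.rs uses it to square the per-sample error: (yp - yg).pow(&Value::from(2.0)). A small sketch:

    let x = Value::from(3.0);
    let y = x.pow(&Value::from(2.0)); // y = x^2
    println!("{}", y.data()); // 9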
Examples found in repository
examples/gradient_descent.rs (line 30); the full listing appears under from above.
pub fn tanh(&self) -> Value
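Applies the hyperbolic tangent to this Value and returns the result as a new node. It serves as the activation on the neuron output in scalar_back_propagation.rs, where n.tanh() comes out to roughly 0.7071. A small sketch:

    let n = Value::from(0.0);
    let o = n.tanh();
    // tanh(0) = 0; larger inputs saturate towards ±1.
    assert_eq!(0.0, o.data());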
Examples found in repository
examples/scalar_back_propagation.rs (line 21); the full listing appears under from above.
pub fn backward(&self)
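Runs reverse-mode automatic differentiation starting from this node: it walks the graph that produced this Value and fills in gradient() for every node that contributed to it. Call it on the final output (o in the scalar example, loss in the training loop), after clearing any stale gradients. A small sketch:

    let x = Value::from(2.0).with_label("x");
    let x_handle = x.clone();               // keep a handle, since * consumes x
    let y = (x * Value::from(-3.0)).tanh(); // y = tanh(-3x)
    y.backward();
    // After backward(), every upstream node carries d(y)/d(node).
    println!("dy/dx = {}", x_handle.gradient());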
Examples found in repository
examples/scalar_back_propagation.rs (line 23) and examples/gradient_descent.rs (line 37); the full listings appear under from above.
Trait Implementations
impl Eq for Value
impl StructuralPartialEq for Value
Auto Trait Implementations
impl Freeze for Value
impl !RefUnwindSafe for Value
impl !Send for Value
impl !Sync for Value
impl Unpin for Value
impl !UnwindSafe for Value
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.