pub struct Expr {
    pub result: f64,
    pub is_learnable: bool,
    pub name: String,
    /* private fields */
}
Expression representing a node in a calculation graph.
This struct represents a node in a calculation graph. It can be a leaf node, a unary operation, or a binary operation.
A leaf node holds a plain value, which is used directly in the calculation.
A unary expression is the result of applying a unary operation to another expression. For example, the result of applying the tanh operation to a leaf node.
A binary expression is the result of applying a binary operation to two other expressions. For example, the result of adding two leaf nodes.
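For illustration, the three kinds of node can be built directly from the constructor and the operations documented below (the printed value is approximate):
use alpha_micrograd_rust::value::Expr;
let x = Expr::new_leaf(1.0, "x");        // leaf node
let y = Expr::new_leaf(2.0, "y");        // leaf node
let sum = x + y;                         // binary expression: x + y
let out = sum.tanh("out");               // unary expression: tanh(x + y)
println!("Result: {}", out.result);      // approximately 0.995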
Fields
result: f64
The numeric result of the expression, obtained by applying the operation to the operands.
is_learnable: bool
Whether the expression is learnable. Only learnable Expr nodes have their values updated during backpropagation (learning).
name: String
The name of the expression, used to identify it in the calculation graph.
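All three fields are public and can be read or set directly; a short sketch (values are arbitrary):
use alpha_micrograd_rust::value::Expr;
let mut x = Expr::new_leaf(1.0, "x");
x.is_learnable = false;                  // keep this node fixed during learning
x.name = "input".to_string();            // rename the node
println!("Value: {}", x.result);         // 1.0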
Implementations
impl Expr
pub fn new_leaf(value: f64, name: &str) -> Expr
Creates a new leaf expression with the given value and name.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
Examples found in repository (excerpts from the crate's example programs; their imports are omitted):
fn main() {
    let mut target = Expr::new_leaf(50.0, "target");
    target.is_learnable = false;

    let neuron = Neuron::new(3, Activation::None);
    println!("Initial values: {:}", neuron);

    let mut inputs = vec![
        Expr::new_leaf(1.0, "x_1"),
        Expr::new_leaf(2.0, "x_2"),
        Expr::new_leaf(3.0, "x_3"),
    ];

    inputs.iter_mut().for_each(|input| {
        input.is_learnable = false;
    });

    let mut y = neuron.forward(inputs);
    y.name = "y".to_string();

    let difference = y - target;
    let mut square_exponent = Expr::new_leaf(2.0, "square_exponent");
    square_exponent.is_learnable = false;

    let mut loss = difference.pow(square_exponent, "loss");

    let target = loss.find("target").unwrap();
    let y = loss.find("y").unwrap();
    println!("Initial target: {:.2}", target.result);
    println!("Predicted: {:.2}", y.result);
    println!("Initial loss: {:.2}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.01;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let y = loss.find("y").expect("Node not found");
        let target = loss.find("target").expect("Node not found");

        println!(
            "Iteration {:3}, loss: {:9.4} / predicted: {:.2} (target: {:.2})",
            i, loss.result, y.result, target.result
        );
    }

    println!("Final values: {:}", neuron);
}

fn main() {
    let mut target = vec![Expr::new_leaf(15.0, "t1"), Expr::new_leaf(85.0, "t2")];
    target[0].is_learnable = false;
    target[1].is_learnable = false;

    let layer = Layer::new(3, 2, Activation::None);
    println!("Initial values: {:}", layer);

    let mut inputs = vec![
        Expr::new_leaf(1.0, "x_1"),
        Expr::new_leaf(2.0, "x_2"),
        Expr::new_leaf(3.0, "x_3"),
    ];

    inputs.iter_mut().for_each(|input| {
        input.is_learnable = false;
    });

    let mut y = layer.forward(inputs);
    let mut y1 = y.remove(0);
    y1.name = "y1".to_string();
    let mut y2 = y.remove(0);
    y2.name = "y2".to_string();

    let d1 = y1 - target[0].clone();
    let mut sqr1 = Expr::new_leaf(2.0, "square_exponent1");
    sqr1.is_learnable = false;

    let d2 = y2 - target[1].clone();
    let mut sqr2 = Expr::new_leaf(2.0, "square_exponent2");
    sqr2.is_learnable = false;

    let mut loss = d1.pow(sqr1, "diff1") + d2.pow(sqr2, "diff2");

    let t1 = loss.find("t1").unwrap();
    let t2 = loss.find("t2").unwrap();
    let y1 = loss.find("y1").unwrap();
    let y2 = loss.find("y2").unwrap();

    println!("Initial targets: {:.2}, {:.2}", t1.result, t2.result);
    println!("Predicted: {:.2}, {:.2}", y1.result, y2.result);
    println!("Initial loss: {:.2}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.004;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let t1 = loss.find("t1").unwrap();
        let t2 = loss.find("t2").unwrap();
        let y1 = loss.find("y1").unwrap();
        let y2 = loss.find("y2").unwrap();

        println!(
            "Iteration {:3}, loss: {:9.4} / predicted: {:5.2}, {:5.2} (targets: {:5.2}, {:5.2})",
            i, loss.result, y1.result, y2.result, t1.result, t2.result
        );
    }

    println!("Final values: {:}", layer);
}

fn main() {
    // these are the initial values for the nodes of the graph
    let mut x1 = Expr::new_leaf(2.0, "x1");
    x1.is_learnable = false;

    let mut x2 = Expr::new_leaf(1.0, "x2");
    x2.is_learnable = false;

    let w1 = Expr::new_leaf(-3.0, "w1");
    let w2 = Expr::new_leaf(1.0, "w2");
    let b = Expr::new_leaf(6.5, "b");

    // here we compute the expression x1*w1 + x2*w2 + b
    let x1w1 = x1 * w1;
    let x2w2 = x2 * w2;
    let x1w1_x2w2 = x1w1 + x2w2;
    let n = x1w1_x2w2 + b;

    // we add a non-linear activation function: tanh(x1*w1 + x2*w2 + b)
    let o = n.tanh("o");

    println!("Initial output: {:.2}", o.result);

    // we set the target value
    let target_value = 0.2;
    let mut target = Expr::new_leaf(target_value, "target");
    target.is_learnable = false;

    // we compute the loss function
    let mut squared_exponent = Expr::new_leaf(2.0, "squared_exponent");
    squared_exponent.is_learnable = false;

    let mut loss = (o - target).pow(squared_exponent, "loss");
    loss.is_learnable = false;

    // we print the initial loss
    println!("Initial loss: {:.4}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.01;
    for i in 1..=50 {
        loss.learn(learning_rate);
        loss.recalculate();

        let target = loss.find("o").expect("Node not found");

        println!(
            "Iteration {:2}, loss: {:.4} / result: {:.2}",
            i, loss.result, target.result
        );
    }

    let w1 = loss.find("w1").expect("Node not found");
    let w2 = loss.find("w2").expect("Node not found");
    let b = loss.find("b").expect("Node not found");

    println!(
        "\nFinal values: w1: {:.2}, w2: {:.2}, b: {:.2}",
        w1.result, w2.result, b.result
    );

    let x1 = loss.find("x1").expect("Node not found");
    let x2 = loss.find("x2").expect("Node not found");

    let n = loss
        .find("(((x1 * w1) + (x2 * w2)) + b)") // auto-generated node name
        .expect("Node not found");
    let o = loss.find("o").expect("Node not found");

    println!(
        "Final formula: tanh({:.2}*{:.2} + {:.2}*{:.2} + {:.2}) = tanh({:.2}) = {:.2}",
        x1.result, w1.result, x2.result, w2.result, b.result, n.result, o.result
    )
}

fn main() {
    let mut targets = vec![
        Expr::new_leaf(150.0, "t1"),
        Expr::new_leaf(250.0, "t2"),
        Expr::new_leaf(350.0, "t3"),
    ];
    targets.iter_mut().for_each(|target| {
        target.is_learnable = false;
    });

    let mlp = MLP::new(
        3,
        Activation::Tanh,
        vec![2, 2],
        Activation::Tanh,
        1,
        Activation::None,
    );
    println!("Initial values: {:}", mlp);

    let mut inputs = vec![
        vec![
            Expr::new_leaf(1.0, "x_1,1"),
            Expr::new_leaf(2.0, "x_1,2"),
            Expr::new_leaf(3.0, "x_1,3"),
        ],
        vec![
            Expr::new_leaf(4.0, "x_2,1"),
            Expr::new_leaf(5.0, "x_2,2"),
            Expr::new_leaf(6.0, "x_2,3"),
        ],
        vec![
            Expr::new_leaf(7.0, "x_3,1"),
            Expr::new_leaf(8.0, "x_3,2"),
            Expr::new_leaf(9.0, "x_3,3"),
        ],
    ];

    inputs.iter_mut().for_each(|instance| {
        instance.iter_mut().for_each(|value| {
            value.is_learnable = false;
        });
    });

    let predictions = inputs
        // for each example, make a prediction
        .iter()
        .map(|example| mlp.forward(example.clone()))
        // name these predictions y1, y2, y3
        .enumerate()
        .map(|(i, mut y)| {
            // the result is a vector but it's a single value because we specified 1 output neuron
            let mut result = y.remove(0);
            result.name = format!("y{:}", i + 1).to_string();
            result
        })
        // collect them into a single vector
        .collect::<Vec<_>>();

    let differences = predictions
        .iter()
        .zip(targets.iter())
        .map(|(y, t)| y.clone() - t.clone())
        .collect::<Vec<_>>();
    let mut loss = differences
        .iter()
        .map(|d| d.clone() * d.clone())
        .sum::<Expr>();

    let y1 = loss.find("y1").unwrap();
    let y2 = loss.find("y2").unwrap();
    let y3 = loss.find("y3").unwrap();
    println!("Initial loss: {:.2}", loss.result);
    println!(
        "Initial predictions: {:5.2} {:5.2} {:5.2}",
        y1.result, y2.result, y3.result
    );

    println!("\nTraining:");
    let learning_rate = 0.025;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let t1 = loss.find("t1").unwrap();
        let t2 = loss.find("t2").unwrap();
        let t3 = loss.find("t3").unwrap();

        let y1 = loss.find("y1").unwrap();
        let y2 = loss.find("y2").unwrap();
        let y3 = loss.find("y3").unwrap();

        println!(
            "Iteration {:3}, loss: {:11.4} / predicted: {:5.2}, {:5.2}, {:5.2} (targets: {:5.2}, {:5.2}, {:5.2})",
            i, loss.result, y1.result, y2.result, y3.result, t1.result, t2.result, t3.result
        );
    }

    println!("Final values: {:}", mlp);
}
pub fn tanh(self, name: &str) -> Expr
Applies the hyperbolic tangent function to the expression and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.tanh("tanh");
println!("Result: {}", expr2.result); // 0.7615941559557649Examples found in repository?
pub fn relu(self, name: &str) -> Expr
Applies the rectified linear unit (ReLU) function to the expression and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(-1.0, "x");
let expr2 = expr.relu("relu");
println!("Result: {}", expr2.result); // 0.0Sourcepub fn exp(self, name: &str) -> Expr
pub fn exp(self, name: &str) -> Expr
Applies the exponential function (e^x) to the expression and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.exp("exp");
println!("Result: {}", expr2.result); // 2.718281828459045Sourcepub fn pow(self, exponent: Expr, name: &str) -> Expr
pub fn pow(self, exponent: Expr, name: &str) -> Expr
Raises the expression to the power of the given exponent (itself an Expr) and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(2.0, "x");
let exponent = Expr::new_leaf(3.0, "y");
let result = expr.pow(exponent, "x^y");
println!("Result: {}", result.result); // 8.0Examples found in repository?
pub fn log(self, name: &str) -> Expr
Applies the natural logarithm to the expression and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(2.0, "x");
let expr2 = expr.log("log");
println!("Result: {}", expr2.result); // 0.6931471805599453Sourcepub fn neg(self, name: &str) -> Expr
pub fn neg(self, name: &str) -> Expr
Negates the expression and returns the result as a new expression.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.neg("neg");
println!("Result: {}", expr2.result); // -1.0Sourcepub fn recalculate(&mut self)
pub fn recalculate(&mut self)
Recalculates the value of the expression recursively, using the current values of its operands.
This is typically called after Expr::learn, once the gradients have been computed and
the internal values of the expression tree have been updated.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let mut expr2 = expr.tanh("tanh(x)");
expr2.learn(1e-09);
expr2.recalculate();
pub fn learn(&mut self, learning_rate: f64)
Applies backpropagation to the expression, updating the values of the gradients and the expression itself.
This method updates each node's gradient based on the gradient of the final expression in the calculation graph (the expression on which learn is called).
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let mut expr2 = expr.tanh("tanh(x)");
expr2.learn(1e-09);
After adjusting the gradients, the method will update the values of the individual expression tree nodes to minimize the loss function.
To obtain freshly computed results for the expression tree, call
Expr::recalculate after calling Expr::learn.
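A minimal end-to-end sketch of that loop (node values and the learning rate are chosen only for illustration):
use alpha_micrograd_rust::value::Expr;
let x = Expr::new_leaf(1.0, "x");
let mut target = Expr::new_leaf(0.5, "target");
target.is_learnable = false;
let mut exponent = Expr::new_leaf(2.0, "exponent");
exponent.is_learnable = false;

// squared-error loss between tanh(x) and the target
let mut loss = (x.tanh("y") - target).pow(exponent, "loss");

let learning_rate = 0.01;
for _ in 0..10 {
    loss.learn(learning_rate);   // backpropagate and adjust learnable nodes
    loss.recalculate();          // recompute results with the updated values
}
println!("Loss after training: {}", loss.result);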
pub fn find(&self, name: &str) -> Option<&Expr>
Finds a node in the expression tree by its name.
This method will search the expression tree for a node with the given name. If the node is not found, it will return None.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.tanh("tanh(x)");
let original = expr2.find("x");
assert_eq!(original.expect("Could not find x").result, 1.0);
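Intermediate nodes created by operators receive auto-generated names built from their operand names (the repository example under new_leaf looks up "(((x1 * w1) + (x2 * w2)) + b)"). A sketch assuming that naming pattern:
use alpha_micrograd_rust::value::Expr;
let x = Expr::new_leaf(1.0, "x");
let y = Expr::new_leaf(2.0, "y");
let out = (x + y).tanh("out");
// the sum was never named explicitly; "(x + y)" is its assumed auto-generated name
if let Some(sum) = out.find("(x + y)") {
    println!("Found intermediate node with result {}", sum.result);
}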
Trait Implementations
impl Add<Expr> for f64
impl Add<f64> for Expr
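These implementations let an Expr and a plain f64 be added in either order; the scalar is presumably wrapped as a leaf node internally. A small sketch (the printed values follow from ordinary addition):
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let result = expr + 2.0;                 // Expr + f64
println!("Result: {}", result.result);   // 3.0
let expr2 = Expr::new_leaf(1.0, "x");
let result2 = 2.0 + expr2;               // f64 + Expr
println!("Result: {}", result2.result);  // 3.0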
impl Add for Expr
This implementation allows the addition of two Expr objects.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let result = expr + expr2;
println!("Result: {}", result.result); // 3.0Source§impl Div<f64> for Expr
impl Div<f64> for Expr
impl Div for Expr
This implementation allows the division of two Expr objects.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let result = expr / expr2;
println!("Result: {}", result.result); // 0.5Source§impl Mul<Expr> for f64
impl Mul<Expr> for f64
impl Mul<f64> for Expr
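Likewise, an Expr can be multiplied by a plain f64 on either side. A small sketch (the result follows from ordinary multiplication):
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(3.0, "x");
let result = expr * 2.0;                 // Expr * f64
println!("Result: {}", result.result);   // 6.0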
impl Mul for Expr
This implementation allows the multiplication of two Expr objects.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let result = expr * expr2;
println!("Result: {}", result.result); // 2.0Source§impl Sub<Expr> for f64
impl Sub<Expr> for f64
impl Sub<f64> for Expr
impl Sub for Expr
This implementation allows the subtraction of two Expr objects.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let result = expr - expr2;
println!("Result: {}", result.result); // -1.0Source§impl Sum for Expr
impl Sum for Expr
Note that this implementation generates temporary Expr objects,
which may not be the most efficient way to sum a collection of Expr objects.
However, it is provided as a convenience for users who want to call sum
on an iterator of Expr values.
Example:
use alpha_micrograd_rust::value::Expr;
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let expr3 = Expr::new_leaf(3.0, "z");
let sum = vec![expr, expr2, expr3].into_iter().sum::<Expr>();
println!("Result: {}", sum.result); // 6.0