Struct Expr

Source
pub struct Expr {
    pub result: f64,
    pub is_learnable: bool,
    pub name: String,
    /* private fields */
}

Expression representing a node in a calculation graph.

This struct represents a node in a calculation graph. It can be a leaf node, a unary operation, or a binary operation.

A leaf node holds a value, which is used directly in the calculation.

A unary expression is the result of applying a unary operation to another expression. For example, the result of applying the tanh operation to a leaf node.

A binary expression is the result of applying a binary operation to two other expressions. For example, the result of adding two leaf nodes.
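For instance, a small graph combining all three node kinds can be built from the constructors and operators documented on this page (a minimal sketch; the names and values are only illustrative):

use alpha_micrograd_rust::value::Expr;
 
// leaf nodes hold plain values
let x = Expr::new_leaf(2.0, "x");
let w = Expr::new_leaf(-3.0, "w");
 
// a binary expression combines two other expressions (here, a multiplication)
let product = x * w; // result: -6.0
 
// a unary expression applies an operation to a single expression (here, tanh)
let activated = product.tanh("activated");
 
println!("Result: {}", activated.result); // approximately -0.9999877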

Fields§

§result: f64

The numeric result of the expression, obtained by applying the operation to the operands.

§is_learnable: bool

Whether the expression is learnable or not. Only learnable Expr nodes have their values updated during backpropagation (learning).
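In practice, inputs and targets are frozen by setting this flag to false so that Expr::learn only adjusts the parameters. A minimal sketch with illustrative values (freshly created leaves are left learnable, as in the repository examples):

use alpha_micrograd_rust::value::Expr;
 
let mut x = Expr::new_leaf(3.0, "x");
x.is_learnable = false; // input: frozen during learning
 
let w = Expr::new_leaf(0.5, "w"); // parameter: left learnable
 
let mut y = x * w;
y.learn(0.01);   // only the learnable leaf "w" is updated
y.recalculate(); // recompute y.result from the updated value of "w"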

§name: String

The name of the expression, used to identify it in the calculation graph.
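Intermediate nodes get auto-generated names (such as "(a + b)" in the repository's backpropagation example); renaming a node makes it easier to retrieve later with Expr::find. A minimal sketch with illustrative names:

use alpha_micrograd_rust::value::Expr;
 
let a = Expr::new_leaf(1.0, "a");
let b = Expr::new_leaf(2.0, "b");
 
let mut sum = a + b;           // auto-generated name, e.g. "(a + b)"
sum.name = "sum".to_string();  // rename for easier lookup
 
let result = sum.tanh("result");
assert!(result.find("sum").is_some());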

Implementations§

Source§

impl Expr

Source

pub fn new_leaf(value: f64, name: &str) -> Expr

Creates a new leaf expression with the given value and name.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
Examples found in repository?
examples/operations.rs (line 11)
fn main() {
    let a = Expr::new_leaf(4.0, "a");
    let b = Expr::new_leaf(2.0, "b");

    let difference = a - b;

    let square_exponent = Expr::new_leaf(2.0, "square_exponent");
    let squared_diff = difference.pow(square_exponent, "squared_diff");

    println!("squared difference: {:.2}", squared_diff.result);
}
More examples:
examples/neuron.rs (line 15)
fn main() {
    let mut target = Expr::new_leaf(50.0, "target");
    target.is_learnable = false;

    let neuron = Neuron::new(3, Activation::None);
    println!("Initial values: {:}", neuron);

    let mut inputs = vec![
        Expr::new_leaf(1.0, "x_1"),
        Expr::new_leaf(2.0, "x_2"),
        Expr::new_leaf(3.0, "x_3"),
    ];

    inputs.iter_mut().for_each(|input| {
        input.is_learnable = false;
    });

    let mut y = neuron.forward(inputs);
    y.name = "y".to_string();

    let difference = y - target;
    let mut square_exponent = Expr::new_leaf(2.0, "square_exponent");
    square_exponent.is_learnable = false;

    let mut loss = difference.pow(square_exponent, "loss");

    let target = loss.find("target").unwrap();
    let y = loss.find("y").unwrap();
    println!("Initial target: {:.2}", target.result);
    println!("Predicted: {:.2}", y.result);
    println!("Initial loss: {:.2}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.01;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let y = loss.find("y").expect("Node not found");
        let target = loss.find("target").expect("Node not found");

        println!(
            "Iteration {:3}, loss: {:9.4} / predicted: {:.2} (target: {:.2})",
            i, loss.result, y.result, target.result
        );
    }

    println!("Final values: {:}", neuron);
}
examples/layer.rs (line 15)
fn main() {
    let mut target = vec![Expr::new_leaf(15.0, "t1"), Expr::new_leaf(85.0, "t2")];
    target[0].is_learnable = false;
    target[1].is_learnable = false;

    let layer = Layer::new(3, 2, Activation::None);
    println!("Initial values: {:}", layer);

    let mut inputs = vec![
        Expr::new_leaf(1.0, "x_1"),
        Expr::new_leaf(2.0, "x_2"),
        Expr::new_leaf(3.0, "x_3"),
    ];

    inputs.iter_mut().for_each(|input| {
        input.is_learnable = false;
    });

    let mut y = layer.forward(inputs);
    let mut y1 = y.remove(0);
    y1.name = "y1".to_string();
    let mut y2 = y.remove(0);
    y2.name = "y2".to_string();

    let d1 = y1 - target[0].clone();
    let mut sqr1 = Expr::new_leaf(2.0, "square_exponent1");
    sqr1.is_learnable = false;

    let d2 = y2 - target[1].clone();
    let mut sqr2 = Expr::new_leaf(2.0, "square_exponent2");
    sqr2.is_learnable = false;

    let mut loss = d1.pow(sqr1, "diff1") + d2.pow(sqr2, "diff2");

    let t1 = loss.find("t1").unwrap();
    let t2 = loss.find("t2").unwrap();
    let y1 = loss.find("y1").unwrap();
    let y2 = loss.find("y2").unwrap();

    println!("Initial targets: {:.2}, {:.2}", t1.result, t2.result);
    println!("Predicted: {:.2}, {:.2}", y1.result, y2.result);
    println!("Initial loss: {:.2}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.004;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let t1 = loss.find("t1").unwrap();
        let t2 = loss.find("t2").unwrap();
        let y1 = loss.find("y1").unwrap();
        let y2 = loss.find("y2").unwrap();

        println!(
            "Iteration {:3}, loss: {:9.4} / predicted: {:5.2}, {:5.2} (targets: {:5.2}, {:5.2})",
            i, loss.result, y1.result, y2.result, t1.result, t2.result
        );
    }

    println!("Final values: {:}", layer);
}
examples/backpropagation.rs (line 17)
fn main() {
    // these are the initial values for the nodes of the graph
    let mut x1 = Expr::new_leaf(2.0, "x1");
    x1.is_learnable = false;

    let mut x2 = Expr::new_leaf(1.0, "x2");
    x2.is_learnable = false;

    let w1 = Expr::new_leaf(-3.0, "w1");
    let w2 = Expr::new_leaf(1.0, "w2");
    let b = Expr::new_leaf(6.5, "b");

    // here we compute the expression x1*w1 + x2*w2 + b
    let x1w1 = x1 * w1;
    let x2w2 = x2 * w2;
    let x1w1_x2w2 = x1w1 + x2w2;
    let n = x1w1_x2w2 + b;

    // we add a non-linear activation function: tanh(x1*w1 + x2*w2 + b)
    let o = n.tanh("o");

    println!("Initial output: {:.2}", o.result);

    // we set the target value
    let target_value = 0.2;
    let mut target = Expr::new_leaf(target_value, "target");
    target.is_learnable = false;

    // we compute the loss function
    let mut squared_exponent = Expr::new_leaf(2.0, "squared_exponent");
    squared_exponent.is_learnable = false;

    let mut loss = (o - target).pow(squared_exponent, "loss");
    loss.is_learnable = false;

    // we print the initial loss
    println!("Initial loss: {:.4}", loss.result);

    println!("\nTraining:");
    let learning_rate = 0.01;
    for i in 1..=50 {
        loss.learn(learning_rate);
        loss.recalculate();

        let target = loss.find("o").expect("Node not found");

        println!(
            "Iteration {:2}, loss: {:.4} / result: {:.2}",
            i, loss.result, target.result
        );
    }

    let w1 = loss.find("w1").expect("Node not found");
    let w2 = loss.find("w2").expect("Node not found");
    let b = loss.find("b").expect("Node not found");

    println!(
        "\nFinal values: w1: {:.2}, w2: {:.2}, b: {:.2}",
        w1.result, w2.result, b.result
    );

    let x1 = loss.find("x1").expect("Node not found");
    let x2 = loss.find("x2").expect("Node not found");

    let n = loss
        .find("(((x1 * w1) + (x2 * w2)) + b)") // auto-generated node name
        .expect("Node not found");
    let o = loss.find("o").expect("Node not found");

    println!(
        "Final formula: tanh({:.2}*{:.2} + {:.2}*{:.2} + {:.2}) = tanh({:.2}) = {:.2}",
        x1.result, w1.result, x2.result, w2.result, b.result, n.result, o.result
    )
}
examples/mlp.rs (line 17)
fn main() {
    let mut targets = vec![
        Expr::new_leaf(150.0, "t1"),
        Expr::new_leaf(250.0, "t2"),
        Expr::new_leaf(350.0, "t3"),
    ];
    targets.iter_mut().for_each(|target| {
        target.is_learnable = false;
    });

    let mlp = MLP::new(
        3,
        Activation::Tanh,
        vec![2, 2],
        Activation::Tanh,
        1,
        Activation::None,
    );
    println!("Initial values: {:}", mlp);

    let mut inputs = vec![
        vec![
            Expr::new_leaf(1.0, "x_1,1"),
            Expr::new_leaf(2.0, "x_1,2"),
            Expr::new_leaf(3.0, "x_1,3"),
        ],
        vec![
            Expr::new_leaf(4.0, "x_2,1"),
            Expr::new_leaf(5.0, "x_2,2"),
            Expr::new_leaf(6.0, "x_2,3"),
        ],
        vec![
            Expr::new_leaf(7.0, "x_3,1"),
            Expr::new_leaf(8.0, "x_3,2"),
            Expr::new_leaf(9.0, "x_3,3"),
        ],
    ];

    inputs.iter_mut().for_each(|instance| {
        instance.iter_mut().for_each(|value| {
            value.is_learnable = false;
        });
    });

    let predictions = inputs
        // for each example, make a prediction
        .iter()
        .map(|example| mlp.forward(example.clone()))
        // name these predictions y1, y2, y3
        .enumerate()
        .map(|(i, mut y)| {
            // the result is a vector but it's a single value because we specified 1 output neuron
            let mut result = y.remove(0);
            result.name = format!("y{:}", i + 1).to_string();
            result
        })
        // collect them into a single vector
        .collect::<Vec<_>>();

    let differences = predictions
        .iter()
        .zip(targets.iter())
        .map(|(y, t)| y.clone() - t.clone())
        .collect::<Vec<_>>();
    let mut loss = differences
        .iter()
        .map(|d| d.clone() * d.clone())
        .sum::<Expr>();

    let y1 = loss.find("y1").unwrap();
    let y2 = loss.find("y2").unwrap();
    let y3 = loss.find("y3").unwrap();
    println!("Initial loss: {:.2}", loss.result);
    println!(
        "Initial predictions: {:5.2} {:5.2} {:5.2}",
        y1.result, y2.result, y3.result
    );

    println!("\nTraining:");
    let learning_rate = 0.025;
    for i in 1..=100 {
        loss.learn(learning_rate);
        loss.recalculate();

        let t1 = loss.find("t1").unwrap();
        let t2 = loss.find("t2").unwrap();
        let t3 = loss.find("t3").unwrap();

        let y1 = loss.find("y1").unwrap();
        let y2 = loss.find("y2").unwrap();
        let y3 = loss.find("y3").unwrap();

        println!(
            "Iteration {:3}, loss: {:11.4} / predicted: {:5.2}, {:5.2}, {:5.2} (targets: {:5.2}, {:5.2}, {:5.2})",
            i, loss.result, y1.result, y2.result, y3.result, t1.result, t2.result, t3.result
        );
    }

    println!("Final values: {:}", mlp);
}
Source

pub fn tanh(self, name: &str) -> Expr

Applies the hyperbolic tangent function to the expression and returns it as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.tanh("tanh");
 
println!("Result: {}", expr2.result); // 0.7615941559557649
Examples found in repository: examples/backpropagation.rs (line 34); the full listing appears under new_leaf above.
Source

pub fn relu(self, name: &str) -> Expr

Applies the rectified linear unit function to the expression and returns it as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(-1.0, "x");
let expr2 = expr.relu("relu");
 
println!("Result: {}", expr2.result); // 0.0
Source

pub fn exp(self, name: &str) -> Expr

Applies the exponential function (e^x) to the expression and returns it as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.exp("exp");
 
println!("Result: {}", expr2.result); // 2.718281828459045
Source

pub fn pow(self, exponent: Expr, name: &str) -> Expr

Raises the expression to the power of the given exponent (itself an expression) and returns the result as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(2.0, "x");
let exponent = Expr::new_leaf(3.0, "y");
let result = expr.pow(exponent, "x^y");
 
println!("Result: {}", result.result); // 8.0
Examples found in repository: examples/operations.rs (line 17), examples/neuron.rs (line 38), examples/layer.rs (line 46), examples/backpropagation.rs (line 47); the full listings appear under new_leaf above.
Source

pub fn log(self, name: &str) -> Expr

Applies the natural logarithm function to the expression and returns it as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(2.0, "x");
let expr2 = expr.log("log");
 
println!("Result: {}", expr2.result); // 0.6931471805599453
Source

pub fn neg(self, name: &str) -> Expr

Negates the expression and returns it as a new expression.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.neg("neg");
 
println!("Result: {}", expr2.result); // -1.0
Source

pub fn recalculate(&mut self)

Recalculates the value of the expression recursively, using the updated values of its operands.

Typically used after a call to Expr::learn, once the gradients have been calculated and the internal values of the expression tree have been updated.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let mut expr2 = expr.tanh("tanh(x)");
expr2.learn(1e-09);
expr2.recalculate();
Examples found in repository: examples/neuron.rs (line 50), examples/layer.rs (line 61), examples/backpropagation.rs (line 57), examples/mlp.rs (line 97); the full listings appear under new_leaf above.
Source

pub fn learn(&mut self, learning_rate: f64)

Applies backpropagation to the expression, updating the values of the gradients and the expression itself.

This method updates the gradients of the tree's nodes based on the gradient of the last expression (the root) in the calculation graph.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let mut expr2 = expr.tanh("tanh(x)");
expr2.learn(1e-09);

After adjusting the gradients, the method updates the values of the learnable expression tree nodes to minimize the loss function.

To obtain an updated evaluation of the expression tree, call Expr::recalculate after calling Expr::learn.
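Putting the two calls together, a typical training loop alternates learn and recalculate until the loss is small enough. The following condensed variant of the repository's backpropagation example is a sketch only; the node names, target value, and learning rate are illustrative:

use alpha_micrograd_rust::value::Expr;
 
let w = Expr::new_leaf(0.0, "w");
 
let mut target = Expr::new_leaf(0.5, "target");
target.is_learnable = false;
 
let mut exponent = Expr::new_leaf(2.0, "exponent");
exponent.is_learnable = false;
 
// squared error between tanh(w) and the target
let mut loss = (w.tanh("y") - target).pow(exponent, "loss");
 
for _ in 0..100 {
    loss.learn(0.1);    // backpropagate and update the learnable node "w"
    loss.recalculate(); // refresh every node's result from the new values
}
println!("Final loss: {:.6}", loss.result);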

Examples found in repository: examples/neuron.rs (line 49), examples/layer.rs (line 60), examples/backpropagation.rs (line 56), examples/mlp.rs (line 96); the full listings appear under new_leaf above.
Source

pub fn find(&self, name: &str) -> Option<&Expr>

Finds a node in the expression tree by its name.

This method will search the expression tree for a node with the given name. If the node is not found, it will return None.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = expr.tanh("tanh(x)");
let original = expr2.find("x");
 
assert_eq!(original.expect("Could not find x").result, 1.0);
Examples found in repository: examples/neuron.rs (line 40), examples/layer.rs (line 48), examples/backpropagation.rs (line 59), examples/mlp.rs (line 84); the full listings appear under new_leaf above.

Trait Implementations§

Source§

impl Add<Expr> for f64

Implements the Add trait for the f64 type, when the right operand is an Expr.

This implementation allows the addition of an f64 value and an Expr object.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = 2.0 + expr;
 
println!("Result: {}", result.result); // 3.0
Source§

type Output = Expr

The resulting type after applying the + operator.
Source§

fn add(self, other: Expr) -> Expr

Performs the + operation. Read more
Source§

impl Add<f64> for Expr

Implements the Add trait for the Expr struct, when the right operand is an f64.

This implementation allows the addition of an Expr object and an f64 value.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = expr + 2.0;
 
println!("Result: {}", result.result); // 3.0
Source§

type Output = Expr

The resulting type after applying the + operator.
Source§

fn add(self, other: f64) -> Expr

Performs the + operation. Read more
Source§

impl Add for Expr

Implements the Add trait for the Expr struct.

This implementation allows the addition of two Expr objects.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
 
let result = expr + expr2;
 
println!("Result: {}", result.result); // 3.0
Source§

type Output = Expr

The resulting type after applying the + operator.
Source§

fn add(self, other: Expr) -> Expr

Performs the + operation. Read more
Source§

impl Clone for Expr

Source§

fn clone(&self) -> Expr

Returns a copy of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for Expr

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Div<f64> for Expr

Implements the Div trait for the Expr struct, when the right operand is an f64.

This implementation allows the division of an Expr object by an f64 value.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = expr / 2.0;
 
println!("Result: {}", result.result); // 0.5
Source§

type Output = Expr

The resulting type after applying the / operator.
Source§

fn div(self, other: f64) -> Expr

Performs the / operation. Read more
Source§

impl Div for Expr

Implements the Div trait for the Expr struct.

This implementation allows the division of two Expr objects.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
 
let result = expr / expr2;
 
println!("Result: {}", result.result); // 0.5
Source§

type Output = Expr

The resulting type after applying the / operator.
Source§

fn div(self, other: Expr) -> Expr

Performs the / operation. Read more
Source§

impl Mul<Expr> for f64

Implements the Mul trait for the f64 type, when the right operand is an Expr.

This implementation allows the multiplication of an f64 value and an Expr object.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = 2.0 * expr;
 
println!("Result: {}", result.result); // 2.0
Source§

type Output = Expr

The resulting type after applying the * operator.
Source§

fn mul(self, other: Expr) -> Expr

Performs the * operation. Read more
Source§

impl Mul<f64> for Expr

Implements the Mul trait for the Expr struct, when the right operand is an f64.

This implementation allows the multiplication of an Expr object and an f64 value.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = expr * 2.0;
 
println!("Result: {}", result.result); // 2.0
Source§

type Output = Expr

The resulting type after applying the * operator.
Source§

fn mul(self, other: f64) -> Expr

Performs the * operation. Read more
Source§

impl Mul for Expr

Implements the Mul trait for the Expr struct.

This implementation allows the multiplication of two Expr objects.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
 
let result = expr * expr2;
 
println!("Result: {}", result.result); // 2.0
Source§

type Output = Expr

The resulting type after applying the * operator.
Source§

fn mul(self, other: Expr) -> Expr

Performs the * operation. Read more
Source§

impl Sub<Expr> for f64

Implements the Sub trait for the f64 type, when the right operand is an Expr.

This implementation allows subtracting an Expr object from an f64 value.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = 2.0 - expr;
 
println!("Result: {}", result.result); // 1.0
Source§

type Output = Expr

The resulting type after applying the - operator.
Source§

fn sub(self, other: Expr) -> Expr

Performs the - operation. Read more
Source§

impl Sub<f64> for Expr

Implements the Sub trait for the Expr struct, when the right operand is an f64.

This implementation allows subtracting an f64 value from an Expr object.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let result = expr - 2.0;
 
println!("Result: {}", result.result); // -1.0
Source§

type Output = Expr

The resulting type after applying the - operator.
Source§

fn sub(self, other: f64) -> Expr

Performs the - operation. Read more
Source§

impl Sub for Expr

Implements the Sub trait for the Expr struct.

This implementation allows the subtraction of two Expr objects.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
 
let result = expr - expr2;
 
println!("Result: {}", result.result); // -1.0
Source§

type Output = Expr

The resulting type after applying the - operator.
Source§

fn sub(self, other: Expr) -> Expr

Performs the - operation. Read more
Source§

impl Sum for Expr

Implements the Sum trait for the Expr struct.

Note that this implementation generates temporary Expr objects, which may not be the most efficient way to sum a collection of Expr objects. However, it is provided as a convenience for users who want to call sum over an Iterator<Expr>.

Example:

use alpha_micrograd_rust::value::Expr;
 
let expr = Expr::new_leaf(1.0, "x");
let expr2 = Expr::new_leaf(2.0, "y");
let expr3 = Expr::new_leaf(3.0, "z");
 
let sum = vec![expr, expr2, expr3].into_iter().sum::<Expr>();
 
println!("Result: {}", sum.result); // 6.0
Source§

fn sum<I>(iter: I) -> Self
where I: Iterator<Item = Self>,

Takes an iterator and generates Self from the elements by “summing up” the items.

Auto Trait Implementations§

§

impl Freeze for Expr

§

impl RefUnwindSafe for Expr

§

impl Send for Expr

§

impl Sync for Expr

§

impl Unpin for Expr

§

impl UnwindSafe for Expr

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dst: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V