Struct Value

Source
pub struct Value(/* private fields */);

Implementations§

Source§

impl Value

Source

pub fn from<T>(t: T) -> Value
where T: Into<Value>,

Examples found in repository?
examples/scalar_back_propagation.rs (line 6)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
More examples
Hide additional examples
examples/gradient_descent.rs (line 21)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source

pub fn with_label(self, label: &str) -> Value

Examples found in repository?
examples/scalar_back_propagation.rs (line 6)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
Source

pub fn data(&self) -> f64

Examples found in repository?
examples/scalar_back_propagation.rs (line 25)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
More examples
Hide additional examples
examples/gradient_descent.rs (line 23)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source

pub fn gradient(&self) -> f64

Examples found in repository?
examples/scalar_back_propagation.rs (line 26)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
Source

pub fn clear_gradient(&self)

Examples found in repository?
examples/gradient_descent.rs (line 36)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source

pub fn adjust(&self, factor: f64)

Examples found in repository?
examples/gradient_descent.rs (line 40)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source

pub fn pow(&self, other: &Value) -> Value

Examples found in repository?
examples/gradient_descent.rs (line 30)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}
Source

pub fn tanh(&self) -> Value

Examples found in repository?
examples/scalar_back_propagation.rs (line 21)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
Source

pub fn backward(&self)

Examples found in repository?
examples/scalar_back_propagation.rs (line 23)
fn main() {
    let x1 = Value::from(2.0).with_label("x1");
    let x1_clone = x1.clone();
    let x2 = Value::from(0.0).with_label("x2");

    let w1 = Value::from(-3.0).with_label("w1");
    let w2 = Value::from(1.0).with_label("w2");

    let b = Value::from(6.8813735870195432).with_label("b");

    let x1w1 = (x1 * w1).with_label("x1w1");
    let x2w2 = (x2 * w2).with_label("x2w2");

    let x1w1x2w2 = (x1w1 + x2w2).with_label("x1w1x2w2");

    let n = (x1w1x2w2 + b).with_label("n");
    let o = n.tanh().with_label("o");

    o.backward();

    assert_eq!(0.7071, round_to(o.data(), 4.0));
    assert_eq!(-1.5, round_to(x1_clone.gradient(), 3.0));
    println!("{:?}", o);
}
More examples
Hide additional examples
examples/gradient_descent.rs (line 37)
fn main() {
    let mlp = MLP::new(3, vec![4, 4, 1]);

    let xs = vec![
        vec![2.0, 3.0, -1.0],
        vec![3.0, -1.0, 0.5],
        vec![0.5, 1.0, 1.0],
        vec![1.0, 1.0, -1.0],
    ];

    let ys = vec![1.0, -1.0, -1.0, 1.0];

    for _ in 0..100 {
        // Forward pass
        let ypred: Vec<Value> = xs
            .iter()
            .map(|x| mlp.forward(x.iter().map(|x| Value::from(*x)).collect())[0].clone())
            .collect();
        let ypred_floats: Vec<f64> = ypred.iter().map(|v| v.data()).collect();

        // Loss function
        let ygt = ys.iter().map(|y| Value::from(*y));
        let loss: Value = ypred
            .into_iter()
            .zip(ygt)
            .map(|(yp, yg)| (yp - yg).pow(&Value::from(2.0)))
            .sum();

        println!("Loss: {} Predictions: {:?}", loss.data(), ypred_floats);

        // Backward pass
        mlp.parameters().iter().for_each(|p| p.clear_gradient());
        loss.backward();

        // Adjustment
        mlp.parameters().iter().for_each(|p| p.adjust(-0.05));
    }
}

Trait Implementations§

Source§

impl<'a, 'b> Add<&'b Value> for &'a Value

Source§

type Output = Value

The resulting type after applying the + operator.
Source§

fn add(self, other: &'b Value) -> Self::Output

Performs the + operation. Read more
Source§

impl Add for Value

Source§

type Output = Value

The resulting type after applying the + operator.
Source§

fn add(self, other: Value) -> Self::Output

Performs the + operation. Read more
Source§

impl Clone for Value

Source§

fn clone(&self) -> Value

Returns a duplicate of the value. Read more
1.0.0 · Source§

const fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for Value

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Deref for Value

Source§

type Target = Rc<RefCell<ValueInternal>>

The resulting type after dereferencing.
Source§

fn deref(&self) -> &Self::Target

Dereferences the value.
Source§

impl<T: Into<f64>> From<T> for Value

Source§

fn from(t: T) -> Value

Converts to this type from the input type.
Source§

impl Hash for Value

Source§

fn hash<H: Hasher>(&self, state: &mut H)

Feeds this value into the given Hasher. Read more
1.3.0 · Source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
Source§

impl<'a, 'b> Mul<&'b Value> for &'a Value

Source§

type Output = Value

The resulting type after applying the * operator.
Source§

fn mul(self, other: &'b Value) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul for Value

Source§

type Output = Value

The resulting type after applying the * operator.
Source§

fn mul(self, other: Value) -> Self::Output

Performs the * operation. Read more
Source§

impl<'a> Neg for &'a Value

Source§

type Output = Value

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more
Source§

impl Neg for Value

Source§

type Output = Value

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more
Source§

impl PartialEq for Value

Source§

fn eq(&self, other: &Value) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · Source§

const fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
Source§

impl<'a, 'b> Sub<&'b Value> for &'a Value

Source§

type Output = Value

The resulting type after applying the - operator.
Source§

fn sub(self, other: &'b Value) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub for Value

Source§

type Output = Value

The resulting type after applying the - operator.
Source§

fn sub(self, other: Value) -> Self::Output

Performs the - operation. Read more
Source§

impl Sum for Value

Source§

fn sum<I: Iterator<Item = Self>>(iter: I) -> Self

Takes an iterator and generates Self from the elements by “summing up” the items.
Source§

impl Eq for Value

Source§

impl StructuralPartialEq for Value

Auto Trait Implementations§

§

impl Freeze for Value

§

impl !RefUnwindSafe for Value

§

impl !Send for Value

§

impl !Sync for Value

§

impl Unpin for Value

§

impl !UnwindSafe for Value

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<P, T> Receiver for P
where P: Deref<Target = T> + ?Sized, T: ?Sized,

Source§

type Target = T

🔬This is a nightly-only experimental API. (arbitrary_self_types)
The target type on which the method may be called.
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V