Struct microtensor::Variable
pub struct Variable<T: Real + 'static> { /* private fields */ }
Variables track the computational operations used to create them, allowing their gradient to be computed with respect to every input variable involved.
They are created by calling tracked or trained on any differentiable Tensor type.
Variables automatically dereference to their underlying Tensor for non-differentiable operations. Differentiable operations, on the other hand, always return another Variable.
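For example, a minimal sketch of that workflow, using only constructors and operations that appear in the repository examples below (the scalar type is pinned to f32 purely for type inference; imports are omitted, as in those examples):

// `trained` marks a tensor as a learnable parameter, while `tracked` merely
// records operations so gradients can flow through it.
let w: Variable<f32> = Tensor::randn(&[2, 8]).trained();
let x = Tensor::vec(&[1.0, 2.0]).tracked();

// Differentiable operations return another Variable...
let loss = x.mm(&w).sqr().mean(0);
loss.backward();

// ...while non-differentiable calls dereference to the underlying Tensor.
println!("loss has rank {}", loss.rank());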
Implementations
impl<T: Real + 'static> Variable<T>
pub fn id(&self) -> usize
pub fn tensor(&self) -> &Tensor<T>
pub fn grad(&self) -> Option<&Tensor<T>>
Examples found in repository
fn main() {
    // Define some tensors
    let x = Tensor::vec(&[1.0, 2.0]);
    let w = Tensor::randn(&[2, 8]).trained();
    let b = Tensor::zeros(&[8]).trained();

    // Do some computation
    let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);

    // Compute gradients
    z.backward();
    println!("Gradient of z with respect to w: {}", w.grad().unwrap());

    // Nudge w and b in order to minimize z
    for mut param in z.parameters() {
        param -= param.grad().unwrap() * 0.01
    }
}
More examples
fn load_model(filename: &str) {
    let graph = Graph::load(filename).unwrap();

    // Feed new data using #run.
    // Updating the entire graph in this way is more efficient
    // than calling #forward on each individual output.
    graph.run(&[
        &Tensor::vec(&[5.0, 6.0]).tracked(),
        &Tensor::randn(&[16]).tracked(),
    ]);

    // Get new output..
    let z = &graph.outputs[1];
    println!("z is now {}", z.item());

    // ..or train the model further
    let z = &graph.outputs[1];
    z.backward();
    for mut param in z.parameters() {
        param -= param.grad().unwrap() * 0.01
    }
}
fn main() {
    // Construct model that stores all trainable tensors explicitly
    let model = Perceptron::new(28 * 28);

    // Train with labeled samples
    let learning_rate = 0.01;
    for _ in 0..100 {
        // Insert real training data here
        let images = Tensor::ones(&[32, 28 * 28]);
        let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);

        // Run the model, creating a fresh computation graph in the process
        let output = model.run(&images.tracked());

        // Compute loss
        let loss = (&labels.tracked() - &output).sqr().mean(0);

        // Compute gradients
        loss.backward();

        // Minimize loss by updating model parameters
        for mut param in loss.parameters() {
            param -= param.grad().unwrap() * learning_rate
        }

        // Reset gradients
        loss.reset();
    }
}
fn main() {
    // Define model by performing all computations on a placeholder once
    let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
    let output = perceptron(&image_input);

    // Define the loss to be minimized
    let label_input = Tensor::zeros(&[32, 10]).tracked();
    let loss = (&label_input - &output).sqr().mean(0);

    // Train with some labeled samples
    let learning_rate = 0.01;
    for _ in 0..100 {
        // Insert real training data here
        let images = Tensor::ones(&[32, 28 * 28]).tracked();
        let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);

        // Feed existing computation graph with new inputs
        image_input.feed(&images);
        label_input.feed(&labels);

        // Recompute output and loss
        loss.forward();

        // Compute gradients
        loss.backward();

        // Minimize loss by updating model parameters
        for mut param in loss.parameters() {
            param -= param.grad().unwrap() * learning_rate
        }

        // Reset gradients
        loss.reset();
    }
}
pub fn unary_op(&self, op: impl UnaryOp<T> + 'static) -> Self
pub fn binary_op(&self, op: impl BinaryOp<T> + 'static, rhs: &Self) -> Self
pub fn forward(&self)
Reevaluate this Variable’s graph to produce a new output.
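A condensed sketch of the placeholder pattern used in the repository examples: build the graph once on placeholder data, then feed fresh values and recompute with forward (f32 is pinned only for type inference):

// Build a small graph on placeholder data.
let input: Variable<f32> = Tensor::zeros(&[4]).tracked();
let output = input.sqr().sum(0);

// Later: overwrite the placeholder and reevaluate the graph.
input.feed(&Tensor::vec(&[1.0, 2.0, 3.0, 4.0]));
output.forward();
println!("output is now {}", output.item());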
pub fn backward(&self)
Compute gradients across this Variable’s entire graph.
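A minimal sketch: once backward has run, the gradient of the output with respect to every trained Variable in the graph can be read back via grad (f32 pinned for type inference):

let w: Variable<f32> = Tensor::randn(&[3]).trained();
let loss = w.sqr().mean(0);

loss.backward();
println!("d(loss)/d(w): {}", w.grad().unwrap());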
pub fn parameters(&self) -> Vec<Self>
List all trainable parameters in this Variable’s graph.
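For instance, the returned Variables can be used to count the trainable values reachable from a loss (a sketch; size comes from the Deref methods listed further down):

let w: Variable<f32> = Tensor::randn(&[2, 8]).trained();
let b = Tensor::zeros(&[8]).trained();
let loss = (Tensor::vec(&[1.0, 2.0]).tracked().mm(&w) + b).sqr().mean(0);

// parameters() yields the trained Variables w and b.
let count: usize = loss.parameters().iter().map(|p| p.size()).sum();
println!("{} trainable values", count); // 2 * 8 + 8 = 24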
pub fn inputs(&self) -> Vec<Self>
pub fn reset(&self)
Set gradients to zero for this Variable’s entire graph.
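In the repository examples, reset is called at the end of every training iteration so that the next backward pass starts from zeroed gradients. A condensed sketch, given a scalar loss Variable like the ones built above:

for _ in 0..100 {
    // ...feed fresh inputs and recompute the loss here...
    loss.backward();
    for mut param in loss.parameters() {
        param -= param.grad().unwrap() * 0.01
    }
    loss.reset(); // zero every gradient in the graph before the next pass
}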
pub fn check_gradients<F>(shape: &[usize], generator: F) -> T
where
    F: Fn(&Self) -> Self,
Compute a function’s gradient with respect to a generated input numerically and compare it to the automatically derived solution.
Supply any function to check that it gets differentiated correctly.
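A hedged sketch of a possible call, assuming the returned scalar quantifies the deviation between the numerical and the automatically derived gradient (the closure receives a generated input of the given shape):

// Verify that sqr followed by mean is differentiated correctly
// for a randomly generated input of shape [3].
let deviation = Variable::<f32>::check_gradients(&[3], |x| x.sqr().mean(0));
println!("gradient deviation: {}", deviation);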
pub fn statistics(&self) -> (usize, usize, usize, usize, usize)
Methods from Deref<Target = Tensor<T>>
pub fn raw(&self) -> RwLockReadGuard<'_, Vec<T>>
pub fn raw_mut(&self) -> RwLockWriteGuard<'_, Vec<T>>
pub fn size(&self) -> usize
pub fn rank(&self) -> usize
pub fn contiguous(&self) -> Self
pub fn detach(&self) -> Self
pub fn zip<O, F>(&self, rhs: &Self, cb: F) -> Tensor<O> where O: Inner, F: Fn((T, T)) -> O,
pub fn vectorize<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: FnMut(T) -> O,
pub fn reduce<F>(&self, cb: F) -> Option<Self> where F: Fn(T, T) -> T,
pub fn collapse<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> O,
pub fn collapse_only<F>(&self, dim: isize, cb: F) -> Self where F: Fn(Self) -> Self,
pub fn expand<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: Fn(T) -> Vec<O>,
pub fn map<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> Tensor<O>,
pub fn iter(&self, dim: isize) -> TensorSliceIterator<'_, T>
pub fn param_iter(&self) -> TensorIterator<'_, T>
pub fn item(&self) -> T
pub fn view(&self, shape: &[usize]) -> Self
pub fn extend_front(&self, size: usize) -> Self
pub fn transpose_vec(&self, extend_front: bool) -> Self
pub fn equal(&self, rhs: &Self) -> Tensor<bool>
pub fn split(&self, size: usize, dim: isize) -> Vec<Self>
pub fn chunks(&self, n: usize, dim: isize) -> Vec<Self>
pub fn to_vec(&self, dim: isize) -> Vec<Self>
pub fn shuffle(&self, dim: isize) -> Self
pub fn feed(&self, other: &Self)
pub fn add(&self, rhs: &Self) -> Self
pub fn sub(&self, rhs: &Self) -> Self
pub fn mul(&self, rhs: &Self) -> Self
pub fn div(&self, rhs: &Self) -> Self
pub fn rem(&self, rhs: &Self) -> Self
pub fn sum_over(&self, dim: isize) -> Self
pub fn gt(&self, rhs: &Self) -> Tensor<bool>
pub fn lt(&self, rhs: &Self) -> Tensor<bool>
pub fn top_k(&self, k: usize, dim: isize) -> Self
pub fn argmax<O: Integer + Unsigned>(&self, dim: isize) -> Tensor<O>
Collapse dimension using index of its greatest value
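For example (a sketch; u8 is assumed to satisfy the Integer + Unsigned bound, since it already appears as a cast target in the repository examples):

let probs: Tensor<f32> = Tensor::vec(&[0.1, 0.7, 0.2]);
let winner: Tensor<u8> = probs.argmax(0); // contains 1, the index of 0.7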
pub fn clamp(&self, min: T, max: T) -> Self
pub fn cast<I: Numeric>(&self) -> Tensor<I>
pub fn bernoulli<O: Numeric>(&self) -> Tensor<O>
pub fn sample(&self) -> usize
pub fn trained(&self) -> Variable<T>
Examples found in repository
fn build_model(filename: &str) {
    // Have some inputs
    let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
    let x2 = Tensor::ones(&[16]).tracked();
    let w = Tensor::randn(&[2, 16]).trained();

    // Do some computations
    let y = x1.mm(&w);
    let z = (&y * &x2).sum(0);

    // Pack the resulting graph into a Graph structure to make its inputs
    // and outputs explicit and arrange them in an order of your liking.
    let graph = Graph::new(&[x1, x2], &[y, z]);

    // Save entire computation graph to disc
    graph.save(filename).unwrap();
}
pub fn tracked(&self) -> Variable<T>
pub fn one_hot<O: Numeric>(&self, size: usize) -> Tensor<O>
pub fn confusion(&self, labels: &Self) -> Self
Trait Implementations
impl<T: Real> AddAssign<Tensor<T>> for Variable<T>
fn add_assign(&mut self, rhs: Tensor<T>)
Performs the += operation.
impl<T: Real> BaseHops<T> for Variable<T>
fn at(&self, indices: &[isize]) -> Self
fn squeeze_only(&self, dim: isize) -> Self
fn squeeze_but(&self, dim: isize) -> Self
fn squeeze_first(&self, n: usize) -> Self
fn squeeze_all(&self) -> Self
fn unsqueeze_n(&self, n: usize, dim: isize) -> Self
fn extend(&self, rank: usize) -> Self
fn stack(rows: &[Self], dim: isize) -> Self
fn rows(rows: &[Self]) -> Self
impl<T: Real> BaseOps<T> for Variable<T>
fn scalar(item: T) -> Self
fn shape(&self) -> &Shape
fn range(&self, ranges: &[Range<isize>]) -> Self
fn broadcast(&self, shape: &Shape, _ignore_from: Option<isize>) -> Self
fn reshape(&self, dims: &[usize]) -> Self
fn squeeze(&self, squeezed: &[isize]) -> Self
fn unsqueeze(&self, dim: isize) -> Self
fn transpose(&self, dim1: isize, dim2: isize) -> Self
fn concat(&self, rhs: &Self, dim: isize) -> Self
impl<T: Real> DivAssign<Tensor<T>> for Variable<T>
fn div_assign(&mut self, rhs: Tensor<T>)
Performs the /= operation.
impl<T: Real> MulAssign<Tensor<T>> for Variable<T>
fn mul_assign(&mut self, rhs: Tensor<T>)
Performs the *= operation.
impl<T: Real> NumericOps<T> for Variable<T>
impl<T: Real> RealHops<T> for Variable<T>
fn powf(&self, exp: I) -> Self
fn sqr(&self) -> Self
fn sqrt(&self) -> Self
fn exp(&self) -> Self
fn norm(&self, dim: isize) -> Self
fn dot(&self, rhs: &Self, dim: isize) -> Self
fn mean(&self, dim: isize) -> Self
fn variance(&self, dim: isize) -> Self
fn softmax(&self, dim: isize) -> Self
fn max_with(&self, rhs: &Self) -> Self
Auto Trait Implementations
impl<T> !RefUnwindSafe for Variable<T>
impl<T> Send for Variable<T>
impl<T> Sync for Variable<T>
impl<T> Unpin for Variable<T>
impl<T> !UnwindSafe for Variable<T>
Blanket Implementations
impl<T> Type for T where T: ?Sized,
default fn dangling(t: <T as Type>::Meta) -> NonNull<T>
Create a dangling *const Self with the provided Self::Meta.
default fn fatten(thin: *mut (), t: <T as Type>::Meta) -> *mut T
Create a *mut Self with the provided Self::Meta.
fn meta_type(self: *const Self) -> MetaType
Whether this is a TraitObject, Slice or Concrete type.
impl<T> Type for T
const METATYPE: MetaType = MetaType::Concrete
Whether this is a TraitObject, Slice or Concrete type.
fn dangling(_t: <T as Type>::Meta) -> NonNull<T>
Create a dangling *const Self with the provided Self::Meta.
fn fatten(thin: *mut (), _t: <T as Type>::Meta) -> *mut T
Create a *mut Self with the provided Self::Meta.
fn meta_type(self: *const Self) -> MetaType
Whether this is a TraitObject, Slice or Concrete type.