Struct microtensor::Tensor
pub struct Tensor<T: Inner> { /* private fields */ }
Implementations
impl<T: Inner> Tensor<T>
pub fn from_shape(shape: Shape, data: Vec<T>) -> Self
pub fn new(shape: &[usize], data: Vec<T>) -> Self
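A minimal construction sketch (assuming, as is usual for dense tensors, that the data length must equal the product of the shape dimensions; from_shape takes an already-built Shape, while new takes the dimensions as a slice):
let t = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
assert_eq!(t.rank(), 2); // two dimensions
assert_eq!(t.size(), 6); // assuming size() counts elements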
pub fn vec(vec: &[T]) -> Self
Examples found in repository?
examples/basic.rs (line 5)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();
  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);
  // Compute gradients
  z.backward();
  println!("Gradient of z with respect to w: {}", w.grad().unwrap());
  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
More examples
examples/graph.rs (line 21)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();
  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);
  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);
  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}

fn load_model(filename: &str) {
  let graph = Graph::load(filename).unwrap();
  // Feed new data using #run.
  // Updating the entire graph in this way is more efficient
  // than calling #forward on each individual output.
  graph.run(&[
    &Tensor::vec(&[5.0, 6.0]).tracked(),
    &Tensor::randn(&[16]).tracked(),
  ]);
  // Get new output..
  let z = &graph.outputs[1];
  println!("z is now {}", z.item());
  // ..or train the model further
  let z = &graph.outputs[1];
  z.backward();
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
pub fn from_vec(vec: Vec<T>) -> Self
pub fn fill(shape: &[usize], filler: T) -> Self
pub fn init(shape: &[usize], cb: impl FnMut(Vec<usize>) -> T) -> Self
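A short, hedged sketch of the remaining constructors: fill repeats a single value, and init is assumed to call the closure with the multi-dimensional index of each element:
let sevens = Tensor::fill(&[2, 2], 7.0);
let indexed = Tensor::init(&[2, 2], |idx| idx.iter().sum::<usize>() as f32);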
pub fn raw(&self) -> RwLockReadGuard<'_, Vec<T>>
pub fn raw_mut(&self) -> RwLockWriteGuard<'_, Vec<T>>
pub fn into_raw(self) -> Vec<T>
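Sketch of accessing the underlying buffer: raw hands out an RwLock read guard (so keep its scope short), while into_raw consumes the tensor and returns the data:
let t = Tensor::vec(&[1.0, 2.0, 3.0]);
let first = t.raw()[0];     // read through the guard
let data = t.into_raw();    // take ownership of the buffer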
pub fn size(&self) -> usize
pub fn rank(&self) -> usize
pub fn contiguous(&self) -> Self
pub fn detach(&self) -> Self
pub fn zip<O, F>(&self, rhs: &Self, cb: F) -> Tensor<O> where O: Inner, F: Fn((T, T)) -> O
pub fn vectorize<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: FnMut(T) -> O
pub fn reduce<F>(&self, cb: F) -> Option<Self> where F: Fn(T, T) -> T
pub fn collapse<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> O
pub fn collapse_only<F>(&self, dim: isize, cb: F) -> Self where F: Fn(Self) -> Self
pub fn expand<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: Fn(T) -> Vec<O>
pub fn map<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> Tensor<O>
pub fn iter(&self, dim: isize) -> TensorSliceIterator<'_, T>
pub fn param_iter(&self) -> TensorIterator<'_, T>
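Hedged sketch of the element-wise combinators: vectorize maps every scalar, zip combines two tensors element by element, and reduce folds all elements (returning None for an empty tensor, presumably):
let a = Tensor::vec(&[1.0, 2.0, 3.0]);
let b = Tensor::vec(&[4.0, 5.0, 6.0]);
let doubled = a.vectorize(|x| x * 2.0);
let sums = a.zip(&b, |(x, y)| x + y);
let total = a.reduce(|acc, x| acc + x);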
pub fn item(&self) -> T
Examples found in repository?
examples/graph.rs (line 50)
fn load_model(filename: &str) {
  let graph = Graph::load(filename).unwrap();
  // Feed new data using #run.
  // Updating the entire graph in this way is more efficient
  // than calling #forward on each individual output.
  graph.run(&[
    &Tensor::vec(&[5.0, 6.0]).tracked(),
    &Tensor::randn(&[16]).tracked(),
  ]);
  // Get new output..
  let z = &graph.outputs[1];
  println!("z is now {}", z.item());
  // ..or train the model further
  let z = &graph.outputs[1];
  z.backward();
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
pub fn view(&self, shape: &[usize]) -> Self
pub fn extend_front(&self, size: usize) -> Self
pub fn transpose_vec(&self, extend_front: bool) -> Self
pub fn equal(&self, rhs: &Self) -> Tensor<bool>
pub fn split(&self, size: usize, dim: isize) -> Vec<Self>
pub fn chunks(&self, n: usize, dim: isize) -> Vec<Self>
pub fn to_vec(&self, dim: isize) -> Vec<Self>
pub fn shuffle(&self, dim: isize) -> Self
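Sketch of a few shape helpers, assuming view requires the element count to stay the same and split cuts the given dimension into pieces of the given size:
let t = Tensor::new(&[2, 4], vec![0.0; 8]);
let flat = t.view(&[8]);
let halves = t.split(2, 1); // presumably two tensors of shape [2, 2]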
impl<T: Numeric> Tensor<T>
pub fn ones(shape: &[usize]) -> Self
Examples found in repository?
examples/graph.rs (line 22)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();
  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);
  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);
  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}
More examples
examples/perceptron_eager.rs (line 63)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);
  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());
    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
examples/perceptron_graph.rs (line 42)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
pub fn zeros(shape: &[usize]) -> Self
Examples found in repository?
examples/perceptron_graph.rs (line 20)
fn dense_layer(input: &Variable<f32>, size: usize) -> Variable<f32> {
  let weights = (Tensor::randn(&[input.shape()[-1], size]) / size as f32).trained();
  let bias = Tensor::zeros(&[size]).trained();
  input.mm(&weights) + bias
}

fn perceptron(input: &Variable<f32>) -> Variable<f32> {
  let output = dense_layer(input, 16).relu();
  dense_layer(&output, 10).sigmoid()
}

fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
examples/basic.rs (line 7)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();
  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);
  // Compute gradients
  z.backward();
  println!("Gradient of z with respect to w: {}", w.grad().unwrap());
  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
pub fn arrange(shape: &[usize], start: T, step: T) -> Self
pub fn hot_encode(idx: usize, size: usize) -> Self
pub fn band(dims: &[usize], num_lower: isize, num_upper: isize) -> Self
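Hedged sketch: arrange is assumed to fill the shape with evenly spaced values starting at start, and hot_encode to build a one-hot vector of the given size:
let ramp = Tensor::arrange(&[5], 0.0, 1.0);  // presumably [0, 1, 2, 3, 4]
let hot = Tensor::<f32>::hot_encode(2, 5);   // presumably [0, 0, 1, 0, 0]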
pub fn feed(&self, other: &Self)
Examples found in repository?
examples/perceptron_graph.rs (line 46)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
pub fn add(&self, rhs: &Self) -> Self
pub fn sub(&self, rhs: &Self) -> Self
pub fn mul(&self, rhs: &Self) -> Self
pub fn div(&self, rhs: &Self) -> Self
pub fn rem(&self, rhs: &Self) -> Self
pub fn sum_over(&self, dim: isize) -> Self
pub fn gt(&self, rhs: &Self) -> Tensor<bool>
pub fn lt(&self, rhs: &Self) -> Tensor<bool>
pub fn top_k(&self, k: usize, dim: isize) -> Self
pub fn argmax<O: Integer + Unsigned>(&self, dim: isize) -> Tensor<O>
Collapses the given dimension using the index of its greatest value.
pub fn clamp(&self, min: T, max: T) -> Self
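Sketch of the comparison and reduction helpers above (the u8 output type for argmax is only an illustrative choice):
let t = Tensor::vec(&[0.1, 0.9, 0.4]);
let mask = t.gt(&Tensor::fill(&[3], 0.5)); // Tensor<bool>
let best = t.argmax::<u8>(0);              // index of the greatest value
let clipped = t.clamp(0.2, 0.8);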
pub fn cast<I: Numeric>(&self) -> Tensor<I>
Examples found in repository?
examples/perceptron_eager.rs (line 64)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);
  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());
    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
More examples
examples/perceptron_graph.rs (line 43)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
impl<T: Real> Tensor<T>
pub fn rand(shape: &[usize]) -> Self
Examples found in repository?
examples/perceptron_eager.rs (line 64)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);
  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());
    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
More examples
examples/perceptron_graph.rs (line 43)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
pub fn randn(shape: &[usize]) -> Self
Examples found in repository?
examples/basic.rs (line 6)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();
  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);
  // Compute gradients
  z.backward();
  println!("Gradient of z with respect to w: {}", w.grad().unwrap());
  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
examples/graph.rs (line 23)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();
  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);
  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);
  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}

fn load_model(filename: &str) {
  let graph = Graph::load(filename).unwrap();
  // Feed new data using #run.
  // Updating the entire graph in this way is more efficient
  // than calling #forward on each individual output.
  graph.run(&[
    &Tensor::vec(&[5.0, 6.0]).tracked(),
    &Tensor::randn(&[16]).tracked(),
  ]);
  // Get new output..
  let z = &graph.outputs[1];
  println!("z is now {}", z.item());
  // ..or train the model further
  let z = &graph.outputs[1];
  z.backward();
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
pub fn linspace(shape: &[usize], start: T, end: T) -> Self
pub fn bernoulli<O: Numeric>(&self) -> Tensor<O>
pub fn sample(&self) -> usize
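Hedged sketch of the random and range helpers: linspace is assumed to space values evenly between start and end, bernoulli to draw 0/1 values using each element as a probability, and sample to draw an index from the tensor interpreted as a distribution:
let probs = Tensor::linspace(&[5], 0.0, 1.0);
let draws: Tensor<u8> = probs.bernoulli();
let index = Tensor::vec(&[0.1, 0.2, 0.7]).sample();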
pub fn trained(&self) -> Variable<T>
Examples found in repository?
examples/basic.rs (line 6)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();
  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);
  // Compute gradients
  z.backward();
  println!("Gradient of z with respect to w: {}", w.grad().unwrap());
  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
examples/graph.rs (line 23)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();
  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);
  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);
  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}
pub fn tracked(&self) -> Variable<T>
Examples found in repository?
examples/basic.rs (line 10)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();
  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);
  // Compute gradients
  z.backward();
  println!("Gradient of z with respect to w: {}", w.grad().unwrap());
  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
More examples
examples/graph.rs (line 21)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();
  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);
  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);
  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}

fn load_model(filename: &str) {
  let graph = Graph::load(filename).unwrap();
  // Feed new data using #run.
  // Updating the entire graph in this way is more efficient
  // than calling #forward on each individual output.
  graph.run(&[
    &Tensor::vec(&[5.0, 6.0]).tracked(),
    &Tensor::randn(&[16]).tracked(),
  ]);
  // Get new output..
  let z = &graph.outputs[1];
  println!("z is now {}", z.item());
  // ..or train the model further
  let z = &graph.outputs[1];
  z.backward();
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
examples/perceptron_eager.rs (line 67)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);
  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());
    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
examples/perceptron_graph.rs (line 31)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
impl<T: Integer + Unsigned> Tensor<T>
pub fn one_hot<O: Numeric>(&self, size: usize) -> Tensor<O>
Examples found in repository?
examples/perceptron_eager.rs (line 64)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);
  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());
    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
More examples
examples/perceptron_graph.rs (line 43)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);
  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);
  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);
    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);
    // Recompute output and loss
    loss.forward();
    // Compute gradients
    loss.backward();
    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }
    // Reset gradients
    loss.reset();
  }
}
pub fn confusion(&self, labels: &Self) -> Self
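Sketch, assuming confusion tallies a confusion matrix from predicted versus actual class indices:
let predicted = Tensor::<u8>::vec(&[0, 1, 1, 2]);
let actual = Tensor::<u8>::vec(&[0, 1, 2, 2]);
let matrix = predicted.confusion(&actual);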
Trait Implementations
impl<T: Numeric> AddAssign<Tensor<T>> for Tensor<T>
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl<T: Real> AddAssign<Tensor<T>> for Variable<T>
fn add_assign(&mut self, rhs: Tensor<T>)
Performs the += operation.
impl<T: Inner> BaseHops<T> for Tensor<T>
fn at(&self, indices: &[isize]) -> Self
fn squeeze_only(&self, dim: isize) -> Self
fn squeeze_but(&self, dim: isize) -> Self
fn squeeze_first(&self, n: usize) -> Self
fn squeeze_all(&self) -> Self
fn unsqueeze_n(&self, n: usize, dim: isize) -> Self
fn extend(&self, rank: usize) -> Self
fn stack(rows: &[Self], dim: isize) -> Self
fn rows(rows: &[Self]) -> Self
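Hedged sketch of a few BaseHops helpers, assuming the trait is brought into scope alongside Tensor (e.g. via the crate's ops module):
let m = Tensor::rows(&[Tensor::vec(&[1.0, 2.0]), Tensor::vec(&[3.0, 4.0])]);
let first_row = m.at(&[0]);                   // presumably the first row
let expanded = m.unsqueeze_n(1, 0).squeeze_all();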
impl<T: Inner> BaseOps<T> for Tensor<T>
fn scalar(item: T) -> Self
fn shape(&self) -> &Shape
fn range(&self, ranges: &[Range<isize>]) -> Self
fn broadcast(&self, shape: &Shape, ignore_from: Option<isize>) -> Self
fn reshape(&self, shape: &[usize]) -> Self
fn squeeze(&self, squeezed: &[isize]) -> Self
fn unsqueeze(&self, dim: isize) -> Self
fn transpose(&self, dim1: isize, dim2: isize) -> Self
fn concat(&self, rhs: &Self, dim: isize) -> Self
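Sketch of a few BaseOps calls on a plain tensor, assuming the trait is in scope:
let t = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
let reshaped = t.reshape(&[3, 2]);
let transposed = t.transpose(0, 1);
let stacked = t.concat(&t, 0); // presumably shape [4, 3]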
impl<'de, T> Deserialize<'de> for Tensor<T> where T: Deserialize<'de> + Inner
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where __D: Deserializer<'de>
Deserialize this value from the given Serde deserializer.
impl<T: Numeric> DivAssign<Tensor<T>> for Tensor<T>
fn div_assign(&mut self, rhs: Self)
Performs the /= operation.
impl<T: Real> DivAssign<Tensor<T>> for Variable<T>
fn div_assign(&mut self, rhs: Tensor<T>)
Performs the /= operation.
impl<T: Numeric> MulAssign<Tensor<T>> for Tensor<T>
fn mul_assign(&mut self, rhs: Self)
Performs the *= operation.
impl<T: Real> MulAssign<Tensor<T>> for Variable<T>
fn mul_assign(&mut self, rhs: Tensor<T>)
Performs the *= operation.
impl<T: Numeric> NumericOps<T> for Tensor<T>
impl<T: Real> RealHops<T> for Tensor<T>
fn powf(&self, exp: I) -> Self
fn sqr(&self) -> Self
fn sqrt(&self) -> Self
fn exp(&self) -> Self
fn norm(&self, dim: isize) -> Self
fn dot(&self, rhs: &Self, dim: isize) -> Self
fn mean(&self, dim: isize) -> Self
fn variance(&self, dim: isize) -> Self
fn softmax(&self, dim: isize) -> Self
fn max_with(&self, rhs: &Self) -> Self
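Sketch of a few RealHops math helpers on a floating-point tensor, assuming the trait is in scope:
let x = Tensor::vec(&[1.0, 4.0, 9.0]);
let roots = x.sqrt();
let mean = x.mean(0);
let probs = x.softmax(0);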
impl<T: Numeric> SubAssign<Tensor<T>> for Tensor<T>
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
Auto Trait Implementations
impl<T> !RefUnwindSafe for Tensor<T>
impl<T> Send for Tensor<T>
impl<T> Sync for Tensor<T>
impl<T> Unpin for Tensor<T>
impl<T> !UnwindSafe for Tensor<T>
Blanket Implementations
impl<T> Serialize for T where T: Serialize + ?Sized
fn erased_serialize(&self, serializer: &mut dyn Serializer) -> Result<Ok, Error>
impl<T> Type for T where T: ?Sized
default fn dangling(t: <T as Type>::Meta) -> NonNull<T>
Create a dangling non-null *const Self with the provided Self::Meta.
default fn fatten(thin: *mut (), t: <T as Type>::Meta) -> *mut T
Create a *mut Self with the provided Self::Meta.
fn meta_type(self: *const Self) -> MetaType
Helper method describing whether a type is TraitObject, Slice or Concrete.
impl<T> Type for T
const METATYPE: MetaType = MetaType::Concrete
Enum describing whether a type is TraitObject, Slice or Concrete.
fn dangling(_t: <T as Type>::Meta) -> NonNull<T>
Create a dangling non-null *const Self with the provided Self::Meta.
fn fatten(thin: *mut (), _t: <T as Type>::Meta) -> *mut T
Create a *mut Self with the provided Self::Meta.
fn meta_type(self: *const Self) -> MetaType
Helper method describing whether a type is TraitObject, Slice or Concrete.