Struct microtensor::Variable

pub struct Variable<T: Real + 'static> { /* private fields */ }

Variables track the computational operations used to create them and allow for computing their gradient with respect to all input variables involved.

They are created by calling tracked or trained on any differentiable Tensor type.

Variables dereference to their underlying Tensor automatically for non-differentiable operations. Differentiable operations, on the other hand, will always return another Variable.
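A minimal sketch of that split (the f32 element type is inferred; the snippet only uses calls documented on this page):

// Differentiable ops return another Variable, growing the graph:
let w = Tensor::randn(&[2, 2]).trained();
let y = w.sqr().mean(0); // y is a Variable<f32>

// Non-differentiable queries pass through Deref to the inner Tensor:
let n = w.rank();        // Tensor::rank, reached via Deref
let t = w.tensor();      // borrow the underlying Tensor<f32> explicitly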

Implementations§

impl<T: Real + 'static> Variable<T>

pub fn id(&self) -> usize

pub fn tensor(&self) -> &Tensor<T>

pub fn grad(&self) -> Option<&Tensor<T>>

Examples found in repository: examples/basic.rs (line 15)
fn main() {
  // Define some tensors
  let x = Tensor::vec(&[1.0, 2.0]);
  let w = Tensor::randn(&[2, 8]).trained();
  let b = Tensor::zeros(&[8]).trained();

  // Do some computation
  let z = (x.tracked().mm(&w) + b - 0.5).sqr().mean(0);

  // Compute gradients
  z.backward();

  println!("Gradient of z with respect to w: {}", w.grad().unwrap());

  // Nudge w and b in order to minimize z
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
More examples

examples/graph.rs (line 56)
fn load_model(filename: &str) {
  let graph = Graph::load(filename).unwrap();

  // Feed new data using #run.
  // Updating the entire graph in this way is more efficient
  // than calling #forward on each individual output.
  graph.run(&[
    &Tensor::vec(&[5.0, 6.0]).tracked(),
    &Tensor::randn(&[16]).tracked(),
  ]);

  // Get new output..
  let z = &graph.outputs[1];
  println!("z is now {}", z.item());

  // ..or train the model further
  let z = &graph.outputs[1];
  z.backward();
  for mut param in z.parameters() {
    param -= param.grad().unwrap() * 0.01
  }
}
examples/perceptron_eager.rs (line 77)
fn main() {
  // Construct model that stores all trainable tensors explicitly
  let model = Perceptron::new(28 * 28);

  // Train with labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]);
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);

    // Run the model, creating a fresh computation graph in the process
    let output = model.run(&images.tracked());

    // Compute loss
    let loss = (&labels.tracked() - &output).sqr().mean(0);

    // Compute gradients
    loss.backward();

    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }

    // Reset gradients
    loss.reset();
  }
}
examples/perceptron_graph.rs (line 57)
fn main() {
  // Define model by performing all computations on a placeholder once
  let image_input = Tensor::zeros(&[32, 28 * 28]).tracked();
  let output = perceptron(&image_input);

  // Define the loss to be minimized
  let label_input = Tensor::zeros(&[32, 10]).tracked();
  let loss = (&label_input - &output).sqr().mean(0);

  // Train with some labeled samples
  let learning_rate = 0.01;
  for _ in 0..100 {
    // Insert real training data here
    let images = Tensor::ones(&[32, 28 * 28]).tracked();
    let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot(10);

    // Feed existing computation graph with new inputs
    image_input.feed(&images);
    label_input.feed(&labels);

    // Recompute output and loss
    loss.forward();

    // Compute gradients
    loss.backward();

    // Minimize loss by updating model parameters
    for mut param in loss.parameters() {
      param -= param.grad().unwrap() * learning_rate
    }

    // Reset gradients
    loss.reset();
  }
}
pub fn unary_op(&self, op: impl UnaryOp<T> + 'static) -> Self

pub fn binary_op(&self, op: impl BinaryOp<T> + 'static, rhs: &Self) -> Self

pub fn forward(&self)

Reevaluate this Variable’s graph to produce a new output.

Examples found in repository: examples/perceptron_graph.rs (line 50); see the full listing under grad above.

pub fn backward(&self)

Compute gradients across this Variable’s entire graph.

Examples found in repository: examples/basic.rs (line 13), examples/graph.rs (line 54), examples/perceptron_eager.rs (line 73) and examples/perceptron_graph.rs (line 53); see the full listings under grad above.

pub fn parameters(&self) -> Vec<Self>

List all trainable parameters in this Variable’s graph.

Examples found in repository: examples/basic.rs (line 18), examples/graph.rs (line 55), examples/perceptron_eager.rs (line 76) and examples/perceptron_graph.rs (line 56); see the full listings under grad above.
pub fn inputs(&self) -> Vec<Self>

pub fn reset(&self)

Set gradients to zero for this Variable’s entire graph.

Examples found in repository: examples/perceptron_eager.rs (line 81) and examples/perceptron_graph.rs (line 61); see the full listings under grad above.
pub fn check_gradients<F>(shape: &[usize], generator: F) -> T where F: Fn(&Self) -> Self,

Compute a function’s gradient with respect to a generated input numerically and compare it to the automatically derived solution.

Supply any function to check that it gets differentiated correctly.
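A minimal sketch (treating the returned value as a measure of the deviation between the numeric and the derived gradient, which is an assumption; any differentiable closure works):

// Check the gradient of x -> mean(x²) on a generated input of shape [2, 3]
let deviation = Variable::<f32>::check_gradients(&[2, 3], |x| x.sqr().mean(0));
println!("numeric vs. derived gradient deviation: {}", deviation);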


pub fn statistics(&self) -> (usize, usize, usize, usize, usize)

Methods from Deref<Target = Tensor<T>>§

pub fn raw(&self) -> RwLockReadGuard<'_, Vec<T>>

pub fn raw_mut(&self) -> RwLockWriteGuard<'_, Vec<T>>

pub fn size(&self) -> usize

pub fn rank(&self) -> usize

pub fn contiguous(&self) -> Self

pub fn detach(&self) -> Self

pub fn zip<O, F>(&self, rhs: &Self, cb: F) -> Tensor<O> where O: Inner, F: Fn((T, T)) -> O,

pub fn vectorize<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: FnMut(T) -> O,

pub fn reduce<F>(&self, cb: F) -> Option<Self> where F: Fn(T, T) -> T,

pub fn collapse<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> O,

pub fn collapse_only<F>(&self, dim: isize, cb: F) -> Self where F: Fn(Self) -> Self,

pub fn expand<O, F>(&self, cb: F) -> Tensor<O> where O: Inner, F: Fn(T) -> Vec<O>,

pub fn map<O, F>(&self, dim: isize, cb: F) -> Tensor<O> where O: Inner, F: Fn(Self) -> Tensor<O>,

pub fn iter(&self, dim: isize) -> TensorSliceIterator<'_, T>

pub fn param_iter(&self) -> TensorIterator<'_, T>

pub fn item(&self) -> T

Examples found in repository: examples/graph.rs (line 50); see the full listing under grad above.
pub fn view(&self, shape: &[usize]) -> Self

pub fn extend_front(&self, size: usize) -> Self

pub fn transpose_vec(&self, extend_front: bool) -> Self

pub fn equal(&self, rhs: &Self) -> Tensor<bool>

pub fn split(&self, size: usize, dim: isize) -> Vec<Self>

pub fn chunks(&self, n: usize, dim: isize) -> Vec<Self>

pub fn to_vec(&self, dim: isize) -> Vec<Self>

pub fn shuffle(&self, dim: isize) -> Self

pub fn feed(&self, other: &Self)

Examples found in repository: examples/perceptron_graph.rs (line 46); see the full listing under grad above.
pub fn add(&self, rhs: &Self) -> Self

pub fn sub(&self, rhs: &Self) -> Self

pub fn mul(&self, rhs: &Self) -> Self

pub fn div(&self, rhs: &Self) -> Self

pub fn rem(&self, rhs: &Self) -> Self

pub fn sum_over(&self, dim: isize) -> Self

pub fn gt(&self, rhs: &Self) -> Tensor<bool>

pub fn lt(&self, rhs: &Self) -> Tensor<bool>

pub fn top_k(&self, k: usize, dim: isize) -> Self

pub fn argmax<O: Integer + Unsigned>(&self, dim: isize) -> Tensor<O>

Collapse a dimension using the index of its greatest value.
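For instance (assuming u32 satisfies the Integer + Unsigned bounds):

let t = Tensor::vec(&[1.0, 3.0, 2.0]);
let idx = t.argmax::<u32>(0); // index of the greatest value along dimension 0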

pub fn clamp(&self, min: T, max: T) -> Self

pub fn cast<I: Numeric>(&self) -> Tensor<I>

Examples found in repository: examples/perceptron_eager.rs (line 64) and examples/perceptron_graph.rs (line 43); see the full listings under grad above.
pub fn bernoulli<O: Numeric>(&self) -> Tensor<O>

pub fn sample(&self) -> usize

pub fn trained(&self) -> Variable<T>

Examples found in repository: examples/perceptron_eager.rs (line 26)
  pub fn new(input_size: usize, size: usize) -> Self {
    Self {
      weights: (Tensor::randn(&[input_size, size]) / size as f32).trained(),
      bias: Tensor::zeros(&[size]).trained(),
    }
  }
More examples

examples/perceptron_graph.rs (line 19)
fn dense_layer(input: &Variable<f32>, size: usize) -> Variable<f32> {
  let weights = (Tensor::randn(&[input.shape()[-1], size]) / size as f32).trained();
  let bias = Tensor::zeros(&[size]).trained();
  input.mm(&weights) + bias
}
examples/basic.rs (line 6); see the full listing under grad above.
examples/graph.rs (line 23)
fn build_model(filename: &str) {
  // Have some inputs
  let x1 = Tensor::vec(&[1.0, 2.0]).tracked();
  let x2 = Tensor::ones(&[16]).tracked();
  let w = Tensor::randn(&[2, 16]).trained();

  // Do some computations
  let y = x1.mm(&w);
  let z = (&y * &x2).sum(0);

  // Pack the resulting graph into a Graph structure to make its inputs
  // and outputs explicit and arrange them in an order of your liking.
  let graph = Graph::new(&[x1, x2], &[y, z]);

  // Save entire computation graph to disc
  graph.save(filename).unwrap();
}

pub fn tracked(&self) -> Variable<T>

Examples found in repository: examples/basic.rs (line 10), examples/graph.rs (line 21), examples/perceptron_eager.rs (line 67) and examples/perceptron_graph.rs (line 31); see the full listings above.

pub fn accuracy<O: Real>(&self, labels: &Self) -> O
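This presumably scores predictions against labels and returns the fraction that match; a minimal sketch under that assumption, mirroring the perceptron examples above:

let predictions = Tensor::rand(&[32, 10]);
let labels = (Tensor::rand(&[32]) * 10.0).cast::<u8>().one_hot::<f32>(10);
let acc: f32 = predictions.accuracy(&labels); // assumed: fraction of matching rows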


pub fn one_hot<O: Numeric>(&self, size: usize) -> Tensor<O>

Examples found in repository: examples/perceptron_eager.rs (line 64) and examples/perceptron_graph.rs (line 43); see the full listings under grad above.
pub fn confusion(&self, labels: &Self) -> Self

pub fn signum(&self) -> Self

pub fn numeric<O: Numeric>(&self) -> Tensor<O>

pub fn and(&self, rhs: &Self) -> Self

pub fn or(&self, rhs: &Self) -> Self

pub fn all(&self) -> Option<bool>

pub fn any(&self) -> Option<bool>

pub fn when<O: Inner>(&self, either: Tensor<O>, or: Tensor<O>) -> Tensor<O>
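The signature suggests element-wise selection on a Tensor<bool>: where self is true, take the value from either, otherwise from or. A minimal sketch under that assumed semantics:

let mask = Tensor::vec(&[1.0, 2.0]).gt(&Tensor::vec(&[1.5, 1.5])); // Tensor<bool>
let picked = mask.when(Tensor::vec(&[-1.0, -1.0]), Tensor::vec(&[1.0, 1.0]));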

Trait Implementations§

impl<T: Real> Add<&Variable<T>> for &Variable<T>
type Output = Variable<T>
fn add(self, rhs: Self) -> Variable<T>

impl<T: Real> Add<&Variable<T>> for Variable<T>
type Output = Variable<T>
fn add(self, rhs: &Variable<T>) -> Variable<T>

impl Add<&Variable<f32>> for f32
type Output = Variable<f32>
fn add(self, rhs: &Variable<f32>) -> Variable<f32>

impl<T: Real> Add<T> for &Variable<T>
type Output = Variable<T>
fn add(self, rhs: T) -> Variable<T>

impl<T: Real> Add<T> for Variable<T>
type Output = Variable<T>
fn add(self, rhs: T) -> Variable<T>

impl<T: Real> Add<Variable<T>> for &Variable<T>
type Output = Variable<T>
fn add(self, rhs: Variable<T>) -> Variable<T>

impl<T: Real> Add<Variable<T>> for Variable<T>
type Output = Variable<T>
fn add(self, rhs: Self) -> Variable<T>

impl Add<Variable<f32>> for f32
type Output = Variable<f32>
fn add(self, rhs: Variable<f32>) -> Variable<f32>

impl<T: Real> AddAssign<Tensor<T>> for Variable<T>
fn add_assign(&mut self, rhs: Tensor<T>)

impl<T: Real> BaseHops<T> for Variable<T>
fn at(&self, indices: &[isize]) -> Self
fn squeeze_only(&self, dim: isize) -> Self
fn squeeze_but(&self, dim: isize) -> Self
fn squeeze_first(&self, n: usize) -> Self
fn squeeze_all(&self) -> Self
fn unsqueeze_n(&self, n: usize, dim: isize) -> Self
fn extend(&self, rank: usize) -> Self
fn stack(rows: &[Self], dim: isize) -> Self
fn rows(rows: &[Self]) -> Self

impl<T: Real> BaseOps<T> for Variable<T>
fn scalar(item: T) -> Self
fn shape(&self) -> &Shape
fn range(&self, ranges: &[Range<isize>]) -> Self
fn broadcast(&self, shape: &Shape, _ignore_from: Option<isize>) -> Self
fn reshape(&self, dims: &[usize]) -> Self
fn squeeze(&self, squeezed: &[isize]) -> Self
fn unsqueeze(&self, dim: isize) -> Self
fn transpose(&self, dim1: isize, dim2: isize) -> Self
fn concat(&self, rhs: &Self, dim: isize) -> Self

impl<T: Clone + Real + 'static> Clone for Variable<T>
fn clone(&self) -> Variable<T>
fn clone_from(&mut self, source: &Self)

impl<T: Debug + Real + 'static> Debug for Variable<T>
fn fmt(&self, f: &mut Formatter<'_>) -> Result

impl<T: Real> Deref for Variable<T>
type Target = Tensor<T>
fn deref(&self) -> &Self::Target

impl<T: Real> Display for Variable<T>
fn fmt(&self, f: &mut Formatter<'_>) -> Result

impl<T: Real> Div<&Variable<T>> for &Variable<T>
type Output = Variable<T>
fn div(self, rhs: Self) -> Variable<T>

impl<T: Real> Div<&Variable<T>> for Variable<T>
type Output = Variable<T>
fn div(self, rhs: &Variable<T>) -> Variable<T>

impl Div<&Variable<f32>> for f32
type Output = Variable<f32>
fn div(self, rhs: &Variable<f32>) -> Variable<f32>

impl<T: Real> Div<T> for &Variable<T>
type Output = Variable<T>
fn div(self, rhs: T) -> Variable<T>

impl<T: Real> Div<T> for Variable<T>
type Output = Variable<T>
fn div(self, rhs: T) -> Variable<T>

impl<T: Real> Div<Variable<T>> for &Variable<T>
type Output = Variable<T>
fn div(self, rhs: Variable<T>) -> Variable<T>

impl<T: Real> Div<Variable<T>> for Variable<T>
type Output = Variable<T>
fn div(self, rhs: Self) -> Variable<T>

impl Div<Variable<f32>> for f32
type Output = Variable<f32>
fn div(self, rhs: Variable<f32>) -> Variable<f32>

impl<T: Real> DivAssign<Tensor<T>> for Variable<T>
fn div_assign(&mut self, rhs: Tensor<T>)

impl<T: Real> Mul<&Variable<T>> for &Variable<T>
type Output = Variable<T>
fn mul(self, rhs: Self) -> Variable<T>

impl<T: Real> Mul<&Variable<T>> for Variable<T>
type Output = Variable<T>
fn mul(self, rhs: &Variable<T>) -> Variable<T>

impl Mul<&Variable<f32>> for f32
type Output = Variable<f32>
fn mul(self, rhs: &Variable<f32>) -> Variable<f32>

impl<T: Real> Mul<T> for &Variable<T>
type Output = Variable<T>
fn mul(self, rhs: T) -> Variable<T>

impl<T: Real> Mul<T> for Variable<T>
type Output = Variable<T>
fn mul(self, rhs: T) -> Variable<T>

impl<T: Real> Mul<Variable<T>> for &Variable<T>
type Output = Variable<T>
fn mul(self, rhs: Variable<T>) -> Variable<T>

impl<T: Real> Mul<Variable<T>> for Variable<T>
type Output = Variable<T>
fn mul(self, rhs: Self) -> Variable<T>

impl Mul<Variable<f32>> for f32
type Output = Variable<f32>
fn mul(self, rhs: Variable<f32>) -> Variable<f32>

impl<T: Real> MulAssign<Tensor<T>> for Variable<T>
fn mul_assign(&mut self, rhs: Tensor<T>)

impl<T: Real> Neg for &Variable<T>
type Output = Variable<T>
fn neg(self) -> Self::Output

impl<T: Real> Neg for Variable<T>
type Output = Variable<T>
fn neg(self) -> Self::Output

impl<T: Real> NumericOps<T> for Variable<T>
fn sum(&self, dim: isize) -> Variable<T>
fn mm(&self, rhs: &Self) -> Self
fn min(&self, dim: isize) -> Self
fn max(&self, dim: isize) -> Self
fn max_over(&self, _dim: isize) -> Self

impl<T: Real> PartialEq<Variable<T>> for Variable<T>
fn eq(&self, rhs: &Self) -> bool
fn ne(&self, other: &Rhs) -> bool

impl<T: Real> RealHops<T> for Variable<T>
fn powf(&self, exp: I) -> Self
fn sqr(&self) -> Self
fn sqrt(&self) -> Self
fn exp(&self) -> Self
fn norm(&self, dim: isize) -> Self
fn dot(&self, rhs: &Self, dim: isize) -> Self
fn mean(&self, dim: isize) -> Self
fn variance(&self, dim: isize) -> Self
fn softmax(&self, dim: isize) -> Self
fn max_with(&self, rhs: &Self) -> Self

impl<T: Real> RealOps<T> for Variable<T>
fn pow(&self, rhs: &Self) -> Variable<T>
fn sin(&self) -> Self
fn cos(&self) -> Self
fn log(&self) -> Self
fn relu(&self) -> Variable<T>
fn sigmoid(&self) -> Variable<T>

impl<T: Real> Rem<&Variable<T>> for &Variable<T>
type Output = Variable<T>
fn rem(self, rhs: Self) -> Variable<T>

impl<T: Real> Rem<&Variable<T>> for Variable<T>
type Output = Variable<T>
fn rem(self, rhs: &Variable<T>) -> Variable<T>

impl Rem<&Variable<f32>> for f32
type Output = Variable<f32>
fn rem(self, rhs: &Variable<f32>) -> Variable<f32>

impl<T: Real> Rem<T> for &Variable<T>
type Output = Variable<T>
fn rem(self, rhs: T) -> Variable<T>

impl<T: Real> Rem<T> for Variable<T>
type Output = Variable<T>
fn rem(self, rhs: T) -> Variable<T>

impl<T: Real> Rem<Variable<T>> for &Variable<T>
type Output = Variable<T>
fn rem(self, rhs: Variable<T>) -> Variable<T>

impl<T: Real> Rem<Variable<T>> for Variable<T>
type Output = Variable<T>
fn rem(self, rhs: Self) -> Variable<T>

impl Rem<Variable<f32>> for f32
type Output = Variable<f32>
fn rem(self, rhs: Variable<f32>) -> Variable<f32>

impl<T: Real> SignedOps<T> for Variable<T>
fn abs(&self) -> Variable<T>

impl<T: Real> Sub<&Variable<T>> for &Variable<T>
type Output = Variable<T>
fn sub(self, rhs: Self) -> Variable<T>

impl<T: Real> Sub<&Variable<T>> for Variable<T>
type Output = Variable<T>
fn sub(self, rhs: &Variable<T>) -> Variable<T>

impl Sub<&Variable<f32>> for f32
type Output = Variable<f32>
fn sub(self, rhs: &Variable<f32>) -> Variable<f32>

impl<T: Real> Sub<T> for &Variable<T>
type Output = Variable<T>
fn sub(self, rhs: T) -> Variable<T>

impl<T: Real> Sub<T> for Variable<T>
type Output = Variable<T>
fn sub(self, rhs: T) -> Variable<T>

impl<T: Real> Sub<Variable<T>> for &Variable<T>
type Output = Variable<T>
fn sub(self, rhs: Variable<T>) -> Variable<T>

impl<T: Real> Sub<Variable<T>> for Variable<T>
type Output = Variable<T>
fn sub(self, rhs: Self) -> Variable<T>

impl Sub<Variable<f32>> for f32
type Output = Variable<f32>
fn sub(self, rhs: Variable<f32>) -> Variable<f32>

impl<T: Real> SubAssign<Tensor<T>> for Variable<T>
fn sub_assign(&mut self, rhs: Tensor<T>)

Auto Trait Implementations§

impl<T> !RefUnwindSafe for Variable<T>

impl<T> Send for Variable<T>

impl<T> Sync for Variable<T>

impl<T> Unpin for Variable<T>

impl<T> !UnwindSafe for Variable<T>

Blanket Implementations§

impl<T> Any for T where T: 'static + ?Sized,
fn type_id(&self) -> TypeId
Gets the TypeId of self.

impl<T> Borrow<T> for T where T: ?Sized,
fn borrow(&self) -> &T
Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T where T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.

impl<T> From<T> for T
fn from(t: T) -> T
Returns the argument unchanged.

impl<T, U> Into<U> for T where U: From<T>,
fn into(self) -> U
Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> Same<T> for T
type Output = T
Should always be Self.

impl<T> ToOwned for T where T: Clone,
type Owned = T
fn to_owned(&self) -> T
Creates owned data from borrowed data, usually by cloning.
fn clone_into(&self, target: &mut T)
Uses borrowed data to replace owned data, usually by cloning.

impl<T> ToString for T where T: Display + ?Sized,
default fn to_string(&self) -> String
Converts the given value to a String.

impl<T, U> TryFrom<U> for T where U: Into<T>,
type Error = Infallible
fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

impl<T, U> TryInto<U> for T where U: TryFrom<T>,
type Error = <U as TryFrom<T>>::Error
fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

impl<T> Type for T where T: ?Sized,
const METATYPE: MetaType
type Meta: 'static
default fn meta(self: *const T) -> <T as Type>::Meta
default fn data(self: *const T) -> *const ()
default fn data_mut(self: *mut T) -> *mut ()
default fn dangling(t: <T as Type>::Meta) -> NonNull<T>
default fn fatten(thin: *mut (), t: <T as Type>::Meta) -> *mut T
fn meta_type(self: *const Self) -> MetaType

impl<T> Type for T
const METATYPE: MetaType = MetaType::Concrete
type Meta = Concrete
fn meta(self: *const T) -> <T as Type>::Meta
fn data(self: *const T) -> *const ()
fn data_mut(self: *mut T) -> *mut ()
fn dangling(_t: <T as Type>::Meta) -> NonNull<T>
fn fatten(thin: *mut (), _t: <T as Type>::Meta) -> *mut T
fn meta_type(self: *const Self) -> MetaType

impl<V, T> VZip<V> for T where V: MultiLane<T>,
fn vzip(self) -> V

impl<T> Inner for T where T: PartialEq<T> + Clone + Send + Sync + Debug,

impl<T, Rhs, Output> NumOps<Rhs, Output> for T where T: Sub<Rhs, Output = Output> + Mul<Rhs, Output = Output> + Div<Rhs, Output = Output> + Add<Rhs, Output = Output> + Rem<Rhs, Output = Output>,

impl<T, Base> RefNum<Base> for T where T: NumOps<Base, Base> + for<'r> NumOps<&'r Base, Base>,