Struct autograd::Graph

pub struct Graph<F: Float> { /* fields omitted */ }

Generator of Tensor objects.

Use autograd::with to instantiate this.

use autograd as ag;

ag::with(|graph1: &mut ag::Graph<f32>| {
    // Creating some nodes (tensors) in this graph.
    let a = graph1.zeros(&[2, 3]);
    let b = graph1.ones(&[2, 3]);

    // Evaluate the tensors
    (a + b).eval(&[]);

    // Creating another scope (graph).
    ag::with(|graph2: &mut ag::Graph<f32>| {
        // `c` is valid only in graph2.
        let c = graph2.zeros(&[3, 4]);

        // Cross-scope access to anything derived from a `Graph` doesn't compile for now.

        // graph1.zeros(&[2, 3])
        // ^^^^^^ invalid access for `graph1`

        // a + c
        // ^ invalid access for `a`, which belongs to `graph1`
    });
    // tensors in graph2 are dropped here.
});
// tensors in graph1 are dropped here.

Implementations

impl<'graph, F: Float> Graph<F>[src]

pub fn grad<A, B>(&'graph self, ys_: &[A], xs: &[B]) -> Vec<Tensor<'graph, F>> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns symbolic gradient tensors of xs, in the same order as xs.

Arguments

  • ys - Targets of differentiation; each can have an arbitrary shape.
  • xs - Tensors with respect to which ys are differentiated.

Example

Partial derivatives of z = 2x^2 + 3y + 1.

use ndarray;
use autograd as ag;

ag::with(|g| {
    let x = g.placeholder(&[]);
    let y = g.placeholder(&[]);
    let z = 2.*x*x + 3.*y + 1.;

    // dz/dy
    let gy = g.grad(&[z], &[y])[0];
    // dz/dx
    let gx = g.grad(&[z], &[x])[0];

    // d²z/dx² (differentiates `z` twice)
    let ggx = g.grad(&[gx], &[x])[0];

    // evaluation of symbolic gradients
    assert_eq!(3., gy.eval(&[]).unwrap()[ndarray::IxDyn(&[])]);
    assert_eq!(4., ggx.eval(&[]).unwrap()[ndarray::IxDyn(&[])]);

    // dz/dx requires to fill the placeholder `x`
    assert_eq!(8., gx.eval(&[x.given(ndarray::arr0(2.).view())]).unwrap()[ndarray::IxDyn(&[])]);
});

pub unsafe fn grad_with_default<A, B, C>(
    &'graph self,
    ys: &[A],
    xs: &[B],
    ys_grads: &[C]
) -> Vec<Tensor<'graph, F>> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy,
    C: AsRef<Tensor<'graph, F>> + Copy
[src]

Computes xs's gradients given ys's already-known gradients.

Almost the same as grad, except that you can pass ys's already-known gradients. If ys_grads are tensors filled with 1s, this function is equivalent to grad.

NOTE: Make sure that ys_grads[i].shape matches ys[i].shape; otherwise the behavior is undefined.

Arguments

  • ys - Targets of differentiation.
  • xs - Tensors with respect to which ys are differentiated.
  • ys_grads - Already known gradients of ys.

Returns

Symbolic gradient tensors of xs, in the same order as xs.
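
For example, seeding ys_grads with ones should reproduce grad (a minimal sketch; the shapes here are illustrative):

use autograd as ag;

ag::with(|g| {
   let x = g.placeholder(&[2]);
   let y = 2. * x;
   // A ones tensor matching `y`'s shape is the neutral seed, so this reproduces `grad`.
   let gx = unsafe { g.grad_with_default(&[y], &[x], &[g.ones(&[2])]) };
   // gx[0] represents dy/dx, i.e. 2 for each element.
});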

pub fn jacobians<A, B>(
    &'graph self,
    y_: A,
    xs_: &[B],
    objective_len: usize
) -> Vec<Tensor<'graph, F>> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Computes Jacobians for the given variables.

Arguments

  • y - Target of differentiation.
  • xs - Tensors with respect to which y is differentiated.
  • objective_len - Flattened size of y.

Returns

Jacobians for each variable. Each one is a matrix of shape (objective_len, x_size), where x_size is the flattened size of the corresponding x.

Note: the current implementation works correctly but is unoptimized for serious use.

use autograd as ag;
use ag::tensor::Variable;

ag::with(|g| {
   let rng = ag::ndarray_ext::ArrayRng::<f32>::default();
   let a = g.variable(rng.standard_normal(&[4, 2]));
   let b = g.variable(rng.standard_normal(&[2, 3]));
   let c = g.matmul(a, b);
   let j = g.jacobians(c, &[a, b], 4*3);

   assert_eq!(j[0].eval(&[]).unwrap().shape(), &[4*3, 4*2]);
   assert_eq!(j[1].eval(&[]).unwrap().shape(), &[4*3, 2*3]);
});

pub fn _hessian_vector_product<A, B, C>(
    &'graph self,
    ys: &[A],
    xs: &[B],
    vectors: &[C]
) -> Vec<Tensor<'graph, F>> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy,
    C: AsRef<Tensor<'graph, F>> + Copy
[src]

(Experimental) Computes the Hessian-vector product.
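
A minimal sketch of the intended usage (for y = Σx² the Hessian is 2I, so the product should equal 2v):

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.placeholder(&[3]);
   let y = g.reduce_sum_to_scalar(g.square(x));
   let v = g.ones(&[3]);
   // Computes H·v, where H is the Hessian of `y` w.r.t. `x`.
   let hvp = g._hessian_vector_product(&[y], &[x], &[v]);
});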

pub fn stop_gradient<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Stops gradient propagation.

Guarantees that no gradient propagates through this node to the tensors behind it during gradient computation.
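
A minimal sketch:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.placeholder(&[]);
   let frozen = g.stop_gradient(x);
   let z = x * frozen;
   // The stopped branch is treated as a constant, so only the
   // first factor contributes to the gradient: dz/dx == frozen.
   let gx = g.grad(&[z], &[x])[0];
});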

pub fn placeholder(&'graph self, shape_: &[isize]) -> Tensor<'graph, F>[src]

Creates a placeholder tensor.

Behaves like TensorFlow's placeholder. Each shape_[i] must be positive, or -1, which indicates a dynamic dimension.

use ndarray;
use autograd as ag;

ag::with(|g| {
    let x = g.placeholder(&[2]);

    // Fills placeholder, then eval
    let arr = ndarray::array![1., 1.].into_dyn();
    assert_eq!(x.eval(&[x.given(arr.view())]), Ok(arr));
});

pub fn shape<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns a Tensor representation of the input tensor's shape

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3]);
   let s = g.shape(x);
   assert_eq!(&[2., 3.], s.eval(&[]).unwrap().as_slice().unwrap());
});

pub fn size<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the (symbolic) size of the input tensor

use ndarray;
use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[4, 3]);
   let b = g.size(a);

   assert_eq!(12., b.eval(&[]).unwrap()[ndarray::IxDyn(&[])]);
});

pub fn rank<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the (symbolic) rank of the input tensor

use ndarray;
use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3, 4]);
   let r = g.rank(x);
   assert_eq!(3., r.eval(&[]).unwrap()[ndarray::IxDyn(&[])]);
});

pub fn sin<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise sine

pub fn cos<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise cosine

pub fn tan<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise tangent

pub fn asin<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise arcsin

pub fn acos<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise arccos

pub fn atan<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise arctan

pub fn sinh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic sine

pub fn cosh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic cosine

pub fn tanh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic tangent

pub fn asinh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic arcsin

pub fn acosh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic arccos

pub fn atanh<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise hyperbolic arctan

pub fn identity<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Identity function without copy.

pub fn add<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise addition.

This can be replaced with the + operator on Tensors.

pub fn sub<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Element-wise subtraction.

This can be replaced with the - operator on Tensors.

pub fn mul<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise multiplication.

This can be replaced with the * operator on Tensors.

pub fn div<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise division.

This can be replaced with the / operator on Tensors.

pub fn sqrt<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise sqrt

pub fn pow<A>(&'graph self, x: A, a: F) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise pow

pub fn ln<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise natural (base e) logarithm

pub fn log2<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise base 2 logarithm

pub fn log10<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise base 10 logarithm

pub fn exp<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise natural (base e) exponential

pub fn exp2<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise base 2 exponential

pub fn exp10<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise base 10 exponential

pub fn maximum<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the max of x and y (i.e. x > y ? x : y) element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![1., 2., 3.]);
   let b = g.constant(array![3., 2., 1.]);
   let c = g.maximum(a, b);
   assert_eq!(c.eval(&[]), Ok(array![3., 2., 3.].into_dyn()));
});

pub fn minimum<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the min of x and y (i.e. x > y ? y : x) element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![1., 2., 3.]);
   let b = g.constant(array![3., 2., 1.]);
   let c = g.minimum(a, b);
   assert_eq!(c.eval(&[]), Ok(array![1., 2., 1.].into_dyn()));
});

pub fn add_n<A>(&'graph self, xs: &[A]) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Adds all input tensors, element-wise.

All the input tensors must have the same shape.

use ndarray::array;
use autograd as ag;

ag::with(|g| {
   let a = g.ones(&[2, 2]);
   let b = g.ones(&[2, 2]);
   let c = g.ones(&[2, 2]);
   let d = g.add_n(&[a, b, c]);

   assert_eq!(d.eval(&[]).unwrap().shape(), &[2, 2]);
   assert_eq!(d.eval(&[]), Ok(array![[3., 3.], [3., 3.]].into_dyn()));
});

pub fn equal<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor.

If a[i] == b[i] then return-value[i] is 1, else 0.

Panics

When broadcast is impossible

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![1., 2., 3.]);
   let b = g.constant(array![3., 2., 1.]);
   let c = g.equal(a, b);
   assert_eq!(c.eval(&[]), Ok(ndarray::arr1(&[0., 1., 0.]).into_dyn()));
});

pub fn not_equal<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor.

If a[i] != b[i] then return-value[i] is 1, else 0.

Panics

When broadcast is impossible

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![1., 2., 3.]);
   let b = g.constant(array![3., 2., 1.]);
   let c = g.not_equal(a, b);
   assert_eq!(c.eval(&[]), Ok(array![1., 0., 1.].into_dyn()));
});

pub fn argmax<A>(
    &'graph self,
    x: A,
    axis: isize,
    keep_dim: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Takes argmax along specified axis.

axis can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[3., 4.], [6., 5.]]);
   let y = g.argmax(x, 1, false);

   assert_eq!(y.eval(&[]), Ok(array![1., 0.].into_dyn()));
});

pub fn expand_dims<A, AT>(&'graph self, x: A, axes: &AT) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Expands the shape (inserts axes).

Each axis can be negative.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[3]);
   let b = g.expand_dims(a, &[0, 2]);
   assert_eq!(b.eval(&[]).unwrap().shape(), &[1, 3, 1]);
});

pub fn squeeze<A, AT>(&'graph self, x: A, axes: &AT) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Removes the specified dimensions.

Each axis can be negative.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[1, 3, 1]);
   let b = g.squeeze(a, &[0, 2]);
   assert_eq!(b.eval(&[]).unwrap().shape(), &[3]);
})

pub fn tile<A>(&'graph self, x: A, axis: isize, num: usize) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Tiles the input tensor along specified axis.

Tiles input tensor num times along axis. axis can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 2.], [3., 3.]]);
   let y = g.tile(x, 0, 2);

   assert_eq!(
       y.eval(&[]),
       Ok(array![[2., 2.], [3., 3.], [2., 2.], [3., 3.]].into_dyn())
   );
});

pub fn clip<A>(&'graph self, x: A, min: F, max: F) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Limits all elements of x so as to be within [min, max]

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![2., 4., 6.]);
   let y = g.clip(x, 3., 5.);
   assert_eq!(y.eval(&[]), Ok(ndarray::arr1(&[3., 4., 5.]).into_dyn()));
});

pub fn reduce_max<A, AT>(
    &'graph self,
    x: A,
    axes: &AT,
    keep_dims: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Takes max along specified axes.

Each element of axes can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_max(&x, &[0], false);
   assert_eq!(y.eval(&[]), Ok(array![3., 4.].into_dyn()));
});

pub fn reduce_min<A, AT>(
    &'graph self,
    x: A,
    axes: &AT,
    keep_dims: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Takes min along specified axes.

Each element of axes can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_min(&x, &[0], false);
   assert_eq!(y.eval(&[]), Ok(array![2., 1.].into_dyn()));
});

pub fn reduce_sum_to_scalar<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Sum up all the elements to a scalar value (0-D Tensor).

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_sum_to_scalar(&x);
   assert_eq!(y.eval(&[]), Ok(ndarray::arr0(10.).into_dyn()));
});

pub fn reduce_sum<A, AT>(
    &'graph self,
    x: A,
    axes: &AT,
    keep_dims: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Takes summation along specified axes.

Elements of axes can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_sum(&x, &[1], false);

   assert_eq!(y.eval(&[]), Ok(array![6., 4.].into_dyn()));
});

pub fn reduce_mean<A, AT>(
    &'graph self,
    x: A,
    axes: &AT,
    keep_dims: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Takes mean along specified axes.

Elements of axes can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_mean(x, &[1], false);
   assert_eq!(y.eval(&[]), Ok(array![3., 2.].into_dyn()));
});

pub fn reduce_prod<A, AT>(
    &'graph self,
    x: A,
    axes: &AT,
    keep_dims: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Takes product along specified axes.

Elements of axes can be negative.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let x = g.constant(array![[2., 4.], [3., 1.]]);
   let y = g.reduce_prod(&x, &[1], false);
   assert_eq!(y.eval(&[]), Ok(array![8., 3.].into_dyn()));
});

pub fn reshape<A, AT>(&'graph self, x: A, shape: &AT) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Reshapes the input tensor without copy.

At most one element of shape can be -1; that dimension is inferred from the remaining ones.

use ndarray;
use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[3, 2, 2]);
   let y = g.reshape(&x, &[3, -1]);
   assert_eq!(y.eval(&[]), Ok(ag::ndarray_ext::zeros::<f32>(&[3, 4])));
});

pub fn flatten<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Flattens the input tensor into a rank-1 tensor (vector) without copy.

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[3, 2, 2]);
   let z = g.flatten(x);
   assert_eq!(z.eval(&[]).unwrap().shape(), &[12]);
});

pub fn sign<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns -1 if x < 0, 0 if x==0, 1 if x > 0, element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![-5., 4.5, 0.]);
   let b = g.sign(a);
   assert_eq!(
       b.eval(&[]).unwrap().as_slice().unwrap(),
       &[-1., 1., 0.]
   );
});

pub fn abs<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the absolute value, element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![-0.2, 0., 0.2]);
   let b = g.abs(a);
   assert_eq!(
       b.eval(&[]),
       Ok(ndarray::arr1(&[0.2, 0., 0.2]).into_dyn())
   );
});

pub fn floor<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the largest integer less than or equal to a number, element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]);
   let b = g.floor(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![-2., -2., -1.,  0.,  1.,  1.,  2.].into_dyn())
   );
});

pub fn neg<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Performs elementwise negation (the unary - operation).

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![2., 3.]);
   let b = g.neg(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![-2., -3.].into_dyn())
   );
});

pub fn square<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Takes square of the input.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![2., 3.]);
   let b = g.square(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![4., 9.].into_dyn())
   );
});

pub fn inv<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns 1/x, element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![2.]);
   let b = g.inv(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![0.5].into_dyn())
   );
});

pub fn inv_sqrt<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns 1/sqrt(x), element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![4.]);
   let b = g.inv_sqrt(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![0.5].into_dyn())
   );
});

pub fn ceil<A>(&'graph self, a: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Returns the smallest integer greater than or equal to a number, element-wise.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]);
   let b = g.ceil(a);
   assert_eq!(
       b.eval(&[]),
       Ok(array![-1., -1., -0.,  1.,  2.,  2.,  2.].into_dyn())
   );

});

pub fn greater<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor: 1 where a[i] > b[i], else 0.

Panics

When broadcast is impossible
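
A minimal sketch following the pattern of the equal example above:

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![1., 2., 3.]);
   let b = g.constant(array![3., 2., 1.]);
   let c = g.greater(a, b);
   // 1 where a[i] > b[i], else 0.
   assert_eq!(c.eval(&[]), Ok(array![0., 0., 1.].into_dyn()));
});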

pub fn greater_equal<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor: 1 where a[i] >= b[i], else 0.

Panics

When broadcast is impossible

pub fn lesser<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor: 1 where a[i] < b[i], else 0.

Panics

When broadcast is impossible

pub fn lesser_equal<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Compares two tensors element-wise and returns a binary tensor: 1 where a[i] <= b[i], else 0.

Panics

When broadcast is impossible

pub fn sigmoid<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise logistic sigmoid function.

pub fn elu<A>(&'graph self, x: A, alpha: F) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise exponential linear unit.

See https://arxiv.org/abs/1511.07289

pub fn relu<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise rectified linear unit.

pub fn leaky_relu<A>(&'graph self, x: A, alpha: F) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise leaky relu.

Typically, alpha is around 0.1 to 0.2.

See http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf

pub fn softplus<A>(&'graph self, x: A) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Elementwise softplus.

pub fn reduce_logsumexp<A>(
    &'graph self,
    x: A,
    axis: isize,
    keep_dim: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>>, 
[src]

Computes log(sum(exp(x))) along specified axis.

axis can be negative.
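
A minimal shape-level sketch:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3]);
   let y = g.reduce_logsumexp(x, 1, false);
   // Each row reduces to log(3 * exp(0)) = ln 3.
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2]);
});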

pub fn log_softmax<A>(&'graph self, x: A, axis: isize) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Log softmax function.

Computes softmax(x) along the specified axis and takes its logarithm. axis can be negative.
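
A minimal shape-level sketch:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3]);
   let y = g.log_softmax(x, 1);
   // Uniform logits: every entry should be ln(1/3).
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 3]);
});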

pub fn softmax<A>(&'graph self, x: A, axis: isize) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Computes softmax along specified axis

axis can be negative.
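
A minimal shape-level sketch:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3]);
   let y = g.softmax(x, 1);
   // Uniform logits: each row should sum to 1 with entries of 1/3.
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 3]);
});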

pub fn sigmoid_cross_entropy<A, B>(
    &'graph self,
    y: A,
    t: B
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Computes binary_cross_entropy(sigmoid(y), t).

This function is preferable to composing those functions directly because it prevents underflow in log(sigmoid).

Arguments

  • y - Tensor with arbitrary shape
  • t - Ground-truth Tensor with the same shape as y

Panics

When y.shape != t.shape.

Returns

Loss tensor with the same shape as the inputs
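
A minimal shape-level sketch (assuming the output shape documented above):

use autograd as ag;

ag::with(|g| {
   let y: ag::Tensor<f32> = g.zeros(&[2, 3]);   // logits
   let t: ag::Tensor<f32> = g.ones(&[2, 3]);    // binary targets
   let loss = g.sigmoid_cross_entropy(y, t);
   assert_eq!(loss.eval(&[]).unwrap().shape(), &[2, 3]);
});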

pub fn softmax_cross_entropy<A, B>(
    &'graph self,
    y: A,
    t: B
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Computes categorical_cross_entropy(softmax(y), t).

This function is preferable to composing those functions directly because it prevents underflow in log(softmax).

Arguments

  • y - Tensor with shape (batch_size, num_classes)
  • t - Tensor with shape (batch_size, num_classes)

Returns

Loss tensor with shape (batch_size, 1)
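
A minimal sketch (assuming the (batch_size, 1) output shape documented above):

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let y = g.constant(array![[0.1, 0.9], [0.8, 0.2]]);  // logits
   let t = g.constant(array![[0., 1.], [1., 0.]]);      // one-hot targets
   let loss = g.softmax_cross_entropy(y, t);
   assert_eq!(loss.eval(&[]).unwrap().shape(), &[2, 1]);
});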

pub fn sparse_softmax_cross_entropy<A, B>(
    &'graph self,
    y: A,
    t: B
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

A variant of softmax_cross_entropy.

The behavior of this function is the same as softmax_cross_entropy, except that t is a batch of ground-truth label ids rather than a batch of one-hot distributions.

Arguments

  • y - Tensor with shape (batch_size, num_classes)
  • t - Tensor with shape (batch_size,) or (batch_size, 1)

Returns

Loss tensor with shape (batch_size, 1)
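
A minimal sketch (assuming the (batch_size, 1) output shape documented above):

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let y = g.constant(array![[0.1, 0.9], [0.8, 0.2]]);  // logits
   let t = g.constant(array![1., 0.]);                  // label ids, shape (batch_size,)
   let loss = g.sparse_softmax_cross_entropy(y, t);
   assert_eq!(loss.eval(&[]).unwrap().shape(), &[2, 1]);
});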

pub fn matmul<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Matrix multiplication.

Both a and b must be rank-2 tensors (matrices).

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[4, 2]);
   let b: ag::Tensor<f32> = g.zeros(&[2, 3]);
   let c = g.matmul(a, b);
   assert_eq!(c.eval(&[]).unwrap().shape(), &[4, 3]);
});

This function supports only f32 and f64.

pub fn tensordot<A, B, AT1, AT2>(
    &'graph self,
    a: A,
    b: B,
    a_axes: &AT1,
    b_axes: &AT2
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy,
    AT1: AsTensor<'graph, F>,
    AT2: AsTensor<'graph, F>, 
[src]

Computes tensor-dot-product (tensor contraction) along specified axes.

Arguments

  • a - First input tensor
  • b - Second input tensor
  • a_axes - a's contraction axes
  • b_axes - b's contraction axes

NOTE:

  • The lengths of a_axes and b_axes must match.
  • Each axis number can be negative.
  • Supports only f32 and f64.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[3, 4, 5]);
   let b: ag::Tensor<f32> = g.zeros(&[4, 3, 2]);
   let c = g.tensordot(a, b, &[1, 0], &[0, 1]);
   assert_eq!(c.eval(&[]).unwrap().shape(), &[5, 2]);
});

For detailed description, see https://docs.scipy.org/doc/numpy/reference/generated/numpy.tensordot.html.

pub fn batch_matmul_t<A, B>(
    &'graph self,
    a: A,
    b: B,
    trans_a: bool,
    trans_b: bool
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Batched matrix multiplication with optional transposition of the inputs.

The ranks of a and b must be equal.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[2, 3, 2, 4]);
   let b: ag::Tensor<f32> = g.zeros(&[2, 3, 2, 3]);
   let c = g.batch_matmul_t(a, b, true, false);
   assert_eq!(c.eval(&[]).unwrap().shape(), &[2, 3, 4, 3]);
});

This function supports only f32 and f64. For detailed description, see https://www.tensorflow.org/api_docs/python/tf/matmul

pub fn batch_matmul<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Batched matrix multiplication.

The ranks of a and b must be equal.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.ones(&[2, 3, 4, 2]);
   let b: ag::Tensor<f32> = g.ones(&[2, 3, 2, 3]);
   let c = g.batch_matmul(a, b);
   assert_eq!(c.eval(&[]).unwrap().shape(), &[2, 3, 4, 3]);
});

This function supports only f32 and f64. For detailed description, see https://www.tensorflow.org/api_docs/python/tf/matmul

pub fn setdiff1d<A, B>(&'graph self, a: A, b: B) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Takes the set difference of two tensors.

Returns the sorted, unique values in a that are not in b.

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let a = g.constant(array![4., 1., 5., 2., 3., 6.]);
   let b = g.constant(array![[2., 3.], [1., 4.]]);
   let c = g.setdiff1d(a, b);
   assert_eq!(
       c.eval(&[]),
       Ok(ndarray::arr1(&[5., 6.]).into_dyn())
   )
});

pub fn transpose<A, AT>(&'graph self, x: A, axes: &AT) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Permutes dimensions without copy.

Similar to TensorFlow's and NumPy's transpose. x's rank (ndim) and axes.len() must match.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[1, 2, 3, 4, 5]);
   let b = g.transpose(a, &[4, 2, 3, 0, 1]);
   assert_eq!(b.eval(&[]).unwrap().shape(), &[5, 3, 4, 1, 2]);
});

pub fn split<A>(
    &'graph self,
    x: A,
    sizes: &[usize],
    axis: isize
) -> Vec<Tensor<'graph, F>> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Splits input tensors into parts.

Splits x into sizes.len() parts along axis.

Along axis, the size of each part is sizes[i]; along the other axes it is x.shape[i] (similar to TensorFlow's split).

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[3, 7, 5]);
   let b = g.split(a, &[2, 3, 2], 1);

   let evaluated = g.eval(&[&b[0], &b[1], &b[2]], &[]);
   let e0 = &evaluated[0];
   let e1 = &evaluated[1];
   let e2 = &evaluated[2];

   assert_eq!(e0.as_ref().unwrap().shape(), &[3, 2, 5]);
   assert_eq!(e1.as_ref().unwrap().shape(), &[3, 3, 5]);
   assert_eq!(e2.as_ref().unwrap().shape(), &[3, 2, 5]);
});

pub fn slice<A>(
    &'graph self,
    x: A,
    starts: &[isize],
    ends: &[isize]
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Slices the input tensor.

Arguments

  • x - Tensor with arbitrary shape.
  • starts - Inclusive start indices for the dimensions.
  • ends - End indices for the dimensions. Each index is inclusive if it is negative and exclusive if it's not.

NOTE: Negative values in starts and ends are counted from the back of the axis.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[4, 4]);
   let b = g.slice(a, &[0, 0], &[-1, 2]); // numpy equivalent is a[:, 0:2]

   assert_eq!(b.eval(&[]).unwrap().shape(), &[4, 2]);
});
use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[4, 4]);
   let b = g.slice(a, &[0, 0], &[-2, 2]); // numpy equivalent is a[:-1, :2]

   assert_eq!(b.eval(&[]).unwrap().shape(), &[3, 2]);
});

pub fn concat<A>(&'graph self, tensors: &[A], axis: isize) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

Concatenates input tensors along specified axis.

axis can be negative.

use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[3, 2]);
   let b: ag::Tensor<f32> = g.zeros(&[3, 2]);
   let c: ag::Tensor<f32> = g.zeros(&[3, 2]);
   let d = g.concat(&[a, b, c], 0);

   assert_eq!(d.eval(&[]).unwrap().shape(), &[9, 2]);
});

pub fn gather_common<A, B>(
    &'graph self,
    param: A,
    indices: B,
    axis: isize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Gathers subviews from the input tensor.

Same spec as https://www.tensorflow.org/api_docs/python/tf/gather. For example, this can be used for embedding-vector lookups.

Unlike ag::gather, indices can contain negative elements.

Returns

Tensor with shape param.shape[..axis] + indices.shape + param.shape[axis+1..]

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let param = g.zeros(&[5, 4, 8, 2]);
   let indices = g.constant(array![[5., -1., 3.], [2., 1., -2.]]);
   let y = g.gather_common(param, indices, 2);

   assert_eq!(y.eval(&[]).unwrap().shape(), &[5, 4, 2, 3, 2])
});

pub fn gather<A, B>(
    &'graph self,
    param: A,
    indices: B,
    axis: isize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

Gathers subviews from the input tensor.

Same spec as https://www.tensorflow.org/api_docs/python/tf/gather. For example, this can be used for embedding-vector lookups.

Returns

Tensor with shape param.shape[..axis] + indices.shape + param.shape[axis+1..]

use ndarray::array;
use autograd as ag;
use ag::tensor::Constant;

ag::with(|g| {
   let param = g.zeros(&[5, 4, 8, 2]);
   let indices = g.constant(array![[5., 4., 3.], [2., 1., 0.]]);  // shape: (2, 3)
   let y = g.gather(param, indices, 2);

   assert_eq!(y.eval(&[]).unwrap().shape(), &[5, 4, 2, 3, 2])
});

pub fn normalize<A, AT>(&'graph self, _x: A, _axes: &AT) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    AT: AsTensor<'graph, F>, 
[src]

Normalizes the input tensor with its mean and variance along the specified axes.

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.standard_normal(&[3, 4]);
   let y1 = g.normalize(x, &[0]);
   let y2 = g.normalize(x, &[0]);

   let evaluated = g.eval(&[y1, y2], &[]);
   let e0 = &evaluated[0];
   let e1 = &evaluated[1];
   assert_eq!(e0.as_ref().unwrap().shape(), &[3, 4]);
   assert_eq!(e1.as_ref().unwrap().shape(), &[3, 4]);
});

pub fn batch_norm<A, B, C>(
    &'graph self,
    x: A,
    scale: B,
    shift: C
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy,
    C: AsRef<Tensor<'graph, F>> + Copy
[src]

Applies batch normalization.

scale and shift should be shared variables. Since normalization is performed along the 1st axis of x, both should have shape (1, x.shape[1]).

use autograd as ag;
use ag::tensor::Variable;

ag::with(|g| {
   let x = g.standard_normal(&[3, 4]);
   let scale = g.variable(ag::ndarray_ext::ones::<f32>(&[1, 4]));
   let shift = g.variable(ag::ndarray_ext::zeros::<f32>(&[1, 4]));
   let norm = g.batch_norm(x, scale, shift);

   assert_eq!(norm.eval(&[]).unwrap().shape(), &[3, 4]);
});

pub fn scalar(&'graph self, val: F) -> Tensor<'graph, F>[src]

Generates a rank-0 (scalar) tensor from a scalar value.

use autograd as ag;

ag::with(|g| {
   let a = g.scalar(3.);
   println!("{}", a.eval(&[]).unwrap());  // => 3.
   assert_eq!(a.eval(&[]).unwrap().shape(), &[]);
});

pub fn random_normal<A>(
    &'graph self,
    shape: &A,
    mean: f64,
    stddev: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the normal distribution.
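
A minimal sketch:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f64> = g.random_normal(&[2, 3], 0., 1.);
   assert_eq!(x.eval(&[]).unwrap().shape(), &[2, 3]);
});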

pub fn random_normal_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    mean: f64,
    stddev: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the normal distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn random_uniform<A>(
    &'graph self,
    shape: &A,
    min: f64,
    max: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the uniform distribution.

pub fn random_uniform_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    min: f64,
    max: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the uniform distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn standard_normal<A>(&'graph self, shape: &A) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the standard normal distribution.

pub fn standard_normal_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the standard normal distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn standard_uniform<A>(&'graph self, shape: &A) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the standard uniform distribution.

pub fn standard_uniform_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the standard uniform distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn bernoulli<A>(&'graph self, shape: &A, p: f64) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the Bernoulli distribution.

pub fn bernoulli_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    p: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the Bernoulli distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn random_exp<A>(&'graph self, shape: &A, lambda: f64) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the exponential distribution.

pub fn random_exp_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    lambda: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the exponential distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn random_gamma<A>(
    &'graph self,
    shape: &A,
    shape_param: f64,
    scale: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the gamma distribution.

pub fn random_gamma_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    shape_param: f64,
    scale: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the gamma distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn log_normal<A>(
    &'graph self,
    shape: &A,
    mean: f64,
    stddev: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the log-normal distribution.

pub fn log_normal_rng<A, R: Rng + 'static>(
    &'graph self,
    arr_rng: ArrayRng<F, R>,
    shape: &A,
    mean: f64,
    stddev: f64
) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Outputs values sampled from the log-normal distribution.

Pre-instantiated ArrayRng is acceptable.

pub fn convert_to_tensor<D>(&'graph self, arr: Array<F, D>) -> Tensor<'graph, F> where
    D: Dimension
[src]

Converts an ndarray::Array to a ag::Tensor.

use ndarray::array;
use autograd as ag;

ag::with(|g| {
   let arr = array![2., 3.];
   let tensor = g.convert_to_tensor(arr.clone());
   assert_eq!(tensor.eval(&[]), Ok(arr.into_dyn()));
});

pub fn zeros<A>(&'graph self, shape: &A) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Returns zeros with given shape.

use ndarray;
use autograd as ag;

ag::with(|g| {
   let a: ag::Tensor<f32> = g.zeros(&[4, 2]);
   assert_eq!(a.eval(&[]), Ok(ndarray::Array2::<f32>::zeros((4, 2)).into_dyn()));
});

pub fn ones<A>(&'graph self, shape: &A) -> Tensor<'graph, F> where
    A: AsTensor<'graph, F>, 
[src]

Returns ones with given shape.

use ndarray;
use autograd as ag;

ag::with(|g| {
   let a = g.ones(&[4, 2]);
   assert_eq!(a.eval(&[]), Ok(ndarray::Array2::<f32>::ones((4, 2)).into_dyn()));
});

pub fn conv2d<A, B>(
    &'graph self,
    x: A,
    w: B,
    pad: usize,
    stride: usize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

2D convolution.

  • x: Tensor with shape (batch, channel, h, w)
  • w: Tensor with shape (out_channel, channel, filter_h, filter_w)

Returns a tensor with shape (batch, out_channel, out_h, out_w)

where

  • out_h = (h + 2 * pad - filter_h) / stride + 1
  • out_w = (w + 2 * pad - filter_w) / stride + 1

This function supports only f32 and f64.
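
A minimal shape-level sketch of the formulas above:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3, 8, 8]);  // (batch, channel, h, w)
   let w: ag::Tensor<f32> = g.zeros(&[5, 3, 3, 3]);  // (out_channel, channel, filter_h, filter_w)
   let y = g.conv2d(x, w, 1, 1);                     // pad = 1, stride = 1
   // out_h = (8 + 2*1 - 3) / 1 + 1 = 8
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 5, 8, 8]);
});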

pub fn dilated_conv2d<A, B>(
    &'graph self,
    x: A,
    w: B,
    pad: usize,
    stride: usize,
    dilate: usize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

2D convolution with dilation.

  • x: Tensor with shape (batch, channel, h, w)
  • w: Tensor with shape (out_channel, in_channel, filter_h, filter_w)

Returns a tensor with shape (batch, out_channel, out_h, out_w)

where

  • out_h = (h + 2 * pad - (dilate * (filter_h - 1) + 1)) / stride + 1
  • out_w = (w + 2 * pad - (dilate * (filter_w - 1) + 1)) / stride + 1

This function supports only f32 and f64.
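
A minimal shape-level sketch of the formulas above:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3, 8, 8]);
   let w: ag::Tensor<f32> = g.zeros(&[5, 3, 3, 3]);
   let y = g.dilated_conv2d(x, w, 2, 1, 2);  // pad = 2, stride = 1, dilate = 2
   // out_h = (8 + 2*2 - (2*(3 - 1) + 1)) / 1 + 1 = 8
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 5, 8, 8]);
});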

pub fn conv2d_transpose<A, B>(
    &'graph self,
    x: A,
    w: B,
    pad: usize,
    stride: usize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

2D transposed convolution.

  • x: Tensor with shape (batch, in_channel, h, w)
  • w: Tensor with shape (in_channel, out_channel, filter_h, filter_w)

Returns a tensor with shape (batch, out_channel, out_h, out_w)

where

  • out_h = stride * (h - 1) - pad + filter_h
  • out_w = stride * (w - 1) - pad + filter_w

This function supports only f32 and f64.
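
A minimal shape-level sketch of the formulas above:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3, 4, 4]);  // (batch, in_channel, h, w)
   let w: ag::Tensor<f32> = g.zeros(&[3, 5, 3, 3]);  // (in_channel, out_channel, filter_h, filter_w)
   let y = g.conv2d_transpose(x, w, 0, 2);           // pad = 0, stride = 2
   // out_h = 2*(4 - 1) - 0 + 3 = 9
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 5, 9, 9]);
});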

pub fn dilated_conv2d_transpose<A, B>(
    &'graph self,
    x: A,
    w: B,
    pad: usize,
    stride: usize,
    dilate: usize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy,
    B: AsRef<Tensor<'graph, F>> + Copy
[src]

2D transposed convolution with dilation.

  • x: Tensor with shape (batch, in_channel, h, w)
  • w: Tensor with shape (in_channel, out_channel, filter_h, filter_w)

Returns a tensor with shape (batch, out_channel, out_h, out_w)

where

  • out_h = stride * (h - 1) - pad + (dilate * (filter_h - 1) + 1)
  • out_w = stride * (w - 1) - pad + (dilate * (filter_w - 1) + 1)

This function supports only f32 and f64.

pub fn max_pool2d<A>(
    &'graph self,
    x: A,
    pool_size: usize,
    pad: usize,
    stride: usize
) -> Tensor<'graph, F> where
    A: AsRef<Tensor<'graph, F>> + Copy
[src]

2D max pooling.

  • x: Tensor with shape (batch, channel, h, w)

Returns a tensor with shape (batch, channel, out_h, out_w)

where

  • out_h = (h + 2 * pad - pool_size) / stride + 1
  • out_w = (w + 2 * pad - pool_size) / stride + 1

This function supports only f32 and f64.
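
A minimal shape-level sketch of the formulas above:

use autograd as ag;

ag::with(|g| {
   let x: ag::Tensor<f32> = g.zeros(&[2, 3, 8, 8]);
   let y = g.max_pool2d(x, 2, 0, 2);  // pool_size = 2, pad = 0, stride = 2
   // out_h = (8 + 2*0 - 2) / 2 + 1 = 4
   assert_eq!(y.eval(&[]).unwrap().shape(), &[2, 3, 4, 4]);
});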

impl<F: Float> Graph<F>[src]

pub fn eval<'feed, 'tensor, 'scope, A>(
    &'scope self,
    tensors: &'tensor [A],
    feeds: &[Feed<'feed, F>]
) -> Vec<Result<NdArray<F>, EvalError>> where
    A: AsRef<Tensor<'scope, F>> + Copy
[src]

Evaluates given symbolic tensors as a list of ndarray::Array<F, ndarray::IxDyn>.

Unlike Tensor::eval, this function supports batched evaluation.

See also Eval.

use ndarray::array;
use autograd as ag;

ag::with(|g| {
    let a = g.zeros(&[2]);
    let b = g.ones(&[2]);

    // eval two tensors at once.
    let evaluated = g.eval(&[a, b], &[]);
    assert_eq!(evaluated[0], Ok(array![0., 0.].into_dyn()));
    assert_eq!(evaluated[1], Ok(array![1., 1.].into_dyn()));
});

Trait Implementations

impl<'graph, F: Float> Constant<'graph, F, Arc<ArrayBase<OwnedRepr<F>, Dim<IxDynImpl>>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 0]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 1]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 2]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 3]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 4]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 5]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 6]>>> for Graph<F>[src]

impl<'graph, F: Float> Constant<'graph, F, ArrayBase<OwnedRepr<F>, Dim<IxDynImpl>>> for Graph<F>[src]

impl<T: Float> Debug for Graph<T>[src]

impl<'graph, F: Float> Variable<'graph, F, Arc<RwLock<ArrayBase<OwnedRepr<F>, Dim<IxDynImpl>>>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 0]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 1]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 2]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 3]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 4]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 5]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<[usize; 6]>>> for Graph<F>[src]

impl<'graph, F: Float> Variable<'graph, F, ArrayBase<OwnedRepr<F>, Dim<IxDynImpl>>> for Graph<F>[src]

Auto Trait Implementations

impl<F> !RefUnwindSafe for Graph<F>

impl<F> !Send for Graph<F>

impl<F> !Sync for Graph<F>

impl<F> Unpin for Graph<F>

impl<F> !UnwindSafe for Graph<F>

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

impl<V, T> VZip<V> for T where
    V: MultiLane<T>,