autograd 0.6.0

A toy library for running computation graphs, mimicking TensorFlow and Theano

This library provides differentiable operations and tensors. The current backend is rust-ndarray.
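
To try the examples below, the crate can be pulled in from crates.io with a Cargo.toml entry like the one sketched here (the examples also use ndarray directly; its version entry is omitted since it must match the one autograd 0.6.0 is built against):

[dependencies]
autograd = "0.6.0"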

Examples

Here we compute the partial derivatives of z = 2x^2 + 3y + 1.


extern crate ndarray;
extern crate autograd as ag;

let ref x = ag::placeholder(&[]);
let ref y = ag::placeholder(&[]);
let ref z = 2*x*x + 3*y + 1;

// dz/dy
let ref g1 = ag::grad(&[z], &[y])[0];

// dz/dx
let ref g2 = ag::grad(&[z], &[x])[0];

// d2z/dx2 (differentiates `z` w.r.t. `x` a second time)
let ref gg = ag::grad(&[g2], &[x])[0];

// evaluation of symbolic gradients
let mut ctx = ag::Context::new();
println!("{}", g1.eval(&mut ctx));   // => 3.
println!("{}", gg.eval(&mut ctx));   // => 4.

// evaluating dz/dx requires filling the placeholder `x`
ag::feed_input(x, ndarray::arr0(2.), &mut ctx);
println!("{}", g2.eval(&mut ctx));   // => 8.

Another example: a multi-layer perceptron for MNIST digit classification.

// -- graph def --
let mut ctx = ag::Context::new();
let ref x = ag::placeholder(&[-1, 28*28]);
let ref y = ag::placeholder(&[-1]);
let ref w = ag::variable(ag::ndarray_ext::glorot_uniform(&[28*28, 10]), &mut ctx);
let ref b = ag::variable(ag::ndarray_ext::zeros(&[1, 10]), &mut ctx);
let ref z = ag::matmul(x, w) + b;
let ref loss = ag::sparse_softmax_cross_entropy(z, y);
let ref grads = ag::grad(&[loss], &[w, b]);
let ref predictions = ag::argmax(z, -1, true);
let ref accuracy = ag::reduce_mean(&ag::equal(predictions, y), &[0], false);

// -- dataset --
let ((x_train, y_train), (x_test, y_test)) = dataset::load();

// -- training method --
let mut optimizer = ag::gradient_descent::SGD { lr: 0.01 };

// -- training loop --
for epoch in 0..max_epoch {
    ...
}
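
As a sanity check on the shapes in the graph above, the same forward pass can be traced with plain rust-ndarray. This sketch uses only ndarray's own API (not autograd's), and the batch size of 4 is an arbitrary assumption:

extern crate ndarray;
use ndarray::Array2;

fn main() {
    let batch = 4;                                         // arbitrary batch size
    let x: Array2<f32> = Array2::zeros((batch, 28 * 28));  // input images: [batch, 784]
    let w: Array2<f32> = Array2::zeros((28 * 28, 10));     // weights:      [784, 10]
    let b: Array2<f32> = Array2::zeros((1, 10));           // bias, broadcast over the batch
    let z = x.dot(&w) + &b;                                // logits:       [batch, 10]
    assert_eq!(z.shape(), &[batch, 10]);
}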

For more, see examples or tests.

The available ops are listed in the API documentation.