Function autograd::ops::matmul_t

pub fn matmul_t<T: Float, A: AsRef<Tensor<T>>, B: AsRef<Tensor<T>>>(
    a: A,
    b: B,
    transpose_a: bool,
    transpose_b: bool
) -> Tensor<T>

Matrix multiplication with optional transposition of the inputs.

Same specification as matmul, except that if transpose_a is true, a is transposed before the actual matrix multiplication. The same applies to transpose_b.

This performs better than transposing explicitly and then multiplying, e.g. ag::matmul(ag::transpose(a, ...), b); a comparison sketch follows the example below.

extern crate autograd as ag;

// `a` is 2x4 and `b` is 2x3; with transpose_a = true, `a` is treated as 4x2,
// so the product has shape 4x3.
let ref a: ag::Tensor<f32> = ag::zeros(&[2, 4]);
let ref b: ag::Tensor<f32> = ag::zeros(&[2, 3]);
let ref c = ag::matmul_t(a, b, true, false);

assert_eq!(c.eval(&[]).unwrap().shape(), &[4, 3]);
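
For comparison with the explicit form mentioned above, here is a minimal sketch that builds both graphs side by side. It assumes this crate's ag::matmul and a permutation-based ag::transpose(x, perm), as used elsewhere in its documentation; both paths should yield a [4, 3] result, the difference being that matmul_t fuses the transposition into the multiplication instead of adding a separate transpose node.

extern crate autograd as ag;

let ref a: ag::Tensor<f32> = ag::zeros(&[2, 4]);
let ref b: ag::Tensor<f32> = ag::zeros(&[2, 3]);

// Fused: `a` is transposed inside the matmul operation itself.
let ref fused = ag::matmul_t(a, b, true, false);

// Explicit (sketch): transpose `a` as its own graph node, then multiply.
// Assumes ag::transpose accepts a permutation like &[1, 0].
let ref a_t = ag::transpose(a, &[1, 0]);
let ref explicit = ag::matmul(a_t, b);

assert_eq!(fused.eval(&[]).unwrap().shape(), &[4, 3]);
assert_eq!(explicit.eval(&[]).unwrap().shape(), &[4, 3]);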