pub struct Var { /* private fields */ }
Expand description
Implementations
sourceimpl Var
impl Var
pub fn new(input: &[f64], dim: &[usize]) -> Var
pub fn new_f64(input: &[f64], dim: &[usize]) -> Var
pub fn new_f32(input: &[f32], dim: &[usize]) -> Var
sourcepub fn ref_copy(self: &Var) -> Var
pub fn ref_copy(self: &Var) -> Var
Where an assignment (shallow copy) is needed, use ref_copy. If a hard (deep) copy is necessary, call clone() instead.
pub fn size(&self) -> Vec<usize>
pub fn numel(&self) -> usize
pub fn get_f32(&self, o: &[usize]) -> Result<f32, AutoDiffError>
pub fn set_f32(&self, o: &[usize], v: f32) -> Result<(), AutoDiffError>
pub fn get_f64(&self, o: &[usize]) -> Result<f64, AutoDiffError>
pub fn set_f64(&self, o: &[usize], v: f64) -> Result<(), AutoDiffError>
pub fn fill(size: &[usize], fill_value: &Var) -> Var
pub fn fill_f32(size: &[usize], fill_value: f32) -> Var
pub fn fill_f64(size: &[usize], fill_value: f64) -> Var
pub fn zeros(dim: &[usize]) -> Var
pub fn ones(dim: &[usize]) -> Var
pub fn twos(dim: &[usize]) -> Var
pub fn empty(dim: &[usize]) -> Var
pub fn from_record_f64(&self, row: usize, record: &[f64])
pub fn rand_usize(
rng: &mut StdRng,
dim: &[usize],
left: usize,
right: usize
) -> Var
pub fn normal_f64(rng: &mut StdRng, dim: &[usize], mean: f64, std: f64) -> Var
pub fn normal_f32(rng: &mut StdRng, dim: &[usize], mean: f32, std: f32) -> Var
pub fn normal(rng: &mut StdRng, dim: &[usize], mean: f64, std: f64) -> Var
pub fn uniform_f64(rng: &mut StdRng, dim: &[usize], from: f64, to: f64) -> Var
pub fn uniform_f32(rng: &mut StdRng, dim: &[usize], from: f32, to: f32) -> Var
pub fn uniform(rng: &mut StdRng, dim: &[usize], from: f64, to: f64) -> Var
pub fn _add(&self, other: &Var) -> Var
pub fn _sub(&self, other: &Var) -> Var
pub fn _mul(&self, other: &Var) -> Var
pub fn _div(&self, other: &Var) -> Var
sourcepub fn matmul(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn matmul(&self, other: &Var) -> Result<Var, AutoDiffError>
Matrix/inner/dot product
use auto_diff::{Var, var_f64, AutoDiffError};
extern crate openblas_src;
fn test_matmul() -> Result<(), AutoDiffError> {
let v1 = var_f64!([[1., 2., 3.], [4., 5., 6.]]); let v2 = var_f64!([[11., 12., 13.], [14., 15., 16.], [17., 18., 19.]]); let v3 = v1.matmul(&v2)?; let em = var_f64!([[90.0, 96.0, 102.0], [216.0, 231.0, 246.0]]); assert_eq!(v3, em);
Ok(())
}
test_matmul();
sourcepub fn outer(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn outer(&self, other: &Var) -> Result<Var, AutoDiffError>
Outer product
let v1 = Var::new_f64(&[1., 2., 3.], &[3]);
let v2 = Var::new_f64(&[4., 5., 6.], &[3]);
let v3 = v1.outer(&v2)?;
let em = var_f64!([[4., 5., 6.],
[8., 10., 12.],
[12., 15., 18.]]);
assert_eq!(v3, em);
pub fn elu(&self, alpha: Var) -> Result<Var, AutoDiffError>
pub fn relu(&self) -> Result<Var, AutoDiffError>
pub fn sigmoid(&self) -> Result<Var, AutoDiffError>
pub fn mse_loss(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn bce_with_logits_loss(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn cross_entropy_loss(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn abs(&self) -> Result<Var, AutoDiffError>
pub fn acos(&self) -> Result<Var, AutoDiffError>
pub fn asin(&self) -> Result<Var, AutoDiffError>
pub fn atan(&self) -> Result<Var, AutoDiffError>
pub fn ceil(&self) -> Result<Var, AutoDiffError>
pub fn cos(&self) -> Result<Var, AutoDiffError>
pub fn cosh(&self) -> Result<Var, AutoDiffError>
pub fn exp(&self) -> Result<Var, AutoDiffError>
pub fn expm1(&self) -> Result<Var, AutoDiffError>
pub fn floor(&self) -> Result<Var, AutoDiffError>
pub fn frac(&self) -> Result<Var, AutoDiffError>
pub fn log(&self) -> Result<Var, AutoDiffError>
pub fn log10(&self) -> Result<Var, AutoDiffError>
pub fn log1p(&self) -> Result<Var, AutoDiffError>
pub fn log1pexp(&self) -> Result<Var, AutoDiffError>
pub fn log2(&self) -> Result<Var, AutoDiffError>
pub fn neg(&self) -> Result<Var, AutoDiffError>
pub fn _neg(&self) -> Var
pub fn reciprocal(&self) -> Result<Var, AutoDiffError>
pub fn round(&self) -> Result<Var, AutoDiffError>
pub fn rsqrt(&self) -> Result<Var, AutoDiffError>
pub fn sign(&self) -> Result<Var, AutoDiffError>
pub fn sin(&self) -> Result<Var, AutoDiffError>
pub fn sinh(&self) -> Result<Var, AutoDiffError>
pub fn sqrt(&self) -> Result<Var, AutoDiffError>
pub fn tan(&self) -> Result<Var, AutoDiffError>
pub fn tanh(&self) -> Result<Var, AutoDiffError>
pub fn trunc(&self) -> Result<Var, AutoDiffError>
pub fn max_pair(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn min_pair(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn arg_sort(
&self,
dim: usize,
descending: bool
) -> Result<Var, AutoDiffError>
pub fn eq_elem(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn equal(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn ge(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn gt(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn le(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn lt(&self, other: &Var) -> Result<Var, AutoDiffError>
pub fn ne(&self, other: &Var) -> Result<Var, AutoDiffError>
sourcepub fn cat(&self, other: &[Var], dim: usize) -> Result<Var, AutoDiffError>
pub fn cat(&self, other: &[Var], dim: usize) -> Result<Var, AutoDiffError>
Concatenates the given sequence of seq tensors in the given dimension. The input tensors should all have the same size except on the given dimension. The output tensor will have the same size as the inputs except on the given dimension, where its size is the sum of the inputs' sizes along that dimension. Applying cat to [tensor(5, 3, 2), tensor(5, 7, 2)] yields a tensor(5, 10, 2).
use auto_diff::{Var, var_f64, AutoDiffError};
extern crate openblas_src;
fn test_cat() -> Result<(), AutoDiffError> {
let m1 = Var::empty(&[3, 1]); let m2 = Var::empty(&[3, 1]); let m3 = Var::empty(&[3, 1]); let m4 = m1.cat(&[m2, m3], 1)?; assert_eq!(m4.size(), [3, 3]);
Ok(())
}
test_cat();
pub fn chunk(
&self,
chunks: usize,
dim: usize
) -> Result<Vec<Var>, AutoDiffError>
pub fn conditional_select(&self, x: &Var, y: &Var) -> Result<Var, AutoDiffError>
pub fn gather(&self, dim: usize, index: Var) -> Result<Var, AutoDiffError>
pub fn index_select(&self, dim: usize, index: Var) -> Result<Var, AutoDiffError>
pub fn index_exclude(
&self,
dim: usize,
index: Var
) -> Result<Var, AutoDiffError>
pub fn permute(&self, dim: &[usize]) -> Result<Var, AutoDiffError>
pub fn repeat(&self, dim: &[usize]) -> Result<Var, AutoDiffError>
pub fn reshape(&self, new_shape: &[usize]) -> Result<Var, AutoDiffError>
pub fn split(
&self,
sections: &[usize],
dim: usize
) -> Result<Vec<Var>, AutoDiffError>
pub fn squeeze(&self, dim: Option<usize>) -> Result<Var, AutoDiffError>
pub fn t(&self) -> Result<Var, AutoDiffError>
pub fn take(&self, index: &[usize]) -> Result<Var, AutoDiffError>
pub fn unsqueeze(&self, dim: usize) -> Result<Var, AutoDiffError>
sourcepub fn stack(&self, other: &[Var], dim: usize) -> Result<Var, AutoDiffError>
pub fn stack(&self, other: &[Var], dim: usize) -> Result<Var, AutoDiffError>
Stack tensors of the same size along a new dimension specified by dim. The difference from cat is that cat doesn't create a new dimension.
let m1 = var_f64!([[1., 2., ],
[3., 4., ]]);
let m2 = var_f64!([[5., 6., ],
[7., 8., ]]);
let m3 = m1.stack(&[m2], 1)?;
pub fn det(&self) -> Result<Var, AutoDiffError>
pub fn inv(&self) -> Result<Var, AutoDiffError>
pub fn normalize_unit(&self) -> Result<Var, AutoDiffError>
pub fn tr(&self) -> Result<Var, AutoDiffError>
pub fn argmax(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn argmin(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn logsumexp(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn mean(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn prod(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn std(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn sum(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn var(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn max(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
pub fn min(
&self,
dim: Option<&[usize]>,
keepdim: bool
) -> Result<Var, AutoDiffError>
sourcepub fn get_patch(
&self,
range: &[(usize, usize)],
step: Option<&[usize]>
) -> Result<Var, AutoDiffError>
pub fn get_patch(
&self,
range: &[(usize, usize)],
step: Option<&[usize]>
) -> Result<Var, AutoDiffError>
Get a portion of the tensor and return it.
let m1 = var_f64!([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]);
let m2 = var_f64!([[4., 5.],
[7., 8.]]);
assert_eq!(m1.get_patch(&[(1, 3), (0, 2)], None)?, m2);
sourcepub fn set_patch(
&self,
other: &Var,
range: &[(usize, usize)],
step: Option<&[usize]>
) -> Result<Var, AutoDiffError>
pub fn set_patch(
&self,
other: &Var,
range: &[(usize, usize)],
step: Option<&[usize]>
) -> Result<Var, AutoDiffError>
Set a portion of the tensor.
let m1 = var_f64!([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]);
let m2 = var_f64!([[10., 11.],
[12., 13.]]);
let m3 = var_f64!([[1., 2., 3.],
[10., 11., 6.],
[12., 13., 9.]]);
assert_eq!(m1.set_patch(&m2, &[(1, 3), (0, 2)], None)?, m3);
pub fn view(&self, new_shape: &[usize]) -> Result<Var, AutoDiffError>
pub fn val(&self) -> Tensor
sourcepub fn grad(&self) -> Result<Var, AutoDiffError>
pub fn grad(&self) -> Result<Var, AutoDiffError>
The current gradient for the Var.
sourcepub fn bp(&self) -> Result<(), AutoDiffError>
pub fn bp(&self) -> Result<(), AutoDiffError>
Apply back propagation to compute the numerical gradient.
pub fn step(&self, opt: &mut dyn Optimizer) -> Result<(), AutoDiffError>
sourcepub fn rerun(&self) -> Result<(), AutoDiffError>
pub fn rerun(&self) -> Result<(), AutoDiffError>
Run the computation graph again.
sourcepub fn get_io_var(&self) -> Result<(Vec<Var>, Vec<Var>), AutoDiffError>
pub fn get_io_var(&self) -> Result<(Vec<Var>, Vec<Var>), AutoDiffError>
Extract input and output from the hidden net.
sourcepub fn get_var_by_label(&self, label: &str) -> Result<Var, AutoDiffError>
pub fn get_var_by_label(&self, label: &str) -> Result<Var, AutoDiffError>
Get var by string label
pub fn set_label(&self, label: &str) -> Result<(), AutoDiffError>
pub fn set_predict(&self) -> Result<(), AutoDiffError>
pub fn predict(&self) -> Result<Var, AutoDiffError>
Trait Implementations
sourceimpl<'de> Deserialize<'de> for Var
impl<'de> Deserialize<'de> for Var
sourcefn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
D: Deserializer<'de>,
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
impl Eq for Var
Auto Trait Implementations
impl !RefUnwindSafe for Var
impl !Send for Var
impl !Sync for Var
impl Unpin for Var
impl !UnwindSafe for Var
Blanket Implementations
sourceimpl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
const: unstable · sourcefn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
sourceimpl<T> ToOwned for T where
T: Clone,
impl<T> ToOwned for T where
T: Clone,
type Owned = T
type Owned = T
The resulting type after obtaining ownership.
sourcefn clone_into(&self, target: &mut T)
fn clone_into(&self, target: &mut T)
Uses borrowed data to replace owned data, usually by cloning. Read more