Struct melange::tensor::tensor::Tensor

pub struct Tensor<T, S, C, L, P> { /* fields omitted */ }

The central struct of the tensor module.

Tensor is a highly generic structure that provides a single interface for all combinations of shapes, layouts, and policies. It is parametrized by the following generics:

  • T, the scalar data type,
  • S, the type-level shape used for compile-time checks,
  • C, the transpose policy,
  • L, the layout,
  • P, the allocation policy.

The behavior of the tensor is fully determined by those type parameters. Although the roles of T and S are easy to understand, the other parameters might seem more obscure.

C is primarily used with BLAS-backed operations that require the data to be laid out contiguously and in a specific order. Two flags are available in the contiguous case: Contiguous and Transposed. Contiguous is the default; Transposed marks tensors whose axes have been inverted. Although axis inversion is sufficient for most Melange operations to work, it is not for BLAS. The Strided flag is used for tensors that are not contiguous and should not be passed to BLAS operations (this is enforced at compile time).

L is the Layout that internally stores the tensor's data. Various Layouts with different properties are available, but the defaults are a good choice for most cases:

  • StaticHeapLayout with static shapes,
  • HeapLayout with dynamic shapes,
  • SliceLayout for views.

P is the allocation policy used when a new tensor needs to be allocated, such as the result of an operation on borrowed tensors. Note that, by convention, operations use and propagate self's policy. DefaultPolicy allocates with the default layouts suggested in the previous paragraph.

For ease of use, aliases for common cases are defined in the prelude of the tensor module.
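
The sketch below shows how these parameters fit together in a generic helper that wraps add (documented further down). It is a minimal, hedged example: only items listed on this page are used, and the melange::tensor::prelude import path for the traits and the Contiguous flag is an assumption that may need adjusting to the actual module layout.

use std::ops::Add;

use melange::tensor::tensor::Tensor;
use melange::tensor::prelude::*; // assumed re-export path for the traits and flags

// Elementwise sum of two tensors that share the static shape `S`.
// The bounds mirror those on `Tensor::add` documented below.
fn elementwise_sum<T, S, C, Crhs, L, Lrhs, P, Prhs>(
    a: &Tensor<T, S, C, L, P>,
    b: &Tensor<T, S, Crhs, Lrhs, Prhs>,
) -> Tensor<T, S, Contiguous, P::Layout, P>
where
    T: Send + Sync + Copy + Add<Output = T>,
    S: StaticShape,
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>,
    P: StaticAllocationPolicy<T, S>,
{
    // `add` allocates the result with `a`'s policy `P` and returns a
    // contiguous tensor of the same static shape `S`.
    a.add(b)
}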

Implementations

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Add<Output = T>, 
[src]

pub fn add<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn add_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShape<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<T, <S as ReprShape<T, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn add_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]
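
As the bounds above suggest, each binary operation comes in three flavours: the plain method (here add) requires both operands to share the fully static shape S; the _coerce variant accepts a compatible but differently expressed shape Srhs (checked at compile time through Same) and produces the most static representation, ReprShape<T, Srhs>::Output; the _dynamic variant does the same for shapes only known at run time, allocating through the DynamicAllocationPolicy. The following sketch wraps add_dynamic the same way the example above wraps add; the prelude import path is again an assumption.

use std::ops::Add;

use melange::tensor::tensor::Tensor;
use melange::tensor::prelude::*; // assumed re-export path for the traits and flags

// Elementwise sum when the operand shapes are only partially known at compile
// time: `Same` still rejects provably incompatible shapes, and the result is
// allocated through the dynamic allocation policy of `a`.
fn elementwise_sum_dynamic<T, S, Srhs, C, Crhs, L, Lrhs, P, Prhs>(
    a: &Tensor<T, S, C, L, P>,
    b: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>,
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P>
where
    T: Send + Sync + Copy + Add<Output = T>,
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>,
    P: DynamicAllocationPolicy<T>,
{
    a.add_dynamic(b)
}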

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Sub<Output = T>, 
[src]

pub fn sub<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn sub_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShape<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<T, <S as ReprShape<T, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn sub_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Mul<Output = T>, 
[src]

pub fn mul<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn mul_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShape<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<T, <S as ReprShape<T, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn mul_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Div<Output = T>, 
[src]

pub fn div<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn div_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShape<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<T, <S as ReprShape<T, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn div_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Rem<Output = T>, 
[src]

pub fn rem<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn rem_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShape<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<T, <S as ReprShape<T, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn rem_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<T, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<T, <S as ReprShapeDyn<T, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<T, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn atan2<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn atan2_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn atan2_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn copysign<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn copysign_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn copysign_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn max<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn max_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn max_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn min<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn min_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn min_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShape<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f64, <S as ReprShapeDyn<f64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn atan2<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn atan2_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn atan2_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn copysign<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn copysign_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn copysign_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn max<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn max_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn max_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn min<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn min_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn min_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShape<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<f32, <S as ReprShapeDyn<f32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<f32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u128, S>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u128, <S as ReprShape<u128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u128, <S as ReprShape<u128, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u128, <S as ReprShapeDyn<u128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u128>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u128, S>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u128, <S as ReprShape<u128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u128, <S as ReprShape<u128, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u128, <S as ReprShapeDyn<u128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u128>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u64, S>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u64, <S as ReprShape<u64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u64, <S as ReprShape<u64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u64, <S as ReprShapeDyn<u64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u64>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u64, S>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u64, <S as ReprShape<u64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u64, <S as ReprShape<u64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u64, <S as ReprShapeDyn<u64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u64>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u32, S>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u32, <S as ReprShape<u32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u32, <S as ReprShape<u32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u32, <S as ReprShapeDyn<u32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u32>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u32, S>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u32, <S as ReprShape<u32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u32, <S as ReprShape<u32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u32, <S as ReprShapeDyn<u32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u32>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u16, S>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u16, <S as ReprShape<u16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u16, <S as ReprShape<u16, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u16, <S as ReprShapeDyn<u16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u16>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u16, S>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u16, <S as ReprShape<u16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u16, <S as ReprShape<u16, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u16, <S as ReprShapeDyn<u16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u16>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u8, S>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u8, <S as ReprShape<u8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u8, <S as ReprShape<u8, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u8, <S as ReprShapeDyn<u8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u8>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u8, S>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u8, <S as ReprShape<u8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<u8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<u8, <S as ReprShape<u8, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<u8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<u8, <S as ReprShapeDyn<u8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<u8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<u8>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i128, <S as ReprShape<i128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i128, <S as ReprShape<i128, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i128, <S as ReprShapeDyn<i128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i128>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i128, <S as ReprShape<i128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i128, <S as ReprShape<i128, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i128, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i128, <S as ReprShapeDyn<i128, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i128, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i128>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i64, <S as ReprShape<i64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i64, <S as ReprShape<i64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i64, <S as ReprShapeDyn<i64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i64>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i64, <S as ReprShape<i64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i64, <S as ReprShape<i64, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i64, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i64, <S as ReprShapeDyn<i64, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i64, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i64>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i32, <S as ReprShape<i32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i32, <S as ReprShape<i32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i32, <S as ReprShapeDyn<i32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i32>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i32, <S as ReprShape<i32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i32, <S as ReprShape<i32, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i32, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i32, <S as ReprShapeDyn<i32, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i32, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i32>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i16, <S as ReprShape<i16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i16, <S as ReprShape<i16, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i16, <S as ReprShapeDyn<i16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i16>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i16, <S as ReprShape<i16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i16, <S as ReprShape<i16, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i16, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i16, <S as ReprShapeDyn<i16, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i16, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i16>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn div_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn div_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i8, <S as ReprShape<i8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i8, <S as ReprShape<i8, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn div_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i8, <S as ReprShapeDyn<i8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i8>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn rem_euclid<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn rem_euclid_coerce<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i8, <S as ReprShape<i8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShape<i8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: StaticAllocationPolicy<i8, <S as ReprShape<i8, Srhs>>::Output>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn rem_euclid_dynamic<Srhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<i8, Srhs, Crhs, Lrhs, Prhs>
) -> Tensor<i8, <S as ReprShapeDyn<i8, Srhs>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs> + ReprShapeDyn<i8, Srhs>,
    <S as Same<Srhs>>::Output: TRUE,
    P: DynamicAllocationPolicy<i8>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Add<Output = T>, 
[src]

pub fn scal_add(&self, param: T) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn scal_add_dynamic(
    &self,
    param: T
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]
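
The scal_* family mirrors the binary operations but takes a plain scalar instead of a second tensor; judging by the signatures, the scalar is combined with every element and a fresh contiguous tensor is allocated through self's policy. A hedged sketch, with the same assumed prelude import as the earlier examples:

use std::ops::Add;

use melange::tensor::tensor::Tensor;
use melange::tensor::prelude::*; // assumed re-export path

// Offsets every element by the same scalar, assuming `scal_add` is
// elementwise (which the signature suggests but does not state).
fn offset<T, S, C, L, P>(
    t: &Tensor<T, S, C, L, P>,
    by: T,
) -> Tensor<T, S, Contiguous, P::Layout, P>
where
    T: Send + Sync + Copy + Add<Output = T>,
    S: StaticShape,
    L: for<'a> Layout<'a, T>,
    P: StaticAllocationPolicy<T, S>,
{
    t.scal_add(by)
}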

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Sub<Output = T>, 
[src]

pub fn scal_sub(&self, param: T) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn scal_sub_dynamic(
    &self,
    param: T
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Mul<Output = T>, 
[src]

pub fn scal_mul(&self, param: T) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn scal_mul_dynamic(
    &self,
    param: T
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Div<Output = T>, 
[src]

pub fn scal_div(&self, param: T) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn scal_div_dynamic(
    &self,
    param: T
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Rem<Output = T>, 
[src]

pub fn scal_rem(&self, param: T) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn scal_rem_dynamic(
    &self,
    param: T
) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn scal_max(&self, param: f64) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn scal_max_dynamic(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn scal_min(&self, param: f64) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn scal_min_dynamic(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn powf(&self, param: f64) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn powf_dynamic(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn powi(&self, param: i32) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn powi_dynamic(
    &self,
    param: i32
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn scal_max(&self, param: f32) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn scal_max_dynamic(
    &self,
    param: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn scal_min(&self, param: f32) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn scal_min_dynamic(
    &self,
    param: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn powf(&self, param: f32) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn powf_dynamic(
    &self,
    param: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn powi(&self, param: i32) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn powi_dynamic(
    &self,
    param: i32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: u128
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u128, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: u128
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u128>, 
[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: u128
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u128, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: u128
) -> Tensor<u128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u128>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: u64
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u64, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: u64
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u64>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: u64
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u64, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: u64
) -> Tensor<u64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u64>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: u32
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u32, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: u32
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u32>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: u32
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u32, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: u32
) -> Tensor<u32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u32>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: u16
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u16, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: u16
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u16>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: u16
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u16, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: u16
) -> Tensor<u16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u16>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: u8
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u8, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: u8
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u8>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: u8
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<u8, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: u8
) -> Tensor<u8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<u8>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: i128
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: i128
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i128>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: i128
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: i128
) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i128>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: i64
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: i64
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i64>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: i64
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: i64
) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i64>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: i32
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: i32
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i32>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: i32
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: i32
) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i32>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: i16
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: i16
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i16>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: i16
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: i16
) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i16>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn scal_div_euclid(
    &self,
    param: i8
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>, 
[src]

pub fn scal_div_euclid_dynamic(
    &self,
    param: i8
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i8>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn scal_rem_euclid(
    &self,
    param: i8
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>, 
[src]

pub fn scal_rem_euclid_dynamic(
    &self,
    param: i8
) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i8>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn exp(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn exp_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]
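
The remaining impl blocks provide the usual unary float maps (exp, exp2, exp_m1, ln, ln_1p, log2, log10, trigonometric and hyperbolic functions), each with a _dynamic counterpart. Assuming they are elementwise as their names suggest, they compose naturally; the sketch below builds a softplus, ln(1 + e^x), from exp and ln_1p. The extra P::Layout bound is added so the chained call on the freshly allocated result type-checks; as before, the prelude path is an assumption.

use melange::tensor::tensor::Tensor;
use melange::tensor::prelude::*; // assumed re-export path

// softplus(x) = ln(1 + e^x), composed from the unary maps documented here.
fn softplus<S, C, L, P>(
    x: &Tensor<f64, S, C, L, P>,
) -> Tensor<f64, S, Contiguous, P::Layout, P>
where
    S: StaticShape,
    L: for<'a> Layout<'a, f64>,
    P: StaticAllocationPolicy<f64, S>,
    P::Layout: for<'a> Layout<'a, f64>,
{
    // `exp` allocates a contiguous tensor with policy `P`; `ln_1p` then maps
    // y -> ln(1 + y) over that result.
    x.exp().ln_1p()
}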

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn exp2(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn exp2_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn exp_m1(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn exp_m1_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn ln(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn ln_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn ln_1p(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn ln_1p_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn log2(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn log2_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn log10(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn log10_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn sin(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn sin_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn cos(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn cos_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn tan(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn tan_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn sinh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn sinh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn cosh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn cosh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn tanh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn tanh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn asin(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn asin_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn acos(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn acos_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn atan(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn atan_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn asinh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn asinh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn acosh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn acosh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn atanh(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn atanh_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn sqrt(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn sqrt_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn cbrt(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn cbrt_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn abs(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn signum(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn ceil(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn ceil_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn floor(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn floor_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn round(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn round_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn recip(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn recip_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn to_degrees(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn to_degrees_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn to_radians(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn to_radians_dynamic(&self) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]
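
The unary f64 methods above all share the same shape: a borrowing call that returns a freshly allocated contiguous tensor, plus a _dynamic twin that swaps the StaticShape and StaticAllocationPolicy bounds for DynamicAllocationPolicy. A hedged sketch with an illustrative helper, assuming the same prelude re-exports as before and element-wise application of the corresponding f64 methods:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn log_and_exp<S, C, L, P>(
    x: &Tensor<f64, S, C, L, P>,
) -> (
    Tensor<f64, S, Contiguous, P::Layout, P>,
    Tensor<f64, S, Contiguous, P::Layout, P>,
)
where
    L: for<'a> Layout<'a, f64>,
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
{
    // Two independent passes over x, each allocating its own output via P.
    (x.ln(), x.exp())
}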

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn exp(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn exp_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn exp2(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn exp2_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn exp_m1(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn exp_m1_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn ln(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn ln_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn ln_1p(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn ln_1p_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn log2(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn log2_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn log10(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn log10_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn sin(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn sin_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn cos(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn cos_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn tan(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn tan_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn sinh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn sinh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn cosh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn cosh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn tanh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn tanh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn asin(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn asin_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn acos(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn acos_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn atan(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn atan_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn asinh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn asinh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn acosh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn acosh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn atanh(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn atanh_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn sqrt(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn sqrt_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn cbrt(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn cbrt_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn abs(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn signum(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn ceil(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn ceil_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn floor(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn floor_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn round(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn round_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn recip(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn recip_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn to_degrees(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn to_degrees_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn to_radians(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn to_radians_dynamic(&self) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]
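
The f32 family mirrors the f64 one; the point worth illustrating separately is the _dynamic variant, which carries no StaticShape bound and so also works for runtime-sized tensors. An illustrative sketch under the same assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn sqrt_any_shape<S, C, L, P>(
    x: &Tensor<f32, S, C, L, P>,
) -> Tensor<f32, S, Contiguous, P::Layout, P>
where
    L: for<'a> Layout<'a, f32>,
    P: DynamicAllocationPolicy<f32>,
{
    // No StaticShape bound: the runtime shape comes from the layout and the
    // output is allocated through the dynamic policy.
    x.sqrt_dynamic()
}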

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn abs(&self) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i128>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128>, 
[src]

pub fn signum(&self) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i128, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<i128, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i128>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn abs(&self) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i64>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64>, 
[src]

pub fn signum(&self) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<i64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i64>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn abs(&self) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i32>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32>, 
[src]

pub fn signum(&self) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i32, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<i32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i32>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn abs(&self) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i16>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16>, 
[src]

pub fn signum(&self) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i16, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<i16, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i16>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn abs(&self) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>, 
[src]

pub fn abs_dynamic(&self) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i8>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8>, 
[src]

pub fn signum(&self) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<i8, S>, 
[src]

pub fn signum_dynamic(&self) -> Tensor<i8, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<i8>, 
[src]
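
A short sketch combining the two signed-integer helpers above; the helper name and prelude path are assumptions, and abs / signum are presumed to apply the corresponding i64 methods element-wise:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn magnitude_and_sign<S, C, L, P>(
    t: &Tensor<i64, S, C, L, P>,
) -> (
    Tensor<i64, S, Contiguous, P::Layout, P>,
    Tensor<i64, S, Contiguous, P::Layout, P>,
)
where
    L: for<'a> Layout<'a, i64>,
    S: StaticShape,
    P: StaticAllocationPolicy<i64, S>,
{
    (t.abs(), t.signum())
}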

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy, 
[src]

pub fn as_contiguous(&self) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn as_contiguous_dynamic(&self) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]
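
as_contiguous is useful as a normalization step: whatever the source tensor's transpose policy C, the result is a freshly allocated Contiguous tensor. An illustrative sketch, again assuming the prelude re-exports:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn normalize<T, S, C, L, P>(
    t: &Tensor<T, S, C, L, P>,
) -> Tensor<T, S, Contiguous, P::Layout, P>
where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy,
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
{
    // Copies the elements into a new contiguous layout chosen by P.
    t.as_contiguous()
}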

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Ring + Div<Output = T>, 
[src]

pub fn inv(&self) -> Tensor<T, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>, 
[src]

pub fn inv_dynamic(&self) -> Tensor<T, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<T>, 
[src]
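
Judging by the Ring + Div bounds, inv computes the element-wise multiplicative inverse; that is an inference from the signature, not a statement from the crate. Sketch with the assumed prelude import (Ring is taken to be re-exported there as well):

use std::ops::Div;

use melange::tensor::prelude::*; // assumed prelude path and re-exports, including Ring

fn reciprocal<T, S, C, L, P>(
    t: &Tensor<T, S, C, L, P>,
) -> Tensor<T, S, Contiguous, P::Layout, P>
where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + Ring + Div<Output = T>,
    S: StaticShape,
    P: StaticAllocationPolicy<T, S>,
{
    t.inv()
}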

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn scal_mul_add(
    &self,
    param1: f64,
    param2: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>, 
[src]

pub fn scal_mul_add_dynamic(
    &self,
    param1: f64,
    param2: f64
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn scal_mul_add(
    &self,
    param1: f32,
    param2: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>, 
[src]

pub fn scal_mul_add_dynamic(
    &self,
    param1: f32,
    param2: f32
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    P: DynamicAllocationPolicy<f32>, 
[src]
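
scal_mul_add presumably mirrors f64::mul_add and f32::mul_add element-wise, i.e. self * param1 + param2 in one pass; the parameter roles are inferred from the standard-library analogue, not documented here. Illustrative sketch with the assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn affine<S, C, L, P>(
    x: &Tensor<f64, S, C, L, P>,
    scale: f64,
    offset: f64,
) -> Tensor<f64, S, Contiguous, P::Layout, P>
where
    L: for<'a> Layout<'a, f64>,
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
{
    // Presumed x * scale + offset, element-wise, in a single allocation.
    x.scal_mul_add(scale, offset)
}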

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn mul_add<Crhs1, Lrhs1, Prhs1, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f64, S, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f64, S, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f64, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f64, S>,
    Lrhs1: for<'a> Layout<'a, f64>,
    Lrhs2: for<'a> Layout<'a, f64>, 
[src]

pub fn mul_add_coerce<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f64, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f64, Srhs2, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f64, <S as ReprShape<f64, <Srhs1 as ReprShapeDyn<f64, Srhs2>>::Output>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs1> + Same<Srhs2> + ReprShape<f64, <Srhs1 as ReprShapeDyn<f64, Srhs2>>::Output>,
    <S as Same<Srhs1>>::Output: TRUE,
    <S as Same<Srhs2>>::Output: TRUE,
    P: StaticAllocationPolicy<f64, <S as ReprShape<f64, <Srhs1 as ReprShapeDyn<f64, Srhs2>>::Output>>::Output>,
    Srhs1: Same<Srhs2> + ReprShapeDyn<f64, Srhs2>,
    <Srhs1 as Same<Srhs2>>::Output: TRUE,
    Lrhs1: for<'a> Layout<'a, f64>,
    Lrhs2: for<'a> Layout<'a, f64>, 
[src]

pub fn mul_add_dynamic<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f64, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f64, Srhs2, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f64, <S as ReprShapeDyn<f64, <Srhs1 as ReprShapeDyn<f64, Srhs2>>::Output>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs1> + Same<Srhs2> + ReprShapeDyn<f64, <Srhs1 as ReprShapeDyn<f64, Srhs2>>::Output>,
    <S as Same<Srhs1>>::Output: TRUE,
    <S as Same<Srhs2>>::Output: TRUE,
    P: DynamicAllocationPolicy<f64>,
    Srhs1: Same<Srhs2> + ReprShapeDyn<f64, Srhs2>,
    <Srhs1 as Same<Srhs2>>::Output: TRUE,
    Lrhs1: for<'a> Layout<'a, f64>,
    Lrhs2: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn mul_add<Crhs1, Lrhs1, Prhs1, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f32, S, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f32, S, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f32, S, Contiguous, P::Layout, P> where
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
    Lrhs1: for<'a> Layout<'a, f32>,
    Lrhs2: for<'a> Layout<'a, f32>, 
[src]

pub fn mul_add_coerce<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f32, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f32, Srhs2, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f32, <S as ReprShape<f32, <Srhs1 as ReprShapeDyn<f32, Srhs2>>::Output>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs1> + Same<Srhs2> + ReprShape<f32, <Srhs1 as ReprShapeDyn<f32, Srhs2>>::Output>,
    <S as Same<Srhs1>>::Output: TRUE,
    <S as Same<Srhs2>>::Output: TRUE,
    P: StaticAllocationPolicy<f32, <S as ReprShape<f32, <Srhs1 as ReprShapeDyn<f32, Srhs2>>::Output>>::Output>,
    Srhs1: Same<Srhs2> + ReprShapeDyn<f32, Srhs2>,
    <Srhs1 as Same<Srhs2>>::Output: TRUE,
    Lrhs1: for<'a> Layout<'a, f32>,
    Lrhs2: for<'a> Layout<'a, f32>, 
[src]

pub fn mul_add_dynamic<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &self,
    other1: &Tensor<f32, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f32, Srhs2, Crhs2, Lrhs2, Prhs2>
) -> Tensor<f32, <S as ReprShapeDyn<f32, <Srhs1 as ReprShapeDyn<f32, Srhs2>>::Output>>::Output, Contiguous, P::Layout, P> where
    S: Same<Srhs1> + Same<Srhs2> + ReprShapeDyn<f32, <Srhs1 as ReprShapeDyn<f32, Srhs2>>::Output>,
    <S as Same<Srhs1>>::Output: TRUE,
    <S as Same<Srhs2>>::Output: TRUE,
    P: DynamicAllocationPolicy<f32>,
    Srhs1: Same<Srhs2> + ReprShapeDyn<f32, Srhs2>,
    <Srhs1 as Same<Srhs2>>::Output: TRUE,
    Lrhs1: for<'a> Layout<'a, f32>,
    Lrhs2: for<'a> Layout<'a, f32>, 
[src]
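
The three-tensor mul_add follows the same pattern as the binary operations: same-shape operands for the plain method, coerced or dynamic shape reconciliation for the other two, with the result allocated through the receiver's policy, as the return types show. An illustrative sketch for the statically shaped case, with the assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn fused_multiply_add<S, C, L, P, Ca, La, Pa, Cb, Lb, Pb>(
    x: &Tensor<f32, S, C, L, P>,
    a: &Tensor<f32, S, Ca, La, Pa>,
    b: &Tensor<f32, S, Cb, Lb, Pb>,
) -> Tensor<f32, S, Contiguous, P::Layout, P>
where
    L: for<'a> Layout<'a, f32>,
    La: for<'a> Layout<'a, f32>,
    Lb: for<'a> Layout<'a, f32>,
    S: StaticShape,
    P: StaticAllocationPolicy<f32, S>,
{
    // Presumed element-wise x * a + b; the operands may differ in transpose
    // policy, layout and allocation policy, only the shape S must match.
    x.mul_add(a, b)
}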

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + AddAssign, 
[src]

pub fn add_<Crhs, Lrhs, Prhs>(&mut self, other: &Tensor<T, S, Crhs, Lrhs, Prhs>) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn add_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + SubAssign, 
[src]

pub fn sub_<Crhs, Lrhs, Prhs>(&mut self, other: &Tensor<T, S, Crhs, Lrhs, Prhs>) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn sub_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + MulAssign, 
[src]

pub fn mul_<Crhs, Lrhs, Prhs>(&mut self, other: &Tensor<T, S, Crhs, Lrhs, Prhs>) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn mul_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + DivAssign, 
[src]

pub fn div_<Crhs, Lrhs, Prhs>(&mut self, other: &Tensor<T, S, Crhs, Lrhs, Prhs>) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn div_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + RemAssign, 
[src]

pub fn rem_<Crhs, Lrhs, Prhs>(&mut self, other: &Tensor<T, S, Crhs, Lrhs, Prhs>) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, T>, 
[src]

pub fn rem_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<T, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, T>,
    Lrhs: for<'a> Layout<'a, T>, 
[src]
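
The assign-style methods above are in-place: they take &mut self, return nothing, and require LayoutMut on the receiver instead of an allocation policy. An illustrative accumulate sketch using add_, with the assumed prelude import:

use std::ops::AddAssign;

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn accumulate<T, S, C, L, P, Cd, Ld, Pd>(
    acc: &mut Tensor<T, S, C, L, P>,
    delta: &Tensor<T, S, Cd, Ld, Pd>,
)
where
    L: for<'a> LayoutMut<'a, T>,
    Ld: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + AddAssign,
    S: StaticShape,
{
    // Element-wise acc += delta; no new tensor is allocated.
    acc.add_(delta);
}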

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn atan2_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn atan2_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn copysign_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn copysign_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn max_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn max_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn min_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn min_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn atan2_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn atan2_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn copysign_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn copysign_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn max_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn max_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn min_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn min_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<f32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]
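
max_ and min_ together give an element-wise clamp against two bounding tensors, which is a convenient way to exercise this in-place family. Illustrative sketch with the assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn clamp_in_place<S, C, L, P, Cl, Ll, Pl, Ch, Lh, Ph>(
    x: &mut Tensor<f32, S, C, L, P>,
    lower: &Tensor<f32, S, Cl, Ll, Pl>,
    upper: &Tensor<f32, S, Ch, Lh, Ph>,
)
where
    L: for<'a> LayoutMut<'a, f32>,
    Ll: for<'a> Layout<'a, f32>,
    Lh: for<'a> Layout<'a, f32>,
    S: StaticShape,
{
    // Raise every element to at least lower, then cap it at upper.
    x.max_(lower);
    x.min_(upper);
}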

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u128>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u128>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u128>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u128, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u128>,
    Lrhs: for<'a> Layout<'a, u128>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u64>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u64>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u64>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u64>,
    Lrhs: for<'a> Layout<'a, u64>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u32>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u32>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u32>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u32>,
    Lrhs: for<'a> Layout<'a, u32>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u16>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u16>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u16>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u16, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u16>,
    Lrhs: for<'a> Layout<'a, u16>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u8>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u8>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> LayoutMut<'a, u8>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<u8, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, u8>,
    Lrhs: for<'a> Layout<'a, u8>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i128>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i128>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i128>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i128, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i128>,
    Lrhs: for<'a> Layout<'a, i128>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i64>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i64>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i64>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i64, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i64>,
    Lrhs: for<'a> Layout<'a, i64>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i32>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i32>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i32>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i32, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i32>,
    Lrhs: for<'a> Layout<'a, i32>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i16>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i16>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i16>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i16, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i16>,
    Lrhs: for<'a> Layout<'a, i16>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i8>, 
[src]

pub fn div_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn div_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i8>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> LayoutMut<'a, i8>, 
[src]

pub fn rem_euclid_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) where
    S: StaticShape,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

pub fn rem_euclid_dynamic_<Crhs, Lrhs, Prhs>(
    &mut self,
    other: &Tensor<i8, S, Crhs, Lrhs, Prhs>
) where
    L: for<'a> Layout<'a, i8>,
    Lrhs: for<'a> Layout<'a, i8>, 
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Add<Output = T>, 
[src]

pub fn scal_add_(&mut self, param: T)[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Sub<Output = T>, 
[src]

pub fn scal_sub_(&mut self, param: T)[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Mul<Output = T>, 
[src]

pub fn scal_mul_(&mut self, param: T)[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Div<Output = T>, 
[src]

pub fn scal_div_(&mut self, param: T)[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Rem<Output = T>, 
[src]

pub fn scal_rem_(&mut self, param: T)[src]
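
The scal_*_ methods apply one scalar to every element in place and need both Layout and LayoutMut on the receiver. An illustrative standardization sketch, with the assumed prelude import:

use std::ops::{Div, Sub};

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn standardize<T, S, C, L, P>(x: &mut Tensor<T, S, C, L, P>, mean: T, std_dev: T)
where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Sub<Output = T> + Div<Output = T>,
{
    // Two in-place passes: x = (x - mean) / std_dev, element-wise.
    x.scal_sub_(mean);
    x.scal_div_(std_dev);
}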

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn scal_div_euclid_(&mut self, param: f64)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn scal_max_(&mut self, param: f64)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn scal_min_(&mut self, param: f64)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn powf_(&mut self, param: f64)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: f64)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn powi_(&mut self, param: i32)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn scal_max_(&mut self, param: f32)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn scal_min_(&mut self, param: f32)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn powf_(&mut self, param: f32)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: f32)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn powi_(&mut self, param: i32)[src]
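
The float-specific scalar methods combine naturally: scal_max_ and scal_min_ clamp against constants, and powi_ raises to an integer power. An illustrative sketch with the assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn clamp_and_square<S, C, L, P>(x: &mut Tensor<f32, S, C, L, P>, lower: f32, upper: f32)
where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>,
{
    // Clamp each element to [lower, upper], then square it in place.
    x.scal_max_(lower);
    x.scal_min_(upper);
    x.powi_(2);
}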

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128> + for<'a> LayoutMut<'a, u128>, 
[src]

pub fn scal_div_euclid_(&mut self, param: u128)[src]

impl<S, C, L, P> Tensor<u128, S, C, L, P> where
    L: for<'a> Layout<'a, u128> + for<'a> LayoutMut<'a, u128>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: u128)[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64> + for<'a> LayoutMut<'a, u64>, 
[src]

pub fn scal_div_euclid_(&mut self, param: u64)[src]

impl<S, C, L, P> Tensor<u64, S, C, L, P> where
    L: for<'a> Layout<'a, u64> + for<'a> LayoutMut<'a, u64>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: u64)[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32> + for<'a> LayoutMut<'a, u32>, 
[src]

pub fn scal_div_euclid_(&mut self, param: u32)[src]

impl<S, C, L, P> Tensor<u32, S, C, L, P> where
    L: for<'a> Layout<'a, u32> + for<'a> LayoutMut<'a, u32>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: u32)[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16> + for<'a> LayoutMut<'a, u16>, 
[src]

pub fn scal_div_euclid_(&mut self, param: u16)[src]

impl<S, C, L, P> Tensor<u16, S, C, L, P> where
    L: for<'a> Layout<'a, u16> + for<'a> LayoutMut<'a, u16>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: u16)[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8> + for<'a> LayoutMut<'a, u8>, 
[src]

pub fn scal_div_euclid_(&mut self, param: u8)[src]

impl<S, C, L, P> Tensor<u8, S, C, L, P> where
    L: for<'a> Layout<'a, u8> + for<'a> LayoutMut<'a, u8>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: u8)[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128> + for<'a> LayoutMut<'a, i128>, 
[src]

pub fn scal_div_euclid_(&mut self, param: i128)[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128> + for<'a> LayoutMut<'a, i128>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: i128)[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64> + for<'a> LayoutMut<'a, i64>, 
[src]

pub fn scal_div_euclid_(&mut self, param: i64)[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64> + for<'a> LayoutMut<'a, i64>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: i64)[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32> + for<'a> LayoutMut<'a, i32>, 
[src]

pub fn scal_div_euclid_(&mut self, param: i32)[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32> + for<'a> LayoutMut<'a, i32>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: i32)[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16> + for<'a> LayoutMut<'a, i16>, 
[src]

pub fn scal_div_euclid_(&mut self, param: i16)[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16> + for<'a> LayoutMut<'a, i16>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: i16)[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8> + for<'a> LayoutMut<'a, i8>, 
[src]

pub fn scal_div_euclid_(&mut self, param: i8)[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8> + for<'a> LayoutMut<'a, i8>, 
[src]

pub fn scal_rem_euclid_(&mut self, param: i8)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn exp_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn exp2_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn exp_m1_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn ln_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn ln_1p_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn log2_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn log10_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn sin_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn cos_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn tan_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn sinh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn cosh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn tanh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn asin_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn acos_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn atan_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn asinh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn acosh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn atanh_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn sqrt_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn cbrt_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn ceil_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn floor_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn round_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn recip_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn to_degrees_(&mut self)[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn to_radians_(&mut self)[src]
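
The in-place unary float methods mirror the allocating ones listed earlier, so they can be sequenced on the same buffer without intermediate allocations. An illustrative sketch with the assumed prelude import:

use melange::tensor::prelude::*; // assumed prelude path and re-exports

fn degrees_to_sine<S, C, L, P>(angles: &mut Tensor<f64, S, C, L, P>)
where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>,
{
    // Convert degrees to radians, then replace each angle by its sine.
    angles.to_radians_();
    angles.sin_();
}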

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn exp_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn exp2_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn exp_m1_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn ln_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn ln_1p_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn log2_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn log10_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn sin_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn cos_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn tan_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn sinh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn cosh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn tanh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn asin_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn acos_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn atan_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn asinh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn acosh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn atanh_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn sqrt_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn cbrt_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn ceil_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn floor_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn round_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn recip_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn to_degrees_(&mut self)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn to_radians_(&mut self)[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128> + for<'a> LayoutMut<'a, i128>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<i128, S, C, L, P> where
    L: for<'a> Layout<'a, i128> + for<'a> LayoutMut<'a, i128>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64> + for<'a> LayoutMut<'a, i64>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<i64, S, C, L, P> where
    L: for<'a> Layout<'a, i64> + for<'a> LayoutMut<'a, i64>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32> + for<'a> LayoutMut<'a, i32>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<i32, S, C, L, P> where
    L: for<'a> Layout<'a, i32> + for<'a> LayoutMut<'a, i32>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16> + for<'a> LayoutMut<'a, i16>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<i16, S, C, L, P> where
    L: for<'a> Layout<'a, i16> + for<'a> LayoutMut<'a, i16>, 
[src]

pub fn signum_(&mut self)[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8> + for<'a> LayoutMut<'a, i8>, 
[src]

pub fn abs_(&mut self)[src]

impl<S, C, L, P> Tensor<i8, S, C, L, P> where
    L: for<'a> Layout<'a, i8> + for<'a> LayoutMut<'a, i8>, 
[src]

pub fn signum_(&mut self)[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T> + for<'a> LayoutMut<'a, T>,
    T: Send + Sync + Copy + Ring + Div<Output = T>, 
[src]

pub fn inv_(&mut self)[src]
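
inv_ is the generic in-place reciprocal: it is available for any scalar that is a Ring and supports division, which presumably maps each element x to 1 / x (this reading of the name is an assumption). Reusing the StaticTensor alias assumed above:

    use typenum::U4;

    let mut x: StaticTensor<f64, Shape1D<U4>> = StaticTensor::fill(4.0);
    x.inv_(); // presumably each element becomes 0.25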

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64> + for<'a> LayoutMut<'a, f64>, 
[src]

pub fn scal_mul_add_(&mut self, param0: f64, param1: f64)[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32> + for<'a> LayoutMut<'a, f32>, 
[src]

pub fn scal_mul_add_(&mut self, param0: f32, param1: f32)[src]
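
scal_mul_add_ folds a scalar multiply and a scalar add into a single in-place pass; the rustdoc keeps only the generated names param0 and param1, so reading them as the multiplier and the addend (x <- param0 * x + param1) is an assumption. Sketch with the alias assumed earlier:

    use typenum::U4;

    let mut x: StaticTensor<f32, Shape1D<U4>> = StaticTensor::fill(2.0);
    x.scal_mul_add_(3.0, 1.0); // presumably x <- 3.0 * x + 1.0, i.e. 7.0 everywhere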

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f64>, 
[src]

pub fn mul_add_<Crhs1, Lrhs1, Prhs1, Crhs2, Lrhs2, Prhs2>(
    &mut self,
    other1: &Tensor<f64, S, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f64, S, Crhs2, Lrhs2, Prhs2>
) where
    S: StaticShape,
    Lrhs1: for<'a> Layout<'a, f64>,
    Lrhs2: for<'a> Layout<'a, f64>, 
[src]

pub fn mul_add_dynamic_<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &mut self,
    other1: &Tensor<f64, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f64, Srhs2, Crhs2, Lrhs2, Prhs2>
) where
    L: for<'a> Layout<'a, f64>,
    Lrhs1: for<'a> Layout<'a, f64>,
    Lrhs2: for<'a> Layout<'a, f64>, 
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> LayoutMut<'a, f32>, 
[src]

pub fn mul_add_<Crhs1, Lrhs1, Prhs1, Crhs2, Lrhs2, Prhs2>(
    &mut self,
    other1: &Tensor<f32, S, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f32, S, Crhs2, Lrhs2, Prhs2>
) where
    S: StaticShape,
    Lrhs1: for<'a> Layout<'a, f32>,
    Lrhs2: for<'a> Layout<'a, f32>, 
[src]

pub fn mul_add_dynamic_<Srhs1, Crhs1, Lrhs1, Prhs1, Srhs2, Crhs2, Lrhs2, Prhs2>(
    &mut self,
    other1: &Tensor<f32, Srhs1, Crhs1, Lrhs1, Prhs1>,
    other2: &Tensor<f32, Srhs2, Crhs2, Lrhs2, Prhs2>
) where
    L: for<'a> Layout<'a, f32>,
    Lrhs1: for<'a> Layout<'a, f32>,
    Lrhs2: for<'a> Layout<'a, f32>, 
[src]
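
The tensor-valued mul_add_ variants take two right-hand sides and mutate self; by analogy with f64::mul_add, self <- self * other1 + other2 element-wise is the likely semantics (the operand order is an assumption). mul_add_ checks the shared static shape S at compile time, while mul_add_dynamic_ accepts shapes that are only known at runtime. Sketch:

    use typenum::U2;

    let mut y: StaticTensor<f64, Shape2D<U2, U2>> = StaticTensor::fill(2.0);
    let a: StaticTensor<f64, Shape2D<U2, U2>> = StaticTensor::fill(3.0);
    let b: StaticTensor<f64, Shape2D<U2, U2>> = StaticTensor::fill(1.0);
    y.mul_add_(&a, &b); // presumably y <- y * a + b, i.e. 7.0 everywhere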

impl<M, K, C, L, P> Tensor<f64, Shape2D<M, K>, C, L, P> where
    L: for<'a> Layout<'a, f64>,
    C: BLASPolicy
[src]

pub fn dot<N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape2D<K, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape2D<M, N>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    K: Unsigned,
    Lrhs: for<'a> Layout<'a, f64>,
    Crhs: BLASPolicy,
    P: StaticAllocationPolicy<f64, Shape2D<M, N>>, 
[src]

pub fn dot_coerce<Krhs, N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape2D<Krhs, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape2D<M, N>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    K: IsEqual<Krhs>,
    Eq<K, Krhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f64>,
    Crhs: BLASPolicy,
    P: StaticAllocationPolicy<f64, Shape2D<M, N>>, 
[src]

pub fn dot_dynamic<Krhs, N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape2D<Krhs, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape2D<M, N>, Contiguous, P::Layout, P> where
    K: IsEqual<Krhs>,
    Eq<K, Krhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f64>,
    Crhs: BLASPolicy,
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<M, K, C, L, P> Tensor<f32, Shape2D<M, K>, C, L, P> where
    L: for<'a> Layout<'a, f32>,
    C: BLASPolicy
[src]

pub fn dot<N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape2D<K, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape2D<M, N>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    K: Unsigned,
    Lrhs: for<'a> Layout<'a, f32>,
    Crhs: BLASPolicy,
    P: StaticAllocationPolicy<f32, Shape2D<M, N>>, 
[src]

pub fn dot_coerce<Krhs, N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape2D<Krhs, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape2D<M, N>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    K: IsEqual<Krhs>,
    Eq<K, Krhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f32>,
    Crhs: BLASPolicy,
    P: StaticAllocationPolicy<f32, Shape2D<M, N>>, 
[src]

pub fn dot_dynamic<Krhs, N, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape2D<Krhs, N>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape2D<M, N>, Contiguous, P::Layout, P> where
    K: IsEqual<Krhs>,
    Eq<K, Krhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f32>,
    Crhs: BLASPolicy,
    P: DynamicAllocationPolicy<f32>, 
[src]
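
dot is the BLAS-backed matrix product: self has shape (M, K), other must have shape (K, N) with a matching K, and the result is a freshly allocated contiguous (M, N) tensor using self's allocation policy. Both operands must carry a BLASPolicy transpose flag (Contiguous or Transposed), so Strided views are rejected at compile time; the _coerce and _dynamic variants cover shapes that are not fully static. Sketch with typenum dimensions and the alias assumed earlier:

    use typenum::{U2, U3, U4};

    let a: StaticTensor<f64, Shape2D<U2, U3>> = StaticTensor::fill(1.0);
    let b: StaticTensor<f64, Shape2D<U3, U4>> = StaticTensor::fill(1.0);
    let c = a.dot(&b); // (2, 4) result; every entry is 3.0

    // Rejected at compile time: the inner dimensions U3 and U2 do not match.
    // let bad = a.dot(&StaticTensor::<f64, Shape2D<U2, U4>>::fill(1.0));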

impl<M, N, C, L, P> Tensor<f64, Shape2D<M, N>, C, L, P> where
    L: for<'a> Layout<'a, f64>,
    C: BLASPolicy
[src]

pub fn dotv<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape1D<N>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape1D<M>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    Lrhs: for<'a> Layout<'a, f64>,
    P: StaticAllocationPolicy<f64, Shape1D<M>>, 
[src]

pub fn dotv_coerce<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape1D<M>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f64>,
    P: StaticAllocationPolicy<f64, Shape1D<M>>, 
[src]

pub fn dotv_dynamic<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> Tensor<f64, Shape1D<M>, Contiguous, P::Layout, P> where
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f64>,
    P: DynamicAllocationPolicy<f64>, 
[src]

impl<M, N, C, L, P> Tensor<f32, Shape2D<M, N>, C, L, P> where
    L: for<'a> Layout<'a, f32>,
    C: BLASPolicy
[src]

pub fn dotv<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape1D<N>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape1D<M>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: Unsigned,
    Lrhs: for<'a> Layout<'a, f32>,
    P: StaticAllocationPolicy<f32, Shape1D<M>>, 
[src]

pub fn dotv_coerce<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape1D<M>, Contiguous, P::Layout, P> where
    M: Unsigned,
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f32>,
    P: StaticAllocationPolicy<f32, Shape1D<M>>, 
[src]

pub fn dotv_dynamic<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> Tensor<f32, Shape1D<M>, Contiguous, P::Layout, P> where
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f32>,
    P: DynamicAllocationPolicy<f32>, 
[src]
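
dotv is the matrix-vector counterpart: an (M, N) matrix times a length-N vector yields a freshly allocated length-M vector, with the same static, coerced and dynamic shape-checking tiers as dot. Sketch:

    use typenum::{U2, U3};

    let m: StaticTensor<f64, Shape2D<U2, U3>> = StaticTensor::fill(2.0);
    let v: StaticTensor<f64, Shape1D<U3>> = StaticTensor::fill(1.0);
    let w = m.dotv(&v); // length-2 vector, each entry 6.0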

impl<N, C, L, P> Tensor<f64, Shape1D<N>, C, L, P> where
    L: for<'a> Layout<'a, f64>,
    C: BLASPolicy
[src]

pub fn dot<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape1D<N>, Crhs, Lrhs, Prhs>
) -> f64 where
    N: Unsigned,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

pub fn dot_dynamic<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f64, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> f64 where
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f64>, 
[src]

impl<N, C, L, P> Tensor<f32, Shape1D<N>, C, L, P> where
    L: for<'a> Layout<'a, f32>,
    C: BLASPolicy
[src]

pub fn dot<Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape1D<N>, Crhs, Lrhs, Prhs>
) -> f32 where
    N: Unsigned,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]

pub fn dot_dynamic<Nrhs, Crhs, Lrhs, Prhs>(
    &self,
    other: &Tensor<f32, Shape1D<Nrhs>, Crhs, Lrhs, Prhs>
) -> f32 where
    N: IsEqual<Nrhs>,
    Eq<N, Nrhs>: TRUE,
    Lrhs: for<'a> Layout<'a, f32>, 
[src]
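
On 1-D tensors, dot is the scalar inner product and returns the scalar directly rather than a tensor. Sketch:

    use typenum::U4;

    let u: StaticTensor<f32, Shape1D<U4>> = StaticTensor::fill(0.5);
    let v: StaticTensor<f32, Shape1D<U4>> = StaticTensor::fill(2.0);
    let s: f32 = u.dot(&v); // 4 * (0.5 * 2.0) = 4.0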

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + AddAssign
[src]

pub fn sum<Ax>(
    &self
) -> Tensor<T, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<T, Ax> + At<Ax>,
    P: StaticAllocationPolicy<T, <S as Reduction<Ax>>::Output>, 
[src]

pub fn sum_dynamic<Ax>(
    &self
) -> Tensor<T, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<T>,
    Ax: Unsigned
[src]

impl<T, S, C, L, P> Tensor<T, S, C, L, P> where
    L: for<'a> Layout<'a, T>,
    T: Send + Sync + Copy + MulAssign
[src]

pub fn prod<Ax>(
    &self
) -> Tensor<T, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<T, Ax> + At<Ax>,
    P: StaticAllocationPolicy<T, <S as Reduction<Ax>>::Output>, 
[src]

pub fn prod_dynamic<Ax>(
    &self
) -> Tensor<T, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<T>,
    Ax: Unsigned
[src]
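
sum and prod reduce along a single type-level axis Ax; the output shape is S with that axis reduced (computed through the Reduction trait), and the _dynamic variants only require an allocation policy that handles runtime shapes. A sketch, assuming axes are indexed with typenum constants (U0 for the first axis, and so on):

    use typenum::{U0, U1, U2, U3};

    let t: StaticTensor<f64, Shape2D<U2, U3>> = StaticTensor::fill(2.0);
    let s = t.sum::<U0>();  // sums the 2 entries along axis 0, giving 4.0 each
    let p = t.prod::<U1>(); // multiplies the 3 entries along axis 1, giving 8.0 each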

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn reduce_max<Ax>(
    &self
) -> Tensor<f64, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<f64, Ax> + At<Ax>,
    P: StaticAllocationPolicy<f64, <S as Reduction<Ax>>::Output>, 
[src]

pub fn reduce_max_dynamic<Ax>(
    &self
) -> Tensor<f64, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<f64>,
    Ax: Unsigned
[src]

impl<S, C, L, P> Tensor<f64, S, C, L, P> where
    L: for<'a> Layout<'a, f64>, 
[src]

pub fn reduce_min<Ax>(
    &self
) -> Tensor<f64, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<f64, Ax> + At<Ax>,
    P: StaticAllocationPolicy<f64, <S as Reduction<Ax>>::Output>, 
[src]

pub fn reduce_min_dynamic<Ax>(
    &self
) -> Tensor<f64, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<f64>,
    Ax: Unsigned
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn reduce_max<Ax>(
    &self
) -> Tensor<f32, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<f32, Ax> + At<Ax>,
    P: StaticAllocationPolicy<f32, <S as Reduction<Ax>>::Output>, 
[src]

pub fn reduce_max_dynamic<Ax>(
    &self
) -> Tensor<f32, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<f32>,
    Ax: Unsigned
[src]

impl<S, C, L, P> Tensor<f32, S, C, L, P> where
    L: for<'a> Layout<'a, f32>, 
[src]

pub fn reduce_min<Ax>(
    &self
) -> Tensor<f32, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax> + ReductionOptChunckSize<f32, Ax> + At<Ax>,
    P: StaticAllocationPolicy<f32, <S as Reduction<Ax>>::Output>, 
[src]

pub fn reduce_min_dynamic<Ax>(
    &self
) -> Tensor<f32, <S as Reduction<Ax>>::Output, Contiguous, P::Layout, P> where
    S: Reduction<Ax>,
    P: DynamicAllocationPolicy<f32>,
    Ax: Unsigned
[src]
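
reduce_max and reduce_min follow the same axis-reduction pattern as sum and prod but are only provided for f64 and f32, since they rely on the floating-point max/min. Sketch:

    use typenum::{U0, U1, U2, U3};

    let t: StaticTensor<f32, Shape2D<U2, U3>> = StaticTensor::fill(1.5);
    let maxima = t.reduce_max::<U0>(); // maxima along axis 0
    let minima = t.reduce_min::<U1>(); // minima along axis 1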

impl<'a, T, S, C, P> Tensor<T, S, C, SliceLayout<'a, T>, P>[src]

pub fn from_slice(slice: &'a [T]) -> Self where
    S: StaticShape
[src]

pub fn from_slice_dyn(slice: &'a [T], shape: Vec<usize>) -> Self where
    S: Shape
[src]
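
from_slice and from_slice_dyn build borrowing tensors backed by SliceLayout: no data is copied, and the tensor cannot outlive the slice. The transpose and allocation policy parameters still have to be named (or hidden behind a prelude alias); spelling them out with Contiguous and DefaultPolicy below is an assumption about reasonable defaults:

    use typenum::{U2, U3};

    let data = [1.0f64, 2.0, 3.0, 4.0, 5.0, 6.0];
    let view: Tensor<f64, Shape2D<U2, U3>, Contiguous, SliceLayout<'_, f64>, DefaultPolicy> =
        Tensor::from_slice(&data);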

impl<T, S, C, L, P> Tensor<T, S, C, L, P>[src]

pub fn broadcast<Z>(
    &self
) -> Tensor<T, Z, Strided, <L as Layout<'_, T>>::View, P> where
    S: StaticShape + Broadcast<Z>,
    Z: StaticShape,
    <S as Broadcast<Z>>::Output: TRUE,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn broadcast_dynamic<Z>(
    &self,
    shape: Vec<usize>
) -> Tensor<T, Z, Strided, <L as Layout<'_, T>>::View, P> where
    S: Broadcast<Z>,
    <S as Broadcast<Z>>::Output: TRUE,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn stride<Z>(
    &self
) -> Tensor<T, <S as StridedShape<Z>>::Output, Strided, <L as Layout<'_, T>>::View, P> where
    S: StaticShape + StridedShape<Z>,
    Z: StaticShape,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn stride_dynamic<Z>(
    &self,
    strides: Vec<usize>
) -> Tensor<T, <S as StridedShapeDyn<Z>>::Output, Strided, <L as Layout<'_, T>>::View, P> where
    S: StridedShapeDyn<Z>,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn transpose(
    &self
) -> Tensor<T, <S as Transpose>::Output, C::Transposed, <L as Layout<'_, T>>::View, P> where
    S: Transpose,
    C: TransposePolicy,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn as_static<Z>(&self) -> Tensor<T, Z, C, <L as Layout<'_, T>>::View, P> where
    Z: StaticShape,
    S: Same<Z>,
    <S as Same<Z>>::Output: TRUE,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn as_view(&self) -> Tensor<T, S, C, <L as Layout<'_, T>>::View, P> where
    S: StaticShape,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn alloc(shape: Vec<usize>) -> Self where
    L: Alloc
[src]

pub fn fill(value: T) -> Self where
    L: StaticFill<T>, 
[src]

pub fn fill_dynamic(value: T, shape: Vec<usize>) -> Self where
    L: DynamicFill<T>, 
[src]
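
broadcast, stride and transpose all return non-owning views over self's data: broadcast and stride produce Strided views (and are therefore excluded from BLAS calls), while transpose flips the shape and maps the transpose policy through C::Transposed. fill, fill_dynamic and alloc are the basic constructors on the owning layouts. Sketch, again with the assumed StaticTensor alias:

    use typenum::{U1, U2, U3};

    let t: StaticTensor<f64, Shape2D<U1, U3>> = StaticTensor::fill(1.0);

    // Broadcast the unit axis to length 2; validity is checked at compile time via Broadcast<Z>.
    let wide = t.broadcast::<Shape2D<U2, U3>>();

    // A (3, 1) view of the same data; the transpose flag follows C::Transposed.
    let tt = t.transpose();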

impl<T, S, L, P> Tensor<T, S, Contiguous, L, P>[src]

pub fn reshape<Z>(
    &self
) -> Tensor<T, Z, Contiguous, <L as Layout<'_, T>>::View, P> where
    Z: StaticShape,
    S: SameNumElements<T, Z>,
    <S as SameNumElements<T, Z>>::Output: TRUE,
    L: for<'a> Layout<'a, T>, 
[src]

pub fn reshape_dynamic<Z>(
    &self,
    shape: Vec<usize>
) -> Tensor<T, Z, Contiguous, <L as Layout<'_, T>>::View, P> where
    L: for<'a> Layout<'a, T>, 
[src]
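
reshape is only defined on Contiguous tensors and checks at compile time, via SameNumElements, that the target shape holds exactly as many elements; reshape_dynamic defers that check to runtime for shapes carrying dynamic dimensions. Like the other view methods it returns a borrowing view rather than a copy. Sketch:

    use typenum::{U2, U3, U6};

    let t: StaticTensor<f64, Shape2D<U2, U3>> = StaticTensor::fill(0.0);
    let flat = t.reshape::<Shape1D<U6>>(); // 2 * 3 == 6 elements, checked at compile time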

Trait Implementations

impl<T: Clone, S: Clone, C: Clone, L: Clone, P: Clone> Clone for Tensor<T, S, C, L, P>[src]

impl<T: Debug, S: Debug, C: Debug, L: Debug, P: Debug> Debug for Tensor<T, S, C, L, P>[src]

impl<T, S, C, L, P> Default for Tensor<T, S, C, L, P> where
    L: Default
[src]

impl<T, S, C, L, P> Deref for Tensor<T, S, C, L, P>[src]

type Target = L

The resulting type after dereferencing.

impl<T, S, C, L, P> DerefMut for Tensor<T, S, C, L, P>[src]

impl<T: PartialEq, S: PartialEq, C: PartialEq, L: PartialEq, P: PartialEq> PartialEq<Tensor<T, S, C, L, P>> for Tensor<T, S, C, L, P>[src]

impl<T, S, C, L, P> StructuralPartialEq for Tensor<T, S, C, L, P>[src]

Auto Trait Implementations

impl<T, S, C, L, P> RefUnwindSafe for Tensor<T, S, C, L, P> where
    C: RefUnwindSafe,
    L: RefUnwindSafe,
    P: RefUnwindSafe,
    S: RefUnwindSafe,
    T: RefUnwindSafe

impl<T, S, C, L, P> Send for Tensor<T, S, C, L, P> where
    C: Send,
    L: Send,
    P: Send,
    S: Send,
    T: Send

impl<T, S, C, L, P> Sync for Tensor<T, S, C, L, P> where
    C: Sync,
    L: Sync,
    P: Sync,
    S: Sync,
    T: Sync

impl<T, S, C, L, P> Unpin for Tensor<T, S, C, L, P> where
    C: Unpin,
    L: Unpin,
    P: Unpin,
    S: Unpin,
    T: Unpin

impl<T, S, C, L, P> UnwindSafe for Tensor<T, S, C, L, P> where
    C: UnwindSafe,
    L: UnwindSafe,
    P: UnwindSafe,
    S: UnwindSafe,
    T: UnwindSafe

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T> Pointable for T

type Init = T

The type for initializers.

impl<T> Same<T> for T[src]

type Output = T

Should always be Self

impl<T> ToOwned for T where
    T: Clone
[src]

type Owned = T

The resulting type after obtaining ownership.

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.