Struct cv_convert::TensorFromMat[src]

pub struct TensorFromMat { /* fields omitted */ }

A Tensor whose data is borrowed from a Mat. It can be dereferenced to a Tensor.
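A minimal usage sketch, assuming the crate's TryFromCv conversion trait and opencv's Mat constructors; the exact conversion entry point and whether it takes the Mat by value are assumptions here, not something this page documents:

```rust
use cv_convert::{TensorFromMat, TryFromCv}; // TryFromCv: assumed conversion trait from this crate
use opencv::core::{Mat, Scalar, CV_8UC3};

fn main() -> opencv::Result<()> {
    // Hypothetical 480x640 3-channel image; the Mat owns the pixel buffer.
    let mat = Mat::new_rows_cols_with_default(480, 640, CV_8UC3, Scalar::all(0.0))?;

    // Assumed to take the Mat by value so that the borrowed tensor data stays
    // valid for as long as the TensorFromMat lives.
    let tensor = TensorFromMat::try_from_cv(mat).expect("conversion failed");

    // Deref (or the tensor() accessor below) exposes the tch::Tensor API.
    println!("shape = {:?}", tensor.size());
    Ok(())
}
```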

Implementations

impl TensorFromMat[src]

pub fn tensor(&self) -> &Tensor[src]

Methods from Deref<Target = Tensor>

pub fn as_ptr(&self) -> *const C_tensor[src]

Returns a pointer to the underlying C++ tensor.

The caller must ensure that the Rust tensor object outlives this pointer.

pub fn as_mut_ptr(&mut self) -> *mut C_tensor[src]

Returns a mutable pointer to the underlying C++ tensor.

The caller must ensure that the Rust tensor object outlives this pointer.
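A small sketch of the lifetime requirement stated above (Tensor::of_slice is assumed from tch; the point is only that the owning tensor must stay alive while the raw pointer is in use):

```rust
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1.0f32, 2.0, 3.0]); // assumed constructor
    let ptr = t.as_ptr();
    // `t` must outlive every use of `ptr`; dropping `t` first would leave
    // `ptr` dangling even though the raw pointer itself still compiles.
    println!("C tensor lives at {:?}", ptr);
    drop(t); // only safe because `ptr` is not used afterwards
}
```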

pub fn dim(&self) -> usize[src]

Returns the number of dimensions of the tensor.

pub fn size(&self) -> Vec<i64, Global>[src]

Returns the shape of the input tensor.

pub fn size1(&self) -> Result<i64, TchError>[src]

Returns the tensor size for single dimension tensors.

pub fn size2(&self) -> Result<(i64, i64), TchError>[src]

Returns the tensor sizes for two dimension tensors.

pub fn size3(&self) -> Result<(i64, i64, i64), TchError>[src]

Returns the tensor sizes for three dimension tensors.

pub fn size4(&self) -> Result<(i64, i64, i64, i64), TchError>[src]

Returns the tensor sizes for four dimension tensors.

pub fn size5(&self) -> Result<(i64, i64, i64, i64, i64), TchError>[src]

Returns the tensor sizes for five dimension tensors.

pub fn size6(&self) -> Result<(i64, i64, i64, i64, i64, i64), TchError>[src]

Returns the tensor sizes for six dimension tensors.
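A quick illustration of the fixed-arity size accessors (a sketch; Tensor::of_slice and reshape are assumed from tch):

```rust
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1i64, 2, 3, 4, 5, 6]).reshape(&[2, 3]);

    assert_eq!(t.dim(), 2);
    assert_eq!(t.size(), vec![2, 3]);
    // size2 succeeds because the tensor really has two dimensions...
    assert_eq!(t.size2().unwrap(), (2, 3));
    // ...and size3 fails for the same reason.
    assert!(t.size3().is_err());
}
```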

pub fn stride(&self) -> Vec<i64, Global>[src]

Returns the stride of the input tensor.

pub fn stride1(&self) -> Result<i64, TchError>[src]

Returns the tensor strides for single dimension tensors.

pub fn stride2(&self) -> Result<(i64, i64), TchError>[src]

Returns the tensor strides for two dimension tensors.

pub fn stride3(&self) -> Result<(i64, i64, i64), TchError>[src]

Returns the tensor strides for three dimension tensors.

pub fn stride4(&self) -> Result<(i64, i64, i64, i64), TchError>[src]

Returns the tensor strides for four dimension tensors.

pub fn stride5(&self) -> Result<(i64, i64, i64, i64, i64), TchError>[src]

Returns the tensor strides for five dimension tensors.

pub fn stride6(&self) -> Result<(i64, i64, i64, i64, i64, i64), TchError>[src]

Returns the tensor strides for six dimension tensors.

pub fn f_kind(&self) -> Result<Kind, TchError>[src]

Returns the kind of elements stored in the input tensor. Returns an error on undefined tensors and unsupported data types.

pub fn kind(&self) -> Kind[src]

Returns the kind of elements stored in the input tensor. Panics on undefined tensors and unsupported data types.

pub fn device(&self) -> Device[src]

Returns the device on which the input tensor is located.
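For example (a sketch using tch's Kind and Device enums; of_slice is assumed):

```rust
use tch::{Device, Kind, Tensor};

fn main() {
    let t = Tensor::of_slice(&[1.0f32, 2.0]);
    assert_eq!(t.kind(), Kind::Float);   // f32 elements map to Kind::Float
    assert_eq!(t.device(), Device::Cpu); // of_slice allocates on the CPU
    // f_kind is the fallible variant; it returns Err instead of panicking.
    assert!(t.f_kind().is_ok());
}
```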

pub fn print(&self)[src]

Prints the input tensor.

Caution: this uses the C++ printer which prints the whole tensor even if it is very large.

pub fn f_double_value(&self, idx: &[i64]) -> Result<f64, TchError>[src]

Returns a double value on tensors holding a single element. An error is returned otherwise.

pub fn f_int64_value(&self, idx: &[i64]) -> Result<i64, TchError>[src]

Returns an int value on tensors holding a single element. An error is returned otherwise.

pub fn double_value(&self, idx: &[i64]) -> f64[src]

Returns a double value on tensors holding a single element. Panics otherwise.

pub fn int64_value(&self, idx: &[i64]) -> i64[src]

Returns an int value on tensors holding a single element. Panics otherwise.
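A short sketch of the single-element accessors (indexing behaviour taken from the signatures and descriptions above):

```rust
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[10i64, 20, 30]);
    // Indexing selects one element; the panicking variant is convenient
    // when the index is known to be valid.
    assert_eq!(t.int64_value(&[1]), 20);
    // The fallible variant reports bad indices instead of panicking.
    assert!(t.f_int64_value(&[99]).is_err());

    let x = Tensor::of_slice(&[0.5f64, 1.5]);
    assert_eq!(x.double_value(&[1]), 1.5);
}
```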

pub fn requires_grad(&self) -> bool[src]

Returns true if gradients are currently tracked for this tensor.

pub fn data_ptr(&self) -> *mut c_void[src]

Returns the address of the first element of this tensor.

pub fn defined(&self) -> bool[src]

Returns true if the tensor is defined.

pub fn is_mkldnn(&self) -> bool[src]

Returns true if the tensor is compatible with MKL-DNN (oneDNN).

pub fn is_sparse(&self) -> bool[src]

Returns true if the tensor is sparse.

pub fn zero_grad(&mut self)[src]

Zeroes the gradient tensor attached to this tensor if defined.

pub fn f_backward(&self) -> Result<(), TchError>[src]

Runs the backward pass, populating the gradient tensors for tensors whose gradients are tracked.

Gradient tracking can be turned on via set_requires_grad.

pub fn backward(&self)[src]

Runs the backward pass, populating the gradient tensors for tensors whose gradients are tracked.

Gradient tracking can be turned on via set_requires_grad. Panics if the C++ API returns an exception.
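A minimal gradient-tracking sketch built from the methods above (set_requires_grad and grad are assumed from tch's Tensor API):

```rust
use tch::Tensor;

fn main() {
    // y = x * x at x = 3, with gradient tracking enabled on x.
    let mut x = Tensor::of_slice(&[3.0f64]).set_requires_grad(true);
    let y = &x * &x;

    y.backward();                                 // populates x.grad()
    assert_eq!(x.grad().double_value(&[0]), 6.0); // dy/dx = 2x = 6

    x.zero_grad();                                // resets the accumulated gradient
    assert_eq!(x.grad().double_value(&[0]), 0.0);
}
```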

pub fn f_copy_data_u8(
    &self,
    dst: &mut [u8],
    numel: usize
) -> Result<(), TchError>
[src]

Copies numel elements from self to dst.

pub fn f_internal_amp_non_finite_check_and_unscale(
    &mut self,
    found_inf: &mut Tensor,
    inv_scale: &Tensor
) -> Result<(), TchError>
[src]

Unscales the tensor while checking for infinities.

found_inf is a singleton tensor that is used to record the presence of infinite values. inv_scale is a scalar containing the inverse scaling factor. This method is only available for CUDA tensors.

pub fn internal_amp_non_finite_check_and_unscale(
    &mut self,
    found_inf: &mut Tensor,
    inv_scale: &Tensor
)
[src]

Unscales the tensor while checking for infinities.

found_inf is a singleton tensor that is used to record the presence of infinite values. inv_scale is a scalar containing the inverse scaling factor. This method is only available for CUDA tensors.

pub fn copy_data_u8(&self, dst: &mut [u8], numel: usize)[src]

Copies numel elements from self to dst.

pub fn f_copy_data<T>(
    &self,
    dst: &mut [T],
    numel: usize
) -> Result<(), TchError> where
    T: Element
[src]

Copies numel elements from self to dst.

pub fn copy_data<T>(&self, dst: &mut [T], numel: usize) where
    T: Element
[src]

Copies numel elements from self to dst.
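For example, copying the tensor contents into a plain Rust buffer (a sketch; the destination is sized from numel by hand):

```rust
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0]);

    // Destination buffer sized from numel(), then filled by copy_data.
    let mut dst = vec![0.0f32; t.numel()];
    t.copy_data(&mut dst, t.numel());
    assert_eq!(dst, vec![1.0, 2.0, 3.0, 4.0]);
}
```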

pub fn numel(&self) -> usize[src]

Returns the total number of elements stored in a tensor.

pub fn shallow_clone(&self) -> Tensor[src]

Returns a new tensor that shares storage with the input tensor.

pub fn f_get(&self, index: i64) -> Result<Tensor, TchError>[src]

Gets the sub-tensor at the given index.

pub fn get(&self, index: i64) -> Tensor[src]

Gets the sub-tensor at the given index.
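A short sketch of get and shallow_clone on a 2-D tensor (of_slice and reshape assumed):

```rust
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1i64, 2, 3, 4]).reshape(&[2, 2]);

    // get(1) selects the second row as a 1-D sub-tensor.
    let row = t.get(1);
    assert_eq!(row.size(), vec![2]);
    assert_eq!(row.int64_value(&[0]), 3);

    // shallow_clone returns a new handle to the same storage; no data is copied.
    let alias = t.shallow_clone();
    assert_eq!(alias.size(), t.size());
}
```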

pub fn f_copy_(&mut self, src: &Tensor) -> Result<(), TchError>[src]

Copies values from the argument tensor to the input tensor.

pub fn copy_(&mut self, src: &Tensor)[src]

Copies values from the argument tensor to the input tensor.

pub fn save<T>(&self, path: T) -> Result<(), TchError> where
    T: AsRef<Path>, 
[src]

Saves a tensor to a file.

The file format is the same as the one used by the PyTorch C++ API.

pub fn to_string(&self, lw: i64) -> Result<String, TchError>[src]

Returns a string representation for the tensor.

The representation contains all the tensor elements, hence it may be huge for large tensors.
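For instance, round-tripping a tensor through a file (Tensor::load is assumed as the counterpart of save; the file name is hypothetical):

```rust
use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[1.0f32, 2.0, 3.0]);

    // Saved in the PyTorch C++ format, so it can be reloaded from C++ or Rust.
    t.save("example_tensor.pt")?;
    let reloaded = Tensor::load("example_tensor.pt")?;
    assert_eq!(reloaded.size(), t.size());

    // to_string caps the line width (80 here); the contents are not truncated.
    println!("{}", t.to_string(80)?);
    Ok(())
}
```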

pub fn f_internal_and_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_and_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_iand_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_iand_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_ilshift_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_ilshift_1(
    &mut self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_ior_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_ior_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_irshift_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_irshift_1(
    &mut self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_ixor_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_ixor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_lshift_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_lshift_1(
    &mut self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_or_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_or_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_rshift_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_rshift_1(
    &mut self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_xor_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_internal_xor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_adaptive_avg_pool2d(
    &self,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_adaptive_avg_pool2d_backward(
    &self,
    grad_output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_add_batch_dim(
    &self,
    batch_dim: i64,
    level: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_add_relu(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_internal_add_relu_(
    &mut self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_add_relu_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_addmv_impl_(
    &mut self,
    self2: &Tensor,
    mat: &Tensor,
    vec: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_aminmax(&self) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_internal_aminmax1(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_baddbmm_mkl_(
    &mut self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_bmm(
    &self,
    mat2: &Tensor,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_bmm_out(
    &self,
    out: &Tensor,
    mat2: &Tensor,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_byte(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_char(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_double(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_float(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_half(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_int(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_long(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cast_short(
    &self,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cholesky_helper(
    &self,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cholesky_solve_helper(
    &self,
    a: &Tensor,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_coalesced_(
    &mut self,
    coalesced: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_compute_linear_combination(
    &self,
    coefficients: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_compute_linear_combination_out(
    &self,
    out: &Tensor,
    coefficients: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_conj(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    cudnn_enabled: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_convolution1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    cudnn_enabled: bool,
    allow_tf32: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_convolution_nogroup<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_copy_from(
    &self,
    dst: &Tensor,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cudnn_rnn<T>(
    &self,
    weight: &[T],
    weight_stride0: i64,
    weight_buf: Option<T>,
    hx: &Tensor,
    cx: Option<T>,
    mode: i64,
    hidden_size: i64,
    proj_size: i64,
    num_layers: i64,
    batch_first: bool,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_sizes: &[i64],
    dropout_state: Option<T>
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_cumprod(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_internal_cumprod_out(
    &self,
    out: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_cumsum(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_internal_cumsum_out(
    &self,
    out: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fake_quantize_learnable_per_channel_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fake_quantize_learnable_per_channel_affine_backward(
    &self,
    grad: &Tensor,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_internal_fake_quantize_learnable_per_tensor_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fake_quantize_learnable_per_tensor_affine_backward(
    &self,
    grad: &Tensor,
    scale: &Tensor,
    zero_point: &Tensor,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_internal_fft_c2c(
    &self,
    dim: &[i64],
    normalization: i64,
    forward: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fft_c2c_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    forward: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fft_c2r(
    &self,
    dim: &[i64],
    normalization: i64,
    last_dim_size: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fft_c2r_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    last_dim_size: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fft_r2c(
    &self,
    dim: &[i64],
    normalization: i64,
    onesided: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fft_r2c_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    onesided: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_fused_dropout(
    &self,
    p: f64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_fw_primal(&self, level: i64) -> Result<Tensor, TchError>[src]

pub fn f_internal_gather_sparse_backward(
    &self,
    dim: i64,
    index: &Tensor,
    grad: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_grid_sampler_2d_cpu_fallback(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_grid_sampler_2d_cpu_fallback_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_index_copy_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_index_put_impl_<T>(
    &mut self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool,
    unsafe_: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_indices(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_inverse_helper(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_linalg_inv_out_helper_(
    &mut self,
    infos_lu: &Tensor,
    infos_getri: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_linalg_qr_helper(
    &self,
    mode: &str
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_linalg_solve_out_helper_(
    &mut self,
    other: &Tensor,
    infos: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_log_softmax(
    &self,
    dim: i64,
    half_to_float: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_log_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_internal_logcumsumexp_out(
    &self,
    out: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_lu_solve_helper(
    &self,
    lu_data: &Tensor,
    lu_pivots: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_lu_with_info(
    &self,
    pivot: bool,
    check_errors: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_internal_make_per_channel_quantized_tensor(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_make_per_tensor_quantized_tensor(
    &self,
    scale: f64,
    zero_point: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_masked_scale(
    &self,
    mask: &Tensor,
    scale: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_mkldnn_reshape(
    &self,
    shape: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_mkldnn_transpose(
    &self,
    dim0: i64,
    dim1: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_mkldnn_transpose_(
    &mut self,
    dim0: i64,
    dim1: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_mode(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_mode_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_nnpack_spatial_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_internal_nnpack_spatial_convolution_backward_input(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_nnpack_spatial_convolution_backward_weight(
    &self,
    weightsize: &[i64],
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_pack_padded_sequence(
    &self,
    lengths: &Tensor,
    batch_first: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_pdist_backward(
    &self,
    grad: &Tensor,
    p: f64,
    pdist: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_remove_batch_dim(
    &self,
    level: i64,
    batch_size: i64,
    out_dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_reshape_from_tensor(
    &self,
    shape: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_s_where(
    &self,
    condition: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sample_dirichlet(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_shape_as_tensor(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_sobol_engine_ff_(
    &mut self,
    n: i64,
    sobolstate: &Tensor,
    dimension: i64,
    num_generated: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sobol_engine_initialize_state_(
    &mut self,
    dimension: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sobol_engine_scramble_(
    &mut self,
    ltm: &Tensor,
    dimension: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_softmax(
    &self,
    dim: i64,
    half_to_float: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_solve_helper(
    &self,
    a: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_sparse_addmm(
    &self,
    sparse: &Tensor,
    dense: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_log_softmax(
    &self,
    dim: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_log_softmax1(
    &self,
    dim: i64,
    half_to_float: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_log_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_softmax(
    &self,
    dim: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_softmax1(
    &self,
    dim: i64,
    half_to_float: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_sparse_matmul(
    &self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_sum(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_sparse_sum1(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_internal_sparse_sum2(&self, dim: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_internal_sparse_sum3(
    &self,
    dim: &[i64],
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_sparse_sum_backward(
    &self,
    grad: &Tensor,
    dim: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_standard_gamma(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_standard_gamma_grad(
    &self,
    output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_std(&self, unbiased: bool) -> Result<Tensor, TchError>[src]

pub fn f_internal_svd_helper(
    &self,
    some: bool,
    compute_uv: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_internal_syevd_helper(
    &self,
    compute_eigenvectors: bool,
    uplo: &str
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_symeig_helper(
    &self,
    eigenvectors: bool,
    upper: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_test_serialization_subcmul(
    &self,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_internal_triangular_solve_helper(
    &self,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_unique(
    &self,
    sorted: bool,
    return_inverse: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_internal_unique2(
    &self,
    sorted: bool,
    return_inverse: bool,
    return_counts: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_internal_unsafe_view(&self, size: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_internal_values(&self) -> Result<Tensor, TchError>[src]

pub fn f_internal_var(&self, unbiased: bool) -> Result<Tensor, TchError>[src]

pub fn f_abs(&self) -> Result<Tensor, TchError>[src]

pub fn f_abs_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_abs_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_absolute(&self) -> Result<Tensor, TchError>[src]

pub fn f_absolute_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_absolute_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_acos(&self) -> Result<Tensor, TchError>[src]

pub fn f_acos_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_acos_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_acosh(&self) -> Result<Tensor, TchError>[src]

pub fn f_acosh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_acosh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_adaptive_avg_pool1d(
    &self,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool2d(
    &self,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool3d(
    &self,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool3d_backward(
    &self,
    grad_output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_avg_pool3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_max_pool1d(
    &self,
    output_size: &[i64]
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_adaptive_max_pool2d(
    &self,
    output_size: &[i64]
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_adaptive_max_pool2d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_max_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_max_pool2d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_adaptive_max_pool3d(
    &self,
    output_size: &[i64]
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_adaptive_max_pool3d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_max_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_adaptive_max_pool3d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_add(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_add1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_add_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_add_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_add_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addbmm(
    &self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addbmm_(
    &mut self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addbmm_out(
    &self,
    out: &Tensor,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcdiv(
    &self,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcdiv_(
    &mut self,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcdiv_out(
    &self,
    out: &Tensor,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcmul(
    &self,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcmul_(
    &mut self,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addcmul_out(
    &self,
    out: &Tensor,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_addmm_(
    &mut self,
    mat1: &Tensor,
    mat2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addmm_out(
    &self,
    out: &Tensor,
    mat1: &Tensor,
    mat2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addmv(&self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_addmv_(
    &mut self,
    mat: &Tensor,
    vec: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addmv_out(
    &self,
    out: &Tensor,
    mat: &Tensor,
    vec: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addr(&self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_addr_(
    &mut self,
    vec1: &Tensor,
    vec2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_addr_out(
    &self,
    out: &Tensor,
    vec1: &Tensor,
    vec2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_alias(&self) -> Result<Tensor, TchError>[src]

pub fn f_align_as(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_all(&self) -> Result<Tensor, TchError>[src]

pub fn f_all1(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError>[src]

pub fn f_all_out(
    &self,
    out: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError>[src]

pub fn f_alpha_dropout_(
    &mut self,
    p: f64,
    train: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_amax(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError>[src]

pub fn f_amax_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_amin(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError>[src]

pub fn f_amin_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_angle(&self) -> Result<Tensor, TchError>[src]

pub fn f_angle_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_any(&self) -> Result<Tensor, TchError>[src]

pub fn f_any1(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError>[src]

pub fn f_any_out(
    &self,
    out: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_arccos(&self) -> Result<Tensor, TchError>[src]

pub fn f_arccos_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arccos_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_arccosh(&self) -> Result<Tensor, TchError>[src]

pub fn f_arccosh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arccosh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_arcsin(&self) -> Result<Tensor, TchError>[src]

pub fn f_arcsin_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arcsin_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_arcsinh(&self) -> Result<Tensor, TchError>[src]

pub fn f_arcsinh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arcsinh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_arctan(&self) -> Result<Tensor, TchError>[src]

pub fn f_arctan_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arctan_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_arctanh(&self) -> Result<Tensor, TchError>[src]

pub fn f_arctanh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_arctanh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_argmax(
    &self,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_argmax_out(
    &self,
    out: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_argmin(
    &self,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_argmin_out(
    &self,
    out: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_argsort(&self, dim: i64, descending: bool) -> Result<Tensor, TchError>[src]

pub fn f_as_strided(
    &self,
    size: &[i64],
    stride: &[i64],
    storage_offset: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_as_strided_(
    &mut self,
    size: &[i64],
    stride: &[i64],
    storage_offset: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_asin(&self) -> Result<Tensor, TchError>[src]

pub fn f_asin_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_asin_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_asinh(&self) -> Result<Tensor, TchError>[src]

pub fn f_asinh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_asinh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_atan(&self) -> Result<Tensor, TchError>[src]

pub fn f_atan2(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_atan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_atan2_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_atan_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_atan_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_atanh(&self) -> Result<Tensor, TchError>[src]

pub fn f_atanh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_atanh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_atleast_1d(&self) -> Result<Tensor, TchError>[src]

pub fn f_atleast_2d(&self) -> Result<Tensor, TchError>[src]

pub fn f_atleast_3d(&self) -> Result<Tensor, TchError>[src]

pub fn f_avg_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool2d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool2d_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool3d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_avg_pool3d_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_baddbmm(
    &self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_baddbmm_(
    &mut self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_baddbmm_out(
    &self,
    out: &Tensor,
    batch1: &Tensor,
    batch2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64,
    cudnn_enabled: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_backward_elemt<T>(
    &self,
    grad_out: &Tensor,
    mean: &Tensor,
    invstd: &Tensor,
    weight: Option<T>,
    mean_dy: &Tensor,
    mean_dy_xmu: &Tensor
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_backward_reduce<T>(
    &self,
    grad_out: &Tensor,
    mean: &Tensor,
    invstd: &Tensor,
    weight: Option<T>,
    input_g: bool,
    weight_g: bool,
    bias_g: bool
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_elemt<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    invstd: &Tensor,
    eps: f64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_elemt_out<T>(
    &self,
    out: &Tensor,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    invstd: &Tensor,
    eps: f64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_gather_stats<T>(
    &self,
    mean: &Tensor,
    invstd: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64,
    eps: f64,
    count: i64
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_gather_stats_with_counts<T>(
    &self,
    mean: &Tensor,
    invstd: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64,
    eps: f64,
    counts: &Tensor
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_batch_norm_stats(&self, eps: f64) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_batch_norm_update_stats<T>(
    &self,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_bernoulli(&self) -> Result<Tensor, TchError>[src]

pub fn f_bernoulli1(&self, p: f64) -> Result<Tensor, TchError>[src]

pub fn f_bernoulli_(&mut self, p: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bernoulli_1(&mut self, p: f64) -> Result<Tensor, TchError>[src]

pub fn f_bernoulli_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_binary_cross_entropy<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_binary_cross_entropy_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_binary_cross_entropy_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_binary_cross_entropy_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_binary_cross_entropy_with_logits<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    pos_weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_binary_cross_entropy_with_logits_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    pos_weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_bincount<T>(
    &self,
    weights: Option<T>,
    minlength: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_bitwise_and<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_and1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_and_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_and_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_and_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_bitwise_and_out1<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_not(&self) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_not_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_not_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_or<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_or1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_or_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_or_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_or_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_bitwise_or_out1<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_xor<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_xor1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_xor_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bitwise_xor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bitwise_xor_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_bitwise_xor_out1<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_bmm(&self, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_broadcast_to(&self, size: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_bucketize(
    &self,
    boundaries: &Tensor,
    out_int32: bool,
    right: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_bucketize_out(
    &self,
    out: &Tensor,
    boundaries: &Tensor,
    out_int32: bool,
    right: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cauchy_(&mut self, median: f64, sigma: f64) -> Result<Tensor, TchError>[src]

pub fn f_ceil(&self) -> Result<Tensor, TchError>[src]

pub fn f_ceil_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_ceil_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_celu(&self) -> Result<Tensor, TchError>[src]

pub fn f_celu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError>[src]

pub fn f_cholesky(&self, upper: bool) -> Result<Tensor, TchError>[src]

pub fn f_cholesky_inverse(&self, upper: bool) -> Result<Tensor, TchError>[src]

pub fn f_cholesky_inverse_out(
    &self,
    out: &Tensor,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cholesky_out(
    &self,
    out: &Tensor,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cholesky_solve(
    &self,
    input2: &Tensor,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cholesky_solve_out(
    &self,
    out: &Tensor,
    input2: &Tensor,
    upper: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_choose_qparams_optimized(
    &self,
    numel: i64,
    n_bins: i64,
    ratio: f64,
    bit_width: i64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_chunk(
    &self,
    chunks: i64,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_clamp<S>(&self, min: S, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_<S>(&mut self, min: S, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_max<S>(&self, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_max_<S>(&mut self, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_max_out<S>(
    &self,
    out: &Tensor,
    max: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_min<S>(&self, min: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_min_<S>(&mut self, min: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_min_out<S>(
    &self,
    out: &Tensor,
    min: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clamp_out<S>(
    &self,
    out: &Tensor,
    min: S,
    max: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clip<S>(&self, min: S, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clip_<S>(&mut self, min: S, max: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_clip_out<S>(
    &self,
    out: &Tensor,
    min: S,
    max: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_coalesce(&self) -> Result<Tensor, TchError>[src]

pub fn f_col2im(
    &self,
    output_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_col2im_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_combinations(
    &self,
    r: i64,
    with_replacement: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_conj(&self) -> Result<Tensor, TchError>[src]

pub fn f_conj_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_constant_pad_nd(&self, pad: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_contiguous(&self) -> Result<Tensor, TchError>[src]

pub fn f_conv1d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_conv2d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_conv3d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_conv_tbc(
    &self,
    weight: &Tensor,
    bias: &Tensor,
    pad: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_conv_tbc_backward(
    &self,
    input: &Tensor,
    weight: &Tensor,
    bias: &Tensor,
    pad: i64
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_conv_transpose1d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_conv_transpose2d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_conv_transpose3d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_convolution_overrideable<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_copy_sparse_to_sparse_(
    &mut self,
    src: &Tensor,
    non_blocking: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_copysign(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_copysign1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_copysign_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_copysign_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_copysign_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_cos(&self) -> Result<Tensor, TchError>[src]

pub fn f_cos_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_cos_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_cosh(&self) -> Result<Tensor, TchError>[src]

pub fn f_cosh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_cosh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_count_nonzero(&self, dim: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_count_nonzero1(
    &self,
    dim: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_cross(
    &self,
    other: &Tensor,
    dim: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_cross_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dim: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_batch_norm<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    exponential_average_factor: f64,
    epsilon: f64
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_cudnn_batch_norm_backward<T>(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    save_mean: Option<T>,
    save_var: Option<T>,
    epsilon: f64,
    reservespace: &Tensor
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_cudnn_convolution(
    &self,
    weight: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_convolution1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_cudnn_convolution2(
    &self,
    weight: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_convolution_transpose(
    &self,
    weight: &Tensor,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_convolution_transpose1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_cudnn_convolution_transpose2(
    &self,
    weight: &Tensor,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_convolution_transpose_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_cudnn_grid_sampler(&self, grid: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_cudnn_grid_sampler_backward(
    &self,
    grid: &Tensor,
    grad_output: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_cummax(&self, dim: i64) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_cummax_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_cummaxmin_backward(
    &self,
    grad: &Tensor,
    indices: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_cummin(&self, dim: i64) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_cummin_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_cumprod(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_cumprod_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_cumprod_backward(
    &self,
    grad: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_cumprod_out(
    &self,
    out: &Tensor,
    dim: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_cumsum(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_cumsum_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_cumsum_out(
    &self,
    out: &Tensor,
    dim: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_data(&self) -> Result<Tensor, TchError>[src]

pub fn f_deg2rad(&self) -> Result<Tensor, TchError>[src]

pub fn f_deg2rad_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_deg2rad_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_dequantize(&self) -> Result<Tensor, TchError>[src]

pub fn f_det(&self) -> Result<Tensor, TchError>[src]

pub fn f_detach(&self) -> Result<Tensor, TchError>[src]

pub fn f_detach_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_diag(&self, diagonal: i64) -> Result<Tensor, TchError>[src]

pub fn f_diag_embed(
    &self,
    offset: i64,
    dim1: i64,
    dim2: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_diag_out(
    &self,
    out: &Tensor,
    diagonal: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_diagflat(&self, offset: i64) -> Result<Tensor, TchError>[src]

pub fn f_diagonal(
    &self,
    offset: i64,
    dim1: i64,
    dim2: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_diff<T>(
    &self,
    n: i64,
    dim: i64,
    prepend: Option<T>,
    append: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_diff_out<T>(
    &self,
    out: &Tensor,
    n: i64,
    dim: i64,
    prepend: Option<T>,
    append: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_digamma(&self) -> Result<Tensor, TchError>[src]

pub fn f_digamma_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_dist(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_div(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_div1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_div2(
    &self,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_div3<S>(
    &self,
    other: S,
    rounding_mode: &str
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_div_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_div_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_div_2(
    &mut self,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_div_3<S>(
    &mut self,
    other: S,
    rounding_mode: &str
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_div_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_div_out1(
    &self,
    out: &Tensor,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]
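
Example: a sketch of the division variants above. The `rounding_mode` strings "floor" and "trunc" are assumed to follow PyTorch's `torch.div` convention; `Tensor::of_slice` is likewise assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn div_demo() -> Result<(), TchError> {
        let a = Tensor::of_slice(&[7.0f32, -7.0]); // assumed tch constructor
        let b = Tensor::of_slice(&[2.0f32, 2.0]);
        let exact = a.f_div(&b)?;               // [ 3.5, -3.5]
        let floored = a.f_div2(&b, "floor")?;   // [ 3.0, -4.0] (assumed PyTorch semantics)
        let truncated = a.f_div2(&b, "trunc")?; // [ 3.0, -3.0]
        exact.print();
        floored.print();
        truncated.print();
        Ok(())
    }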

pub fn f_divide(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_divide1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_divide2(
    &self,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_divide3<S>(
    &self,
    other: S,
    rounding_mode: &str
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_divide_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_divide_2(
    &mut self,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_divide_3<S>(
    &mut self,
    other: S,
    rounding_mode: &str
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_divide_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_divide_out1(
    &self,
    out: &Tensor,
    other: &Tensor,
    rounding_mode: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_dot(&self, tensor: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_dot_out(
    &self,
    out: &Tensor,
    tensor: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError>[src]

pub fn f_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError>[src]

pub fn f_eig(&self, eigenvectors: bool) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_eig_out(
    &self,
    e: &Tensor,
    v: &Tensor,
    eigenvectors: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_elu(&self) -> Result<Tensor, TchError>[src]

pub fn f_elu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_elu_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_embedding_renorm_(
    &mut self,
    indices: &Tensor,
    max_norm: f64,
    norm_type: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_empty_like(&self) -> Result<Tensor, TchError>[src]

pub fn f_eq<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_eq1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_eq_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_eq_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_eq_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_eq_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]
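
Example: the scalar and tensor forms of equality comparison above; both return a boolean tensor. `Tensor::of_slice` is assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn eq_demo() -> Result<(), TchError> {
        let a = Tensor::of_slice(&[1i64, 2, 3]); // assumed tch constructor
        let b = Tensor::of_slice(&[1i64, 0, 3]);
        // Scalar comparison: [false, true, false].
        let eq_scalar = a.f_eq(2i64)?;
        // Element-wise tensor comparison: [true, false, true].
        let eq_tensor = a.f_eq1(&b)?;
        eq_scalar.print();
        eq_tensor.print();
        Ok(())
    }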

pub fn f_erf(&self) -> Result<Tensor, TchError>[src]

pub fn f_erf_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_erfc(&self) -> Result<Tensor, TchError>[src]

pub fn f_erfc_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_erfinv(&self) -> Result<Tensor, TchError>[src]

pub fn f_erfinv_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_exp(&self) -> Result<Tensor, TchError>[src]

pub fn f_exp2(&self) -> Result<Tensor, TchError>[src]

pub fn f_exp2_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_exp_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_expand(&self, size: &[i64], implicit: bool) -> Result<Tensor, TchError>[src]

pub fn f_expand_as(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_expm1(&self) -> Result<Tensor, TchError>[src]

pub fn f_expm1_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_exponential_(&mut self, lambd: f64) -> Result<Tensor, TchError>[src]

pub fn f_fake_quantize_per_channel_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_fake_quantize_per_channel_affine_cachemask(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_fake_quantize_per_tensor_affine(
    &self,
    scale: f64,
    zero_point: i64,
    quant_min: i64,
    quant_max: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_fake_quantize_per_tensor_affine_cachemask(
    &self,
    scale: f64,
    zero_point: i64,
    quant_min: i64,
    quant_max: i64
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_fbgemm_linear_fp16_weight(
    &self,
    packed_weight: &Tensor,
    bias: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fbgemm_linear_fp16_weight_fp32_activation(
    &self,
    packed_weight: &Tensor,
    bias: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fbgemm_linear_int8_weight<S>(
    &self,
    weight: &Tensor,
    packed: &Tensor,
    col_offsets: &Tensor,
    weight_scale: S,
    weight_zero_point: S,
    bias: &Tensor
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fbgemm_linear_int8_weight_fp32_activation<S>(
    &self,
    weight: &Tensor,
    packed: &Tensor,
    col_offsets: &Tensor,
    weight_scale: S,
    weight_zero_point: S,
    bias: &Tensor
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fbgemm_pack_gemm_matrix_fp16(&self) -> Result<Tensor, TchError>[src]

pub fn f_fbgemm_pack_quantized_matrix(&self) -> Result<Tensor, TchError>[src]

pub fn f_fbgemm_pack_quantized_matrix1(
    &self,
    k: i64,
    n: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_feature_alpha_dropout(
    &self,
    p: f64,
    train: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_feature_alpha_dropout_(
    &mut self,
    p: f64,
    train: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_feature_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError>[src]

pub fn f_feature_dropout_(
    &mut self,
    p: f64,
    train: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fft2(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fftn(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_fftshift(&self, dim: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_fft_hfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_hfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifft2(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifftn(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ifftshift(&self, dim: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_fft_ihfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_ihfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfft2(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfftn(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_irfftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfft2(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfftn(
    &self,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_fft_rfftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Result<Tensor, TchError>
[src]
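
Example: a sketch of a one-sided real FFT with `f_fft_rfft`. The `norm` string "backward" is assumed to follow PyTorch's `torch.fft` convention (its default normalization); `Tensor::of_slice` is assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn rfft_demo() -> Result<(), TchError> {
        // Assumed constructor: a real-valued signal of length 8.
        let signal = Tensor::of_slice(&[0.0f32, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0]);
        // One-sided FFT over the last dimension; n = 8 keeps the signal length.
        let spectrum = signal.f_fft_rfft(8i64, -1, "backward")?;
        spectrum.print();
        Ok(())
    }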

pub fn f_fill_<S>(&mut self, value: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fill_1(&mut self, value: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fill_diagonal_<S>(
    &mut self,
    fill_value: S,
    wrap: bool
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fix(&self) -> Result<Tensor, TchError>[src]

pub fn f_fix_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_fix_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_flatten(
    &self,
    start_dim: i64,
    end_dim: i64
) -> Result<Tensor, TchError>
[src]
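
Example: flattening everything after the batch dimension with `f_flatten`. `Tensor::of_slice` and `reshape` are assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn flatten_demo() -> Result<(), TchError> {
        // Assumed constructors: a [2, 3, 4] tensor of zeros.
        let t = Tensor::of_slice(&vec![0.0f32; 24]).reshape(&[2, 3, 4]);
        // Keep dim 0, collapse dims 1..=last -> shape [2, 12].
        let flat = t.f_flatten(1, -1)?;
        assert_eq!(flat.size(), vec![2i64, 12]);
        Ok(())
    }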

pub fn f_flip(&self, dims: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_fliplr(&self) -> Result<Tensor, TchError>[src]

pub fn f_flipud(&self) -> Result<Tensor, TchError>[src]

pub fn f_float_power(&self, exponent: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_float_power2<S>(&self, exponent: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_float_power_<S>(&mut self, exponent: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_float_power_1(&mut self, exponent: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_float_power_out(
    &self,
    out: &Tensor,
    exponent: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_float_power_out2<S>(
    &self,
    out: &Tensor,
    exponent: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_floor(&self) -> Result<Tensor, TchError>[src]

pub fn f_floor_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_floor_divide(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_floor_divide1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_floor_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_floor_divide_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_floor_divide_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_floor_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fmax(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fmax_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fmin(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fmin_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fmod<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fmod1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fmod_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fmod_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fmod_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_fmod_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_frac(&self) -> Result<Tensor, TchError>[src]

pub fn f_frac_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_frac_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_fractional_max_pool2d(
    &self,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_fractional_max_pool2d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fractional_max_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fractional_max_pool2d_out(
    &self,
    output: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_fractional_max_pool3d(
    &self,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_fractional_max_pool3d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fractional_max_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_fractional_max_pool3d_out(
    &self,
    output: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_frobenius_norm(&self) -> Result<Tensor, TchError>[src]

pub fn f_frobenius_norm1(
    &self,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_frobenius_norm_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_full_like<S>(&self, fill_value: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_gather(
    &self,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_gather_backward(
    &self,
    grad: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_gather_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool
) -> Result<Tensor, TchError>
[src]
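
Example: a sketch of `f_gather` on a 1-D tensor; the index tensor must hold Int64 values and have the same rank as the input. `Tensor::of_slice` is assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn gather_demo() -> Result<(), TchError> {
        let t = Tensor::of_slice(&[10.0f32, 20.0, 30.0, 40.0]); // assumed constructor
        let idx = Tensor::of_slice(&[3i64, 0, 0]);
        // Picks elements along dim 0: [40, 10, 10].
        let picked = t.f_gather(0, &idx, false)?;
        picked.print();
        Ok(())
    }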

pub fn f_gcd(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_gcd_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_gcd_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_ge<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ge1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ge_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ge_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ge_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ge_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_gelu(&self) -> Result<Tensor, TchError>[src]

pub fn f_gelu_backward(&self, grad: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_geometric_(&mut self, p: f64) -> Result<Tensor, TchError>[src]

pub fn f_geqrf(&self) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_geqrf_out(
    &self,
    a: &Tensor,
    tau: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_ger(&self, vec2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ger_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_glu(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_glu_backward(
    &self,
    grad_output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_glu_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_glu_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_grad(&self) -> Result<Tensor, TchError>[src]

pub fn f_greater<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_greater_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_greater_equal<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater_equal1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_greater_equal_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_greater_equal_out<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater_equal_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_greater_out<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_greater_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_grid_sampler(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_grid_sampler_2d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_grid_sampler_2d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_grid_sampler_3d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_grid_sampler_3d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_group_norm<T>(
    &self,
    num_groups: i64,
    weight: Option<T>,
    bias: Option<T>,
    eps: f64,
    cudnn_enabled: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_gru<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_gru_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_gt<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_gt1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_gt_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_gt_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_gt_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_gt_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_hardshrink(&self) -> Result<Tensor, TchError>[src]

pub fn f_hardshrink_backward<S>(
    &self,
    grad_out: &Tensor,
    lambd: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_hardsigmoid(&self) -> Result<Tensor, TchError>[src]

pub fn f_hardsigmoid_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_hardsigmoid_backward(
    &self,
    grad_output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_hardsigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_hardswish(&self) -> Result<Tensor, TchError>[src]

pub fn f_hardswish_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_hardswish_backward(
    &self,
    grad_output: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_hardswish_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_hardtanh(&self) -> Result<Tensor, TchError>[src]

pub fn f_hardtanh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_hardtanh_backward<S>(
    &self,
    grad_output: &Tensor,
    min_val: S,
    max_val: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_hardtanh_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    min_val: S,
    max_val: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_hardtanh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_heaviside(&self, values: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_heaviside_(&mut self, values: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_heaviside_out(
    &self,
    out: &Tensor,
    values: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_hinge_embedding_loss(
    &self,
    target: &Tensor,
    margin: f64,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_histc(&self, bins: i64) -> Result<Tensor, TchError>[src]

pub fn f_histc_out(&self, out: &Tensor, bins: i64) -> Result<Tensor, TchError>[src]

pub fn f_hypot(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_hypot_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_hypot_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_i0(&self) -> Result<Tensor, TchError>[src]

pub fn f_i0_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_igamma(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_igamma_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_igamma_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_igammac(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_igammac_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_igammac_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_im2col(
    &self,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_im2col_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_imag(&self) -> Result<Tensor, TchError>[src]

pub fn f_index<T>(&self, indices: &[Option<T>]) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_index_add(
    &self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_add_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_copy(
    &self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_copy_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_fill<S>(
    &self,
    dim: i64,
    index: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_index_fill1(
    &self,
    dim: i64,
    index: &Tensor,
    value: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_fill_<S>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_index_fill_1(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_put<T>(
    &self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_index_put_<T>(
    &mut self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_index_select(
    &self,
    dim: i64,
    index: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_index_select_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor
) -> Result<Tensor, TchError>
[src]
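
Example: selecting rows with `f_index_select`; the index tensor must be 1-D with Int64 values. `Tensor::of_slice` and `reshape` are assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn index_select_demo() -> Result<(), TchError> {
        // Assumed constructors: a [3, 2] matrix.
        let m = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(&[3, 2]);
        // Select rows 2 and 0, in that order.
        let rows = m.f_index_select(0, &Tensor::of_slice(&[2i64, 0]))?;
        assert_eq!(rows.size(), vec![2i64, 2]);
        Ok(())
    }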

pub fn f_indices(&self) -> Result<Tensor, TchError>[src]

pub fn f_infinitely_differentiable_gelu_backward(
    &self,
    grad: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_inner(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_inner_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_instance_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    use_input_stats: bool,
    momentum: f64,
    eps: f64,
    cudnn_enabled: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_int_repr(&self) -> Result<Tensor, TchError>[src]

pub fn f_inverse(&self) -> Result<Tensor, TchError>[src]

pub fn f_inverse_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_isclose(
    &self,
    other: &Tensor,
    rtol: f64,
    atol: f64,
    equal_nan: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_isfinite(&self) -> Result<Tensor, TchError>[src]

pub fn f_isinf(&self) -> Result<Tensor, TchError>[src]

pub fn f_isnan(&self) -> Result<Tensor, TchError>[src]

pub fn f_isneginf(&self) -> Result<Tensor, TchError>[src]

pub fn f_isneginf_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_isposinf(&self) -> Result<Tensor, TchError>[src]

pub fn f_isposinf_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_isreal(&self) -> Result<Tensor, TchError>[src]

pub fn f_istft<T>(
    &self,
    n_fft: i64,
    hop_length: impl Into<Option<i64>>,
    win_length: impl Into<Option<i64>>,
    window: Option<T>,
    center: bool,
    normalized: bool,
    onesided: bool,
    length: impl Into<Option<i64>>,
    return_complex: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_kl_div(
    &self,
    target: &Tensor,
    reduction: Reduction,
    log_target: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_kl_div_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    log_target: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_kron(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_kron_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_kthvalue(
    &self,
    k: i64,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_kthvalue_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    k: i64,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]
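
Example: `f_kthvalue` returns both the value and its index; `k` is 1-based, matching PyTorch's `kthvalue`. `Tensor::of_slice` is assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn kthvalue_demo() -> Result<(), TchError> {
        let t = Tensor::of_slice(&[5.0f32, 1.0, 4.0, 2.0]); // assumed constructor
        // Second-smallest value along dim 0: 2.0 at index 3.
        let (value, index) = t.f_kthvalue(2, 0, false)?;
        value.print();
        index.print();
        Ok(())
    }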

pub fn f_l1_loss(
    &self,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_l1_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_l1_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_l1_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_layer_norm<T>(
    &self,
    normalized_shape: &[i64],
    weight: Option<T>,
    bias: Option<T>,
    eps: f64,
    cudnn_enable: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]
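
Example: a sketch of `f_layer_norm` without an affine weight or bias; `None::<&Tensor>` pins the generic `T`. `Tensor::of_slice` and `reshape` are assumed from the tch crate, and the `1e-5` epsilon mirrors PyTorch's default.

    use tch::{Tensor, TchError};

    fn layer_norm_demo() -> Result<(), TchError> {
        // Assumed constructors: a batch of 2 vectors with 4 features each.
        let x = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0, 2.0, 4.0, 6.0, 8.0]).reshape(&[2, 4]);
        // Normalize over the trailing dimension of size 4.
        let y = x.f_layer_norm(&[4], None::<&Tensor>, None::<&Tensor>, 1e-5, true)?;
        y.print();
        Ok(())
    }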

pub fn f_lcm(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lcm_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lcm_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_ldexp(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ldexp_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ldexp_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_le<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_le1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_le_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_le_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_le_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_le_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_leaky_relu(&self) -> Result<Tensor, TchError>[src]

pub fn f_leaky_relu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_leaky_relu_backward<S>(
    &self,
    grad_output: &Tensor,
    negative_slope: S,
    self_is_result: bool
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_leaky_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lerp<S>(&self, end: &Tensor, weight: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lerp1(&self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lerp_<S>(
    &mut self,
    end: &Tensor,
    weight: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lerp_1(
    &mut self,
    end: &Tensor,
    weight: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_lerp_out<S>(
    &self,
    out: &Tensor,
    end: &Tensor,
    weight: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lerp_out1(
    &self,
    out: &Tensor,
    end: &Tensor,
    weight: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_less<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_less_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_less_equal<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less_equal1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_less_equal_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_less_equal_out<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less_equal_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_less_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_less_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_lgamma(&self) -> Result<Tensor, TchError>[src]

pub fn f_lgamma_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_lgamma_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_linalg_cholesky(&self) -> Result<Tensor, TchError>[src]

pub fn f_linalg_cholesky_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_linalg_cond<S>(&self, p: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_linalg_cond1(&self, p: &str) -> Result<Tensor, TchError>[src]

pub fn f_linalg_cond_out<S>(
    &self,
    out: &Tensor,
    p: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_linalg_cond_out1(
    &self,
    out: &Tensor,
    p: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_det(&self) -> Result<Tensor, TchError>[src]

pub fn f_linalg_eigh(&self, uplo: &str) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_linalg_eigh_out(
    &self,
    eigvals: &Tensor,
    eigvecs: &Tensor,
    uplo: &str
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_linalg_eigvalsh(&self, uplo: &str) -> Result<Tensor, TchError>[src]

pub fn f_linalg_eigvalsh_out(
    &self,
    out: &Tensor,
    uplo: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_inv(&self) -> Result<Tensor, TchError>[src]

pub fn f_linalg_inv_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_linalg_matrix_rank(
    &self,
    tol: impl Into<Option<f64>>,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_matrix_rank_out(
    &self,
    out: &Tensor,
    tol: impl Into<Option<f64>>,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_norm<S>(
    &self,
    ord: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_linalg_norm1(
    &self,
    ord: &str,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_norm_out<S>(
    &self,
    out: &Tensor,
    ord: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_linalg_norm_out1(
    &self,
    out: &Tensor,
    ord: &str,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_pinv(
    &self,
    rcond: f64,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_pinv1(
    &self,
    rcond: &Tensor,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_pinv_out(
    &self,
    out: &Tensor,
    rcond: f64,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_pinv_out1(
    &self,
    out: &Tensor,
    rcond: &Tensor,
    hermitian: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_qr(&self, mode: &str) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_linalg_qr_out(
    &self,
    q: &Tensor,
    r: &Tensor,
    mode: &str
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_linalg_slogdet(&self) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_linalg_slogdet_out(
    &self,
    sign: &Tensor,
    logabsdet: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_linalg_solve(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_linalg_solve_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_svd(
    &self,
    full_matrices: bool,
    compute_uv: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_linalg_svd_out(
    &self,
    u: &Tensor,
    s: &Tensor,
    v: &Tensor,
    full_matrices: bool,
    compute_uv: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]
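
Example: a reduced SVD via `f_linalg_svd`. The third returned tensor is assumed to follow PyTorch's `torch.linalg.svd` convention (the conjugate-transposed right singular vectors); `Tensor::of_slice` and `reshape` are assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn svd_demo() -> Result<(), TchError> {
        // Assumed constructors: a 2x3 matrix.
        let m = Tensor::of_slice(&[1.0f32, 0.0, 0.0, 0.0, 2.0, 0.0]).reshape(&[2, 3]);
        // full_matrices = false, compute_uv = true -> reduced factors.
        let (u, s, vh) = m.f_linalg_svd(false, true)?;
        assert_eq!(u.size(), vec![2i64, 2]);
        assert_eq!(s.size(), vec![2i64]);
        vh.print();
        Ok(())
    }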

pub fn f_linalg_tensorinv(&self, ind: i64) -> Result<Tensor, TchError>[src]

pub fn f_linalg_tensorinv_out(
    &self,
    out: &Tensor,
    ind: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_tensorsolve(
    &self,
    other: &Tensor,
    dims: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_linalg_tensorsolve_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dims: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_linear<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_log(&self) -> Result<Tensor, TchError>[src]

pub fn f_log10(&self) -> Result<Tensor, TchError>[src]

pub fn f_log10_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_log10_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_log1p(&self) -> Result<Tensor, TchError>[src]

pub fn f_log1p_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_log2(&self) -> Result<Tensor, TchError>[src]

pub fn f_log2_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_log2_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_log_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_log_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError>[src]

pub fn f_log_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_log_sigmoid(&self) -> Result<Tensor, TchError>[src]

pub fn f_log_sigmoid_backward(
    &self,
    grad_output: &Tensor,
    buffer: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_log_sigmoid_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    buffer: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_log_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_log_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]
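
Example: log-probabilities over the last dimension with `f_log_softmax`. `Tensor::of_slice` and `Kind` are assumed from the tch crate.

    use tch::{Kind, Tensor, TchError};

    fn log_softmax_demo() -> Result<(), TchError> {
        let logits = Tensor::of_slice(&[1.0f32, 2.0, 3.0]); // assumed constructor
        // exp() of the result sums to 1 along dim -1.
        let log_probs = logits.f_log_softmax(-1, Kind::Float)?;
        log_probs.print();
        Ok(())
    }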

pub fn f_logaddexp(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logaddexp2(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logaddexp2_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_logaddexp_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_logcumsumexp_out(
    &self,
    out: &Tensor,
    dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_logdet(&self) -> Result<Tensor, TchError>[src]

pub fn f_logical_and(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_and_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_and_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_logical_not(&self) -> Result<Tensor, TchError>[src]

pub fn f_logical_not_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_logical_not_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_or(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_or_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_or_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_logical_xor(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_xor_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_logical_xor_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError>[src]

pub fn f_logit_(
    &mut self,
    eps: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_logit_backward(
    &self,
    grad_output: &Tensor,
    eps: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_logit_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    eps: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_logit_out(
    &self,
    out: &Tensor,
    eps: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_logsumexp(
    &self,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_logsumexp_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_lstm<T>(
    &self,
    hx: &[T],
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_lstm_cell<T>(
    &self,
    hx: &[T],
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_lstsq(&self, a: &Tensor) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_lstsq_out(
    &self,
    x: &Tensor,
    qr: &Tensor,
    a: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_lt<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lt1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lt_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lt_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_lt_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_lt_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_lu_solve(
    &self,
    lu_data: &Tensor,
    lu_pivots: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_lu_solve_out(
    &self,
    out: &Tensor,
    lu_data: &Tensor,
    lu_pivots: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_fill<S>(
    &self,
    mask: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_masked_fill1(
    &self,
    mask: &Tensor,
    value: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_fill_<S>(
    &mut self,
    mask: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_masked_fill_1(
    &mut self,
    mask: &Tensor,
    value: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_scatter(
    &self,
    mask: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_scatter_(
    &mut self,
    mask: &Tensor,
    source: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_select(&self, mask: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_masked_select_backward(
    &self,
    grad: &Tensor,
    mask: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_masked_select_out(
    &self,
    out: &Tensor,
    mask: &Tensor
) -> Result<Tensor, TchError>
[src]
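
Example: combining a comparison with `f_masked_select`; the mask is the boolean tensor produced by `f_ge`. `Tensor::of_slice` is assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn masked_select_demo() -> Result<(), TchError> {
        let t = Tensor::of_slice(&[-2.0f32, -1.0, 0.0, 1.0, 2.0]); // assumed constructor
        let mask = t.f_ge(0.0)?;
        // 1-D tensor of the non-negative entries: [0, 1, 2].
        let kept = t.f_masked_select(&mask)?;
        kept.print();
        Ok(())
    }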

pub fn f_matmul(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_matmul_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_matrix_exp(&self) -> Result<Tensor, TchError>[src]

pub fn f_matrix_exp_backward(&self, grad: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_matrix_power(&self, n: i64) -> Result<Tensor, TchError>[src]

pub fn f_matrix_rank(&self, symmetric: bool) -> Result<Tensor, TchError>[src]

pub fn f_matrix_rank1(
    &self,
    tol: f64,
    symmetric: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_max(&self) -> Result<Tensor, TchError>[src]

pub fn f_max1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_max2(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_max_out1(
    &self,
    max: &Tensor,
    max_values: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool1d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]
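
Example: a sketch of `f_max_pool2d` on an NCHW input. `Tensor::of_slice` and `reshape` are assumed from the tch crate.

    use tch::{Tensor, TchError};

    fn max_pool_demo() -> Result<(), TchError> {
        // Assumed constructors: one 1-channel 4x4 image with values 0..15.
        let img = Tensor::of_slice(&(0..16).map(|v| v as f32).collect::<Vec<_>>())
            .reshape(&[1, 1, 4, 4]);
        // 2x2 window, stride 2, no padding, dilation 1, no ceil mode -> [1, 1, 2, 2].
        let pooled = img.f_max_pool2d(&[2, 2], &[2, 2], &[0, 0], &[1, 1], false)?;
        assert_eq!(pooled.size(), vec![1i64, 1, 2, 2]);
        Ok(())
    }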

pub fn f_max_pool2d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_pool2d_with_indices_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool2d_with_indices_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool2d_with_indices_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool3d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_pool3d_with_indices_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool3d_with_indices_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_max_pool3d_with_indices_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_max_unpool2d(
    &self,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool2d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool2d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool3d(
    &self,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool3d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_max_unpool3d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_maximum(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_maximum_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_mean(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_mean1(
    &self,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_mean_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]
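
Example: the global and per-dimension mean. `Tensor::of_slice`, `reshape`, and `Kind` are assumed from the tch crate.

    use tch::{Kind, Tensor, TchError};

    fn mean_demo() -> Result<(), TchError> {
        // Assumed constructors: the matrix [[1, 2], [3, 4]].
        let m = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0]).reshape(&[2, 2]);
        let overall = m.f_mean(Kind::Float)?;                  // scalar 2.5
        let per_column = m.f_mean1(&[0], false, Kind::Float)?; // [2, 3]
        overall.print();
        per_column.print();
        Ok(())
    }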

pub fn f_median(&self) -> Result<Tensor, TchError>[src]

pub fn f_median1(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_median_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_min(&self) -> Result<Tensor, TchError>[src]

pub fn f_min1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_min2(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_min_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_min_out1(
    &self,
    min: &Tensor,
    min_indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_minimum(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_minimum_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_miopen_batch_norm<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    exponential_average_factor: f64,
    epsilon: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_miopen_batch_norm_backward<T>(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    save_mean: Option<T>,
    save_var: Option<T>,
    epsilon: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_miopen_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_miopen_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_miopen_convolution_transpose<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_miopen_convolution_transpose_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_miopen_depthwise_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_miopen_depthwise_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_miopen_rnn<T>(
    &self,
    weight: &[T],
    weight_stride0: i64,
    hx: &Tensor,
    cx: Option<T>,
    mode: i64,
    hidden_size: i64,
    num_layers: i64,
    batch_first: bool,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_sizes: &[i64],
    dropout_state: Option<T>
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_mkldnn_adaptive_avg_pool2d(
    &self,
    output_size: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_mkldnn_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_mkldnn_convolution_backward_weights(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    bias_defined: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_mkldnn_linear<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_mkldnn_linear_backward_weights(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    bias_defined: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_mkldnn_max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_mkldnn_max_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_mkldnn_reorder_conv2d_weight(
    &self,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_mkldnn_reorder_conv3d_weight(
    &self,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mode(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_mode_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_moveaxis(
    &self,
    source: &[i64],
    destination: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_moveaxis1(
    &self,
    source: i64,
    destination: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_movedim(
    &self,
    source: &[i64],
    destination: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_movedim1(
    &self,
    source: i64,
    destination: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_mse_loss(
    &self,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_mse_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_mse_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_mse_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]
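
Example: mean-squared-error loss with the forward method above. `Tensor::of_slice` and the `Reduction` enum are assumed from the tch crate.

    use tch::{Reduction, Tensor, TchError};

    fn mse_demo() -> Result<(), TchError> {
        let pred = Tensor::of_slice(&[1.0f32, 2.0, 3.0]);   // assumed constructor
        let target = Tensor::of_slice(&[1.0f32, 2.0, 5.0]);
        // (0^2 + 0^2 + 2^2) / 3 = 4/3 with mean reduction.
        let loss = pred.f_mse_loss(&target, Reduction::Mean)?;
        loss.print();
        Ok(())
    }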

pub fn f_msort(&self) -> Result<Tensor, TchError>[src]

pub fn f_msort_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mul(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mul1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_mul_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mul_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_mul_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_multi_margin_loss_backward<T, S>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    p: S,
    margin: S,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn f_multi_margin_loss_backward_out<T, S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    p: S,
    margin: S,
    weight: Option<T>,
    reduction: Reduction
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn f_multilabel_margin_loss(
    &self,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_multilabel_margin_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    is_target: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_multilabel_margin_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    is_target: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_multilabel_margin_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_multinomial(
    &self,
    num_samples: i64,
    replacement: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_multinomial_out(
    &self,
    out: &Tensor,
    num_samples: i64,
    replacement: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_multiply(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_multiply1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_multiply_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_multiply_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_multiply_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_mv(&self, vec: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mv_out(&self, out: &Tensor, vec: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_mvlgamma(&self, p: i64) -> Result<Tensor, TchError>[src]

pub fn f_mvlgamma_(&mut self, p: i64) -> Result<Tensor, TchError>[src]

pub fn f_nan_to_num(
    &self,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]
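
Example (a small sketch of f_nan_to_num, again assuming tch's Tensor::of_slice constructor; passing None keeps the default replacement for the corresponding infinity):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[1.0f64, f64::NAN, f64::INFINITY, f64::NEG_INFINITY]);
    // Replace NaN with 0.0 and leave +inf/-inf at their default substitutes.
    let cleaned = t.f_nan_to_num(0.0, None, None)?;
    cleaned.print();
    Ok(())
}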

pub fn f_nan_to_num_(
    &mut self,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_nan_to_num_out(
    &self,
    out: &Tensor,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_nanmedian(&self) -> Result<Tensor, TchError>[src]

pub fn f_nanmedian1(
    &self,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_nanmedian_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_nanquantile(
    &self,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nanquantile1(
    &self,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nanquantile_out(
    &self,
    out: &Tensor,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nanquantile_out1(
    &self,
    out: &Tensor,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nansum(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_nansum1(
    &self,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_nansum_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_narrow(
    &self,
    dim: i64,
    start: i64,
    length: i64
) -> Result<Tensor, TchError>
[src]
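
Example (a hedged sketch of f_narrow on a 2x3 tensor; Tensor::of_slice and reshape are assumed from the tch crate):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[0i64, 1, 2, 3, 4, 5]).reshape(&[2, 3]);
    // Keep a slice of length 2 along dimension 1, starting at column index 1.
    let narrowed = t.f_narrow(1, 1, 2)?;
    assert_eq!(narrowed.size(), vec![2, 2]);
    Ok(())
}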

pub fn f_narrow1(
    &self,
    dim: i64,
    start: &Tensor,
    length: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_narrow_copy(
    &self,
    dim: i64,
    start: i64,
    length: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_narrow_copy_out(
    &self,
    out: &Tensor,
    dim: i64,
    start: i64,
    length: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_native_batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_native_batch_norm_out<T>(
    &self,
    out: &Tensor,
    save_mean: &Tensor,
    save_invstd: &Tensor,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_native_group_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    n: i64,
    c: i64,
    hxw: i64,
    group: i64,
    eps: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_native_layer_norm<T>(
    &self,
    normalized_shape: &[i64],
    weight: Option<T>,
    bias: Option<T>,
    eps: f64
) -> Result<(Tensor, Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_native_norm(&self) -> Result<Tensor, TchError>[src]

pub fn f_native_norm1<S>(
    &self,
    p: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ne<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ne1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ne_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ne_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_ne_out<S>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_ne_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_neg(&self) -> Result<Tensor, TchError>[src]

pub fn f_neg_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_neg_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_negative(&self) -> Result<Tensor, TchError>[src]

pub fn f_negative_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_negative_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_new_empty(
    &self,
    size: &[i64],
    options: (Kind, Device)
) -> Result<Tensor, TchError>
[src]

pub fn f_new_empty_strided(
    &self,
    size: &[i64],
    stride: &[i64],
    options: (Kind, Device)
) -> Result<Tensor, TchError>
[src]

pub fn f_new_full<S>(
    &self,
    size: &[i64],
    fill_value: S,
    options: (Kind, Device)
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_new_zeros(
    &self,
    size: &[i64],
    options: (Kind, Device)
) -> Result<Tensor, TchError>
[src]

pub fn f_nextafter(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_nextafter_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_nextafter_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_nll_loss<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]
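
Example (a minimal sketch of f_nll_loss on log-probabilities; the class weight is omitted by passing None::<Tensor>, and -100 is used as the conventional ignore_index; constructors and values are assumptions for illustration):

use tch::{Reduction, Tensor};

fn main() -> Result<(), tch::TchError> {
    // Log-probabilities for a batch of 2 samples over 3 classes (shape [2, 3]).
    let log_probs =
        Tensor::of_slice(&[-0.1f32, -2.0, -3.0, -2.5, -0.2, -1.5]).reshape(&[2, 3]);
    let targets = Tensor::of_slice(&[0i64, 1]);
    // No per-class weights, mean reduction, ignore_index left at the usual -100.
    let loss = log_probs.f_nll_loss(&targets, None::<Tensor>, Reduction::Mean, -100)?;
    loss.print();
    Ok(())
}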

pub fn f_nll_loss2d<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss2d_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss2d_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss2d_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nll_loss_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_nonzero(&self) -> Result<Tensor, TchError>[src]

pub fn f_nonzero_numpy(&self) -> Result<Vec<Tensor, Global>, TchError>[src]

pub fn f_nonzero_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_norm(&self) -> Result<Tensor, TchError>[src]

pub fn f_norm1<S>(&self, p: S, dtype: Kind) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_norm2<S>(
    &self,
    p: S,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_norm3<S>(
    &self,
    p: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_norm_out<S>(
    &self,
    out: &Tensor,
    p: S,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_norm_out1<S>(
    &self,
    out: &Tensor,
    p: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError>[src]

pub fn f_not_equal<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_not_equal1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_not_equal_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_not_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_not_equal_out<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_not_equal_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_nuclear_norm(&self, keepdim: bool) -> Result<Tensor, TchError>[src]

pub fn f_nuclear_norm1(
    &self,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nuclear_norm_out(
    &self,
    out: &Tensor,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_nuclear_norm_out1(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_numpy_t(&self) -> Result<Tensor, TchError>[src]

pub fn f_one_hot(&self, num_classes: i64) -> Result<Tensor, TchError>[src]
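
Example (a small sketch of f_one_hot on an int64 label tensor; Tensor::of_slice is assumed from the tch crate):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let labels = Tensor::of_slice(&[0i64, 2, 1]);
    // Expand three class labels into a 3x3 one-hot matrix.
    let one_hot = labels.f_one_hot(3)?;
    assert_eq!(one_hot.size(), vec![3, 3]);
    Ok(())
}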

pub fn f_ones_like(&self) -> Result<Tensor, TchError>[src]

pub fn f_orgqr(&self, input2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_orgqr_out(
    &self,
    out: &Tensor,
    input2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_ormqr(
    &self,
    input2: &Tensor,
    input3: &Tensor,
    left: bool,
    transpose: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_ormqr_out(
    &self,
    out: &Tensor,
    input2: &Tensor,
    input3: &Tensor,
    left: bool,
    transpose: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_outer(&self, vec2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_outer_out(
    &self,
    out: &Tensor,
    vec2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_pdist(&self, p: f64) -> Result<Tensor, TchError>[src]

pub fn f_permute(&self, dims: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_pin_memory(&self) -> Result<Tensor, TchError>[src]

pub fn f_pinverse(&self, rcond: f64) -> Result<Tensor, TchError>[src]

pub fn f_pixel_shuffle(&self, upscale_factor: i64) -> Result<Tensor, TchError>[src]

pub fn f_pixel_unshuffle(
    &self,
    downscale_factor: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_poisson(&self) -> Result<Tensor, TchError>[src]

pub fn f_poisson_nll_loss(
    &self,
    target: &Tensor,
    log_input: bool,
    full: bool,
    eps: f64,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_polygamma(&self, n: i64) -> Result<Tensor, TchError>[src]

pub fn f_polygamma_(&mut self, n: i64) -> Result<Tensor, TchError>[src]

pub fn f_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError>[src]

pub fn f_pow<S>(&self, exponent: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_pow1(&self, exponent: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_pow_<S>(&mut self, exponent: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_pow_1(&mut self, exponent: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_pow_out(
    &self,
    out: &Tensor,
    exponent: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_pow_out2<S>(
    &self,
    out: &Tensor,
    exponent: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_prelu(&self, weight: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_prelu_backward(
    &self,
    grad_output: &Tensor,
    weight: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_prod(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_prod1(
    &self,
    dim: i64,
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_prod_out(
    &self,
    out: &Tensor,
    dim: i64,
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_put_(
    &mut self,
    index: &Tensor,
    source: &Tensor,
    accumulate: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_q_per_channel_scales(&self) -> Result<Tensor, TchError>[src]

pub fn f_q_per_channel_zero_points(&self) -> Result<Tensor, TchError>[src]

pub fn f_qr(&self, some: bool) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_qr_out(
    &self,
    q: &Tensor,
    r: &Tensor,
    some: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_quantile(
    &self,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]
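
Example (a hedged sketch of f_quantile; passing None for dim computes the quantile over the flattened tensor; the constructor and values are assumptions):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[1.0f64, 2.0, 3.0, 4.0]);
    // 0.5-quantile (median) over all elements, without keeping the reduced dimension.
    let median = t.f_quantile(0.5, None, false)?;
    median.print();
    Ok(())
}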

pub fn f_quantile1(
    &self,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_quantile_out(
    &self,
    out: &Tensor,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_quantile_out1(
    &self,
    out: &Tensor,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_quantize_per_channel(
    &self,
    scales: &Tensor,
    zero_points: &Tensor,
    axis: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_quantize_per_tensor(
    &self,
    scale: f64,
    zero_point: i64,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_quantized_batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    var: &Tensor,
    eps: f64,
    output_scale: f64,
    output_zero_point: i64
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_quantized_gru_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_quantized_lstm_cell<T, S>(
    &self,
    hx: &[T],
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn f_quantized_max_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_quantized_max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_quantized_rnn_relu_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_quantized_rnn_tanh_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_rad2deg(&self) -> Result<Tensor, TchError>[src]

pub fn f_rad2deg_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_rad2deg_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_rand_like(&self) -> Result<Tensor, TchError>[src]

pub fn f_randint_like(&self, high: i64) -> Result<Tensor, TchError>[src]

pub fn f_randint_like1(&self, low: i64, high: i64) -> Result<Tensor, TchError>[src]

pub fn f_randn_like(&self) -> Result<Tensor, TchError>[src]

pub fn f_random_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_random_1(&mut self, to: i64) -> Result<Tensor, TchError>[src]

pub fn f_random_2(
    &mut self,
    from: i64,
    to: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_ravel(&self) -> Result<Tensor, TchError>[src]

pub fn f_real(&self) -> Result<Tensor, TchError>[src]

pub fn f_reciprocal(&self) -> Result<Tensor, TchError>[src]

pub fn f_reciprocal_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_reciprocal_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_reflection_pad1d(&self, padding: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_reflection_pad1d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_reflection_pad1d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_reflection_pad1d_out(
    &self,
    out: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_reflection_pad2d(&self, padding: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_reflection_pad2d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_reflection_pad2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_reflection_pad2d_out(
    &self,
    out: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_relu(&self) -> Result<Tensor, TchError>[src]

pub fn f_relu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_remainder<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_remainder1(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_remainder_<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_remainder_1(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_remainder_out<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_remainder_out1(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_renorm<S>(
    &self,
    p: S,
    dim: i64,
    maxnorm: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_renorm_<S>(
    &mut self,
    p: S,
    dim: i64,
    maxnorm: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_renorm_out<S>(
    &self,
    out: &Tensor,
    p: S,
    dim: i64,
    maxnorm: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_repeat(&self, repeats: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_repeat_interleave1(
    &self,
    repeats: &Tensor,
    dim: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_repeat_interleave2(
    &self,
    repeats: i64,
    dim: impl Into<Option<i64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad1d(&self, padding: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_replication_pad1d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad1d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad1d_out(
    &self,
    out: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad2d(&self, padding: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_replication_pad2d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad2d_out(
    &self,
    out: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad3d(&self, padding: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_replication_pad3d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_replication_pad3d_out(
    &self,
    out: &Tensor,
    padding: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_requires_grad_(
    &mut self,
    requires_grad: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_reshape(&self, shape: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_reshape_as(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_resize_(&mut self, size: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_resize_as_(
    &mut self,
    the_template: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_rnn_relu<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_rnn_relu_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_rnn_tanh<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> Result<(Tensor, Tensor), TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_rnn_tanh_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_roll(&self, shifts: &[i64], dims: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_rot90(&self, k: i64, dims: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_round(&self) -> Result<Tensor, TchError>[src]

pub fn f_round_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_round_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_rrelu(&self, training: bool) -> Result<Tensor, TchError>[src]

pub fn f_rrelu_(&mut self, training: bool) -> Result<Tensor, TchError>[src]

pub fn f_rrelu_with_noise(
    &self,
    noise: &Tensor,
    training: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_rrelu_with_noise_(
    &mut self,
    noise: &Tensor,
    training: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_rrelu_with_noise_backward<S>(
    &self,
    grad_output: &Tensor,
    noise: &Tensor,
    lower: S,
    upper: S,
    training: bool,
    self_is_result: bool
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_rrelu_with_noise_out(
    &self,
    out: &Tensor,
    noise: &Tensor,
    training: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_rsqrt(&self) -> Result<Tensor, TchError>[src]

pub fn f_rsqrt_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_rsqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_rsub(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_rsub1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_scatter(
    &self,
    dim: i64,
    index: &Tensor,
    src: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_scatter1<S>(
    &self,
    dim: i64,
    index: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_scatter_(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_scatter_1<S>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_scatter_2(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
    reduce: &str
) -> Result<Tensor, TchError>
[src]

pub fn f_scatter_3<S>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S,
    reduce: &str
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_scatter_add(
    &self,
    dim: i64,
    index: &Tensor,
    src: &Tensor
) -> Result<Tensor, TchError>
[src]
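
Example (a minimal sketch of f_scatter_add accumulating a source tensor into a zero tensor along dimension 0; Tensor::zeros, Tensor::of_slice and the (Kind, Device) options tuple are assumed from the tch crate):

use tch::{Device, Kind, Tensor};

fn main() -> Result<(), tch::TchError> {
    let base = Tensor::zeros(&[3], (Kind::Float, Device::Cpu));
    let index = Tensor::of_slice(&[0i64, 1, 1, 2]);
    let src = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0]);
    // base[index[i]] += src[i] along dimension 0, giving [1.0, 5.0, 4.0].
    let summed = base.f_scatter_add(0, &index, &src)?;
    summed.print();
    Ok(())
}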

pub fn f_scatter_add_(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_searchsorted(
    &self,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_searchsorted_out(
    &self,
    out: &Tensor,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_select(&self, dim: i64, index: i64) -> Result<Tensor, TchError>[src]

pub fn f_selu(&self) -> Result<Tensor, TchError>[src]

pub fn f_selu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_set_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_set_1(&mut self, source: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_set_requires_grad(&self, r: bool) -> Result<Tensor, TchError>[src]

pub fn f_sgn(&self) -> Result<Tensor, TchError>[src]

pub fn f_sgn_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sgn_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sigmoid(&self) -> Result<Tensor, TchError>[src]

pub fn f_sigmoid_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sign(&self) -> Result<Tensor, TchError>[src]

pub fn f_sign_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sign_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_signbit(&self) -> Result<Tensor, TchError>[src]

pub fn f_signbit_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_silu(&self) -> Result<Tensor, TchError>[src]

pub fn f_silu_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_silu_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_silu_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sin(&self) -> Result<Tensor, TchError>[src]

pub fn f_sin_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sin_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sinc(&self) -> Result<Tensor, TchError>[src]

pub fn f_sinc_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sinh(&self) -> Result<Tensor, TchError>[src]

pub fn f_sinh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sinh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_slice(
    &self,
    dim: i64,
    start: impl Into<Option<i64>>,
    end: impl Into<Option<i64>>,
    step: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_slogdet(&self) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_slow_conv3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv3d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_dilated2d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_dilated3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_transpose2d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_transpose2d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_transpose3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_slow_conv_transpose3d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_smm(&self, mat2: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_smooth_l1_loss(
    &self,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_smooth_l1_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_smooth_l1_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_smooth_l1_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Result<Tensor, TchError>
[src]

pub fn f_soft_margin_loss(
    &self,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_soft_margin_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_soft_margin_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_soft_margin_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Result<Tensor, TchError>
[src]

pub fn f_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError>[src]
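
Example (a small sketch of f_softmax along dimension 0; Kind, the constructor, and the sample logits are assumed for illustration):

use tch::{Kind, Tensor};

fn main() -> Result<(), tch::TchError> {
    let logits = Tensor::of_slice(&[1.0f32, 2.0, 3.0]);
    // Normalize the logits into probabilities, computed in single precision.
    let probs = logits.f_softmax(0, Kind::Float)?;
    probs.print();
    Ok(())
}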

pub fn f_softplus(&self) -> Result<Tensor, TchError>[src]

pub fn f_softplus_backward<S>(
    &self,
    grad_output: &Tensor,
    beta: S,
    threshold: S,
    output: &Tensor
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_softplus_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    beta: S,
    threshold: S,
    output: &Tensor
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_softplus_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_softshrink(&self) -> Result<Tensor, TchError>[src]

pub fn f_softshrink_backward<S>(
    &self,
    grad_output: &Tensor,
    lambd: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_softshrink_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    lambd: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_softshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_solve(&self, a: &Tensor) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_solve_out(
    &self,
    solution: &Tensor,
    lu: &Tensor,
    a: &Tensor
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_sort(
    &self,
    dim: i64,
    descending: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_sort_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    descending: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_sparse_mask(&self, mask: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sparse_resize_(
    &mut self,
    size: &[i64],
    sparse_dim: i64,
    dense_dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_sparse_resize_and_clear_(
    &mut self,
    size: &[i64],
    sparse_dim: i64,
    dense_dim: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_split(
    &self,
    split_size: i64,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_split_with_sizes(
    &self,
    split_sizes: &[i64],
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]
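
Example (a hedged sketch of f_split_with_sizes, which returns one tensor per requested chunk size; the constructor is assumed from the tch crate):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[0i64, 1, 2, 3, 4, 5]);
    // Split the 6-element tensor into chunks of 2, 1 and 3 elements along dimension 0.
    let parts = t.f_split_with_sizes(&[2, 1, 3], 0)?;
    assert_eq!(parts.len(), 3);
    Ok(())
}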

pub fn f_sqrt(&self) -> Result<Tensor, TchError>[src]

pub fn f_sqrt_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_sqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_square(&self) -> Result<Tensor, TchError>[src]

pub fn f_square_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_squeeze(&self) -> Result<Tensor, TchError>[src]

pub fn f_squeeze1(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_squeeze_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_squeeze_1(&mut self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_sspaddmm(
    &self,
    mat1: &Tensor,
    mat2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_sspaddmm_out(
    &self,
    out: &Tensor,
    mat1: &Tensor,
    mat2: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_std(&self, unbiased: bool) -> Result<Tensor, TchError>[src]

pub fn f_std1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_std_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_std_mean1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_std_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_stft<T>(
    &self,
    n_fft: i64,
    hop_length: impl Into<Option<i64>>,
    win_length: impl Into<Option<i64>>,
    window: Option<T>,
    normalized: bool,
    onesided: bool,
    return_complex: bool
) -> Result<Tensor, TchError> where
    T: Borrow<Tensor>, 
[src]

pub fn f_sub(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sub1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_sub_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_sub_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_sub_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_subtract(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_subtract1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_subtract_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_subtract_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_subtract_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_sum(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_sum1(
    &self,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_sum_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Result<Tensor, TchError>
[src]

pub fn f_sum_to_size(&self, size: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_svd(
    &self,
    some: bool,
    compute_uv: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_svd_out(
    &self,
    u: &Tensor,
    s: &Tensor,
    v: &Tensor,
    some: bool,
    compute_uv: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_swapaxes(&self, axis0: i64, axis1: i64) -> Result<Tensor, TchError>[src]

pub fn f_swapaxes_(
    &mut self,
    axis0: i64,
    axis1: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_swapdims(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError>[src]

pub fn f_swapdims_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError>[src]

pub fn f_symeig(
    &self,
    eigenvectors: bool,
    upper: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_symeig_out(
    &self,
    e: &Tensor,
    v: &Tensor,
    eigenvectors: bool,
    upper: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_tr(&self) -> Result<Tensor, TchError>[src]

pub fn f_t_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_take(&self, index: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_take_backward(
    &self,
    grad: &Tensor,
    index: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_take_out(
    &self,
    out: &Tensor,
    index: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_tan(&self) -> Result<Tensor, TchError>[src]

pub fn f_tan_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_tan_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_tanh(&self) -> Result<Tensor, TchError>[src]

pub fn f_tanh_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_tanh_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_tensor_split(
    &self,
    sections: i64,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_tensor_split1(
    &self,
    indices: &[i64],
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_tensor_split2(
    &self,
    tensor_indices_or_sections: &Tensor,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_tensordot(
    &self,
    other: &Tensor,
    dims_self: &[i64],
    dims_other: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_tensordot_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dims_self: &[i64],
    dims_other: &[i64]
) -> Result<Tensor, TchError>
[src]

pub fn f_threshold<S>(&self, threshold: S, value: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_threshold_<S>(
    &mut self,
    threshold: S,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_threshold_backward<S>(
    &self,
    grad_output: &Tensor,
    threshold: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_threshold_out<S>(
    &self,
    out: &Tensor,
    threshold: S,
    value: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_tile(&self, dims: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_to(&self, device: Device) -> Result<Tensor, TchError>[src]
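
Example (a minimal sketch combining f_to with the dtype-converting f_to2 variant listed below; Device, Kind and the constructor are assumed from the tch crate):

use tch::{Device, Kind, Tensor};

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[1.0f32, 2.0]);
    // Move to the CPU device (a no-op here), then convert the element type to f64.
    let on_cpu = t.f_to(Device::Cpu)?;
    let as_double = on_cpu.f_to2(Kind::Double, false, false)?;
    assert_eq!(as_double.kind(), Kind::Double);
    Ok(())
}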

pub fn f_to1(
    &self,
    options: (Kind, Device),
    non_blocking: bool,
    copy: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_to2(
    &self,
    dtype: Kind,
    non_blocking: bool,
    copy: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_to3(
    &self,
    other: &Tensor,
    non_blocking: bool,
    copy: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_to4(
    &self,
    device: Device,
    dtype: Kind,
    non_blocking: bool,
    copy: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_to_dense(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_to_dense_backward(&self, grad: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_to_mkldnn(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_to_mkldnn_backward(&self, grad: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_to_sparse(&self) -> Result<Tensor, TchError>[src]

pub fn f_to_sparse1(&self, sparse_dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_topk(
    &self,
    k: i64,
    dim: i64,
    largest: bool,
    sorted: bool
) -> Result<(Tensor, Tensor), TchError>
[src]
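
Example (a small sketch of f_topk returning the two largest values and their indices along dimension 0; the constructor and values are assumed):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    let t = Tensor::of_slice(&[3.0f32, 1.0, 4.0, 1.0, 5.0]);
    // k = 2, dim = 0, largest = true, sorted = true.
    let (values, indices) = t.f_topk(2, 0, true, true)?;
    values.print();
    indices.print();
    Ok(())
}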

pub fn f_topk_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    k: i64,
    dim: i64,
    largest: bool,
    sorted: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_totype(&self, scalar_type: Kind) -> Result<Tensor, TchError>[src]

pub fn f_trace(&self) -> Result<Tensor, TchError>[src]

pub fn f_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError>[src]

pub fn f_transpose_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError>[src]

pub fn f_triangular_solve(
    &self,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_triangular_solve_out(
    &self,
    x: &Tensor,
    m: &Tensor,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_tril(&self, diagonal: i64) -> Result<Tensor, TchError>[src]

pub fn f_tril_(&mut self, diagonal: i64) -> Result<Tensor, TchError>[src]

pub fn f_tril_out(
    &self,
    out: &Tensor,
    diagonal: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_triu(&self, diagonal: i64) -> Result<Tensor, TchError>[src]

pub fn f_triu_(&mut self, diagonal: i64) -> Result<Tensor, TchError>[src]

pub fn f_triu_out(
    &self,
    out: &Tensor,
    diagonal: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_true_divide(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_true_divide1<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_true_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_true_divide_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_true_divide_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_trunc(&self) -> Result<Tensor, TchError>[src]

pub fn f_trunc_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_trunc_out(&self, out: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_type_as(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_unbind(&self, dim: i64) -> Result<Vec<Tensor, Global>, TchError>[src]

pub fn f_unflatten(&self, dim: i64, sizes: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_unfold(
    &self,
    dimension: i64,
    size: i64,
    step: i64
) -> Result<Tensor, TchError>
[src]

pub fn f_uniform_(&mut self, from: f64, to: f64) -> Result<Tensor, TchError>[src]

pub fn f_unique_consecutive(
    &self,
    return_inverse: bool,
    return_counts: bool,
    dim: impl Into<Option<i64>>
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_unique_dim(
    &self,
    dim: i64,
    sorted: bool,
    return_inverse: bool,
    return_counts: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_unique_dim_consecutive(
    &self,
    dim: i64,
    return_inverse: bool,
    return_counts: bool
) -> Result<(Tensor, Tensor, Tensor), TchError>
[src]

pub fn f_unsafe_chunk(
    &self,
    chunks: i64,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_unsafe_split(
    &self,
    split_size: i64,
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_unsafe_split_with_sizes(
    &self,
    split_sizes: &[i64],
    dim: i64
) -> Result<Vec<Tensor, Global>, TchError>
[src]

pub fn f_unsqueeze(&self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_unsqueeze_(&mut self, dim: i64) -> Result<Tensor, TchError>[src]

pub fn f_upsample_bicubic2d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_bicubic2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_bilinear2d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_bilinear2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_linear1d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_linear1d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_nearest1d(
    &self,
    output_size: &[i64],
    scales: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_nearest1d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_nearest2d(
    &self,
    output_size: &[i64],
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]
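
Example (a hedged sketch of f_upsample_nearest2d on an NCHW tensor; the explicit scale factors are left as None so they are derived from output_size; constructor and shapes are illustrative assumptions):

use tch::Tensor;

fn main() -> Result<(), tch::TchError> {
    // One image, one channel, 2x2 pixels (NCHW layout).
    let img = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0]).reshape(&[1, 1, 2, 2]);
    // Nearest-neighbour upsampling to a 4x4 spatial size.
    let up = img.f_upsample_nearest2d(&[4, 4], None, None)?;
    assert_eq!(up.size(), vec![1, 1, 4, 4]);
    Ok(())
}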

pub fn f_upsample_nearest2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_nearest3d(
    &self,
    output_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_nearest3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_trilinear3d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_upsample_trilinear3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Result<Tensor, TchError>
[src]

pub fn f_values(&self) -> Result<Tensor, TchError>[src]

pub fn f_var(&self, unbiased: bool) -> Result<Tensor, TchError>[src]

pub fn f_var1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_var_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError>[src]

pub fn f_var_mean1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<(Tensor, Tensor), TchError>
[src]

pub fn f_var_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Result<Tensor, TchError>
[src]

pub fn f_vdot(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_vdot_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_view_(&self, size: &[i64]) -> Result<Tensor, TchError>[src]

pub fn f_view1(&self, dtype: Kind) -> Result<Tensor, TchError>[src]

pub fn f_view_as(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_view_as_complex(&self) -> Result<Tensor, TchError>[src]

pub fn f_view_as_real(&self) -> Result<Tensor, TchError>[src]

pub fn f_where1(
    &self,
    condition: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_where3<S>(
    &self,
    condition: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_xlogy2<S>(&self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_xlogy_(&mut self, other: &Tensor) -> Result<Tensor, TchError>[src]

pub fn f_xlogy_1<S>(&mut self, other: S) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_xlogy_out(
    &self,
    out: &Tensor,
    other: &Tensor
) -> Result<Tensor, TchError>
[src]

pub fn f_xlogy_out2<S>(
    &self,
    out: &Tensor,
    other: S
) -> Result<Tensor, TchError> where
    S: Into<Scalar>, 
[src]

pub fn f_zero_(&mut self) -> Result<Tensor, TchError>[src]

pub fn f_zeros_like(&self) -> Result<Tensor, TchError>[src]

pub fn internal_and_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_and_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_iand_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_iand_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_ilshift_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_ilshift_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_ior_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_ior_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_irshift_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_irshift_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_ixor_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_ixor_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_lshift_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_lshift_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_or_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_or_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_rshift_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_rshift_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_xor_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn internal_xor_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_adaptive_avg_pool2d(&self, output_size: &[i64]) -> Tensor[src]

pub fn internal_adaptive_avg_pool2d_backward(
    &self,
    grad_output: &Tensor
) -> Tensor
[src]

pub fn internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Tensor[src]

pub fn internal_add_relu(&self, other: &Tensor) -> Tensor[src]

pub fn internal_add_relu_(&mut self, other: &Tensor) -> Tensor[src]

pub fn internal_add_relu_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn internal_addmv_impl_(
    &mut self,
    self2: &Tensor,
    mat: &Tensor,
    vec: &Tensor
) -> Tensor
[src]

pub fn internal_aminmax(&self) -> (Tensor, Tensor)[src]

pub fn internal_aminmax1(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn internal_baddbmm_mkl_(
    &mut self,
    batch1: &Tensor,
    batch2: &Tensor
) -> Tensor
[src]

pub fn internal_bmm(&self, mat2: &Tensor, deterministic: bool) -> Tensor[src]

pub fn internal_bmm_out(
    &self,
    out: &Tensor,
    mat2: &Tensor,
    deterministic: bool
) -> Tensor
[src]

pub fn internal_cast_byte(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_char(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_double(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_float(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_half(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_int(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_long(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cast_short(&self, non_blocking: bool) -> Tensor[src]

pub fn internal_cholesky_helper(&self, upper: bool) -> Tensor[src]

pub fn internal_cholesky_solve_helper(&self, a: &Tensor, upper: bool) -> Tensor[src]

pub fn internal_coalesced_(&mut self, coalesced: bool) -> Tensor[src]

pub fn internal_compute_linear_combination(
    &self,
    coefficients: &Tensor
) -> Tensor
[src]

pub fn internal_compute_linear_combination_out(
    &self,
    out: &Tensor,
    coefficients: &Tensor
) -> Tensor
[src]

pub fn internal_conj(&self) -> Tensor[src]

pub fn internal_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    cudnn_enabled: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn internal_convolution1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    cudnn_enabled: bool,
    allow_tf32: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn internal_convolution_nogroup<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn internal_copy_from(&self, dst: &Tensor, non_blocking: bool) -> Tensor[src]

pub fn internal_cudnn_rnn<T>(
    &self,
    weight: &[T],
    weight_stride0: i64,
    weight_buf: Option<T>,
    hx: &Tensor,
    cx: Option<T>,
    mode: i64,
    hidden_size: i64,
    proj_size: i64,
    num_layers: i64,
    batch_first: bool,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_sizes: &[i64],
    dropout_state: Option<T>
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn internal_cumprod(&self, dim: i64) -> Tensor[src]

pub fn internal_cumprod_out(&self, out: &Tensor, dim: i64) -> Tensor[src]

pub fn internal_cumsum(&self, dim: i64) -> Tensor[src]

pub fn internal_cumsum_out(&self, out: &Tensor, dim: i64) -> Tensor[src]

pub fn internal_fake_quantize_learnable_per_channel_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Tensor
[src]

pub fn internal_fake_quantize_learnable_per_channel_affine_backward(
    &self,
    grad: &Tensor,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> (Tensor, Tensor, Tensor)
[src]

pub fn internal_fake_quantize_learnable_per_tensor_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> Tensor
[src]

pub fn internal_fake_quantize_learnable_per_tensor_affine_backward(
    &self,
    grad: &Tensor,
    scale: &Tensor,
    zero_point: &Tensor,
    quant_min: i64,
    quant_max: i64,
    grad_factor: f64
) -> (Tensor, Tensor, Tensor)
[src]

pub fn internal_fft_c2c(
    &self,
    dim: &[i64],
    normalization: i64,
    forward: bool
) -> Tensor
[src]

pub fn internal_fft_c2c_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    forward: bool
) -> Tensor
[src]

pub fn internal_fft_c2r(
    &self,
    dim: &[i64],
    normalization: i64,
    last_dim_size: i64
) -> Tensor
[src]

pub fn internal_fft_c2r_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    last_dim_size: i64
) -> Tensor
[src]

pub fn internal_fft_r2c(
    &self,
    dim: &[i64],
    normalization: i64,
    onesided: bool
) -> Tensor
[src]

pub fn internal_fft_r2c_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    normalization: i64,
    onesided: bool
) -> Tensor
[src]

pub fn internal_fused_dropout(&self, p: f64) -> (Tensor, Tensor)[src]

pub fn internal_fw_primal(&self, level: i64) -> Tensor[src]

pub fn internal_gather_sparse_backward(
    &self,
    dim: i64,
    index: &Tensor,
    grad: &Tensor
) -> Tensor
[src]

pub fn internal_grid_sampler_2d_cpu_fallback(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Tensor
[src]

pub fn internal_grid_sampler_2d_cpu_fallback_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_index_copy_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Tensor
[src]

pub fn internal_index_put_impl_<T>(
    &mut self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool,
    unsafe_: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn internal_indices(&self) -> Tensor[src]

pub fn internal_inverse_helper(&self) -> Tensor[src]

pub fn internal_linalg_inv_out_helper_(
    &mut self,
    infos_lu: &Tensor,
    infos_getri: &Tensor
) -> Tensor
[src]

pub fn internal_linalg_qr_helper(&self, mode: &str) -> (Tensor, Tensor)[src]

pub fn internal_linalg_solve_out_helper_(
    &mut self,
    other: &Tensor,
    infos: &Tensor
) -> Tensor
[src]

pub fn internal_log_softmax(&self, dim: i64, half_to_float: bool) -> Tensor[src]

pub fn internal_log_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn internal_logcumsumexp(&self, dim: i64) -> Tensor[src]

pub fn internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor[src]

pub fn internal_lu_solve_helper(
    &self,
    lu_data: &Tensor,
    lu_pivots: &Tensor
) -> Tensor
[src]

pub fn internal_lu_with_info(
    &self,
    pivot: bool,
    check_errors: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn internal_make_per_channel_quantized_tensor(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64
) -> Tensor
[src]

pub fn internal_make_per_tensor_quantized_tensor(
    &self,
    scale: f64,
    zero_point: i64
) -> Tensor
[src]

pub fn internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Tensor[src]

pub fn internal_mkldnn_reshape(&self, shape: &[i64]) -> Tensor[src]

pub fn internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn internal_mkldnn_transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn internal_mode(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn internal_mode_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_nnpack_spatial_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn internal_nnpack_spatial_convolution_backward_input(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn internal_nnpack_spatial_convolution_backward_weight(
    &self,
    weightsize: &[i64],
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn internal_pack_padded_sequence(
    &self,
    lengths: &Tensor,
    batch_first: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_pdist_backward(
    &self,
    grad: &Tensor,
    p: f64,
    pdist: &Tensor
) -> Tensor
[src]

pub fn internal_remove_batch_dim(
    &self,
    level: i64,
    batch_size: i64,
    out_dim: i64
) -> Tensor
[src]

pub fn internal_reshape_from_tensor(&self, shape: &Tensor) -> Tensor[src]

pub fn internal_s_where(&self, condition: &Tensor, other: &Tensor) -> Tensor[src]

pub fn internal_sample_dirichlet(&self) -> Tensor[src]

pub fn internal_shape_as_tensor(&self) -> Tensor[src]

pub fn internal_sobol_engine_ff_(
    &mut self,
    n: i64,
    sobolstate: &Tensor,
    dimension: i64,
    num_generated: i64
) -> Tensor
[src]

pub fn internal_sobol_engine_initialize_state_(
    &mut self,
    dimension: i64
) -> Tensor
[src]

pub fn internal_sobol_engine_scramble_(
    &mut self,
    ltm: &Tensor,
    dimension: i64
) -> Tensor
[src]

pub fn internal_softmax(&self, dim: i64, half_to_float: bool) -> Tensor[src]

pub fn internal_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn internal_solve_helper(&self, a: &Tensor) -> (Tensor, Tensor)[src]

pub fn internal_sparse_addmm(&self, sparse: &Tensor, dense: &Tensor) -> Tensor[src]

pub fn internal_sparse_log_softmax(&self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn internal_sparse_log_softmax1(
    &self,
    dim: i64,
    half_to_float: bool
) -> Tensor
[src]

pub fn internal_sparse_log_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn internal_sparse_softmax(&self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn internal_sparse_softmax1(&self, dim: i64, half_to_float: bool) -> Tensor[src]

pub fn internal_sparse_softmax_backward_data(
    &self,
    grad_output: &Tensor,
    output: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn internal_sparse_sparse_matmul(&self, other: &Tensor) -> Tensor[src]

pub fn internal_sparse_sum(&self) -> Tensor[src]

pub fn internal_sparse_sum1(&self, dtype: Kind) -> Tensor[src]

pub fn internal_sparse_sum2(&self, dim: &[i64]) -> Tensor[src]

pub fn internal_sparse_sum3(&self, dim: &[i64], dtype: Kind) -> Tensor[src]

pub fn internal_sparse_sum_backward(&self, grad: &Tensor, dim: &[i64]) -> Tensor[src]

pub fn internal_standard_gamma(&self) -> Tensor[src]

pub fn internal_standard_gamma_grad(&self, output: &Tensor) -> Tensor[src]

pub fn internal_std(&self, unbiased: bool) -> Tensor[src]

pub fn internal_svd_helper(
    &self,
    some: bool,
    compute_uv: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn internal_syevd_helper(
    &self,
    compute_eigenvectors: bool,
    uplo: &str
) -> (Tensor, Tensor)
[src]

pub fn internal_symeig_helper(
    &self,
    eigenvectors: bool,
    upper: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_test_serialization_subcmul(&self, other: &Tensor) -> Tensor[src]

pub fn internal_triangular_solve_helper(
    &self,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_unique(
    &self,
    sorted: bool,
    return_inverse: bool
) -> (Tensor, Tensor)
[src]

pub fn internal_unique2(
    &self,
    sorted: bool,
    return_inverse: bool,
    return_counts: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn internal_unsafe_view(&self, size: &[i64]) -> Tensor[src]

pub fn internal_values(&self) -> Tensor[src]

pub fn internal_var(&self, unbiased: bool) -> Tensor[src]

pub fn abs(&self) -> Tensor[src]

pub fn abs_(&mut self) -> Tensor[src]

pub fn abs_out(&self, out: &Tensor) -> Tensor[src]

pub fn absolute(&self) -> Tensor[src]

pub fn absolute_(&mut self) -> Tensor[src]

pub fn absolute_out(&self, out: &Tensor) -> Tensor[src]

pub fn acos(&self) -> Tensor[src]

pub fn acos_(&mut self) -> Tensor[src]

pub fn acos_out(&self, out: &Tensor) -> Tensor[src]

pub fn acosh(&self) -> Tensor[src]

pub fn acosh_(&mut self) -> Tensor[src]

pub fn acosh_out(&self, out: &Tensor) -> Tensor[src]

pub fn adaptive_avg_pool1d(&self, output_size: &[i64]) -> Tensor[src]

pub fn adaptive_avg_pool2d(&self, output_size: &[i64]) -> Tensor[src]

pub fn adaptive_avg_pool2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64]
) -> Tensor
[src]

pub fn adaptive_avg_pool3d(&self, output_size: &[i64]) -> Tensor[src]

pub fn adaptive_avg_pool3d_backward(&self, grad_output: &Tensor) -> Tensor[src]

pub fn adaptive_avg_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor
) -> Tensor
[src]

pub fn adaptive_avg_pool3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64]
) -> Tensor
[src]

pub fn adaptive_max_pool1d(&self, output_size: &[i64]) -> (Tensor, Tensor)[src]

pub fn adaptive_max_pool2d(&self, output_size: &[i64]) -> (Tensor, Tensor)[src]

pub fn adaptive_max_pool2d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor
) -> Tensor
[src]

pub fn adaptive_max_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor
) -> Tensor
[src]

pub fn adaptive_max_pool2d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> (Tensor, Tensor)
[src]

pub fn adaptive_max_pool3d(&self, output_size: &[i64]) -> (Tensor, Tensor)[src]

pub fn adaptive_max_pool3d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor
) -> Tensor
[src]

pub fn adaptive_max_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor
) -> Tensor
[src]

pub fn adaptive_max_pool3d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> (Tensor, Tensor)
[src]

pub fn g_add(&self, other: &Tensor) -> Tensor[src]

pub fn g_add1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn g_add_(&mut self, other: &Tensor) -> Tensor[src]

pub fn g_add_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]
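
The g_ prefix on g_add/g_add1 and their in-place variants presumably keeps these generated bindings from clashing with the std::ops operator implementations on Tensor. A minimal usage sketch follows; it calls the methods on plain Tensor values (they are equally reachable through TensorFromMat's Deref) and assumes the tch crate's Tensor::of_slice constructor, which is not part of this listing.

// Sketch: element-wise addition with a tensor and with a scalar.
use tch::Tensor;

fn main() {
    let a = Tensor::of_slice(&[1.0f32, 2.0, 3.0]);
    let b = Tensor::of_slice(&[10.0f32, 20.0, 30.0]);

    // Tensor + tensor.
    let sum = a.g_add(&b);
    sum.print();

    // Tensor + scalar; any S: Into<Scalar> (here an f64 literal) is accepted.
    let shifted = a.g_add1(0.5);
    shifted.print();
}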

pub fn add_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor[src]

pub fn addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor[src]

pub fn addbmm_out(
    &self,
    out: &Tensor,
    batch1: &Tensor,
    batch2: &Tensor
) -> Tensor
[src]

pub fn addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor[src]

pub fn addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor[src]

pub fn addcdiv_out(
    &self,
    out: &Tensor,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Tensor
[src]

pub fn addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor[src]

pub fn addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor[src]

pub fn addcmul_out(
    &self,
    out: &Tensor,
    tensor1: &Tensor,
    tensor2: &Tensor
) -> Tensor
[src]

pub fn addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor[src]
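
addmm follows torch.addmm semantics: the receiver is the additive term and the result is self + mat1 · mat2. A minimal sketch, assuming the tch crate's Tensor::randn/Tensor::zeros constructors and Kind/Device types (none of which appear in this listing):

// Sketch: bias + matrix product, [2, 3] x [3, 4] -> [2, 4].
use tch::{Device, Kind, Tensor};

fn main() {
    let opts = (Kind::Float, Device::Cpu);
    let bias = Tensor::zeros(&[2, 4], opts);
    let mat1 = Tensor::randn(&[2, 3], opts);
    let mat2 = Tensor::randn(&[3, 4], opts);

    let out = bias.addmm(&mat1, &mat2);
    assert_eq!(out.size(), vec![2i64, 4]);
}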

pub fn addmv(&self, mat: &Tensor, vec: &Tensor) -> Tensor[src]

pub fn addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Tensor[src]

pub fn addmv_out(&self, out: &Tensor, mat: &Tensor, vec: &Tensor) -> Tensor[src]

pub fn addr(&self, vec1: &Tensor, vec2: &Tensor) -> Tensor[src]

pub fn addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Tensor[src]

pub fn addr_out(&self, out: &Tensor, vec1: &Tensor, vec2: &Tensor) -> Tensor[src]

pub fn alias(&self) -> Tensor[src]

pub fn align_as(&self, other: &Tensor) -> Tensor[src]

pub fn all(&self) -> Tensor[src]

pub fn all1(&self, dim: i64, keepdim: bool) -> Tensor[src]

pub fn all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor[src]

pub fn alpha_dropout(&self, p: f64, train: bool) -> Tensor[src]

pub fn alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor[src]

pub fn amax(&self, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn amax_out(&self, out: &Tensor, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn amin(&self, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn amin_out(&self, out: &Tensor, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn angle(&self) -> Tensor[src]

pub fn angle_out(&self, out: &Tensor) -> Tensor[src]

pub fn any(&self) -> Tensor[src]

pub fn any1(&self, dim: i64, keepdim: bool) -> Tensor[src]

pub fn any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor[src]

pub fn arccos(&self) -> Tensor[src]

pub fn arccos_(&mut self) -> Tensor[src]

pub fn arccos_out(&self, out: &Tensor) -> Tensor[src]

pub fn arccosh(&self) -> Tensor[src]

pub fn arccosh_(&mut self) -> Tensor[src]

pub fn arccosh_out(&self, out: &Tensor) -> Tensor[src]

pub fn arcsin(&self) -> Tensor[src]

pub fn arcsin_(&mut self) -> Tensor[src]

pub fn arcsin_out(&self, out: &Tensor) -> Tensor[src]

pub fn arcsinh(&self) -> Tensor[src]

pub fn arcsinh_(&mut self) -> Tensor[src]

pub fn arcsinh_out(&self, out: &Tensor) -> Tensor[src]

pub fn arctan(&self) -> Tensor[src]

pub fn arctan_(&mut self) -> Tensor[src]

pub fn arctan_out(&self, out: &Tensor) -> Tensor[src]

pub fn arctanh(&self) -> Tensor[src]

pub fn arctanh_(&mut self) -> Tensor[src]

pub fn arctanh_out(&self, out: &Tensor) -> Tensor[src]

pub fn argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor[src]

pub fn argmax_out(
    &self,
    out: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor[src]

pub fn argmin_out(
    &self,
    out: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]
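
For argmax/argmin the dim parameter is impl Into<Option<i64>>: pass an i64 to reduce along one dimension, or None to reduce over the flattened tensor. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: per-row argmax vs. argmax over the whole (flattened) tensor.
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1.0f32, 7.0, 3.0, 2.0, 5.0, 4.0]).reshape(&[2, 3]);

    let per_row = t.argmax(1, true); // shape [2, 1]
    per_row.print();

    let global = t.argmax(None::<i64>, false); // 0-dim index into the flattened data
    println!("{}", global.int64_value(&[]));
}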

pub fn argsort(&self, dim: i64, descending: bool) -> Tensor[src]

pub fn as_strided(
    &self,
    size: &[i64],
    stride: &[i64],
    storage_offset: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn as_strided_(
    &mut self,
    size: &[i64],
    stride: &[i64],
    storage_offset: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn asin(&self) -> Tensor[src]

pub fn asin_(&mut self) -> Tensor[src]

pub fn asin_out(&self, out: &Tensor) -> Tensor[src]

pub fn asinh(&self) -> Tensor[src]

pub fn asinh_(&mut self) -> Tensor[src]

pub fn asinh_out(&self, out: &Tensor) -> Tensor[src]

pub fn atan(&self) -> Tensor[src]

pub fn atan2(&self, other: &Tensor) -> Tensor[src]

pub fn atan2_(&mut self, other: &Tensor) -> Tensor[src]

pub fn atan2_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn atan_(&mut self) -> Tensor[src]

pub fn atan_out(&self, out: &Tensor) -> Tensor[src]

pub fn atanh(&self) -> Tensor[src]

pub fn atanh_(&mut self) -> Tensor[src]

pub fn atanh_out(&self, out: &Tensor) -> Tensor[src]

pub fn atleast_1d(&self) -> Tensor[src]

pub fn atleast_2d(&self) -> Tensor[src]

pub fn atleast_3d(&self) -> Tensor[src]

pub fn avg_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool
) -> Tensor
[src]

pub fn avg_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]
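
avg_pool2d operates on an NCHW tensor; divisor_override is impl Into<Option<i64>>, so None keeps the default divisor (the kernel area). A minimal sketch, assuming the tch crate's Tensor::randn constructor and Kind/Device types:

// Sketch: 2x2 average pooling with stride 2 halves the spatial size.
use tch::{Device, Kind, Tensor};

fn main() {
    let images = Tensor::randn(&[1, 3, 8, 8], (Kind::Float, Device::Cpu));
    let pooled = images.avg_pool2d(
        &[2, 2],     // kernel_size
        &[2, 2],     // stride
        &[0, 0],     // padding
        false,       // ceil_mode
        true,        // count_include_pad
        None::<i64>, // divisor_override
    );
    assert_eq!(pooled.size(), vec![1i64, 3, 4, 4]);
}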

pub fn avg_pool2d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool2d_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool3d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn avg_pool3d_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    ceil_mode: bool,
    count_include_pad: bool,
    divisor_override: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn baddbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor[src]

pub fn baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor[src]

pub fn baddbmm_out(
    &self,
    out: &Tensor,
    batch1: &Tensor,
    batch2: &Tensor
) -> Tensor
[src]

pub fn batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64,
    cudnn_enabled: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]
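
batch_norm takes its optional tensors as Option<T> with T: Borrow<Tensor>, so Some(&tensor) and None are both accepted; note that all four optional arguments share the same T within one call. A minimal inference-mode sketch, assuming the tch crate's Tensor::randn/Tensor::ones/Tensor::zeros constructors and Kind/Device types:

// Sketch: batch norm over the channels of an NCHW tensor, using running statistics.
use tch::{Device, Kind, Tensor};

fn main() {
    let opts = (Kind::Float, Device::Cpu);
    let x = Tensor::randn(&[4, 3, 8, 8], opts);
    let gamma = Tensor::ones(&[3], opts);
    let beta = Tensor::zeros(&[3], opts);
    let running_mean = Tensor::zeros(&[3], opts);
    let running_var = Tensor::ones(&[3], opts);

    let y = x.batch_norm(
        Some(&gamma),
        Some(&beta),
        Some(&running_mean),
        Some(&running_var),
        false, // training
        0.1,   // momentum
        1e-5,  // eps
        false, // cudnn_enabled
    );
    assert_eq!(y.size(), x.size());
}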

pub fn batch_norm_backward_elemt<T>(
    &self,
    grad_out: &Tensor,
    mean: &Tensor,
    invstd: &Tensor,
    weight: Option<T>,
    mean_dy: &Tensor,
    mean_dy_xmu: &Tensor
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_backward_reduce<T>(
    &self,
    grad_out: &Tensor,
    mean: &Tensor,
    invstd: &Tensor,
    weight: Option<T>,
    input_g: bool,
    weight_g: bool,
    bias_g: bool
) -> (Tensor, Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_elemt<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    invstd: &Tensor,
    eps: f64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_elemt_out<T>(
    &self,
    out: &Tensor,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    invstd: &Tensor,
    eps: f64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_gather_stats<T>(
    &self,
    mean: &Tensor,
    invstd: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64,
    eps: f64,
    count: i64
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_gather_stats_with_counts<T>(
    &self,
    mean: &Tensor,
    invstd: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64,
    eps: f64,
    counts: &Tensor
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn batch_norm_stats(&self, eps: f64) -> (Tensor, Tensor)[src]

pub fn batch_norm_update_stats<T>(
    &self,
    running_mean: Option<T>,
    running_var: Option<T>,
    momentum: f64
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn bernoulli(&self) -> Tensor[src]

pub fn bernoulli1(&self, p: f64) -> Tensor[src]

pub fn bernoulli_(&mut self, p: &Tensor) -> Tensor[src]

pub fn bernoulli_1(&mut self, p: f64) -> Tensor[src]

pub fn bernoulli_out(&self, out: &Tensor) -> Tensor[src]

pub fn binary_cross_entropy<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn binary_cross_entropy_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn binary_cross_entropy_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn binary_cross_entropy_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn binary_cross_entropy_with_logits<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    pos_weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]
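
binary_cross_entropy_with_logits combines a sigmoid with the BCE loss, so the receiver holds raw scores rather than probabilities; weight and pos_weight are optional. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor and Reduction enum:

// Sketch: mean BCE-with-logits loss over three predictions.
use tch::{Reduction, Tensor};

fn main() {
    let logits = Tensor::of_slice(&[0.2f32, -1.5, 2.3]);
    let targets = Tensor::of_slice(&[0.0f32, 0.0, 1.0]);

    let loss = logits.binary_cross_entropy_with_logits(
        &targets,
        None::<&Tensor>, // weight
        None::<&Tensor>, // pos_weight
        Reduction::Mean,
    );
    println!("loss = {:.4}", loss.double_value(&[]));
}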

pub fn binary_cross_entropy_with_logits_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    pos_weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn bincount<T>(&self, weights: Option<T>, minlength: i64) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn bitwise_and<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_and1(&self, other: &Tensor) -> Tensor[src]

pub fn bitwise_and_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_and_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn bitwise_and_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn bitwise_and_out1<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_not(&self) -> Tensor[src]

pub fn bitwise_not_(&mut self) -> Tensor[src]

pub fn bitwise_not_out(&self, out: &Tensor) -> Tensor[src]

pub fn bitwise_or<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_or1(&self, other: &Tensor) -> Tensor[src]

pub fn bitwise_or_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_or_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn bitwise_or_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn bitwise_or_out1<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_xor<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_xor1(&self, other: &Tensor) -> Tensor[src]

pub fn bitwise_xor_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bitwise_xor_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn bitwise_xor_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn bitwise_xor_out1<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn bmm(&self, mat2: &Tensor) -> Tensor[src]

pub fn bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn broadcast_to(&self, size: &[i64]) -> Tensor[src]

pub fn bucketize(
    &self,
    boundaries: &Tensor,
    out_int32: bool,
    right: bool
) -> Tensor
[src]

pub fn bucketize_out(
    &self,
    out: &Tensor,
    boundaries: &Tensor,
    out_int32: bool,
    right: bool
) -> Tensor
[src]

pub fn cauchy_(&mut self, median: f64, sigma: f64) -> Tensor[src]

pub fn ceil(&self) -> Tensor[src]

pub fn ceil_(&mut self) -> Tensor[src]

pub fn ceil_out(&self, out: &Tensor) -> Tensor[src]

pub fn celu(&self) -> Tensor[src]

pub fn celu_(&mut self) -> Tensor[src]

pub fn channel_shuffle(&self, groups: i64) -> Tensor[src]

pub fn cholesky(&self, upper: bool) -> Tensor[src]

pub fn cholesky_inverse(&self, upper: bool) -> Tensor[src]

pub fn cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Tensor[src]

pub fn cholesky_out(&self, out: &Tensor, upper: bool) -> Tensor[src]

pub fn cholesky_solve(&self, input2: &Tensor, upper: bool) -> Tensor[src]

pub fn cholesky_solve_out(
    &self,
    out: &Tensor,
    input2: &Tensor,
    upper: bool
) -> Tensor
[src]

pub fn choose_qparams_optimized(
    &self,
    numel: i64,
    n_bins: i64,
    ratio: f64,
    bit_width: i64
) -> (Tensor, Tensor)
[src]

pub fn chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor, Global>[src]

pub fn clamp<S>(&self, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]
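
clamp takes both bounds as S: Into<Scalar>, so plain numeric literals work. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: restrict values to the [0, 1] interval.
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[-0.5f32, 0.25, 1.75]);
    let clamped = t.clamp(0.0, 1.0); // [0.0, 0.25, 1.0]
    clamped.print();
}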

pub fn clamp_<S>(&mut self, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_max<S>(&self, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_max_<S>(&mut self, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_max_out<S>(&self, out: &Tensor, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_min<S>(&self, min: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_min_<S>(&mut self, min: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_min_out<S>(&self, out: &Tensor, min: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clamp_out<S>(&self, out: &Tensor, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clip<S>(&self, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clip_<S>(&mut self, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn clip_out<S>(&self, out: &Tensor, min: S, max: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn coalesce(&self) -> Tensor[src]

pub fn col2im(
    &self,
    output_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Tensor
[src]

pub fn col2im_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Tensor
[src]

pub fn combinations(&self, r: i64, with_replacement: bool) -> Tensor[src]

pub fn conj(&self) -> Tensor[src]

pub fn conj_out(&self, out: &Tensor) -> Tensor[src]

pub fn constant_pad_nd(&self, pad: &[i64]) -> Tensor[src]

pub fn contiguous(&self) -> Tensor[src]

pub fn conv1d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn conv2d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]
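
conv2d expects an NCHW input and a weight of shape [out_channels, in_channels / groups, kH, kW]; bias is optional. A minimal sketch, assuming the tch crate's Tensor::randn/Tensor::zeros constructors and Kind/Device types:

// Sketch: a 3x3 convolution with padding 1 preserves the spatial size.
use tch::{Device, Kind, Tensor};

fn main() {
    let opts = (Kind::Float, Device::Cpu);
    let input = Tensor::randn(&[1, 3, 32, 32], opts); // N, C, H, W
    let weight = Tensor::randn(&[16, 3, 3, 3], opts); // out_c, in_c, kH, kW
    let bias = Tensor::zeros(&[16], opts);

    let output = input.conv2d(
        &weight,
        Some(&bias),
        &[1, 1], // stride
        &[1, 1], // padding
        &[1, 1], // dilation
        1,       // groups
    );
    assert_eq!(output.size(), vec![1i64, 16, 32, 32]);
}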

pub fn conv3d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Tensor[src]

pub fn conv_tbc_backward(
    &self,
    input: &Tensor,
    weight: &Tensor,
    bias: &Tensor,
    pad: i64
) -> (Tensor, Tensor, Tensor)
[src]

pub fn conv_transpose1d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn conv_transpose2d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn conv_transpose3d<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    groups: i64,
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn convolution_overrideable<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    transposed: bool,
    output_padding: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn copy_sparse_to_sparse_(
    &mut self,
    src: &Tensor,
    non_blocking: bool
) -> Tensor
[src]

pub fn copysign(&self, other: &Tensor) -> Tensor[src]

pub fn copysign1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn copysign_(&mut self, other: &Tensor) -> Tensor[src]

pub fn copysign_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn copysign_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn cos(&self) -> Tensor[src]

pub fn cos_(&mut self) -> Tensor[src]

pub fn cos_out(&self, out: &Tensor) -> Tensor[src]

pub fn cosh(&self) -> Tensor[src]

pub fn cosh_(&mut self) -> Tensor[src]

pub fn cosh_out(&self, out: &Tensor) -> Tensor[src]

pub fn count_nonzero(&self, dim: &[i64]) -> Tensor[src]

pub fn count_nonzero1(&self, dim: impl Into<Option<i64>>) -> Tensor[src]

pub fn cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Tensor[src]

pub fn cross_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dim: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn cudnn_batch_norm<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    exponential_average_factor: f64,
    epsilon: f64
) -> (Tensor, Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn cudnn_batch_norm_backward<T>(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    save_mean: Option<T>,
    save_var: Option<T>,
    epsilon: f64,
    reservespace: &Tensor
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn cudnn_convolution(
    &self,
    weight: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor
[src]

pub fn cudnn_convolution1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn cudnn_convolution2(
    &self,
    weight: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Tensor
[src]

pub fn cudnn_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Tensor
[src]

pub fn cudnn_convolution_transpose(
    &self,
    weight: &Tensor,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor
[src]

pub fn cudnn_convolution_transpose1<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn cudnn_convolution_transpose2(
    &self,
    weight: &Tensor,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Tensor
[src]

pub fn cudnn_convolution_transpose_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool,
    allow_tf32: bool
) -> Tensor
[src]

pub fn cudnn_grid_sampler(&self, grid: &Tensor) -> Tensor[src]

pub fn cudnn_grid_sampler_backward(
    &self,
    grid: &Tensor,
    grad_output: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn cummax(&self, dim: i64) -> (Tensor, Tensor)[src]

pub fn cummax_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64
) -> (Tensor, Tensor)
[src]

pub fn cummaxmin_backward(
    &self,
    grad: &Tensor,
    indices: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn cummin(&self, dim: i64) -> (Tensor, Tensor)[src]

pub fn cummin_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64
) -> (Tensor, Tensor)
[src]

pub fn cumprod(&self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn cumprod_(&mut self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn cumprod_backward(&self, grad: &Tensor, dim: i64) -> Tensor[src]

pub fn cumprod_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor[src]

pub fn cumsum(&self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn cumsum_(&mut self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn cumsum_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor[src]
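
In this binding cumsum takes dtype as a plain Kind rather than an optional, so the accumulation type is passed explicitly. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: running total along dimension 0.
use tch::{Kind, Tensor};

fn main() {
    let t = Tensor::of_slice(&[1i64, 2, 3, 4]);
    let c = t.cumsum(0, Kind::Int64); // [1, 3, 6, 10]
    c.print();
}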

pub fn data(&self) -> Tensor[src]

pub fn deg2rad(&self) -> Tensor[src]

pub fn deg2rad_(&mut self) -> Tensor[src]

pub fn deg2rad_out(&self, out: &Tensor) -> Tensor[src]

pub fn dequantize(&self) -> Tensor[src]

pub fn det(&self) -> Tensor[src]

pub fn detach(&self) -> Tensor[src]

pub fn detach_(&mut self) -> Tensor[src]

pub fn diag(&self, diagonal: i64) -> Tensor[src]

pub fn diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor[src]

pub fn diag_out(&self, out: &Tensor, diagonal: i64) -> Tensor[src]

pub fn diagflat(&self, offset: i64) -> Tensor[src]

pub fn diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor[src]

pub fn diff<T>(
    &self,
    n: i64,
    dim: i64,
    prepend: Option<T>,
    append: Option<T>
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn diff_out<T>(
    &self,
    out: &Tensor,
    n: i64,
    dim: i64,
    prepend: Option<T>,
    append: Option<T>
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn digamma(&self) -> Tensor[src]

pub fn digamma_(&mut self) -> Tensor[src]

pub fn digamma_out(&self, out: &Tensor) -> Tensor[src]

pub fn dist(&self, other: &Tensor) -> Tensor[src]

pub fn g_div(&self, other: &Tensor) -> Tensor[src]

pub fn g_div1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn g_div2(&self, other: &Tensor, rounding_mode: &str) -> Tensor[src]

pub fn g_div3<S>(&self, other: S, rounding_mode: &str) -> Tensor where
    S: Into<Scalar>, 
[src]
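
g_div2 and g_div3 add a rounding_mode argument, matching torch.div's "trunc" and "floor" modes. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: plain division vs. truncating and flooring division.
use tch::Tensor;

fn main() {
    let a = Tensor::of_slice(&[7.0f32, -7.0]);
    let b = Tensor::of_slice(&[2.0f32, 2.0]);

    a.g_div(&b).print();           // [ 3.5, -3.5]
    a.g_div2(&b, "trunc").print(); // [ 3.0, -3.0]
    a.g_div2(&b, "floor").print(); // [ 3.0, -4.0]
}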

pub fn g_div_(&mut self, other: &Tensor) -> Tensor[src]

pub fn g_div_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn g_div_2(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor[src]

pub fn g_div_3<S>(&mut self, other: S, rounding_mode: &str) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn div_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn div_out1(
    &self,
    out: &Tensor,
    other: &Tensor,
    rounding_mode: &str
) -> Tensor
[src]

pub fn divide(&self, other: &Tensor) -> Tensor[src]

pub fn divide1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn divide2(&self, other: &Tensor, rounding_mode: &str) -> Tensor[src]

pub fn divide3<S>(&self, other: S, rounding_mode: &str) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn divide_(&mut self, other: &Tensor) -> Tensor[src]

pub fn divide_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn divide_2(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor[src]

pub fn divide_3<S>(&mut self, other: S, rounding_mode: &str) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn divide_out1(
    &self,
    out: &Tensor,
    other: &Tensor,
    rounding_mode: &str
) -> Tensor
[src]

pub fn dot(&self, tensor: &Tensor) -> Tensor[src]

pub fn dot_out(&self, out: &Tensor, tensor: &Tensor) -> Tensor[src]

pub fn dropout(&self, p: f64, train: bool) -> Tensor[src]

pub fn dropout_(&mut self, p: f64, train: bool) -> Tensor[src]

pub fn eig(&self, eigenvectors: bool) -> (Tensor, Tensor)[src]

pub fn eig_out(
    &self,
    e: &Tensor,
    v: &Tensor,
    eigenvectors: bool
) -> (Tensor, Tensor)
[src]

pub fn elu(&self) -> Tensor[src]

pub fn elu_(&mut self) -> Tensor[src]

pub fn elu_out(&self, out: &Tensor) -> Tensor[src]

pub fn embedding_renorm_(
    &mut self,
    indices: &Tensor,
    max_norm: f64,
    norm_type: f64
) -> Tensor
[src]

pub fn empty_like(&self) -> Tensor[src]

pub fn eq<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn eq1(&self, other: &Tensor) -> Tensor[src]

pub fn eq_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn eq_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn eq_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn eq_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn erf(&self) -> Tensor[src]

pub fn erf_(&mut self) -> Tensor[src]

pub fn erf_out(&self, out: &Tensor) -> Tensor[src]

pub fn erfc(&self) -> Tensor[src]

pub fn erfc_(&mut self) -> Tensor[src]

pub fn erfc_out(&self, out: &Tensor) -> Tensor[src]

pub fn erfinv(&self) -> Tensor[src]

pub fn erfinv_(&mut self) -> Tensor[src]

pub fn erfinv_out(&self, out: &Tensor) -> Tensor[src]

pub fn exp(&self) -> Tensor[src]

pub fn exp2(&self) -> Tensor[src]

pub fn exp2_(&mut self) -> Tensor[src]

pub fn exp2_out(&self, out: &Tensor) -> Tensor[src]

pub fn exp_(&mut self) -> Tensor[src]

pub fn exp_out(&self, out: &Tensor) -> Tensor[src]

pub fn expand(&self, size: &[i64], implicit: bool) -> Tensor[src]

pub fn expand_as(&self, other: &Tensor) -> Tensor[src]

pub fn expm1(&self) -> Tensor[src]

pub fn expm1_(&mut self) -> Tensor[src]

pub fn expm1_out(&self, out: &Tensor) -> Tensor[src]

pub fn exponential_(&mut self, lambd: f64) -> Tensor[src]

pub fn fake_quantize_per_channel_affine(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64
) -> Tensor
[src]

pub fn fake_quantize_per_channel_affine_cachemask(
    &self,
    scale: &Tensor,
    zero_point: &Tensor,
    axis: i64,
    quant_min: i64,
    quant_max: i64
) -> (Tensor, Tensor)
[src]

pub fn fake_quantize_per_tensor_affine(
    &self,
    scale: f64,
    zero_point: i64,
    quant_min: i64,
    quant_max: i64
) -> Tensor
[src]

pub fn fake_quantize_per_tensor_affine_cachemask(
    &self,
    scale: f64,
    zero_point: i64,
    quant_min: i64,
    quant_max: i64
) -> (Tensor, Tensor)
[src]

pub fn fbgemm_linear_fp16_weight(
    &self,
    packed_weight: &Tensor,
    bias: &Tensor
) -> Tensor
[src]

pub fn fbgemm_linear_fp16_weight_fp32_activation(
    &self,
    packed_weight: &Tensor,
    bias: &Tensor
) -> Tensor
[src]

pub fn fbgemm_linear_int8_weight<S>(
    &self,
    weight: &Tensor,
    packed: &Tensor,
    col_offsets: &Tensor,
    weight_scale: S,
    weight_zero_point: S,
    bias: &Tensor
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fbgemm_linear_int8_weight_fp32_activation<S>(
    &self,
    weight: &Tensor,
    packed: &Tensor,
    col_offsets: &Tensor,
    weight_scale: S,
    weight_zero_point: S,
    bias: &Tensor
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fbgemm_pack_gemm_matrix_fp16(&self) -> Tensor[src]

pub fn fbgemm_pack_quantized_matrix(&self) -> Tensor[src]

pub fn fbgemm_pack_quantized_matrix1(&self, k: i64, n: i64) -> Tensor[src]

pub fn feature_alpha_dropout(&self, p: f64, train: bool) -> Tensor[src]

pub fn feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor[src]

pub fn feature_dropout(&self, p: f64, train: bool) -> Tensor[src]

pub fn feature_dropout_(&mut self, p: f64, train: bool) -> Tensor[src]

pub fn fft_fft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor[src]

pub fn fft_fft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_fft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_fft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_fftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_fftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_fftshift(&self, dim: &[i64]) -> Tensor[src]

pub fn fft_hfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_hfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_ifft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_ifft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_ifft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_ifft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_ifftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_ifftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_ifftshift(&self, dim: &[i64]) -> Tensor[src]

pub fn fft_ihfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_ihfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_irfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_irfft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_irfft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_irfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_irfftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_irfftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_rfft(
    &self,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]
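
fft_rfft computes a real-to-complex FFT; n is impl Into<Option<i64>> (None keeps the signal length) and norm takes one of torch.fft's normalisation strings ("backward", "forward", "ortho"). A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: spectrum of a length-4 real signal; rfft keeps n/2 + 1 = 3 bins.
use tch::Tensor;

fn main() {
    let signal = Tensor::of_slice(&[0.0f32, 1.0, 0.0, -1.0]);
    let spectrum = signal.fft_rfft(None::<i64>, -1, "backward");
    assert_eq!(spectrum.size(), vec![3i64]);
}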

pub fn fft_rfft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_rfft2_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fft_rfft_out(
    &self,
    out: &Tensor,
    n: impl Into<Option<i64>>,
    dim: i64,
    norm: &str
) -> Tensor
[src]

pub fn fft_rfftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Tensor[src]

pub fn fft_rfftn_out(
    &self,
    out: &Tensor,
    s: &[i64],
    dim: &[i64],
    norm: &str
) -> Tensor
[src]

pub fn fill_<S>(&mut self, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fill_1(&mut self, value: &Tensor) -> Tensor[src]

pub fn fill_diagonal_<S>(&mut self, fill_value: S, wrap: bool) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fix(&self) -> Tensor[src]

pub fn fix_(&mut self) -> Tensor[src]

pub fn fix_out(&self, out: &Tensor) -> Tensor[src]

pub fn flatten(&self, start_dim: i64, end_dim: i64) -> Tensor[src]

pub fn flip(&self, dims: &[i64]) -> Tensor[src]

pub fn fliplr(&self) -> Tensor[src]

pub fn flipud(&self) -> Tensor[src]

pub fn float_power(&self, exponent: &Tensor) -> Tensor[src]

pub fn float_power2<S>(&self, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn float_power_<S>(&mut self, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn float_power_1(&mut self, exponent: &Tensor) -> Tensor[src]

pub fn float_power_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor[src]

pub fn float_power_out2<S>(&self, out: &Tensor, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn floor(&self) -> Tensor[src]

pub fn floor_(&mut self) -> Tensor[src]

pub fn floor_divide(&self, other: &Tensor) -> Tensor[src]

pub fn floor_divide1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn floor_divide_(&mut self, other: &Tensor) -> Tensor[src]

pub fn floor_divide_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn floor_out(&self, out: &Tensor) -> Tensor[src]

pub fn fmax(&self, other: &Tensor) -> Tensor[src]

pub fn fmax_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn fmin(&self, other: &Tensor) -> Tensor[src]

pub fn fmin_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn fmod<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fmod1(&self, other: &Tensor) -> Tensor[src]

pub fn fmod_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fmod_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn fmod_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn fmod_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn frac(&self) -> Tensor[src]

pub fn frac_(&mut self) -> Tensor[src]

pub fn frac_out(&self, out: &Tensor) -> Tensor[src]

pub fn fractional_max_pool2d(
    &self,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn fractional_max_pool2d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Tensor
[src]

pub fn fractional_max_pool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Tensor
[src]

pub fn fractional_max_pool2d_out(
    &self,
    output: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn fractional_max_pool3d(
    &self,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn fractional_max_pool3d_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Tensor
[src]

pub fn fractional_max_pool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    indices: &Tensor
) -> Tensor
[src]

pub fn fractional_max_pool3d_out(
    &self,
    output: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn frobenius_norm(&self) -> Tensor[src]

pub fn frobenius_norm1(&self, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn frobenius_norm_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Tensor
[src]

pub fn full_like<S>(&self, fill_value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn gather(&self, dim: i64, index: &Tensor, sparse_grad: bool) -> Tensor[src]
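
gather selects elements along dim using an index tensor of the same rank as the receiver; sparse_grad only affects how gradients are represented. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: pick one element per row, with the column chosen by idx.
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[10i64, 11, 12, 20, 21, 22]).reshape(&[2, 3]);
    let idx = Tensor::of_slice(&[2i64, 0]).reshape(&[2, 1]);

    let picked = t.gather(1, &idx, false); // [[12], [20]]
    picked.print();
}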

pub fn gather_backward(
    &self,
    grad: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool
) -> Tensor
[src]

pub fn gather_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool
) -> Tensor
[src]

pub fn gcd(&self, other: &Tensor) -> Tensor[src]

pub fn gcd_(&mut self, other: &Tensor) -> Tensor[src]

pub fn gcd_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn ge<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ge1(&self, other: &Tensor) -> Tensor[src]

pub fn ge_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ge_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn ge_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ge_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn gelu(&self) -> Tensor[src]

pub fn gelu_backward(&self, grad: &Tensor) -> Tensor[src]

pub fn geometric_(&mut self, p: f64) -> Tensor[src]

pub fn geqrf(&self) -> (Tensor, Tensor)[src]

pub fn geqrf_out(&self, a: &Tensor, tau: &Tensor) -> (Tensor, Tensor)[src]

pub fn ger(&self, vec2: &Tensor) -> Tensor[src]

pub fn ger_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor[src]

pub fn glu(&self, dim: i64) -> Tensor[src]

pub fn glu_backward(&self, grad_output: &Tensor, dim: i64) -> Tensor[src]

pub fn glu_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    dim: i64
) -> Tensor
[src]

pub fn glu_out(&self, out: &Tensor, dim: i64) -> Tensor[src]

pub fn grad(&self) -> Tensor[src]

pub fn greater<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater1(&self, other: &Tensor) -> Tensor[src]

pub fn greater_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn greater_equal<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater_equal1(&self, other: &Tensor) -> Tensor[src]

pub fn greater_equal_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater_equal_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn greater_equal_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater_equal_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn greater_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn greater_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn grid_sampler(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Tensor
[src]

pub fn grid_sampler_2d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Tensor
[src]

pub fn grid_sampler_2d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> (Tensor, Tensor)
[src]

pub fn grid_sampler_3d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> Tensor
[src]

pub fn grid_sampler_3d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool
) -> (Tensor, Tensor)
[src]

pub fn group_norm<T>(
    &self,
    num_groups: i64,
    weight: Option<T>,
    bias: Option<T>,
    eps: f64,
    cudnn_enabled: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn gru<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn gru_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn gt<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn gt1(&self, other: &Tensor) -> Tensor[src]

pub fn gt_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn gt_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn gt_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn gt_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn hardshrink(&self) -> Tensor[src]

pub fn hardshrink_backward<S>(&self, grad_out: &Tensor, lambd: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn hardsigmoid(&self) -> Tensor[src]

pub fn hardsigmoid_(&mut self) -> Tensor[src]

pub fn hardsigmoid_backward(&self, grad_output: &Tensor) -> Tensor[src]

pub fn hardsigmoid_out(&self, out: &Tensor) -> Tensor[src]

pub fn hardswish(&self) -> Tensor[src]

pub fn hardswish_(&mut self) -> Tensor[src]

pub fn hardswish_backward(&self, grad_output: &Tensor) -> Tensor[src]

pub fn hardswish_out(&self, out: &Tensor) -> Tensor[src]

pub fn hardtanh(&self) -> Tensor[src]

pub fn hardtanh_(&mut self) -> Tensor[src]

pub fn hardtanh_backward<S>(
    &self,
    grad_output: &Tensor,
    min_val: S,
    max_val: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn hardtanh_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    min_val: S,
    max_val: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn hardtanh_out(&self, out: &Tensor) -> Tensor[src]

pub fn heaviside(&self, values: &Tensor) -> Tensor[src]

pub fn heaviside_(&mut self, values: &Tensor) -> Tensor[src]

pub fn heaviside_out(&self, out: &Tensor, values: &Tensor) -> Tensor[src]

pub fn hinge_embedding_loss(
    &self,
    target: &Tensor,
    margin: f64,
    reduction: Reduction
) -> Tensor
[src]

pub fn histc(&self, bins: i64) -> Tensor[src]

pub fn histc_out(&self, out: &Tensor, bins: i64) -> Tensor[src]

pub fn hypot(&self, other: &Tensor) -> Tensor[src]

pub fn hypot_(&mut self, other: &Tensor) -> Tensor[src]

pub fn hypot_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn i0(&self) -> Tensor[src]

pub fn i0_(&mut self) -> Tensor[src]

pub fn i0_out(&self, out: &Tensor) -> Tensor[src]

pub fn igamma(&self, other: &Tensor) -> Tensor[src]

pub fn igamma_(&mut self, other: &Tensor) -> Tensor[src]

pub fn igamma_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn igammac(&self, other: &Tensor) -> Tensor[src]

pub fn igammac_(&mut self, other: &Tensor) -> Tensor[src]

pub fn igammac_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn im2col(
    &self,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Tensor
[src]

pub fn im2col_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64]
) -> Tensor
[src]

pub fn imag(&self) -> Tensor[src]

pub fn index<T>(&self, indices: &[Option<T>]) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn index_add(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor[src]

pub fn index_add_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Tensor
[src]

pub fn index_copy(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor[src]

pub fn index_copy_(
    &mut self,
    dim: i64,
    index: &Tensor,
    source: &Tensor
) -> Tensor
[src]

pub fn index_fill<S>(&self, dim: i64, index: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn index_fill1(&self, dim: i64, index: &Tensor, value: &Tensor) -> Tensor[src]

pub fn index_fill_<S>(&mut self, dim: i64, index: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn index_fill_1(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: &Tensor
) -> Tensor
[src]

pub fn index_put<T>(
    &self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn index_put_<T>(
    &mut self,
    indices: &[Option<T>],
    values: &Tensor,
    accumulate: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn index_select(&self, dim: i64, index: &Tensor) -> Tensor[src]

pub fn index_select_out(&self, out: &Tensor, dim: i64, index: &Tensor) -> Tensor[src]
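
index_select keeps whole slices along dim, in the order given by a 1-D index tensor. A minimal sketch, assuming the tch crate's Tensor::of_slice constructor:

// Sketch: take rows 2 and 0 of a 3x2 matrix, in that order.
use tch::Tensor;

fn main() {
    let t = Tensor::of_slice(&[1i64, 2, 3, 4, 5, 6]).reshape(&[3, 2]);
    let idx = Tensor::of_slice(&[2i64, 0]);

    let rows = t.index_select(0, &idx); // [[5, 6], [1, 2]]
    rows.print();
}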

pub fn indices(&self) -> Tensor[src]

pub fn infinitely_differentiable_gelu_backward(&self, grad: &Tensor) -> Tensor[src]

pub fn inner(&self, other: &Tensor) -> Tensor[src]

pub fn inner_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn instance_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    use_input_stats: bool,
    momentum: f64,
    eps: f64,
    cudnn_enabled: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn int_repr(&self) -> Tensor[src]

pub fn inverse(&self) -> Tensor[src]

pub fn inverse_out(&self, out: &Tensor) -> Tensor[src]

pub fn isclose(
    &self,
    other: &Tensor,
    rtol: f64,
    atol: f64,
    equal_nan: bool
) -> Tensor
[src]

pub fn isfinite(&self) -> Tensor[src]

pub fn isinf(&self) -> Tensor[src]

pub fn isnan(&self) -> Tensor[src]

pub fn isneginf(&self) -> Tensor[src]

pub fn isneginf_out(&self, out: &Tensor) -> Tensor[src]

pub fn isposinf(&self) -> Tensor[src]

pub fn isposinf_out(&self, out: &Tensor) -> Tensor[src]

pub fn isreal(&self) -> Tensor[src]

pub fn istft<T>(
    &self,
    n_fft: i64,
    hop_length: impl Into<Option<i64>>,
    win_length: impl Into<Option<i64>>,
    window: Option<T>,
    center: bool,
    normalized: bool,
    onesided: bool,
    length: impl Into<Option<i64>>,
    return_complex: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn kl_div(
    &self,
    target: &Tensor,
    reduction: Reduction,
    log_target: bool
) -> Tensor
[src]

pub fn kl_div_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    log_target: bool
) -> Tensor
[src]

pub fn kron(&self, other: &Tensor) -> Tensor[src]

pub fn kron_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn kthvalue(&self, k: i64, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn kthvalue_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    k: i64,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn l1_loss(&self, target: &Tensor, reduction: Reduction) -> Tensor[src]

pub fn l1_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn l1_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn l1_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn layer_norm<T>(
    &self,
    normalized_shape: &[i64],
    weight: Option<T>,
    bias: Option<T>,
    eps: f64,
    cudnn_enable: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]
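
layer_norm normalises over the trailing normalized_shape dimensions; weight and bias are optional affine parameters of that same shape. A minimal sketch, assuming the tch crate's Tensor::randn/Tensor::ones/Tensor::zeros constructors and Kind/Device types:

// Sketch: normalise each row of an [8, 16] activation matrix.
use tch::{Device, Kind, Tensor};

fn main() {
    let opts = (Kind::Float, Device::Cpu);
    let x = Tensor::randn(&[8, 16], opts);
    let gamma = Tensor::ones(&[16], opts);
    let beta = Tensor::zeros(&[16], opts);

    let y = x.layer_norm(&[16], Some(&gamma), Some(&beta), 1e-5, true);
    assert_eq!(y.size(), x.size());
}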

pub fn lcm(&self, other: &Tensor) -> Tensor[src]

pub fn lcm_(&mut self, other: &Tensor) -> Tensor[src]

pub fn lcm_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn ldexp(&self, other: &Tensor) -> Tensor[src]

pub fn ldexp_(&mut self, other: &Tensor) -> Tensor[src]

pub fn ldexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn le<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn le1(&self, other: &Tensor) -> Tensor[src]

pub fn le_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn le_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn le_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn le_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn leaky_relu(&self) -> Tensor[src]

pub fn leaky_relu_(&mut self) -> Tensor[src]

pub fn leaky_relu_backward<S>(
    &self,
    grad_output: &Tensor,
    negative_slope: S,
    self_is_result: bool
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn leaky_relu_out(&self, out: &Tensor) -> Tensor[src]

pub fn lerp<S>(&self, end: &Tensor, weight: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lerp1(&self, end: &Tensor, weight: &Tensor) -> Tensor[src]

pub fn lerp_<S>(&mut self, end: &Tensor, weight: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lerp_1(&mut self, end: &Tensor, weight: &Tensor) -> Tensor[src]

pub fn lerp_out<S>(&self, out: &Tensor, end: &Tensor, weight: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lerp_out1(&self, out: &Tensor, end: &Tensor, weight: &Tensor) -> Tensor[src]

pub fn less<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less1(&self, other: &Tensor) -> Tensor[src]

pub fn less_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn less_equal<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less_equal1(&self, other: &Tensor) -> Tensor[src]

pub fn less_equal_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less_equal_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn less_equal_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less_equal_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn less_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn less_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn lgamma(&self) -> Tensor[src]

pub fn lgamma_(&mut self) -> Tensor[src]

pub fn lgamma_out(&self, out: &Tensor) -> Tensor[src]

pub fn linalg_cholesky(&self) -> Tensor[src]

pub fn linalg_cholesky_out(&self, out: &Tensor) -> Tensor[src]

pub fn linalg_cond<S>(&self, p: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn linalg_cond1(&self, p: &str) -> Tensor[src]

pub fn linalg_cond_out<S>(&self, out: &Tensor, p: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn linalg_cond_out1(&self, out: &Tensor, p: &str) -> Tensor[src]

pub fn linalg_det(&self) -> Tensor[src]

pub fn linalg_eigh(&self, uplo: &str) -> (Tensor, Tensor)[src]

pub fn linalg_eigh_out(
    &self,
    eigvals: &Tensor,
    eigvecs: &Tensor,
    uplo: &str
) -> (Tensor, Tensor)
[src]

pub fn linalg_eigvalsh(&self, uplo: &str) -> Tensor[src]

pub fn linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Tensor[src]

pub fn linalg_inv(&self) -> Tensor[src]

pub fn linalg_inv_out(&self, out: &Tensor) -> Tensor[src]

pub fn linalg_matrix_rank(
    &self,
    tol: impl Into<Option<f64>>,
    hermitian: bool
) -> Tensor
[src]

pub fn linalg_matrix_rank_out(
    &self,
    out: &Tensor,
    tol: impl Into<Option<f64>>,
    hermitian: bool
) -> Tensor
[src]

pub fn linalg_norm<S>(
    &self,
    ord: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn linalg_norm1(
    &self,
    ord: &str,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn linalg_norm_out<S>(
    &self,
    out: &Tensor,
    ord: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn linalg_norm_out1(
    &self,
    out: &Tensor,
    ord: &str,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn linalg_pinv(&self, rcond: f64, hermitian: bool) -> Tensor[src]

pub fn linalg_pinv1(&self, rcond: &Tensor, hermitian: bool) -> Tensor[src]

pub fn linalg_pinv_out(
    &self,
    out: &Tensor,
    rcond: f64,
    hermitian: bool
) -> Tensor
[src]

pub fn linalg_pinv_out1(
    &self,
    out: &Tensor,
    rcond: &Tensor,
    hermitian: bool
) -> Tensor
[src]

pub fn linalg_qr(&self, mode: &str) -> (Tensor, Tensor)[src]

pub fn linalg_qr_out(
    &self,
    q: &Tensor,
    r: &Tensor,
    mode: &str
) -> (Tensor, Tensor)
[src]

pub fn linalg_slogdet(&self) -> (Tensor, Tensor)[src]

pub fn linalg_slogdet_out(
    &self,
    sign: &Tensor,
    logabsdet: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn linalg_solve(&self, other: &Tensor) -> Tensor[src]

pub fn linalg_solve_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn linalg_svd(
    &self,
    full_matrices: bool,
    compute_uv: bool
) -> (Tensor, Tensor, Tensor)
[src]
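
A sketch of a reduced singular value decomposition; whether the third factor comes back as V or its conjugate transpose depends on the underlying libtorch version, so treat this as illustrative only (Tensor::randn, Kind and Device assumed from tch).

use tch::{Device, Kind, Tensor};

let m = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu));
// full_matrices = false gives the reduced factorization; compute_uv = true.
let (u, s, v) = m.linalg_svd(false, true);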

pub fn linalg_svd_out(
    &self,
    u: &Tensor,
    s: &Tensor,
    v: &Tensor,
    full_matrices: bool,
    compute_uv: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn linalg_tensorinv(&self, ind: i64) -> Tensor[src]

pub fn linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Tensor[src]

pub fn linalg_tensorsolve(&self, other: &Tensor, dims: &[i64]) -> Tensor[src]

pub fn linalg_tensorsolve_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dims: &[i64]
) -> Tensor
[src]

pub fn linear<T>(&self, weight: &Tensor, bias: Option<T>) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn log(&self) -> Tensor[src]

pub fn log10(&self) -> Tensor[src]

pub fn log10_(&mut self) -> Tensor[src]

pub fn log10_out(&self, out: &Tensor) -> Tensor[src]

pub fn log1p(&self) -> Tensor[src]

pub fn log1p_(&mut self) -> Tensor[src]

pub fn log1p_out(&self, out: &Tensor) -> Tensor[src]

pub fn log2(&self) -> Tensor[src]

pub fn log2_(&mut self) -> Tensor[src]

pub fn log2_out(&self, out: &Tensor) -> Tensor[src]

pub fn log_(&mut self) -> Tensor[src]

pub fn log_normal_(&mut self, mean: f64, std: f64) -> Tensor[src]

pub fn log_out(&self, out: &Tensor) -> Tensor[src]

pub fn log_sigmoid(&self) -> Tensor[src]

pub fn log_sigmoid_backward(
    &self,
    grad_output: &Tensor,
    buffer: &Tensor
) -> Tensor
[src]

pub fn log_sigmoid_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    buffer: &Tensor
) -> Tensor
[src]

pub fn log_sigmoid_out(&self, out: &Tensor) -> Tensor[src]

pub fn log_softmax(&self, dim: i64, dtype: Kind) -> Tensor[src]
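
A minimal sketch: log_softmax is the numerically stable way to obtain log-probabilities along a class dimension (Tensor::randn, Kind and Device assumed from tch).

use tch::{Device, Kind, Tensor};

let logits = Tensor::randn(&[2, 5], (Kind::Float, Device::Cpu));
// Log-probabilities over the 5 classes in dimension 1.
let log_probs = logits.log_softmax(1, Kind::Float);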

pub fn logaddexp(&self, other: &Tensor) -> Tensor[src]

pub fn logaddexp2(&self, other: &Tensor) -> Tensor[src]

pub fn logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn logcumsumexp(&self, dim: i64) -> Tensor[src]

pub fn logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor[src]

pub fn logdet(&self) -> Tensor[src]

pub fn logical_and(&self, other: &Tensor) -> Tensor[src]

pub fn logical_and_(&mut self, other: &Tensor) -> Tensor[src]

pub fn logical_and_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn logical_not(&self) -> Tensor[src]

pub fn logical_not_(&mut self) -> Tensor[src]

pub fn logical_not_out(&self, out: &Tensor) -> Tensor[src]

pub fn logical_or(&self, other: &Tensor) -> Tensor[src]

pub fn logical_or_(&mut self, other: &Tensor) -> Tensor[src]

pub fn logical_or_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn logical_xor(&self, other: &Tensor) -> Tensor[src]

pub fn logical_xor_(&mut self, other: &Tensor) -> Tensor[src]

pub fn logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn logit(&self, eps: impl Into<Option<f64>>) -> Tensor[src]

pub fn logit_(&mut self, eps: impl Into<Option<f64>>) -> Tensor[src]

pub fn logit_backward(
    &self,
    grad_output: &Tensor,
    eps: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn logit_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    eps: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn logit_out(&self, out: &Tensor, eps: impl Into<Option<f64>>) -> Tensor[src]

pub fn logsumexp(&self, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn logsumexp_out(&self, out: &Tensor, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn lstm<T>(
    &self,
    hx: &[T],
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn lstm_cell<T>(
    &self,
    hx: &[T],
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn lstsq(&self, a: &Tensor) -> (Tensor, Tensor)[src]

pub fn lstsq_out(&self, x: &Tensor, qr: &Tensor, a: &Tensor) -> (Tensor, Tensor)[src]

pub fn lt<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lt1(&self, other: &Tensor) -> Tensor[src]

pub fn lt_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lt_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn lt_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn lt_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Tensor[src]

pub fn lu_solve_out(
    &self,
    out: &Tensor,
    lu_data: &Tensor,
    lu_pivots: &Tensor
) -> Tensor
[src]

pub fn masked_fill<S>(&self, mask: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]
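
A minimal sketch: build a Bool mask with one of the comparison methods above and fill the selected positions with a scalar (Tensor::of_slice assumed from tch).

use tch::Tensor;

let x = Tensor::of_slice(&[-1.0f32, 2.0, -3.0, 4.0]);
// Replace every negative entry with zero.
let mask = x.lt(0.0f64);
let y = x.masked_fill(&mask, 0.0f64);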

pub fn masked_fill1(&self, mask: &Tensor, value: &Tensor) -> Tensor[src]

pub fn masked_fill_<S>(&mut self, mask: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn masked_fill_1(&mut self, mask: &Tensor, value: &Tensor) -> Tensor[src]

pub fn masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Tensor[src]

pub fn masked_scatter_(&mut self, mask: &Tensor, source: &Tensor) -> Tensor[src]

pub fn masked_select(&self, mask: &Tensor) -> Tensor[src]

pub fn masked_select_backward(&self, grad: &Tensor, mask: &Tensor) -> Tensor[src]

pub fn masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Tensor[src]

pub fn matmul(&self, other: &Tensor) -> Tensor[src]
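
A minimal sketch of a plain matrix product; matmul also broadcasts over leading batch dimensions (Tensor::randn, Kind and Device assumed from tch).

use tch::{Device, Kind, Tensor};

let a = Tensor::randn(&[2, 3], (Kind::Float, Device::Cpu));
let b = Tensor::randn(&[3, 4], (Kind::Float, Device::Cpu));
// 2x3 times 3x4 gives a 2x4 result.
let c = a.matmul(&b);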

pub fn matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn matrix_exp(&self) -> Tensor[src]

pub fn matrix_exp_backward(&self, grad: &Tensor) -> Tensor[src]

pub fn matrix_power(&self, n: i64) -> Tensor[src]

pub fn matrix_rank(&self, symmetric: bool) -> Tensor[src]

pub fn matrix_rank1(&self, tol: f64, symmetric: bool) -> Tensor[src]

pub fn max(&self) -> Tensor[src]

pub fn max1(&self, other: &Tensor) -> Tensor[src]

pub fn max2(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn max_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn max_out1(
    &self,
    max: &Tensor,
    max_values: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn max_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn max_pool1d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> (Tensor, Tensor)
[src]

pub fn max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]
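
A minimal sketch on an NCHW input; kernel_size, stride, padding and dilation are per-spatial-dimension slices (Tensor::randn, Kind and Device assumed from tch).

use tch::{Device, Kind, Tensor};

// One image, 3 channels, 8x8 spatial extent.
let x = Tensor::randn(&[1, 3, 8, 8], (Kind::Float, Device::Cpu));
// 2x2 window, stride 2, no padding, dilation 1, floor mode -> 1x3x4x4.
let pooled = x.max_pool2d(&[2, 2], &[2, 2], &[0, 0], &[1, 1], false);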

pub fn max_pool2d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> (Tensor, Tensor)
[src]

pub fn max_pool2d_with_indices_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Tensor
[src]

pub fn max_pool2d_with_indices_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Tensor
[src]

pub fn max_pool2d_with_indices_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> (Tensor, Tensor)
[src]

pub fn max_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn max_pool3d_with_indices(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> (Tensor, Tensor)
[src]

pub fn max_pool3d_with_indices_backward(
    &self,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Tensor
[src]

pub fn max_pool3d_with_indices_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool,
    indices: &Tensor
) -> Tensor
[src]

pub fn max_pool3d_with_indices_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> (Tensor, Tensor)
[src]

pub fn max_unpool2d(&self, indices: &Tensor, output_size: &[i64]) -> Tensor[src]

pub fn max_unpool2d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Tensor
[src]

pub fn max_unpool2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Tensor
[src]

pub fn max_unpool2d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64]
) -> Tensor
[src]

pub fn max_unpool3d(
    &self,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Tensor
[src]

pub fn max_unpool3d_backward(
    &self,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Tensor
[src]

pub fn max_unpool3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Tensor
[src]

pub fn max_unpool3d_out(
    &self,
    out: &Tensor,
    indices: &Tensor,
    output_size: &[i64],
    stride: &[i64],
    padding: &[i64]
) -> Tensor
[src]

pub fn maximum(&self, other: &Tensor) -> Tensor[src]

pub fn maximum_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn mean(&self, dtype: Kind) -> Tensor[src]

pub fn mean1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor[src]

pub fn mean_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn median(&self) -> Tensor[src]

pub fn median1(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn median_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn min(&self) -> Tensor[src]

pub fn min1(&self, other: &Tensor) -> Tensor[src]

pub fn min2(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn min_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn min_out1(
    &self,
    min: &Tensor,
    min_indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn minimum(&self, other: &Tensor) -> Tensor[src]

pub fn minimum_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn miopen_batch_norm<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    exponential_average_factor: f64,
    epsilon: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn miopen_batch_norm_backward<T>(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    running_mean: Option<T>,
    running_var: Option<T>,
    save_mean: Option<T>,
    save_var: Option<T>,
    epsilon: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn miopen_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn miopen_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor
[src]

pub fn miopen_convolution_transpose<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    output_padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn miopen_convolution_transpose_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor
[src]

pub fn miopen_depthwise_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn miopen_depthwise_convolution_backward_weight(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    benchmark: bool,
    deterministic: bool
) -> Tensor
[src]

pub fn miopen_rnn<T>(
    &self,
    weight: &[T],
    weight_stride0: i64,
    hx: &Tensor,
    cx: Option<T>,
    mode: i64,
    hidden_size: i64,
    num_layers: i64,
    batch_first: bool,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_sizes: &[i64],
    dropout_state: Option<T>
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn mkldnn_adaptive_avg_pool2d(&self, output_size: &[i64]) -> Tensor[src]

pub fn mkldnn_convolution<T>(
    &self,
    weight: &Tensor,
    bias: Option<T>,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn mkldnn_convolution_backward_weights(
    &self,
    weight_size: &[i64],
    grad_output: &Tensor,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64,
    bias_defined: bool
) -> (Tensor, Tensor)
[src]

pub fn mkldnn_linear<T>(&self, weight: &Tensor, bias: Option<T>) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn mkldnn_linear_backward_weights(
    &self,
    grad_output: &Tensor,
    weight: &Tensor,
    bias_defined: bool
) -> (Tensor, Tensor)
[src]

pub fn mkldnn_max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn mkldnn_max_pool3d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn mkldnn_reorder_conv2d_weight(
    &self,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor
[src]

pub fn mkldnn_reorder_conv3d_weight(
    &self,
    padding: &[i64],
    stride: &[i64],
    dilation: &[i64],
    groups: i64
) -> Tensor
[src]

pub fn mm(&self, mat2: &Tensor) -> Tensor[src]

pub fn mm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn mode(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn mode_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn moveaxis(&self, source: &[i64], destination: &[i64]) -> Tensor[src]

pub fn moveaxis1(&self, source: i64, destination: i64) -> Tensor[src]

pub fn movedim(&self, source: &[i64], destination: &[i64]) -> Tensor[src]

pub fn movedim1(&self, source: i64, destination: i64) -> Tensor[src]

pub fn mse_loss(&self, target: &Tensor, reduction: Reduction) -> Tensor[src]
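
A minimal sketch: with Reduction::Mean the result is a single-element tensor (Tensor::of_slice and Reduction assumed from tch).

use tch::{Reduction, Tensor};

let pred = Tensor::of_slice(&[0.5f32, 1.5, 2.0]);
let target = Tensor::of_slice(&[1.0f32, 1.0, 2.0]);
// Mean of the elementwise squared differences.
let loss = pred.mse_loss(&target, Reduction::Mean);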

pub fn mse_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn mse_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn mse_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn msort(&self) -> Tensor[src]

pub fn msort_out(&self, out: &Tensor) -> Tensor[src]

pub fn g_mul(&self, other: &Tensor) -> Tensor[src]

pub fn g_mul1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn g_mul_(&mut self, other: &Tensor) -> Tensor[src]

pub fn g_mul_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn mul_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn multi_margin_loss_backward<T, S>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    p: S,
    margin: S,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn multi_margin_loss_backward_out<T, S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    p: S,
    margin: S,
    weight: Option<T>,
    reduction: Reduction
) -> Tensor where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn multilabel_margin_loss(
    &self,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn multilabel_margin_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    is_target: &Tensor
) -> Tensor
[src]

pub fn multilabel_margin_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    is_target: &Tensor
) -> Tensor
[src]

pub fn multilabel_margin_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn multinomial(&self, num_samples: i64, replacement: bool) -> Tensor[src]

pub fn multinomial_out(
    &self,
    out: &Tensor,
    num_samples: i64,
    replacement: bool
) -> Tensor
[src]

pub fn multiply(&self, other: &Tensor) -> Tensor[src]

pub fn multiply1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn multiply_(&mut self, other: &Tensor) -> Tensor[src]

pub fn multiply_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn multiply_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn mv(&self, vec: &Tensor) -> Tensor[src]

pub fn mv_out(&self, out: &Tensor, vec: &Tensor) -> Tensor[src]

pub fn mvlgamma(&self, p: i64) -> Tensor[src]

pub fn mvlgamma_(&mut self, p: i64) -> Tensor[src]

pub fn nan_to_num(
    &self,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Tensor
[src]
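
A hedged sketch: each argument is an optional replacement value, and passing None keeps the library default for that case (Tensor::of_slice assumed from tch; the None::<f64> turbofish is only there to pin the generic parameter).

use tch::Tensor;

let x = Tensor::of_slice(&[f64::NAN, 1.0, f64::INFINITY]);
// Replace NaN with 0.0 and leave the defaults for +/- infinity.
let y = x.nan_to_num(0.0f64, None::<f64>, None::<f64>);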

pub fn nan_to_num_(
    &mut self,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn nan_to_num_out(
    &self,
    out: &Tensor,
    nan: impl Into<Option<f64>>,
    posinf: impl Into<Option<f64>>,
    neginf: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn nanmedian(&self) -> Tensor[src]

pub fn nanmedian1(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor)[src]

pub fn nanmedian_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn nanquantile(
    &self,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn nanquantile1(
    &self,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn nanquantile_out(
    &self,
    out: &Tensor,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn nanquantile_out1(
    &self,
    out: &Tensor,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn nansum(&self, dtype: Kind) -> Tensor[src]

pub fn nansum1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor[src]

pub fn nansum_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn narrow(&self, dim: i64, start: i64, length: i64) -> Tensor[src]

pub fn narrow1(&self, dim: i64, start: &Tensor, length: i64) -> Tensor[src]

pub fn narrow_copy(&self, dim: i64, start: i64, length: i64) -> Tensor[src]

pub fn narrow_copy_out(
    &self,
    out: &Tensor,
    dim: i64,
    start: i64,
    length: i64
) -> Tensor
[src]

pub fn native_batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn native_batch_norm_out<T>(
    &self,
    out: &Tensor,
    save_mean: &Tensor,
    save_invstd: &Tensor,
    weight: Option<T>,
    bias: Option<T>,
    running_mean: Option<T>,
    running_var: Option<T>,
    training: bool,
    momentum: f64,
    eps: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn native_group_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    n: i64,
    c: i64,
    hxw: i64,
    group: i64,
    eps: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn native_layer_norm<T>(
    &self,
    normalized_shape: &[i64],
    weight: Option<T>,
    bias: Option<T>,
    eps: f64
) -> (Tensor, Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn native_norm(&self) -> Tensor[src]

pub fn native_norm1<S>(
    &self,
    p: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ne<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ne1(&self, other: &Tensor) -> Tensor[src]

pub fn ne_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ne_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn ne_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn ne_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn neg(&self) -> Tensor[src]

pub fn neg_(&mut self) -> Tensor[src]

pub fn neg_out(&self, out: &Tensor) -> Tensor[src]

pub fn negative(&self) -> Tensor[src]

pub fn negative_(&mut self) -> Tensor[src]

pub fn negative_out(&self, out: &Tensor) -> Tensor[src]

pub fn new_empty(&self, size: &[i64], options: (Kind, Device)) -> Tensor[src]

pub fn new_empty_strided(
    &self,
    size: &[i64],
    stride: &[i64],
    options: (Kind, Device)
) -> Tensor
[src]

pub fn new_full<S>(
    &self,
    size: &[i64],
    fill_value: S,
    options: (Kind, Device)
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn new_zeros(&self, size: &[i64], options: (Kind, Device)) -> Tensor[src]

pub fn nextafter(&self, other: &Tensor) -> Tensor[src]

pub fn nextafter_(&mut self, other: &Tensor) -> Tensor[src]

pub fn nextafter_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn g_nll_loss<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]
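
A hedged sketch: the negative log-likelihood loss expects log-probabilities, so it is usually paired with log_softmax; None::<Tensor> skips the per-class weights and -100 is the conventional ignore_index (Tensor::randn, Tensor::of_slice, Kind, Device and Reduction assumed from tch).

use tch::{Device, Kind, Reduction, Tensor};

let logits = Tensor::randn(&[3, 5], (Kind::Float, Device::Cpu));
let targets = Tensor::of_slice(&[0i64, 2, 4]);
let log_probs = logits.log_softmax(1, Kind::Float);
// Scalar mean NLL loss over the 3 samples.
let loss = log_probs.g_nll_loss(&targets, None::<Tensor>, Reduction::Mean, -100);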

pub fn nll_loss2d<T>(
    &self,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss2d_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss2d_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss2d_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss_backward<T>(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss_backward_out<T>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64,
    total_weight: &Tensor
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nll_loss_out<T>(
    &self,
    out: &Tensor,
    target: &Tensor,
    weight: Option<T>,
    reduction: Reduction,
    ignore_index: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn nonzero(&self) -> Tensor[src]

pub fn nonzero_numpy(&self) -> Vec<Tensor, Global>[src]

pub fn nonzero_out(&self, out: &Tensor) -> Tensor[src]

pub fn norm(&self) -> Tensor[src]

pub fn norm1<S>(&self, p: S, dtype: Kind) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn norm2<S>(&self, p: S, dim: &[i64], keepdim: bool) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn norm3<S>(&self, p: S, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn norm_out<S>(
    &self,
    out: &Tensor,
    p: S,
    dim: &[i64],
    keepdim: bool
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn norm_out1<S>(
    &self,
    out: &Tensor,
    p: S,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn normal_(&mut self, mean: f64, std: f64) -> Tensor[src]

pub fn not_equal<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn not_equal1(&self, other: &Tensor) -> Tensor[src]

pub fn not_equal_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn not_equal_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn not_equal_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn not_equal_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn nuclear_norm(&self, keepdim: bool) -> Tensor[src]

pub fn nuclear_norm1(&self, dim: &[i64], keepdim: bool) -> Tensor[src]

pub fn nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Tensor[src]

pub fn nuclear_norm_out1(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool
) -> Tensor
[src]

pub fn numpy_t(&self) -> Tensor[src]

pub fn one_hot(&self, num_classes: i64) -> Tensor[src]
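
A minimal sketch: the receiver holds integer class indices and the result gains a trailing dimension of size num_classes (Tensor::of_slice assumed from tch).

use tch::Tensor;

let labels = Tensor::of_slice(&[0i64, 2, 1]);
// 3x4 matrix with a single 1 per row.
let onehot = labels.one_hot(4);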

pub fn ones_like(&self) -> Tensor[src]

pub fn orgqr(&self, input2: &Tensor) -> Tensor[src]

pub fn orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Tensor[src]

pub fn ormqr(
    &self,
    input2: &Tensor,
    input3: &Tensor,
    left: bool,
    transpose: bool
) -> Tensor
[src]

pub fn ormqr_out(
    &self,
    out: &Tensor,
    input2: &Tensor,
    input3: &Tensor,
    left: bool,
    transpose: bool
) -> Tensor
[src]

pub fn outer(&self, vec2: &Tensor) -> Tensor[src]

pub fn outer_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor[src]

pub fn pdist(&self, p: f64) -> Tensor[src]

pub fn permute(&self, dims: &[i64]) -> Tensor[src]

pub fn pin_memory(&self) -> Tensor[src]

pub fn pinverse(&self, rcond: f64) -> Tensor[src]

pub fn pixel_shuffle(&self, upscale_factor: i64) -> Tensor[src]

pub fn pixel_unshuffle(&self, downscale_factor: i64) -> Tensor[src]

pub fn poisson(&self) -> Tensor[src]

pub fn poisson_nll_loss(
    &self,
    target: &Tensor,
    log_input: bool,
    full: bool,
    eps: f64,
    reduction: Reduction
) -> Tensor
[src]

pub fn polygamma(&self, n: i64) -> Tensor[src]

pub fn polygamma_(&mut self, n: i64) -> Tensor[src]

pub fn polygamma_out(&self, out: &Tensor, n: i64) -> Tensor[src]

pub fn pow<S>(&self, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn pow1(&self, exponent: &Tensor) -> Tensor[src]

pub fn pow_<S>(&mut self, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn pow_1(&mut self, exponent: &Tensor) -> Tensor[src]

pub fn pow_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor[src]

pub fn pow_out2<S>(&self, out: &Tensor, exponent: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn prelu(&self, weight: &Tensor) -> Tensor[src]

pub fn prelu_backward(
    &self,
    grad_output: &Tensor,
    weight: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn prod(&self, dtype: Kind) -> Tensor[src]

pub fn prod1(&self, dim: i64, keepdim: bool, dtype: Kind) -> Tensor[src]

pub fn prod_out(
    &self,
    out: &Tensor,
    dim: i64,
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn put_(
    &mut self,
    index: &Tensor,
    source: &Tensor,
    accumulate: bool
) -> Tensor
[src]

pub fn q_per_channel_scales(&self) -> Tensor[src]

pub fn q_per_channel_zero_points(&self) -> Tensor[src]

pub fn qr(&self, some: bool) -> (Tensor, Tensor)[src]

pub fn qr_out(&self, q: &Tensor, r: &Tensor, some: bool) -> (Tensor, Tensor)[src]

pub fn quantile(
    &self,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn quantile1(
    &self,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn quantile_out(
    &self,
    out: &Tensor,
    q: f64,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn quantile_out1(
    &self,
    out: &Tensor,
    q: &Tensor,
    dim: impl Into<Option<i64>>,
    keepdim: bool
) -> Tensor
[src]

pub fn quantize_per_channel(
    &self,
    scales: &Tensor,
    zero_points: &Tensor,
    axis: i64,
    dtype: Kind
) -> Tensor
[src]

pub fn quantize_per_tensor(
    &self,
    scale: f64,
    zero_point: i64,
    dtype: Kind
) -> Tensor
[src]

pub fn quantized_batch_norm<T>(
    &self,
    weight: Option<T>,
    bias: Option<T>,
    mean: &Tensor,
    var: &Tensor,
    eps: f64,
    output_scale: f64,
    output_zero_point: i64
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn quantized_gru_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn quantized_lstm_cell<T, S>(
    &self,
    hx: &[T],
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>,
    S: Into<Scalar>, 
[src]

pub fn quantized_max_pool1d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn quantized_max_pool2d(
    &self,
    kernel_size: &[i64],
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64],
    ceil_mode: bool
) -> Tensor
[src]

pub fn quantized_rnn_relu_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn quantized_rnn_tanh_cell<S>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: &Tensor,
    b_hh: &Tensor,
    packed_ih: &Tensor,
    packed_hh: &Tensor,
    col_offsets_ih: &Tensor,
    col_offsets_hh: &Tensor,
    scale_ih: S,
    scale_hh: S,
    zero_point_ih: S,
    zero_point_hh: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn rad2deg(&self) -> Tensor[src]

pub fn rad2deg_(&mut self) -> Tensor[src]

pub fn rad2deg_out(&self, out: &Tensor) -> Tensor[src]

pub fn rand_like(&self) -> Tensor[src]

pub fn randint_like(&self, high: i64) -> Tensor[src]

pub fn randint_like1(&self, low: i64, high: i64) -> Tensor[src]

pub fn randn_like(&self) -> Tensor[src]

pub fn random_(&mut self) -> Tensor[src]

pub fn random_1(&mut self, to: i64) -> Tensor[src]

pub fn random_2(&mut self, from: i64, to: impl Into<Option<i64>>) -> Tensor[src]

pub fn ravel(&self) -> Tensor[src]

pub fn real(&self) -> Tensor[src]

pub fn reciprocal(&self) -> Tensor[src]

pub fn reciprocal_(&mut self) -> Tensor[src]

pub fn reciprocal_out(&self, out: &Tensor) -> Tensor[src]

pub fn reflection_pad1d(&self, padding: &[i64]) -> Tensor[src]

pub fn reflection_pad1d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn reflection_pad1d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn reflection_pad1d_out(&self, out: &Tensor, padding: &[i64]) -> Tensor[src]

pub fn reflection_pad2d(&self, padding: &[i64]) -> Tensor[src]

pub fn reflection_pad2d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn reflection_pad2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn reflection_pad2d_out(&self, out: &Tensor, padding: &[i64]) -> Tensor[src]

pub fn relu(&self) -> Tensor[src]

pub fn relu_(&mut self) -> Tensor[src]

pub fn remainder<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn remainder1(&self, other: &Tensor) -> Tensor[src]

pub fn remainder_<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn remainder_1(&mut self, other: &Tensor) -> Tensor[src]

pub fn remainder_out<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn remainder_out1(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn renorm<S>(&self, p: S, dim: i64, maxnorm: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn renorm_<S>(&mut self, p: S, dim: i64, maxnorm: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn renorm_out<S>(&self, out: &Tensor, p: S, dim: i64, maxnorm: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn repeat(&self, repeats: &[i64]) -> Tensor[src]

pub fn repeat_interleave1(
    &self,
    repeats: &Tensor,
    dim: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn repeat_interleave2(
    &self,
    repeats: i64,
    dim: impl Into<Option<i64>>
) -> Tensor
[src]

pub fn replication_pad1d(&self, padding: &[i64]) -> Tensor[src]

pub fn replication_pad1d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad1d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad1d_out(&self, out: &Tensor, padding: &[i64]) -> Tensor[src]

pub fn replication_pad2d(&self, padding: &[i64]) -> Tensor[src]

pub fn replication_pad2d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad2d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad2d_out(&self, out: &Tensor, padding: &[i64]) -> Tensor[src]

pub fn replication_pad3d(&self, padding: &[i64]) -> Tensor[src]

pub fn replication_pad3d_backward(
    &self,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad3d_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    padding: &[i64]
) -> Tensor
[src]

pub fn replication_pad3d_out(&self, out: &Tensor, padding: &[i64]) -> Tensor[src]

pub fn requires_grad_(&mut self, requires_grad: bool) -> Tensor[src]

pub fn reshape(&self, shape: &[i64]) -> Tensor[src]

pub fn reshape_as(&self, other: &Tensor) -> Tensor[src]

pub fn resize_(&mut self, size: &[i64]) -> Tensor[src]

pub fn resize_as_(&mut self, the_template: &Tensor) -> Tensor[src]

pub fn rnn_relu<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn rnn_relu_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn rnn_tanh<T>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool
) -> (Tensor, Tensor) where
    T: Borrow<Tensor>, 
[src]

pub fn rnn_tanh_cell<T>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn roll(&self, shifts: &[i64], dims: &[i64]) -> Tensor[src]

pub fn rot90(&self, k: i64, dims: &[i64]) -> Tensor[src]

pub fn round(&self) -> Tensor[src]

pub fn round_(&mut self) -> Tensor[src]

pub fn round_out(&self, out: &Tensor) -> Tensor[src]

pub fn rrelu(&self, training: bool) -> Tensor[src]

pub fn rrelu_(&mut self, training: bool) -> Tensor[src]

pub fn rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Tensor[src]

pub fn rrelu_with_noise_(&mut self, noise: &Tensor, training: bool) -> Tensor[src]

pub fn rrelu_with_noise_backward<S>(
    &self,
    grad_output: &Tensor,
    noise: &Tensor,
    lower: S,
    upper: S,
    training: bool,
    self_is_result: bool
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn rrelu_with_noise_out(
    &self,
    out: &Tensor,
    noise: &Tensor,
    training: bool
) -> Tensor
[src]

pub fn rsqrt(&self) -> Tensor[src]

pub fn rsqrt_(&mut self) -> Tensor[src]

pub fn rsqrt_out(&self, out: &Tensor) -> Tensor[src]

pub fn rsub(&self, other: &Tensor) -> Tensor[src]

pub fn rsub1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor[src]

pub fn scatter1<S>(&self, dim: i64, index: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn scatter_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor[src]

pub fn scatter_1<S>(&mut self, dim: i64, index: &Tensor, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn scatter_2(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
    reduce: &str
) -> Tensor
[src]

pub fn scatter_3<S>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S,
    reduce: &str
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn scatter_add(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor[src]
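
A minimal sketch: values from src are accumulated into a copy of self at the positions given by index along dim (Tensor::zeros and Tensor::of_slice assumed from tch).

use tch::{Device, Kind, Tensor};

let base = Tensor::zeros(&[5], (Kind::Float, Device::Cpu));
let index = Tensor::of_slice(&[0i64, 2, 2, 4]);
let src = Tensor::of_slice(&[1.0f32, 2.0, 3.0, 4.0]);
// Result is [1, 0, 5, 0, 4]: the two entries aimed at position 2 are summed.
let summed = base.scatter_add(0, &index, &src);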

pub fn scatter_add_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor[src]

pub fn searchsorted(
    &self,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool
) -> Tensor
[src]

pub fn searchsorted_out(
    &self,
    out: &Tensor,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool
) -> Tensor
[src]

pub fn select(&self, dim: i64, index: i64) -> Tensor[src]

pub fn selu(&self) -> Tensor[src]

pub fn selu_(&mut self) -> Tensor[src]

pub fn set_(&mut self) -> Tensor[src]

pub fn set_1(&mut self, source: &Tensor) -> Tensor[src]

pub fn set_requires_grad(&self, r: bool) -> Tensor[src]

pub fn sgn(&self) -> Tensor[src]

pub fn sgn_(&mut self) -> Tensor[src]

pub fn sgn_out(&self, out: &Tensor) -> Tensor[src]

pub fn sigmoid(&self) -> Tensor[src]

pub fn sigmoid_(&mut self) -> Tensor[src]

pub fn sigmoid_out(&self, out: &Tensor) -> Tensor[src]

pub fn sign(&self) -> Tensor[src]

pub fn sign_(&mut self) -> Tensor[src]

pub fn sign_out(&self, out: &Tensor) -> Tensor[src]

pub fn signbit(&self) -> Tensor[src]

pub fn signbit_out(&self, out: &Tensor) -> Tensor[src]

pub fn silu(&self) -> Tensor[src]

pub fn silu_(&mut self) -> Tensor[src]

pub fn silu_backward(&self, grad_output: &Tensor) -> Tensor[src]

pub fn silu_out(&self, out: &Tensor) -> Tensor[src]

pub fn sin(&self) -> Tensor[src]

pub fn sin_(&mut self) -> Tensor[src]

pub fn sin_out(&self, out: &Tensor) -> Tensor[src]

pub fn sinc(&self) -> Tensor[src]

pub fn sinc_(&mut self) -> Tensor[src]

pub fn sinc_out(&self, out: &Tensor) -> Tensor[src]

pub fn sinh(&self) -> Tensor[src]

pub fn sinh_(&mut self) -> Tensor[src]

pub fn sinh_out(&self, out: &Tensor) -> Tensor[src]

pub fn slice(
    &self,
    dim: i64,
    start: impl Into<Option<i64>>,
    end: impl Into<Option<i64>>,
    step: i64
) -> Tensor
[src]
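
A minimal sketch: start and end are optional, end is exclusive, and step must be positive (Tensor::of_slice assumed from tch; the i64 bindings are only there to pin the Into<Option<i64>> parameters).

use tch::Tensor;

let x = Tensor::of_slice(&[0.0f32, 1.0, 2.0, 3.0, 4.0]);
let (start, end) = (1i64, 3i64);
// Elements 1..3 along dimension 0: [1.0, 2.0].
let part = x.slice(0, start, end, 1);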

pub fn slogdet(&self) -> (Tensor, Tensor)[src]

pub fn slow_conv3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv3d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_dilated2d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_dilated3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_transpose2d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_transpose2d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_transpose3d<T>(
    &self,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn slow_conv_transpose3d_out<T>(
    &self,
    out: &Tensor,
    weight: &Tensor,
    kernel_size: &[i64],
    bias: Option<T>,
    stride: &[i64],
    padding: &[i64],
    output_padding: &[i64],
    dilation: &[i64]
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn smm(&self, mat2: &Tensor) -> Tensor[src]

pub fn smooth_l1_loss(
    &self,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Tensor
[src]

pub fn smooth_l1_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Tensor
[src]

pub fn smooth_l1_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Tensor
[src]

pub fn smooth_l1_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction,
    beta: f64
) -> Tensor
[src]

pub fn soft_margin_loss(&self, target: &Tensor, reduction: Reduction) -> Tensor[src]

pub fn soft_margin_loss_backward(
    &self,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn soft_margin_loss_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn soft_margin_loss_out(
    &self,
    out: &Tensor,
    target: &Tensor,
    reduction: Reduction
) -> Tensor
[src]

pub fn softmax(&self, dim: i64, dtype: Kind) -> Tensor[src]

pub fn softplus(&self) -> Tensor[src]

pub fn softplus_backward<S>(
    &self,
    grad_output: &Tensor,
    beta: S,
    threshold: S,
    output: &Tensor
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn softplus_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    beta: S,
    threshold: S,
    output: &Tensor
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn softplus_out(&self, out: &Tensor) -> Tensor[src]

pub fn softshrink(&self) -> Tensor[src]

pub fn softshrink_backward<S>(&self, grad_output: &Tensor, lambd: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn softshrink_backward_out<S>(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    lambd: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn softshrink_out(&self, out: &Tensor) -> Tensor[src]

pub fn solve(&self, a: &Tensor) -> (Tensor, Tensor)[src]

pub fn solve_out(
    &self,
    solution: &Tensor,
    lu: &Tensor,
    a: &Tensor
) -> (Tensor, Tensor)
[src]

pub fn sort(&self, dim: i64, descending: bool) -> (Tensor, Tensor)[src]

pub fn sort_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    dim: i64,
    descending: bool
) -> (Tensor, Tensor)
[src]

pub fn sparse_mask(&self, mask: &Tensor) -> Tensor[src]

pub fn sparse_resize_(
    &mut self,
    size: &[i64],
    sparse_dim: i64,
    dense_dim: i64
) -> Tensor
[src]

pub fn sparse_resize_and_clear_(
    &mut self,
    size: &[i64],
    sparse_dim: i64,
    dense_dim: i64
) -> Tensor
[src]

pub fn split(&self, split_size: i64, dim: i64) -> Vec<Tensor, Global>[src]

pub fn split_with_sizes(
    &self,
    split_sizes: &[i64],
    dim: i64
) -> Vec<Tensor, Global>
[src]

pub fn sqrt(&self) -> Tensor[src]

pub fn sqrt_(&mut self) -> Tensor[src]

pub fn sqrt_out(&self, out: &Tensor) -> Tensor[src]

pub fn square(&self) -> Tensor[src]

pub fn square_(&mut self) -> Tensor[src]

pub fn squeeze(&self) -> Tensor[src]

pub fn squeeze1(&self, dim: i64) -> Tensor[src]

pub fn squeeze_(&mut self) -> Tensor[src]

pub fn squeeze_1(&mut self, dim: i64) -> Tensor[src]

pub fn sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn sspaddmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor[src]

pub fn std(&self, unbiased: bool) -> Tensor[src]

pub fn std1(&self, dim: &[i64], unbiased: bool, keepdim: bool) -> Tensor[src]

pub fn std_mean(&self, unbiased: bool) -> (Tensor, Tensor)[src]

pub fn std_mean1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn std_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Tensor
[src]

pub fn stft<T>(
    &self,
    n_fft: i64,
    hop_length: impl Into<Option<i64>>,
    win_length: impl Into<Option<i64>>,
    window: Option<T>,
    normalized: bool,
    onesided: bool,
    return_complex: bool
) -> Tensor where
    T: Borrow<Tensor>, 
[src]

pub fn g_sub(&self, other: &Tensor) -> Tensor[src]

pub fn g_sub1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn g_sub_(&mut self, other: &Tensor) -> Tensor[src]

pub fn g_sub_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn sub_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn subtract(&self, other: &Tensor) -> Tensor[src]

pub fn subtract1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn subtract_(&mut self, other: &Tensor) -> Tensor[src]

pub fn subtract_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn subtract_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn sum(&self, dtype: Kind) -> Tensor[src]

pub fn sum1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor[src]

pub fn sum_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
    dtype: Kind
) -> Tensor
[src]

pub fn sum_to_size(&self, size: &[i64]) -> Tensor[src]

pub fn svd(&self, some: bool, compute_uv: bool) -> (Tensor, Tensor, Tensor)[src]

pub fn svd_out(
    &self,
    u: &Tensor,
    s: &Tensor,
    v: &Tensor,
    some: bool,
    compute_uv: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn swapaxes(&self, axis0: i64, axis1: i64) -> Tensor[src]

pub fn swapaxes_(&mut self, axis0: i64, axis1: i64) -> Tensor[src]

pub fn swapdims(&self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn swapdims_(&mut self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn symeig(&self, eigenvectors: bool, upper: bool) -> (Tensor, Tensor)[src]

pub fn symeig_out(
    &self,
    e: &Tensor,
    v: &Tensor,
    eigenvectors: bool,
    upper: bool
) -> (Tensor, Tensor)
[src]

pub fn tr(&self) -> Tensor[src]

pub fn t_(&mut self) -> Tensor[src]

pub fn take(&self, index: &Tensor) -> Tensor[src]

pub fn take_backward(&self, grad: &Tensor, index: &Tensor) -> Tensor[src]

pub fn take_out(&self, out: &Tensor, index: &Tensor) -> Tensor[src]

pub fn tan(&self) -> Tensor[src]

pub fn tan_(&mut self) -> Tensor[src]

pub fn tan_out(&self, out: &Tensor) -> Tensor[src]

pub fn tanh(&self) -> Tensor[src]

pub fn tanh_(&mut self) -> Tensor[src]

pub fn tanh_out(&self, out: &Tensor) -> Tensor[src]

pub fn tensor_split(&self, sections: i64, dim: i64) -> Vec<Tensor, Global>[src]

pub fn tensor_split1(&self, indices: &[i64], dim: i64) -> Vec<Tensor, Global>[src]

pub fn tensor_split2(
    &self,
    tensor_indices_or_sections: &Tensor,
    dim: i64
) -> Vec<Tensor, Global>
[src]

pub fn tensordot(
    &self,
    other: &Tensor,
    dims_self: &[i64],
    dims_other: &[i64]
) -> Tensor
[src]

pub fn tensordot_out(
    &self,
    out: &Tensor,
    other: &Tensor,
    dims_self: &[i64],
    dims_other: &[i64]
) -> Tensor
[src]

pub fn threshold<S>(&self, threshold: S, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn threshold_<S>(&mut self, threshold: S, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn threshold_backward<S>(
    &self,
    grad_output: &Tensor,
    threshold: S
) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn threshold_out<S>(&self, out: &Tensor, threshold: S, value: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn tile(&self, dims: &[i64]) -> Tensor[src]

pub fn to(&self, device: Device) -> Tensor[src]

pub fn to1(
    &self,
    options: (Kind, Device),
    non_blocking: bool,
    copy: bool
) -> Tensor
[src]

pub fn to2(&self, dtype: Kind, non_blocking: bool, copy: bool) -> Tensor[src]

pub fn to3(&self, other: &Tensor, non_blocking: bool, copy: bool) -> Tensor[src]

pub fn to4(
    &self,
    device: Device,
    dtype: Kind,
    non_blocking: bool,
    copy: bool
) -> Tensor
[src]

pub fn to_dense(&self, dtype: Kind) -> Tensor[src]

pub fn to_dense_backward(&self, grad: &Tensor) -> Tensor[src]

pub fn g_to_mkldnn(&self, dtype: Kind) -> Tensor[src]

pub fn to_mkldnn_backward(&self, grad: &Tensor) -> Tensor[src]

pub fn to_sparse(&self) -> Tensor[src]

pub fn to_sparse1(&self, sparse_dim: i64) -> Tensor[src]

pub fn topk(
    &self,
    k: i64,
    dim: i64,
    largest: bool,
    sorted: bool
) -> (Tensor, Tensor)
[src]
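
A minimal sketch: topk returns the k largest (or smallest, with largest = false) values along dim together with their indices (Tensor::of_slice assumed from tch).

use tch::Tensor;

let scores = Tensor::of_slice(&[0.1f32, 0.7, 0.3, 0.9]);
// Two largest scores, sorted in descending order, with their positions.
let (values, indices) = scores.topk(2, 0, true, true);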

pub fn topk_out(
    &self,
    values: &Tensor,
    indices: &Tensor,
    k: i64,
    dim: i64,
    largest: bool,
    sorted: bool
) -> (Tensor, Tensor)
[src]

pub fn totype(&self, scalar_type: Kind) -> Tensor[src]

pub fn trace(&self) -> Tensor[src]

pub fn transpose(&self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor[src]

pub fn triangular_solve(
    &self,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> (Tensor, Tensor)
[src]

pub fn triangular_solve_out(
    &self,
    x: &Tensor,
    m: &Tensor,
    a: &Tensor,
    upper: bool,
    transpose: bool,
    unitriangular: bool
) -> (Tensor, Tensor)
[src]

pub fn tril(&self, diagonal: i64) -> Tensor[src]

pub fn tril_(&mut self, diagonal: i64) -> Tensor[src]

pub fn tril_out(&self, out: &Tensor, diagonal: i64) -> Tensor[src]

pub fn triu(&self, diagonal: i64) -> Tensor[src]

pub fn triu_(&mut self, diagonal: i64) -> Tensor[src]

pub fn triu_out(&self, out: &Tensor, diagonal: i64) -> Tensor[src]

pub fn true_divide(&self, other: &Tensor) -> Tensor[src]

pub fn true_divide1<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn true_divide_(&mut self, other: &Tensor) -> Tensor[src]

pub fn true_divide_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn true_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn trunc(&self) -> Tensor[src]

pub fn trunc_(&mut self) -> Tensor[src]

pub fn trunc_out(&self, out: &Tensor) -> Tensor[src]

pub fn type_as(&self, other: &Tensor) -> Tensor[src]

pub fn unbind(&self, dim: i64) -> Vec<Tensor, Global>[src]

pub fn unflatten(&self, dim: i64, sizes: &[i64]) -> Tensor[src]

pub fn unfold(&self, dimension: i64, size: i64, step: i64) -> Tensor[src]

pub fn uniform_(&mut self, from: f64, to: f64) -> Tensor[src]

pub fn unique_consecutive(
    &self,
    return_inverse: bool,
    return_counts: bool,
    dim: impl Into<Option<i64>>
) -> (Tensor, Tensor, Tensor)
[src]

pub fn unique_dim(
    &self,
    dim: i64,
    sorted: bool,
    return_inverse: bool,
    return_counts: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn unique_dim_consecutive(
    &self,
    dim: i64,
    return_inverse: bool,
    return_counts: bool
) -> (Tensor, Tensor, Tensor)
[src]

pub fn unsafe_chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor, Global>[src]

pub fn unsafe_split(&self, split_size: i64, dim: i64) -> Vec<Tensor, Global>[src]

pub fn unsafe_split_with_sizes(
    &self,
    split_sizes: &[i64],
    dim: i64
) -> Vec<Tensor, Global>
[src]

pub fn unsqueeze(&self, dim: i64) -> Tensor[src]

pub fn unsqueeze_(&mut self, dim: i64) -> Tensor[src]

pub fn upsample_bicubic2d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_bicubic2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_bilinear2d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_bilinear2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_linear1d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_linear1d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest1d(
    &self,
    output_size: &[i64],
    scales: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest1d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest2d(
    &self,
    output_size: &[i64],
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest2d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest3d(
    &self,
    output_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_nearest3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_trilinear3d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn upsample_trilinear3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>
) -> Tensor
[src]

pub fn values(&self) -> Tensor[src]

pub fn var(&self, unbiased: bool) -> Tensor[src]

pub fn var1(&self, dim: &[i64], unbiased: bool, keepdim: bool) -> Tensor[src]

pub fn var_mean(&self, unbiased: bool) -> (Tensor, Tensor)[src]

pub fn var_mean1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> (Tensor, Tensor)
[src]

pub fn var_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool
) -> Tensor
[src]

pub fn vdot(&self, other: &Tensor) -> Tensor[src]

pub fn vdot_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn view_(&self, size: &[i64]) -> Tensor[src]

pub fn view1(&self, dtype: Kind) -> Tensor[src]

pub fn view_as(&self, other: &Tensor) -> Tensor[src]

pub fn view_as_complex(&self) -> Tensor[src]

pub fn view_as_real(&self) -> Tensor[src]

pub fn where1(&self, condition: &Tensor, other: &Tensor) -> Tensor[src]

pub fn where3<S>(&self, condition: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn xlogy(&self, other: &Tensor) -> Tensor[src]

pub fn xlogy2<S>(&self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn xlogy_(&mut self, other: &Tensor) -> Tensor[src]

pub fn xlogy_1<S>(&mut self, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn xlogy_out(&self, out: &Tensor, other: &Tensor) -> Tensor[src]

pub fn xlogy_out2<S>(&self, out: &Tensor, other: S) -> Tensor where
    S: Into<Scalar>, 
[src]

pub fn zero_(&mut self) -> Tensor[src]

pub fn zeros_like(&self) -> Tensor[src]

pub fn iter<T>(&self) -> Result<Iter<T>, TchError>[src]

pub fn write_npy<T>(&self, path: T) -> Result<(), TchError> where
    T: AsRef<Path>, 
[src]

Writes a tensor in the npy format so that it can be read from Python, e.g. with numpy.load.
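
A minimal round-trip sketch; the Tensor::of_slice constructor and the file name are assumptions of this example, not part of this page:

    use tch::Tensor;

    // Build a small 2x2 float tensor (of_slice/view assumed from the tch Tensor API).
    let t = Tensor::of_slice(&[1f32, 2.0, 3.0, 4.0]).view((2, 2));
    t.write_npy("t.npy").unwrap();
    // From Python: numpy.load("t.npy") recovers the same 2x2 float32 array.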

pub fn f_view<T>(&self, s: T) -> Result<Tensor, TchError> where
    T: Shape
[src]

pub fn view<T>(&self, s: T) -> Tensor where
    T: Shape
[src]

pub fn f_zero_pad1d(&self, left: i64, right: i64) -> Result<Tensor, TchError>[src]

pub fn zero_pad1d(&self, left: i64, right: i64) -> Tensor[src]

pub fn f_zero_pad2d(
    &self,
    left: i64,
    right: i64,
    top: i64,
    bottom: i64
) -> Result<Tensor, TchError>
[src]

pub fn zero_pad2d(&self, left: i64, right: i64, top: i64, bottom: i64) -> Tensor[src]

pub fn to_kind(&self, kind: Kind) -> Tensor[src]

Casts a tensor to a specified kind.
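
For example (Tensor::of_slice is assumed here as a convenient constructor):

    use tch::{Kind, Tensor};

    let t = Tensor::of_slice(&[1i64, 2, 3]);
    assert_eq!(t.kind(), Kind::Int64);
    let f = t.to_kind(Kind::Float);        // returns a new tensor with float elements
    assert_eq!(f.kind(), Kind::Float);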

pub fn f_to_kind(&self, kind: Kind) -> Result<Tensor, TchError>[src]

pub fn nll_loss(&self, targets: &Tensor) -> Tensor[src]

pub fn cross_entropy_for_logits(&self, targets: &Tensor) -> Tensor[src]

Computes the cross-entropy loss based on some logits and targets.
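
A small sketch with a batch of two samples over three classes; the constructors and the concrete values are assumptions of this example. The targets are expected to be int64 class indices of shape [batch]:

    use tch::Tensor;

    // logits: [batch, n_classes]; targets: class indices of shape [batch].
    let logits = Tensor::of_slice(&[2.0f32, 0.5, 0.1, 0.2, 1.5, 0.3]).view((2, 3));
    let targets = Tensor::of_slice(&[0i64, 1]);
    let loss = logits.cross_entropy_for_logits(&targets);
    println!("loss = {}", loss.double_value(&[]));   // single-element tensor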

pub fn accuracy_for_logits(&self, targets: &Tensor) -> Tensor[src]

Returns the average accuracy for some given logits, assuming that targets holds the ground-truth class indices.
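
For instance, with predictions that match both targets (again using the assumed Tensor::of_slice constructor):

    use tch::Tensor;

    let logits = Tensor::of_slice(&[2.0f32, 0.1, 0.1, 3.0]).view((2, 2));
    let targets = Tensor::of_slice(&[0i64, 1]);
    let acc = logits.accuracy_for_logits(&targets).double_value(&[]);
    assert!((acc - 1.0).abs() < 1e-6);   // both argmax predictions are correct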

pub fn random_batch(&self, batch_size: i64) -> Tensor[src]

pub fn to_device(&self, device: Device) -> Tensor[src]

Moves a tensor to a specified device.
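
A sketch; Device::cuda_if_available is assumed here and falls back to the CPU when no GPU is present:

    use tch::{Device, Tensor};

    let t = Tensor::of_slice(&[1f32, 2.0, 3.0]);
    let device = Device::cuda_if_available();
    let t = t.to_device(device);
    assert_eq!(t.device(), device);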

pub fn f_to_device(&self, device: Device) -> Result<Tensor, TchError>[src]

pub fn avg_pool2d_default(&self, ksize: i64) -> Tensor[src]

pub fn max_pool2d_default(&self, ksize: i64) -> Tensor[src]

pub fn flat_view(&self) -> Tensor[src]

Flattens a tensor.

This returns a flattened version of the given tensor. The first dimension is preserved, as it is assumed to be the mini-batch dimension.
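
For example, a batch of images of shape [8, 3, 4, 4] flattens to [8, 48]; Tensor::zeros is assumed as a constructor in this sketch:

    use tch::{Device, Kind, Tensor};

    let t = Tensor::zeros(&[8, 3, 4, 4], (Kind::Float, Device::Cpu));
    let flat = t.flat_view();
    assert_eq!(flat.size(), vec![8, 48]);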

pub fn onehot(&self, labels: i64) -> Tensor[src]

Converts a tensor to a one-hot encoded version.

If the input has a size [N1, N2, …, Nk], the returned tensor has a size [N1, …, Nk, labels]. The returned tensor uses float values. Elements of the input tensor are expected to be between 0 and labels-1.
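
For example, three labels over 4 classes produce a [3, 4] float tensor (Tensor::of_slice assumed):

    use tch::{Kind, Tensor};

    let labels = Tensor::of_slice(&[0i64, 2, 1]);   // values in 0..labels
    let oh = labels.onehot(4);
    assert_eq!(oh.size(), vec![3, 4]);
    assert_eq!(oh.kind(), Kind::Float);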

pub fn copy(&self) -> Tensor[src]

Copies a tensor to a newly allocated tensor using the same shape and device.
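
Unlike a view, modifying the copy leaves the original untouched; a minimal sketch, with the constructor assumed:

    use tch::Tensor;

    let a = Tensor::of_slice(&[1f32, 2.0, 3.0]);
    let mut b = a.copy();
    let _ = b.fill_(0.0);                    // only the copy changes
    assert_eq!(a.double_value(&[0]), 1.0);   // `a` still holds its original values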

pub fn to_mkldnn(&self) -> Tensor[src]

pub fn init(&mut self, i: Init)[src]

Re-initializes the tensor using the specified initialization.
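
A sketch using one of the tch::nn::Init variants; the Init::Uniform variant and Tensor::zeros constructor are assumptions of this example:

    use tch::{nn::Init, Device, Kind, Tensor};

    let mut w = Tensor::zeros(&[4, 4], (Kind::Float, Device::Cpu));
    w.init(Init::Uniform { lo: -0.1, up: 0.1 });   // overwrites `w` in place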

pub fn apply<M>(&self, m: &M) -> Tensor where
    M: Module
[src]

pub fn apply_t<M>(&self, m: &M, train: bool) -> Tensor where
    M: ModuleT
[src]

pub fn apply_opt<M>(&self, m: &Option<M>) -> Tensor where
    M: Module
[src]

pub fn apply_opt_t<M>(&self, m: &Option<M>, train: bool) -> Tensor where
    M: ModuleT
[src]

Trait Implementations

impl Debug for TensorFromMat[src]

impl Deref for TensorFromMat[src]

type Target = Tensor

The resulting type after dereferencing.

impl DerefMut for TensorFromMat[src]

impl Drop for TensorFromMat[src]

impl TryFromCv<Mat> for TensorFromMat[src]

type Error = Error
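
A sketch of the conversion this struct exists for; the opencv crate's Mat constructor, the image dimensions, and the element type used below are assumptions of this example:

    use cv_convert::{TensorFromMat, TryFromCv};
    use opencv::core::{Mat, Scalar, CV_8UC3};

    // An 8-bit, 3-channel 480x640 Mat filled with zeros (illustrative values only).
    let mat = Mat::new_rows_cols_with_default(480, 640, CV_8UC3, Scalar::all(0.0)).unwrap();
    let tensor = TensorFromMat::try_from_cv(mat).unwrap();
    // Deref gives access to the tch Tensor API without copying the pixel data.
    println!("{:?}", tensor.size());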

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T> Pointable for T

type Init = T

The type for initializers.

impl<T> Same<T> for T

type Output = T

Should always be Self

impl<SS, SP> SupersetOf<SS> for SP where
    SS: SubsetOf<SP>, 

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

impl<V, T> VZip<V> for T where
    V: MultiLane<T>,