#[allow(clippy::all)]
use crate::device::Device;
use crate::kind::Kind;
use crate::scalar::{C_scalar, Scalar};
use libc::c_int;
use super::c_wrapper::{C_tensor, Tensor};
// Low-level FFI declarations for the `atg_*` functions exported by the
// libtorch C wrapper. This file is machine-generated; do not edit the
// declarations by hand — regenerate them instead.
//
// Shared calling conventions, inferred from the signature patterns below
// (NOTE(review): confirm against the C wrapper's implementation):
//   * `out__` — pointer to a caller-provided array of `*mut C_tensor` slots;
//     the C side presumably writes the handle(s) of the result tensor(s)
//     there. Ops with multiple results (e.g. `atg_eig`, the max-pool family
//     returning values + indices) appear to use several consecutive slots.
//   * `self_`, `other_`, `input_`, `weight_`, ... — borrowed tensor handles.
//   * `result_` / `output_` / `grad_input_` on the `_out` variants — the
//     pre-allocated destination tensor the op writes into.
//   * `<name>_data` / `<name>_len` pairs — a slice passed as pointer plus
//     length (i64 lists for sizes/strides/padding; `*const *mut C_tensor`
//     for tensor lists such as in `atg_cat`).
//   * `options_kind` / `options_device` — element kind and device encoded
//     as C ints (see the `Kind` and `Device` types imported above).
//   * Naming: a numeric suffix (`atg_add1`, `atg_arange2`) distinguishes
//     overloads of the same ATen op; an underscore before the suffix
//     (`atg_add_`, `atg_add_1`) marks the in-place variant.
extern "C" {
// --- Unary math ops (abs/acos/asin/...) -------------------------------------
fn atg_abs(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_abs_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_abs_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_acos(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_acos_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_acos_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
// --- Adaptive pooling (forward / backward / out variants) -------------------
fn atg_adaptive_avg_pool1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_avg_pool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_avg_pool2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_adaptive_avg_pool2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_adaptive_avg_pool2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_avg_pool3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_avg_pool3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_adaptive_avg_pool3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_adaptive_avg_pool3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_max_pool1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_max_pool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_max_pool2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor);
fn atg_adaptive_max_pool2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor);
fn atg_adaptive_max_pool2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_max_pool3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_adaptive_max_pool3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor);
fn atg_adaptive_max_pool3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor);
fn atg_adaptive_max_pool3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
// --- Arithmetic / blas-style ops (add, addbmm, addmm, ...) ------------------
fn atg_add(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_add1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_add_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_add_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_add_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_addbmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_addbmm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_addbmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_addcdiv(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addcdiv_(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addcdiv_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addcmul(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addcmul_(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addcmul_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, tensor1_: *mut C_tensor, tensor2_: *mut C_tensor);
fn atg_addmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_addmm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_addmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_addmv(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat_: *mut C_tensor, vec_: *mut C_tensor);
fn atg_addmv_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat_: *mut C_tensor, vec_: *mut C_tensor);
fn atg_addmv_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat_: *mut C_tensor, vec_: *mut C_tensor);
fn atg_addr(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec1_: *mut C_tensor, vec2_: *mut C_tensor);
fn atg_addr_(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec1_: *mut C_tensor, vec2_: *mut C_tensor);
fn atg_addr_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, vec1_: *mut C_tensor, vec2_: *mut C_tensor);
fn atg_alias(out__: *mut *mut C_tensor, self_: *mut C_tensor);
// --- Reductions and predicates ----------------------------------------------
fn atg_all(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_all1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_all_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_alpha_dropout(out__: *mut *mut C_tensor, input_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_alpha_dropout_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_any(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_any1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_any_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
// --- Factory functions (arange, empty, eye, windows, ...) -------------------
fn atg_arange(out__: *mut *mut C_tensor, end_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_arange1(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_arange2(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, step_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_arange_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, end_: *mut C_scalar);
fn atg_arange_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar);
fn atg_arange_out2(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, step_: *mut C_scalar);
fn atg_argmax(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_argmax1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_argmin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_argmin1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_as_strided(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, stride_data: *const i64, stride_len: c_int);
fn atg_as_strided1(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, stride_data: *const i64, stride_len: c_int, storage_offset_: i64);
fn atg_as_strided_(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, stride_data: *const i64, stride_len: c_int);
fn atg_as_strided_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, stride_data: *const i64, stride_len: c_int, storage_offset_: i64);
fn atg_asin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_asin_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_asin_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_atan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_atan2(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_atan2_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_atan2_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_atan_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_atan_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
// --- Average pooling (forward / backward / out variants) --------------------
fn atg_avg_pool1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_avg_pool3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, ceil_mode_: c_int, count_include_pad_: c_int);
fn atg_baddbmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_baddbmm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_baddbmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, batch1_: *mut C_tensor, batch2_: *mut C_tensor);
fn atg_bartlett_window(out__: *mut *mut C_tensor, window_length_: i64, options_kind: c_int, options_device: c_int);
fn atg_bartlett_window1(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, options_kind: c_int, options_device: c_int);
fn atg_batch_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, training_: c_int, momentum_: f64, eps_: f64, cudnn_enabled_: c_int);
fn atg_bernoulli(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_bernoulli1(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
fn atg_bernoulli_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_tensor);
fn atg_bernoulli_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
fn atg_bernoulli_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_bilinear(out__: *mut *mut C_tensor, input1_: *mut C_tensor, input2_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor);
// --- Loss functions ---------------------------------------------------------
fn atg_binary_cross_entropy(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64);
fn atg_binary_cross_entropy_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64);
fn atg_binary_cross_entropy_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64);
fn atg_binary_cross_entropy_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64);
fn atg_binary_cross_entropy_with_logits(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, pos_weight_: *mut C_tensor, reduction_: i64);
fn atg_binary_cross_entropy_with_logits_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, pos_weight_: *mut C_tensor, reduction_: i64);
fn atg_bincount(out__: *mut *mut C_tensor, self_: *mut C_tensor, weights_: *mut C_tensor, minlength_: i64);
fn atg_blackman_window(out__: *mut *mut C_tensor, window_length_: i64, options_kind: c_int, options_device: c_int);
fn atg_blackman_window1(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, options_kind: c_int, options_device: c_int);
fn atg_bmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_bmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
// --- Linear-algebra factorizations ------------------------------------------
fn atg_btrifact(out__: *mut *mut C_tensor, self_: *mut C_tensor, pivot_: c_int);
fn atg_btrifact_out(out__: *mut *mut C_tensor, A_LU_: *mut C_tensor, pivots_: *mut C_tensor, self_: *mut C_tensor, pivot_: c_int);
fn atg_btrifact_with_info(out__: *mut *mut C_tensor, self_: *mut C_tensor, pivot_: c_int);
fn atg_btrifact_with_info_out(out__: *mut *mut C_tensor, A_LU_: *mut C_tensor, pivots_: *mut C_tensor, info_: *mut C_tensor, self_: *mut C_tensor, pivot_: c_int);
fn atg_btrisolve(out__: *mut *mut C_tensor, self_: *mut C_tensor, LU_data_: *mut C_tensor, LU_pivots_: *mut C_tensor);
fn atg_btrisolve_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, LU_data_: *mut C_tensor, LU_pivots_: *mut C_tensor);
fn atg_cat(out__: *mut *mut C_tensor, tensors_data: *const *mut C_tensor, tensors_len: c_int, dim_: i64);
fn atg_cat_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, tensors_data: *const *mut C_tensor, tensors_len: c_int, dim_: i64);
fn atg_cauchy_(out__: *mut *mut C_tensor, self_: *mut C_tensor, median_: f64, sigma_: f64);
fn atg_ceil(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_ceil_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_ceil_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_celu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_celu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_chain_matmul(out__: *mut *mut C_tensor, matrices_data: *const *mut C_tensor, matrices_len: c_int);
fn atg_cholesky(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_cholesky_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_clamp(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar, max_: *mut C_scalar);
fn atg_clamp_(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar, max_: *mut C_scalar);
fn atg_clamp_max(out__: *mut *mut C_tensor, self_: *mut C_tensor, max_: *mut C_scalar);
fn atg_clamp_max_(out__: *mut *mut C_tensor, self_: *mut C_tensor, max_: *mut C_scalar);
fn atg_clamp_max_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, max_: *mut C_scalar);
fn atg_clamp_min(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar);
fn atg_clamp_min_(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar);
fn atg_clamp_min_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar);
fn atg_clamp_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar, max_: *mut C_scalar);
fn atg_clone(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_coalesce(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_constant_pad_nd(out__: *mut *mut C_tensor, self_: *mut C_tensor, pad_data: *const i64, pad_len: c_int);
fn atg_contiguous(out__: *mut *mut C_tensor, self_: *mut C_tensor);
// --- Convolutions -----------------------------------------------------------
fn atg_conv1d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64);
fn atg_conv2d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64);
fn atg_conv3d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64);
fn atg_conv_tbc(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, pad_: i64);
fn atg_conv_tbc_backward(out__: *mut *mut C_tensor, self_: *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, pad_: i64);
fn atg_conv_transpose1d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, output_padding_data: *const i64, output_padding_len: c_int, groups_: i64, dilation_data: *const i64, dilation_len: c_int);
fn atg_conv_transpose2d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, output_padding_data: *const i64, output_padding_len: c_int, groups_: i64, dilation_data: *const i64, dilation_len: c_int);
fn atg_conv_transpose3d(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, output_padding_data: *const i64, output_padding_len: c_int, groups_: i64, dilation_data: *const i64, dilation_len: c_int);
fn atg_convolution(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, transposed_: c_int, output_padding_data: *const i64, output_padding_len: c_int, groups_: i64);
fn atg_copy_sparse_to_sparse_(out__: *mut *mut C_tensor, self_: *mut C_tensor, src_: *mut C_tensor, non_blocking_: c_int);
fn atg_cos(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_cos_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_cos_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_cosh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_cosh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_cosh_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_cosine_embedding_loss(out__: *mut *mut C_tensor, input1_: *mut C_tensor, input2_: *mut C_tensor, target_: *mut C_tensor, margin_: f64, reduction_: i64);
fn atg_cross(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor, dim_: i64);
fn atg_cross_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor, dim_: i64);
fn atg_ctc_loss(out__: *mut *mut C_tensor, log_probs_: *mut C_tensor, targets_: *mut C_tensor, input_lengths_data: *const i64, input_lengths_len: c_int, target_lengths_data: *const i64, target_lengths_len: c_int, blank_: i64, reduction_: i64);
fn atg_ctc_loss1(out__: *mut *mut C_tensor, log_probs_: *mut C_tensor, targets_: *mut C_tensor, input_lengths_: *mut C_tensor, target_lengths_: *mut C_tensor, blank_: i64, reduction_: i64);
// --- cuDNN-backed ops -------------------------------------------------------
fn atg_cudnn_affine_grid_generator(out__: *mut *mut C_tensor, theta_: *mut C_tensor, N_: i64, C_: i64, H_: i64, W_: i64);
fn atg_cudnn_affine_grid_generator_backward(out__: *mut *mut C_tensor, grad_: *mut C_tensor, N_: i64, C_: i64, H_: i64, W_: i64);
fn atg_cudnn_batch_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, training_: c_int, exponential_average_factor_: f64, epsilon_: f64);
fn atg_cudnn_batch_norm_backward(out__: *mut *mut C_tensor, input_: *mut C_tensor, grad_output_: *mut C_tensor, weight_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, save_mean_: *mut C_tensor, save_var_: *mut C_tensor, epsilon_: f64);
fn atg_cudnn_convolution(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_convolution_backward_bias(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor);
fn atg_cudnn_convolution_backward_input(out__: *mut *mut C_tensor, self_size_data: *const i64, self_size_len: c_int, grad_output_: *mut C_tensor, weight_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_convolution_backward_weight(out__: *mut *mut C_tensor, weight_size_data: *const i64, weight_size_len: c_int, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_convolution_transpose(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, output_padding_data: *const i64, output_padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_convolution_transpose_backward_bias(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor);
fn atg_cudnn_convolution_transpose_backward_input(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, weight_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_convolution_transpose_backward_weight(out__: *mut *mut C_tensor, weight_size_data: *const i64, weight_size_len: c_int, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_cudnn_grid_sampler(out__: *mut *mut C_tensor, self_: *mut C_tensor, grid_: *mut C_tensor);
fn atg_cudnn_grid_sampler_backward(out__: *mut *mut C_tensor, self_: *mut C_tensor, grid_: *mut C_tensor, grad_output_: *mut C_tensor);
// --- Cumulative ops and misc ------------------------------------------------
fn atg_cumprod(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_cumprod1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_cumprod_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_cumprod_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_cumsum(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_cumsum1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_cumsum_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_cumsum_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_det(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_detach(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_detach_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_diag(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_diag_embed(out__: *mut *mut C_tensor, self_: *mut C_tensor, offset_: i64, dim1_: i64, dim2_: i64);
fn atg_diag_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_diagflat(out__: *mut *mut C_tensor, self_: *mut C_tensor, offset_: i64);
fn atg_diagonal(out__: *mut *mut C_tensor, self_: *mut C_tensor, offset_: i64, dim1_: i64, dim2_: i64);
fn atg_digamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_digamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_digamma_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_dist(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_div(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_div1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_div_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_div_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_div_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_dot(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor_: *mut C_tensor);
fn atg_dot_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, tensor_: *mut C_tensor);
fn atg_dropout(out__: *mut *mut C_tensor, input_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_dropout_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_eig(out__: *mut *mut C_tensor, self_: *mut C_tensor, eigenvectors_: c_int);
fn atg_eig_out(out__: *mut *mut C_tensor, e_: *mut C_tensor, v_: *mut C_tensor, self_: *mut C_tensor, eigenvectors_: c_int);
fn atg_elu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_elu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_elu_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
// --- Embeddings -------------------------------------------------------------
fn atg_embedding(out__: *mut *mut C_tensor, weight_: *mut C_tensor, indices_: *mut C_tensor, padding_idx_: i64, scale_grad_by_freq_: c_int, sparse_: c_int);
fn atg_embedding_backward(out__: *mut *mut C_tensor, grad_: *mut C_tensor, indices_: *mut C_tensor, num_weights_: i64, padding_idx_: i64, scale_grad_by_freq_: c_int, sparse_: c_int);
fn atg_embedding_bag(out__: *mut *mut C_tensor, weight_: *mut C_tensor, indices_: *mut C_tensor, offsets_: *mut C_tensor, scale_grad_by_freq_: c_int, mode_: i64, sparse_: c_int);
fn atg_embedding_dense_backward(out__: *mut *mut C_tensor, grad_: *mut C_tensor, indices_: *mut C_tensor, num_weights_: i64, padding_idx_: i64, scale_grad_by_freq_: c_int);
fn atg_embedding_renorm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, max_norm_: f64, norm_type_: f64);
fn atg_embedding_sparse_backward(out__: *mut *mut C_tensor, grad_: *mut C_tensor, indices_: *mut C_tensor, num_weights_: i64, padding_idx_: i64, scale_grad_by_freq_: c_int);
fn atg_empty(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_empty_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_empty_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_empty_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_empty_strided(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, stride_data: *const i64, stride_len: c_int, options_kind: c_int, options_device: c_int);
// --- Comparisons ------------------------------------------------------------
fn atg_eq(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_eq1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_eq_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_eq_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_eq_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_eq_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_erf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erf_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erf_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_erfc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erfc_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erfc_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_erfinv(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erfinv_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_erfinv_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_exp(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_exp_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_exp_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_expand(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, implicit_: c_int);
fn atg_expand_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_expm1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_expm1_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_expm1_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_exponential_(out__: *mut *mut C_tensor, self_: *mut C_tensor, lambd_: f64);
fn atg_eye(out__: *mut *mut C_tensor, n_: i64, options_kind: c_int, options_device: c_int);
fn atg_eye1(out__: *mut *mut C_tensor, n_: i64, m_: i64, options_kind: c_int, options_device: c_int);
fn atg_eye_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, n_: i64);
fn atg_eye_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, n_: i64, m_: i64);
fn atg_feature_alpha_dropout(out__: *mut *mut C_tensor, input_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_feature_alpha_dropout_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_feature_dropout(out__: *mut *mut C_tensor, input_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_feature_dropout_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64, train_: c_int);
fn atg_fft(out__: *mut *mut C_tensor, self_: *mut C_tensor, signal_ndim_: i64, normalized_: c_int);
fn atg_fill_(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_scalar);
fn atg_fill_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_tensor);
fn atg_flatten(out__: *mut *mut C_tensor, self_: *mut C_tensor, start_dim_: i64, end_dim_: i64);
fn atg_flip(out__: *mut *mut C_tensor, self_: *mut C_tensor, dims_data: *const i64, dims_len: c_int);
fn atg_floor(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_floor_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_floor_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_fmod(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_fmod1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_fmod_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_fmod_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_fmod_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_fmod_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_frac(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_frac_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_frac_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
// --- Fractional max pooling -------------------------------------------------
fn atg_fractional_max_pool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, output_size_data: *const i64, output_size_len: c_int, random_samples_: *mut C_tensor);
fn atg_fractional_max_pool2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, output_size_data: *const i64, output_size_len: c_int, indices_: *mut C_tensor);
fn atg_fractional_max_pool2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, output_size_data: *const i64, output_size_len: c_int, indices_: *mut C_tensor);
fn atg_fractional_max_pool2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, output_size_data: *const i64, output_size_len: c_int, random_samples_: *mut C_tensor);
fn atg_frobenius_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_frobenius_norm1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int);
fn atg_frobenius_norm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int);
fn atg_full(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, fill_value_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_full_like(out__: *mut *mut C_tensor, self_: *mut C_tensor, fill_value_: *mut C_scalar);
fn atg_full_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, fill_value_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_full_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int, fill_value_: *mut C_scalar);
fn atg_gather(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor);
fn atg_gather_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor);
fn atg_ge(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ge1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_ge_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ge_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_ge_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ge_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_gels(out__: *mut *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor);
fn atg_gels_out(out__: *mut *mut C_tensor, X_: *mut C_tensor, qr_: *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor);
fn atg_geometric_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
fn atg_geqrf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_geqrf_out(out__: *mut *mut C_tensor, result0_: *mut C_tensor, result1_: *mut C_tensor, self_: *mut C_tensor);
fn atg_ger(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec2_: *mut C_tensor);
fn atg_ger_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, vec2_: *mut C_tensor);
fn atg_gesv(out__: *mut *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor);
fn atg_gesv_out(out__: *mut *mut C_tensor, solution_: *mut C_tensor, lu_: *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor);
fn atg_glu(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_glu_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_glu_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_glu_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_grad(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_grid_sampler(out__: *mut *mut C_tensor, input_: *mut C_tensor, grid_: *mut C_tensor, interpolation_mode_: i64, padding_mode_: i64);
fn atg_grid_sampler_2d(out__: *mut *mut C_tensor, input_: *mut C_tensor, grid_: *mut C_tensor, interpolation_mode_: i64, padding_mode_: i64);
fn atg_grid_sampler_2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, input_: *mut C_tensor, grid_: *mut C_tensor, interpolation_mode_: i64, padding_mode_: i64);
fn atg_grid_sampler_3d(out__: *mut *mut C_tensor, input_: *mut C_tensor, grid_: *mut C_tensor, interpolation_mode_: i64, padding_mode_: i64);
fn atg_grid_sampler_3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, input_: *mut C_tensor, grid_: *mut C_tensor, interpolation_mode_: i64, padding_mode_: i64);
fn atg_group_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, num_groups_: i64, weight_: *mut C_tensor, bias_: *mut C_tensor, eps_: f64, cudnn_enabled_: c_int);
fn atg_gru(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int, batch_first_: c_int);
fn atg_gru1(out__: *mut *mut C_tensor, data_: *mut C_tensor, batch_sizes_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int);
fn atg_gru_cell(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, w_ih_: *mut C_tensor, w_hh_: *mut C_tensor, b_ih_: *mut C_tensor, b_hh_: *mut C_tensor);
fn atg_gt(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_gt1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_gt_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_gt_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_gt_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_gt_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_hamming_window(out__: *mut *mut C_tensor, window_length_: i64, options_kind: c_int, options_device: c_int);
fn atg_hamming_window1(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, options_kind: c_int, options_device: c_int);
fn atg_hamming_window2(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, alpha_: f64, options_kind: c_int, options_device: c_int);
fn atg_hamming_window3(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, alpha_: f64, beta_: f64, options_kind: c_int, options_device: c_int);
fn atg_hann_window(out__: *mut *mut C_tensor, window_length_: i64, options_kind: c_int, options_device: c_int);
fn atg_hann_window1(out__: *mut *mut C_tensor, window_length_: i64, periodic_: c_int, options_kind: c_int, options_device: c_int);
fn atg_hardshrink(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_hardshrink_backward(out__: *mut *mut C_tensor, grad_out_: *mut C_tensor, self_: *mut C_tensor, lambd_: *mut C_scalar);
fn atg_hardtanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_hardtanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_hardtanh_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_hinge_embedding_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, margin_: f64, reduction_: i64);
fn atg_histc(out__: *mut *mut C_tensor, self_: *mut C_tensor, bins_: i64);
fn atg_histc_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, bins_: i64);
fn atg_hspmm(out__: *mut *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_hspmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_ifft(out__: *mut *mut C_tensor, self_: *mut C_tensor, signal_ndim_: i64, normalized_: c_int);
fn atg_index(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_data: *const *mut C_tensor, indices_len: c_int);
fn atg_index_add_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, source_: *mut C_tensor);
fn atg_index_copy_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, source_: *mut C_tensor);
fn atg_index_fill_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, value_: *mut C_scalar);
fn atg_index_fill_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, value_: *mut C_tensor);
fn atg_index_put(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_data: *const *mut C_tensor, indices_len: c_int, values_: *mut C_tensor);
fn atg_index_put_(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_data: *const *mut C_tensor, indices_len: c_int, values_: *mut C_tensor);
fn atg_index_select(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor);
fn atg_index_select_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor);
fn atg_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_instance_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, use_input_stats_: c_int, momentum_: f64, eps_: f64, cudnn_enabled_: c_int);
fn atg_inverse(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_inverse_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_irfft(out__: *mut *mut C_tensor, self_: *mut C_tensor, signal_ndim_: i64, normalized_: c_int, onesided_: c_int, signal_sizes_data: *const i64, signal_sizes_len: c_int);
fn atg_isclose(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor, rtol_: f64, atol_: f64, equal_nan_: c_int);
fn atg_kl_div(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_kl_div_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_kthvalue(out__: *mut *mut C_tensor, self_: *mut C_tensor, k_: i64, dim_: i64, keepdim_: c_int);
fn atg_kthvalue_out(out__: *mut *mut C_tensor, values_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, k_: i64, dim_: i64, keepdim_: c_int);
fn atg_l1_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_l1_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_l1_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_l1_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_layer_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, normalized_shape_data: *const i64, normalized_shape_len: c_int, weight_: *mut C_tensor, bias_: *mut C_tensor, eps_: f64, cudnn_enable_: c_int);
fn atg_le(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_le1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_le_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_le_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_le_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_le_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_leaky_relu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_leaky_relu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_leaky_relu_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_lerp(out__: *mut *mut C_tensor, self_: *mut C_tensor, end_: *mut C_tensor, weight_: *mut C_scalar);
fn atg_lerp_(out__: *mut *mut C_tensor, self_: *mut C_tensor, end_: *mut C_tensor, weight_: *mut C_scalar);
fn atg_lerp_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, end_: *mut C_tensor, weight_: *mut C_scalar);
fn atg_lgamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_lgamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_lgamma_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_linear(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor);
fn atg_linspace(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_linspace1(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, steps_: i64, options_kind: c_int, options_device: c_int);
fn atg_linspace_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar);
fn atg_linspace_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, steps_: i64);
fn atg_log(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log10(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log10_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log10_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_log1p(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log1p_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log1p_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_log2(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log2_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log2_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_log_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log_normal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mean_: f64, std_: f64);
fn atg_log_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_log_sigmoid(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_log_sigmoid_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, buffer_: *mut C_tensor);
fn atg_log_sigmoid_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, buffer_: *mut C_tensor);
fn atg_log_sigmoid_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_log_softmax(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_log_softmax1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_logdet(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_logspace(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_logspace1(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, steps_: i64, options_kind: c_int, options_device: c_int);
fn atg_logspace_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar);
fn atg_logspace_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, steps_: i64);
fn atg_logsumexp(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_logsumexp_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_lstm(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_data: *const *mut C_tensor, hx_len: c_int, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int, batch_first_: c_int);
fn atg_lstm1(out__: *mut *mut C_tensor, data_: *mut C_tensor, batch_sizes_: *mut C_tensor, hx_data: *const *mut C_tensor, hx_len: c_int, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int);
fn atg_lstm_cell(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_data: *const *mut C_tensor, hx_len: c_int, w_ih_: *mut C_tensor, w_hh_: *mut C_tensor, b_ih_: *mut C_tensor, b_hh_: *mut C_tensor);
fn atg_lt(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_lt1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_lt_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_lt_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_lt_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_lt_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_margin_ranking_loss(out__: *mut *mut C_tensor, input1_: *mut C_tensor, input2_: *mut C_tensor, target_: *mut C_tensor, margin_: f64, reduction_: i64);
fn atg_masked_fill_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor, value_: *mut C_scalar);
fn atg_masked_fill_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor, value_: *mut C_tensor);
fn atg_masked_scatter_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor, source_: *mut C_tensor);
fn atg_masked_select(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor);
fn atg_masked_select_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor);
fn atg_matmul(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_matmul_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_matrix_power(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_: i64);
fn atg_matrix_rank(out__: *mut *mut C_tensor, self_: *mut C_tensor, symmetric_: c_int);
fn atg_matrix_rank1(out__: *mut *mut C_tensor, self_: *mut C_tensor, tol_: f64, symmetric_: c_int);
fn atg_max(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_max1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_max2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_max_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_max_out1(out__: *mut *mut C_tensor, max_: *mut C_tensor, max_values_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_max_pool1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool1d_with_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool2d_with_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool2d_with_indices_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int, indices_: *mut C_tensor);
fn atg_max_pool2d_with_indices_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int, indices_: *mut C_tensor);
fn atg_max_pool2d_with_indices_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool3d_with_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_pool3d_with_indices_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int, indices_: *mut C_tensor);
fn atg_max_pool3d_with_indices_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int, indices_: *mut C_tensor);
fn atg_max_pool3d_with_indices_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, kernel_size_data: *const i64, kernel_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int, dilation_data: *const i64, dilation_len: c_int, ceil_mode_: c_int);
fn atg_max_unpool2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_max_unpool2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_max_unpool2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_max_unpool2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_max_unpool3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int);
fn atg_max_unpool3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int);
fn atg_max_unpool3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int);
fn atg_max_unpool3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, indices_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, stride_data: *const i64, stride_len: c_int, padding_data: *const i64, padding_len: c_int);
fn atg_max_values(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_mean(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_mean1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
fn atg_mean2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_mean3(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_mean4(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int, dtype_: c_int);
fn atg_mean_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_mean_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_mean_out2(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int, dtype_: c_int);
fn atg_median(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_median1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_median_out(out__: *mut *mut C_tensor, values_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_min(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_min1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_min2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_min_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_min_out1(out__: *mut *mut C_tensor, min_: *mut C_tensor, min_indices_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_min_values(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_miopen_batch_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, training_: c_int, exponential_average_factor_: f64, epsilon_: f64);
fn atg_miopen_batch_norm_backward(out__: *mut *mut C_tensor, input_: *mut C_tensor, grad_output_: *mut C_tensor, weight_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, save_mean_: *mut C_tensor, save_var_: *mut C_tensor, epsilon_: f64);
fn atg_miopen_convolution(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_miopen_convolution_backward_bias(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor);
fn atg_miopen_convolution_backward_input(out__: *mut *mut C_tensor, self_size_data: *const i64, self_size_len: c_int, grad_output_: *mut C_tensor, weight_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_miopen_convolution_backward_weight(out__: *mut *mut C_tensor, weight_size_data: *const i64, weight_size_len: c_int, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_miopen_convolution_transpose(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, output_padding_data: *const i64, output_padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_miopen_convolution_transpose_backward_input(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, weight_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_miopen_convolution_transpose_backward_weight(out__: *mut *mut C_tensor, weight_size_data: *const i64, weight_size_len: c_int, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, benchmark_: c_int, deterministic_: c_int);
fn atg_mkldnn_convolution(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64);
fn atg_mkldnn_convolution_backward_input(out__: *mut *mut C_tensor, self_size_data: *const i64, self_size_len: c_int, grad_output_: *mut C_tensor, weight_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, bias_defined_: c_int);
fn atg_mkldnn_convolution_backward_weights(out__: *mut *mut C_tensor, weight_size_data: *const i64, weight_size_len: c_int, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int, stride_data: *const i64, stride_len: c_int, dilation_data: *const i64, dilation_len: c_int, groups_: i64, bias_defined_: c_int);
// --- Matrix products, mode, MSE loss, multiplication, multilabel margin loss,
// --- multinomial sampling, matrix-vector product, mvlgamma, narrow ---
// Naming conventions (inferred from the pattern of these generated declarations —
// consistent with PyTorch's ATen naming; confirm against the C wrapper):
//   * trailing `_` (e.g. atg_mul_)    : presumably the in-place variant;
//   * trailing digit (atg_mul1)      : an overload (here: scalar `other_`);
//   * `_out` suffix                  : writes into a caller-supplied result tensor.
fn atg_mm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_mm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_mode(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_mode_out(out__: *mut *mut C_tensor, values_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_mse_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_mse_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_mse_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_mse_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_mul(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_mul1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_mul_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_mul_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_mul_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_multilabel_margin_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_multilabel_margin_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64, is_target_: *mut C_tensor);
fn atg_multilabel_margin_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64, is_target_: *mut C_tensor);
fn atg_multilabel_margin_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_multinomial(out__: *mut *mut C_tensor, self_: *mut C_tensor, num_samples_: i64, replacement_: c_int);
fn atg_multinomial_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, num_samples_: i64, replacement_: c_int);
fn atg_mv(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec_: *mut C_tensor);
fn atg_mv_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, vec_: *mut C_tensor);
fn atg_mvlgamma(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: i64);
fn atg_mvlgamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: i64);
fn atg_narrow(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, start_: i64, length_: i64);
fn atg_narrow_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, start_: i64, length_: i64);
// --- `native_*` variants (batch norm, clone, norm, pow, resize_as_, zero_) ---
// FFI declarations; the `native_` prefix mirrors the upstream op names and
// presumably selects the library's native (non-dispatched) implementation —
// TODO confirm against the wrapped library's documentation.
fn atg_native_batch_norm(out__: *mut *mut C_tensor, input_: *mut C_tensor, weight_: *mut C_tensor, bias_: *mut C_tensor, running_mean_: *mut C_tensor, running_var_: *mut C_tensor, training_: c_int, momentum_: f64, eps_: f64);
fn atg_native_clone(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_native_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_native_pow(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
fn atg_native_pow_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
fn atg_native_resize_as_(out__: *mut *mut C_tensor, self_: *mut C_tensor, the_template_: *mut C_tensor);
fn atg_native_zero_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
// --- Not-equal comparison, negation, and the NLL-loss family ---
// `atg_ne` takes a scalar `other_`; `atg_ne1` is the tensor-tensor overload
// (numeric suffixes distinguish overloads throughout this generated file).
// The nll_loss declarations pass `reduction_` and `ignore_index_` as raw i64 —
// their enum meanings live on the C/library side and are not visible here.
fn atg_ne(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ne1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_ne_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ne_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_ne_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_ne_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_neg(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_neg_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_neg_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_nll_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64);
fn atg_nll_loss2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64);
fn atg_nll_loss2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64, total_weight_: *mut C_tensor);
fn atg_nll_loss2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64, total_weight_: *mut C_tensor);
fn atg_nll_loss2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64);
fn atg_nll_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64, total_weight_: *mut C_tensor);
fn atg_nll_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64, total_weight_: *mut C_tensor);
fn atg_nll_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, weight_: *mut C_tensor, reduction_: i64, ignore_index_: i64);
// --- nonzero, norm family, normal-distribution sampling, nuclear norm ---
// atg_normal / atg_normal1 / atg_normal2 are overloads over which of
// mean/std is a tensor vs. an f64; atg_normal_ is presumably in-place
// sampling into `self_` (underscore-suffix convention of this file).
fn atg_nonzero(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_nonzero_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_norm1(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar, dim_: i64, keepdim_: c_int);
fn atg_norm_except_dim(out__: *mut *mut C_tensor, v_: *mut C_tensor, pow_: i64, dim_: i64);
fn atg_norm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar, dim_: i64, keepdim_: c_int);
fn atg_normal(out__: *mut *mut C_tensor, mean_: *mut C_tensor, std_: f64);
fn atg_normal1(out__: *mut *mut C_tensor, mean_: f64, std_: *mut C_tensor);
fn atg_normal2(out__: *mut *mut C_tensor, mean_: *mut C_tensor, std_: *mut C_tensor);
fn atg_normal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mean_: f64, std_: f64);
fn atg_normal_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, mean_: *mut C_tensor, std_: f64);
fn atg_normal_out1(out__: *mut *mut C_tensor, output_: *mut C_tensor, mean_: f64, std_: *mut C_tensor);
fn atg_normal_out2(out__: *mut *mut C_tensor, output_: *mut C_tensor, mean_: *mut C_tensor, std_: *mut C_tensor);
fn atg_nuclear_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor, keepdim_: c_int);
fn atg_nuclear_norm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, keepdim_: c_int);
// --- Tensor creation (ones), QR-related helpers (orgqr/ormqr), distances ---
// `options_kind` / `options_device` encode element type and device as c_int;
// they presumably correspond to the crate's `Kind` and `Device` enums imported
// at the top of this file — confirm the exact mapping in the C wrapper.
fn atg_ones(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_ones_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_ones_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_ones_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_orgqr(out__: *mut *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor);
fn atg_orgqr_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor);
fn atg_ormqr(out__: *mut *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor, input3_: *mut C_tensor, left_: c_int, transpose_: c_int);
fn atg_ormqr_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor, input3_: *mut C_tensor, left_: c_int, transpose_: c_int);
fn atg_pairwise_distance(out__: *mut *mut C_tensor, x1_: *mut C_tensor, x2_: *mut C_tensor, p_: f64, eps_: f64, keepdim_: c_int);
fn atg_pdist(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
// --- permute, pin_memory, pinverse, pixel_shuffle, poisson, polygamma,
// --- potri/potrs (Cholesky-based inverse/solve), pow family ---
// The pow overloads cover every scalar/tensor combination for base and
// exponent: atg_pow (tensor, scalar), atg_pow1 (tensor, tensor),
// atg_pow2 (scalar base, tensor exponent), plus in-place and `_out` forms.
fn atg_permute(out__: *mut *mut C_tensor, self_: *mut C_tensor, dims_data: *const i64, dims_len: c_int);
fn atg_pin_memory(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_pinverse(out__: *mut *mut C_tensor, self_: *mut C_tensor, rcond_: f64);
fn atg_pixel_shuffle(out__: *mut *mut C_tensor, self_: *mut C_tensor, upscale_factor_: i64);
fn atg_poisson(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_polygamma(out__: *mut *mut C_tensor, n_: i64, self_: *mut C_tensor);
fn atg_polygamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_: i64);
fn atg_polygamma_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, n_: i64, self_: *mut C_tensor);
fn atg_potri(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_potri_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_potrs(out__: *mut *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor, upper_: c_int);
fn atg_potrs_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor, upper_: c_int);
fn atg_pow(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
fn atg_pow1(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_tensor);
fn atg_pow2(out__: *mut *mut C_tensor, self_scalar_: *mut C_scalar, exponent_: *mut C_tensor);
fn atg_pow_(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
fn atg_pow_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_tensor);
fn atg_pow_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
fn atg_pow_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_tensor);
fn atg_pow_out2(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_scalar_: *mut C_scalar, exponent_: *mut C_tensor);
// --- prelu, prod reduction family, pstrf, put_, qr ---
// `dtype_: c_int` on the prod overloads presumably selects the accumulation /
// output element type (same encoding as options_kind elsewhere in this file).
// Ops returning multiple tensors (pstrf -> factor+pivots, qr -> Q+R) write
// several pointers into `out__`; the exact count is defined by the C wrapper.
fn atg_prelu(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor);
fn atg_prelu_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor);
fn atg_prod(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_prod1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
fn atg_prod2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_prod3(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_prod4(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int, dtype_: c_int);
fn atg_prod_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
fn atg_prod_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_prod_out2(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int, dtype_: c_int);
fn atg_pstrf(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_pstrf_out(out__: *mut *mut C_tensor, u_: *mut C_tensor, piv_: *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
fn atg_put_(out__: *mut *mut C_tensor, self_: *mut C_tensor, index_: *mut C_tensor, source_: *mut C_tensor, accumulate_: c_int);
fn atg_qr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_qr_out(out__: *mut *mut C_tensor, Q_: *mut C_tensor, R_: *mut C_tensor, self_: *mut C_tensor);
// --- Random tensor creation (rand / randint / randn / random_ / randperm)
// --- and range construction ---
// randint overloads: bare = [0, high); `1` = [low, high); `_like*` reuse an
// existing tensor's shape; trailing options_kind/options_device select
// dtype/device. atg_random_* presumably fill `self_` in place (underscore
// convention) with no/upper/both bounds respectively.
fn atg_rand(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_rand_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_rand_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_rand_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_randint(out__: *mut *mut C_tensor, high_: i64, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_randint1(out__: *mut *mut C_tensor, low_: i64, high_: i64, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_randint_like(out__: *mut *mut C_tensor, self_: *mut C_tensor, high_: i64);
fn atg_randint_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, low_: i64, high_: i64);
fn atg_randint_like2(out__: *mut *mut C_tensor, self_: *mut C_tensor, high_: i64, options_kind: c_int, options_device: c_int);
fn atg_randint_like3(out__: *mut *mut C_tensor, self_: *mut C_tensor, low_: i64, high_: i64, options_kind: c_int, options_device: c_int);
fn atg_randint_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, high_: i64, size_data: *const i64, size_len: c_int);
fn atg_randint_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, low_: i64, high_: i64, size_data: *const i64, size_len: c_int);
fn atg_randn(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_randn_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_randn_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_randn_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_random_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_random_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, to_: i64);
fn atg_random_2(out__: *mut *mut C_tensor, self_: *mut C_tensor, from_: i64, to_: i64);
fn atg_randperm(out__: *mut *mut C_tensor, n_: i64, options_kind: c_int, options_device: c_int);
fn atg_randperm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, n_: i64);
fn atg_range(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_range1(out__: *mut *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, step_: *mut C_scalar, options_kind: c_int, options_device: c_int);
fn atg_range_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar);
fn atg_range_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, start_: *mut C_scalar, end_: *mut C_scalar, step_: *mut C_scalar);
// --- reciprocal and reflection padding (1d / 2d) with backward passes ---
// `padding_data`/`padding_len` pass the per-edge padding as an i64 slice.
fn atg_reciprocal(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_reciprocal_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_reciprocal_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_reflection_pad1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad1d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad1d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad1d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reflection_pad2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
// --- relu, remainder (scalar/tensor overloads), renorm ---
fn atg_relu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_relu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_remainder(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_remainder1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_remainder_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_remainder_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_remainder_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_remainder_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_renorm(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar, dim_: i64, maxnorm_: *mut C_scalar);
fn atg_renorm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar, dim_: i64, maxnorm_: *mut C_scalar);
fn atg_renorm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar, dim_: i64, maxnorm_: *mut C_scalar);
// --- repeat, replication padding (1d/2d/3d) with backward passes,
// --- reshape, resize ---
fn atg_repeat(out__: *mut *mut C_tensor, self_: *mut C_tensor, repeats_data: *const i64, repeats_len: c_int);
fn atg_replication_pad1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad1d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad1d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad1d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_replication_pad3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, padding_data: *const i64, padding_len: c_int);
fn atg_reshape(out__: *mut *mut C_tensor, self_: *mut C_tensor, shape_data: *const i64, shape_len: c_int);
fn atg_reshape_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_resize_(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_resize_as_(out__: *mut *mut C_tensor, self_: *mut C_tensor, the_template_: *mut C_tensor);
// --- rfft, RNN (relu/tanh) layers and cells, ROI-pooling backward ---
// RNN declarations pass the flat weight list as a `*const *mut C_tensor`
// array (`params_data`/`params_len`); the `1` overloads take packed-sequence
// inputs (`data_` + `batch_sizes_`) instead of a batched `input_`.
fn atg_rfft(out__: *mut *mut C_tensor, self_: *mut C_tensor, signal_ndim_: i64, normalized_: c_int, onesided_: c_int);
fn atg_rnn_relu(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int, batch_first_: c_int);
fn atg_rnn_relu1(out__: *mut *mut C_tensor, data_: *mut C_tensor, batch_sizes_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int);
fn atg_rnn_relu_cell(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, w_ih_: *mut C_tensor, w_hh_: *mut C_tensor, b_ih_: *mut C_tensor, b_hh_: *mut C_tensor);
fn atg_rnn_tanh(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int, batch_first_: c_int);
fn atg_rnn_tanh1(out__: *mut *mut C_tensor, data_: *mut C_tensor, batch_sizes_: *mut C_tensor, hx_: *mut C_tensor, params_data: *const *mut C_tensor, params_len: c_int, has_biases_: c_int, num_layers_: i64, dropout_: f64, train_: c_int, bidirectional_: c_int);
fn atg_rnn_tanh_cell(out__: *mut *mut C_tensor, input_: *mut C_tensor, hx_: *mut C_tensor, w_ih_: *mut C_tensor, w_hh_: *mut C_tensor, b_ih_: *mut C_tensor, b_hh_: *mut C_tensor);
fn atg_roipooling2d_backward(out__: *mut *mut C_tensor, input_: *mut C_tensor, rois_: *mut C_tensor, pooledHeight_: i64, pooledWidth_: i64, spatialScale_: f64, gradOutput_: *mut C_tensor, argmaxes_: *mut C_tensor);
// --- roll, rot90, round, randomized relu (rrelu), rsqrt, reversed sub ---
fn atg_roll(out__: *mut *mut C_tensor, self_: *mut C_tensor, shifts_data: *const i64, shifts_len: c_int, dims_data: *const i64, dims_len: c_int);
fn atg_rot90(out__: *mut *mut C_tensor, self_: *mut C_tensor, k_: i64, dims_data: *const i64, dims_len: c_int);
fn atg_round(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_round_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_round_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_rrelu(out__: *mut *mut C_tensor, self_: *mut C_tensor, training_: c_int);
fn atg_rrelu_(out__: *mut *mut C_tensor, self_: *mut C_tensor, training_: c_int);
fn atg_rrelu_with_noise(out__: *mut *mut C_tensor, self_: *mut C_tensor, noise_: *mut C_tensor, training_: c_int);
fn atg_rrelu_with_noise_(out__: *mut *mut C_tensor, self_: *mut C_tensor, noise_: *mut C_tensor, training_: c_int);
fn atg_rrelu_with_noise_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, noise_: *mut C_tensor, training_: c_int);
fn atg_rsqrt(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_rsqrt_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_rsqrt_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_rsub(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_rsub1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
// --- s_native_addmm, scatter / scatter_add, select, selu, set_ ---
// atg_scatter_ writes `src_` tensor values at `index_` positions along `dim_`;
// atg_scatter_1 is the scalar-value overload.
fn atg_s_native_addmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_s_native_addmm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_s_native_addmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_scatter_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, src_: *mut C_tensor);
fn atg_scatter_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, value_: *mut C_scalar);
fn atg_scatter_add_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: *mut C_tensor, src_: *mut C_tensor);
fn atg_select(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: i64);
fn atg_selu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_selu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_set_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_set_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, source_: *mut C_tensor);
fn atg_set_requires_grad(out__: *mut *mut C_tensor, self_: *mut C_tensor, r_: c_int);
// --- Element-wise unary ops (sigmoid, sign, sin, sinh), slice, slogdet, smm ---
fn atg_sigmoid(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sigmoid_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sigmoid_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_sign(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sign_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sign_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_sin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sin_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sin_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_sinh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sinh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sinh_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_slice(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, start_: i64, end_: i64, step_: i64);
fn atg_slogdet(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_smm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
// --- smooth L1 / soft margin losses, softmax, softplus, softshrink, sort ---
// NOTE(review): atg_softplus / atg_softshrink take no beta/threshold/lambda
// arguments here — presumably the generator baked in the library defaults;
// confirm against the C wrapper before extending.
fn atg_smooth_l1_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_smooth_l1_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_smooth_l1_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_smooth_l1_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_soft_margin_loss(out__: *mut *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_soft_margin_loss_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_soft_margin_loss_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_soft_margin_loss_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, target_: *mut C_tensor, reduction_: i64);
fn atg_softmax(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_softmax1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
fn atg_softplus(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_softplus_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_softshrink(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_softshrink_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor);
fn atg_sort(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, descending_: c_int);
fn atg_sort_out(out__: *mut *mut C_tensor, values_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, descending_: c_int);
// --- Sparse COO tensors, sqrt, squeeze, sspaddmm, stack, std, stft ---
// atg_stack takes an array of tensor pointers (`tensors_data`/`tensors_len`).
// NOTE(review): atg_stft takes hop_length_/win_length_ as plain i64 — unclear
// from here what sentinel (if any) encodes "use default"; check the C wrapper.
fn atg_sparse_coo_tensor(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_sparse_coo_tensor1(out__: *mut *mut C_tensor, indices_: *mut C_tensor, values_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_sparse_coo_tensor2(out__: *mut *mut C_tensor, indices_: *mut C_tensor, values_: *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_sparse_resize_(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, sparse_dim_: i64, dense_dim_: i64);
fn atg_sparse_resize_and_clear_(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int, sparse_dim_: i64, dense_dim_: i64);
fn atg_sqrt(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sqrt_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sqrt_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_squeeze(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_squeeze1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_squeeze_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_squeeze_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_sspaddmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_sspaddmm_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
fn atg_stack(out__: *mut *mut C_tensor, tensors_data: *const *mut C_tensor, tensors_len: c_int, dim_: i64);
fn atg_stack_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, tensors_data: *const *mut C_tensor, tensors_len: c_int, dim_: i64);
fn atg_std(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
fn atg_std1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, unbiased_: c_int, keepdim_: c_int);
fn atg_std_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, unbiased_: c_int, keepdim_: c_int);
fn atg_stft(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_fft_: i64, hop_length_: i64, win_length_: i64, window_: *mut C_tensor, normalized_: c_int, onesided_: c_int);
// --- sub, sum reduction family, SVD, symmetric eigendecomposition ---
// sum overloads: bare / dtype / dim+keepdim / dim+dtype / dim+keepdim+dtype
// (`dim_data`/`dim_len` pass the reduced dimensions as an i64 slice).
// svd/symeig return multiple tensors through `out__`.
fn atg_sub(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_sub1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_sub_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_sub_1(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
fn atg_sub_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_sum(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_sum1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
fn atg_sum2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int);
fn atg_sum3(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, dtype_: c_int);
fn atg_sum4(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int, dtype_: c_int);
fn atg_sum_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int);
fn atg_sum_out1(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, dtype_: c_int);
fn atg_sum_out2(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_data: *const i64, dim_len: c_int, keepdim_: c_int, dtype_: c_int);
fn atg_svd(out__: *mut *mut C_tensor, self_: *mut C_tensor, some_: c_int, compute_uv_: c_int);
fn atg_svd_out(out__: *mut *mut C_tensor, U_: *mut C_tensor, S_: *mut C_tensor, V_: *mut C_tensor, self_: *mut C_tensor, some_: c_int, compute_uv_: c_int);
fn atg_symeig(out__: *mut *mut C_tensor, self_: *mut C_tensor, eigenvectors_: c_int, upper_: c_int);
fn atg_symeig_out(out__: *mut *mut C_tensor, e_: *mut C_tensor, V_: *mut C_tensor, self_: *mut C_tensor, eigenvectors_: c_int, upper_: c_int);
// --- transpose shorthand (t), take, tan/tanh, tensordot, threshold ---
fn atg_t(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_t_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_take(out__: *mut *mut C_tensor, self_: *mut C_tensor, index_: *mut C_tensor);
fn atg_take_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, index_: *mut C_tensor);
fn atg_tan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_tan_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_tan_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_tanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_tanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_tanh_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_tensordot(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor, dims_self_data: *const i64, dims_self_len: c_int, dims_other_data: *const i64, dims_other_len: c_int);
fn atg_threshold(out__: *mut *mut C_tensor, self_: *mut C_tensor, threshold_: *mut C_scalar, value_: *mut C_scalar);
fn atg_threshold_(out__: *mut *mut C_tensor, self_: *mut C_tensor, threshold_: *mut C_scalar, value_: *mut C_scalar);
fn atg_threshold_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, self_: *mut C_tensor, threshold_: *mut C_scalar);
fn atg_threshold_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, threshold_: *mut C_scalar, value_: *mut C_scalar);
// --- Device/dtype conversion (to*), topk, trace, transpose, tril/triu,
// --- triplet margin loss ---
// `to` overloads: device-only / options / dtype / like-another-tensor /
// device+dtype; `device_` and `dtype_` are c_int codes, presumably the same
// encodings as options_device/options_kind used elsewhere in this file.
fn atg_to(out__: *mut *mut C_tensor, self_: *mut C_tensor, device_: c_int);
fn atg_to1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int, non_blocking_: c_int, copy_: c_int);
fn atg_to2(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int, non_blocking_: c_int, copy_: c_int);
fn atg_to3(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor, non_blocking_: c_int, copy_: c_int);
fn atg_to4(out__: *mut *mut C_tensor, self_: *mut C_tensor, device_: c_int, dtype_: c_int, non_blocking_: c_int, copy_: c_int);
fn atg_to_dense(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_to_sparse(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_to_sparse1(out__: *mut *mut C_tensor, self_: *mut C_tensor, sparse_dim_: i64);
fn atg_topk(out__: *mut *mut C_tensor, self_: *mut C_tensor, k_: i64, dim_: i64, largest_: c_int, sorted_: c_int);
fn atg_topk_out(out__: *mut *mut C_tensor, values_: *mut C_tensor, indices_: *mut C_tensor, self_: *mut C_tensor, k_: i64, dim_: i64, largest_: c_int, sorted_: c_int);
fn atg_totype(out__: *mut *mut C_tensor, self_: *mut C_tensor, scalar_type_: c_int);
fn atg_trace(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_transpose(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
fn atg_transpose_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
fn atg_tril(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_tril_(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_tril_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_triplet_margin_loss(out__: *mut *mut C_tensor, anchor_: *mut C_tensor, positive_: *mut C_tensor, negative_: *mut C_tensor, margin_: f64, p_: f64, eps_: f64, swap_: c_int, reduction_: i64);
fn atg_triu(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_triu_(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_triu_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
fn atg_trtrs(out__: *mut *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor, upper_: c_int, transpose_: c_int, unitriangular_: c_int);
fn atg_trtrs_out(out__: *mut *mut C_tensor, X_: *mut C_tensor, M_: *mut C_tensor, self_: *mut C_tensor, A_: *mut C_tensor, upper_: c_int, transpose_: c_int, unitriangular_: c_int);
fn atg_trunc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_trunc_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_trunc_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor);
fn atg_type_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_unfold(out__: *mut *mut C_tensor, self_: *mut C_tensor, dimension_: i64, size_: i64, step_: i64);
fn atg_uniform_(out__: *mut *mut C_tensor, self_: *mut C_tensor, from_: f64, to_: f64);
fn atg_unsqueeze(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_unsqueeze_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
fn atg_upsample_bilinear2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_upsample_bilinear2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_bilinear2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_bilinear2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_upsample_linear1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_upsample_linear1d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_linear1d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_linear1d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_upsample_nearest1d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_nearest1d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest1d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest1d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_nearest2d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_nearest2d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest2d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest2d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_nearest3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_nearest3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int);
fn atg_upsample_nearest3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int);
fn atg_upsample_trilinear3d(out__: *mut *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_upsample_trilinear3d_backward(out__: *mut *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_trilinear3d_backward_out(out__: *mut *mut C_tensor, grad_input_: *mut C_tensor, grad_output_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, input_size_data: *const i64, input_size_len: c_int, align_corners_: c_int);
fn atg_upsample_trilinear3d_out(out__: *mut *mut C_tensor, output_: *mut C_tensor, self_: *mut C_tensor, output_size_data: *const i64, output_size_len: c_int, align_corners_: c_int);
fn atg_values(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_var(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
fn atg_var1(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, unbiased_: c_int, keepdim_: c_int);
fn atg_var_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, self_: *mut C_tensor, dim_: i64, unbiased_: c_int, keepdim_: c_int);
fn atg_view(out__: *mut *mut C_tensor, self_: *mut C_tensor, size_data: *const i64, size_len: c_int);
fn atg_view_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_where(out__: *mut *mut C_tensor, condition_: *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
fn atg_zero_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_zeros(out__: *mut *mut C_tensor, size_data: *const i64, size_len: c_int, options_kind: c_int, options_device: c_int);
fn atg_zeros_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
fn atg_zeros_like1(out__: *mut *mut C_tensor, self_: *mut C_tensor, options_kind: c_int, options_device: c_int);
fn atg_zeros_out(out__: *mut *mut C_tensor, result_: *mut C_tensor, size_data: *const i64, size_len: c_int);
}
/// Collects the raw C handles backing a slice of tensor references.
fn ptr_list(l: &[&Tensor]) -> Vec<*mut C_tensor> {
    let mut ptrs = Vec::with_capacity(l.len());
    for tensor in l {
        ptrs.push(tensor.c_tensor);
    }
    ptrs
}
impl Tensor {
/// Thin FFI wrapper over the C `atg_abs` call; wraps the returned handle.
pub fn abs(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_abs(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_abs_` call; wraps the returned handle.
pub fn abs_(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_abs_(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_abs_out` call targeting `result`.
pub fn abs_out(&self, result: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_abs_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_acos` call; wraps the returned handle.
pub fn acos(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_acos(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_acos_` call; wraps the returned handle.
pub fn acos_(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_acos_(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_acos_out` call targeting `result`.
pub fn acos_out(&self, result: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_acos_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool1d`; `output_size` is passed as ptr + len.
pub fn adaptive_avg_pool1d(&self, output_size: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool1d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool2d`; `output_size` is passed as ptr + len.
pub fn adaptive_avg_pool2d(&self, output_size: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool2d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool2d_backward` (grad_output, self).
pub fn adaptive_avg_pool2d_backward(&self, grad_output: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool2d_backward(out.as_mut_ptr(), grad_output.c_tensor, self.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool2d_backward_out` targeting `grad_input`.
pub fn adaptive_avg_pool2d_backward_out(&self, grad_input: &Tensor, grad_output: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool2d_backward_out(out.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor, self.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool2d_out` targeting `output`.
pub fn adaptive_avg_pool2d_out(&self, output: &Tensor, output_size: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool2d_out(out.as_mut_ptr(), output.c_tensor, self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool3d`; `output_size` is passed as ptr + len.
pub fn adaptive_avg_pool3d(&self, output_size: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool3d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool3d_backward` (grad_output, self).
pub fn adaptive_avg_pool3d_backward(&self, grad_output: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool3d_backward(out.as_mut_ptr(), grad_output.c_tensor, self.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool3d_backward_out` targeting `grad_input`.
pub fn adaptive_avg_pool3d_backward_out(&self, grad_input: &Tensor, grad_output: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool3d_backward_out(out.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor, self.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_avg_pool3d_out` targeting `output`.
pub fn adaptive_avg_pool3d_out(&self, output: &Tensor, output_size: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_avg_pool3d_out(out.as_mut_ptr(), output.c_tensor, self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_max_pool1d`; the C call fills two output handles.
pub fn adaptive_max_pool1d(&self, output_size: &[i64]) -> (Tensor, Tensor) {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_adaptive_max_pool1d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    (Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] })
}
/// FFI wrapper over `atg_adaptive_max_pool2d`; the C call fills two output handles.
pub fn adaptive_max_pool2d(&self, output_size: &[i64]) -> (Tensor, Tensor) {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_adaptive_max_pool2d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    (Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] })
}
/// FFI wrapper over `atg_adaptive_max_pool2d_backward` (grad_output, self, indices).
pub fn adaptive_max_pool2d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_max_pool2d_backward(out.as_mut_ptr(), grad_output.c_tensor, self.c_tensor, indices.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_max_pool2d_backward_out` targeting `grad_input`.
pub fn adaptive_max_pool2d_backward_out(&self, grad_input: &Tensor, grad_output: &Tensor, indices: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_max_pool2d_backward_out(out.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor, self.c_tensor, indices.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_max_pool2d_out`; fills `output` and `indices`.
pub fn adaptive_max_pool2d_out(&self, output: &Tensor, indices: &Tensor, output_size: &[i64]) -> (Tensor, Tensor) {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_adaptive_max_pool2d_out(out.as_mut_ptr(), output.c_tensor, indices.c_tensor, self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    (Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] })
}
/// FFI wrapper over `atg_adaptive_max_pool3d`; the C call fills two output handles.
pub fn adaptive_max_pool3d(&self, output_size: &[i64]) -> (Tensor, Tensor) {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_adaptive_max_pool3d(out.as_mut_ptr(), self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    (Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] })
}
/// FFI wrapper over `atg_adaptive_max_pool3d_backward` (grad_output, self, indices).
pub fn adaptive_max_pool3d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_max_pool3d_backward(out.as_mut_ptr(), grad_output.c_tensor, self.c_tensor, indices.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_max_pool3d_backward_out` targeting `grad_input`.
pub fn adaptive_max_pool3d_backward_out(&self, grad_input: &Tensor, grad_output: &Tensor, indices: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_adaptive_max_pool3d_backward_out(out.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor, self.c_tensor, indices.c_tensor)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_adaptive_max_pool3d_out`; fills `output` and `indices`.
pub fn adaptive_max_pool3d_out(&self, output: &Tensor, indices: &Tensor, output_size: &[i64]) -> (Tensor, Tensor) {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_adaptive_max_pool3d_out(out.as_mut_ptr(), output.c_tensor, indices.c_tensor, self.c_tensor, output_size.as_ptr(), output_size.len() as i32)
    });
    (Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] })
}
/// FFI wrapper over `atg_add` (tensor + tensor).
pub fn g_add(&self, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_add(out.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_add1` (tensor + scalar).
pub fn g_add1(&self, other: &Scalar) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_add1(out.as_mut_ptr(), self.c_tensor, other.c_scalar) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_add_` (tensor + tensor).
pub fn g_add_(&self, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_add_(out.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_add_1` (tensor + scalar).
pub fn g_add_1(&self, other: &Scalar) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_add_1(out.as_mut_ptr(), self.c_tensor, other.c_scalar) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_add_out` targeting `result`.
pub fn add_out(&self, result: &Tensor, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_add_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addbmm` (self, batch1, batch2).
pub fn addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addbmm(out.as_mut_ptr(), self.c_tensor, batch1.c_tensor, batch2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addbmm_` (self, batch1, batch2).
pub fn addbmm_(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addbmm_(out.as_mut_ptr(), self.c_tensor, batch1.c_tensor, batch2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addbmm_out` targeting `result`.
pub fn addbmm_out(&self, result: &Tensor, batch1: &Tensor, batch2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addbmm_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, batch1.c_tensor, batch2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcdiv` (self, tensor1, tensor2).
pub fn addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcdiv(out.as_mut_ptr(), self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcdiv_` (self, tensor1, tensor2).
pub fn addcdiv_(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcdiv_(out.as_mut_ptr(), self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcdiv_out` targeting `result`.
pub fn addcdiv_out(&self, result: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcdiv_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcmul` (self, tensor1, tensor2).
pub fn addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcmul(out.as_mut_ptr(), self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcmul_` (self, tensor1, tensor2).
pub fn addcmul_(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcmul_(out.as_mut_ptr(), self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addcmul_out` targeting `result`.
pub fn addcmul_out(&self, result: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addcmul_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, tensor1.c_tensor, tensor2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmm` (self, mat1, mat2).
pub fn addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmm(out.as_mut_ptr(), self.c_tensor, mat1.c_tensor, mat2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmm_` (self, mat1, mat2).
pub fn addmm_(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmm_(out.as_mut_ptr(), self.c_tensor, mat1.c_tensor, mat2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmm_out` targeting `result`.
pub fn addmm_out(&self, result: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmm_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, mat1.c_tensor, mat2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmv` (self, mat, vec).
pub fn addmv(&self, mat: &Tensor, vec: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmv(out.as_mut_ptr(), self.c_tensor, mat.c_tensor, vec.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmv_` (self, mat, vec).
pub fn addmv_(&self, mat: &Tensor, vec: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmv_(out.as_mut_ptr(), self.c_tensor, mat.c_tensor, vec.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addmv_out` targeting `result`.
pub fn addmv_out(&self, result: &Tensor, mat: &Tensor, vec: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addmv_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, mat.c_tensor, vec.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addr` (self, vec1, vec2).
pub fn addr(&self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addr(out.as_mut_ptr(), self.c_tensor, vec1.c_tensor, vec2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addr_` (self, vec1, vec2).
pub fn addr_(&self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addr_(out.as_mut_ptr(), self.c_tensor, vec1.c_tensor, vec2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_addr_out` targeting `result`.
pub fn addr_out(&self, result: &Tensor, vec1: &Tensor, vec2: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_addr_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, vec1.c_tensor, vec2.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_alias` call; wraps the returned handle.
pub fn alias(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_alias(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_all` call; wraps the returned handle.
pub fn all(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_all(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_all1`; `keepdim` is converted to a C int flag.
pub fn all1(&self, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_all1(out.as_mut_ptr(), self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_all_out` targeting `result`; `keepdim` becomes a C int flag.
pub fn all_out(&self, result: &Tensor, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_all_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_alpha_dropout`; `train` is converted to a C int flag.
pub fn alpha_dropout(&self, p: f64, train: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_alpha_dropout(out.as_mut_ptr(), self.c_tensor, p, if train { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_alpha_dropout_`; `train` is converted to a C int flag.
pub fn alpha_dropout_(&self, p: f64, train: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_alpha_dropout_(out.as_mut_ptr(), self.c_tensor, p, if train { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_any` call; wraps the returned handle.
pub fn any(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_any(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_any1`; `keepdim` is converted to a C int flag.
pub fn any1(&self, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_any1(out.as_mut_ptr(), self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_any_out` targeting `result`; `keepdim` becomes a C int flag.
pub fn any_out(&self, result: &Tensor, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_any_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange`; `options` supplies the (kind, device) C codes.
pub fn arange(end_: &Scalar, options: (Kind, Device)) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_arange(out.as_mut_ptr(), end_.c_scalar, options.0.c_int(), options.1.c_int())
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange1`; `options` supplies the (kind, device) C codes.
pub fn arange1(start: &Scalar, end_: &Scalar, options: (Kind, Device)) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_arange1(out.as_mut_ptr(), start.c_scalar, end_.c_scalar, options.0.c_int(), options.1.c_int())
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange2`; `options` supplies the (kind, device) C codes.
pub fn arange2(start: &Scalar, end_: &Scalar, step: &Scalar, options: (Kind, Device)) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_arange2(out.as_mut_ptr(), start.c_scalar, end_.c_scalar, step.c_scalar, options.0.c_int(), options.1.c_int())
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange_out` targeting `result`.
pub fn arange_out(result: &Tensor, end_: &Scalar) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_arange_out(out.as_mut_ptr(), result.c_tensor, end_.c_scalar) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange_out1` targeting `result`.
pub fn arange_out1(result: &Tensor, start: &Scalar, end_: &Scalar) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_arange_out1(out.as_mut_ptr(), result.c_tensor, start.c_scalar, end_.c_scalar) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_arange_out2` targeting `result`.
pub fn arange_out2(result: &Tensor, start: &Scalar, end_: &Scalar, step: &Scalar) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_arange_out2(out.as_mut_ptr(), result.c_tensor, start.c_scalar, end_.c_scalar, step.c_scalar) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_argmax` call; wraps the returned handle.
pub fn argmax(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_argmax(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_argmax1`; `keepdim` is converted to a C int flag.
pub fn argmax1(&self, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_argmax1(out.as_mut_ptr(), self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_argmin` call; wraps the returned handle.
pub fn argmin(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_argmin(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_argmin1`; `keepdim` is converted to a C int flag.
pub fn argmin1(&self, dim: i64, keepdim: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_argmin1(out.as_mut_ptr(), self.c_tensor, dim, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_as_strided`; `size` and `stride` are passed as ptr + len pairs.
pub fn as_strided(&self, size: &[i64], stride: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_as_strided(out.as_mut_ptr(), self.c_tensor, size.as_ptr(), size.len() as i32, stride.as_ptr(), stride.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_as_strided1`; like `as_strided` plus a storage offset.
pub fn as_strided1(&self, size: &[i64], stride: &[i64], storage_offset: i64) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_as_strided1(out.as_mut_ptr(), self.c_tensor, size.as_ptr(), size.len() as i32, stride.as_ptr(), stride.len() as i32, storage_offset)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_as_strided_`; `size` and `stride` are passed as ptr + len pairs.
pub fn as_strided_(&self, size: &[i64], stride: &[i64]) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_as_strided_(out.as_mut_ptr(), self.c_tensor, size.as_ptr(), size.len() as i32, stride.as_ptr(), stride.len() as i32)
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_as_strided_1`; like `as_strided_` plus a storage offset.
pub fn as_strided_1(&self, size: &[i64], stride: &[i64], storage_offset: i64) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_as_strided_1(out.as_mut_ptr(), self.c_tensor, size.as_ptr(), size.len() as i32, stride.as_ptr(), stride.len() as i32, storage_offset)
    });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_asin` call; wraps the returned handle.
pub fn asin(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_asin(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_asin_` call; wraps the returned handle.
pub fn asin_(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_asin_(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_asin_out` call targeting `result`.
pub fn asin_out(&self, result: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_asin_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_atan` call; wraps the returned handle.
pub fn atan(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_atan2` (self, other).
pub fn atan2(&self, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan2(out.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_atan2_` (self, other).
pub fn atan2_(&self, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan2_(out.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_atan2_out` targeting `result`.
pub fn atan2_out(&self, result: &Tensor, other: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan2_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_atan_` call; wraps the returned handle.
pub fn atan_(&self) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan_(out.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// Thin FFI wrapper over the C `atg_atan_out` call targeting `result`.
pub fn atan_out(&self, result: &Tensor) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_atan_out(out.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_avg_pool1d`; slices become ptr + len pairs, bools become C int flags.
pub fn avg_pool1d(&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_avg_pool1d(
            out.as_mut_ptr(),
            self.c_tensor,
            kernel_size.as_ptr(), kernel_size.len() as i32,
            stride.as_ptr(), stride.len() as i32,
            padding.as_ptr(), padding.len() as i32,
            if ceil_mode { 1 } else { 0 },
            if count_include_pad { 1 } else { 0 },
        )
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_avg_pool2d`; slices become ptr + len pairs, bools become C int flags.
pub fn avg_pool2d(&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_avg_pool2d(
            out.as_mut_ptr(),
            self.c_tensor,
            kernel_size.as_ptr(), kernel_size.len() as i32,
            stride.as_ptr(), stride.len() as i32,
            padding.as_ptr(), padding.len() as i32,
            if ceil_mode { 1 } else { 0 },
            if count_include_pad { 1 } else { 0 },
        )
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_avg_pool2d_backward` (grad_output, self, pooling params).
pub fn avg_pool2d_backward(&self, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_avg_pool2d_backward(
            out.as_mut_ptr(),
            grad_output.c_tensor,
            self.c_tensor,
            kernel_size.as_ptr(), kernel_size.len() as i32,
            stride.as_ptr(), stride.len() as i32,
            padding.as_ptr(), padding.len() as i32,
            if ceil_mode { 1 } else { 0 },
            if count_include_pad { 1 } else { 0 },
        )
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_avg_pool2d_backward_out` targeting `grad_input`.
pub fn avg_pool2d_backward_out(&self, grad_input: &Tensor, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_avg_pool2d_backward_out(
            out.as_mut_ptr(),
            grad_input.c_tensor,
            grad_output.c_tensor,
            self.c_tensor,
            kernel_size.as_ptr(), kernel_size.len() as i32,
            stride.as_ptr(), stride.len() as i32,
            padding.as_ptr(), padding.len() as i32,
            if ceil_mode { 1 } else { 0 },
            if count_include_pad { 1 } else { 0 },
        )
    });
    Tensor { c_tensor: out[0] }
}
/// FFI wrapper over `atg_avg_pool2d_out` targeting `output`.
pub fn avg_pool2d_out(&self, output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool) -> Tensor {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_avg_pool2d_out(
            out.as_mut_ptr(),
            output.c_tensor,
            self.c_tensor,
            kernel_size.as_ptr(), kernel_size.len() as i32,
            stride.as_ptr(), stride.len() as i32,
            padding.as_ptr(), padding.len() as i32,
            if ceil_mode { 1 } else { 0 },
            if count_include_pad { 1 } else { 0 },
        )
    });
    Tensor { c_tensor: out[0] }
}
pub fn avg_pool3d(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_avg_pool3d(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn avg_pool3d_backward(
&self, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_avg_pool3d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn avg_pool3d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_avg_pool3d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn avg_pool3d_out(
&self, output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], ceil_mode: bool, count_include_pad: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_avg_pool3d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Batched matrix-multiply-add (ATen `baddbmm`) over `self`, `batch1`,
/// `batch2`; wrapper over the C binding `atg_baddbmm`.
pub fn baddbmm(
&self, batch1: &Tensor, batch2: &Tensor
) -> Tensor {
// Single out-parameter slot filled by the C call.
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_baddbmm(c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place variant of `baddbmm` (trailing `_` follows the libtorch
/// in-place naming convention — presumably mutates `self`; the mutation
/// happens on the C side, hence `&self` suffices here).
pub fn baddbmm_(
&self, batch1: &Tensor, batch2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_baddbmm_(c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `baddbmm` writing into the caller-provided `result` tensor.
pub fn baddbmm_out(
&self, result: &Tensor, batch1: &Tensor, batch2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_baddbmm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Creates a Bartlett window tensor of length `window_length`.
/// `options` is the (dtype, device) pair, each lowered to its c-int code.
pub fn bartlett_window(
window_length: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bartlett_window(c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `bartlett_window` overload that also takes the `periodic` flag
/// (lowered to a 0/1 c int).
pub fn bartlett_window1(
window_length: i64, periodic: bool, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bartlett_window1(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Batch normalization over `self`; wrapper over `atg_batch_norm`.
/// Each `Option<&Tensor>` is lowered to a null pointer when `None` —
/// the C side treats null as "tensor not supplied". Bools become 0/1
/// c ints; `momentum` and `eps` pass through as f64.
pub fn batch_norm(
&self, weight: Option<&Tensor>, bias: Option<&Tensor>, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, training: bool, momentum: f64, eps: f64, cudnn_enabled: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_batch_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if training { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Bernoulli sampling with probabilities taken from `self`
/// (wrapper over `atg_bernoulli`).
pub fn bernoulli(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bernoulli(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Bernoulli sampling with a scalar probability `p`.
pub fn bernoulli1(
&self, p: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bernoulli1(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place Bernoulli with per-element probabilities from tensor `p`
/// (trailing `_` = in-place per libtorch convention).
pub fn bernoulli_(
&self, p: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bernoulli_(c_tensors.as_mut_ptr(),
self.c_tensor,
p.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place Bernoulli with a scalar probability `p`.
pub fn bernoulli_1(
&self, p: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bernoulli_1(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `bernoulli` writing into the caller-provided `result` tensor.
pub fn bernoulli_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bernoulli_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Bilinear transformation of `input1` and `input2` by `weight` with an
/// optional `bias` (null pointer when `None`).
pub fn bilinear(
input1: &Tensor, input2: &Tensor, weight: &Tensor, bias: Option<&Tensor>
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bilinear(c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor)
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binary cross-entropy loss between `self` and `target`, weighted by
/// `weight`; `reduction` is the ATen reduction code passed through as i64.
pub fn binary_cross_entropy(
&self, target: &Tensor, weight: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward pass of `binary_cross_entropy`; optional `weight` lowers to a
/// null pointer when `None`.
pub fn binary_cross_entropy_backward(
&self, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `binary_cross_entropy_backward` writing into the caller-provided
/// `grad_input` tensor.
pub fn binary_cross_entropy_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `binary_cross_entropy` writing into the caller-provided `output` tensor.
pub fn binary_cross_entropy_out(
&self, output: &Tensor, target: &Tensor, weight: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// BCE-with-logits loss (combines sigmoid + BCE on the C side); both
/// `weight` and `pos_weight` are optional (null when `None`).
pub fn binary_cross_entropy_with_logits(
&self, target: &Tensor, weight: Option<&Tensor>, pos_weight: Option<&Tensor>, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy_with_logits(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
pos_weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward pass of `binary_cross_entropy_with_logits`.
pub fn binary_cross_entropy_with_logits_backward(
&self, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, pos_weight: Option<&Tensor>, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_binary_cross_entropy_with_logits_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
pos_weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Histogram of the (integer) values of `self`, optionally weighted;
/// `minlength` is the minimum number of bins.
pub fn bincount(
&self, weights: Option<&Tensor>, minlength: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bincount(c_tensors.as_mut_ptr(),
self.c_tensor,
weights.map_or(std::ptr::null_mut(), |t| t.c_tensor),
minlength
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Creates a Blackman window tensor of length `window_length`;
/// `options` is the (dtype, device) pair lowered to c-int codes.
pub fn blackman_window(
window_length: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_blackman_window(c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `blackman_window` overload that also takes the `periodic` flag.
pub fn blackman_window1(
window_length: i64, periodic: bool, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_blackman_window1(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Batched matrix multiplication of `self` with `mat2`.
pub fn bmm(
&self, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bmm(c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `bmm` writing into the caller-provided `result` tensor.
pub fn bmm_out(
&self, result: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_bmm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Batched LU factorization of `self`; returns two tensors written by the
/// C side into a 2-slot out-array (factorization and pivots, per ATen).
pub fn btrifact(
&self, pivot: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_btrifact(c_tensors.as_mut_ptr(),
self.c_tensor,
if pivot { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// `btrifact` writing into the caller-provided `a_lu` and `pivots` tensors.
pub fn btrifact_out(
&self, a_lu: &Tensor, pivots: &Tensor, pivot: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_btrifact_out(c_tensors.as_mut_ptr(),
a_lu.c_tensor,
pivots.c_tensor,
self.c_tensor,
if pivot { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Batched LU factorization also returning the per-batch info tensor
/// (3-slot out-array).
pub fn btrifact_with_info(
&self, pivot: bool
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_btrifact_with_info(c_tensors.as_mut_ptr(),
self.c_tensor,
if pivot { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// `btrifact_with_info` writing into caller-provided `a_lu`, `pivots`,
/// and `info` tensors.
pub fn btrifact_with_info_out(
&self, a_lu: &Tensor, pivots: &Tensor, info: &Tensor, pivot: bool
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_btrifact_with_info_out(c_tensors.as_mut_ptr(),
a_lu.c_tensor,
pivots.c_tensor,
info.c_tensor,
self.c_tensor,
if pivot { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// Batched LU solve of `self` against the factorization in `lu_data` /
/// `lu_pivots`.
pub fn btrisolve(
&self, lu_data: &Tensor, lu_pivots: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_btrisolve(c_tensors.as_mut_ptr(),
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `btrisolve` writing into the caller-provided `result` tensor.
pub fn btrisolve_out(
&self, result: &Tensor, lu_data: &Tensor, lu_pivots: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_btrisolve_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Concatenates `tensors` along dimension `dim`. `ptr_list` (defined
/// elsewhere in this file) flattens the borrowed tensors into a raw
/// pointer array for the C side.
pub fn cat(
tensors: &[&Tensor], dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cat(c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(), tensors.len() as i32,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `cat` writing into the caller-provided `result` tensor.
pub fn cat_out(
result: &Tensor, tensors: &[&Tensor], dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cat_out(c_tensors.as_mut_ptr(),
result.c_tensor,
ptr_list(tensors).as_ptr(), tensors.len() as i32,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place fill of `self` with Cauchy-distributed samples parameterized
/// by `median` and `sigma` (trailing `_` = in-place per libtorch
/// convention; the mutation happens on the C side).
pub fn cauchy_(
&self, median: f64, sigma: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cauchy_(c_tensors.as_mut_ptr(),
self.c_tensor,
median,
sigma
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Element-wise ceiling of `self`.
pub fn ceil(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ceil(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place element-wise ceiling.
pub fn ceil_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ceil_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `ceil` writing into the caller-provided `result` tensor.
pub fn ceil_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ceil_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// CELU activation of `self` (ATen `celu` with its default alpha).
pub fn celu(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_celu(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place CELU activation.
pub fn celu_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_celu_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Chained matrix product over `matrices`; `ptr_list` flattens the
/// borrowed tensors into a raw pointer array for the C side.
pub fn chain_matmul(
matrices: &[&Tensor]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_chain_matmul(c_tensors.as_mut_ptr(),
ptr_list(matrices).as_ptr(), matrices.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Cholesky decomposition of `self`; `upper` (0/1 c int) selects the
/// upper- vs lower-triangular factor.
pub fn cholesky(
&self, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cholesky(c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `cholesky` writing into the caller-provided `result` tensor.
pub fn cholesky_out(
&self, result: &Tensor, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cholesky_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Clamps every element of `self` into `[min, max]`; scalar bounds are
/// passed as raw `c_scalar` pointers to the C binding.
pub fn clamp(
&self, min: &Scalar, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp(c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_scalar,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place `clamp` (trailing `_` = in-place per libtorch convention).
pub fn clamp_(
&self, min: &Scalar, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_(c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_scalar,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Clamps from above only: caps every element at `max`.
pub fn clamp_max(
&self, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_max(c_tensors.as_mut_ptr(),
self.c_tensor,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place `clamp_max`.
pub fn clamp_max_(
&self, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_max_(c_tensors.as_mut_ptr(),
self.c_tensor,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `clamp_max` writing into the caller-provided `result` tensor.
pub fn clamp_max_out(
&self, result: &Tensor, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_max_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Clamps from below only: raises every element to at least `min`.
pub fn clamp_min(
&self, min: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_min(c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place `clamp_min`.
pub fn clamp_min_(
&self, min: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_min_(c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `clamp_min` writing into the caller-provided `result` tensor.
pub fn clamp_min_out(
&self, result: &Tensor, min: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_min_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
min.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `clamp` writing into the caller-provided `result` tensor.
pub fn clamp_out(
&self, result: &Tensor, min: &Scalar, max: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clamp_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
min.c_scalar,
max.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Deep copy of `self` via libtorch (ATen `clone`).
/// NOTE(review): this inherent method shadows `Clone::clone` for `Tensor`
/// values at call sites — intentional in this generated binding, but
/// callers wanting the Rust trait must use `Clone::clone(&t)` explicitly.
pub fn clone(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_clone(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Coalesces a sparse tensor (sums duplicate indices on the C side).
pub fn coalesce(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_coalesce(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// N-dimensional constant padding of `self`; `pad` is the flattened
/// per-dimension padding list, passed as (pointer, length-as-i32).
pub fn constant_pad_nd(
&self, pad: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_constant_pad_nd(c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(), pad.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Returns a contiguous-memory version of `self` (ATen `contiguous`).
pub fn contiguous(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_contiguous(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 1-D convolution of `self` with `weight` and `bias`; `stride`,
/// `padding`, and `dilation` slices are lowered to (pointer,
/// length-as-i32) pairs for the C side.
pub fn conv1d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], dilation: &[i64], groups: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv1d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 2-D convolution; same lowering scheme as `conv1d`.
pub fn conv2d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], dilation: &[i64], groups: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv2d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 3-D convolution; same lowering scheme as `conv1d`.
pub fn conv3d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], dilation: &[i64], groups: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv3d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Time-batch-channel convolution (ATen `conv_tbc`).
pub fn conv_tbc(
&self, weight: &Tensor, bias: &Tensor, pad: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv_tbc(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward pass of `conv_tbc`; the C side fills a 3-slot out-array
/// (gradients w.r.t. input, weight, bias per ATen).
pub fn conv_tbc_backward(
&self, input: &Tensor, weight: &Tensor, bias: &Tensor, pad: i64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_conv_tbc_backward(c_tensors.as_mut_ptr(),
self.c_tensor,
input.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// 1-D transposed convolution; note `groups` is passed between
/// `output_padding` and `dilation`, matching the C prototype's order.
pub fn conv_transpose1d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], output_padding: &[i64], groups: i64, dilation: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv_transpose1d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
output_padding.as_ptr(), output_padding.len() as i32,
groups,
dilation.as_ptr(), dilation.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 2-D transposed convolution; same argument order as `conv_transpose1d`.
pub fn conv_transpose2d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], output_padding: &[i64], groups: i64, dilation: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv_transpose2d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
output_padding.as_ptr(), output_padding.len() as i32,
groups,
dilation.as_ptr(), dilation.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 3-D transposed convolution; same argument order as `conv_transpose1d`.
pub fn conv_transpose3d(
&self, weight: &Tensor, bias: &Tensor, stride: &[i64], padding: &[i64], output_padding: &[i64], groups: i64, dilation: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_conv_transpose3d(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
output_padding.as_ptr(), output_padding.len() as i32,
groups,
dilation.as_ptr(), dilation.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// General convolution entry point (ATen `convolution`): optional `bias`
/// lowers to null, `transposed` to a 0/1 c int.
pub fn convolution(
&self, weight: &Tensor, bias: Option<&Tensor>, stride: &[i64], padding: &[i64], dilation: &[i64], transposed: bool, output_padding: &[i64], groups: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_convolution(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(), output_padding.len() as i32,
groups
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place copy of sparse tensor `src` into `self` (trailing `_` =
/// in-place per libtorch convention); `non_blocking` lowers to 0/1.
pub fn copy_sparse_to_sparse_(
&self, src: &Tensor, non_blocking: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_copy_sparse_to_sparse_(c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
if non_blocking { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Element-wise cosine of `self`.
pub fn cos(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cos(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place element-wise cosine.
pub fn cos_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cos_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `cos` writing into the caller-provided `result` tensor.
pub fn cos_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cos_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Element-wise hyperbolic cosine of `self`.
pub fn cosh(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cosh(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place element-wise hyperbolic cosine.
pub fn cosh_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cosh_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `cosh` writing into the caller-provided `result` tensor.
pub fn cosh_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cosh_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Cosine-embedding loss between `input1` and `input2` given `target`;
/// `reduction` is the ATen reduction code.
pub fn cosine_embedding_loss(
input1: &Tensor, input2: &Tensor, target: &Tensor, margin: f64, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cosine_embedding_loss(c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Vector cross product of `self` and `other` along dimension `dim`.
pub fn cross(
&self, other: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cross(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// `cross` writing into the caller-provided `result` tensor.
pub fn cross_out(
&self, result: &Tensor, other: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cross_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// CTC loss with host-side length slices (`input_lengths`,
/// `target_lengths` as `&[i64]` lowered to pointer + i32 length).
pub fn ctc_loss(
log_probs: &Tensor, targets: &Tensor, input_lengths: &[i64], target_lengths: &[i64], blank: i64, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ctc_loss(c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(), input_lengths.len() as i32,
target_lengths.as_ptr(), target_lengths.len() as i32,
blank,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// CTC loss overload taking the lengths as tensors instead of slices.
pub fn ctc_loss1(
log_probs: &Tensor, targets: &Tensor, input_lengths: &Tensor, target_lengths: &Tensor, blank: i64, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ctc_loss1(c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// cuDNN affine grid generator: builds a sampling grid from `theta` for a
/// batch of `n` images with `c` channels and `h` x `w` spatial size.
pub fn cudnn_affine_grid_generator(
theta: &Tensor, n: i64, c: i64, h: i64, w: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_affine_grid_generator(c_tensors.as_mut_ptr(),
theta.c_tensor,
n,
c,
h,
w
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward pass of `cudnn_affine_grid_generator`.
pub fn cudnn_affine_grid_generator_backward(
grad: &Tensor, n: i64, c: i64, h: i64, w: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_affine_grid_generator_backward(c_tensors.as_mut_ptr(),
grad.c_tensor,
n,
c,
h,
w
) });
Tensor { c_tensor: c_tensors[0] }
}
/// cuDNN batch normalization; optional tensors lower to null pointers,
/// `training` to a 0/1 c int. Returns three tensors from a 3-slot
/// out-array (output plus saved statistics, per ATen).
pub fn cudnn_batch_norm(
&self, weight: &Tensor, bias: Option<&Tensor>, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, training: bool, exponential_average_factor: f64, epsilon: f64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_cudnn_batch_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// Backward pass of cuDNN batch normalization; returns three gradient
/// tensors from a 3-slot out-array.
pub fn cudnn_batch_norm_backward(
&self, grad_output: &Tensor, weight: &Tensor, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, save_mean: Option<&Tensor>, save_var: Option<&Tensor>, epsilon: f64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_cudnn_batch_norm_backward(c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
save_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
save_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
epsilon
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// cuDNN convolution of `self` with `weight`; note this op takes
/// `padding` before `stride` (opposite of `conv2d`'s order), matching
/// the C prototype.
pub fn cudnn_convolution(
&self, weight: &Tensor, bias: Option<&Tensor>, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN convolution w.r.t. the bias.
pub fn cudnn_convolution_backward_bias(
grad_output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_backward_bias(c_tensors.as_mut_ptr(),
grad_output.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN convolution w.r.t. the input; `self_size` gives
/// the input shape to reconstruct.
pub fn cudnn_convolution_backward_input(
self_size: &[i64], grad_output: &Tensor, weight: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_backward_input(c_tensors.as_mut_ptr(),
self_size.as_ptr(), self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN convolution w.r.t. the weight; `weight_size` gives
/// the weight shape to reconstruct (`self` is the forward input).
pub fn cudnn_convolution_backward_weight(
&self, weight_size: &[i64], grad_output: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_backward_weight(c_tensors.as_mut_ptr(),
weight_size.as_ptr(), weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// cuDNN transposed convolution; argument order follows the C prototype
/// (padding, output_padding, stride, dilation).
pub fn cudnn_convolution_transpose(
&self, weight: &Tensor, bias: Option<&Tensor>, padding: &[i64], output_padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_transpose(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
padding.as_ptr(), padding.len() as i32,
output_padding.as_ptr(), output_padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN transposed convolution w.r.t. the bias.
pub fn cudnn_convolution_transpose_backward_bias(
grad_output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_transpose_backward_bias(c_tensors.as_mut_ptr(),
grad_output.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN transposed convolution w.r.t. the input.
pub fn cudnn_convolution_transpose_backward_input(
grad_output: &Tensor, weight: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_transpose_backward_input(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Gradient of a cuDNN transposed convolution w.r.t. the weight.
pub fn cudnn_convolution_transpose_backward_weight(
&self, weight_size: &[i64], grad_output: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_convolution_transpose_backward_weight(c_tensors.as_mut_ptr(),
weight_size.as_ptr(), weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn cudnn_grid_sampler(
&self, grid: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cudnn_grid_sampler(c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn cudnn_grid_sampler_backward(
&self, grid: &Tensor, grad_output: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_cudnn_grid_sampler_backward(c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
grad_output.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Cumulative product / cumulative sum bindings.
//
// Naming conventions of the generated bindings: a `1` suffix is an overload
// (here: with an explicit dtype), and an `_out` suffix writes into a
// caller-supplied `result` tensor. All wrappers follow the same pattern:
// null-pointer out-array filled by the C call inside `unsafe_torch!`.

// Binding for `atg_cumprod` over dimension `dim`.
pub fn cumprod(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumprod(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// Overload with an explicit result dtype (lowered via `Kind::c_int`).
pub fn cumprod1(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumprod1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant: writes into `result`; the returned Tensor wraps the pointer
// the C call produced (presumably the same underlying tensor as `result`).
pub fn cumprod_out(
&self, result: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumprod_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant with an explicit dtype.
pub fn cumprod_out1(
&self, result: &Tensor, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumprod_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Binding for `atg_cumsum` over dimension `dim`.
pub fn cumsum(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumsum(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// Overload with an explicit result dtype.
pub fn cumsum1(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumsum1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant: writes into `result`.
pub fn cumsum_out(
&self, result: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumsum_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant with an explicit dtype.
pub fn cumsum_out1(
&self, result: &Tensor, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_cumsum_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// det / detach / diag* / digamma / dist bindings.
//
// Convention: a trailing underscore (`detach_`, `digamma_`) marks the
// in-place ATen variant; it still takes `&self` because mutation happens
// through the raw `c_tensor` pointer on the C side, not through Rust
// ownership. `_out` variants write into a caller-supplied tensor.

// Determinant of `self` (binding for `atg_det`).
pub fn det(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_det(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Returns a tensor detached from the autograd graph.
pub fn detach(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_detach(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place detach (mutates through the raw pointer).
pub fn detach_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_detach_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Diagonal extraction/construction with offset `diagonal`.
pub fn diag(
&self, diagonal: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_diag(c_tensors.as_mut_ptr(),
self.c_tensor,
diagonal
) });
Tensor { c_tensor: c_tensors[0] }
}
// Embeds values as a diagonal of a higher-rank tensor (atg_diag_embed).
pub fn diag_embed(
&self, offset: i64, dim1: i64, dim2: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_diag_embed(c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant of `diag`.
pub fn diag_out(
&self, result: &Tensor, diagonal: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_diag_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
diagonal
) });
Tensor { c_tensor: c_tensors[0] }
}
// Flattens `self` then builds a diagonal matrix with the given offset.
pub fn diagflat(
&self, offset: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_diagflat(c_tensors.as_mut_ptr(),
self.c_tensor,
offset
) });
Tensor { c_tensor: c_tensors[0] }
}
// Partial view of the diagonal over dimensions (dim1, dim2).
pub fn diagonal(
&self, offset: i64, dim1: i64, dim2: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_diagonal(c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
) });
Tensor { c_tensor: c_tensors[0] }
}
// Digamma function, elementwise.
pub fn digamma(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_digamma(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place digamma.
pub fn digamma_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_digamma_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant of digamma.
pub fn digamma_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_digamma_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// p-norm distance between `self` and `other` (binding for `atg_dist`).
pub fn dist(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_dist(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Division and dot-product bindings.
//
// The `g_` prefix ("generated") avoids clashing with the `std::ops::Div`
// trait method named `div` that this crate presumably also implements.
// `1`-suffixed overloads take a `Scalar` instead of a `Tensor`; trailing `_`
// marks the in-place variant.

// Elementwise tensor / tensor division.
pub fn g_div(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_div(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Tensor / scalar division.
pub fn g_div1(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_div1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place tensor / tensor division.
pub fn g_div_(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_div_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place tensor / scalar division.
pub fn g_div_1(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_div_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// Division writing into the caller-supplied `result` tensor.
pub fn div_out(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_div_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Dot product of `self` with `tensor`.
pub fn dot(
&self, tensor: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_dot(c_tensors.as_mut_ptr(),
self.c_tensor,
tensor.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Dot product writing into `result`.
pub fn dot_out(
&self, result: &Tensor, tensor: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_dot_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
tensor.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Dropout, eigendecomposition, and ELU bindings. All follow the generated
// pattern: null out-pointer array filled by the C call inside `unsafe_torch!`;
// `bool` flags are lowered to 0/1 integers for the C ABI.

// Dropout with probability `p`; `train` selects training-mode behaviour.
pub fn dropout(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_dropout(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place dropout.
pub fn dropout_(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_dropout_(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Eigendecomposition; returns (eigenvalues, eigenvectors) -- the second
// tensor is presumably only populated when `eigenvectors` is true.
pub fn eig(
&self, eigenvectors: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_eig(c_tensors.as_mut_ptr(),
self.c_tensor,
if eigenvectors { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Out-variant of `eig`, writing into `e` (values) and `v` (vectors).
pub fn eig_out(
&self, e: &Tensor, v: &Tensor, eigenvectors: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_eig_out(c_tensors.as_mut_ptr(),
e.c_tensor,
v.c_tensor,
self.c_tensor,
if eigenvectors { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// ELU activation (default alpha on the ATen side).
pub fn elu(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_elu(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place ELU.
pub fn elu_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_elu_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// ELU writing into `output`.
pub fn elu_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_elu_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Embedding-lookup bindings. Most are free functions (no `&self`) because
// the primary argument is the embedding weight matrix rather than a method
// receiver. `bool` flags are lowered to 0/1 integers for the C ABI.

// Embedding lookup: rows of `weight` indexed by `indices`.
pub fn embedding(
weight: &Tensor, indices: &Tensor, padding_idx: i64, scale_grad_by_freq: bool, sparse: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_embedding(c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Gradient of the embedding lookup w.r.t. the weight matrix.
pub fn embedding_backward(
grad: &Tensor, indices: &Tensor, num_weights: i64, padding_idx: i64, scale_grad_by_freq: bool, sparse: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_embedding_backward(c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Bagged embedding (sum/mean/max per `mode`); returns four tensors --
// NOTE(review): exact meaning of the auxiliary outputs is defined by ATen's
// embedding_bag, not visible here.
pub fn embedding_bag(
weight: &Tensor, indices: &Tensor, offsets: &Tensor, scale_grad_by_freq: bool, mode: i64, sparse: bool
) -> (Tensor, Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch!({
atg_embedding_bag(c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] }, Tensor { c_tensor: c_tensors[3] })
}
// Dense (non-sparse) embedding backward.
pub fn embedding_dense_backward(
grad: &Tensor, indices: &Tensor, num_weights: i64, padding_idx: i64, scale_grad_by_freq: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_embedding_dense_backward(c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place renormalization of the rows selected by `indices`.
pub fn embedding_renorm_(
&self, indices: &Tensor, max_norm: f64, norm_type: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_embedding_renorm_(c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
max_norm,
norm_type
) });
Tensor { c_tensor: c_tensors[0] }
}
// Sparse embedding backward.
pub fn embedding_sparse_backward(
grad: &Tensor, indices: &Tensor, num_weights: i64, padding_idx: i64, scale_grad_by_freq: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_embedding_sparse_backward(c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Uninitialized-tensor constructors. `options` is a (dtype, device) pair;
// both components are lowered to C ints via their `c_int()` helpers.

// Allocates an uninitialized tensor of the given size.
pub fn empty(
size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_empty(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Uninitialized tensor with the same shape/options as `self`.
pub fn empty_like(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_empty_like(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Same-shape uninitialized tensor with explicit dtype/device options.
pub fn empty_like1(
&self, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_empty_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Out-variant: resizes/fills `result` to `size`.
pub fn empty_out(
result: &Tensor, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_empty_out(c_tensors.as_mut_ptr(),
result.c_tensor,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
// Uninitialized tensor with explicit size and stride.
pub fn empty_strided(
size: &[i64], stride: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_empty_strided(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
stride.as_ptr(), stride.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Elementwise equality comparisons. `1`-suffixed overloads compare against a
// Tensor instead of a Scalar; trailing `_` is the in-place variant; `_out`
// writes into a caller-supplied `result`.

// self == scalar, elementwise.
pub fn eq(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// self == tensor, elementwise.
pub fn eq1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place self == scalar.
pub fn eq_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place self == tensor.
pub fn eq_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Scalar comparison into `result`.
pub fn eq_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// Tensor comparison into `result`.
pub fn eq_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eq_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Elementwise special functions: erf, erfc, erfinv, exp. Each comes in three
// flavours: plain, in-place (trailing `_`), and out-variant (`_out` writes
// into a caller-supplied `result`).

// Error function, elementwise.
pub fn erf(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erf(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place erf.
pub fn erf_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erf_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// erf into `result`.
pub fn erf_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erf_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Complementary error function.
pub fn erfc(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfc(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place erfc.
pub fn erfc_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfc_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// erfc into `result`.
pub fn erfc_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfc_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Inverse error function.
pub fn erfinv(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfinv(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place erfinv.
pub fn erfinv_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfinv_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// erfinv into `result`.
pub fn erfinv_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_erfinv_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Exponential, elementwise.
pub fn exp(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_exp(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place exp.
pub fn exp_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_exp_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// exp into `result`.
pub fn exp_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_exp_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// expand / expm1 / exponential_ / eye bindings. `options` pairs are
// (dtype, device) lowered via `c_int()`; bool flags become 0/1 integers.

// Broadcast-expand `self` to `size`; `implicit` mirrors ATen's flag.
pub fn expand(
&self, size: &[i64], implicit: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_expand(c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(), size.len() as i32,
if implicit { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Expand `self` to the shape of `other`.
pub fn expand_as(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_expand_as(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// exp(x) - 1, elementwise.
pub fn expm1(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_expm1(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place expm1.
pub fn expm1_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_expm1_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// expm1 into `result`.
pub fn expm1_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_expm1_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fill with samples from an exponential distribution (rate `lambd`).
pub fn exponential_(
&self, lambd: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_exponential_(c_tensors.as_mut_ptr(),
self.c_tensor,
lambd
) });
Tensor { c_tensor: c_tensors[0] }
}
// n x n identity matrix.
pub fn eye(
n: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eye(c_tensors.as_mut_ptr(),
n,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// n x m identity-like matrix.
pub fn eye1(
n: i64, m: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eye1(c_tensors.as_mut_ptr(),
n,
m,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Identity matrix into `result`.
pub fn eye_out(
result: &Tensor, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eye_out(c_tensors.as_mut_ptr(),
result.c_tensor,
n
) });
Tensor { c_tensor: c_tensors[0] }
}
// n x m identity-like matrix into `result`.
pub fn eye_out1(
result: &Tensor, n: i64, m: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_eye_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
n,
m
) });
Tensor { c_tensor: c_tensors[0] }
}
// Feature-wise dropout variants, FFT, fill, and flatten bindings.
// `bool` flags are lowered to 0/1 integers for the C ABI; trailing `_`
// marks the in-place ATen variant.

// Feature-wise alpha dropout.
pub fn feature_alpha_dropout(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_feature_alpha_dropout(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place feature-wise alpha dropout.
pub fn feature_alpha_dropout_(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_feature_alpha_dropout_(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Feature-wise (channel) dropout.
pub fn feature_dropout(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_feature_dropout(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place feature-wise dropout.
pub fn feature_dropout_(
&self, p: f64, train: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_feature_dropout_(c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Fast Fourier transform over the last `signal_ndim` dimensions.
pub fn fft(
&self, signal_ndim: i64, normalized: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fft(c_tensors.as_mut_ptr(),
self.c_tensor,
signal_ndim,
if normalized { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fill with a scalar value.
pub fn fill_(
&self, value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fill_(c_tensors.as_mut_ptr(),
self.c_tensor,
value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fill with a (0-dim) tensor value.
pub fn fill_1(
&self, value: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fill_1(c_tensors.as_mut_ptr(),
self.c_tensor,
value.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Flatten dimensions start_dim..=end_dim into one.
pub fn flatten(
&self, start_dim: i64, end_dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_flatten(c_tensors.as_mut_ptr(),
self.c_tensor,
start_dim,
end_dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// flip / floor / fmod / frac bindings. `1`-suffixed fmod overloads take a
// Tensor instead of a Scalar; trailing `_` is in-place; `_out` writes into
// the caller-supplied `result`.

// Reverse `self` along the listed dimensions.
pub fn flip(
&self, dims: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_flip(c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(), dims.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
// Elementwise floor.
pub fn floor(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_floor(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place floor.
pub fn floor_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_floor_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// floor into `result`.
pub fn floor_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_floor_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Elementwise remainder w.r.t. a scalar.
pub fn fmod(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// Elementwise remainder w.r.t. a tensor.
pub fn fmod1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fmod by a scalar.
pub fn fmod_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fmod by a tensor.
pub fn fmod_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// fmod by a scalar into `result`.
pub fn fmod_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// fmod by a tensor into `result`.
pub fn fmod_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fmod_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Fractional part, elementwise.
pub fn frac(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frac(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place fractional part.
pub fn frac_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frac_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Fractional part into `result`.
pub fn frac_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frac_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Fractional max-pooling (2d) bindings. Forward variants return
// (output, indices); backward variants consume the saved `indices`.
// Slice arguments are passed as (ptr, len as i32) pairs.

// Forward pass: pooled output plus the argmax indices used for backward.
pub fn fractional_max_pool2d(
&self, kernel_size: &[i64], output_size: &[i64], random_samples: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_fractional_max_pool2d(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
output_size.as_ptr(), output_size.len() as i32,
random_samples.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Backward pass: gradient w.r.t. the pooling input.
pub fn fractional_max_pool2d_backward(
&self, grad_output: &Tensor, kernel_size: &[i64], output_size: &[i64], indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fractional_max_pool2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
output_size.as_ptr(), output_size.len() as i32,
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Backward pass writing into the caller-supplied `grad_input`.
pub fn fractional_max_pool2d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, kernel_size: &[i64], output_size: &[i64], indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_fractional_max_pool2d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
output_size.as_ptr(), output_size.len() as i32,
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Forward pass writing into caller-supplied `output` and `indices`.
pub fn fractional_max_pool2d_out(
&self, output: &Tensor, indices: &Tensor, kernel_size: &[i64], output_size: &[i64], random_samples: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_fractional_max_pool2d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
output_size.as_ptr(), output_size.len() as i32,
random_samples.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Frobenius norm and constant-fill (`full*`) bindings. `options` pairs are
// (dtype, device) lowered via `c_int()`; bool flags become 0/1 integers.

// Frobenius norm over the whole tensor.
pub fn frobenius_norm(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frobenius_norm(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Frobenius norm over the listed dimensions, optionally keeping them.
pub fn frobenius_norm1(
&self, dim: &[i64], keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frobenius_norm1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(), dim.len() as i32,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// Frobenius norm into `result`.
pub fn frobenius_norm_out(
&self, result: &Tensor, dim: &[i64], keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_frobenius_norm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim.as_ptr(), dim.len() as i32,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
// New tensor of the given size filled with `fill_value`.
pub fn full(
size: &[i64], fill_value: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_full(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
fill_value.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Same-shape tensor filled with `fill_value` (options inherited from self).
pub fn full_like(
&self, fill_value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_full_like(c_tensors.as_mut_ptr(),
self.c_tensor,
fill_value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// Same-shape filled tensor with explicit dtype/device options.
pub fn full_like1(
&self, fill_value: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_full_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
fill_value.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
// Filled tensor into `result`.
pub fn full_out(
result: &Tensor, size: &[i64], fill_value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_full_out(c_tensors.as_mut_ptr(),
result.c_tensor,
size.as_ptr(), size.len() as i32,
fill_value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// gather and greater-or-equal (`ge*`) bindings. `1`-suffixed overloads take
// a Tensor instead of a Scalar; trailing `_` is in-place; `_out` writes into
// the caller-supplied `result`.

// Gather values along `dim` at positions given by `index`.
pub fn gather(
&self, dim: i64, index: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gather(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// gather into `result`.
pub fn gather_out(
&self, result: &Tensor, dim: i64, index: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gather_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
index.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// self >= scalar, elementwise.
pub fn ge(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// self >= tensor, elementwise.
pub fn ge1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place self >= scalar.
pub fn ge_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// In-place self >= tensor.
pub fn ge_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// self >= scalar into `result`.
pub fn ge_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
// self >= tensor into `result`.
pub fn ge_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ge_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Linear-algebra bindings: least squares (gels), QR factorization helpers
// (geqrf), outer product (ger), and linear-system solve (gesv). Two-result
// functions return both tensors filled by the C side.

// Least-squares solution of a @ x = self; returns (solution, QR factors).
pub fn gels(
&self, a: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gels(c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// gels writing into caller-supplied `x` and `qr`.
pub fn gels_out(
&self, x: &Tensor, qr: &Tensor, a: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gels_out(c_tensors.as_mut_ptr(),
x.c_tensor,
qr.c_tensor,
self.c_tensor,
a.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// In-place fill with samples from a geometric distribution.
pub fn geometric_(
&self, p: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_geometric_(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
// QR factorization (LAPACK geqrf form): returns (a, tau).
pub fn geqrf(
&self,
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_geqrf(c_tensors.as_mut_ptr(),
self.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// geqrf writing into `result0` / `result1`.
pub fn geqrf_out(
&self, result0: &Tensor, result1: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_geqrf_out(c_tensors.as_mut_ptr(),
result0.c_tensor,
result1.c_tensor,
self.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Outer product of `self` and `vec2`.
pub fn ger(
&self, vec2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ger(c_tensors.as_mut_ptr(),
self.c_tensor,
vec2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Outer product into `result`.
pub fn ger_out(
&self, result: &Tensor, vec2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ger_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
vec2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
// Solve a @ x = self; returns (solution, LU factors).
pub fn gesv(
&self, a: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gesv(c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// gesv writing into `solution` and `lu`.
pub fn gesv_out(
&self, solution: &Tensor, lu: &Tensor, a: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gesv_out(c_tensors.as_mut_ptr(),
solution.c_tensor,
lu.c_tensor,
self.c_tensor,
a.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
// Gated Linear Unit (GLU) bindings: forward, backward, and their out-variants
// writing into caller-supplied tensors.

// GLU activation, splitting along `dim`.
pub fn glu(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_glu(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// GLU backward: gradient w.r.t. the input.
pub fn glu_backward(
&self, grad_output: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_glu_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// GLU backward writing into `grad_input`.
pub fn glu_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_glu_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
// GLU forward writing into `output`.
pub fn glu_out(
&self, output: &Tensor, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_glu_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn grad(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_grad(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_grid_sampler`. `interpolation_mode` / `padding_mode`
/// are opaque integer codes interpreted by the C side.
pub fn grid_sampler(
&self, grid: &Tensor, interpolation_mode: i64, padding_mode: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_grid_sampler(c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode
) });
Tensor { c_tensor: c_tensors[0] }
}
/// 2-D specialization wrapper (`atg_grid_sampler_2d`); same argument shape as `grid_sampler`.
pub fn grid_sampler_2d(
&self, grid: &Tensor, interpolation_mode: i64, padding_mode: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_grid_sampler_2d(c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward of `grid_sampler_2d`; returns two gradient tensors from the C call.
pub fn grid_sampler_2d_backward(
&self, grad_output: &Tensor, grid: &Tensor, interpolation_mode: i64, padding_mode: i64
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_grid_sampler_2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// 3-D specialization wrapper (`atg_grid_sampler_3d`).
pub fn grid_sampler_3d(
&self, grid: &Tensor, interpolation_mode: i64, padding_mode: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_grid_sampler_3d(c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward of `grid_sampler_3d`; returns two gradient tensors.
pub fn grid_sampler_3d_backward(
&self, grad_output: &Tensor, grid: &Tensor, interpolation_mode: i64, padding_mode: i64
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_grid_sampler_3d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI wrapper for `atg_group_norm`. Optional `weight`/`bias` are lowered to a
/// null pointer when `None`; bools are lowered to 0/1 c-ints.
pub fn group_norm(
&self, num_groups: i64, weight: Option<&Tensor>, bias: Option<&Tensor>, eps: f64, cudnn_enabled: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_group_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
num_groups,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
eps,
if cudnn_enabled { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_gru`. `params` is marshalled as a pointer array via
/// `ptr_list` (the temporary Vec lives to the end of the call statement).
pub fn gru(
&self, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool, batch_first: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gru(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Packed-sequence overload of `gru` (associated fn, no receiver): takes
/// `data` + `batch_sizes` instead of `self`, and no `batch_first` flag.
pub fn gru1(
data: &Tensor, batch_sizes: &Tensor, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_gru1(c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI wrapper for `atg_gru_cell`; optional biases become null pointers when `None`.
pub fn gru_cell(
&self, hx: &Tensor, w_ih: &Tensor, w_hh: &Tensor, b_ih: Option<&Tensor>, b_hh: Option<&Tensor>
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gru_cell(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.c_tensor)
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Greater-than comparison family. Naming scheme used throughout this file:
/// `1` suffix = tensor (not scalar) right-hand side, trailing `_` = in-place
/// C variant, `_out` = caller-supplied destination tensor.
pub fn gt(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Tensor-RHS overload (`atg_gt1`).
pub fn gt1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant (`atg_gt_`), scalar RHS.
pub fn gt_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant, tensor RHS.
pub fn gt_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, scalar RHS: `result` is the destination tensor.
pub fn gt_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, tensor RHS.
pub fn gt_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_gt_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Window-tensor constructors (associated fns, no receiver). `options` is a
/// `(Kind, Device)` pair lowered to two c-int codes via `.c_int()`.
pub fn hamming_window(
window_length: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hamming_window(c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload adding the `periodic` flag (lowered to 0/1).
pub fn hamming_window1(
window_length: i64, periodic: bool, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hamming_window1(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload adding `alpha`.
pub fn hamming_window2(
window_length: i64, periodic: bool, alpha: f64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hamming_window2(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
alpha,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload adding `alpha` and `beta`.
pub fn hamming_window3(
window_length: i64, periodic: bool, alpha: f64, beta: f64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hamming_window3(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
alpha,
beta,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Hann window constructor; same marshalling as `hamming_window`.
pub fn hann_window(
window_length: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hann_window(c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Hann window with `periodic` flag.
pub fn hann_window1(
window_length: i64, periodic: bool, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hann_window1(c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_hardshrink` (no extra arguments; the C side presumably
/// applies its default lambda — semantics live in the C library).
pub fn hardshrink(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hardshrink(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward wrapper for `hardshrink`; takes the shrink threshold `lambd`.
pub fn hardshrink_backward(
&self, grad_out: &Tensor, lambd: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hardshrink_backward(c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
lambd.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_hardtanh`.
pub fn hardtanh(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hardtanh(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `hardtanh`.
pub fn hardtanh_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hardtanh_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `hardtanh`: `output` is the destination tensor.
pub fn hardtanh_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hardtanh_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_hinge_embedding_loss`; `reduction` is an opaque i64 code.
pub fn hinge_embedding_loss(
&self, target: &Tensor, margin: f64, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hinge_embedding_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
margin,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_histc` with `bins` buckets.
pub fn histc(
&self, bins: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_histc(c_tensors.as_mut_ptr(),
self.c_tensor,
bins
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `histc`.
pub fn histc_out(
&self, result: &Tensor, bins: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_histc_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
bins
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Associated fn (no receiver): wraps `atg_hspmm` over two tensors.
pub fn hspmm(
mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hspmm(c_tensors.as_mut_ptr(),
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `hspmm` (associated fn): `result` is the destination tensor.
pub fn hspmm_out(
result: &Tensor, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_hspmm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_ifft`; `normalized` is lowered to a 0/1 c-int.
pub fn ifft(
&self, signal_ndim: i64, normalized: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ifft(c_tensors.as_mut_ptr(),
self.c_tensor,
signal_ndim,
if normalized { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_index`; `indices` is marshalled via `ptr_list` as a
/// pointer array plus length (the temporary Vec lives to end of statement).
pub fn index(
&self, indices: &[&Tensor]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index(c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(indices).as_ptr(), indices.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant: accumulate `source` at `index` along `dim`.
pub fn index_add_(
&self, dim: i64, index: &Tensor, source: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_add_(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant: copy `source` rows at `index` along `dim`.
pub fn index_copy_(
&self, dim: i64, index: &Tensor, source: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_copy_(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant with scalar fill value.
pub fn index_fill_(
&self, dim: i64, index: &Tensor, value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_fill_(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant with tensor fill value (`1` suffix = tensor overload).
pub fn index_fill_1(
&self, dim: i64, index: &Tensor, value: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_fill_1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_index_put` (non-mutating C variant).
pub fn index_put(
&self, indices: &[&Tensor], values: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_put(c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(indices).as_ptr(), indices.len() as i32,
values.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `index_put`.
pub fn index_put_(
&self, indices: &[&Tensor], values: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_put_(c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(indices).as_ptr(), indices.len() as i32,
values.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_index_select` along `dim`.
pub fn index_select(
&self, dim: i64, index: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_select(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `index_select`.
pub fn index_select_out(
&self, result: &Tensor, dim: i64, index: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_index_select_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
index.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_indices` (no arguments beyond `self`).
pub fn indices(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_indices(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_instance_norm`. All four optional tensors are lowered
/// to null pointers when `None`; bools are lowered to 0/1 c-ints.
pub fn instance_norm(
&self, weight: Option<&Tensor>, bias: Option<&Tensor>, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, use_input_stats: bool, momentum: f64, eps: f64, cudnn_enabled: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_instance_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if use_input_stats { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_inverse`.
pub fn inverse(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_inverse(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `inverse`.
pub fn inverse_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_inverse_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_irfft`; `signal_sizes` is passed as data pointer + length.
pub fn irfft(
&self, signal_ndim: i64, normalized: bool, onesided: bool, signal_sizes: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_irfft(c_tensors.as_mut_ptr(),
self.c_tensor,
signal_ndim,
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
signal_sizes.as_ptr(), signal_sizes.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_isclose` with relative/absolute tolerances.
pub fn isclose(
&self, other: &Tensor, rtol: f64, atol: f64, equal_nan: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_isclose(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rtol,
atol,
if equal_nan { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_kl_div`; `reduction` is an opaque i64 code.
pub fn kl_div(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_kl_div(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward wrapper for `kl_div`; `grad_output` precedes `self` in the C call.
pub fn kl_div_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_kl_div_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_kthvalue`; returns a (values, indices) pair of handles.
pub fn kthvalue(
&self, k: i64, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_kthvalue(c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Out-variant of `kthvalue`: `values` and `indices` are destination tensors.
pub fn kthvalue_out(
&self, values: &Tensor, indices: &Tensor, k: i64, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_kthvalue_out(c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI wrapper for `atg_l1_loss`.
pub fn l1_loss(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_l1_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward wrapper for `l1_loss`.
pub fn l1_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_l1_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `l1_loss_backward`: `grad_input` is the destination tensor.
pub fn l1_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_l1_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `l1_loss`: `output` is the destination tensor.
pub fn l1_loss_out(
&self, output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_l1_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_layer_norm`; `normalized_shape` is passed as data
/// pointer + length, optional `weight`/`bias` become null when `None`.
pub fn layer_norm(
&self, normalized_shape: &[i64], weight: Option<&Tensor>, bias: Option<&Tensor>, eps: f64, cudnn_enable: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_layer_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
normalized_shape.as_ptr(), normalized_shape.len() as i32,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
eps,
if cudnn_enable { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Less-or-equal family; same naming scheme as `gt` (`1` = tensor RHS,
/// trailing `_` = in-place C variant, `_out` = destination tensor supplied).
pub fn le(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Tensor-RHS overload.
pub fn le1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant, scalar RHS.
pub fn le_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant, tensor RHS.
pub fn le_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, scalar RHS.
pub fn le_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, tensor RHS.
pub fn le_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_le_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_leaky_relu` (no slope argument; C-side default applies).
pub fn leaky_relu(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_leaky_relu(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `leaky_relu`.
pub fn leaky_relu_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_leaky_relu_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `leaky_relu`: `output` is the destination tensor.
pub fn leaky_relu_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_leaky_relu_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_lerp` (`end_` renamed to avoid the `end` keyword clash).
pub fn lerp(
&self, end_: &Tensor, weight: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lerp(c_tensors.as_mut_ptr(),
self.c_tensor,
end_.c_tensor,
weight.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `lerp`.
pub fn lerp_(
&self, end_: &Tensor, weight: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lerp_(c_tensors.as_mut_ptr(),
self.c_tensor,
end_.c_tensor,
weight.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `lerp`.
pub fn lerp_out(
&self, result: &Tensor, end_: &Tensor, weight: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lerp_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
end_.c_tensor,
weight.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_lgamma`.
pub fn lgamma(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lgamma(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `lgamma`.
pub fn lgamma_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lgamma_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `lgamma`.
pub fn lgamma_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lgamma_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_linear`. Note: `bias` is a required `&Tensor` here
/// (no `Option` lowering), unlike `group_norm`/`layer_norm`.
pub fn linear(
&self, weight: &Tensor, bias: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_linear(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Associated constructor wrapping `atg_linspace`; `options` lowers `(Kind, Device)`
/// to two c-int codes.
pub fn linspace(
start: &Scalar, end_: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_linspace(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload adding an explicit `steps` count.
pub fn linspace1(
start: &Scalar, end_: &Scalar, steps: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_linspace1(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
steps,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `linspace` (associated fn): `result` is the destination tensor.
pub fn linspace_out(
result: &Tensor, start: &Scalar, end_: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_linspace_out(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant with explicit `steps`.
pub fn linspace_out1(
result: &Tensor, start: &Scalar, end_: &Scalar, steps: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_linspace_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar,
steps
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log`.
pub fn log(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log10`.
pub fn log10(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log10(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `log10`.
pub fn log10_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log10_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log10`.
pub fn log10_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log10_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log1p`.
pub fn log1p(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log1p(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `log1p`.
pub fn log1p_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log1p_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log1p`.
pub fn log1p_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log1p_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log2`.
pub fn log2(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log2(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `log2`.
pub fn log2_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log2_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log2`.
pub fn log2_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log2_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant of `log`.
pub fn log_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant wrapping `atg_log_normal_` with `mean`/`std` parameters.
pub fn log_normal_(
&self, mean: f64, std: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_normal_(c_tensors.as_mut_ptr(),
self.c_tensor,
mean,
std
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log`.
pub fn log_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log_sigmoid`.
pub fn log_sigmoid(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_sigmoid(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Backward wrapper; `buffer` is an auxiliary tensor consumed by the C side.
pub fn log_sigmoid_backward(
&self, grad_output: &Tensor, buffer: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_sigmoid_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log_sigmoid_backward`: `grad_input` is the destination tensor.
pub fn log_sigmoid_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, buffer: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_sigmoid_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `log_sigmoid`.
pub fn log_sigmoid_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_sigmoid_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_log_softmax` along `dim`.
pub fn log_softmax(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_softmax(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload with explicit `dtype`, lowered to a c-int code via `Kind::c_int`.
pub fn log_softmax1(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_log_softmax1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_logdet`.
pub fn logdet(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logdet(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Associated constructor wrapping `atg_logspace`; mirrors `linspace`.
pub fn logspace(
start: &Scalar, end_: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logspace(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Overload with explicit `steps` count.
pub fn logspace1(
start: &Scalar, end_: &Scalar, steps: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logspace1(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
steps,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `logspace` (associated fn).
pub fn logspace_out(
result: &Tensor, start: &Scalar, end_: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logspace_out(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant with explicit `steps`.
pub fn logspace_out1(
result: &Tensor, start: &Scalar, end_: &Scalar, steps: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logspace_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar,
steps
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_logsumexp` along `dim`.
pub fn logsumexp(
&self, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logsumexp(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant of `logsumexp`.
pub fn logsumexp_out(
&self, result: &Tensor, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_logsumexp_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI wrapper for `atg_lstm`. Both `hx` and `params` slices are marshalled as
/// pointer array + length via `ptr_list`; returns three tensor handles.
pub fn lstm(
&self, hx: &[&Tensor], params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool, batch_first: bool
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_lstm(c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(), hx.len() as i32,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// Packed-sequence overload of `lstm` (associated fn): `data` + `batch_sizes`
/// replace `self`; no `batch_first` flag.
pub fn lstm1(
data: &Tensor, batch_sizes: &Tensor, hx: &[&Tensor], params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_lstm1(c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
ptr_list(hx).as_ptr(), hx.len() as i32,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// FFI wrapper for `atg_lstm_cell`; optional biases become null pointers.
pub fn lstm_cell(
&self, hx: &[&Tensor], w_ih: &Tensor, w_hh: &Tensor, b_ih: Option<&Tensor>, b_hh: Option<&Tensor>
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_lstm_cell(c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(), hx.len() as i32,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.c_tensor)
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Less-than comparison family; same naming scheme as `gt`/`le` (`1` = tensor
/// RHS, trailing `_` = in-place C variant, `_out` = destination supplied).
pub fn lt(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Tensor-RHS overload.
pub fn lt1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant, scalar RHS.
pub fn lt_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// In-place C variant, tensor RHS.
pub fn lt_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, scalar RHS.
pub fn lt_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Out-variant, tensor RHS.
pub fn lt_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_lt_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn margin_ranking_loss(
input1: &Tensor, input2: &Tensor, target: &Tensor, margin: f64, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_margin_ranking_loss(c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_masked_fill_` (scalar value; `_` suffix: in-place per libtorch naming).
pub fn masked_fill_(
&self, mask: &Tensor, value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_masked_fill_(c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_masked_fill_1` — tensor-valued overload of `masked_fill_`.
pub fn masked_fill_1(
&self, mask: &Tensor, value: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_masked_fill_1(c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_masked_scatter_` (`_` suffix: in-place per libtorch naming).
pub fn masked_scatter_(
&self, mask: &Tensor, source: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_masked_scatter_(c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
source.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_masked_select`; returns a new Tensor built from the result handle.
pub fn masked_select(
&self, mask: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_masked_select(c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_masked_select_out`; `result` passed first, presumably the destination.
pub fn masked_select_out(
&self, result: &Tensor, mask: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_masked_select_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
mask.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_matmul` (matrix product of `self` and `other`).
pub fn matmul(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_matmul(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_matmul_out`; `result` passed first, presumably the destination.
pub fn matmul_out(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_matmul_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_matrix_power`; `n` is forwarded unchanged as the exponent.
pub fn matrix_power(
&self, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_matrix_power(c_tensors.as_mut_ptr(),
self.c_tensor,
n
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_matrix_rank`; `symmetric` is encoded as a C int (1/0).
pub fn matrix_rank(
&self, symmetric: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_matrix_rank(c_tensors.as_mut_ptr(),
self.c_tensor,
if symmetric { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_matrix_rank1` — overload of `matrix_rank` with an explicit tolerance.
pub fn matrix_rank1(
&self, tol: f64, symmetric: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_matrix_rank1(c_tensors.as_mut_ptr(),
self.c_tensor,
tol,
if symmetric { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max` — no-argument overload (presumably the global maximum).
pub fn max(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max1` — binary overload (presumably element-wise max with `other`).
pub fn max1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max2` — dim-wise overload. Two output slots: the C call writes
/// two handles (presumably values and indices, per libtorch's `max(dim)` contract).
pub fn max2(
&self, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max2(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_out`; `result` passed first, presumably the destination.
pub fn max_out(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_out1` — dim-wise `max` writing into caller-provided
/// `max`/`max_values` destinations; returns both handles the C call reports.
pub fn max_out1(
&self, max: &Tensor, max_values: &Tensor, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_out1(c_tensors.as_mut_ptr(),
max.c_tensor,
max_values.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_pool1d`. Each slice argument crosses the FFI boundary
/// as a (pointer, length) pair; `ceil_mode` is encoded as a C int (1/0).
pub fn max_pool1d(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool1d(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool1d_with_indices`; two output slots (values and indices,
/// per the `_with_indices` naming — TODO confirm ordering against the C side).
pub fn max_pool1d_with_indices(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_pool1d_with_indices(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_pool2d`; slices cross the FFI as (pointer, length) pairs.
pub fn max_pool2d(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool2d(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool2d_with_indices`; two output handles returned as a tuple.
pub fn max_pool2d_with_indices(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_pool2d_with_indices(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_pool2d_with_indices_backward`. Note the C argument order:
/// `grad_output` precedes `self`, and `indices` comes last.
pub fn max_pool2d_with_indices_backward(
&self, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool, indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool2d_with_indices_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool2d_with_indices_backward_out`; `grad_input` is passed
/// first, presumably the destination for the computed gradient.
pub fn max_pool2d_with_indices_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool, indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool2d_with_indices_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool2d_with_indices_out`; `output` and `indices` are
/// caller-provided destinations, passed ahead of `self`.
pub fn max_pool2d_with_indices_out(
&self, output: &Tensor, indices: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_pool2d_with_indices_out(c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_pool3d`; mirrors `max_pool2d` for the 3-D case.
pub fn max_pool3d(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool3d(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool3d_with_indices`; two output handles returned as a tuple.
pub fn max_pool3d_with_indices(
&self, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_pool3d_with_indices(c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_pool3d_with_indices_backward`; `grad_output` precedes `self`
/// and `indices` comes last in the C argument order.
pub fn max_pool3d_with_indices_backward(
&self, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool, indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool3d_with_indices_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool3d_with_indices_backward_out`; `grad_input` passed first,
/// presumably the destination for the computed gradient.
pub fn max_pool3d_with_indices_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool, indices: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_pool3d_with_indices_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_pool3d_with_indices_out`; `output`/`indices` are destinations.
pub fn max_pool3d_with_indices_out(
&self, output: &Tensor, indices: &Tensor, kernel_size: &[i64], stride: &[i64], padding: &[i64], dilation: &[i64], ceil_mode: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_max_pool3d_with_indices_out(c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(), kernel_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_max_unpool2d`; `output_size` crosses as a (pointer, length) pair.
pub fn max_unpool2d(
&self, indices: &Tensor, output_size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool2d(c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool2d_backward`; `grad_output` precedes `self` in C order.
pub fn max_unpool2d_backward(
&self, grad_output: &Tensor, indices: &Tensor, output_size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool2d_backward_out`; `grad_input` passed first as destination.
pub fn max_unpool2d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, indices: &Tensor, output_size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool2d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool2d_out`; `output` passed first, presumably the destination.
pub fn max_unpool2d_out(
&self, output: &Tensor, indices: &Tensor, output_size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool2d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool3d`; 3-D unpooling also takes stride/padding slices.
pub fn max_unpool3d(
&self, indices: &Tensor, output_size: &[i64], stride: &[i64], padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool3d(c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool3d_backward`; `grad_output` precedes `self` in C order.
pub fn max_unpool3d_backward(
&self, grad_output: &Tensor, indices: &Tensor, output_size: &[i64], stride: &[i64], padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool3d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool3d_backward_out`; `grad_input` passed first as destination.
pub fn max_unpool3d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, indices: &Tensor, output_size: &[i64], stride: &[i64], padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool3d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_unpool3d_out`; `output` passed first, presumably the destination.
pub fn max_unpool3d_out(
&self, output: &Tensor, indices: &Tensor, output_size: &[i64], stride: &[i64], padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_unpool3d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(), output_size.len() as i32,
stride.as_ptr(), stride.len() as i32,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_max_values` (dim-wise; `keepdim` encoded as a C int).
pub fn max_values(
&self, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_max_values(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean` — no-argument overload (presumably the global mean).
pub fn mean(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean1` — overload taking a result dtype, passed as its C int code.
pub fn mean1(
&self, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean1(c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean2` — dim-wise overload with `keepdim`.
pub fn mean2(
&self, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean2(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean3` — dim-wise overload with a result dtype.
pub fn mean3(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean3(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean4` — dim-wise overload with both `keepdim` and a result dtype.
pub fn mean4(
&self, dim: i64, keepdim: bool, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean4(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean_out`; `result` passed first, presumably the destination.
pub fn mean_out(
&self, result: &Tensor, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean_out1` — out-variant with a result dtype instead of `keepdim`.
pub fn mean_out1(
&self, result: &Tensor, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mean_out2` — out-variant with both `keepdim` and a result dtype.
pub fn mean_out2(
&self, result: &Tensor, dim: i64, keepdim: bool, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mean_out2(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_median` — no-argument overload (presumably the global median).
pub fn median(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_median(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_median1` — dim-wise overload returning two handles
/// (presumably values and indices, per libtorch's dim-reduction contract).
pub fn median1(
&self, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_median1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_median_out`; `values`/`indices` are caller-provided destinations.
pub fn median_out(
&self, values: &Tensor, indices: &Tensor, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_median_out(c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_min` — no-argument overload (presumably the global minimum).
pub fn min(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_min(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_min1` — binary overload (presumably element-wise min with `other`).
pub fn min1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_min1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_min2` — dim-wise overload returning two handles (values/indices).
pub fn min2(
&self, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_min2(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_min_out`; `result` passed first, presumably the destination.
pub fn min_out(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_min_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_min_out1`; `min`/`min_indices` are caller-provided destinations.
pub fn min_out1(
&self, min: &Tensor, min_indices: &Tensor, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_min_out1(c_tensors.as_mut_ptr(),
min.c_tensor,
min_indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_min_values` (dim-wise; `keepdim` encoded as a C int).
pub fn min_values(
&self, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_min_values(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_batch_norm` (MIOpen/ROCm backend). Optional tensors are
/// lowered to nullable C pointers via `map_or(null_mut, ...)`; returns three handles.
pub fn miopen_batch_norm(
&self, weight: &Tensor, bias: Option<&Tensor>, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, training: bool, exponential_average_factor: f64, epsilon: f64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_miopen_batch_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// Binding for C `atg_miopen_batch_norm_backward`; note `self` comes before
/// `grad_output` here, unlike most `_backward` bindings in this file.
pub fn miopen_batch_norm_backward(
&self, grad_output: &Tensor, weight: &Tensor, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, save_mean: Option<&Tensor>, save_var: Option<&Tensor>, epsilon: f64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_miopen_batch_norm_backward(c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
save_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
save_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
epsilon
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
/// Binding for C `atg_miopen_convolution`; optional `bias` lowered to a nullable pointer,
/// shape slices to (pointer, length) pairs, bools to C ints.
pub fn miopen_convolution(
&self, weight: &Tensor, bias: Option<&Tensor>, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_backward_bias`. Free function — takes only
/// `grad_output`, no `self`.
pub fn miopen_convolution_backward_bias(
grad_output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_backward_bias(c_tensors.as_mut_ptr(),
grad_output.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_backward_input`. Free function: the input is
/// described only by its size slice (`self_size`), not by a tensor.
pub fn miopen_convolution_backward_input(
self_size: &[i64], grad_output: &Tensor, weight: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_backward_input(c_tensors.as_mut_ptr(),
self_size.as_ptr(), self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_backward_weight`; C order is
/// (weight_size, grad_output, self, ...), with `self` third.
pub fn miopen_convolution_backward_weight(
&self, weight_size: &[i64], grad_output: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_backward_weight(c_tensors.as_mut_ptr(),
weight_size.as_ptr(), weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_transpose` (transposed convolution; adds an
/// `output_padding` slice on top of the plain convolution arguments).
pub fn miopen_convolution_transpose(
&self, weight: &Tensor, bias: Option<&Tensor>, padding: &[i64], output_padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_transpose(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
padding.as_ptr(), padding.len() as i32,
output_padding.as_ptr(), output_padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_transpose_backward_input`. Free function;
/// unlike the non-transposed variant, no size slice is needed here.
pub fn miopen_convolution_transpose_backward_input(
grad_output: &Tensor, weight: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_transpose_backward_input(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_miopen_convolution_transpose_backward_weight`; C order is
/// (weight_size, grad_output, self, ...), with `self` third.
pub fn miopen_convolution_transpose_backward_weight(
&self, weight_size: &[i64], grad_output: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, benchmark: bool, deterministic: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_miopen_convolution_transpose_backward_weight(c_tensors.as_mut_ptr(),
weight_size.as_ptr(), weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mkldnn_convolution` (MKL-DNN backend); optional `bias` lowered
/// to a nullable pointer.
pub fn mkldnn_convolution(
&self, weight: &Tensor, bias: Option<&Tensor>, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mkldnn_convolution(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mkldnn_convolution_backward_input`. Free function; input is
/// described by its size slice, and `bias_defined` is encoded as a C int.
pub fn mkldnn_convolution_backward_input(
self_size: &[i64], grad_output: &Tensor, weight: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, bias_defined: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mkldnn_convolution_backward_input(c_tensors.as_mut_ptr(),
self_size.as_ptr(), self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if bias_defined { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mkldnn_convolution_backward_weights`; returns two handles
/// (presumably weight and bias gradients, per the plural name — verify).
pub fn mkldnn_convolution_backward_weights(
&self, weight_size: &[i64], grad_output: &Tensor, padding: &[i64], stride: &[i64], dilation: &[i64], groups: i64, bias_defined: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_mkldnn_convolution_backward_weights(c_tensors.as_mut_ptr(),
weight_size.as_ptr(), weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32,
stride.as_ptr(), stride.len() as i32,
dilation.as_ptr(), dilation.len() as i32,
groups,
if bias_defined { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_mm` (matrix-matrix multiply of `self` and `mat2`).
pub fn mm(
&self, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mm(c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mm_out`; `result` passed first, presumably the destination.
pub fn mm_out(
&self, result: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mode` — dim-wise; returns two handles (values/indices).
pub fn mode(
&self, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_mode(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_mode_out`; `values`/`indices` are caller-provided destinations.
pub fn mode_out(
&self, values: &Tensor, indices: &Tensor, dim: i64, keepdim: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_mode_out(c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// Binding for C `atg_mse_loss`; `reduction` is an opaque i64 reduction mode.
pub fn mse_loss(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mse_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mse_loss_backward`; `grad_output` precedes `self` in C order.
pub fn mse_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mse_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mse_loss_backward_out`; `grad_input` passed first as destination.
pub fn mse_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mse_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mse_loss_out`; `output` passed first, presumably the destination.
pub fn mse_loss_out(
&self, output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mse_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mul` — tensor overload. NOTE(review): Rust name carries a `g_`
/// prefix, presumably to avoid clashing with an operator-trait `mul` — confirm.
pub fn g_mul(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mul(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mul1` — scalar overload of `g_mul`.
pub fn g_mul1(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mul1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mul_` (tensor overload; `_` suffix: in-place per libtorch naming).
pub fn g_mul_(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mul_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mul_1` — scalar overload of the in-place-style `g_mul_`.
pub fn g_mul_1(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mul_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mul_out`; `result` passed first, presumably the destination.
pub fn mul_out(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mul_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multilabel_margin_loss`; `reduction` is an opaque i64 mode.
pub fn multilabel_margin_loss(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multilabel_margin_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multilabel_margin_loss_backward`; `grad_output` precedes `self`,
/// and the `is_target` buffer is forwarded last.
pub fn multilabel_margin_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64, is_target: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multilabel_margin_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction,
is_target.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multilabel_margin_loss_backward_out`; `grad_input` passed first
/// as the destination.
pub fn multilabel_margin_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, reduction: i64, is_target: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multilabel_margin_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction,
is_target.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multilabel_margin_loss_out`; `output` passed first as destination.
pub fn multilabel_margin_loss_out(
&self, output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multilabel_margin_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multinomial`; `replacement` encoded as a C int (1/0).
pub fn multinomial(
&self, num_samples: i64, replacement: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multinomial(c_tensors.as_mut_ptr(),
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_multinomial_out`; `result` passed first, presumably the destination.
pub fn multinomial_out(
&self, result: &Tensor, num_samples: i64, replacement: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_multinomial_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mv` (matrix-vector product of `self` and `vec`).
pub fn mv(
&self, vec: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mv(c_tensors.as_mut_ptr(),
self.c_tensor,
vec.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Binding for C `atg_mv_out`; `result` passed first, presumably the destination.
pub fn mv_out(
&self, result: &Tensor, vec: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mv_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
vec.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn mvlgamma(
&self, p: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mvlgamma(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn mvlgamma_(
&self, p: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_mvlgamma_(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn narrow(
&self, dim: i64, start: i64, length: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_narrow(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
length
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn narrow_copy(
&self, dim: i64, start: i64, length: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_narrow_copy(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
length
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_batch_norm(
&self, weight: Option<&Tensor>, bias: Option<&Tensor>, running_mean: Option<&Tensor>, running_var: Option<&Tensor>, training: bool, momentum: f64, eps: f64
) -> (Tensor, Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch!({
atg_native_batch_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if training { 1 } else { 0 },
momentum,
eps
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }, Tensor { c_tensor: c_tensors[2] })
}
pub fn native_clone(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_clone(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_norm(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_norm(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_pow(
&self, exponent: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_pow(c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_pow_out(
&self, result: &Tensor, exponent: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_pow_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
exponent.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_resize_as_(
&self, the_template: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_resize_as_(c_tensors.as_mut_ptr(),
self.c_tensor,
the_template.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn native_zero_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_native_zero_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ne_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ne_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn neg(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_neg(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn neg_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_neg_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn neg_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_neg_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn g_nll_loss(
&self, target: &Tensor, weight: &Tensor, reduction: i64, ignore_index: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction,
ignore_index
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss2d(
&self, target: &Tensor, weight: &Tensor, reduction: i64, ignore_index: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss2d(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction,
ignore_index
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss2d_backward(
&self, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64, ignore_index: i64, total_weight: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction,
ignore_index,
total_weight.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss2d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64, ignore_index: i64, total_weight: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss2d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction,
ignore_index,
total_weight.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss2d_out(
&self, output: &Tensor, target: &Tensor, weight: &Tensor, reduction: i64, ignore_index: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss2d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction,
ignore_index
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64, ignore_index: i64, total_weight: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction,
ignore_index,
total_weight.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, weight: Option<&Tensor>, reduction: i64, ignore_index: i64, total_weight: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.c_tensor),
reduction,
ignore_index,
total_weight.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nll_loss_out(
&self, output: &Tensor, target: &Tensor, weight: &Tensor, reduction: i64, ignore_index: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nll_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.c_tensor,
reduction,
ignore_index
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nonzero(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nonzero(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nonzero_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nonzero_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn norm(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_norm(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn norm1(
&self, p: &Scalar, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_norm1(c_tensors.as_mut_ptr(),
self.c_tensor,
p.c_scalar,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn norm_except_dim(
v: &Tensor, pow: i64, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_norm_except_dim(c_tensors.as_mut_ptr(),
v.c_tensor,
pow,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn norm_out(
&self, result: &Tensor, p: &Scalar, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_norm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
p.c_scalar,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal(
mean: &Tensor, std: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal(c_tensors.as_mut_ptr(),
mean.c_tensor,
std
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal1(
mean: f64, std: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal1(c_tensors.as_mut_ptr(),
mean,
std.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal2(
mean: &Tensor, std: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal2(c_tensors.as_mut_ptr(),
mean.c_tensor,
std.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal_(
&self, mean: f64, std: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal_(c_tensors.as_mut_ptr(),
self.c_tensor,
mean,
std
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal_out(
output: &Tensor, mean: &Tensor, std: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal_out(c_tensors.as_mut_ptr(),
output.c_tensor,
mean.c_tensor,
std
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal_out1(
output: &Tensor, mean: f64, std: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal_out1(c_tensors.as_mut_ptr(),
output.c_tensor,
mean,
std.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn normal_out2(
output: &Tensor, mean: &Tensor, std: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_normal_out2(c_tensors.as_mut_ptr(),
output.c_tensor,
mean.c_tensor,
std.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nuclear_norm(
&self, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nuclear_norm(c_tensors.as_mut_ptr(),
self.c_tensor,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn nuclear_norm_out(
&self, result: &Tensor, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_nuclear_norm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ones(
size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ones(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ones_like(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ones_like(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ones_like1(
&self, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ones_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ones_out(
result: &Tensor, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ones_out(c_tensors.as_mut_ptr(),
result.c_tensor,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn orgqr(
&self, input2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_orgqr(c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn orgqr_out(
&self, result: &Tensor, input2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_orgqr_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
input2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ormqr(
&self, input2: &Tensor, input3: &Tensor, left: bool, transpose: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ormqr(c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn ormqr_out(
&self, result: &Tensor, input2: &Tensor, input3: &Tensor, left: bool, transpose: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_ormqr_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pairwise_distance(
x1: &Tensor, x2: &Tensor, p: f64, eps: f64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pairwise_distance(c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
p,
eps,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pdist(
&self, p: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pdist(c_tensors.as_mut_ptr(),
self.c_tensor,
p
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn permute(
&self, dims: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_permute(c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(), dims.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pin_memory(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pin_memory(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pinverse(
&self, rcond: f64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pinverse(c_tensors.as_mut_ptr(),
self.c_tensor,
rcond
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pixel_shuffle(
&self, upscale_factor: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pixel_shuffle(c_tensors.as_mut_ptr(),
self.c_tensor,
upscale_factor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn poisson(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_poisson(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn polygamma(
&self, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_polygamma(c_tensors.as_mut_ptr(),
n,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn polygamma_(
&self, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_polygamma_(c_tensors.as_mut_ptr(),
self.c_tensor,
n
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn polygamma_out(
&self, result: &Tensor, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_polygamma_out(c_tensors.as_mut_ptr(),
result.c_tensor,
n,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn potri(
&self, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_potri(c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn potri_out(
&self, result: &Tensor, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_potri_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn potrs(
&self, input2: &Tensor, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_potrs(c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn potrs_out(
&self, result: &Tensor, input2: &Tensor, upper: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_potrs_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow(
&self, exponent: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow(c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow1(
&self, exponent: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow1(c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow2(
self_scalar: &Scalar, exponent: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow2(c_tensors.as_mut_ptr(),
self_scalar.c_scalar,
exponent.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow_(
&self, exponent: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow_(c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow_1(
&self, exponent: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow_1(c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow_out(
&self, result: &Tensor, exponent: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
exponent.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow_out1(
&self, result: &Tensor, exponent: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
exponent.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pow_out2(
result: &Tensor, self_scalar: &Scalar, exponent: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_pow_out2(c_tensors.as_mut_ptr(),
result.c_tensor,
self_scalar.c_scalar,
exponent.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prelu(
&self, weight: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prelu(c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prelu_backward(
&self, grad_output: &Tensor, weight: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_prelu_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
pub fn prod(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod1(
&self, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod1(c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod2(
&self, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod2(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod3(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod3(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod4(
&self, dim: i64, keepdim: bool, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod4(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod_out(
&self, result: &Tensor, dim: i64, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod_out1(
&self, result: &Tensor, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn prod_out2(
&self, result: &Tensor, dim: i64, keepdim: bool, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_prod_out2(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn pstrf(
&self, upper: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_pstrf(c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
pub fn pstrf_out(
&self, u: &Tensor, piv: &Tensor, upper: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_pstrf_out(c_tensors.as_mut_ptr(),
u.c_tensor,
piv.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
pub fn put_(
&self, index: &Tensor, source: &Tensor, accumulate: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_put_(c_tensors.as_mut_ptr(),
self.c_tensor,
index.c_tensor,
source.c_tensor,
if accumulate { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn qr(
&self,
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_qr(c_tensors.as_mut_ptr(),
self.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
pub fn qr_out(
&self, q: &Tensor, r: &Tensor
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_qr_out(c_tensors.as_mut_ptr(),
q.c_tensor,
r.c_tensor,
self.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
pub fn rand(
size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rand(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn rand_like(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rand_like(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn rand_like1(
&self, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rand_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn rand_out(
result: &Tensor, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rand_out(c_tensors.as_mut_ptr(),
result.c_tensor,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint(
high: i64, size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint(c_tensors.as_mut_ptr(),
high,
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint1(
low: i64, high: i64, size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint1(c_tensors.as_mut_ptr(),
low,
high,
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_like(
&self, high: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_like(c_tensors.as_mut_ptr(),
self.c_tensor,
high
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_like1(
&self, low: i64, high: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
low,
high
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_like2(
&self, high: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_like2(c_tensors.as_mut_ptr(),
self.c_tensor,
high,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_like3(
&self, low: i64, high: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_like3(c_tensors.as_mut_ptr(),
self.c_tensor,
low,
high,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_out(
result: &Tensor, high: i64, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_out(c_tensors.as_mut_ptr(),
result.c_tensor,
high,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randint_out1(
result: &Tensor, low: i64, high: i64, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randint_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
low,
high,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randn(
size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randn(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randn_like(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randn_like(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randn_like1(
&self, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randn_like1(c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randn_out(
result: &Tensor, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randn_out(c_tensors.as_mut_ptr(),
result.c_tensor,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn random_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_random_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn random_1(
&self, to_: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_random_1(c_tensors.as_mut_ptr(),
self.c_tensor,
to_
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn random_2(
&self, from: i64, to_: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_random_2(c_tensors.as_mut_ptr(),
self.c_tensor,
from,
to_
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randperm(
n: i64, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randperm(c_tensors.as_mut_ptr(),
n,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn randperm_out(
result: &Tensor, n: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_randperm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
n
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn range(
start: &Scalar, end_: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_range(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn range1(
start: &Scalar, end_: &Scalar, step: &Scalar, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_range1(c_tensors.as_mut_ptr(),
start.c_scalar,
end_.c_scalar,
step.c_scalar,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn range_out(
result: &Tensor, start: &Scalar, end_: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_range_out(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn range_out1(
result: &Tensor, start: &Scalar, end_: &Scalar, step: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_range_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
start.c_scalar,
end_.c_scalar,
step.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reciprocal(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reciprocal(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reciprocal_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reciprocal_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reciprocal_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reciprocal_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reflection_pad1d(
&self, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad1d(c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reflection_pad1d_backward(
&self, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad1d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reflection_pad1d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad1d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
pub fn reflection_pad1d_out(
&self, output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad1d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reflection_pad2d`; `padding` is passed as (pointer, length) pair.
pub fn reflection_pad2d(
&self, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad2d(c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reflection_pad2d_backward` (gradient of the pad op w.r.t. `self`).
pub fn reflection_pad2d_backward(
&self, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reflection_pad2d_backward_out`; writes into caller-provided `grad_input`.
pub fn reflection_pad2d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad2d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reflection_pad2d_out`; `output` is the caller-provided destination.
pub fn reflection_pad2d_out(
&self, output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reflection_pad2d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_relu`; wraps the tensor handle written through the out-pointer.
pub fn relu(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_relu(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_relu_` (presumably in-place per libtorch's underscore convention).
pub fn relu_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_relu_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder` with a scalar right-hand side.
pub fn remainder(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder1` — the tensor-RHS overload of `remainder` (the
/// generator disambiguates C++ overloads with numeric suffixes).
pub fn remainder1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder_` (scalar RHS; presumably in-place per underscore convention).
pub fn remainder_(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder_(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder_1` (tensor-RHS overload of the in-place remainder).
pub fn remainder_1(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder_1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder_out` (scalar RHS); `result` is the caller-provided destination.
pub fn remainder_out(
&self, result: &Tensor, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_remainder_out1` (tensor RHS); `result` is the caller-provided destination.
pub fn remainder_out1(
&self, result: &Tensor, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_remainder_out1(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_renorm` with norm order `p`, dimension `dim`, and cap `maxnorm`.
pub fn renorm(
&self, p: &Scalar, dim: i64, maxnorm: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_renorm(c_tensors.as_mut_ptr(),
self.c_tensor,
p.c_scalar,
dim,
maxnorm.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_renorm_` (presumably in-place per underscore convention).
pub fn renorm_(
&self, p: &Scalar, dim: i64, maxnorm: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_renorm_(c_tensors.as_mut_ptr(),
self.c_tensor,
p.c_scalar,
dim,
maxnorm.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_renorm_out`; `result` is the caller-provided destination.
pub fn renorm_out(
&self, result: &Tensor, p: &Scalar, dim: i64, maxnorm: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_renorm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
p.c_scalar,
dim,
maxnorm.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_repeat`; `repeats` is passed as (pointer, length) pair.
pub fn repeat(
&self, repeats: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_repeat(c_tensors.as_mut_ptr(),
self.c_tensor,
repeats.as_ptr(), repeats.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad1d`; `padding` is passed as (pointer, length) pair.
pub fn replication_pad1d(
&self, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad1d(c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad1d_backward` (gradient of the pad op w.r.t. `self`).
pub fn replication_pad1d_backward(
&self, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad1d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad1d_backward_out`; writes into caller-provided `grad_input`.
pub fn replication_pad1d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad1d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad1d_out`; `output` is the caller-provided destination.
pub fn replication_pad1d_out(
&self, output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad1d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad2d`; `padding` is passed as (pointer, length) pair.
pub fn replication_pad2d(
&self, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad2d(c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad2d_backward` (gradient of the pad op w.r.t. `self`).
pub fn replication_pad2d_backward(
&self, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad2d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad2d_backward_out`; writes into caller-provided `grad_input`.
pub fn replication_pad2d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad2d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad2d_out`; `output` is the caller-provided destination.
pub fn replication_pad2d_out(
&self, output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad2d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad3d`; `padding` is passed as (pointer, length) pair.
pub fn replication_pad3d(
&self, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad3d(c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad3d_backward` (gradient of the pad op w.r.t. `self`).
pub fn replication_pad3d_backward(
&self, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad3d_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad3d_backward_out`; writes into caller-provided `grad_input`.
pub fn replication_pad3d_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad3d_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_replication_pad3d_out`; `output` is the caller-provided destination.
pub fn replication_pad3d_out(
&self, output: &Tensor, padding: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_replication_pad3d_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
padding.as_ptr(), padding.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reshape`; `shape` is passed as (pointer, length) pair.
pub fn reshape(
&self, shape: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reshape(c_tensors.as_mut_ptr(),
self.c_tensor,
shape.as_ptr(), shape.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_reshape_as`; target shape is taken from `other` on the C side.
pub fn reshape_as(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_reshape_as(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_resize_` — presumably resizes the tensor storage in place on the
/// C side (note the Rust signature still takes `&self`; mutability is not tracked here).
pub fn resize_(
&self, size: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_resize_(c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(), size.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_resize_as_`; resizes to match `the_template` (in place on the C side).
pub fn resize_as_(
&self, the_template: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_resize_as_(c_tensors.as_mut_ptr(),
self.c_tensor,
the_template.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rfft`; Rust `bool` flags are lowered to C ints (1/0).
pub fn rfft(
&self, signal_ndim: i64, normalized: bool, onesided: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rfft(c_tensors.as_mut_ptr(),
self.c_tensor,
signal_ndim,
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rnn_relu`. Returns two tensors — presumably (output, final hidden
/// state) per libtorch's RNN API; `params` is flattened to a raw pointer list.
pub fn rnn_relu(
&self, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool, batch_first: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_rnn_relu(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_rnn_relu1` — the packed-sequence overload (associated function,
/// no `self`): input is (`data`, `batch_sizes`) rather than a padded tensor.
pub fn rnn_relu1(
data: &Tensor, batch_sizes: &Tensor, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_rnn_relu1(c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_rnn_relu_cell`; optional biases are lowered to null pointers when `None`.
pub fn rnn_relu_cell(
&self, hx: &Tensor, w_ih: &Tensor, w_hh: &Tensor, b_ih: Option<&Tensor>, b_hh: Option<&Tensor>
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rnn_relu_cell(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.c_tensor)
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rnn_tanh`; same shape as `rnn_relu` but with tanh nonlinearity.
pub fn rnn_tanh(
&self, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool, batch_first: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_rnn_tanh(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_rnn_tanh1` — packed-sequence overload (associated function, no `self`).
pub fn rnn_tanh1(
data: &Tensor, batch_sizes: &Tensor, hx: &Tensor, params: &[&Tensor], has_biases: bool, num_layers: i64, dropout: f64, train: bool, bidirectional: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_rnn_tanh1(c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(), params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_rnn_tanh_cell`; optional biases are lowered to null pointers when `None`.
pub fn rnn_tanh_cell(
&self, hx: &Tensor, w_ih: &Tensor, w_hh: &Tensor, b_ih: Option<&Tensor>, b_hh: Option<&Tensor>
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rnn_tanh_cell(c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.c_tensor)
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_roipooling2d_backward` (backward pass of ROI pooling).
pub fn roipooling2d_backward(
&self, rois: &Tensor, pooledheight: i64, pooledwidth: i64, spatialscale: f64, gradoutput: &Tensor, argmaxes: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_roipooling2d_backward(c_tensors.as_mut_ptr(),
self.c_tensor,
rois.c_tensor,
pooledheight,
pooledwidth,
spatialscale,
gradoutput.c_tensor,
argmaxes.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_roll`; both slices are passed as (pointer, length) pairs.
pub fn roll(
&self, shifts: &[i64], dims: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_roll(c_tensors.as_mut_ptr(),
self.c_tensor,
shifts.as_ptr(), shifts.len() as i32,
dims.as_ptr(), dims.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rot90`; `k` is the rotation count, `dims` the plane of rotation.
pub fn rot90(
&self, k: i64, dims: &[i64]
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rot90(c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dims.as_ptr(), dims.len() as i32
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_round`; wraps the tensor handle written through the out-pointer.
pub fn round(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_round(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_round_` (presumably in-place per underscore convention).
pub fn round_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_round_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_round_out`; `result` is the caller-provided destination.
pub fn round_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_round_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rrelu`; `training` bool lowered to a C int (1/0).
pub fn rrelu(
&self, training: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rrelu(c_tensors.as_mut_ptr(),
self.c_tensor,
if training { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rrelu_` (presumably in-place per underscore convention).
pub fn rrelu_(
&self, training: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rrelu_(c_tensors.as_mut_ptr(),
self.c_tensor,
if training { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rrelu_with_noise`; `noise` supplies the random slope tensor.
pub fn rrelu_with_noise(
&self, noise: &Tensor, training: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rrelu_with_noise(c_tensors.as_mut_ptr(),
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rrelu_with_noise_` (presumably in-place per underscore convention).
pub fn rrelu_with_noise_(
&self, noise: &Tensor, training: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rrelu_with_noise_(c_tensors.as_mut_ptr(),
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rrelu_with_noise_out`; `output` is the caller-provided destination.
pub fn rrelu_with_noise_out(
&self, output: &Tensor, noise: &Tensor, training: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rrelu_with_noise_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rsqrt`; wraps the tensor handle written through the out-pointer.
pub fn rsqrt(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rsqrt(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rsqrt_` (presumably in-place per underscore convention).
pub fn rsqrt_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rsqrt_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rsqrt_out`; `result` is the caller-provided destination.
pub fn rsqrt_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rsqrt_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rsub` (reverse subtraction) with a tensor right-hand side.
pub fn rsub(
&self, other: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rsub(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_rsub1` — the scalar-RHS overload of `rsub`.
pub fn rsub1(
&self, other: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_rsub1(c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_s_native_addmm` (native addmm kernel: self + mat1 @ mat2).
pub fn s_native_addmm(
&self, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_s_native_addmm(c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_s_native_addmm_` (presumably in-place per underscore convention).
pub fn s_native_addmm_(
&self, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_s_native_addmm_(c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_s_native_addmm_out`; `result` is the caller-provided destination.
pub fn s_native_addmm_out(
&self, result: &Tensor, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_s_native_addmm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_scatter_` (tensor-source scatter; presumably in-place per
/// underscore convention).
pub fn scatter_(
&self, dim: i64, index: &Tensor, src: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_scatter_(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_scatter_1` — the scalar-value overload of in-place scatter.
pub fn scatter_1(
&self, dim: i64, index: &Tensor, value: &Scalar
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_scatter_1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_scalar
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_scatter_add_` (accumulating scatter; presumably in-place).
pub fn scatter_add_(
&self, dim: i64, index: &Tensor, src: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_scatter_add_(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_select`; picks slice `index` along dimension `dim`.
pub fn select(
&self, dim: i64, index: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_select(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_selu`; wraps the tensor handle written through the out-pointer.
pub fn selu(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_selu(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_selu_` (presumably in-place per underscore convention).
pub fn selu_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_selu_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_set_` — presumably detaches the tensor's storage (nullary
/// overload of `Tensor::set_`); mutation happens on the C side.
pub fn set_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_set_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_set_1` — the tensor-source overload of `set_`.
pub fn set_1(
&self, source: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_set_1(c_tensors.as_mut_ptr(),
self.c_tensor,
source.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_set_requires_grad`; toggles autograd tracking for this tensor.
pub fn set_requires_grad(
&self, r: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_set_requires_grad(c_tensors.as_mut_ptr(),
self.c_tensor,
if r { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sigmoid`; wraps the tensor handle written through the out-pointer.
pub fn sigmoid(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sigmoid(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sigmoid_` (presumably in-place per underscore convention).
pub fn sigmoid_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sigmoid_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sigmoid_out`; `result` is the caller-provided destination.
pub fn sigmoid_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sigmoid_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sign`; wraps the tensor handle written through the out-pointer.
pub fn sign(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sign(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sign_` (presumably in-place per underscore convention).
pub fn sign_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sign_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sign_out`; `result` is the caller-provided destination.
pub fn sign_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sign_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sin`; wraps the tensor handle written through the out-pointer.
pub fn sin(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sin(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sin_` (presumably in-place per underscore convention).
pub fn sin_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sin_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sin_out`; `result` is the caller-provided destination.
pub fn sin_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sin_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sinh`; wraps the tensor handle written through the out-pointer.
pub fn sinh(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sinh(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sinh_` (presumably in-place per underscore convention).
pub fn sinh_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sinh_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sinh_out`; `result` is the caller-provided destination.
pub fn sinh_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sinh_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_slice`; half-open range [`start`, `end_`) along `dim` with `step`
/// (`end_` renamed by the generator to avoid shadowing).
pub fn slice(
&self, dim: i64, start: i64, end_: i64, step: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_slice(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
end_,
step
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_slogdet`; returns two tensors — presumably (sign, log|det|).
pub fn slogdet(
&self,
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_slogdet(c_tensors.as_mut_ptr(),
self.c_tensor
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_smm` (sparse matrix multiply of `self` with `mat2`).
pub fn smm(
&self, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_smm(c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_smooth_l1_loss`; `reduction` is the libtorch reduction enum value.
pub fn smooth_l1_loss(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_smooth_l1_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_smooth_l1_loss_backward` (gradient of the loss w.r.t. `self`).
pub fn smooth_l1_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_smooth_l1_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_smooth_l1_loss_backward_out`; writes into caller-provided `grad_input`.
pub fn smooth_l1_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_smooth_l1_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_smooth_l1_loss_out`; `output` is the caller-provided destination.
pub fn smooth_l1_loss_out(
&self, output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_smooth_l1_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_soft_margin_loss`; `reduction` is the libtorch reduction enum value.
pub fn soft_margin_loss(
&self, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_soft_margin_loss(c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_soft_margin_loss_backward` (gradient of the loss w.r.t. `self`).
pub fn soft_margin_loss_backward(
&self, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_soft_margin_loss_backward(c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_soft_margin_loss_backward_out`; writes into caller-provided `grad_input`.
pub fn soft_margin_loss_backward_out(
&self, grad_input: &Tensor, grad_output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_soft_margin_loss_backward_out(c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_soft_margin_loss_out`; `output` is the caller-provided destination.
pub fn soft_margin_loss_out(
&self, output: &Tensor, target: &Tensor, reduction: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_soft_margin_loss_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softmax` along dimension `dim`.
pub fn softmax(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softmax(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softmax1` — overload with an explicit output dtype, lowered to a C int.
pub fn softmax1(
&self, dim: i64, dtype: Kind
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softmax1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softplus`; wraps the tensor handle written through the out-pointer.
pub fn softplus(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softplus(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softplus_out`; `output` is the caller-provided destination.
pub fn softplus_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softplus_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softshrink`; wraps the tensor handle written through the out-pointer.
pub fn softshrink(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softshrink(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_softshrink_out`; `output` is the caller-provided destination.
pub fn softshrink_out(
&self, output: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_softshrink_out(c_tensors.as_mut_ptr(),
output.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sort`; returns two tensors — presumably (sorted values, indices).
pub fn sort(
&self, dim: i64, descending: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_sort(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if descending { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_sort_out`; `values`/`indices` are caller-provided destinations.
pub fn sort_out(
&self, values: &Tensor, indices: &Tensor, dim: i64, descending: bool
) -> (Tensor, Tensor) {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch!({
atg_sort_out(c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if descending { 1 } else { 0 }
) });
(Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })
}
/// FFI shim for `atg_sparse_coo_tensor` (associated constructor, no `self`); the
/// (Kind, Device) options tuple is lowered to two C ints.
pub fn sparse_coo_tensor(
size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sparse_coo_tensor(c_tensors.as_mut_ptr(),
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sparse_coo_tensor1` — overload taking explicit `indices`/`values`.
pub fn sparse_coo_tensor1(
indices: &Tensor, values: &Tensor, options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sparse_coo_tensor1(c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sparse_coo_tensor2` — overload with an explicit dense `size`.
pub fn sparse_coo_tensor2(
indices: &Tensor, values: &Tensor, size: &[i64], options: (Kind, Device)
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sparse_coo_tensor2(c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
size.as_ptr(), size.len() as i32,
options.0.c_int(), options.1.c_int()
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sparse_resize_` (presumably in-place per underscore convention).
pub fn sparse_resize_(
&self, size: &[i64], sparse_dim: i64, dense_dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sparse_resize_(c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(), size.len() as i32,
sparse_dim,
dense_dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sparse_resize_and_clear_` (presumably in-place; also clears values).
pub fn sparse_resize_and_clear_(
&self, size: &[i64], sparse_dim: i64, dense_dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sparse_resize_and_clear_(c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(), size.len() as i32,
sparse_dim,
dense_dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sqrt`; wraps the tensor handle written through the out-pointer.
pub fn sqrt(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sqrt(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sqrt_` (presumably in-place per underscore convention).
pub fn sqrt_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sqrt_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sqrt_out`; `result` is the caller-provided destination.
pub fn sqrt_out(
&self, result: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sqrt_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_squeeze` (all-dims overload).
pub fn squeeze(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_squeeze(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_squeeze1` — single-dimension overload of `squeeze`.
pub fn squeeze1(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_squeeze1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_squeeze_` (presumably in-place per underscore convention).
pub fn squeeze_(
&self,
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_squeeze_(c_tensors.as_mut_ptr(),
self.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_squeeze_1` — single-dimension overload of in-place squeeze.
pub fn squeeze_1(
&self, dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_squeeze_1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sspaddmm` (sparse addmm: self + mat1 @ mat2, sparse output).
pub fn sspaddmm(
&self, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sspaddmm(c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_sspaddmm_out`; `result` is the caller-provided destination.
pub fn sspaddmm_out(
&self, result: &Tensor, mat1: &Tensor, mat2: &Tensor
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_sspaddmm_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_stack` (associated function, no `self`); the tensor list is
/// flattened to a raw pointer list via `ptr_list`.
pub fn stack(
tensors: &[&Tensor], dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_stack(c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(), tensors.len() as i32,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_stack_out`; `result` is the caller-provided destination.
pub fn stack_out(
result: &Tensor, tensors: &[&Tensor], dim: i64
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_stack_out(c_tensors.as_mut_ptr(),
result.c_tensor,
ptr_list(tensors).as_ptr(), tensors.len() as i32,
dim
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_std` (full-tensor standard deviation).
pub fn std(
&self, unbiased: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_std(c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_std1` — per-dimension overload of `std`.
pub fn std1(
&self, dim: i64, unbiased: bool, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_std1(c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_std_out`; `result` is the caller-provided destination.
pub fn std_out(
&self, result: &Tensor, dim: i64, unbiased: bool, keepdim: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_std_out(c_tensors.as_mut_ptr(),
result.c_tensor,
self.c_tensor,
dim,
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// FFI shim for `atg_stft`; optional `window` is lowered to a null pointer when `None`.
pub fn stft(
&self, n_fft: i64, hop_length: i64, win_length: i64, window: Option<&Tensor>, normalized: bool, onesided: bool
) -> Tensor {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch!({
atg_stft(c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length,
win_length,
window.map_or(std::ptr::null_mut(), |t| t.c_tensor),
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 }
) });
Tensor { c_tensor: c_tensors[0] }
}
/// Thin wrapper around the C function `atg_sub` (tensor - tensor).
pub fn g_sub(&self, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sub(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sub1` (tensor - scalar).
pub fn g_sub1(&self, other: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sub1(outputs.as_mut_ptr(), self.c_tensor, other.c_scalar) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sub_` (the trailing underscore follows the C name).
pub fn g_sub_(&self, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sub_(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sub_1` with a scalar right-hand side.
pub fn g_sub_1(&self, other: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sub_1(outputs.as_mut_ptr(), self.c_tensor, other.c_scalar) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sub_out`; `result` is the destination tensor.
pub fn sub_out(&self, result: &Tensor, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sub_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor, other.c_tensor)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum`.
pub fn sum(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sum(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum1`; `dtype` is encoded via `Kind::c_int`.
pub fn sum1(&self, dtype: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_sum1(outputs.as_mut_ptr(), self.c_tensor, dtype.c_int()) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum2`; `dim` is passed as pointer + length.
pub fn sum2(&self, dim: &[i64], keepdim: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum2(outputs.as_mut_ptr(), self.c_tensor, dim.as_ptr(), dim.len() as i32,
                 if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum3`; `dim` is passed as pointer + length.
pub fn sum3(&self, dim: &[i64], dtype: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum3(outputs.as_mut_ptr(), self.c_tensor, dim.as_ptr(), dim.len() as i32, dtype.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum4`.
pub fn sum4(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum4(outputs.as_mut_ptr(), self.c_tensor, dim.as_ptr(), dim.len() as i32,
                 if keepdim { 1 } else { 0 }, dtype.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum_out`; `result` is the destination tensor.
pub fn sum_out(&self, result: &Tensor, dim: &[i64], keepdim: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor,
                    dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum_out1`; `result` is the destination tensor.
pub fn sum_out1(&self, result: &Tensor, dim: &[i64], dtype: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum_out1(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor,
                     dim.as_ptr(), dim.len() as i32, dtype.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_sum_out2`; `result` is the destination tensor.
pub fn sum_out2(&self, result: &Tensor, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_sum_out2(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor,
                     dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, dtype.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_svd`; the C call fills three output handles.
pub fn svd(&self, some: bool, compute_uv: bool) -> (Tensor, Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 3];
    unsafe_torch!({
        atg_svd(outputs.as_mut_ptr(), self.c_tensor,
                if some { 1 } else { 0 }, if compute_uv { 1 } else { 0 })
    });
    (
        Tensor { c_tensor: outputs[0] },
        Tensor { c_tensor: outputs[1] },
        Tensor { c_tensor: outputs[2] },
    )
}
/// Thin wrapper around the C function `atg_svd_out`; `u`, `s`, `v` are destination tensors.
pub fn svd_out(&self, u: &Tensor, s: &Tensor, v: &Tensor, some: bool, compute_uv: bool) -> (Tensor, Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 3];
    unsafe_torch!({
        atg_svd_out(outputs.as_mut_ptr(), u.c_tensor, s.c_tensor, v.c_tensor, self.c_tensor,
                    if some { 1 } else { 0 }, if compute_uv { 1 } else { 0 })
    });
    (
        Tensor { c_tensor: outputs[0] },
        Tensor { c_tensor: outputs[1] },
        Tensor { c_tensor: outputs[2] },
    )
}
/// Thin wrapper around the C function `atg_symeig`; the C call fills two output handles.
pub fn symeig(&self, eigenvectors: bool, upper: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_symeig(outputs.as_mut_ptr(), self.c_tensor,
                   if eigenvectors { 1 } else { 0 }, if upper { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_symeig_out`; `e` and `v` are destination tensors.
pub fn symeig_out(&self, e: &Tensor, v: &Tensor, eigenvectors: bool, upper: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_symeig_out(outputs.as_mut_ptr(), e.c_tensor, v.c_tensor, self.c_tensor,
                       if eigenvectors { 1 } else { 0 }, if upper { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_t` (named `tr` on the Rust side).
pub fn tr(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_t(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_t_`.
pub fn t_(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_t_(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_take`.
pub fn take(&self, index: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_take(outputs.as_mut_ptr(), self.c_tensor, index.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_take_out`; `result` is the destination tensor.
pub fn take_out(&self, result: &Tensor, index: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_take_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor, index.c_tensor)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tan`.
pub fn tan(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tan(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tan_`.
pub fn tan_(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tan_(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tan_out`; `result` is the destination tensor.
pub fn tan_out(&self, result: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tan_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tanh`.
pub fn tanh(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tanh(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tanh_`.
pub fn tanh_(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tanh_(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tanh_out`; `result` is the destination tensor.
pub fn tanh_out(&self, result: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tanh_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tensordot`; dimension lists are pointer + length pairs.
pub fn tensordot(&self, other: &Tensor, dims_self: &[i64], dims_other: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_tensordot(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor,
                      dims_self.as_ptr(), dims_self.len() as i32,
                      dims_other.as_ptr(), dims_other.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_threshold`.
pub fn threshold(&self, threshold: &Scalar, value: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_threshold(outputs.as_mut_ptr(), self.c_tensor, threshold.c_scalar, value.c_scalar)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_threshold_`.
pub fn threshold_(&self, threshold: &Scalar, value: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_threshold_(outputs.as_mut_ptr(), self.c_tensor, threshold.c_scalar, value.c_scalar)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_threshold_backward`; note `grad_output` precedes `self` in the C call.
pub fn threshold_backward(&self, grad_output: &Tensor, threshold: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_threshold_backward(outputs.as_mut_ptr(), grad_output.c_tensor, self.c_tensor, threshold.c_scalar)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_threshold_out`; `result` is the destination tensor.
pub fn threshold_out(&self, result: &Tensor, threshold: &Scalar, value: &Scalar) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_threshold_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor,
                          threshold.c_scalar, value.c_scalar)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to`; the device is encoded via `Device::c_int`.
pub fn to_(&self, device: Device) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_to(outputs.as_mut_ptr(), self.c_tensor, device.c_int()) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to1`; `options` carries the (kind, device) pair.
pub fn to1(&self, options: (Kind, Device), non_blocking: bool, copy: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    let (kind, device) = options;
    unsafe_torch!({
        atg_to1(outputs.as_mut_ptr(), self.c_tensor, kind.c_int(), device.c_int(),
                if non_blocking { 1 } else { 0 }, if copy { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to2`.
pub fn to2(&self, dtype: Kind, non_blocking: bool, copy: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_to2(outputs.as_mut_ptr(), self.c_tensor, dtype.c_int(),
                if non_blocking { 1 } else { 0 }, if copy { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to3`.
pub fn to3(&self, other: &Tensor, non_blocking: bool, copy: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_to3(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor,
                if non_blocking { 1 } else { 0 }, if copy { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to4`.
pub fn to4(&self, device: Device, dtype: Kind, non_blocking: bool, copy: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_to4(outputs.as_mut_ptr(), self.c_tensor, device.c_int(), dtype.c_int(),
                if non_blocking { 1 } else { 0 }, if copy { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to_dense`.
pub fn to_dense(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_to_dense(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to_sparse`.
pub fn to_sparse(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_to_sparse(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_to_sparse1`.
pub fn to_sparse1(&self, sparse_dim: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_to_sparse1(outputs.as_mut_ptr(), self.c_tensor, sparse_dim) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_topk`; the C call fills two output handles.
pub fn topk(&self, k: i64, dim: i64, largest: bool, sorted: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_topk(outputs.as_mut_ptr(), self.c_tensor, k, dim,
                 if largest { 1 } else { 0 }, if sorted { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_topk_out`; `values` and `indices` are destination tensors.
pub fn topk_out(&self, values: &Tensor, indices: &Tensor, k: i64, dim: i64, largest: bool, sorted: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_topk_out(outputs.as_mut_ptr(), values.c_tensor, indices.c_tensor, self.c_tensor, k, dim,
                     if largest { 1 } else { 0 }, if sorted { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_totype`; the kind is encoded via `Kind::c_int`.
pub fn totype(&self, scalar_type: Kind) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_totype(outputs.as_mut_ptr(), self.c_tensor, scalar_type.c_int()) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_trace`.
pub fn trace(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_trace(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_transpose`.
pub fn transpose(&self, dim0: i64, dim1: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_transpose(outputs.as_mut_ptr(), self.c_tensor, dim0, dim1) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_transpose_`.
pub fn transpose_(&self, dim0: i64, dim1: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_transpose_(outputs.as_mut_ptr(), self.c_tensor, dim0, dim1) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tril`.
pub fn tril(&self, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tril(outputs.as_mut_ptr(), self.c_tensor, diagonal) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tril_`.
pub fn tril_(&self, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_tril_(outputs.as_mut_ptr(), self.c_tensor, diagonal) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_tril_out`; `result` is the destination tensor.
pub fn tril_out(&self, result: &Tensor, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_tril_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor, diagonal)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_triplet_margin_loss`.
pub fn triplet_margin_loss(anchor: &Tensor, positive: &Tensor, negative: &Tensor, margin: f64, p: f64, eps: f64, swap: bool, reduction: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_triplet_margin_loss(outputs.as_mut_ptr(),
                                anchor.c_tensor, positive.c_tensor, negative.c_tensor,
                                margin, p, eps, if swap { 1 } else { 0 }, reduction)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_triu`.
pub fn triu(&self, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_triu(outputs.as_mut_ptr(), self.c_tensor, diagonal) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_triu_`.
pub fn triu_(&self, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_triu_(outputs.as_mut_ptr(), self.c_tensor, diagonal) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_triu_out`; `result` is the destination tensor.
pub fn triu_out(&self, result: &Tensor, diagonal: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_triu_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor, diagonal)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_trtrs`; the C call fills two output handles.
pub fn trtrs(&self, a: &Tensor, upper: bool, transpose: bool, unitriangular: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_trtrs(outputs.as_mut_ptr(), self.c_tensor, a.c_tensor,
                  if upper { 1 } else { 0 }, if transpose { 1 } else { 0 },
                  if unitriangular { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_trtrs_out`; `x` and `m` are destination tensors.
pub fn trtrs_out(&self, x: &Tensor, m: &Tensor, a: &Tensor, upper: bool, transpose: bool, unitriangular: bool) -> (Tensor, Tensor) {
    let mut outputs = [std::ptr::null_mut(); 2];
    unsafe_torch!({
        atg_trtrs_out(outputs.as_mut_ptr(), x.c_tensor, m.c_tensor, self.c_tensor, a.c_tensor,
                      if upper { 1 } else { 0 }, if transpose { 1 } else { 0 },
                      if unitriangular { 1 } else { 0 })
    });
    (Tensor { c_tensor: outputs[0] }, Tensor { c_tensor: outputs[1] })
}
/// Thin wrapper around the C function `atg_trunc`.
pub fn trunc(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_trunc(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_trunc_`.
pub fn trunc_(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_trunc_(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_trunc_out`; `result` is the destination tensor.
pub fn trunc_out(&self, result: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_trunc_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_type_as`.
pub fn type_as(&self, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_type_as(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_unfold`.
pub fn unfold(&self, dimension: i64, size: i64, step: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_unfold(outputs.as_mut_ptr(), self.c_tensor, dimension, size, step)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_uniform_` with bounds `from` / `to_`.
pub fn uniform_(&self, from: f64, to_: f64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_uniform_(outputs.as_mut_ptr(), self.c_tensor, from, to_) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_unsqueeze`.
pub fn unsqueeze(&self, dim: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_unsqueeze(outputs.as_mut_ptr(), self.c_tensor, dim) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_unsqueeze_`.
pub fn unsqueeze_(&self, dim: i64) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_unsqueeze_(outputs.as_mut_ptr(), self.c_tensor, dim) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_bilinear2d`.
pub fn upsample_bilinear2d(&self, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_bilinear2d(outputs.as_mut_ptr(), self.c_tensor,
                                output_size.as_ptr(), output_size.len() as i32,
                                if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_bilinear2d_backward`.
pub fn upsample_bilinear2d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_bilinear2d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                         output_size.as_ptr(), output_size.len() as i32,
                                         input_size.as_ptr(), input_size.len() as i32,
                                         if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_bilinear2d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_bilinear2d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_bilinear2d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                             output_size.as_ptr(), output_size.len() as i32,
                                             input_size.as_ptr(), input_size.len() as i32,
                                             if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_bilinear2d_out`; `output` is the destination tensor.
pub fn upsample_bilinear2d_out(&self, output: &Tensor, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_bilinear2d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                    output_size.as_ptr(), output_size.len() as i32,
                                    if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_linear1d`.
pub fn upsample_linear1d(&self, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_linear1d(outputs.as_mut_ptr(), self.c_tensor,
                              output_size.as_ptr(), output_size.len() as i32,
                              if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_linear1d_backward`.
pub fn upsample_linear1d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_linear1d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                       output_size.as_ptr(), output_size.len() as i32,
                                       input_size.as_ptr(), input_size.len() as i32,
                                       if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_linear1d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_linear1d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_linear1d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                           output_size.as_ptr(), output_size.len() as i32,
                                           input_size.as_ptr(), input_size.len() as i32,
                                           if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_linear1d_out`; `output` is the destination tensor.
pub fn upsample_linear1d_out(&self, output: &Tensor, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_linear1d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                  output_size.as_ptr(), output_size.len() as i32,
                                  if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest1d`.
pub fn upsample_nearest1d(&self, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest1d(outputs.as_mut_ptr(), self.c_tensor,
                               output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest1d_backward`.
pub fn upsample_nearest1d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest1d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                        output_size.as_ptr(), output_size.len() as i32,
                                        input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest1d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_nearest1d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest1d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                            output_size.as_ptr(), output_size.len() as i32,
                                            input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest1d_out`; `output` is the destination tensor.
pub fn upsample_nearest1d_out(&self, output: &Tensor, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest1d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                   output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest2d`.
pub fn upsample_nearest2d(&self, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest2d(outputs.as_mut_ptr(), self.c_tensor,
                               output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest2d_backward`.
pub fn upsample_nearest2d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest2d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                        output_size.as_ptr(), output_size.len() as i32,
                                        input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest2d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_nearest2d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest2d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                            output_size.as_ptr(), output_size.len() as i32,
                                            input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest2d_out`; `output` is the destination tensor.
pub fn upsample_nearest2d_out(&self, output: &Tensor, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest2d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                   output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest3d`.
pub fn upsample_nearest3d(&self, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest3d(outputs.as_mut_ptr(), self.c_tensor,
                               output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest3d_backward`.
pub fn upsample_nearest3d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest3d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                        output_size.as_ptr(), output_size.len() as i32,
                                        input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest3d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_nearest3d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest3d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                            output_size.as_ptr(), output_size.len() as i32,
                                            input_size.as_ptr(), input_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_nearest3d_out`; `output` is the destination tensor.
pub fn upsample_nearest3d_out(&self, output: &Tensor, output_size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_nearest3d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                   output_size.as_ptr(), output_size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_trilinear3d`.
pub fn upsample_trilinear3d(&self, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_trilinear3d(outputs.as_mut_ptr(), self.c_tensor,
                                 output_size.as_ptr(), output_size.len() as i32,
                                 if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_trilinear3d_backward`.
pub fn upsample_trilinear3d_backward(grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_trilinear3d_backward(outputs.as_mut_ptr(), grad_output.c_tensor,
                                          output_size.as_ptr(), output_size.len() as i32,
                                          input_size.as_ptr(), input_size.len() as i32,
                                          if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_trilinear3d_backward_out`; `grad_input` is the destination tensor.
pub fn upsample_trilinear3d_backward_out(grad_input: &Tensor, grad_output: &Tensor, output_size: &[i64], input_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_trilinear3d_backward_out(outputs.as_mut_ptr(), grad_input.c_tensor, grad_output.c_tensor,
                                              output_size.as_ptr(), output_size.len() as i32,
                                              input_size.as_ptr(), input_size.len() as i32,
                                              if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_upsample_trilinear3d_out`; `output` is the destination tensor.
pub fn upsample_trilinear3d_out(&self, output: &Tensor, output_size: &[i64], align_corners: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_upsample_trilinear3d_out(outputs.as_mut_ptr(), output.c_tensor, self.c_tensor,
                                     output_size.as_ptr(), output_size.len() as i32,
                                     if align_corners { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_values`.
pub fn values(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_values(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_var`; `unbiased` is passed as 0/1.
pub fn var(&self, unbiased: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    let unbiased_flag = if unbiased { 1 } else { 0 };
    unsafe_torch!({ atg_var(outputs.as_mut_ptr(), self.c_tensor, unbiased_flag) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_var1`; boolean flags are passed as 0/1.
pub fn var1(&self, dim: i64, unbiased: bool, keepdim: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_var1(outputs.as_mut_ptr(), self.c_tensor, dim,
                 if unbiased { 1 } else { 0 }, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_var_out`; `result` is the destination tensor.
pub fn var_out(&self, result: &Tensor, dim: i64, unbiased: bool, keepdim: bool) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_var_out(outputs.as_mut_ptr(), result.c_tensor, self.c_tensor, dim,
                    if unbiased { 1 } else { 0 }, if keepdim { 1 } else { 0 })
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_view`; `size` is passed as pointer + length.
pub fn view(&self, size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_view(outputs.as_mut_ptr(), self.c_tensor, size.as_ptr(), size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_view_as`.
pub fn view_as(&self, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_view_as(outputs.as_mut_ptr(), self.c_tensor, other.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_where`; note `condition` precedes `self` in the C call.
pub fn where_(&self, condition: &Tensor, other: &Tensor) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_where(outputs.as_mut_ptr(), condition.c_tensor, self.c_tensor, other.c_tensor)
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_zero_`.
pub fn zero_(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_zero_(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_zeros`; `options` carries the (kind, device) pair.
pub fn zeros(size: &[i64], options: (Kind, Device)) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    let (kind, device) = options;
    unsafe_torch!({
        atg_zeros(outputs.as_mut_ptr(), size.as_ptr(), size.len() as i32,
                  kind.c_int(), device.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_zeros_like`.
pub fn zeros_like(&self) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({ atg_zeros_like(outputs.as_mut_ptr(), self.c_tensor) });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_zeros_like1`; `options` carries the (kind, device) pair.
pub fn zeros_like1(&self, options: (Kind, Device)) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    let (kind, device) = options;
    unsafe_torch!({
        atg_zeros_like1(outputs.as_mut_ptr(), self.c_tensor, kind.c_int(), device.c_int())
    });
    Tensor { c_tensor: outputs[0] }
}
/// Thin wrapper around the C function `atg_zeros_out`; `result` is the destination tensor.
pub fn zeros_out(result: &Tensor, size: &[i64]) -> Tensor {
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch!({
        atg_zeros_out(outputs.as_mut_ptr(), result.c_tensor, size.as_ptr(), size.len() as i32)
    });
    Tensor { c_tensor: outputs[0] }
}
}