use crate::{Device, Kind, Scalar, TchError, Tensor};
use std::borrow::Borrow;
use std::convert::Into;
use torch_sys::c_generated::*;
#[allow(clippy::all)]
use torch_sys::*;
/// Converts a slice of optional tensor handles into the raw C pointers the
/// libtorch FFI expects, mapping each `None` entry to a null pointer.
fn ptr_list_opt<T: Borrow<Tensor>>(l: &[Option<T>]) -> Vec<*mut C_tensor> {
    let mut ptrs = Vec::with_capacity(l.len());
    for entry in l {
        let ptr = match entry {
            Some(t) => t.borrow().c_tensor,
            None => std::ptr::null_mut(),
        };
        ptrs.push(ptr);
    }
    ptrs
}
/// Collects the raw C tensor pointer of every tensor in the slice, in order.
fn ptr_list<T: Borrow<Tensor>>(l: &[T]) -> Vec<*mut C_tensor> {
    let mut ptrs = Vec::with_capacity(l.len());
    for t in l {
        ptrs.push(t.borrow().c_tensor);
    }
    ptrs
}
impl Tensor {
/// Fallible binding for the C function `atg___and__` (scalar variant);
/// returns the produced tensor or a `TchError` from libtorch.
pub fn f_internal_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
// One output slot; the C call fills it with a new tensor handle.
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___and__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___and__1` (tensor variant);
/// returns the produced tensor or a `TchError` from libtorch.
pub fn f_internal_and_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___and__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___iand__` (scalar
/// variant); the returned tensor wraps the handle produced by the call.
pub fn f_internal_iand_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___iand__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___iand__1` (tensor
/// variant); the returned tensor wraps the handle produced by the call.
pub fn f_internal_iand_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___iand__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ilshift__` (scalar
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ilshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ilshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ilshift__1` (tensor
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ilshift_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ilshift__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ior__` (scalar
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ior_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ior__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ior__1` (tensor
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ior_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ior__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___irshift__` (scalar
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_irshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___irshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___irshift__1` (tensor
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_irshift_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___irshift__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ixor__` (scalar
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ixor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ixor__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg___ixor__1` (tensor
/// variant); returns the produced tensor or a `TchError`.
pub fn f_internal_ixor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ixor__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___lshift__` (scalar variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_lshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___lshift__1` (tensor variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_lshift_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___or__` (scalar variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___or__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___or__1` (tensor variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_or_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___or__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___rshift__` (scalar variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_rshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___rshift__1` (tensor variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_rshift_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___xor__` (scalar variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___xor__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the C function `atg___xor__1` (tensor variant);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_xor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___xor__1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__adaptive_avg_pool2d`; `output_size` is passed
/// to C as a pointer plus length pair. Returns the result tensor or an error.
pub fn f_internal_adaptive_avg_pool2d(&self, output_size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__adaptive_avg_pool2d_backward`; note the C
/// function takes `grad_output` first and `self` second.
pub fn f_internal_adaptive_avg_pool2d_backward(
&self,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__add_batch_dim`; returns the produced tensor
/// or a `TchError`.
pub fn f_internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_batch_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch_dim,
level
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__add_relu`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_add_relu(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg__add_relu_`; returns
/// the produced tensor or a `TchError`.
pub fn f_internal_add_relu_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__add_relu_out`, writing into the provided
/// `out` tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_add_relu_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg__addmv_impl_`; `self2`
/// mirrors the generated C argument name. Returns the result or a `TchError`.
pub fn f_internal_addmv_impl_(
&mut self,
self2: &Tensor,
mat: &Tensor,
vec: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__addmv_impl_(
c_tensors.as_mut_ptr(),
self.c_tensor,
self2.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__aminmax`; the C call fills two output slots,
/// returned here as a tensor pair.
pub fn f_internal_aminmax(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg__aminmax1` (per-dimension variant); `keepdim`
/// is lowered to the 0/1 int the C ABI expects. Returns a tensor pair.
pub fn f_internal_aminmax1(
&self,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg__amp_update_scale` (associated function, no
/// `self`); returns the produced tensor or a `TchError`.
pub fn f_internal_amp_update_scale(
growth_tracker: &Tensor,
current_scale: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__amp_update_scale(
c_tensors.as_mut_ptr(),
growth_tracker.c_tensor,
current_scale.c_tensor,
found_inf.c_tensor,
scale_growth_factor,
scale_backoff_factor,
growth_interval
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg__baddbmm_mkl_`;
/// returns the produced tensor or a `TchError`.
pub fn f_internal_baddbmm_mkl_(
&mut self,
batch1: &Tensor,
batch2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__baddbmm_mkl_(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__bmm`; `deterministic` is lowered to 0/1 for
/// the C ABI. Returns the produced tensor or a `TchError`.
pub fn f_internal_bmm(&self, mat2: &Tensor, deterministic: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__bmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor,
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__bmm_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_bmm_out(
&self,
out: &Tensor,
mat2: &Tensor,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__bmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor,
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_byte`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cast_byte(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_byte(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_char`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cast_char(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_char(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_double`; returns the produced tensor or
/// a `TchError`.
pub fn f_internal_cast_double(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_double(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_float`; returns the produced tensor or
/// a `TchError`.
pub fn f_internal_cast_float(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_float(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_half`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cast_half(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_half(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_int`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cast_int(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_long`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cast_long(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_long(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cast_short`; returns the produced tensor or
/// a `TchError`.
pub fn f_internal_cast_short(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_short(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cat` (associated function). The `ptr_list`
/// temporary stays alive for the whole statement, so the raw pointer passed
/// to C remains valid across the call.
pub fn f_internal_cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cat(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cat_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_cat_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cat_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cdist_backward` (associated function);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_cdist_backward(
grad: &Tensor,
x1: &Tensor,
x2: &Tensor,
p: f64,
cdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cdist_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
x1.c_tensor,
x2.c_tensor,
p,
cdist.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cholesky_helper`; returns the produced tensor
/// or a `TchError`.
pub fn f_internal_cholesky_helper(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cholesky_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cholesky_solve_helper`; returns the produced
/// tensor or a `TchError`.
pub fn f_internal_cholesky_solve_helper(
&self,
a: &Tensor,
upper: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cholesky_solve_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place C function `atg__coalesced_`; returns
/// the produced tensor or a `TchError`.
pub fn f_internal_coalesced_(&mut self, coalesced: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesced_(
c_tensors.as_mut_ptr(),
self.c_tensor,
if coalesced { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__compute_linear_combination`; returns the
/// produced tensor or a `TchError`.
pub fn f_internal_compute_linear_combination(
&self,
coefficients: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__compute_linear_combination(
c_tensors.as_mut_ptr(),
self.c_tensor,
coefficients.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__compute_linear_combination_out`, writing into
/// the provided `out` tensor handle; returns the produced tensor or an error.
pub fn f_internal_compute_linear_combination_out(
&self,
out: &Tensor,
coefficients: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__compute_linear_combination_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
coefficients.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__conj`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_conj(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__convolution`. Slice arguments are lowered to
/// pointer+length pairs, bools to 0/1 ints, and a `None` bias to a null
/// tensor pointer. Returns the produced tensor or a `TchError`.
pub fn f_internal_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
transposed: bool,
output_padding: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__convolution1` — the `atg__convolution`
/// overload with the extra `allow_tf32` flag. Returns the produced tensor
/// or a `TchError`.
pub fn f_internal_convolution1<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
transposed: bool,
output_padding: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution1(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if cudnn_enabled { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__convolution_nogroup`; a `None` bias is passed
/// as a null tensor pointer. Returns the produced tensor or a `TchError`.
pub fn f_internal_convolution_nogroup<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
transposed: bool,
output_padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution_nogroup(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__copy_from`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_copy_from(
&self,
dst: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__copy_from(
c_tensors.as_mut_ptr(),
self.c_tensor,
dst.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__ctc_loss` (associated function); length
/// slices are lowered to pointer+length pairs. Returns a tensor pair.
pub fn f_internal_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &[i64],
target_lengths: &[i64],
blank: i64,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len() as i32,
target_lengths.as_ptr(),
target_lengths.len() as i32,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg__ctc_loss_backward` (associated function);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_ctc_loss_backward(
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &[i64],
target_lengths: &[i64],
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__ctc_loss_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len() as i32,
target_lengths.as_ptr(),
target_lengths.len() as i32,
neg_log_likelihood.c_tensor,
log_alpha.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cudnn_ctc_loss` (associated function);
/// returns a tensor pair or a `TchError`.
pub fn f_internal_cudnn_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &[i64],
target_lengths: &[i64],
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__cudnn_ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len() as i32,
target_lengths.as_ptr(),
target_lengths.len() as i32,
blank,
if deterministic { 1 } else { 0 },
if zero_infinity { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg__cudnn_init_dropout_state` (associated
/// function); `options` is the (kind, device) pair lowered to the two C ints.
pub fn f_internal_cudnn_init_dropout_state(
dropout: f64,
train: bool,
dropout_seed: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_init_dropout_state(
c_tensors.as_mut_ptr(),
dropout,
if train { 1 } else { 0 },
dropout_seed,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cudnn_rnn`; optional tensors (`weight_buf`,
/// `cx`, `dropout_state`) are lowered to null pointers when `None`, and the
/// weight slice to a pointer+length pair. The C call fills five output
/// slots, returned here as a 5-tuple of tensors.
pub fn f_internal_cudnn_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
weight_buf: Option<T>,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: &[i64],
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg__cudnn_rnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
weight_buf.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
hx.c_tensor,
cx.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len() as i32,
dropout_state.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
Tensor {
c_tensor: c_tensors[4],
},
))
}
/// Fallible binding for `atg__cudnn_rnn_flatten_weight` (associated
/// function); returns the produced tensor or a `TchError`.
pub fn f_internal_cudnn_rnn_flatten_weight<T: Borrow<Tensor>>(
weight_arr: &[T],
weight_stride0: i64,
input_size: i64,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
bidirectional: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_rnn_flatten_weight(
c_tensors.as_mut_ptr(),
ptr_list(weight_arr).as_ptr(),
weight_arr.len() as i32,
weight_stride0,
input_size,
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cumprod`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cumprod(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cumprod(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cumprod_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_cumprod_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cumprod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cumsum`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_cumsum(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cumsum(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__cumsum_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_cumsum_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cumsum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__dim_arange` (associated function); returns
/// the produced tensor or a `TchError`.
pub fn f_internal_dim_arange(like: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__dim_arange(c_tensors.as_mut_ptr(), like.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__dirichlet_grad` (associated function);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_dirichlet_grad(
x: &Tensor,
alpha: &Tensor,
total: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__dirichlet_grad(
c_tensors.as_mut_ptr(),
x.c_tensor,
alpha.c_tensor,
total.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__embedding_bag` (associated function); a
/// `None` `per_sample_weights` is passed as a null pointer. The C call
/// fills four output slots, returned here as a 4-tuple of tensors.
pub fn f_internal_embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
))
}
/// Fallible binding for `atg__embedding_bag_backward` (associated
/// function); returns the produced tensor or a `TchError`.
pub fn f_internal_embedding_bag_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
maximum_indices.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__embedding_bag_dense_backward` (associated
/// function); returns the produced tensor or a `TchError`.
pub fn f_internal_embedding_bag_dense_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_dense_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
maximum_indices.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__embedding_bag_forward_only` (associated
/// function); same argument lowering as `f_internal_embedding_bag`.
/// Returns a 4-tuple of tensors or a `TchError`.
pub fn f_internal_embedding_bag_forward_only<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag_forward_only(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
))
}
/// Fallible binding for `atg__embedding_bag_per_sample_weights_backward`
/// (associated function); returns the produced tensor or a `TchError`.
pub fn f_internal_embedding_bag_per_sample_weights_backward(
grad: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
mode: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_per_sample_weights_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
mode
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__embedding_bag_sparse_backward` (associated
/// function); returns the produced tensor or a `TchError`.
pub fn f_internal_embedding_bag_sparse_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_sparse_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__empty_affine_quantized` (associated
/// function); `options` is lowered to the kind/device C ints.
pub fn f_internal_empty_affine_quantized(
size: &[i64],
options: (Kind, Device),
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_affine_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int(),
scale,
zero_point
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__empty_per_channel_affine_quantized`
/// (associated function); returns the produced tensor or a `TchError`.
pub fn f_internal_empty_per_channel_affine_quantized(
size: &[i64],
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_per_channel_affine_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
scales.c_tensor,
zero_points.c_tensor,
axis,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__euclidean_dist` (associated function);
/// returns the produced tensor or a `TchError`.
pub fn f_internal_euclidean_dist(x1: &Tensor, x2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__euclidean_dist(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__fake_quantize_learnable_per_channel_affine`;
/// returns the produced tensor or a `TchError`.
pub fn f_internal_fake_quantize_learnable_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for
/// `atg__fake_quantize_learnable_per_channel_affine_backward`; note the C
/// function takes `grad` before `self`. Returns a tensor triple.
pub fn f_internal_fake_quantize_learnable_per_channel_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max,
grad_factor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg__fake_quantize_learnable_per_tensor_affine`;
/// returns the produced tensor or a `TchError`.
pub fn f_internal_fake_quantize_learnable_per_tensor_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for
/// `atg__fake_quantize_learnable_per_tensor_affine_backward`; note the C
/// function takes `grad` before `self`. Returns a tensor triple.
pub fn f_internal_fake_quantize_learnable_per_tensor_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max,
grad_factor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg__fft_c2c`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_fft_c2c(
&self,
dim: &[i64],
normalization: i64,
forward: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2c(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
if forward { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__fft_c2c_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_fft_c2c_out(
&self,
out: &Tensor,
dim: &[i64],
normalization: i64,
forward: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2c_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
if forward { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__fft_c2r`; returns the produced tensor or a
/// `TchError`.
pub fn f_internal_fft_c2r(
&self,
dim: &[i64],
normalization: i64,
last_dim_size: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2r(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
last_dim_size
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg__fft_c2r_out`, writing into the provided `out`
/// tensor handle; returns the produced tensor or a `TchError`.
pub fn f_internal_fft_c2r_out(
&self,
out: &Tensor,
dim: &[i64],
normalization: i64,
last_dim_size: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2r_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
last_dim_size
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_internal_fft_r2c(
&self,
dim: &[i64],
normalization: i64,
onesided: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_r2c(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
if onesided { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_internal_fft_r2c_out(
&self,
out: &Tensor,
dim: &[i64],
normalization: i64,
onesided: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_r2c_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
normalization,
if onesided { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__fused_dropout`; returns the (output, mask) pair
/// filled into the 2-slot out-array by the C call.
pub fn f_internal_fused_dropout(&self, p: f64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fused_dropout(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__fw_primal` (forward-mode AD primal at `level`).
pub fn f_internal_fw_primal(&self, level: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fw_primal(c_tensors.as_mut_ptr(), self.c_tensor, level));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__gather_sparse_backward`.
pub fn f_internal_gather_sparse_backward(
&self,
dim: i64,
index: &Tensor,
grad: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__gather_sparse_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
grad.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__grid_sampler_2d_cpu_fallback`; `align_corners`
/// is marshalled as a 0/1 C int.
pub fn f_internal_grid_sampler_2d_cpu_fallback(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of the CPU grid-sampler fallback; returns the two gradients
/// produced by the C call (presumably w.r.t. input and grid — confirm).
pub fn f_internal_grid_sampler_2d_cpu_fallback_backward(
&self,
grad_output: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// In-place `atg__index_copy_`; takes `&mut self` (trailing `_` marks the
/// in-place ATen op) and also returns a wrapper around the result handle.
pub fn f_internal_index_copy_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__index_copy_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `atg__index_put_impl_`. `indices` may contain `None` entries;
/// `ptr_list_opt` maps those to null pointers. The temporary `Vec` of raw
/// pointers lives until the end of the macro statement, so `.as_ptr()` here
/// is sound.
pub fn f_internal_index_put_impl_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__index_put_impl_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 },
if unsafe_ { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__indices` (indices of a sparse tensor, presumably).
pub fn f_internal_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__inverse_helper`.
pub fn f_internal_inverse_helper(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__inverse_helper(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `atg__linalg_inv_out_helper_` with LU/getri info tensors.
pub fn f_internal_linalg_inv_out_helper_(
&mut self,
infos_lu: &Tensor,
infos_getri: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__linalg_inv_out_helper_(
c_tensors.as_mut_ptr(),
self.c_tensor,
infos_lu.c_tensor,
infos_getri.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__linalg_qr_helper`; the `mode` string is passed as
/// (ptr, byte-len), not NUL-terminated.
pub fn f_internal_linalg_qr_helper(&self, mode: &str) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__linalg_qr_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
mode.as_ptr(),
mode.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// In-place `atg__linalg_solve_out_helper_`.
pub fn f_internal_linalg_solve_out_helper_(
&mut self,
other: &Tensor,
infos: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__linalg_solve_out_helper_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
infos.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__log_softmax`; `half_to_float` is a 0/1 C int.
pub fn f_internal_log_softmax(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `_log_softmax`. Note the generated argument order: `self`
/// is passed LAST to the C call (it is the original forward input).
pub fn f_internal_log_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__logcumsumexp` along `dim`.
pub fn f_internal_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__logcumsumexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of [`Tensor::f_internal_logcumsumexp`].
pub fn f_internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__logcumsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__lu_solve_helper`.
pub fn f_internal_lu_solve_helper(
&self,
lu_data: &Tensor,
lu_pivots: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__lu_solve_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__lu_with_info`; returns the three tensors filled
/// into the out-array (LU factors, pivots, infos — presumably; confirm
/// against the libtorch docs).
pub fn f_internal_lu_with_info(
&self,
pivot: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__lu_with_info(
c_tensors.as_mut_ptr(),
self.c_tensor,
if pivot { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Associated function (no `self`) wrapping `atg__make_dual` for
/// forward-mode AD.
pub fn f_internal_make_dual(
primal: &Tensor,
tangent: &Tensor,
level: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_dual(
c_tensors.as_mut_ptr(),
primal.c_tensor,
tangent.c_tensor,
level
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__make_per_channel_quantized_tensor`.
pub fn f_internal_make_per_channel_quantized_tensor(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_channel_quantized_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__make_per_tensor_quantized_tensor` (scalar
/// scale/zero-point variant).
pub fn f_internal_make_per_tensor_quantized_tensor(
&self,
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_tensor_quantized_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__masked_scale`.
pub fn f_internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_scale(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
scale
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__mkldnn_reshape`; `shape` marshalled as (ptr, len).
pub fn f_internal_mkldnn_reshape(&self, shape: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_reshape(
c_tensors.as_mut_ptr(),
self.c_tensor,
shape.as_ptr(),
shape.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__mkldnn_transpose` swapping `dim0` and `dim1`.
pub fn f_internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant of [`Tensor::f_internal_mkldnn_transpose`].
pub fn f_internal_mkldnn_transpose_(
&mut self,
dim0: i64,
dim1: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_transpose_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__mode`; returns the (values, indices) pair filled
/// into the 2-slot out-array.
pub fn f_internal_mode(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Out-variant of [`Tensor::f_internal_mode`]; the caller-provided
/// `values`/`indices` handles precede `self` in the C argument order.
pub fn f_internal_mode_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__mode_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__nnpack_spatial_convolution`; an absent `bias`
/// becomes a null tensor pointer.
pub fn f_internal_nnpack_spatial_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
stride: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nnpack_spatial_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__nnpack_spatial_convolution_backward_input`.
pub fn f_internal_nnpack_spatial_convolution_backward_input(
&self,
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nnpack_spatial_convolution_backward_input(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__nnpack_spatial_convolution_backward_weight`;
/// `weightsize` gives the shape of the weight gradient to produce.
pub fn f_internal_nnpack_spatial_convolution_backward_weight(
&self,
weightsize: &[i64],
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nnpack_spatial_convolution_backward_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
weightsize.as_ptr(),
weightsize.len() as i32,
grad_output.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__pack_padded_sequence`; returns the packed data
/// and batch-sizes tensors filled into the 2-slot out-array.
pub fn f_internal_pack_padded_sequence(
&self,
lengths: &Tensor,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__pack_padded_sequence(
c_tensors.as_mut_ptr(),
self.c_tensor,
lengths.c_tensor,
if batch_first { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Associated function wrapping `atg__pack_padded_sequence_backward`.
pub fn f_internal_pack_padded_sequence_backward(
grad: &Tensor,
input_size: &[i64],
batch_sizes: &Tensor,
batch_first: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pack_padded_sequence_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_size.as_ptr(),
input_size.len() as i32,
batch_sizes.c_tensor,
if batch_first { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__pad_packed_sequence`; the scalar
/// `padding_value` is converted via `Into<Scalar>` before the call.
pub fn f_internal_pad_packed_sequence<S: Into<Scalar>>(
data: &Tensor,
batch_sizes: &Tensor,
batch_first: bool,
padding_value: S,
total_length: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__pad_packed_sequence(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
if batch_first { 1 } else { 0 },
padding_value.into().c_scalar,
total_length
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__pdist_backward` with p-norm parameter `p`.
pub fn f_internal_pdist_backward(
&self,
grad: &Tensor,
p: f64,
pdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pdist_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
p,
pdist.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__remove_batch_dim` (vmap internals).
pub fn f_internal_remove_batch_dim(
&self,
level: i64,
batch_size: i64,
out_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__remove_batch_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
level,
batch_size,
out_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__reshape_from_tensor` (shape supplied as a tensor).
pub fn f_internal_reshape_from_tensor(&self, shape: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_from_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
shape.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__rowwise_prune`; the `Kind` is
/// marshalled through `c_int()`.
pub fn f_internal_rowwise_prune(
weight: &Tensor,
mask: &Tensor,
compressed_indices_dtype: Kind,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__rowwise_prune(
c_tensors.as_mut_ptr(),
weight.c_tensor,
mask.c_tensor,
compressed_indices_dtype.c_int()
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__s_where`; note `condition` precedes `self` in
/// the generated C argument order.
pub fn f_internal_s_where(
&self,
condition: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__s_where(
c_tensors.as_mut_ptr(),
condition.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sample_dirichlet`.
pub fn f_internal_sample_dirichlet(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sample_dirichlet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__saturate_weight_to_fp16`.
pub fn f_internal_saturate_weight_to_fp16(weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__saturate_weight_to_fp16(
c_tensors.as_mut_ptr(),
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__shape_as_tensor`.
pub fn f_internal_shape_as_tensor(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__shape_as_tensor(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__sobol_engine_draw` (quasi-random
/// Sobol sequence); `dtype` is marshalled via `c_int()`.
pub fn f_internal_sobol_engine_draw(
quasi: &Tensor,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
dtype: Kind,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__sobol_engine_draw(
c_tensors.as_mut_ptr(),
quasi.c_tensor,
n,
sobolstate.c_tensor,
dimension,
num_generated,
dtype.c_int()
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// In-place `atg__sobol_engine_ff_` (fast-forward the engine).
pub fn f_internal_sobol_engine_ff_(
&mut self,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_ff_(
c_tensors.as_mut_ptr(),
self.c_tensor,
n,
sobolstate.c_tensor,
dimension,
num_generated
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `atg__sobol_engine_initialize_state_`.
pub fn f_internal_sobol_engine_initialize_state_(
&mut self,
dimension: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_initialize_state_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dimension
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `atg__sobol_engine_scramble_` using scrambling matrices `ltm`.
pub fn f_internal_sobol_engine_scramble_(
&mut self,
ltm: &Tensor,
dimension: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_scramble_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ltm.c_tensor,
dimension
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__softmax`; `half_to_float` is a 0/1 C int.
pub fn f_internal_softmax(&self, dim: i64, half_to_float: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `_softmax`; as in the log-softmax variant, `self` (the
/// forward input) is passed last to the C call.
pub fn f_internal_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__solve_helper`; returns the two tensors filled
/// into the out-array (solution and LU factors — presumably; confirm).
pub fn f_internal_solve_helper(&self, a: &Tensor) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__solve_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__sparse_addmm`.
pub fn f_internal_sparse_addmm(
&self,
sparse: &Tensor,
dense: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_addmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
sparse.c_tensor,
dense.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated constructor wrapping `atg__sparse_coo_tensor_unsafe`;
/// the (Kind, Device) options pair is marshalled as two C ints.
pub fn f_internal_sparse_coo_tensor_unsafe(
indices: &Tensor,
values: &Tensor,
size: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_unsafe(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated constructor wrapping `atg__sparse_coo_tensor_with_dims`.
pub fn f_internal_sparse_coo_tensor_with_dims(
sparse_dim: i64,
dense_dim: i64,
size: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims(
c_tensors.as_mut_ptr(),
sparse_dim,
dense_dim,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated constructor wrapping
/// `atg__sparse_coo_tensor_with_dims_and_tensors`.
pub fn f_internal_sparse_coo_tensor_with_dims_and_tensors(
sparse_dim: i64,
dense_dim: i64,
size: &[i64],
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_and_tensors(
c_tensors.as_mut_ptr(),
sparse_dim,
dense_dim,
size.as_ptr(),
size.len() as i32,
indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sparse_log_softmax` (dtype overload).
pub fn f_internal_sparse_log_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Overload of `_sparse_log_softmax` taking a `half_to_float` flag
/// (generated suffix `1` distinguishes the C overloads).
pub fn f_internal_sparse_log_softmax1(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `_sparse_log_softmax`; `self` (forward input) is passed
/// last to the C call.
pub fn f_internal_sparse_log_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__sparse_matrix_mask_helper`.
pub fn f_internal_sparse_matrix_mask_helper(
tr: &Tensor,
mask_indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_matrix_mask_helper(
c_tensors.as_mut_ptr(),
tr.c_tensor,
mask_indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__sparse_mm` (sparse × dense matmul).
pub fn f_internal_sparse_mm(sparse: &Tensor, dense: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_mm(
c_tensors.as_mut_ptr(),
sparse.c_tensor,
dense.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sparse_softmax` (dtype overload).
pub fn f_internal_sparse_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Overload of `_sparse_softmax` taking a `half_to_float` flag.
pub fn f_internal_sparse_softmax1(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `_sparse_softmax`; `self` (forward input) is passed last.
pub fn f_internal_sparse_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sparse_sparse_matmul`.
pub fn f_internal_sparse_sparse_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sparse_matmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sparse_sum` (full reduction overload).
pub fn f_internal_sparse_sum(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `_sparse_sum` overload with an output dtype.
pub fn f_internal_sparse_sum1(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `_sparse_sum` overload reducing over the given `dim`s.
pub fn f_internal_sparse_sum2(&self, dim: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum2(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `_sparse_sum` overload with both `dim`s and an output dtype.
pub fn f_internal_sparse_sum3(&self, dim: &[i64], dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum3(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__sparse_sum_backward`.
pub fn f_internal_sparse_sum_backward(
&self,
grad: &Tensor,
dim: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__stack`; the temporary pointer Vec
/// from `ptr_list` lives through the macro statement, so `.as_ptr()` is
/// sound here.
pub fn f_internal_stack<T: Borrow<Tensor>>(
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of [`Tensor::f_internal_stack`].
pub fn f_internal_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__standard_gamma` (gamma-distributed sampling).
pub fn f_internal_standard_gamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__standard_gamma_grad`.
pub fn f_internal_standard_gamma_grad(&self, output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma_grad(
c_tensors.as_mut_ptr(),
self.c_tensor,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__std`; `unbiased` is a 0/1 C int.
pub fn f_internal_std(&self, unbiased: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__std(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__svd_helper`; returns the three tensors filled
/// into the out-array (U, S, V — presumably; confirm against libtorch).
pub fn f_internal_svd_helper(
&self,
some: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__svd_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
if some { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// FFI wrapper for `atg__syevd_helper`; `uplo` is passed as (ptr, len).
pub fn f_internal_syevd_helper(
&self,
compute_eigenvectors: bool,
uplo: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__syevd_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
if compute_eigenvectors { 1 } else { 0 },
uplo.as_ptr(),
uplo.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__symeig_helper`.
pub fn f_internal_symeig_helper(
&self,
eigenvectors: bool,
upper: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__symeig_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
if eigenvectors { 1 } else { 0 },
if upper { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper for ATen's internal test op `_test_ambiguous_defaults`
/// (exists for binding-generator testing, not for end users).
pub fn f_internal_test_ambiguous_defaults(
dummy: &Tensor,
a: i64,
b: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_ambiguous_defaults(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a,
b
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// String overload of the `_test_ambiguous_defaults` test op; `b` is
/// passed as (ptr, byte-len).
pub fn f_internal_test_ambiguous_defaults1(
dummy: &Tensor,
a: i64,
b: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_ambiguous_defaults1(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a,
b.as_ptr(),
b.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper for the ATen test op `_test_optional_filled_intlist`.
pub fn f_internal_test_optional_filled_intlist(
values: &Tensor,
addends: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_filled_intlist(
c_tensors.as_mut_ptr(),
values.c_tensor,
addends.as_ptr(),
addends.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper for the ATen test op `_test_optional_intlist`.
pub fn f_internal_test_optional_intlist(
values: &Tensor,
addends: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_intlist(
c_tensors.as_mut_ptr(),
values.c_tensor,
addends.as_ptr(),
addends.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper for the ATen test op `_test_serialization_subcmul`.
pub fn f_internal_test_serialization_subcmul(
&self,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_serialization_subcmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper for the ATen test op `_test_string_default`; both strings are
/// passed as (ptr, byte-len) pairs.
pub fn f_internal_test_string_default(
dummy: &Tensor,
a: &str,
b: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_string_default(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a.as_ptr(),
a.len() as i32,
b.as_ptr(),
b.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__triangular_solve_helper`; all three flags are
/// marshalled as 0/1 C ints.
pub fn f_internal_triangular_solve_helper(
&self,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__triangular_solve_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 },
if transpose { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Associated function wrapping `atg__trilinear`; each int list is
/// marshalled as a (ptr, len) pair in the generated order.
pub fn f_internal_trilinear(
i1: &Tensor,
i2: &Tensor,
i3: &Tensor,
expand1: &[i64],
expand2: &[i64],
expand3: &[i64],
sumdim: &[i64],
unroll_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__trilinear(
c_tensors.as_mut_ptr(),
i1.c_tensor,
i2.c_tensor,
i3.c_tensor,
expand1.as_ptr(),
expand1.len() as i32,
expand2.as_ptr(),
expand2.len() as i32,
expand3.as_ptr(),
expand3.len() as i32,
sumdim.as_ptr(),
sumdim.len() as i32,
unroll_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__unique` (2-output overload).
pub fn f_internal_unique(
&self,
sorted: bool,
return_inverse: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__unique(
c_tensors.as_mut_ptr(),
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__unique2` (3-output overload adding counts).
pub fn f_internal_unique2(
&self,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__unique2(
c_tensors.as_mut_ptr(),
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Associated function wrapping `atg__unpack_dual` (forward-mode AD:
/// splits a dual tensor into its two components).
pub fn f_internal_unpack_dual(dual: &Tensor, level: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__unpack_dual(
c_tensors.as_mut_ptr(),
dual.c_tensor,
level
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// FFI wrapper for `atg__unsafe_view`; `size` is marshalled as (ptr, len).
pub fn f_internal_unsafe_view(&self, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__unsafe_view(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__values` (values of a sparse tensor, presumably).
pub fn f_internal_values(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__values(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// FFI wrapper for `atg__var`; `unbiased` is a 0/1 C int.
pub fn f_internal_var(&self, unbiased: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__var(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__weight_norm` over (v, g) at `dim`.
pub fn f_internal_weight_norm(v: &Tensor, g: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__weight_norm(
c_tensors.as_mut_ptr(),
v.c_tensor,
g.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function wrapping `atg__weight_norm_cuda_interface`;
/// returns the two tensors filled into the out-array.
pub fn f_internal_weight_norm_cuda_interface(
v: &Tensor,
g: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_cuda_interface(
c_tensors.as_mut_ptr(),
v.c_tensor,
g.c_tensor,
dim
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Backward of the CUDA weight-norm interface; returns the two gradients
/// filled into the out-array (presumably w.r.t. v and g — confirm).
pub fn f_internal_weight_norm_cuda_interface_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_cuda_interface_backward(
c_tensors.as_mut_ptr(),
grad_w.c_tensor,
saved_v.c_tensor,
saved_g.c_tensor,
saved_norms.c_tensor,
dim
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Differentiable-backward variant of weight-norm; same argument
/// marshalling as the CUDA-interface backward above.
pub fn f_internal_weight_norm_differentiable_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_differentiable_backward(
c_tensors.as_mut_ptr(),
grad_w.c_tensor,
saved_v.c_tensor,
saved_g.c_tensor,
saved_norms.c_tensor,
dim
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Calls `atg_abs` on `self`, wrapping the returned handle.
pub fn f_abs(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Takes `&mut self` and calls `atg_abs_` (the trailing-underscore C entry
/// point); still returns a Tensor wrapping the handle the C call produced.
pub fn f_abs_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_abs_out`; note `out` is passed to C *before* `self`.
pub fn f_abs_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_absolute` on `self`.
pub fn f_absolute(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_absolute_`.
pub fn f_absolute_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_absolute_out` with `out` first, then `self`.
pub fn f_absolute_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_acos` on `self`.
pub fn f_acos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_acos_`.
pub fn f_acos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_acos_out` with `out` first, then `self`.
pub fn f_acos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_acosh` on `self`.
pub fn f_acosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_acosh_`.
pub fn f_acosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_acosh_out` with `out` first, then `self`.
pub fn f_acosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_adaptive_avg_pool1d`, passing `output_size` as a raw
/// pointer + `i32` length pair.
pub fn f_adaptive_avg_pool1d(&self, output_size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// 2-D variant: dispatches to `atg_adaptive_avg_pool2d`.
pub fn f_adaptive_avg_pool2d(&self, output_size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_adaptive_avg_pool2d_out` receives `out` before `self`.
pub fn f_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// 3-D variant: dispatches to `atg_adaptive_avg_pool3d`.
pub fn f_adaptive_avg_pool3d(&self, output_size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward pass: `grad_output` is passed to C *before* `self`.
pub fn f_adaptive_avg_pool3d_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward out-variant: C argument order is grad_input, grad_output, self.
pub fn f_adaptive_avg_pool3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of the 3-D pool: `out` precedes `self` in the C call.
pub fn f_adaptive_avg_pool3d_out(
&self,
out: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_adaptive_max_pool1d`; the C call fills two output slots,
/// returned here as a `(Tensor, Tensor)` pair.
pub fn f_adaptive_max_pool1d(&self, output_size: &[i64]) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// 2-D variant: dispatches to `atg_adaptive_max_pool2d`, two outputs.
pub fn f_adaptive_max_pool2d(&self, output_size: &[i64]) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Backward pass: C order is grad_output, self, indices.
pub fn f_adaptive_max_pool2d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward out-variant: C order is grad_input, grad_output, self, indices.
pub fn f_adaptive_max_pool2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, indices, self, then the size slice; two
/// output handles are returned.
pub fn f_adaptive_max_pool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: &[i64],
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// 3-D variant: dispatches to `atg_adaptive_max_pool3d`, two outputs.
pub fn f_adaptive_max_pool3d(&self, output_size: &[i64]) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Backward pass: C order is grad_output, self, indices.
pub fn f_adaptive_max_pool3d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward out-variant: C order is grad_input, grad_output, self, indices.
pub fn f_adaptive_max_pool3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, indices, self, then the size slice.
pub fn f_adaptive_max_pool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: &[i64],
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Tensor-tensor addition: calls `atg_add` with `self` then `other`.
pub fn f_add(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor-scalar overload: `other` is converted via `Into<Scalar>` and its
/// raw `c_scalar` is passed to `atg_add1`.
pub fn f_add1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_add_`.
pub fn f_add_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` tensor-scalar variant dispatching to `atg_add_1`.
pub fn f_add_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_add_out` receives out, self, other in that order.
pub fn f_add_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addbmm` with self, batch1, batch2.
pub fn f_addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addbmm_`.
pub fn f_addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, batch1, batch2.
pub fn f_addbmm_out(
&self,
out: &Tensor,
batch1: &Tensor,
batch2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addcdiv` with self, tensor1, tensor2.
pub fn f_addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addcdiv_`.
pub fn f_addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv_(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, tensor1, tensor2.
pub fn f_addcdiv_out(
&self,
out: &Tensor,
tensor1: &Tensor,
tensor2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addcmul` with self, tensor1, tensor2.
pub fn f_addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addcmul_`.
pub fn f_addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul_(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, tensor1, tensor2.
pub fn f_addcmul_out(
&self,
out: &Tensor,
tensor1: &Tensor,
tensor2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addmm` with self, mat1, mat2.
pub fn f_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addmm_`.
pub fn f_addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, mat1, mat2.
pub fn f_addmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addmv` with self, mat, vec.
pub fn f_addmv(&self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addmv_`.
pub fn f_addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, mat, vec.
pub fn f_addmv_out(
&self,
out: &Tensor,
mat: &Tensor,
vec: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_addr` with self, vec1, vec2.
pub fn f_addr(&self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr(
c_tensors.as_mut_ptr(),
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_addr_`.
pub fn f_addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr_(
c_tensors.as_mut_ptr(),
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, vec1, vec2.
pub fn f_addr_out(
&self,
out: &Tensor,
vec1: &Tensor,
vec2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated function: calls `atg_affine_grid_generator` with `theta`, the
/// raw `size` slice, and `align_corners` encoded as 1/0.
pub fn f_affine_grid_generator(
theta: &Tensor,
size: &[i64],
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_affine_grid_generator(
c_tensors.as_mut_ptr(),
theta.c_tensor,
size.as_ptr(),
size.len() as i32,
if align_corners { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward pass: same argument shape, with `grad` instead of `theta`.
pub fn f_affine_grid_generator_backward(
grad: &Tensor,
size: &[i64],
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_affine_grid_generator_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
size.as_ptr(),
size.len() as i32,
if align_corners { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_alias` on `self`.
pub fn f_alias(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alias(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_align_as` with `self` then `other`.
pub fn f_align_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_align_as(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// List-returning variant: `atg_align_tensors` takes a pointer array built by
/// `ptr_list` and returns a heap-allocated, null-terminated array of tensor
/// handles, which is walked here and then released with `libc::free`.
pub fn f_align_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_align_tensors(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk until the null sentinel; each non-null handle becomes a Tensor.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the array of pointers; the handles now belong to the Tensors.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Reduces over all elements: calls `atg_all` on `self`.
pub fn f_all(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dimension overload: calls `atg_all1` with `dim` and `keepdim` as 1/0.
pub fn f_all1(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, dim, keepdim.
pub fn f_all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_alpha_dropout` with probability `p` and `train` as 1/0.
pub fn f_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alpha_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_alpha_dropout_`.
pub fn f_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alpha_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_amax` with the raw `dim` slice and `keepdim` as 1/0.
pub fn f_amax(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, dim slice, keepdim.
pub fn f_amax_out(&self, out: &Tensor, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_amin` with the raw `dim` slice and `keepdim` as 1/0.
pub fn f_amin(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amin(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, dim slice, keepdim.
pub fn f_amin_out(&self, out: &Tensor, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_angle` on `self`.
pub fn f_angle(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_angle(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_angle_out` receives `out` before `self`.
pub fn f_angle_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_angle_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Reduces over all elements: calls `atg_any` on `self`.
pub fn f_any(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dimension overload: calls `atg_any1` with `dim` and `keepdim` as 1/0.
pub fn f_any1(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, dim, keepdim.
pub fn f_any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Factory function: calls `atg_arange` with an end scalar and the
/// `(Kind, Device)` pair lowered to their C integer codes.
pub fn f_arange<S: Into<Scalar>>(end: S, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange(
c_tensors.as_mut_ptr(),
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Start/end overload dispatching to `atg_arange1`.
pub fn f_arange1<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange1(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Start/end/step overload dispatching to `atg_arange2`.
pub fn f_arange2<S: Into<Scalar>>(
start: S,
end: S,
step: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange2(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
step.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arange_out` writes into `out`; no kind/device options.
pub fn f_arange_out<S: Into<Scalar>>(out: &Tensor, end: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
end.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Start/end out-variant dispatching to `atg_arange_out1`.
pub fn f_arange_out1<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arccos` on `self`.
pub fn f_arccos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arccos_`.
pub fn f_arccos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arccos_out` receives `out` before `self`.
pub fn f_arccos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arccosh` on `self`.
pub fn f_arccosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arccosh_`.
pub fn f_arccosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arccosh_out` receives `out` before `self`.
pub fn f_arccosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arcsin` on `self`.
pub fn f_arcsin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arcsin_`.
pub fn f_arcsin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arcsin_out` receives `out` before `self`.
pub fn f_arcsin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arcsinh` on `self`.
pub fn f_arcsinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arcsinh_`.
pub fn f_arcsinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arcsinh_out` receives `out` before `self`.
pub fn f_arcsinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arctan` on `self`.
pub fn f_arctan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arctan_`.
pub fn f_arctan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arctan_out` receives `out` before `self`.
pub fn f_arctan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_arctanh` on `self`.
pub fn f_arctanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_arctanh_`.
pub fn f_arctanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_arctanh_out` receives `out` before `self`.
pub fn f_arctanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_argmax`. The optional `dim` is encoded for C as a value/flag
/// pair: the value (0 when absent) plus an `i8` that is 1 when `dim` is None.
pub fn f_argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: same optional-`dim` encoding; `out` precedes `self`.
pub fn f_argmax_out(
&self,
out: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_argmin` with the same optional-`dim` value/flag encoding.
pub fn f_argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmin(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `argmin`; `out` precedes `self` in the C call.
pub fn f_argmin_out(
&self,
out: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_argsort` with `dim` and `descending` encoded as 1/0.
pub fn f_argsort(&self, dim: i64, descending: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argsort(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_as_strided` with raw size/stride slices; the optional
/// `storage_offset` uses the value/flag encoding (0 + is-none `i8`).
pub fn f_as_strided(
&self,
size: &[i64],
stride: &[i64],
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_as_strided_`; same encoding.
pub fn f_as_strided_(
&mut self,
size: &[i64],
stride: &[i64],
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_asin` on `self`.
pub fn f_asin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_asin_`.
pub fn f_asin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_asin_out` receives `out` before `self`.
pub fn f_asin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_asinh` on `self`.
pub fn f_asinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_asinh_`.
pub fn f_asinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_asinh_out` receives `out` before `self`.
pub fn f_asinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_atan` on `self`.
pub fn f_atan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Two-argument form: calls `atg_atan2` with `self` then `other`.
pub fn f_atan2(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_atan2_`.
pub fn f_atan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: C order is out, self, other.
pub fn f_atan2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_atan_`.
pub fn f_atan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_atan_out` receives `out` before `self`.
pub fn f_atan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Calls `atg_atanh` on `self`.
pub fn f_atanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant dispatching to `atg_atanh_`.
pub fn f_atanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant: `atg_atanh_out` receives `out` before `self`.
pub fn f_atanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `atleast_1d` (`atg_atleast_1d`).
pub fn f_atleast_1d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_1d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// List variant of `atleast_1d`: the C side returns a heap-allocated,
/// null-terminated array of tensor handles; we copy the handles out and then
/// free the array itself (the handles are owned by the `Tensor` wrappers).
pub fn f_atleast_1d1<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_1d1(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null sentinel.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the array of pointers, not the tensors it pointed to.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for ATen `atleast_2d` (`atg_atleast_2d`).
pub fn f_atleast_2d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_2d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// List variant of `atleast_2d`; same null-terminated-array protocol as
/// `f_atleast_1d1`.
pub fn f_atleast_2d1<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_2d1(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for ATen `atleast_3d` (`atg_atleast_3d`).
pub fn f_atleast_3d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_3d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// List variant of `atleast_3d`; same null-terminated-array protocol as
/// `f_atleast_1d1`.
pub fn f_atleast_3d1<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_3d1(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for ATen `avg_pool1d` (`atg_avg_pool1d`). Slice
/// parameters are passed to C as (pointer, length) pairs; booleans are
/// lowered to 0/1 integers as the C ABI expects.
pub fn f_avg_pool1d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `avg_pool2d` (`atg_avg_pool2d`).
/// `divisor_override` uses the generated optional-scalar protocol: the value
/// (or 0) plus a trailing `is_none` flag so the C side can distinguish
/// "absent" from an explicit 0.
pub fn f_avg_pool2d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `avg_pool2d` (`atg_avg_pool2d_backward`): gradient w.r.t. the
/// input, given `grad_output` and the original pooling parameters.
pub fn f_avg_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of the backward pass: gradient written into `grad_input`.
pub fn f_avg_pool2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `avg_pool2d` (`atg_avg_pool2d_out`).
pub fn f_avg_pool2d_out(
&self,
out: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `avg_pool3d` (`atg_avg_pool3d`); same
/// optional-divisor protocol as `f_avg_pool2d`.
pub fn f_avg_pool3d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `avg_pool3d` (`atg_avg_pool3d_backward`).
pub fn f_avg_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of the 3d backward pass: gradient written into `grad_input`.
pub fn f_avg_pool3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `avg_pool3d` (`atg_avg_pool3d_out`).
pub fn f_avg_pool3d_out(
&self,
out: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `baddbmm` (`atg_baddbmm`): batched matrix
/// multiply of `batch1 @ batch2` accumulated onto `self`.
pub fn f_baddbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `baddbmm` (`atg_baddbmm_`).
pub fn f_baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `baddbmm` (`atg_baddbmm_out`): result written into `out`.
pub fn f_baddbmm_out(
&self,
out: &Tensor,
batch1: &Tensor,
batch2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Factory: Bartlett window of `window_length` elements (`atg_bartlett_window`).
/// `options` supplies the element kind and device as their C integer codes.
pub fn f_bartlett_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bartlett window with an explicit `periodic` flag (`atg_bartlett_window1`).
pub fn f_bartlett_window1(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window1(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `batch_norm` (`atg_batch_norm`). Optional tensor
/// arguments are lowered to a null pointer when `None`, which the C side
/// interprets as "undefined tensor".
pub fn f_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Elementwise part of the synced batch-norm backward pass
/// (`atg_batch_norm_backward_elemt`); `weight` is optional (null when `None`).
pub fn f_batch_norm_backward_elemt<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
mean_dy: &Tensor,
mean_dy_xmu: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_backward_elemt(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean_dy.c_tensor,
mean_dy_xmu.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Reduction part of the synced batch-norm backward pass
/// (`atg_batch_norm_backward_reduce`). The C call fills four output slots;
/// all four are returned as a tuple.
pub fn f_batch_norm_backward_reduce<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
input_g: bool,
weight_g: bool,
bias_g: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_batch_norm_backward_reduce(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if input_g { 1 } else { 0 },
if weight_g { 1 } else { 0 },
if bias_g { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
))
}
/// Elementwise batch-norm transform given precomputed statistics
/// (`atg_batch_norm_elemt`); `weight`/`bias` are optional.
pub fn f_batch_norm_elemt<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_elemt(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
invstd.c_tensor,
eps
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `batch_norm_elemt` (`atg_batch_norm_elemt_out`).
pub fn f_batch_norm_elemt_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_elemt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
invstd.c_tensor,
eps
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Gathers per-device batch-norm statistics with a single shared `count`
/// (`atg_batch_norm_gather_stats`); returns the combined (mean, invstd) pair.
pub fn f_batch_norm_gather_stats<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
count: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
count
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Variant taking per-device counts as a tensor
/// (`atg_batch_norm_gather_stats_with_counts`).
pub fn f_batch_norm_gather_stats_with_counts<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
counts: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats_with_counts(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
counts.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Computes per-batch (mean, invstd) statistics (`atg_batch_norm_stats`).
pub fn f_batch_norm_stats(&self, eps: f64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
eps
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Updates optional running statistics with momentum
/// (`atg_batch_norm_update_stats`); returns the new (mean, var) pair.
pub fn f_batch_norm_update_stats<T: Borrow<Tensor>>(
&self,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_update_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for ATen `bernoulli` (`atg_bernoulli`): samples 0/1 using
/// `self` as per-element probabilities.
pub fn f_bernoulli(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bernoulli with a single scalar probability `p` (`atg_bernoulli1`).
pub fn f_bernoulli1(&self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli1(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place Bernoulli with a probability tensor `p` (`atg_bernoulli_`).
pub fn f_bernoulli_(&mut self, p: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place Bernoulli with a scalar probability `p` (`atg_bernoulli_1`).
pub fn f_bernoulli_1(&mut self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_1(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `bernoulli` (`atg_bernoulli_out`): samples into `out`.
pub fn f_bernoulli_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `bilinear` (`atg_bilinear`): bilinear transform
/// of `input1` and `input2` with `weight` and an optional `bias`.
pub fn f_bilinear<T: Borrow<Tensor>>(
input1: &Tensor,
input2: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bilinear(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Binary cross-entropy between `self` and `target`
/// (`atg_binary_cross_entropy`); optional per-element `weight`, reduction
/// mode passed as its C integer code.
pub fn f_binary_cross_entropy<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `binary_cross_entropy` (`atg_binary_cross_entropy_backward`).
pub fn f_binary_cross_entropy_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of the backward pass: gradient written into `grad_input`.
pub fn f_binary_cross_entropy_backward_out<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `binary_cross_entropy` (`atg_binary_cross_entropy_out`).
pub fn f_binary_cross_entropy_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// BCE on raw logits (`atg_binary_cross_entropy_with_logits`); also accepts
/// an optional `pos_weight` for class rebalancing.
pub fn f_binary_cross_entropy_with_logits<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_with_logits(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
pos_weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of the logits variant
/// (`atg_binary_cross_entropy_with_logits_backward`).
pub fn f_binary_cross_entropy_with_logits_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_with_logits_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
pos_weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen `bincount` (`atg_bincount`) with optional
/// `weights` and a minimum output length.
pub fn f_bincount<T: Borrow<Tensor>>(
&self,
weights: Option<T>,
minlength: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bincount(
c_tensors.as_mut_ptr(),
self.c_tensor,
weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
minlength
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Samples from a binomial distribution with `count` trials and `prob`
/// success probability (`atg_binomial`).
pub fn f_binomial(count: &Tensor, prob: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binomial(
c_tensors.as_mut_ptr(),
count.c_tensor,
prob.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise AND with a scalar (`atg_bitwise_and`).
pub fn f_bitwise_and<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise AND with a tensor (`atg_bitwise_and1`).
pub fn f_bitwise_and1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise AND with a scalar (`atg_bitwise_and_`).
pub fn f_bitwise_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise AND with a tensor (`atg_bitwise_and_1`).
pub fn f_bitwise_and_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, tensor rhs (`atg_bitwise_and_out`): result written into `out`.
pub fn f_bitwise_and_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, scalar rhs (`atg_bitwise_and_out1`).
pub fn f_bitwise_and_out1<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise NOT (`atg_bitwise_not`).
pub fn f_bitwise_not(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise NOT (`atg_bitwise_not_`).
pub fn f_bitwise_not_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of bitwise NOT (`atg_bitwise_not_out`).
pub fn f_bitwise_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise OR with a scalar (`atg_bitwise_or`).
pub fn f_bitwise_or<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise OR with a tensor (`atg_bitwise_or1`).
pub fn f_bitwise_or1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise OR with a scalar (`atg_bitwise_or_`).
pub fn f_bitwise_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise OR with a tensor (`atg_bitwise_or_1`).
pub fn f_bitwise_or_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, tensor rhs (`atg_bitwise_or_out`).
pub fn f_bitwise_or_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, scalar rhs (`atg_bitwise_or_out1`).
pub fn f_bitwise_or_out1<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise XOR with a scalar (`atg_bitwise_xor`).
pub fn f_bitwise_xor<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bitwise XOR with a tensor (`atg_bitwise_xor1`).
pub fn f_bitwise_xor1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise XOR with a scalar (`atg_bitwise_xor_`).
pub fn f_bitwise_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place bitwise XOR with a tensor (`atg_bitwise_xor_1`).
pub fn f_bitwise_xor_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, tensor rhs (`atg_bitwise_xor_out`).
pub fn f_bitwise_xor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant, scalar rhs (`atg_bitwise_xor_out1`).
pub fn f_bitwise_xor_out1<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Factory: Blackman window of `window_length` elements
/// (`atg_blackman_window`); `options` gives the kind/device C codes.
pub fn f_blackman_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Blackman window with an explicit `periodic` flag (`atg_blackman_window1`).
pub fn f_blackman_window1(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window1(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Builds a block-diagonal matrix from a list of tensors (`atg_block_diag`).
pub fn f_block_diag<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_block_diag(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Batched matrix multiply (`atg_bmm`).
pub fn f_bmm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `bmm` (`atg_bmm_out`): result written into `out`.
pub fn f_bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Broadcasts all input tensors to a common shape
/// (`atg_broadcast_tensors`); the C side returns a null-terminated array of
/// handles which is copied out and then freed (see `f_atleast_1d1`).
pub fn f_broadcast_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_broadcast_tensors(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Broadcasts `self` to the given `size` (`atg_broadcast_to`).
pub fn f_broadcast_to(&self, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_broadcast_to(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Maps each element of `self` to its bucket index in `boundaries`
/// (`atg_bucketize`); `out_int32` selects int32 output, `right` the boundary
/// side, both lowered to 0/1 for the C ABI.
pub fn f_bucketize(
&self,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize(
c_tensors.as_mut_ptr(),
self.c_tensor,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar variant of `bucketize` (`atg_bucketize1`): buckets a single scalar
/// rather than a tensor.
pub fn f_bucketize1<S: Into<Scalar>>(
self_scalar: S,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize1(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `bucketize` (`atg_bucketize_out`).
pub fn f_bucketize_out(
&self,
out: &Tensor,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Cartesian product of a list of 1-d tensors (`atg_cartesian_prod`).
pub fn f_cartesian_prod<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cartesian_prod(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Concatenates `tensors` along dimension `dim` (`atg_cat`).
pub fn f_cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cat(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `cat` (`atg_cat_out`): result written into `out`.
pub fn f_cat_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cat_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place fill with samples from a Cauchy distribution (`atg_cauchy_`).
pub fn f_cauchy_(&mut self, median: f64, sigma: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cauchy_(
c_tensors.as_mut_ptr(),
self.c_tensor,
median,
sigma
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Pairwise p-norm distance between row batches of `x1` and `x2`
/// (`atg_cdist`); `compute_mode` uses the optional-scalar protocol (value or
/// 0 plus a trailing `is_none` flag).
pub fn f_cdist(
x1: &Tensor,
x2: &Tensor,
p: f64,
compute_mode: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let compute_mode = compute_mode.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cdist(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
p,
compute_mode.unwrap_or(0i64),
compute_mode.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Elementwise ceiling (`atg_ceil`).
pub fn f_ceil(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place ceiling (`atg_ceil_`).
pub fn f_ceil_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-variant of `ceil` (`atg_ceil_out`).
pub fn f_ceil_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// CELU activation with the default alpha (`atg_celu`).
pub fn f_celu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_celu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place CELU (`atg_celu_`).
pub fn f_celu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_celu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `chain_matmul` over the slice of `matrices`.
pub fn f_chain_matmul<T: Borrow<Tensor>>(matrices: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
// The Vec returned by ptr_list lives until the end of this statement, so its pointer stays valid.
unsafe_torch_err!(atg_chain_matmul(
c_tensors.as_mut_ptr(),
ptr_list(matrices).as_ptr(),
matrices.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `channel_shuffle` with the given number of `groups`.
pub fn f_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_channel_shuffle(
c_tensors.as_mut_ptr(),
self.c_tensor,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky`; `upper` is lowered to the 0/1 int the C ABI expects.
pub fn f_cholesky(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky_inverse`; `upper` is lowered to 0/1 for the C ABI.
pub fn f_cholesky_inverse(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_inverse(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky_inverse.out`, writing the result into `out`.
pub fn f_cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_inverse_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky.out`, writing the result into `out`.
pub fn f_cholesky_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky_solve` with factor `input2`.
pub fn f_cholesky_solve(&self, input2: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cholesky_solve.out`, writing the result into `out`.
pub fn f_cholesky_solve_out(
&self,
out: &Tensor,
input2: &Tensor,
upper: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `choose_qparams_optimized`; the C call fills two output
/// slots, returned here as a `(Tensor, Tensor)` pair.
pub fn f_choose_qparams_optimized(
&self,
numel: i64,
n_bins: i64,
ratio: f64,
bit_width: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_choose_qparams_optimized(
c_tensors.as_mut_ptr(),
self.c_tensor,
numel,
n_bins,
ratio,
bit_width
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for ATen's `chunk`: splits `self` into up to `chunks` pieces along `dim`.
/// The C side returns a malloc'd, NULL-terminated array of tensor pointers; we take ownership
/// of each tensor handle, then free the array itself.
pub fn f_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_chunk(self.c_tensor, chunks, dim));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the NULL sentinel.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the array of pointers; the tensors are now owned by the Vec above.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for ATen's `clamp` with scalar `min`/`max` bounds.
pub fn f_clamp<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `clamp_`; returns a new handle to the result tensor.
pub fn f_clamp_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clamp_max` (upper bound only).
pub fn f_clamp_max<S: Into<Scalar>>(&self, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `clamp_max_`; returns a new handle to the result tensor.
pub fn f_clamp_max_<S: Into<Scalar>>(&mut self, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clamp_max.out`, writing the result into `out`.
pub fn f_clamp_max_out<S: Into<Scalar>>(
&self,
out: &Tensor,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clamp_min` (lower bound only).
pub fn f_clamp_min<S: Into<Scalar>>(&self, min: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `clamp_min_`; returns a new handle to the result tensor.
pub fn f_clamp_min_<S: Into<Scalar>>(&mut self, min: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clamp_min.out`, writing the result into `out`.
pub fn f_clamp_min_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clamp.out`, writing the result into `out`.
pub fn f_clamp_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clip` (alias of `clamp`) with scalar bounds.
pub fn f_clip<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `clip_`; returns a new handle to the result tensor.
pub fn f_clip_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `clip.out`, writing the result into `out`.
pub fn f_clip_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `coalesce` (via `atg_coalesce`).
pub fn f_coalesce(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_coalesce(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `col2im`; each i64 slice is passed as a (ptr, len) pair.
pub fn f_col2im(
&self,
output_size: &[i64],
kernel_size: &[i64],
dilation: &[i64],
padding: &[i64],
stride: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
kernel_size.as_ptr(),
kernel_size.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `col2im_backward` on `grad_output`.
pub fn f_col2im_backward(
grad_output: &Tensor,
kernel_size: &[i64],
dilation: &[i64],
padding: &[i64],
stride: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `col2im_backward.out`, writing into `grad_input`.
pub fn f_col2im_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
dilation: &[i64],
padding: &[i64],
stride: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `col2im.out`, writing the result into `out`.
pub fn f_col2im_out(
&self,
out: &Tensor,
output_size: &[i64],
kernel_size: &[i64],
dilation: &[i64],
padding: &[i64],
stride: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
kernel_size.as_ptr(),
kernel_size.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `column_stack` over the slice of `tensors`.
pub fn f_column_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_column_stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `column_stack.out`, writing the result into `out`.
pub fn f_column_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_column_stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `combinations`; `with_replacement` is lowered to 0/1 for the C ABI.
pub fn f_combinations(&self, r: i64, with_replacement: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_combinations(
c_tensors.as_mut_ptr(),
self.c_tensor,
r,
if with_replacement { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `complex`, combining `real` and `imag` parts.
pub fn f_complex(real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_complex(
c_tensors.as_mut_ptr(),
real.c_tensor,
imag.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `complex.out`, writing the result into `out`.
pub fn f_complex_out(out: &Tensor, real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_complex_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
real.c_tensor,
imag.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conj` (via `atg_conj`).
pub fn f_conj(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conj.out`, writing the result into `out`.
pub fn f_conj_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `constant_pad_nd` with padding amounts `pad`.
pub fn f_constant_pad_nd(&self, pad: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_constant_pad_nd(
c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(),
pad.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `contiguous` (via `atg_contiguous`).
pub fn f_contiguous(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_contiguous(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv1d`. A `None` bias is passed to C as a null tensor pointer.
pub fn f_conv1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv2d`. A `None` bias is passed to C as a null tensor pointer.
pub fn f_conv2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv3d`. A `None` bias is passed to C as a null tensor pointer.
pub fn f_conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv_tbc` (time-batch-channel convolution).
pub fn f_conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_tbc(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv_tbc_backward`; the C call fills three output slots,
/// returned here as a `(Tensor, Tensor, Tensor)` triple. Note `self` is the first C argument.
pub fn f_conv_tbc_backward(
&self,
input: &Tensor,
weight: &Tensor,
bias: &Tensor,
pad: i64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_conv_tbc_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
input.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for ATen's `conv_transpose1d`. A `None` bias is passed as a null pointer.
pub fn f_conv_transpose1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
groups: i64,
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
groups,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv_transpose2d`. A `None` bias is passed as a null pointer.
pub fn f_conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
groups: i64,
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
groups,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `conv_transpose3d`. A `None` bias is passed as a null pointer.
pub fn f_conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
groups: i64,
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
groups,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's generic `convolution` entry point; `transposed` selects the
/// transposed variant and is lowered to 0/1 for the C ABI.
pub fn f_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
transposed: bool,
output_padding: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `convolution_overrideable` (backend-overridable convolution hook).
pub fn f_convolution_overrideable<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
transposed: bool,
output_padding: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution_overrideable(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `copy_sparse_to_sparse_` from `src` into `self`.
pub fn f_copy_sparse_to_sparse_(
&mut self,
src: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copy_sparse_to_sparse_(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `copysign` with a tensor `other`.
pub fn f_copysign(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the scalar overload of ATen's `copysign` (via `atg_copysign1`).
pub fn f_copysign1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `copysign_` with a tensor `other`.
pub fn f_copysign_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place scalar overload of ATen's `copysign_` (via `atg_copysign_1`).
pub fn f_copysign_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `copysign.out`, writing the result into `out`.
pub fn f_copysign_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cos` (via `atg_cos`).
pub fn f_cos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `cos_`; returns a new handle to the result tensor.
pub fn f_cos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cos.out`, writing the result into `out`.
pub fn f_cos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cosh` (via `atg_cosh`).
pub fn f_cosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's in-place `cosh_`; returns a new handle to the result tensor.
pub fn f_cosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cosh.out`, writing the result into `out`.
pub fn f_cosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cosine_embedding_loss`; `reduction` is lowered to its
/// integer encoding via `Reduction::to_int`.
pub fn f_cosine_embedding_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosine_embedding_loss(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cosine_similarity` between `x1` and `x2` along `dim`.
pub fn f_cosine_similarity(
x1: &Tensor,
x2: &Tensor,
dim: i64,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosine_similarity(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
dim,
eps
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `count_nonzero` over the dimensions in `dim`.
pub fn f_count_nonzero(&self, dim: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_count_nonzero(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the optional-dim overload of ATen's `count_nonzero`.
pub fn f_count_nonzero1(&self, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
// Optional i64 is passed to C as a (value, is_none) pair; 0 is a placeholder when absent.
unsafe_torch_err!(atg_count_nonzero1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cross` with an optional `dim`.
pub fn f_cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
// Optional i64 is passed to C as a (value, is_none) pair; 0 is a placeholder when absent.
unsafe_torch_err!(atg_cross(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cross.out`, writing the result into `out`.
pub fn f_cross_out(
&self,
out: &Tensor,
other: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
// Optional i64 is passed to C as a (value, is_none) pair; 0 is a placeholder when absent.
unsafe_torch_err!(atg_cross_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `ctc_loss` with slice-valued input/target lengths.
pub fn f_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &[i64],
target_lengths: &[i64],
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len() as i32,
target_lengths.as_ptr(),
target_lengths.len() as i32,
blank,
reduction.to_int(),
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the tensor-lengths overload of ATen's `ctc_loss` (via `atg_ctc_loss1`).
pub fn f_ctc_loss1(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ctc_loss1(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
reduction.to_int(),
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_affine_grid_generator` with grid dims `n`/`c`/`h`/`w`.
pub fn f_cudnn_affine_grid_generator(
theta: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator(
c_tensors.as_mut_ptr(),
theta.c_tensor,
n,
c,
h,
w
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_affine_grid_generator_backward` on `grad`.
pub fn f_cudnn_affine_grid_generator_backward(
grad: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
n,
c,
h,
w
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_batch_norm`; the C call fills four output slots,
/// returned as a 4-tuple. `None` optional tensors are passed as null pointers.
pub fn f_cudnn_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_cudnn_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
))
}
/// Fallible binding for ATen's `cudnn_batch_norm_backward`; fills three output slots,
/// returned as a triple. `None` optional tensors are passed as null pointers.
pub fn f_cudnn_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
reservespace: &Tensor,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_cudnn_batch_norm_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon,
reservespace.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for ATen's `cudnn_convolution` (bias-less legacy overload).
pub fn f_cudnn_convolution(
&self,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the bias-taking overload of ATen's `cudnn_convolution`
/// (via `atg_cudnn_convolution1`); a `None` bias is passed as a null pointer.
pub fn f_cudnn_convolution1<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution1(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the `allow_tf32`-aware overload of ATen's `cudnn_convolution`
/// (via `atg_cudnn_convolution2`).
pub fn f_cudnn_convolution2(
&self,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution2(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_convolution_backward_input`; `self_size` is the
/// shape of the input whose gradient is being computed.
pub fn f_cudnn_convolution_backward_input(
self_size: &[i64],
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_backward_input(
c_tensors.as_mut_ptr(),
self_size.as_ptr(),
self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_convolution_backward_weight`; note `self` is the
/// forward input and is passed after `grad_output` in the C call.
pub fn f_cudnn_convolution_backward_weight(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_backward_weight(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_convolution_transpose` (bias-less legacy overload).
pub fn f_cudnn_convolution_transpose(
&self,
weight: &Tensor,
padding: &[i64],
output_padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the bias-taking overload of ATen's `cudnn_convolution_transpose`
/// (via `atg_cudnn_convolution_transpose1`); a `None` bias is passed as a null pointer.
pub fn f_cudnn_convolution_transpose1<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
output_padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose1(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the `allow_tf32`-aware overload of ATen's
/// `cudnn_convolution_transpose` (via `atg_cudnn_convolution_transpose2`).
pub fn f_cudnn_convolution_transpose2(
&self,
weight: &Tensor,
padding: &[i64],
output_padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose2(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_convolution_transpose_backward_input`.
pub fn f_cudnn_convolution_transpose_backward_input(
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose_backward_input(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for ATen's `cudnn_convolution_transpose_backward_weight`; note `self`
/// is the forward input and is passed after `grad_output` in the C call.
pub fn f_cudnn_convolution_transpose_backward_weight(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose_backward_weight(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// cuDNN grid sampling: samples `self` at the locations given by `grid`.
pub fn f_cudnn_grid_sampler(&self, grid: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_grid_sampler(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of cuDNN grid sampling; returns the pair of gradient tensors
/// (w.r.t. `self` and w.r.t. `grid`, in that order per the C API).
pub fn f_cudnn_grid_sampler_backward(
&self,
grid: &Tensor,
grad_output: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cudnn_grid_sampler_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
grad_output.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Cumulative maximum along `dim`; returns `(values, indices)`.
pub fn f_cummax(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummax(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// `cummax` writing its results into the caller-provided `values`/`indices`
/// tensors; also returns handles to the two outputs.
pub fn f_cummax_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummax_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Shared backward for `cummax`/`cummin`, scattering `grad` using the saved
/// `indices` along `dim`.
pub fn f_cummaxmin_backward(
&self,
grad: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cummaxmin_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
indices.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Cumulative minimum along `dim`; returns `(values, indices)`.
pub fn f_cummin(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummin(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// `cummin` writing its results into the caller-provided `values`/`indices`
/// tensors; also returns handles to the two outputs.
pub fn f_cummin_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummin_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Cumulative product along `dim`, computed in (and returned as) `dtype`.
pub fn f_cumprod(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place cumulative product along `dim` (trailing `_` = mutating variant).
pub fn f_cumprod_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `cumprod` along `dim` given the incoming gradient `grad`.
pub fn f_cumprod_backward(&self, grad: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `cumprod` writing the result into `out`.
pub fn f_cumprod_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Cumulative sum along `dim`, computed in (and returned as) `dtype`.
pub fn f_cumsum(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place cumulative sum along `dim` (trailing `_` = mutating variant).
pub fn f_cumsum_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `cumsum` writing the result into `out`.
pub fn f_cumsum_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Thin wrapper around ATen `data` (presumably the autograd-free view of the
/// tensor's storage — semantics defined by the C side).
pub fn f_data(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_data(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise degrees-to-radians conversion; returns a new tensor.
pub fn f_deg2rad(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place degrees-to-radians conversion (trailing `_` = mutating variant).
pub fn f_deg2rad_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `deg2rad` writing the result into `out`.
pub fn f_deg2rad_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dequantizes a quantized tensor into a floating-point tensor.
pub fn f_dequantize(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dequantize(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dequantizes a list of tensors. The C call returns a malloc'd,
/// null-terminated array of tensor pointers, which is walked to build the
/// result vector and then released with `libc::free` (the individual
/// `C_tensor`s are owned by the returned `Tensor`s, only the array is freed).
pub fn f_dequantize1<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_dequantize1(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// Advance until the null terminator written by the C side.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Determinant of a square matrix (batched per ATen semantics).
pub fn f_det(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_det(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Returns a tensor detached from the autograd graph.
pub fn f_detach(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Detaches `self` from the autograd graph in place.
pub fn f_detach_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Extracts or builds a diagonal, offset by `diagonal` (ATen `diag`).
pub fn f_diag(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `diag`: reshapes `grad` to the original `input_sizes`.
pub fn f_diag_backward(
grad: &Tensor,
input_sizes: &[i64],
diagonal: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_sizes.as_ptr(),
input_sizes.len() as i32,
diagonal
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Embeds the last dimension of `self` as a diagonal of a new matrix spanning
/// `dim1`/`dim2`, shifted by `offset`.
pub fn f_diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_embed(
c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `diag` writing the result into `out`.
pub fn f_diag_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Flattens `self` and lays it out as the `offset`-th diagonal of a 2-D tensor.
pub fn f_diagflat(&self, offset: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagflat(c_tensors.as_mut_ptr(), self.c_tensor, offset));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Returns a view of the diagonal of `self` over dims `dim1`/`dim2` at `offset`.
pub fn f_diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal(
c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `diagonal`: scatters `grad` back into a tensor of `input_sizes`.
pub fn f_diagonal_backward(
grad: &Tensor,
input_sizes: &[i64],
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_sizes.as_ptr(),
input_sizes.len() as i32,
offset,
dim1,
dim2
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `n`-th order finite difference along `dim`. Optional `prepend`/`append`
/// tensors are lowered to null pointers when `None` (the C API's "absent"
/// convention).
pub fn f_diff<T: Borrow<Tensor>>(
&self,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diff(
c_tensors.as_mut_ptr(),
self.c_tensor,
n,
dim,
prepend.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
append.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `diff` writing the result into `out`; `None` optionals become null pointers.
pub fn f_diff_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diff_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n,
dim,
prepend.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
append.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise digamma function; returns a new tensor.
pub fn f_digamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place digamma (trailing `_` = mutating variant).
pub fn f_digamma_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `digamma` writing the result into `out`.
pub fn f_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// p-norm distance between `self` and `other` (default norm per ATen `dist`).
pub fn f_dist(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dist(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise division by a tensor.
pub fn f_div(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise division by a scalar.
pub fn f_div1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor division with an explicit `rounding_mode`; the string is passed to C
/// as pointer + byte length (not NUL-terminated).
pub fn f_div2(&self, other: &Tensor, rounding_mode: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar division with an explicit `rounding_mode` (string passed as ptr+len).
pub fn f_div3<S: Into<Scalar>>(
&self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div3(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place element-wise division by a tensor.
pub fn f_div_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place element-wise division by a scalar.
pub fn f_div_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place tensor division with a `rounding_mode` (string passed as ptr+len).
pub fn f_div_2(&mut self, other: &Tensor, rounding_mode: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place scalar division with a `rounding_mode` (string passed as ptr+len).
pub fn f_div_3<S: Into<Scalar>>(
&mut self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_3(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `div` writing the result into `out`.
pub fn f_div_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `div` with `rounding_mode`, writing the result into `out`.
pub fn f_div_out1(
&self,
out: &Tensor,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise division by a tensor (ATen alias of `div`).
pub fn f_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise division by a scalar (ATen alias of `div`).
pub fn f_divide1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor division with `rounding_mode` (alias of `div2`; string as ptr+len).
pub fn f_divide2(&self, other: &Tensor, rounding_mode: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar division with `rounding_mode` (alias of `div3`; string as ptr+len).
pub fn f_divide3<S: Into<Scalar>>(
&self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide3(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place tensor division (alias of `div_`).
pub fn f_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place scalar division (alias of `div_1`).
pub fn f_divide_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place tensor division with `rounding_mode` (alias of `div_2`).
pub fn f_divide_2(&mut self, other: &Tensor, rounding_mode: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place scalar division with `rounding_mode` (alias of `div_3`).
pub fn f_divide_3<S: Into<Scalar>>(
&mut self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_3(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `divide` writing the result into `out`.
pub fn f_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `divide` with `rounding_mode`, writing the result into `out`.
pub fn f_divide_out1(
&self,
out: &Tensor,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dot product of `self` with `tensor`.
pub fn f_dot(&self, tensor: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dot(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `dot` writing the result into `out`.
pub fn f_dot_out(&self, out: &Tensor, tensor: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Dropout with probability `p`; `train` toggles whether dropout is applied.
pub fn f_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place dropout with probability `p` (trailing `_` = mutating variant).
pub fn f_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Depth-wise stack of a list of tensors. The temporary `Vec` from
/// `ptr_list` lives for the whole call statement, keeping its pointer valid.
pub fn f_dstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `dstack` writing the result into `out`.
pub fn f_dstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Eigendecomposition (ATen `eig`); returns (eigenvalues, eigenvectors),
/// the latter populated only when `eigenvectors` is true.
pub fn f_eig(&self, eigenvectors: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_eig(
c_tensors.as_mut_ptr(),
self.c_tensor,
if eigenvectors { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// `eig` writing results into caller-provided `e` (values) and `v` (vectors).
pub fn f_eig_out(
&self,
e: &Tensor,
v: &Tensor,
eigenvectors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_eig_out(
c_tensors.as_mut_ptr(),
e.c_tensor,
v.c_tensor,
self.c_tensor,
if eigenvectors { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Einstein-summation over `tensors` as specified by `equation`
/// (string passed to C as ptr + byte length).
pub fn f_einsum<T: Borrow<Tensor>>(equation: &str, tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_einsum(
c_tensors.as_mut_ptr(),
equation.as_ptr(),
equation.len() as i32,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// ELU activation with ATen's default parameters.
pub fn f_elu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place ELU activation (trailing `_` = mutating variant).
pub fn f_elu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of ELU. `is_result` tells the kernel whether `self_or_result`
/// holds the forward output (from the in-place op) or the original input.
pub fn f_elu_backward<S: Into<Scalar>>(
grad_output: &Tensor,
alpha: S,
scale: S,
input_scale: S,
is_result: bool,
self_or_result: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
alpha.into().c_scalar,
scale.into().c_scalar,
input_scale.into().c_scalar,
if is_result { 1 } else { 0 },
self_or_result.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `elu` writing the result into `out`.
pub fn f_elu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Embedding lookup: rows of `weight` selected by `indices`;
/// `padding_idx` rows get zero gradient per ATen semantics.
pub fn f_embedding(
weight: &Tensor,
indices: &Tensor,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of `embedding`: accumulates `grad` into a `num_weights`-row
/// gradient table (sparse or dense depending on `sparse`).
pub fn f_embedding_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Bagged embedding lookup (sum/mean/max per `mode`); returns the 4-tuple of
/// result plus the bookkeeping tensors ATen produces for the backward pass.
/// Optional `per_sample_weights` becomes a null pointer when `None`.
pub fn f_embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_embedding_bag(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
))
}
/// Dense-gradient backward of `embedding`.
pub fn f_embedding_dense_backward(
grad_output: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_dense_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place renormalization of the embedding rows selected by `indices` so
/// their `norm_type`-norm does not exceed `max_norm`.
pub fn f_embedding_renorm_(
&mut self,
indices: &Tensor,
max_norm: f64,
norm_type: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_renorm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
max_norm,
norm_type
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Sparse-gradient backward of `embedding`.
pub fn f_embedding_sparse_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_sparse_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Uninitialized tensor of shape `size`; `options` = (dtype, device),
/// each lowered to its C int code.
pub fn f_empty(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Uninitialized tensor with the same shape/options as `self`.
pub fn f_empty_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Empty tensor on the meta backend (shape-only, no storage).
pub fn f_empty_meta(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_meta(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `empty` resizing/writing into `out`.
pub fn f_empty_out(out: &Tensor, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Empty quantized tensor of shape `size` using `qtensor`'s quantizer.
pub fn f_empty_quantized(size: &[i64], qtensor: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
qtensor.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Uninitialized tensor with explicit `size` and `stride`.
pub fn f_empty_strided(
size: &[i64],
stride: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_strided(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise equality against a scalar; returns a boolean tensor.
pub fn f_eq<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise equality against a tensor; returns a boolean tensor.
pub fn f_eq1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place element-wise equality against a scalar.
pub fn f_eq_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place element-wise equality against a tensor.
pub fn f_eq_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar `eq` writing the result into `out`.
pub fn f_eq_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor `eq` writing the result into `out`.
pub fn f_eq_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise error function; returns a new tensor.
pub fn f_erf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place error function (trailing `_` = mutating variant).
pub fn f_erf_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `erf` writing the result into `out`.
pub fn f_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise complementary error function; returns a new tensor.
pub fn f_erfc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place complementary error function.
pub fn f_erfc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `erfc` writing the result into `out`.
pub fn f_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise inverse error function; returns a new tensor.
pub fn f_erfinv(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place inverse error function.
pub fn f_erfinv_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `erfinv` writing the result into `out`.
pub fn f_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise natural exponential; returns a new tensor.
pub fn f_exp(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise base-2 exponential; returns a new tensor.
pub fn f_exp2(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place base-2 exponential.
pub fn f_exp2_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `exp2` writing the result into `out`.
pub fn f_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place natural exponential.
pub fn f_exp_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `exp` writing the result into `out`.
pub fn f_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Broadcast-expands `self` to `size` without copying data; `implicit`
/// matches ATen's implicit-expansion flag.
pub fn f_expand(&self, size: &[i64], implicit: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
if implicit { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Expands `self` to the shape of `other`.
pub fn f_expand_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand_as(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise `exp(x) - 1`; returns a new tensor.
pub fn f_expm1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `exp(x) - 1`.
pub fn f_expm1_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `expm1` writing the result into `out`.
pub fn f_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fills `self` in place with samples from Exponential(`lambd`).
pub fn f_exponential_(&mut self, lambd: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exponential_(
c_tensors.as_mut_ptr(),
self.c_tensor,
lambd
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `n` x `n` identity matrix with the given (dtype, device) options.
pub fn f_eye(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye(
c_tensors.as_mut_ptr(),
n,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `n` x `m` identity-like matrix (ones on the main diagonal).
pub fn f_eye1(n: i64, m: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye1(
c_tensors.as_mut_ptr(),
n,
m,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `eye(n)` writing the result into `out`.
pub fn f_eye_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible rectangular `eye` (`n` x `m`) writing into `out`: wraps `atg_eye_out1`.
pub fn f_eye_out1(out: &Tensor, n: i64, m: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye_out1(c_tensors.as_mut_ptr(), out.c_tensor, n, m));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fake_quantize_per_channel_affine`: forwards per-channel `scale` /
/// `zero_point` tensors, the channel `axis`, and the quant range to the C binding.
pub fn f_fake_quantize_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible cachemask variant of per-channel fake quantization: the C call fills two
/// output handles (result tensor and mask tensor), returned as a pair.
pub fn f_fake_quantize_per_channel_affine_cachemask(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible backward pass for the per-channel cachemask fake quantization, taking the
/// upstream `grad` and the `mask` produced by the forward call.
pub fn f_fake_quantize_per_channel_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fake_quantize_per_tensor_affine`: scalar `scale` / `zero_point` plus the
/// quant range, forwarded to the C binding.
pub fn f_fake_quantize_per_tensor_affine(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
quant_min,
quant_max
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible cachemask variant of per-tensor fake quantization; returns the
/// (result, mask) tensor pair filled by the C call.
pub fn f_fake_quantize_per_tensor_affine_cachemask(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
quant_min,
quant_max
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible backward pass for the per-tensor cachemask fake quantization.
pub fn f_fake_quantize_per_tensor_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM fp16-weight linear layer: wraps `atg_fbgemm_linear_fp16_weight`
/// with `self` as input, a prepacked weight tensor, and a bias.
pub fn f_fbgemm_linear_fp16_weight(
&self,
packed_weight: &Tensor,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_fp16_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
packed_weight.c_tensor,
bias.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM fp16-weight / fp32-activation linear layer variant.
pub fn f_fbgemm_linear_fp16_weight_fp32_activation(
&self,
packed_weight: &Tensor,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_fp16_weight_fp32_activation(
c_tensors.as_mut_ptr(),
self.c_tensor,
packed_weight.c_tensor,
bias.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM int8-weight linear layer: scale/zero-point are passed as C scalars
/// via `Into<Scalar>` conversion at the call site.
pub fn f_fbgemm_linear_int8_weight<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_int8_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
packed.c_tensor,
col_offsets.c_tensor,
weight_scale.into().c_scalar,
weight_zero_point.into().c_scalar,
bias.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM int8-weight / fp32-activation linear layer variant.
pub fn f_fbgemm_linear_int8_weight_fp32_activation<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_int8_weight_fp32_activation(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
packed.c_tensor,
col_offsets.c_tensor,
weight_scale.into().c_scalar,
weight_zero_point.into().c_scalar,
bias.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM fp16 GEMM-matrix prepacking of `self`.
pub fn f_fbgemm_pack_gemm_matrix_fp16(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_gemm_matrix_fp16(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM quantized-matrix prepacking of `self`.
pub fn f_fbgemm_pack_quantized_matrix(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible FBGEMM quantized-matrix prepacking with explicit `k` / `n` dimensions.
pub fn f_fbgemm_pack_quantized_matrix1(&self, k: i64, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix1(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
n
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `feature_alpha_dropout` with probability `p`; `train` is passed as 0/1.
pub fn f_feature_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_alpha_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `feature_alpha_dropout_` variant.
pub fn f_feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_alpha_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `feature_dropout` with probability `p`; `train` is passed as 0/1.
pub fn f_feature_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `feature_dropout_` variant.
pub fn f_feature_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 1-D FFT. The optional length `n` is encoded for C as a value plus a
/// null-flag: `n.unwrap_or(0)` with `n.is_none() as i8` presumably marking "absent"
/// on the C side. `norm` is passed as a raw byte pointer + length.
pub fn f_fft_fft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D FFT over sizes `s` and dims `dim`, each passed as ptr + len.
pub fn f_fft_fft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D FFT writing into `out`.
pub fn f_fft_fft2_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 1-D FFT writing into `out`; optional `n` uses the value + null-flag encoding.
pub fn f_fft_fft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_fftfreq` factory: `n` samples with spacing `d`, created with the
/// given `(Kind, Device)` options.
pub fn f_fft_fftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftfreq(
c_tensors.as_mut_ptr(),
n,
d,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_fftfreq` writing into `out`.
pub fn f_fft_fftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftfreq_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
n,
d
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D FFT over sizes `s` and dims `dim`.
pub fn f_fft_fftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D FFT writing into `out`.
pub fn f_fft_fftn_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_fftshift` over the given `dim` list.
pub fn f_fft_fftshift(&self, dim: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftshift(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible Hermitian FFT; optional `n` uses the value + null-flag encoding.
pub fn f_fft_hfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible Hermitian FFT writing into `out`.
pub fn f_fft_hfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 1-D inverse FFT; optional `n` uses the value + null-flag encoding.
pub fn f_fft_ifft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D inverse FFT over sizes `s` and dims `dim`.
pub fn f_fft_ifft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D inverse FFT writing into `out`.
pub fn f_fft_ifft2_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 1-D inverse FFT writing into `out`.
pub fn f_fft_ifft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D inverse FFT over sizes `s` and dims `dim`.
pub fn f_fft_ifftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D inverse FFT writing into `out`.
pub fn f_fft_ifftn_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_ifftshift` over the given `dim` list.
pub fn f_fft_ifftshift(&self, dim: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftshift(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible inverse Hermitian FFT; optional `n` uses the value + null-flag encoding.
pub fn f_fft_ihfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible inverse Hermitian FFT writing into `out`.
pub fn f_fft_ihfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible inverse real FFT; optional `n` uses the value + null-flag encoding.
pub fn f_fft_irfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D inverse real FFT over sizes `s` and dims `dim`.
pub fn f_fft_irfft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D inverse real FFT writing into `out`.
pub fn f_fft_irfft2_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible inverse real FFT writing into `out`.
pub fn f_fft_irfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D inverse real FFT over sizes `s` and dims `dim`.
pub fn f_fft_irfftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D inverse real FFT writing into `out`.
pub fn f_fft_irfftn_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible real FFT; optional `n` uses the value + null-flag encoding.
pub fn f_fft_rfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D real FFT over sizes `s` and dims `dim`.
pub fn f_fft_rfft2(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible 2-D real FFT writing into `out`.
pub fn f_fft_rfft2_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible real FFT writing into `out`.
pub fn f_fft_rfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_rfftfreq` factory with the given `(Kind, Device)` options.
pub fn f_fft_rfftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftfreq(
c_tensors.as_mut_ptr(),
n,
d,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fft_rfftfreq` writing into `out`.
pub fn f_fft_rfftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftfreq_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
n,
d
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D real FFT over sizes `s` and dims `dim`.
pub fn f_fft_rfftn(&self, s: &[i64], dim: &[i64], norm: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible N-D real FFT writing into `out`.
pub fn f_fft_rfftn_out(
&self,
out: &Tensor,
s: &[i64],
dim: &[i64],
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len() as i32,
dim.as_ptr(),
dim.len() as i32,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fill_` with a scalar `value`.
pub fn f_fill_<S: Into<Scalar>>(&mut self, value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_(
c_tensors.as_mut_ptr(),
self.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fill_` taking the value from a tensor.
pub fn f_fill_1(&mut self, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
value.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fill_diagonal_` with a scalar; `wrap` is passed as 0/1.
pub fn f_fill_diagonal_<S: Into<Scalar>>(
&mut self,
fill_value: S,
wrap: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_diagonal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
fill_value.into().c_scalar,
if wrap { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fix`: wraps the C `atg_fix` call on `self`.
pub fn f_fix(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fix_` variant.
pub fn f_fix_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fix` writing into `out`.
pub fn f_fix_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `flatten` between `start_dim` and `end_dim` (inclusive per ATen).
pub fn f_flatten(&self, start_dim: i64, end_dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flatten(
c_tensors.as_mut_ptr(),
self.c_tensor,
start_dim,
end_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `flip` along the listed `dims`.
pub fn f_flip(&self, dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flip(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fliplr`: wraps the C `atg_fliplr` call on `self`.
pub fn f_fliplr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fliplr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `flipud`: wraps the C `atg_flipud` call on `self`.
pub fn f_flipud(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flipud(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` with a tensor exponent.
pub fn f_float_power(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` variant with a scalar base and tensor exponent
/// (associated function: no `self`).
pub fn f_float_power1<S: Into<Scalar>>(
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power1(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` variant with a scalar exponent.
pub fn f_float_power2<S: Into<Scalar>>(&self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power2(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `float_power_` with a scalar exponent.
pub fn f_float_power_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `float_power_` with a tensor exponent.
pub fn f_float_power_1(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` (tensor exponent) writing into `out`.
pub fn f_float_power_out(&self, out: &Tensor, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` (scalar base, tensor exponent) writing into `out`.
pub fn f_float_power_out1<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `float_power` (scalar exponent) writing into `out`.
pub fn f_float_power_out2<S: Into<Scalar>>(
&self,
out: &Tensor,
exponent: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_out2(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `floor`: wraps the C `atg_floor` call on `self`.
pub fn f_floor(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `floor_` variant.
pub fn f_floor_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `floor_divide` with a tensor divisor.
pub fn f_floor_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `floor_divide` with a scalar divisor.
pub fn f_floor_divide1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `floor_divide_` with a tensor divisor.
pub fn f_floor_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `floor_divide_` with a scalar divisor.
pub fn f_floor_divide_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `floor_divide` writing into `out`.
pub fn f_floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `floor` writing into `out`.
pub fn f_floor_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible element-wise `fmax` against `other`.
pub fn f_fmax(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible element-wise `fmax` writing into `out`.
pub fn f_fmax_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible element-wise `fmin` against `other`.
pub fn f_fmin(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmin(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible element-wise `fmin` writing into `out`.
pub fn f_fmin_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fmod` with a scalar divisor.
pub fn f_fmod<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fmod` with a tensor divisor.
pub fn f_fmod1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fmod_` with a scalar divisor.
pub fn f_fmod_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `fmod_` with a tensor divisor.
pub fn f_fmod_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fmod` (scalar divisor) writing into `out`.
pub fn f_fmod_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fmod` (tensor divisor) writing into `out`.
pub fn f_fmod_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `frac`: wraps the C `atg_frac` call on `self`.
pub fn f_frac(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible in-place `frac_` variant.
pub fn f_frac_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `frac` writing into `out`.
pub fn f_frac_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fractional_max_pool2d`; the C call fills two handles, returned as
/// (output, indices).
pub fn f_fractional_max_pool2d(
&self,
kernel_size: &[i64],
output_size: &[i64],
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
random_samples.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible backward pass of `fractional_max_pool2d`, taking the forward pass's
/// `indices` and the upstream `grad_output`.
pub fn f_fractional_max_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
output_size: &[i64],
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible backward pass of `fractional_max_pool2d` writing into `grad_input`.
pub fn f_fractional_max_pool2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
output_size: &[i64],
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible `fractional_max_pool2d` writing into the provided `output` / `indices`
/// tensors; returns the (output, indices) pair.
pub fn f_fractional_max_pool2d_out(
&self,
output: &Tensor,
indices: &Tensor,
kernel_size: &[i64],
output_size: &[i64],
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool2d_out(
c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
random_samples.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible `fractional_max_pool3d`; returns the (output, indices) pair.
pub fn f_fractional_max_pool3d(
&self,
kernel_size: &[i64],
output_size: &[i64],
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
random_samples.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible backward pass of `fractional_max_pool3d`.
pub fn f_fractional_max_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
output_size: &[i64],
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible backward pass of `fractional_max_pool3d` writing into `grad_input`.
pub fn f_fractional_max_pool3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
output_size: &[i64],
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
output_size.as_ptr(),
output_size.len() as i32,
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_fractional_max_pool3d_out`; returns (values, indices).
pub fn f_fractional_max_pool3d_out(
    &self,
    output: &Tensor,
    indices: &Tensor,
    kernel_size: &[i64],
    output_size: &[i64],
    random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_fractional_max_pool3d_out(
        out.as_mut_ptr(),
        output.c_tensor, indices.c_tensor, self.c_tensor,
        kernel_size.as_ptr(), kernel_size.len() as i32,
        output_size.as_ptr(), output_size.len() as i32,
        random_samples.c_tensor
    ));
    Ok((Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] }))
}
/// Fallible binding for `atg_frobenius_norm` (whole-tensor reduction).
pub fn f_frobenius_norm(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_frobenius_norm(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_frobenius_norm1`, reducing over `dim`.
pub fn f_frobenius_norm1(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_frobenius_norm1(
        out.as_mut_ptr(),
        self.c_tensor,
        dim.as_ptr(), dim.len() as i32,
        if keepdim { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_frobenius_norm_out`, writing into `out`.
pub fn f_frobenius_norm_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    keepdim: bool,
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_frobenius_norm_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor,
        dim.as_ptr(), dim.len() as i32,
        if keepdim { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_from_file`. The optional `size` is encoded for
/// the C layer as a (value, is_none) pair, with `0` as the placeholder value.
pub fn f_from_file(
    filename: &str,
    shared: bool,
    size: impl Into<Option<i64>>,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let size = size.into();
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_from_file(
        out.as_mut_ptr(),
        // The C side takes the string as (ptr, byte length), not NUL-terminated.
        filename.as_ptr(), filename.len() as i32,
        if shared { 1 } else { 0 },
        size.unwrap_or(0i64), size.is_none() as i8,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_full`: a tensor of shape `size` filled with `fill_value`.
pub fn f_full<S: Into<Scalar>>(
    size: &[i64],
    fill_value: S,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_full(
        out.as_mut_ptr(),
        size.as_ptr(), size.len() as i32,
        fill_value.into().c_scalar,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_full_like`.
pub fn f_full_like<S: Into<Scalar>>(&self, fill_value: S) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_full_like(
        out.as_mut_ptr(),
        self.c_tensor,
        fill_value.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_full_out`, writing into `out`.
pub fn f_full_out<S: Into<Scalar>>(
    out: &Tensor,
    size: &[i64],
    fill_value: S,
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_full_out(
        res.as_mut_ptr(),
        out.c_tensor,
        size.as_ptr(), size.len() as i32,
        fill_value.into().c_scalar
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_gather`.
pub fn f_gather(
    &self,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gather(
        out.as_mut_ptr(),
        self.c_tensor, dim, index.c_tensor,
        if sparse_grad { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_gather_backward`.
pub fn f_gather_backward(
    &self,
    grad: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gather_backward(
        out.as_mut_ptr(),
        grad.c_tensor, self.c_tensor, dim, index.c_tensor,
        if sparse_grad { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_gather_out`, writing into `out`.
pub fn f_gather_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    sparse_grad: bool,
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gather_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, dim, index.c_tensor,
        if sparse_grad { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_gcd`.
pub fn f_gcd(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gcd(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_gcd_`.
pub fn f_gcd_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gcd_(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_gcd_out`, writing into `out`.
pub fn f_gcd_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gcd_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
pub fn f_ge<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ge1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ge_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ge_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ge_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ge_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_gelu`.
pub fn f_gelu(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gelu(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_gelu_backward`.
pub fn f_gelu_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gelu_backward(
        out.as_mut_ptr(),
        grad.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_geometric_` with probability `p`.
pub fn f_geometric_(&mut self, p: f64) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_geometric_(out.as_mut_ptr(), self.c_tensor, p));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_geqrf`; returns the two factorization tensors.
pub fn f_geqrf(&self) -> Result<(Tensor, Tensor), TchError> {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_geqrf(out.as_mut_ptr(), self.c_tensor));
    Ok((Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] }))
}
/// Fallible binding for `atg_geqrf_out`, writing into `a` and `tau`.
pub fn f_geqrf_out(&self, a: &Tensor, tau: &Tensor) -> Result<(Tensor, Tensor), TchError> {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_geqrf_out(
        out.as_mut_ptr(),
        a.c_tensor, tau.c_tensor, self.c_tensor
    ));
    Ok((Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] }))
}
/// Fallible binding for `atg_ger`.
pub fn f_ger(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_ger(out.as_mut_ptr(), self.c_tensor, vec2.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_ger_out`, writing into `out`.
pub fn f_ger_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_ger_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, vec2.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_glu`.
pub fn f_glu(&self, dim: i64) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_glu(out.as_mut_ptr(), self.c_tensor, dim));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_glu_backward`.
pub fn f_glu_backward(&self, grad_output: &Tensor, dim: i64) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_glu_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor, self.c_tensor, dim
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_glu_backward_out`, writing into `grad_input`.
pub fn f_glu_backward_out(
    &self,
    grad_input: &Tensor,
    grad_output: &Tensor,
    dim: i64,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_glu_backward_out(
        out.as_mut_ptr(),
        grad_input.c_tensor, grad_output.c_tensor, self.c_tensor, dim
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_glu_out`, writing into `out`.
pub fn f_glu_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_glu_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, dim
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_grad`.
pub fn f_grad(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_grad(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_greater` (scalar comparand).
pub fn f_greater<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater(
        out.as_mut_ptr(),
        self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_greater1` (tensor comparand).
pub fn f_greater1(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater1(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_greater_` (scalar comparand).
pub fn f_greater_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_(
        out.as_mut_ptr(),
        self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_greater_1` (tensor comparand).
pub fn f_greater_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_1(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_greater_equal` (scalar comparand).
pub fn f_greater_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal(
        out.as_mut_ptr(),
        self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_greater_equal1` (tensor comparand).
pub fn f_greater_equal1(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal1(
        out.as_mut_ptr(),
        self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_greater_equal_` (scalar comparand).
pub fn f_greater_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal_(
        out.as_mut_ptr(),
        self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_greater_equal_1` (tensor comparand).
pub fn f_greater_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal_1(
        out.as_mut_ptr(),
        self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_greater_equal_out` (scalar comparand), writing into `out`.
pub fn f_greater_equal_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    other: S,
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_greater_equal_out1` (tensor comparand), writing into `out`.
pub fn f_greater_equal_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_equal_out1(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_greater_out` (scalar comparand), writing into `out`.
pub fn f_greater_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    other: S,
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_greater_out1` (tensor comparand), writing into `out`.
pub fn f_greater_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_greater_out1(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_grid_sampler`.
pub fn f_grid_sampler(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_grid_sampler(
        out.as_mut_ptr(),
        self.c_tensor, grid.c_tensor,
        interpolation_mode, padding_mode,
        if align_corners { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_grid_sampler_2d`.
pub fn f_grid_sampler_2d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_grid_sampler_2d(
        out.as_mut_ptr(),
        self.c_tensor, grid.c_tensor,
        interpolation_mode, padding_mode,
        if align_corners { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_grid_sampler_2d_backward`; returns the two gradient tensors.
pub fn f_grid_sampler_2d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_grid_sampler_2d_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor, self.c_tensor, grid.c_tensor,
        interpolation_mode, padding_mode,
        if align_corners { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] }))
}
/// Fallible binding for `atg_grid_sampler_3d`.
pub fn f_grid_sampler_3d(
    &self,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_grid_sampler_3d(
        out.as_mut_ptr(),
        self.c_tensor, grid.c_tensor,
        interpolation_mode, padding_mode,
        if align_corners { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_grid_sampler_3d_backward`; returns the two gradient tensors.
pub fn f_grid_sampler_3d_backward(
    &self,
    grad_output: &Tensor,
    grid: &Tensor,
    interpolation_mode: i64,
    padding_mode: i64,
    align_corners: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut out = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_grid_sampler_3d_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor, self.c_tensor, grid.c_tensor,
        interpolation_mode, padding_mode,
        if align_corners { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: out[0] }, Tensor { c_tensor: out[1] }))
}
/// Fallible binding for `atg_group_norm`. Absent `weight`/`bias` are passed
/// to the C layer as null tensor handles.
pub fn f_group_norm<T: Borrow<Tensor>>(
    &self,
    num_groups: i64,
    weight: Option<T>,
    bias: Option<T>,
    eps: f64,
    cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_group_norm(
        out.as_mut_ptr(),
        self.c_tensor,
        num_groups,
        weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        eps,
        if cudnn_enabled { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out[0] })
}
pub fn f_gru<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_gru(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
pub fn f_gru1<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_gru1(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_gru_cell`. Absent biases are passed as null handles.
pub fn f_gru_cell<T: Borrow<Tensor>>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_gru_cell(
        out.as_mut_ptr(),
        self.c_tensor, hx.c_tensor, w_ih.c_tensor, w_hh.c_tensor,
        b_ih.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        b_hh.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: out[0] })
}
pub fn f_gt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_gt1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_gt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_gt_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_gt_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_gt_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_hamming_window`.
pub fn f_hamming_window(
    window_length: i64,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hamming_window(
        out.as_mut_ptr(),
        window_length,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hamming_window1` (explicit `periodic` flag).
pub fn f_hamming_window1(
    window_length: i64,
    periodic: bool,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hamming_window1(
        out.as_mut_ptr(),
        window_length,
        if periodic { 1 } else { 0 },
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hamming_window2` (explicit `alpha` coefficient).
pub fn f_hamming_window2(
    window_length: i64,
    periodic: bool,
    alpha: f64,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hamming_window2(
        out.as_mut_ptr(),
        window_length,
        if periodic { 1 } else { 0 },
        alpha,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hamming_window3` (explicit `alpha` and `beta`).
pub fn f_hamming_window3(
    window_length: i64,
    periodic: bool,
    alpha: f64,
    beta: f64,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hamming_window3(
        out.as_mut_ptr(),
        window_length,
        if periodic { 1 } else { 0 },
        alpha,
        beta,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hann_window`.
pub fn f_hann_window(window_length: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hann_window(
        out.as_mut_ptr(),
        window_length,
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hann_window1` (explicit `periodic` flag).
pub fn f_hann_window1(
    window_length: i64,
    periodic: bool,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hann_window1(
        out.as_mut_ptr(),
        window_length,
        if periodic { 1 } else { 0 },
        kind.c_int(), device.c_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardshrink`.
pub fn f_hardshrink(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardshrink(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardshrink_backward` with threshold `lambd`.
pub fn f_hardshrink_backward<S: Into<Scalar>>(
    &self,
    grad_out: &Tensor,
    lambd: S,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardshrink_backward(
        out.as_mut_ptr(),
        grad_out.c_tensor, self.c_tensor,
        lambd.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardsigmoid`.
pub fn f_hardsigmoid(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardsigmoid(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_hardsigmoid_`.
pub fn f_hardsigmoid_(&mut self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardsigmoid_(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardsigmoid_backward`.
pub fn f_hardsigmoid_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardsigmoid_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardsigmoid_out`, writing into `out`.
pub fn f_hardsigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardsigmoid_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_hardswish`.
pub fn f_hardswish(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardswish(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_hardswish_`.
pub fn f_hardswish_(&mut self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardswish_(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardswish_backward`.
pub fn f_hardswish_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardswish_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hardswish_out`, writing into `out`.
pub fn f_hardswish_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hardswish_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
pub fn f_hardtanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_hardtanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_hardtanh_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
min_val.into().c_scalar,
max_val.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_hardtanh_backward_out<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
min_val.into().c_scalar,
max_val.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_hardtanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_heaviside`.
pub fn f_heaviside(&self, values: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_heaviside(out.as_mut_ptr(), self.c_tensor, values.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_heaviside_`.
pub fn f_heaviside_(&mut self, values: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_heaviside_(out.as_mut_ptr(), self.c_tensor, values.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_heaviside_out`, writing into `out`.
pub fn f_heaviside_out(&self, out: &Tensor, values: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_heaviside_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, values.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_hinge_embedding_loss`; `reduction` is lowered to
/// its integer encoding for the C layer.
pub fn f_hinge_embedding_loss(
    &self,
    target: &Tensor,
    margin: f64,
    reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hinge_embedding_loss(
        out.as_mut_ptr(),
        self.c_tensor, target.c_tensor,
        margin,
        reduction.to_int()
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_histc` with `bins` buckets.
pub fn f_histc(&self, bins: i64) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_histc(out.as_mut_ptr(), self.c_tensor, bins));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_histc_out`, writing into `out`.
pub fn f_histc_out(&self, out: &Tensor, bins: i64) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_histc_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, bins
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_hspmm`.
pub fn f_hspmm(mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hspmm(out.as_mut_ptr(), mat1.c_tensor, mat2.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hspmm_out`, writing into `out`.
pub fn f_hspmm_out(out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hspmm_out(
        res.as_mut_ptr(),
        out.c_tensor, mat1.c_tensor, mat2.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
pub fn f_hstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_hstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_hypot`.
pub fn f_hypot(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hypot(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_hypot_`.
pub fn f_hypot_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hypot_(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_hypot_out`, writing into `out`.
pub fn f_hypot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_hypot_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_i0`.
pub fn f_i0(&self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_i0(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_i0_`.
pub fn f_i0_(&mut self) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_i0_(out.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_i0_out`, writing into `out`.
pub fn f_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_i0_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_igamma`.
pub fn f_igamma(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igamma(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_igamma_`.
pub fn f_igamma_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igamma_(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_igamma_out`, writing into `out`.
pub fn f_igamma_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igamma_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_igammac`.
pub fn f_igammac(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igammac(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for the in-place `atg_igammac_`.
pub fn f_igammac_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igammac_(out.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_igammac_out`, writing into `out`.
pub fn f_igammac_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_igammac_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor, other.c_tensor
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_im2col`. Each slice is marshalled as a (ptr, len) pair.
pub fn f_im2col(
    &self,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64],
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_im2col(
        out.as_mut_ptr(),
        self.c_tensor,
        kernel_size.as_ptr(), kernel_size.len() as i32,
        dilation.as_ptr(), dilation.len() as i32,
        padding.as_ptr(), padding.len() as i32,
        stride.as_ptr(), stride.len() as i32
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_im2col_backward`.
pub fn f_im2col_backward(
    grad_output: &Tensor,
    input_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64],
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_im2col_backward(
        out.as_mut_ptr(),
        grad_output.c_tensor,
        input_size.as_ptr(), input_size.len() as i32,
        kernel_size.as_ptr(), kernel_size.len() as i32,
        dilation.as_ptr(), dilation.len() as i32,
        padding.as_ptr(), padding.len() as i32,
        stride.as_ptr(), stride.len() as i32
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_im2col_backward_out`, writing into `grad_input`.
pub fn f_im2col_backward_out(
    grad_input: &Tensor,
    grad_output: &Tensor,
    input_size: &[i64],
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64],
) -> Result<Tensor, TchError> {
    let mut out = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_im2col_backward_out(
        out.as_mut_ptr(),
        grad_input.c_tensor, grad_output.c_tensor,
        input_size.as_ptr(), input_size.len() as i32,
        kernel_size.as_ptr(), kernel_size.len() as i32,
        dilation.as_ptr(), dilation.len() as i32,
        padding.as_ptr(), padding.len() as i32,
        stride.as_ptr(), stride.len() as i32
    ));
    Ok(Tensor { c_tensor: out[0] })
}
/// Fallible binding for `atg_im2col_out`, writing into `out`.
pub fn f_im2col_out(
    &self,
    out: &Tensor,
    kernel_size: &[i64],
    dilation: &[i64],
    padding: &[i64],
    stride: &[i64],
) -> Result<Tensor, TchError> {
    let mut res = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_im2col_out(
        res.as_mut_ptr(),
        out.c_tensor, self.c_tensor,
        kernel_size.as_ptr(), kernel_size.len() as i32,
        dilation.as_ptr(), dilation.len() as i32,
        padding.as_ptr(), padding.len() as i32,
        stride.as_ptr(), stride.len() as i32
    ));
    Ok(Tensor { c_tensor: res[0] })
}
/// Fallible binding for `atg_imag`; returns a new tensor handle or `TchError`.
pub fn f_imag(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_imag(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index`. `None` entries in `indices` are passed
/// to C as null pointers via `ptr_list_opt`; the temporary `Vec` it returns
/// lives until the end of the full call expression, keeping the pointer valid.
pub fn f_index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_add` along dimension `dim`.
pub fn f_index_add(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_add(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_add_` (in-place naming convention).
pub fn f_index_add_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_add_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_copy` along dimension `dim`.
pub fn f_index_copy(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_copy_` (in-place naming convention).
pub fn f_index_copy_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_copy_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_fill` with a scalar fill `value`.
pub fn f_index_fill<S: Into<Scalar>>(
&self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_fill1` — tensor-valued `value` overload
/// (numeric suffix distinguishes C overloads in this generated API).
pub fn f_index_fill1(
&self,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_fill_` (in-place, scalar `value`).
pub fn f_index_fill_<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_fill_1` (in-place, tensor `value`).
pub fn f_index_fill_1(
&mut self,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_put`. Optional `indices` become null
/// pointers via `ptr_list_opt`; `accumulate` is lowered to a C int (1/0).
pub fn f_index_put<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_put(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_put_` (in-place naming convention).
pub fn f_index_put_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_put_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_select` along dimension `dim`.
pub fn f_index_select(&self, dim: i64, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_select_backward`. Associated function:
/// takes the incoming `grad` and the original input sizes, not `self`.
pub fn f_index_select_backward(
grad: &Tensor,
self_sizes: &[i64],
dim: i64,
index: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self_sizes.as_ptr(),
self_sizes.len() as i32,
dim,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_index_select_out` with an explicit `out` tensor.
pub fn f_index_select_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_indices`.
pub fn f_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_infinitely_differentiable_gelu_backward`;
/// note `grad` precedes `self` in the C argument order.
pub fn f_infinitely_differentiable_gelu_backward(
&self,
grad: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_infinitely_differentiable_gelu_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_inner`.
pub fn f_inner(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inner(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_inner_out` with an explicit `out` tensor.
pub fn f_inner_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inner_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_instance_norm`. Each optional tensor becomes a
/// null pointer when `None`; the bool flags are lowered to C ints (1/0).
pub fn f_instance_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
use_input_stats: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_instance_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if use_input_stats { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_int_repr`.
pub fn f_int_repr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_int_repr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_inverse`.
pub fn f_inverse(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inverse(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_inverse_out` with an explicit `out` tensor.
pub fn f_inverse_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inverse_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isclose` with relative/absolute tolerances.
pub fn f_isclose(
&self,
other: &Tensor,
rtol: f64,
atol: f64,
equal_nan: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isclose(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rtol,
atol,
if equal_nan { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isfinite`.
pub fn f_isfinite(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isfinite(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isinf`.
pub fn f_isinf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isinf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isnan`.
pub fn f_isnan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isnan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isneginf`.
pub fn f_isneginf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isneginf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isneginf_out` with an explicit `out` tensor.
pub fn f_isneginf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isneginf_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isposinf`.
pub fn f_isposinf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isposinf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isposinf_out` with an explicit `out` tensor.
pub fn f_isposinf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isposinf_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_isreal`.
pub fn f_isreal(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isreal(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_istft`. Each `Option<i64>` is passed to C as a
/// (value, is_none-as-i8) pair — 0 plus the flag stands in for "absent".
pub fn f_istft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
center: bool,
normalized: bool,
onesided: bool,
length: impl Into<Option<i64>>,
return_complex: bool,
) -> Result<Tensor, TchError> {
let hop_length = hop_length.into();
let win_length = win_length.into();
let length = length.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_istft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length.unwrap_or(0i64),
hop_length.is_none() as i8,
win_length.unwrap_or(0i64),
win_length.is_none() as i8,
window.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if center { 1 } else { 0 },
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
length.unwrap_or(0i64),
length.is_none() as i8,
if return_complex { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kaiser_window`. Associated constructor: the
/// `(Kind, Device)` pair is lowered to the two C ints libtorch expects.
pub fn f_kaiser_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kaiser_window1` — overload adding `periodic`.
pub fn f_kaiser_window1(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window1(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kaiser_window2` — overload adding `beta`.
pub fn f_kaiser_window2(
window_length: i64,
periodic: bool,
beta: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window2(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
beta,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kl_div`; `reduction` is encoded via `to_int()`.
pub fn f_kl_div(
&self,
target: &Tensor,
reduction: crate::Reduction,
log_target: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kl_div(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int(),
if log_target { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kl_div_backward`; `grad_output` precedes `self`.
pub fn f_kl_div_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
log_target: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kl_div_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
if log_target { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kron`.
pub fn f_kron(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kron(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kron_out` with an explicit `out` tensor.
pub fn f_kron_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kron_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_kthvalue`; the C call fills two handles, which
/// are returned as a (values, indices)-style tensor pair.
pub fn f_kthvalue(
&self,
k: i64,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_kthvalue(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_kthvalue_out` with explicit `values`/`indices`
/// destination tensors.
pub fn f_kthvalue_out(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_kthvalue_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_l1_loss`.
pub fn f_l1_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_l1_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_l1_loss_backward`.
pub fn f_l1_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_l1_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_l1_loss_backward_out`; `grad_input` is the
/// explicit destination tensor.
pub fn f_l1_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_l1_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_l1_loss_out` with an explicit `out` tensor.
pub fn f_l1_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_l1_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_layer_norm`. Optional `weight`/`bias` become
/// null pointers when `None`; `normalized_shape` is a (pointer, len) pair.
pub fn f_layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: &[i64],
weight: Option<T>,
bias: Option<T>,
eps: f64,
cudnn_enable: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_layer_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
normalized_shape.as_ptr(),
normalized_shape.len() as i32,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps,
if cudnn_enable { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lcm`.
pub fn f_lcm(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lcm_` (in-place naming convention).
pub fn f_lcm_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lcm_out` with an explicit `out` tensor.
pub fn f_lcm_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_ldexp`.
pub fn f_ldexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_ldexp_` (in-place naming convention).
pub fn f_ldexp_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_ldexp_out` with an explicit `out` tensor.
pub fn f_ldexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le` — scalar comparison overload.
pub fn f_le<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le1` — tensor comparison overload.
pub fn f_le1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le_` (in-place, scalar overload).
pub fn f_le_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le_1` (in-place, tensor overload).
pub fn f_le_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le_out` (explicit `out`, scalar overload).
pub fn f_le_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_le_out1` (explicit `out`, tensor overload).
pub fn f_le_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_leaky_relu`.
pub fn f_leaky_relu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_leaky_relu_` (in-place naming convention).
pub fn f_leaky_relu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_leaky_relu_backward`.
pub fn f_leaky_relu_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
negative_slope: S,
self_is_result: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
negative_slope.into().c_scalar,
if self_is_result { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_leaky_relu_out` with an explicit `out` tensor.
pub fn f_leaky_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp` — scalar `weight` overload.
pub fn f_lerp<S: Into<Scalar>>(&self, end: &Tensor, weight: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp1` — tensor `weight` overload.
pub fn f_lerp1(&self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp1(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp_` (in-place, scalar `weight`).
pub fn f_lerp_<S: Into<Scalar>>(
&mut self,
end: &Tensor,
weight: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp_1` (in-place, tensor `weight`).
pub fn f_lerp_1(&mut self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp_out` (explicit `out`, scalar `weight`).
pub fn f_lerp_out<S: Into<Scalar>>(
&self,
out: &Tensor,
end: &Tensor,
weight: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lerp_out1` (explicit `out`, tensor `weight`).
pub fn f_lerp_out1(
&self,
out: &Tensor,
end: &Tensor,
weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less` — scalar overload.
pub fn f_less<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less1` — tensor overload.
pub fn f_less1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_` (in-place, scalar overload).
pub fn f_less_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_1` (in-place, tensor overload).
pub fn f_less_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal` — scalar overload.
pub fn f_less_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal1` — tensor overload.
pub fn f_less_equal1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal_` (in-place, scalar overload).
pub fn f_less_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal_1` (in-place, tensor overload).
pub fn f_less_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal_out` (explicit `out`, scalar).
pub fn f_less_equal_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_equal_out1` (explicit `out`, tensor).
pub fn f_less_equal_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_out` (explicit `out`, scalar overload).
pub fn f_less_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_less_out1` (explicit `out`, tensor overload).
pub fn f_less_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lgamma`.
pub fn f_lgamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lgamma_` (in-place naming convention).
pub fn f_lgamma_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_lgamma_out` with an explicit `out` tensor.
pub fn f_lgamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cholesky`.
pub fn f_linalg_cholesky(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cholesky(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cholesky_out` with an explicit `out`.
pub fn f_linalg_cholesky_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cholesky_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cond` — scalar order `p`.
pub fn f_linalg_cond<S: Into<Scalar>>(&self, p: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cond1` — string order `p`, passed to C
/// as a (byte pointer, byte length) pair.
pub fn f_linalg_cond1(&self, p: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond1(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.as_ptr(),
p.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cond_out` (explicit `out`, scalar `p`).
pub fn f_linalg_cond_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_cond_out1` (explicit `out`, string `p`).
pub fn f_linalg_cond_out1(&self, out: &Tensor, p: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.as_ptr(),
p.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_det`.
pub fn f_linalg_det(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_det(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_eigh`; returns the two tensors the C
/// call produces (eigenvalues/eigenvectors order per libtorch).
pub fn f_linalg_eigh(&self, uplo: &str) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eigh(
c_tensors.as_mut_ptr(),
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_linalg_eigh_out` with explicit destinations.
pub fn f_linalg_eigh_out(
&self,
eigvals: &Tensor,
eigvecs: &Tensor,
uplo: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eigh_out(
c_tensors.as_mut_ptr(),
eigvals.c_tensor,
eigvecs.c_tensor,
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_linalg_eigvalsh`.
pub fn f_linalg_eigvalsh(&self, uplo: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvalsh(
c_tensors.as_mut_ptr(),
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_eigvalsh_out` with an explicit `out`.
pub fn f_linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvalsh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_inv`.
pub fn f_linalg_inv(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_inv(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_inv_out` with an explicit `out` tensor.
pub fn f_linalg_inv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_inv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_matrix_rank`. The optional `tol` is
/// lowered as (value-or-NaN, is_none-as-i8); NaN is only a placeholder —
/// the flag tells C whether `tol` was supplied.
pub fn f_linalg_matrix_rank(
&self,
tol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let tol = tol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank(
c_tensors.as_mut_ptr(),
self.c_tensor,
tol.unwrap_or(std::f64::NAN),
tol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_linalg_matrix_rank_out` with an explicit `out`.
pub fn f_linalg_matrix_rank_out(
&self,
out: &Tensor,
tol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let tol = tol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tol.unwrap_or(std::f64::NAN),
tol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_linalg_norm<S: Into<Scalar>>(
&self,
ord: S,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
ord.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_linalg_norm1(
&self,
ord: &str,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm1(
c_tensors.as_mut_ptr(),
self.c_tensor,
ord.as_ptr(),
ord.len() as i32,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_linalg_norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
ord: S,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ord.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_norm_out1` (string `ord`); `out` is forwarded as the result tensor
/// per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_norm_out1(
&self,
out: &Tensor,
ord: &str,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ord.as_ptr(),
ord.len() as i32,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linalg_pinv` with an `f64` rcond.
/// Returns the wrapped result or a `TchError`.
pub fn f_linalg_pinv(&self, rcond: f64, hermitian: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv(
c_tensors.as_mut_ptr(),
self.c_tensor,
rcond,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linalg_pinv1` with a tensor-valued rcond.
/// Returns the wrapped result or a `TchError`.
pub fn f_linalg_pinv1(&self, rcond: &Tensor, hermitian: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv1(
c_tensors.as_mut_ptr(),
self.c_tensor,
rcond.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_pinv_out` (`f64` rcond); `out` is forwarded as the result tensor
/// per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_pinv_out(
&self,
out: &Tensor,
rcond: f64,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
rcond,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_pinv_out1` (tensor rcond); `out` is forwarded as the result tensor
/// per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_pinv_out1(
&self,
out: &Tensor,
rcond: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
rcond.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linalg_qr`; the C side fills two output slots,
/// returned here as a `(Tensor, Tensor)` pair, or a `TchError`.
pub fn f_linalg_qr(&self, mode: &str) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_qr(
c_tensors.as_mut_ptr(),
self.c_tensor,
mode.as_ptr(),
mode.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over `atg_linalg_qr_out`; `q` and `r` are forwarded as the result tensors per the
/// `_out` convention. Returns the two wrapped results or a `TchError`.
pub fn f_linalg_qr_out(
&self,
q: &Tensor,
r: &Tensor,
mode: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_qr_out(
c_tensors.as_mut_ptr(),
q.c_tensor,
r.c_tensor,
self.c_tensor,
mode.as_ptr(),
mode.len() as i32
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_linalg_slogdet`; returns the two output tensors
/// filled by the C side, or a `TchError`.
pub fn f_linalg_slogdet(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_slogdet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over `atg_linalg_slogdet_out`; `sign` and `logabsdet` are forwarded as the result
/// tensors per the `_out` convention. Returns the two wrapped results or a `TchError`.
pub fn f_linalg_slogdet_out(
&self,
sign: &Tensor,
logabsdet: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_slogdet_out(
c_tensors.as_mut_ptr(),
sign.c_tensor,
logabsdet.c_tensor,
self.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_linalg_solve`; returns the wrapped result or a `TchError`.
pub fn f_linalg_solve(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_solve_out`; `out` is forwarded as the result tensor per the
/// `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_solve_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linalg_svd`; the C side fills three output slots,
/// returned here as a `(Tensor, Tensor, Tensor)` triple, or a `TchError`.
pub fn f_linalg_svd(
&self,
full_matrices: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_svd(
c_tensors.as_mut_ptr(),
self.c_tensor,
if full_matrices { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Wrapper over `atg_linalg_svd_out`; `u`, `s`, and `v` are forwarded as the result tensors
/// per the `_out` convention. Returns the three wrapped results or a `TchError`.
pub fn f_linalg_svd_out(
&self,
u: &Tensor,
s: &Tensor,
v: &Tensor,
full_matrices: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_svd_out(
c_tensors.as_mut_ptr(),
u.c_tensor,
s.c_tensor,
v.c_tensor,
self.c_tensor,
if full_matrices { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Wrapper over the C FFI call `atg_linalg_tensorinv`; returns the wrapped result or a `TchError`.
pub fn f_linalg_tensorinv(&self, ind: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorinv(
c_tensors.as_mut_ptr(),
self.c_tensor,
ind
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_tensorinv_out`; `out` is forwarded as the result tensor per the
/// `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ind
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linalg_tensorsolve`; `dims` is passed as a pointer/length
/// pair. Returns the wrapped result or a `TchError`.
pub fn f_linalg_tensorsolve(&self, other: &Tensor, dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorsolve(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_linalg_tensorsolve_out`; `out` is forwarded as the result tensor per the
/// `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linalg_tensorsolve_out(
&self,
out: &Tensor,
other: &Tensor,
dims: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorsolve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_linear`; an absent `bias` is passed to C as a null
/// tensor pointer. Returns the wrapped result or a `TchError`.
pub fn f_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linear(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over the C FFI call `atg_linspace` (no `self` input);
/// `options` supplies the `(Kind, Device)` pair. Returns the wrapped result or a `TchError`.
pub fn f_linspace<S: Into<Scalar>>(
start: S,
end: S,
steps: impl Into<Option<i64>>,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let steps = steps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linspace(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
// `None` steps is encoded for the C side as a 0 sentinel plus an is-none flag.
steps.unwrap_or(0i64),
steps.is_none() as i8,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over `atg_linspace_out`; `out` is forwarded as the result
/// tensor per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_linspace_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
steps: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let steps = steps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linspace_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar,
// `None` steps is encoded for the C side as a 0 sentinel plus an is-none flag.
steps.unwrap_or(0i64),
steps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log`; returns the wrapped result or a `TchError`.
pub fn f_log(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log10`; returns the wrapped result or a `TchError`.
pub fn f_log10(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_log10_`;
/// returns the wrapped result or a `TchError`.
pub fn f_log10_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log10_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_log10_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log1p`; returns the wrapped result or a `TchError`.
pub fn f_log1p(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_log1p_`;
/// returns the wrapped result or a `TchError`.
pub fn f_log1p_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log1p_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log2`; returns the wrapped result or a `TchError`.
pub fn f_log2(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_log2_`;
/// returns the wrapped result or a `TchError`.
pub fn f_log2_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log2_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_log2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_log_`;
/// returns the wrapped result or a `TchError`.
pub fn f_log_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_log_normal_` with `mean`
/// and `std` forwarded as `f64`s; returns the wrapped result or a `TchError`.
pub fn f_log_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_normal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean,
std
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_log_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log_sigmoid`; returns the wrapped result or a `TchError`.
pub fn f_log_sigmoid(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log_sigmoid_backward`; note `grad_output` is passed to C
/// before `self`. Returns the wrapped result or a `TchError`.
pub fn f_log_sigmoid_backward(
&self,
grad_output: &Tensor,
buffer: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log_sigmoid_backward_out`; `grad_input` is forwarded as the result
/// tensor per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_log_sigmoid_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
buffer: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_log_sigmoid_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_log_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_log_softmax` with `dim` and the `Kind`'s C integer code;
/// returns the wrapped result or a `TchError`.
pub fn f_log_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logaddexp`; returns the wrapped result or a `TchError`.
pub fn f_logaddexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logaddexp2`; returns the wrapped result or a `TchError`.
pub fn f_logaddexp2(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp2(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logaddexp2_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logaddexp_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logcumsumexp`; returns the wrapped result or a `TchError`.
pub fn f_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logcumsumexp(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logcumsumexp_out`; `out` is forwarded as the result tensor per the
/// `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logcumsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logdet`; returns the wrapped result or a `TchError`.
pub fn f_logdet(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logdet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logical_and`; returns the wrapped result or a `TchError`.
pub fn f_logical_and(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_logical_and_`;
/// returns the wrapped result or a `TchError`.
pub fn f_logical_and_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logical_and_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logical_and_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logical_not`; returns the wrapped result or a `TchError`.
pub fn f_logical_not(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_logical_not_`;
/// returns the wrapped result or a `TchError`.
pub fn f_logical_not_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logical_not_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logical_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logical_or`; returns the wrapped result or a `TchError`.
pub fn f_logical_or(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_logical_or_`;
/// returns the wrapped result or a `TchError`.
pub fn f_logical_or_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logical_or_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logical_or_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logical_xor`; returns the wrapped result or a `TchError`.
pub fn f_logical_xor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_logical_xor_`;
/// returns the wrapped result or a `TchError`.
pub fn f_logical_xor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logical_xor_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logit`; returns the wrapped result or a `TchError`.
pub fn f_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit(
c_tensors.as_mut_ptr(),
self.c_tensor,
// `None` eps is encoded for the C side as a NaN sentinel plus an is-none flag.
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_logit_`;
/// returns the wrapped result or a `TchError`.
pub fn f_logit_(&mut self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_(
c_tensors.as_mut_ptr(),
self.c_tensor,
// `None` eps is encoded for the C side as a NaN sentinel plus an is-none flag.
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logit_backward`; note `grad_output` is passed to C before
/// `self`. Returns the wrapped result or a `TchError`.
pub fn f_logit_backward(
&self,
grad_output: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
// `None` eps is encoded for the C side as a NaN sentinel plus an is-none flag.
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logit_backward_out`; `grad_input` is forwarded as the result tensor per
/// the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_logit_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
// `None` eps is encoded for the C side as a NaN sentinel plus an is-none flag.
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logit_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logit_out(
&self,
out: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
// `None` eps is encoded for the C side as a NaN sentinel plus an is-none flag.
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over the C FFI call `atg_logspace` (no `self` input);
/// `options` supplies the `(Kind, Device)` pair. Returns the wrapped result or a `TchError`.
pub fn f_logspace<S: Into<Scalar>>(
start: S,
end: S,
steps: impl Into<Option<i64>>,
base: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let steps = steps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logspace(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
// `None` steps is encoded for the C side as a 0 sentinel plus an is-none flag.
steps.unwrap_or(0i64),
steps.is_none() as i8,
base,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over `atg_logspace_out`; `out` is forwarded as the result
/// tensor per the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_logspace_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
steps: impl Into<Option<i64>>,
base: f64,
) -> Result<Tensor, TchError> {
let steps = steps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logspace_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar,
// `None` steps is encoded for the C side as a 0 sentinel plus an is-none flag.
steps.unwrap_or(0i64),
steps.is_none() as i8,
base
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_logsumexp`; `dim` is passed as a pointer/length pair.
/// Returns the wrapped result or a `TchError`.
pub fn f_logsumexp(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logsumexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_logsumexp_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_logsumexp_out(
&self,
out: &Tensor,
dim: &[i64],
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_lstm`; the `hx` and `params` slices are marshalled into
/// raw-pointer arrays via `ptr_list` (the temporary `Vec`s live until the end of the macro
/// statement, so the pointers stay valid for the call). The C side fills three output slots,
/// returned as a `(Tensor, Tensor, Tensor)` triple, or a `TchError`.
pub fn f_lstm<T: Borrow<Tensor>>(
&self,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lstm(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Associated-function wrapper over the C FFI call `atg_lstm1` (takes `data`/`batch_sizes`
/// instead of `self`); tensor slices are marshalled via `ptr_list`. The C side fills three
/// output slots, returned as a `(Tensor, Tensor, Tensor)` triple, or a `TchError`.
pub fn f_lstm1<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lstm1(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Wrapper over the C FFI call `atg_lstm_cell`; `hx` is marshalled via `ptr_list`, and absent
/// biases are passed as null tensor pointers. Returns two wrapped tensors or a `TchError`.
pub fn f_lstm_cell<T: Borrow<Tensor>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_lstm_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_lstsq`; the C side fills two output slots, returned as a
/// `(Tensor, Tensor)` pair, or a `TchError`.
pub fn f_lstsq(&self, a: &Tensor) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_lstsq(c_tensors.as_mut_ptr(), self.c_tensor, a.c_tensor));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over `atg_lstsq_out`; `x` and `qr` are forwarded as the result tensors per the
/// `_out` convention. Returns the two wrapped results or a `TchError`.
pub fn f_lstsq_out(
&self,
x: &Tensor,
qr: &Tensor,
a: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_lstsq_out(
c_tensors.as_mut_ptr(),
x.c_tensor,
qr.c_tensor,
self.c_tensor,
a.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_lt` with a scalar `other`;
/// returns the wrapped result or a `TchError`.
pub fn f_lt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_lt1` with a tensor `other`;
/// returns the wrapped result or a `TchError`.
pub fn f_lt1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_lt_` with a scalar `other`;
/// returns the wrapped result or a `TchError`.
pub fn f_lt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_lt_1` with a tensor `other`;
/// returns the wrapped result or a `TchError`.
pub fn f_lt_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_lt_out` (scalar `other`); `out` is forwarded as the result tensor per
/// the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_lt_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_lt_out1` (tensor `other`); `out` is forwarded as the result tensor per
/// the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_lt_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_lu_solve`; returns the wrapped result or a `TchError`.
pub fn f_lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lu_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_lu_solve_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_lu_solve_out(
&self,
out: &Tensor,
lu_data: &Tensor,
lu_pivots: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lu_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over the C FFI call `atg_margin_ranking_loss` (no `self`
/// input); `reduction` is converted to its integer code. Returns the wrapped result or a
/// `TchError`.
pub fn f_margin_ranking_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_margin_ranking_loss(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_masked_fill` with a scalar `value`;
/// returns the wrapped result or a `TchError`.
pub fn f_masked_fill<S: Into<Scalar>>(
&self,
mask: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_masked_fill1` with a tensor `value`;
/// returns the wrapped result or a `TchError`.
pub fn f_masked_fill1(&self, mask: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill1(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_masked_fill_` with a scalar
/// `value`; returns the wrapped result or a `TchError`.
pub fn f_masked_fill_<S: Into<Scalar>>(
&mut self,
mask: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_masked_fill_1` with a tensor
/// `value`; returns the wrapped result or a `TchError`.
pub fn f_masked_fill_1(&mut self, mask: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_masked_scatter`; returns the wrapped result or a `TchError`.
pub fn f_masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant (`&mut self`) wrapping the C FFI call `atg_masked_scatter_`;
/// returns the wrapped result or a `TchError`.
pub fn f_masked_scatter_(
&mut self,
mask: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_scatter_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_masked_select`; returns the wrapped result or a `TchError`.
pub fn f_masked_select(&self, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_masked_select_backward`; note `grad` is passed to C
/// before `self`. Returns the wrapped result or a `TchError`.
pub fn f_masked_select_backward(
&self,
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_masked_select_out`; `out` is forwarded as the result tensor per the
/// `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matmul`; returns the wrapped result or a `TchError`.
pub fn f_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_matmul_out`; `out` is forwarded as the result tensor per the `_out`
/// convention. Returns the wrapped result or a `TchError`.
pub fn f_matmul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matrix_exp`; returns the wrapped result or a `TchError`.
pub fn f_matrix_exp(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_exp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matrix_exp_backward`; here `self` is passed to C before
/// `grad` (unlike the other `_backward` wrappers). Returns the wrapped result or a `TchError`.
pub fn f_matrix_exp_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_exp_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matrix_power`; returns the wrapped result or a `TchError`.
pub fn f_matrix_power(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_power(c_tensors.as_mut_ptr(), self.c_tensor, n));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matrix_rank` (no tolerance argument);
/// returns the wrapped result or a `TchError`.
pub fn f_matrix_rank(&self, symmetric: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_rank(
c_tensors.as_mut_ptr(),
self.c_tensor,
if symmetric { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_matrix_rank1` with an explicit `tol`;
/// returns the wrapped result or a `TchError`.
pub fn f_matrix_rank1(&self, tol: f64, symmetric: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_rank1(
c_tensors.as_mut_ptr(),
self.c_tensor,
tol,
if symmetric { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_max` (no arguments beyond `self`);
/// returns the wrapped result or a `TchError`.
pub fn f_max(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_max1` with a tensor `other`;
/// returns the wrapped result or a `TchError`.
pub fn f_max1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_max2` (dimension variant); the C side fills two output
/// slots, returned as a `(Tensor, Tensor)` pair, or a `TchError`.
pub fn f_max2(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max2(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over `atg_max_out` (tensor `other`); `out` is forwarded as the result tensor per
/// the `_out` convention. Returns the wrapped result or a `TchError`.
pub fn f_max_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over `atg_max_out1` (dimension variant); `max` and `max_values` are forwarded as
/// the result tensors per the `_out` convention. Returns the two wrapped results or a
/// `TchError`.
pub fn f_max_out1(
&self,
max: &Tensor,
max_values: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_out1(
c_tensors.as_mut_ptr(),
max.c_tensor,
max_values.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_max_pool1d`; each `&[i64]` argument is passed as a
/// pointer/length pair. Returns the wrapped result or a `TchError`.
pub fn f_max_pool1d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Wrapper over the C FFI call `atg_max_pool1d_with_indices`; the C side fills two output
/// slots, returned as a `(Tensor, Tensor)` pair, or a `TchError`.
pub fn f_max_pool1d_with_indices(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool1d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Wrapper over the C FFI call `atg_max_pool2d`; each `&[i64]` argument is passed as a
/// pointer/length pair. Returns the wrapped result or a `TchError`.
pub fn f_max_pool2d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool2d_with_indices`; returns the `(output, indices)` pair.
pub fn f_max_pool2d_with_indices(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool2d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_max_pool2d_with_indices_backward`; note `grad_output` precedes `self` in the C call.
pub fn f_max_pool2d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_with_indices_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool2d_with_indices_backward_out`; the result is written into `grad_input`.
pub fn f_max_pool2d_with_indices_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_with_indices_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool2d_with_indices_out`; results are written into `out` and `indices`.
pub fn f_max_pool2d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool2d_with_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_max_pool3d`.
pub fn f_max_pool3d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool3d_with_indices`; returns the `(output, indices)` pair.
pub fn f_max_pool3d_with_indices(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool3d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_max_pool3d_with_indices_backward`; note `grad_output` precedes `self` in the C call.
pub fn f_max_pool3d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d_with_indices_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool3d_with_indices_backward_out`; the result is written into `grad_input`.
pub fn f_max_pool3d_with_indices_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d_with_indices_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_pool3d_with_indices_out`; results are written into `out` and `indices`.
pub fn f_max_pool3d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool3d_with_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_max_unpool2d`.
pub fn f_max_unpool2d(
&self,
indices: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool2d_backward`; note `grad_output` precedes `self` in the C call.
pub fn f_max_unpool2d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool2d_backward_out`; the result is written into `grad_input`.
pub fn f_max_unpool2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool2d_out`; the result is written into `out`.
pub fn f_max_unpool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool3d`.
pub fn f_max_unpool3d(
&self,
indices: &Tensor,
output_size: &[i64],
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool3d_backward`; note `grad_output` precedes `self` in the C call.
pub fn f_max_unpool3d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
output_size: &[i64],
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool3d_backward_out`; the result is written into `grad_input`.
pub fn f_max_unpool3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
output_size: &[i64],
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_max_unpool3d_out`; the result is written into `out`.
pub fn f_max_unpool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: &[i64],
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_maximum`.
pub fn f_maximum(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_maximum(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_maximum_out`; the result is written into `out`.
pub fn f_maximum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_maximum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mean`; `dtype` is passed as its C integer code.
pub fn f_mean(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mean1` (mean over `dim` with optional keepdim and dtype).
pub fn f_mean1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mean_out`; the result is written into `out`.
pub fn f_mean_out(
&self,
out: &Tensor,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_median` (no-argument form).
pub fn f_median(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_median(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_median1` (dim-wise form); returns a `(values, indices)` pair.
pub fn f_median1(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_median1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_median_out`; results are written into `values` and `indices`.
pub fn f_median_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_median_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_meshgrid`. Unlike the fixed-arity ops, the C side
/// returns a heap-allocated, null-terminated array of tensor pointers, which is
/// walked here and then released with `libc::free`.
pub fn f_meshgrid<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_meshgrid(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
// The returned array is null-terminated; collect until the sentinel.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array; ownership of each tensor moved into `r__`.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_min` (no-argument form).
pub fn f_min(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_min1` (tensor-tensor form).
pub fn f_min1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_min2` (dim-wise form); returns a `(values, indices)` pair.
pub fn f_min2(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_min2(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_min_out`; the result is written into `out`.
pub fn f_min_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_min_out1`; results are written into `min` and `min_indices`.
pub fn f_min_out1(
&self,
min: &Tensor,
min_indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_min_out1(
c_tensors.as_mut_ptr(),
min.c_tensor,
min_indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_minimum`.
pub fn f_minimum(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_minimum(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_minimum_out`; the result is written into `out`.
pub fn f_minimum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_minimum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_batch_norm`; `None` optionals become null
/// tensor pointers on the C side. Returns a 3-tuple of output tensors.
pub fn f_miopen_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg_miopen_batch_norm_backward`; `None` optionals become
/// null tensor pointers. Returns a 3-tuple of gradient tensors.
pub fn f_miopen_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg_miopen_convolution`; a `None` bias becomes a null tensor pointer.
pub fn f_miopen_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_backward_bias` (associated fn, no `self`).
pub fn f_miopen_convolution_backward_bias(grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_backward_bias(
c_tensors.as_mut_ptr(),
grad_output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_backward_input` (associated fn;
/// the input is described only by its size slice).
pub fn f_miopen_convolution_backward_input(
self_size: &[i64],
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_backward_input(
c_tensors.as_mut_ptr(),
self_size.as_ptr(),
self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_backward_weight`; the weight is
/// described only by its size slice.
pub fn f_miopen_convolution_backward_weight(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_backward_weight(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_transpose`; a `None` bias becomes a null tensor pointer.
pub fn f_miopen_convolution_transpose<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
output_padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_transpose_backward_input` (associated fn, no `self`).
pub fn f_miopen_convolution_transpose_backward_input(
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_transpose_backward_input(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_convolution_transpose_backward_weight`.
pub fn f_miopen_convolution_transpose_backward_weight(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_transpose_backward_weight(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_depthwise_convolution`; a `None` bias becomes a null tensor pointer.
pub fn f_miopen_depthwise_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_depthwise_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_depthwise_convolution_backward_input` (associated fn, no `self`).
pub fn f_miopen_depthwise_convolution_backward_input(
self_size: &[i64],
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_depthwise_convolution_backward_input(
c_tensors.as_mut_ptr(),
self_size.as_ptr(),
self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_depthwise_convolution_backward_weight`.
pub fn f_miopen_depthwise_convolution_backward_weight(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_depthwise_convolution_backward_weight(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_miopen_rnn`. The weight list is flattened to a
/// (ptr, len) pair via `ptr_list`, optionals become null pointers, and the op
/// returns a 5-tuple of output tensors.
pub fn f_miopen_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: &[i64],
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg_miopen_rnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
hx.c_tensor,
cx.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len() as i32,
dropout_state.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
Tensor {
c_tensor: c_tensors[3],
},
Tensor {
c_tensor: c_tensors[4],
},
))
}
/// Fallible binding for `atg_mkldnn_adaptive_avg_pool2d`.
pub fn f_mkldnn_adaptive_avg_pool2d(&self, output_size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_convolution`; a `None` bias becomes a null tensor pointer.
pub fn f_mkldnn_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_convolution_backward_input` (associated fn, no `self`).
pub fn f_mkldnn_convolution_backward_input(
self_size: &[i64],
grad_output: &Tensor,
weight: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
bias_defined: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_convolution_backward_input(
c_tensors.as_mut_ptr(),
self_size.as_ptr(),
self_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if bias_defined { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_convolution_backward_weights`; returns a pair of gradient tensors.
pub fn f_mkldnn_convolution_backward_weights(
&self,
weight_size: &[i64],
grad_output: &Tensor,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
bias_defined: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mkldnn_convolution_backward_weights(
c_tensors.as_mut_ptr(),
weight_size.as_ptr(),
weight_size.len() as i32,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups,
if bias_defined { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_mkldnn_linear`; a `None` bias becomes a null tensor pointer.
pub fn f_mkldnn_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_linear_backward_input` (associated fn, no `self`).
pub fn f_mkldnn_linear_backward_input(
input_size: &[i64],
grad_output: &Tensor,
weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear_backward_input(
c_tensors.as_mut_ptr(),
input_size.as_ptr(),
input_size.len() as i32,
grad_output.c_tensor,
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_linear_backward_weights`; returns a pair of gradient tensors.
pub fn f_mkldnn_linear_backward_weights(
&self,
grad_output: &Tensor,
weight: &Tensor,
bias_defined: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mkldnn_linear_backward_weights(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor,
if bias_defined { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_mkldnn_max_pool2d`.
pub fn f_mkldnn_max_pool2d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_max_pool3d`.
pub fn f_mkldnn_max_pool3d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_reorder_conv2d_weight`.
pub fn f_mkldnn_reorder_conv2d_weight(
&self,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv2d_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mkldnn_reorder_conv3d_weight`.
pub fn f_mkldnn_reorder_conv3d_weight(
&self,
padding: &[i64],
stride: &[i64],
dilation: &[i64],
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv3d_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32,
stride.as_ptr(),
stride.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
groups
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mm`.
pub fn f_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mm_out`; the result is written into `out`.
pub fn f_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mode`; returns a `(values, indices)` pair.
pub fn f_mode(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_mode_out`; results are written into `values` and `indices`.
pub fn f_mode_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mode_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_moveaxis` (slice form).
pub fn f_moveaxis(&self, source: &[i64], destination: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_moveaxis(
c_tensors.as_mut_ptr(),
self.c_tensor,
source.as_ptr(),
source.len() as i32,
destination.as_ptr(),
destination.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_moveaxis1` (single-axis form).
pub fn f_moveaxis1(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_moveaxis1(
c_tensors.as_mut_ptr(),
self.c_tensor,
source,
destination
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_movedim` (slice form).
pub fn f_movedim(&self, source: &[i64], destination: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_movedim(
c_tensors.as_mut_ptr(),
self.c_tensor,
source.as_ptr(),
source.len() as i32,
destination.as_ptr(),
destination.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_movedim1` (single-axis form).
pub fn f_movedim1(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_movedim1(
c_tensors.as_mut_ptr(),
self.c_tensor,
source,
destination
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mse_loss`; `reduction` is passed as its C integer code.
pub fn f_mse_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mse_loss_backward`; note `grad_output` precedes `self` in the C call.
pub fn f_mse_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_mse_loss_backward_out`; the result is written into `grad_input`.
pub fn f_mse_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mse_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_msort(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_msort(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_msort_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_msort_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mul1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mul_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mul_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `multi_margin_loss_backward` op.
///
/// Note the argument order on the C side: `grad_output` precedes `self`
/// (the forward input). `p` and `margin` are consumed as `Scalar`s; note
/// the single generic `S` forces both to convert from the same Rust type
/// (a quirk of the code generator). The optional `weight` tensor is
/// encoded as a null pointer when absent.
pub fn f_multi_margin_loss_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multi_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
p.into().c_scalar,
margin.into().c_scalar,
// None becomes a null handle, which the C side treats as "no weight".
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multi_margin_loss_backward_out<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multi_margin_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
p.into().c_scalar,
margin.into().c_scalar,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multilabel_margin_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multilabel_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
is_target.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multilabel_margin_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
is_target.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multilabel_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `multinomial` op: draws `num_samples`
/// indices, with or without `replacement`.
///
/// The bool flag is encoded as a 0/1 integer for the C ABI.
pub fn f_multinomial(&self, num_samples: i64, replacement: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multinomial(
c_tensors.as_mut_ptr(),
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multinomial_out(
&self,
out: &Tensor,
num_samples: i64,
replacement: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multinomial_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multiply(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multiply1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multiply_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multiply_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_multiply_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mv(&self, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mv(c_tensors.as_mut_ptr(), self.c_tensor, vec.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mv_out(&self, out: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mvlgamma(&self, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mvlgamma(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_mvlgamma_(&mut self, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mvlgamma_(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `nan_to_num` op: replaces NaN,
/// positive-infinity and negative-infinity entries of `self`.
///
/// Each replacement value is optional. `None` is encoded for the C ABI as a
/// sentinel payload (`f64::NAN`) together with a separate `is_none` flag
/// byte, so the C side can distinguish "use the default" from an explicit
/// value.
///
/// # Errors
/// Returns `TchError` if the underlying `atg_nan_to_num` call fails
/// (propagated by `unsafe_torch_err!`).
pub fn f_nan_to_num(
&self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num(
c_tensors.as_mut_ptr(),
self.c_tensor,
// `f64::NAN` (associated const) replaces the deprecated
// `std::f64::NAN` module constant; the value is bit-identical.
nan.unwrap_or(f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place variant of [`f_nan_to_num`]: generated binding for the ATen
/// `nan_to_num_` op, replacing NaN/+inf/-inf entries of `self`.
///
/// `None` arguments are encoded as a sentinel (`f64::NAN`) plus an
/// `is_none` flag byte — see [`f_nan_to_num`].
///
/// # Errors
/// Returns `TchError` if the underlying C call fails.
pub fn f_nan_to_num_(
&mut self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num_(
c_tensors.as_mut_ptr(),
self.c_tensor,
// `f64::NAN` replaces the deprecated `std::f64::NAN` module const.
nan.unwrap_or(f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Out-parameter variant of [`f_nan_to_num`]: generated binding for the
/// ATen `nan_to_num.out` op, writing the result into `out`.
///
/// `None` arguments are encoded as a sentinel (`f64::NAN`) plus an
/// `is_none` flag byte — see [`f_nan_to_num`].
///
/// # Errors
/// Returns `TchError` if the underlying C call fails.
pub fn f_nan_to_num_out(
&self,
out: &Tensor,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
// `f64::NAN` replaces the deprecated `std::f64::NAN` module const.
nan.unwrap_or(f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nanmedian(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanmedian(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `nanmedian.dim` overload: reduces along
/// `dim` and returns a pair of tensors (values, indices — presumably in
/// that order, matching the C array layout; confirm against ATen docs).
pub fn f_nanmedian1(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
// Two output slots: the C call fills both handles.
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_nanmedian1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
pub fn f_nanmedian_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_nanmedian_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Generated binding for the ATen `nanquantile` op with scalar quantile `q`.
///
/// `dim` is optional: `None` is marshalled as a sentinel value (`0`) plus a
/// separate `is_none` flag byte so the C side can tell "reduce over all
/// dims" apart from an explicit `dim == 0`.
pub fn f_nanquantile(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile(
c_tensors.as_mut_ptr(),
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nanquantile1(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile1(
c_tensors.as_mut_ptr(),
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nanquantile_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nanquantile_out1(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nansum(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nansum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nansum1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nansum1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nansum_out(
&self,
out: &Tensor,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nansum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_narrow(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
length
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_narrow1(&self, dim: i64, start: &Tensor, length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start.c_tensor,
length
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_narrow_copy(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
length
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_narrow_copy_out(
&self,
out: &Tensor,
dim: i64,
start: i64,
length: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
start,
length
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `native_batch_norm` op.
///
/// All four auxiliary tensors (`weight`, `bias`, `running_mean`,
/// `running_var`) are optional and encoded as null handles when absent.
/// Returns three tensors; by C array position these are presumably
/// (output, save_mean, save_invstd) — TODO confirm against ATen docs.
pub fn f_native_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
// Three output slots, all filled in by the C call.
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
pub fn f_native_batch_norm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_batch_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
save_mean.c_tensor,
save_invstd.c_tensor,
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
pub fn f_native_group_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
n: i64,
c: i64,
hxw: i64,
group: i64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_group_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
n,
c,
hxw,
group,
eps
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Generated binding for the ATen `native_layer_norm` op.
///
/// `normalized_shape` is passed as pointer + `i32` length; `weight` and
/// `bias` are optional and encoded as null handles when absent. Returns
/// three tensors (output plus two saved statistics, by C array position —
/// confirm exact meaning against ATen docs).
pub fn f_native_layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: &[i64],
weight: Option<T>,
bias: Option<T>,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_layer_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
normalized_shape.as_ptr(),
normalized_shape.len() as i32,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
pub fn f_native_norm(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_native_norm1<S: Into<Scalar>>(
&self,
p: S,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm1(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `ne` op (not-equal against a scalar).
///
/// `other` is converted into a `Scalar` and its C handle passed through.
pub fn f_ne<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ne1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ne_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ne_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ne_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_ne_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `neg` op, delegating to `atg_neg`.
///
/// Returns a new `Tensor` handle, or a `TchError` if the C call fails.
pub fn f_neg(&self) -> Result<Tensor, TchError> {
    // One output slot for the C call to fill in.
    let mut outputs = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_neg(outputs.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: outputs[0] })
}
pub fn f_neg_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_neg_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_neg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_neg_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_negative(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_negative_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_negative_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `new_empty` op: allocates an
/// uninitialized tensor of the given `size`.
///
/// `options` is the usual `(Kind, Device)` pair, each converted to its
/// integer code for the C ABI; `size` is passed as pointer + `i32` length.
pub fn f_new_empty(&self, size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_new_empty_strided(
&self,
size: &[i64],
stride: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty_strided(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_new_full<S: Into<Scalar>>(
&self,
size: &[i64],
fill_value: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_full(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
fill_value.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_new_zeros(&self, size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_zeros(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nextafter(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nextafter_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nextafter_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `nll_loss` op on `self` against `target`.
///
/// The optional class `weight` tensor is encoded as a null handle when
/// absent; `reduction` is converted to its integer code. `ignore_index`
/// is forwarded verbatim.
pub fn f_nll_loss<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss2d<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss2d_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss2d_backward_out<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss_backward_out<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nll_loss_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_nonzero(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nonzero(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `nonzero_numpy` op, which returns a
/// variable-length list of tensors.
///
/// Unlike the fixed-arity wrappers, `atg_nonzero_numpy` hands back a
/// heap-allocated, null-terminated array of `C_tensor` pointers: the loop
/// walks the array until the terminating null, transferring ownership of
/// each handle into a `Tensor` wrapper, and then the array itself is
/// released with `libc::free` (it was allocated by the C side with malloc,
/// presumably — the individual tensors are NOT freed here, only the array).
pub fn f_nonzero_numpy(&self) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_nonzero_numpy(self.c_tensor));
let mut r__ = vec![];
let mut i = 0;
loop {
// SAFETY relies on the C contract that the array is null-terminated.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array; the tensor handles now live in `r__`.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_nonzero_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nonzero_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm1<S: Into<Scalar>>(&self, p: S, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm1(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm2<S: Into<Scalar>>(
&self,
p: S,
dim: &[i64],
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm2(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm3<S: Into<Scalar>>(
&self,
p: S,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm3(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the ATen `norm_except_dim` op.
///
/// Note this is an associated function (no `self`): the input tensor is
/// the explicit `v` argument, matching the ATen signature.
pub fn f_norm_except_dim(v: &Tensor, pow: i64, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_except_dim(
c_tensors.as_mut_ptr(),
v.c_tensor,
pow,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: &[i64],
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_norm_out1<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Generated binding for the in-place ATen `normal_` op: fills `self` with
/// samples drawn from a normal distribution parameterized by `mean`/`std`.
///
/// Takes `&mut self` because the underlying op mutates the tensor; the
/// returned handle is whatever the C call produces in the output slot.
pub fn f_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean,
std
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_normal_out(out: &Tensor, mean: &Tensor, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
mean.c_tensor,
std
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_normal_out1(out: &Tensor, mean: f64, std: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
mean,
std.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_normal_out2(out: &Tensor, mean: &Tensor, std: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_out2(
c_tensors.as_mut_ptr(),
out.c_tensor,
mean.c_tensor,
std.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_normal_out3(
out: &Tensor,
mean: f64,
std: f64,
size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_out3(
c_tensors.as_mut_ptr(),
out.c_tensor,
mean,
std,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise `self != other` against a scalar; binds `atg_not_equal`.
pub fn f_not_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise `self != other` against a tensor; binds `atg_not_equal1`.
pub fn f_not_equal1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place scalar variant of `not_equal`; binds `atg_not_equal_`.
pub fn f_not_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place tensor variant of `not_equal`; binds `atg_not_equal_1`.
pub fn f_not_equal_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar `not_equal` written into the caller-supplied `out` tensor; binds `atg_not_equal_out`.
pub fn f_not_equal_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor `not_equal` written into the caller-supplied `out` tensor; binds `atg_not_equal_out1`.
pub fn f_not_equal_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Nuclear norm of `self`; binds `atg_nuclear_norm`. `keepdim` is lowered to a C int flag.
pub fn f_nuclear_norm(&self, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Nuclear norm over the given `dim` axes; binds `atg_nuclear_norm1`.
pub fn f_nuclear_norm1(&self, dim: &[i64], keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Nuclear norm written into `out`; binds `atg_nuclear_norm_out`.
pub fn f_nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Nuclear norm over `dim` axes written into `out`; binds `atg_nuclear_norm_out1`.
pub fn f_nuclear_norm_out1(
&self,
out: &Tensor,
dim: &[i64],
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// NumPy-style transpose view of `self`; binds `atg_numpy_t`.
pub fn f_numpy_t(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_numpy_t(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// One-hot encoding of `self` into `num_classes` classes; binds `atg_one_hot`.
pub fn f_one_hot(&self, num_classes: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_one_hot(
c_tensors.as_mut_ptr(),
self.c_tensor,
num_classes
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// New all-ones tensor of shape `size` with the given (kind, device) options; binds `atg_ones`.
pub fn f_ones(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(), // dtype as C enum value
options.1.c_int() // device as C enum value
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// All-ones tensor with the same shape/options as `self`; binds `atg_ones_like`.
pub fn f_ones_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// All-ones fill of shape `size` written into `out`; binds `atg_ones_out`.
pub fn f_ones_out(out: &Tensor, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `orgqr` (build Q from a QR factorization); binds `atg_orgqr`.
pub fn f_orgqr(&self, input2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_orgqr(
c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `orgqr` written into `out`; binds `atg_orgqr_out`.
pub fn f_orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_orgqr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `ormqr` (multiply by Q from a QR factorization); binds `atg_ormqr`.
/// `left`/`transpose` select which side and whether Q is transposed.
pub fn f_ormqr(
&self,
input2: &Tensor,
input3: &Tensor,
left: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ormqr(
c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `ormqr` written into `out`; binds `atg_ormqr_out`.
pub fn f_ormqr_out(
&self,
out: &Tensor,
input2: &Tensor,
input3: &Tensor,
left: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ormqr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Outer product of `self` and `vec2`; binds `atg_outer`.
pub fn f_outer(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_outer(
c_tensors.as_mut_ptr(),
self.c_tensor,
vec2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Outer product written into `out`; binds `atg_outer_out`.
pub fn f_outer_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_outer_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Batched p-norm distance between `x1` and `x2`; binds `atg_pairwise_distance`.
pub fn f_pairwise_distance(
x1: &Tensor,
x2: &Tensor,
p: f64,
eps: f64,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pairwise_distance(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
p,
eps,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Pairwise p-norm distances within `self`; binds `atg_pdist`.
pub fn f_pdist(&self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pdist(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Permutes dimensions of `self` per `dims`; binds `atg_permute`.
pub fn f_permute(&self, dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_permute(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Copies `self` to pinned (page-locked) host memory; binds `atg_pin_memory`.
pub fn f_pin_memory(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pin_memory(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Pseudo-inverse of `self` with cutoff `rcond`; binds `atg_pinverse`.
pub fn f_pinverse(&self, rcond: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pinverse(c_tensors.as_mut_ptr(), self.c_tensor, rcond));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Pixel-shuffle upsampling by `upscale_factor`; binds `atg_pixel_shuffle`.
pub fn f_pixel_shuffle(&self, upscale_factor: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_shuffle(
c_tensors.as_mut_ptr(),
self.c_tensor,
upscale_factor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Inverse of pixel-shuffle by `downscale_factor`; binds `atg_pixel_unshuffle`.
pub fn f_pixel_unshuffle(&self, downscale_factor: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_unshuffle(
c_tensors.as_mut_ptr(),
self.c_tensor,
downscale_factor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise Poisson sampling with rates from `self`; binds `atg_poisson`.
pub fn f_poisson(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_poisson(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Poisson negative log-likelihood loss; binds `atg_poisson_nll_loss`.
/// `reduction` is lowered via the project's `Reduction::to_int` mapping.
pub fn f_poisson_nll_loss(
&self,
target: &Tensor,
log_input: bool,
full: bool,
eps: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_poisson_nll_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
if log_input { 1 } else { 0 },
if full { 1 } else { 0 },
eps,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Complex tensor from magnitude `abs` and phase `angle`; binds `atg_polar`.
pub fn f_polar(abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polar(
c_tensors.as_mut_ptr(),
abs.c_tensor,
angle.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `polar` written into `out`; binds `atg_polar_out`.
pub fn f_polar_out(out: &Tensor, abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
abs.c_tensor,
angle.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// n-th polygamma function of `self`; binds `atg_polygamma`.
/// Note: the C API takes `n` BEFORE the input tensor, unlike most bindings here.
pub fn f_polygamma(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma(c_tensors.as_mut_ptr(), n, self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place n-th polygamma; binds `atg_polygamma_` (here `n` follows the tensor).
pub fn f_polygamma_(&mut self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma_(c_tensors.as_mut_ptr(), self.c_tensor, n));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// n-th polygamma written into `out`; binds `atg_polygamma_out` (`n` precedes the input).
pub fn f_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
n,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `self` raised to a scalar `exponent`; binds `atg_pow`.
pub fn f_pow<S: Into<Scalar>>(&self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `self` raised to a per-element tensor `exponent`; binds `atg_pow1`.
pub fn f_pow1(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow1(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar base raised to a tensor `exponent` (associated fn, no receiver); binds `atg_pow2`.
pub fn f_pow2<S: Into<Scalar>>(self_scalar: S, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow2(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place scalar power; binds `atg_pow_`.
pub fn f_pow_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place tensor power; binds `atg_pow_1`.
pub fn f_pow_1(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor power written into `out`; binds `atg_pow_out`.
pub fn f_pow_out(&self, out: &Tensor, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar-base power written into `out` (associated fn); binds `atg_pow_out1`.
pub fn f_pow_out1<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar-exponent power written into `out`; binds `atg_pow_out2`.
pub fn f_pow_out2<S: Into<Scalar>>(
&self,
out: &Tensor,
exponent: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_out2(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// PReLU activation with learnable `weight`; binds `atg_prelu`.
pub fn f_prelu(&self, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prelu(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// PReLU backward pass; returns (grad_input, grad_weight). Binds `atg_prelu_backward`,
/// which fills two output handles.
pub fn f_prelu_backward(
&self,
grad_output: &Tensor,
weight: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_prelu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Product of all elements, computed as `dtype`; binds `atg_prod`.
pub fn f_prod(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Product along `dim`; binds `atg_prod1`.
pub fn f_prod1(&self, dim: i64, keepdim: bool, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Product along `dim` written into `out`; binds `atg_prod_out`.
pub fn f_prod_out(
&self,
out: &Tensor,
dim: i64,
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place `put_`: copies `source` values into `self` at flat `index` positions,
/// optionally accumulating; binds `atg_put_`.
pub fn f_put_(
&mut self,
index: &Tensor,
source: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_put_(
c_tensors.as_mut_ptr(),
self.c_tensor,
index.c_tensor,
source.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Per-channel quantization scales of a quantized tensor; binds `atg_q_per_channel_scales`.
pub fn f_q_per_channel_scales(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_scales(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Per-channel quantization zero points; binds `atg_q_per_channel_zero_points`.
pub fn f_q_per_channel_zero_points(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_zero_points(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// QR decomposition; returns (Q, R). `some` selects the reduced factorization. Binds `atg_qr`.
pub fn f_qr(&self, some: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_qr(
c_tensors.as_mut_ptr(),
self.c_tensor,
if some { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// QR decomposition written into caller-supplied `q` and `r`; binds `atg_qr_out`.
pub fn f_qr_out(
&self,
q: &Tensor,
r: &Tensor,
some: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_qr_out(
c_tensors.as_mut_ptr(),
q.c_tensor,
r.c_tensor,
self.c_tensor,
if some { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Quantile `q` of `self`, optionally along `dim`; binds `atg_quantile`.
/// `Option<i64>` dims are lowered as a (value, is-null flag) pair for the C API.
pub fn f_quantile(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile(
c_tensors.as_mut_ptr(),
self.c_tensor,
q,
dim.unwrap_or(0i64), // placeholder value when dim is None
dim.is_none() as i8, // null flag telling C to ignore the value above
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantile with a tensor of probabilities `q`; binds `atg_quantile1`.
pub fn f_quantile1(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile1(
c_tensors.as_mut_ptr(),
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar-probability quantile written into `out`; binds `atg_quantile_out`.
pub fn f_quantile_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Tensor-probability quantile written into `out`; binds `atg_quantile_out1`.
pub fn f_quantile_out1(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Per-channel quantization of `self` along `axis` to `dtype`; binds `atg_quantize_per_channel`.
pub fn f_quantize_per_channel(
&self,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_channel(
c_tensors.as_mut_ptr(),
self.c_tensor,
scales.c_tensor,
zero_points.c_tensor,
axis,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Per-tensor quantization with a single scale/zero-point; binds `atg_quantize_per_tensor`.
pub fn f_quantize_per_tensor(
&self,
scale: f64,
zero_point: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantizes a list of tensors; binds `atg_quantize_per_tensor1`.
/// The C side returns a malloc'd, null-terminated array of tensor handles,
/// which is walked into a Vec and then freed here.
pub fn f_quantize_per_tensor1<T: Borrow<Tensor>>(
tensors: &[T],
scales: &Tensor,
zero_points: &Tensor,
dtype: Kind,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_quantize_per_tensor1(
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
scales.c_tensor,
zero_points.c_tensor,
dtype.c_int()
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
// null handle terminates the C-allocated list
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// free the list itself; the individual handles are now owned by the Vec
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Quantized batch norm; `weight`/`bias` are optional and lowered to null handles
/// when absent. Binds `atg_quantized_batch_norm`.
pub fn f_quantized_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
var: &Tensor,
eps: f64,
output_scale: f64,
output_zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
var.c_tensor,
eps,
output_scale,
output_zero_point
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantized GRU cell step; binds `atg_quantized_gru_cell`. All scale/zero-point
/// parameters are passed as `Scalar`s, matching the generated C signature.
pub fn f_quantized_gru_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_gru_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantized LSTM cell step; `hx` is a tensor list (hidden + cell state),
/// lowered via `ptr_list`. Returns the two output states. Binds `atg_quantized_lstm_cell`.
pub fn f_quantized_lstm_cell<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_quantized_lstm_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Quantized 1-d max pooling; binds `atg_quantized_max_pool1d`.
pub fn f_quantized_max_pool1d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantized 2-d max pooling; binds `atg_quantized_max_pool2d`.
pub fn f_quantized_max_pool2d(
&self,
kernel_size: &[i64],
stride: &[i64],
padding: &[i64],
dilation: &[i64],
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32,
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantized RNN cell step with ReLU nonlinearity; binds `atg_quantized_rnn_relu_cell`.
pub fn f_quantized_rnn_relu_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_rnn_relu_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Quantized RNN cell step with tanh nonlinearity; binds `atg_quantized_rnn_tanh_cell`.
pub fn f_quantized_rnn_tanh_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_rnn_tanh_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Radians-to-degrees conversion; binds `atg_rad2deg`.
pub fn f_rad2deg(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place radians-to-degrees; binds `atg_rad2deg_`.
pub fn f_rad2deg_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Radians-to-degrees written into `out`; binds `atg_rad2deg_out`.
pub fn f_rad2deg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// New tensor of shape `size` with uniform [0, 1) samples; binds `atg_rand`.
pub fn f_rand(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Uniform random tensor with the same shape/options as `self`; binds `atg_rand_like`.
pub fn f_rand_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Uniform random fill of shape `size` written into `out`; binds `atg_rand_out`.
pub fn f_rand_out(out: &Tensor, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [0, `high`); binds `atg_randint`.
pub fn f_randint(high: i64, size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint(
c_tensors.as_mut_ptr(),
high,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [`low`, `high`); binds `atg_randint1`.
pub fn f_randint1(
low: i64,
high: i64,
size: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint1(
c_tensors.as_mut_ptr(),
low,
high,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [0, `high`) shaped like `self`; binds `atg_randint_like`.
pub fn f_randint_like(&self, high: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like(
c_tensors.as_mut_ptr(),
self.c_tensor,
high
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [`low`, `high`) shaped like `self`; binds `atg_randint_like1`.
pub fn f_randint_like1(&self, low: i64, high: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like1(
c_tensors.as_mut_ptr(),
self.c_tensor,
low,
high
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [0, `high`) written into `out`; binds `atg_randint_out`.
pub fn f_randint_out(out: &Tensor, high: i64, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
high,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random integers in [`low`, `high`) written into `out`; binds `atg_randint_out1`.
pub fn f_randint_out1(
out: &Tensor,
low: i64,
high: i64,
size: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
low,
high,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// New tensor of shape `size` with standard-normal samples; binds `atg_randn`.
pub fn f_randn(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Standard-normal random tensor shaped like `self`; binds `atg_randn_like`.
pub fn f_randn_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Standard-normal fill of shape `size` written into `out`; binds `atg_randn_out`.
pub fn f_randn_out(out: &Tensor, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place fill with random values over the dtype's full range; binds `atg_random_`.
pub fn f_random_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place random fill bounded by `to` (exclusive); binds `atg_random_1`.
pub fn f_random_1(&mut self, to: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_1(c_tensors.as_mut_ptr(), self.c_tensor, to));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place random fill in [`from`, `to`); a `None` upper bound is lowered as
/// a (value, is-null flag) pair. Binds `atg_random_2`.
pub fn f_random_2(
&mut self,
from: i64,
to: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let to = to.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_2(
c_tensors.as_mut_ptr(),
self.c_tensor,
from,
to.unwrap_or(0i64), // placeholder when unbounded
to.is_none() as i8 // null flag for the C side
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random permutation of 0..n; binds `atg_randperm`.
pub fn f_randperm(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randperm(
c_tensors.as_mut_ptr(),
n,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Random permutation of 0..n written into `out`; binds `atg_randperm_out`.
pub fn f_randperm_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randperm_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// 1-d range from `start` to `end` (scalar bounds); binds `atg_range`.
pub fn f_range<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Alternative range overload from the C API; binds `atg_range1`.
pub fn f_range1<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range1(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Range written into `out`; binds `atg_range_out`.
pub fn f_range_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Flattens `self` to one dimension; binds `atg_ravel`.
pub fn f_ravel(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ravel(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Real part of `self`; binds `atg_real`.
pub fn f_real(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_real(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Element-wise reciprocal; binds `atg_reciprocal`.
pub fn f_reciprocal(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// In-place reciprocal; binds `atg_reciprocal_`.
pub fn f_reciprocal_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Reciprocal written into `out`; binds `atg_reciprocal_out`.
pub fn f_reciprocal_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// 1-d reflection padding; binds `atg_reflection_pad1d`.
pub fn f_reflection_pad1d(&self, padding: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of 1-d reflection padding; binds `atg_reflection_pad1d_backward`.
pub fn f_reflection_pad1d_backward(
&self,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Backward of 1-d reflection padding written into `grad_input`; binds
/// `atg_reflection_pad1d_backward_out`.
pub fn f_reflection_pad1d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_reflection_pad1d_out(
&self,
out: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_reflection_pad2d(&self, padding: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_reflection_pad2d_backward(
&self,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_reflection_pad2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_reflection_pad2d_out(
&self,
out: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_relu`.
pub fn f_relu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_relu_` (in-place variant,
/// hence `&mut self`).
pub fn f_relu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder` (scalar `other`).
pub fn f_remainder<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder1` (tensor `other`).
pub fn f_remainder1(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder_` (in-place, scalar `other`).
pub fn f_remainder_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder_1` (in-place, tensor `other`).
pub fn f_remainder_1(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder_out`
/// (out-variant, scalar `other`; `out` is the output buffer).
pub fn f_remainder_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_remainder_out1`
/// (out-variant, tensor `other`; `out` is the output buffer).
pub fn f_remainder_out1(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_out1(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_renorm`.
/// Note: `p` and `maxnorm` share the single generic parameter `S`.
pub fn f_renorm<S: Into<Scalar>>(
&self,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_renorm_` (in-place variant).
pub fn f_renorm_<S: Into<Scalar>>(
&mut self,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_renorm_out`
/// (out-variant; `out` is the output buffer).
pub fn f_renorm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_repeat`.
pub fn f_repeat(&self, repeats: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats.as_ptr(),
repeats.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_repeat_interleave`.
/// Associated function (no `self`): operates on `repeats` alone.
pub fn f_repeat_interleave(repeats: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave(
c_tensors.as_mut_ptr(),
repeats.c_tensor
))
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_repeat_interleave1`
/// (tensor `repeats`). `dim` is optional: the C ABI receives the value
/// (0 when absent) plus a separate null-flag byte.
pub fn f_repeat_interleave1(
&self,
repeats: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave1(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_repeat_interleave2`
/// (integer `repeats`); same optional-`dim` encoding as above.
pub fn f_repeat_interleave2(
&self,
repeats: i64,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave2(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad1d`.
pub fn f_replication_pad1d(&self, padding: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad1d_backward`.
pub fn f_replication_pad1d_backward(
&self,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad1d_backward_out`
/// (out-variant; `grad_input` is passed as the output buffer).
pub fn f_replication_pad1d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad1d_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_replication_pad1d_out(
&self,
out: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad2d`.
pub fn f_replication_pad2d(&self, padding: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad2d_backward`.
pub fn f_replication_pad2d_backward(
&self,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad2d_backward_out`
/// (out-variant; `grad_input` is passed as the output buffer).
pub fn f_replication_pad2d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad2d_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_replication_pad2d_out(
&self,
out: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad3d`.
pub fn f_replication_pad3d(&self, padding: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad3d_backward`.
pub fn f_replication_pad3d_backward(
&self,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad3d_backward_out`
/// (out-variant; `grad_input` is passed as the output buffer).
pub fn f_replication_pad3d_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_replication_pad3d_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_replication_pad3d_out(
&self,
out: &Tensor,
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_requires_grad_` (in-place variant).
/// Booleans cross the FFI boundary as 0/1 integers.
pub fn f_requires_grad_(&mut self, requires_grad: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_requires_grad_(
c_tensors.as_mut_ptr(),
self.c_tensor,
if requires_grad { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_reshape`.
pub fn f_reshape(&self, shape: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reshape(
c_tensors.as_mut_ptr(),
self.c_tensor,
shape.as_ptr(),
shape.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_reshape_as`.
pub fn f_reshape_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reshape_as(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_resize_` (in-place variant).
pub fn f_resize_(&mut self, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_resize_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_resize_as_` (in-place variant).
pub fn f_resize_as_(&mut self, the_template: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_resize_as_(
c_tensors.as_mut_ptr(),
self.c_tensor,
the_template.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rnn_relu`.
/// Returns two tensors (output, hidden state). The `ptr_list(..)` temporary
/// lives until the end of the macro statement, so the raw pointers stay valid
/// across the FFI call.
pub fn f_rnn_relu<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_rnn_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper for the C function `atg_rnn_relu1`
/// (packed-sequence variant; associated function, no `self`).
pub fn f_rnn_relu1<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_rnn_relu1(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper for the C function `atg_rnn_relu_cell`.
/// Optional bias tensors are passed as null pointers when `None`.
pub fn f_rnn_relu_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rnn_relu_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rnn_tanh`.
/// Returns two tensors (output, hidden state).
pub fn f_rnn_tanh<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_rnn_tanh(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper for the C function `atg_rnn_tanh1`
/// (packed-sequence variant; associated function, no `self`).
pub fn f_rnn_tanh1<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_rnn_tanh1(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper for the C function `atg_rnn_tanh_cell`.
/// Optional bias tensors are passed as null pointers when `None`.
pub fn f_rnn_tanh_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rnn_tanh_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
b_hh.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_roll`.
pub fn f_roll(&self, shifts: &[i64], dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_roll(
c_tensors.as_mut_ptr(),
self.c_tensor,
shifts.as_ptr(),
shifts.len() as i32,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rot90`.
pub fn f_rot90(&self, k: i64, dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rot90(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_round`.
pub fn f_round(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_round(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_round_` (in-place variant).
pub fn f_round_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_round_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_round_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_round_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_round_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_row_stack`
/// (associated function over a tensor list).
pub fn f_row_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_row_stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_row_stack_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_row_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_row_stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu`.
pub fn f_rrelu(&self, training: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu(
c_tensors.as_mut_ptr(),
self.c_tensor,
if training { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu_` (in-place variant).
pub fn f_rrelu_(&mut self, training: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu_(
c_tensors.as_mut_ptr(),
self.c_tensor,
if training { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu_with_noise`.
pub fn f_rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu_with_noise(
c_tensors.as_mut_ptr(),
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu_with_noise_` (in-place variant).
pub fn f_rrelu_with_noise_(
&mut self,
noise: &Tensor,
training: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu_with_noise_(
c_tensors.as_mut_ptr(),
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu_with_noise_backward`.
/// Note: `lower` and `upper` share the single generic parameter `S`.
pub fn f_rrelu_with_noise_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
noise: &Tensor,
lower: S,
upper: S,
training: bool,
self_is_result: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu_with_noise_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
noise.c_tensor,
lower.into().c_scalar,
upper.into().c_scalar,
if training { 1 } else { 0 },
if self_is_result { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rrelu_with_noise_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_rrelu_with_noise_out(
&self,
out: &Tensor,
noise: &Tensor,
training: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rrelu_with_noise_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
noise.c_tensor,
if training { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rsqrt`.
pub fn f_rsqrt(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rsqrt(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rsqrt_` (in-place variant).
pub fn f_rsqrt_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rsqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rsqrt_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_rsqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rsqrt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rsub` (tensor `other`).
pub fn f_rsub(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rsub(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_rsub1` (scalar `other`).
pub fn f_rsub1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rsub1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scalar_tensor`.
/// `options` is the usual (kind, device) pair, marshalled as two C ints.
pub fn f_scalar_tensor<S: Into<Scalar>>(
s: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scalar_tensor(
c_tensors.as_mut_ptr(),
s.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter` (tensor `src`).
pub fn f_scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter1` (scalar `value`).
pub fn f_scatter1<S: Into<Scalar>>(
&self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_` (in-place, tensor `src`).
pub fn f_scatter_(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_1` (in-place, scalar `value`).
pub fn f_scatter_1<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_2` (in-place, tensor `src`
/// with a reduction mode). The `reduce` string crosses the FFI boundary as a
/// non-NUL-terminated pointer + byte length.
pub fn f_scatter_2(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_2(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor,
reduce.as_ptr(),
reduce.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_3` (in-place, scalar
/// `value` with a reduction mode). Same ptr+len string encoding as above.
pub fn f_scatter_3<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
reduce: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_3(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar,
reduce.as_ptr(),
reduce.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_add`.
pub fn f_scatter_add(
&self,
dim: i64,
index: &Tensor,
src: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_add(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_scatter_add_` (in-place variant).
pub fn f_scatter_add_(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_scatter_add_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_searchsorted`.
/// Note the argument order: `sorted_sequence` precedes `self`.
pub fn f_searchsorted(
&self,
sorted_sequence: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_searchsorted(
c_tensors.as_mut_ptr(),
sorted_sequence.c_tensor,
self.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_searchsorted1`
/// (scalar needle variant; associated function, no `self`).
pub fn f_searchsorted1<S: Into<Scalar>>(
sorted_sequence: &Tensor,
self_scalar: S,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_searchsorted1(
c_tensors.as_mut_ptr(),
sorted_sequence.c_tensor,
self_scalar.into().c_scalar,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_searchsorted_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_searchsorted_out(
&self,
out: &Tensor,
sorted_sequence: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_searchsorted_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
sorted_sequence.c_tensor,
self.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_select`.
pub fn f_select(&self, dim: i64, index: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_select(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_select_backward`
/// (associated function, no `self`).
pub fn f_select_backward(
grad: &Tensor,
input_sizes: &[i64],
dim: i64,
index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_select_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_sizes.as_ptr(),
input_sizes.len() as i32,
dim,
index
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_selu`.
pub fn f_selu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_selu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_selu_` (in-place variant).
pub fn f_selu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_selu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_set_` (in-place variant).
pub fn f_set_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_set_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_set_1` (in-place, with `source`).
pub fn f_set_1(&mut self, source: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_set_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
source.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_set_requires_grad`.
pub fn f_set_requires_grad(&self, r: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_set_requires_grad(
c_tensors.as_mut_ptr(),
self.c_tensor,
if r { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sgn`.
pub fn f_sgn(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sgn(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sgn_` (in-place variant).
pub fn f_sgn_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sgn_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sgn_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sgn_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sgn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sigmoid`.
pub fn f_sigmoid(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sigmoid_` (in-place variant).
pub fn f_sigmoid_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sigmoid_backward`
/// (associated function, no `self`).
pub fn f_sigmoid_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sigmoid_backward_out`
/// (out-variant; `grad_input` is passed as the output buffer).
pub fn f_sigmoid_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sigmoid_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sign`.
pub fn f_sign(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sign_` (in-place variant).
pub fn f_sign_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sign_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sign_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_signbit`.
pub fn f_signbit(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_signbit(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_signbit_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_signbit_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_signbit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_silu`.
pub fn f_silu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_silu_` (in-place variant).
pub fn f_silu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_silu_backward`
/// (note: `grad_output` precedes `self` in the C argument order).
pub fn f_silu_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_silu_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_silu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sin`.
pub fn f_sin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sin_` (in-place variant).
pub fn f_sin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sin_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinc`.
pub fn f_sinc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinc_` (in-place variant).
pub fn f_sinc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinc_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinh`.
pub fn f_sinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinh_` (in-place variant).
pub fn f_sinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_sinh_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_sinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slice`.
/// `start` and `end` are optional; each is marshalled as its value (0 when
/// absent) plus a separate null-flag byte, per the `atg_*` optional-int ABI.
pub fn f_slice(
&self,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start.unwrap_or(0i64),
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slice_backward`
/// (associated function, no `self`).
pub fn f_slice_backward(
grad: &Tensor,
input_sizes: &[i64],
dim: i64,
start: i64,
end: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_sizes.as_ptr(),
input_sizes.len() as i32,
dim,
start,
end,
step
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slogdet`; returns two tensors.
pub fn f_slogdet(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_slogdet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper for the C function `atg_slow_conv3d`.
/// The optional `bias` tensor is passed as a null pointer when `None`.
pub fn f_slow_conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slow_conv3d_out`
/// (out-variant; `out` is passed as the output buffer).
pub fn f_slow_conv3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slow_conv_dilated2d`.
/// The optional `bias` tensor is passed as a null pointer when `None`.
pub fn f_slow_conv_dilated2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper for the C function `atg_slow_conv_dilated3d`.
/// The optional `bias` tensor is passed as a null pointer when `None`.
pub fn f_slow_conv_dilated3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_slow_conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_slow_conv_transpose2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_slow_conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_slow_conv_transpose3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: &[i64],
bias: Option<T>,
stride: &[i64],
padding: &[i64],
output_padding: &[i64],
dilation: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len() as i32,
bias.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len() as i32,
padding.as_ptr(),
padding.len() as i32,
output_padding.as_ptr(),
output_padding.len() as i32,
dilation.as_ptr(),
dilation.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_smm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_smooth_l1_loss`. `reduction` is converted to
/// its libtorch integer code via `Reduction::to_int`.
pub fn f_smooth_l1_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_smooth_l1_loss_backward`: gradient of the loss
/// w.r.t. `self`, given `grad_output` and `target`.
pub fn f_smooth_l1_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_smooth_l1_loss_backward_out`: backward pass
/// with `grad_input` passed as the destination tensor.
pub fn f_smooth_l1_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_smooth_l1_loss_out`: loss written into `out`.
pub fn f_smooth_l1_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_soft_margin_loss`.
pub fn f_soft_margin_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_soft_margin_loss_backward`.
pub fn f_soft_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_soft_margin_loss_backward_out`: backward pass
/// with `grad_input` as the destination tensor.
pub fn f_soft_margin_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_soft_margin_loss_out`: loss written into `out`.
pub fn f_soft_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softmax` along `dim`, casting the result to
/// `dtype` (passed as libtorch's integer kind code).
pub fn f_softmax(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softplus` (default beta/threshold handled on
/// the C side).
pub fn f_softplus(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softplus_backward`. `beta` and `threshold`
/// share one scalar type parameter `S`, so mixed scalar types need an
/// explicit conversion at the call site.
pub fn f_softplus_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
beta: S,
threshold: S,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
beta.into().c_scalar,
threshold.into().c_scalar,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softplus_backward_out`: backward pass with
/// `grad_input` as the destination tensor.
pub fn f_softplus_backward_out<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
beta: S,
threshold: S,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
beta.into().c_scalar,
threshold.into().c_scalar,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softplus_out`: softplus written into `out`.
pub fn f_softplus_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softshrink` (default lambda handled on the C
/// side).
pub fn f_softshrink(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softshrink_backward` with explicit `lambd`.
pub fn f_softshrink_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softshrink_backward_out`: backward pass with
/// `grad_input` as the destination tensor.
pub fn f_softshrink_backward_out<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_softshrink_out`: softshrink written into `out`.
pub fn f_softshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_solve`: returns the two outputs of ATen's
/// `solve` (solution tensor and LU factorization).
pub fn f_solve(&self, a: &Tensor) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_solve(c_tensors.as_mut_ptr(), self.c_tensor, a.c_tensor));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_solve_out`: `solution` and `lu` are the two
/// destination tensors.
pub fn f_solve_out(
&self,
solution: &Tensor,
lu: &Tensor,
a: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_solve_out(
c_tensors.as_mut_ptr(),
solution.c_tensor,
lu.c_tensor,
self.c_tensor,
a.c_tensor
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_sort` along `dim`: returns (values, indices).
/// The bool flag is marshalled as a 0/1 integer for the C API.
pub fn f_sort(&self, dim: i64, descending: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_sort_out`: `values` and `indices` are the two
/// destination tensors.
pub fn f_sort_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
descending: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_sparse_coo_tensor`: empty sparse tensor of the
/// given `size`. `options` is a (kind, device) pair, each passed as its
/// libtorch integer code. Associated function (no `self`).
pub fn f_sparse_coo_tensor(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sparse_coo_tensor1`: built from COO `indices`
/// and `values`, size inferred on the C side.
pub fn f_sparse_coo_tensor1(
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor1(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sparse_coo_tensor2`: COO construction with an
/// explicit `size`.
pub fn f_sparse_coo_tensor2(
indices: &Tensor,
values: &Tensor,
size: &[i64],
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor2(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len() as i32,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sparse_mask`.
pub fn f_sparse_mask(&self, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_mask(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_sparse_resize_`; takes `&mut self`
/// following the file's convention for trailing-underscore (in-place) ops.
pub fn f_sparse_resize_(
&mut self,
size: &[i64],
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
sparse_dim,
dense_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_sparse_resize_and_clear_`.
pub fn f_sparse_resize_and_clear_(
&mut self,
size: &[i64],
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_and_clear_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32,
sparse_dim,
dense_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_split`. The C call returns a heap-allocated,
/// null-terminated array of tensor pointers: we walk it until the null
/// sentinel, wrap each entry, then free the array itself (the tensors
/// themselves are now owned by the returned `Tensor` values).
pub fn f_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split(self.c_tensor, split_size, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array allocated by the C side, not the tensors.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_split_with_sizes`: like `f_split` but with an
/// explicit per-chunk size list; same null-terminated-array protocol.
pub fn f_split_with_sizes(
&self,
split_sizes: &[i64],
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split_with_sizes(
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len() as i32,
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array allocated by the C side, not the tensors.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_sqrt`.
pub fn f_sqrt(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_sqrt_`.
pub fn f_sqrt_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sqrt_out`: result written into `out`.
pub fn f_sqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_square`.
pub fn f_square(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_square(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_square_`.
pub fn f_square_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_square_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_squeeze` (all size-1 dimensions).
pub fn f_squeeze(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_squeeze1` (single-dimension overload; the `1`
/// suffix is the generator's overload-numbering convention).
pub fn f_squeeze1(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze1(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_squeeze_`.
pub fn f_squeeze_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place, single-dimension `atg_squeeze_1`.
pub fn f_squeeze_1(&mut self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_1(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sspaddmm`.
pub fn f_sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sspaddmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sspaddmm_out`: result written into `out`.
pub fn f_sspaddmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sspaddmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_stack`. The tensor list is flattened to a raw
/// pointer array via the file's `ptr_list` helper. Associated function.
pub fn f_stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_stack_out`: result written into `out`.
pub fn f_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_std` over all elements; `unbiased` selects the
/// Bessel-corrected estimator and is marshalled as a 0/1 integer.
pub fn f_std(&self, unbiased: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_std1`: std over the listed dimensions with an
/// optional kept reduced dimension.
pub fn f_std1(&self, dim: &[i64], unbiased: bool, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_std_mean`: returns the (std, mean) pair.
pub fn f_std_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_std_mean1`: dimension-wise (std, mean) pair.
pub fn f_std_mean1(
&self,
dim: &[i64],
unbiased: bool,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_std_out`: dimension-wise std written into
/// `out`.
pub fn f_std_out(
&self,
out: &Tensor,
dim: &[i64],
unbiased: bool,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_stft` (short-time Fourier transform).
/// `hop_length`/`win_length` use the optional-i64 `(value, is_none)`
/// encoding; a missing `window` becomes a null tensor pointer.
pub fn f_stft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
normalized: bool,
onesided: bool,
return_complex: bool,
) -> Result<Tensor, TchError> {
let hop_length = hop_length.into();
let win_length = win_length.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length.unwrap_or(0i64),
hop_length.is_none() as i8,
win_length.unwrap_or(0i64),
win_length.is_none() as i8,
window.map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
if return_complex { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sub` (tensor - tensor).
pub fn f_sub(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sub1` (tensor - scalar overload).
pub fn f_sub1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_sub_` (tensor operand).
pub fn f_sub_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_sub_1` (scalar operand).
pub fn f_sub_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sub_out`: difference written into `out`.
pub fn f_sub_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_subtract` (the `subtract` alias family mirrors
/// the `sub` family above).
pub fn f_subtract(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_subtract1` (scalar overload).
pub fn f_subtract1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_subtract_` (tensor operand).
pub fn f_subtract_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_subtract_1` (scalar operand).
pub fn f_subtract_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_subtract_out`: difference written into `out`.
pub fn f_subtract_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sum` over all elements, accumulating in
/// `dtype` (passed as its libtorch integer code).
pub fn f_sum(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sum1`: sum over the listed dimensions.
pub fn f_sum1(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum1(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sum_out`: dimension-wise sum written into
/// `out`.
pub fn f_sum_out(
&self,
out: &Tensor,
dim: &[i64],
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len() as i32,
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_sum_to_size`.
pub fn f_sum_to_size(&self, size: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_to_size(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_svd`: returns the three outputs of ATen's
/// `svd` (U, S, V). Bool flags are marshalled as 0/1 integers.
pub fn f_svd(
&self,
some: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_svd(
c_tensors.as_mut_ptr(),
self.c_tensor,
if some { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg_svd_out`: `u`, `s`, `v` are the three
/// destination tensors.
pub fn f_svd_out(
&self,
u: &Tensor,
s: &Tensor,
v: &Tensor,
some: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_svd_out(
c_tensors.as_mut_ptr(),
u.c_tensor,
s.c_tensor,
v.c_tensor,
self.c_tensor,
if some { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible binding for `atg_swapaxes`.
pub fn f_swapaxes(&self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapaxes(
c_tensors.as_mut_ptr(),
self.c_tensor,
axis0,
axis1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_swapaxes_`.
pub fn f_swapaxes_(&mut self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapaxes_(
c_tensors.as_mut_ptr(),
self.c_tensor,
axis0,
axis1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_swapdims`.
pub fn f_swapdims(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapdims(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_swapdims_`.
pub fn f_swapdims_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapdims_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_symeig`: returns the (eigenvalues,
/// eigenvectors) pair.
pub fn f_symeig(&self, eigenvectors: bool, upper: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_symeig(
c_tensors.as_mut_ptr(),
self.c_tensor,
if eigenvectors { 1 } else { 0 },
if upper { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_symeig_out`: `e` and `v` are the two
/// destination tensors.
pub fn f_symeig_out(
&self,
e: &Tensor,
v: &Tensor,
eigenvectors: bool,
upper: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_symeig_out(
c_tensors.as_mut_ptr(),
e.c_tensor,
v.c_tensor,
self.c_tensor,
if eigenvectors { 1 } else { 0 },
if upper { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible binding for `atg_t` (2-D transpose). Exposed as `tr` rather
/// than `t` on the Rust side — note the C symbol is still `atg_t`.
pub fn f_tr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_t_`.
pub fn f_t_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_take`.
pub fn f_take(&self, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take(
c_tensors.as_mut_ptr(),
self.c_tensor,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_take_backward`.
pub fn f_take_backward(&self, grad: &Tensor, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_take_out`: result written into `out`.
pub fn f_take_out(&self, out: &Tensor, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
index.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tan`.
pub fn f_tan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_tan_`.
pub fn f_tan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tan_out`: result written into `out`.
pub fn f_tan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tanh`.
pub fn f_tanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for the in-place `atg_tanh_`.
pub fn f_tanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tanh_backward` (takes the forward `output`,
/// not the input). Associated function (no `self`).
pub fn f_tanh_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tanh_backward_out`: backward pass with
/// `grad_input` as the destination tensor. Associated function.
pub fn f_tanh_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tanh_out`: result written into `out`.
pub fn f_tanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tensor_split` (split into `sections` chunks).
/// The C call returns a heap-allocated, null-terminated pointer array:
/// wrap each tensor, then free only the array.
pub fn f_tensor_split(&self, sections: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split(self.c_tensor, sections, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_tensor_split1` (split at explicit `indices`);
/// same null-terminated-array protocol as `f_tensor_split`.
pub fn f_tensor_split1(&self, indices: &[i64], dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split1(
self.c_tensor,
indices.as_ptr(),
indices.len() as i32,
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_tensor_split2` (indices-or-sections given as a
/// tensor); same null-terminated-array protocol.
pub fn f_tensor_split2(
&self,
tensor_indices_or_sections: &Tensor,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split2(
self.c_tensor,
tensor_indices_or_sections.c_tensor,
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_tensordot`: contracts `dims_self` of `self`
/// against `dims_other` of `other`; both lists passed as pointer + length.
pub fn f_tensordot(
&self,
other: &Tensor,
dims_self: &[i64],
dims_other: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tensordot(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dims_self.as_ptr(),
dims_self.len() as i32,
dims_other.as_ptr(),
dims_other.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible binding for `atg_tensordot_out`: result written into `out`.
pub fn f_tensordot_out(
&self,
out: &Tensor,
other: &Tensor,
dims_self: &[i64],
dims_other: &[i64],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tensordot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dims_self.as_ptr(),
dims_self.len() as i32,
dims_other.as_ptr(),
dims_other.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_threshold`: elementwise threshold of `self`
/// with the given `threshold` and replacement `value` scalars.
pub fn f_threshold<S: Into<Scalar>>(&self, threshold: S, value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold(
c_tensors.as_mut_ptr(),
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_threshold_` (trailing `_` is the libtorch
/// in-place naming convention); still returns a tensor handle.
pub fn f_threshold_<S: Into<Scalar>>(
&mut self,
threshold: S,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_(
c_tensors.as_mut_ptr(),
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_threshold_backward`: gradient of the
/// threshold op given `grad_output`; `self` is the forward input.
pub fn f_threshold_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
threshold: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
threshold.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_threshold`]: the result is written into `out`.
pub fn f_threshold_out<S: Into<Scalar>>(
&self,
out: &Tensor,
threshold: S,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_tile`: repeats `self` according to `dims`.
pub fn f_tile(&self, dims: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tile(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to`: returns `self` on the given `device`
/// (the device is passed as its C integer code).
pub fn f_to(&self, device: Device) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to(
c_tensors.as_mut_ptr(),
self.c_tensor,
device.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to1`: conversion to a `(Kind, Device)` pair
/// with `non_blocking`/`copy` flags encoded as 0/1 ints for C.
pub fn f_to1(
&self,
options: (Kind, Device),
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to1(
c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(),
options.1.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to2`: conversion to the given `dtype`.
pub fn f_to2(&self, dtype: Kind, non_blocking: bool, copy: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to2(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to3`: conversion taking `other` as the
/// template (presumably other's dtype/device — confirm against libtorch).
pub fn f_to3(
&self,
other: &Tensor,
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to3(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to4`: conversion to both `device` and `dtype`.
pub fn f_to4(
&self,
device: Device,
dtype: Kind,
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to4(
c_tensors.as_mut_ptr(),
self.c_tensor,
device.c_int(),
dtype.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_dense`: dense representation with `dtype`.
pub fn f_to_dense(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dense(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_dense_backward`; note the C argument
/// order is (grad, self).
pub fn f_to_dense_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dense_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_mkldnn`: MKL-DNN layout with `dtype`.
pub fn f_to_mkldnn(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_mkldnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_mkldnn_backward`; C argument order is
/// (grad, self).
pub fn f_to_mkldnn_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_mkldnn_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_sparse` (no extra arguments).
pub fn f_to_sparse(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_to_sparse1` taking the number of sparse dims.
pub fn f_to_sparse1(&self, sparse_dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse1(
c_tensors.as_mut_ptr(),
self.c_tensor,
sparse_dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_topk`: top-`k` along `dim`; the C call fills
/// two output slots, returned here as a (values, indices) pair.
pub fn f_topk(
&self,
k: i64,
dim: i64,
largest: bool,
sorted: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_topk(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dim,
if largest { 1 } else { 0 },
if sorted { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// `out`-variant of [`Tensor::f_topk`]: results are written into the provided
/// `values` and `indices` tensors.
pub fn f_topk_out(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
largest: bool,
sorted: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_topk_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
k,
dim,
if largest { 1 } else { 0 },
if sorted { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper over C `atg_totype`: converts `self` to `scalar_type`.
pub fn f_totype(&self, scalar_type: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_totype(
c_tensors.as_mut_ptr(),
self.c_tensor,
scalar_type.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_trace`.
pub fn f_trace(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trace(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_trace_backward` (no `self`):
/// gradient of `trace` for an input of the given `sizes`.
pub fn f_trace_backward(grad: &Tensor, sizes: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trace_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
sizes.as_ptr(),
sizes.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_transpose`: swaps `dim0` and `dim1`.
pub fn f_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_transpose_` (libtorch in-place naming).
pub fn f_transpose_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_trapz`: trapezoidal integration of
/// `y` with sample points `x` along `dim`.
pub fn f_trapz(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapz(
c_tensors.as_mut_ptr(),
y.c_tensor,
x.c_tensor,
dim
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_trapz1`: uniform spacing `dx`.
pub fn f_trapz1(y: &Tensor, dx: f64, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapz1(c_tensors.as_mut_ptr(), y.c_tensor, dx, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_triangular_solve`: solves a triangular system
/// with matrix `a`; the C call fills two output slots returned as a pair.
pub fn f_triangular_solve(
&self,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_triangular_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 },
if transpose { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// `out`-variant of [`Tensor::f_triangular_solve`]: results written into the
/// provided `x` and `m` tensors.
pub fn f_triangular_solve_out(
&self,
x: &Tensor,
m: &Tensor,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_triangular_solve_out(
c_tensors.as_mut_ptr(),
x.c_tensor,
m.c_tensor,
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 },
if transpose { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
))
}
/// Fallible wrapper over C `atg_tril`: lower-triangular part with `diagonal`.
pub fn f_tril(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_tril_` (libtorch in-place naming).
pub fn f_tril_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_tril_indices`; `options` is the
/// (kind, device) pair for the newly created tensor.
pub fn f_tril_indices(
row: i64,
col: i64,
offset: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_indices(
c_tensors.as_mut_ptr(),
row,
col,
offset,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_tril`].
pub fn f_tril_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_triplet_margin_loss` taking
/// anchor/positive/negative tensors and loss hyper-parameters.
pub fn f_triplet_margin_loss(
anchor: &Tensor,
positive: &Tensor,
negative: &Tensor,
margin: f64,
p: f64,
eps: f64,
swap: bool,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triplet_margin_loss(
c_tensors.as_mut_ptr(),
anchor.c_tensor,
positive.c_tensor,
negative.c_tensor,
margin,
p,
eps,
if swap { 1 } else { 0 },
reduction.to_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_triu`: upper-triangular part with `diagonal`.
pub fn f_triu(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_triu_` (libtorch in-place naming).
pub fn f_triu_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_triu_indices`; mirrors
/// [`Tensor::f_tril_indices`].
pub fn f_triu_indices(
row: i64,
col: i64,
offset: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_indices(
c_tensors.as_mut_ptr(),
row,
col,
offset,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_triu`].
pub fn f_triu_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_true_divide` with a tensor divisor.
pub fn f_true_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar-divisor overload over C `atg_true_divide1`.
pub fn f_true_divide1<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_true_divide_` (libtorch in-place naming).
pub fn f_true_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Scalar-divisor `&mut self` variant over C `atg_true_divide_1`.
pub fn f_true_divide_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_1(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_true_divide`].
pub fn f_true_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_trunc`.
pub fn f_trunc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_trunc_` (libtorch in-place naming).
pub fn f_trunc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_trunc`].
pub fn f_trunc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_type_as`: converts `self` using `other` as
/// the type template.
pub fn f_type_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_type_as(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_unbind`: removes dimension `dim`, returning
/// its slices. The C call yields a malloc'd NULL-terminated pointer array,
/// collected and freed here.
pub fn f_unbind(&self, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unbind(self.c_tensor, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over C `atg_unflatten`: expands dimension `dim` into the
/// given `sizes`.
pub fn f_unflatten(&self, dim: i64, sizes: &[i64]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unflatten(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
sizes.as_ptr(),
sizes.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_unfold`: sliding windows of `size` with
/// stride `step` over `dimension`.
pub fn f_unfold(&self, dimension: i64, size: i64, step: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold(
c_tensors.as_mut_ptr(),
self.c_tensor,
dimension,
size,
step
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_unfold_backward`: gradient of
/// `unfold` for an input of `input_sizes`.
pub fn f_unfold_backward(
grad_in: &Tensor,
input_sizes: &[i64],
dim: i64,
size: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold_backward(
c_tensors.as_mut_ptr(),
grad_in.c_tensor,
input_sizes.as_ptr(),
input_sizes.len() as i32,
dim,
size,
step
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_uniform_`: fills with uniform samples in
/// `[from, to]` (libtorch in-place naming).
pub fn f_uniform_(&mut self, from: f64, to: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_uniform_(
c_tensors.as_mut_ptr(),
self.c_tensor,
from,
to
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_unique_consecutive`; the optional `dim` is
/// marshalled to C as a (value, is_none) pair — 0 with the is_none flag set
/// when absent. Returns three tensors (output, inverse indices, counts).
pub fn f_unique_consecutive(
&self,
return_inverse: bool,
return_counts: bool,
dim: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_consecutive(
c_tensors.as_mut_ptr(),
self.c_tensor,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 },
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible wrapper over C `atg_unique_dim`: unique elements along `dim`;
/// returns three tensors (output, inverse indices, counts).
pub fn f_unique_dim(
&self,
dim: i64,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible wrapper over C `atg_unique_dim_consecutive`: consecutive-unique
/// along `dim`; returns (output, inverse indices, counts).
pub fn f_unique_dim_consecutive(
&self,
dim: i64,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim_consecutive(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor {
c_tensor: c_tensors[0],
},
Tensor {
c_tensor: c_tensors[1],
},
Tensor {
c_tensor: c_tensors[2],
},
))
}
/// Fallible wrapper over C `atg_unsafe_chunk` ("unsafe" here is the libtorch
/// op name, not Rust `unsafe`): chunks along `dim`, collected from the
/// malloc'd NULL-terminated pointer array which is freed afterwards.
pub fn f_unsafe_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_chunk(self.c_tensor, chunks, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over C `atg_unsafe_split`: equal-size splits of
/// `split_size` along `dim`; same list-return protocol.
pub fn f_unsafe_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_split(self.c_tensor, split_size, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over C `atg_unsafe_split_with_sizes`: splits of explicit
/// `split_sizes` along `dim`; same list-return protocol.
pub fn f_unsafe_split_with_sizes(
&self,
split_sizes: &[i64],
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_split_with_sizes(
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len() as i32,
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over C `atg_unsqueeze`: inserts a size-1 dim at `dim`.
pub fn f_unsqueeze(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `&mut self` variant over C `atg_unsqueeze_` (libtorch in-place naming).
pub fn f_unsqueeze_(&mut self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze_(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_upsample_bicubic2d`. Optional scale factors
/// are marshalled to C as (value, is_none) pairs, with NaN as the placeholder
/// value when absent — this encoding is used throughout the upsample family.
pub fn f_upsample_bicubic2d(
&self,
output_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_upsample_bicubic2d_backward`.
pub fn f_upsample_bicubic2d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of the backward pass: gradient written into `grad_input`.
pub fn f_upsample_bicubic2d_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_upsample_bicubic2d`].
pub fn f_upsample_bicubic2d_out(
&self,
out: &Tensor,
output_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_upsample_bilinear2d`; same optional-scale
/// (NaN, is_none) encoding as the bicubic variant.
pub fn f_upsample_bilinear2d(
&self,
output_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_upsample_bilinear2d_backward`.
pub fn f_upsample_bilinear2d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of the backward pass: gradient written into `grad_input`.
pub fn f_upsample_bilinear2d_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_upsample_bilinear2d`].
pub fn f_upsample_bilinear2d_out(
&self,
out: &Tensor,
output_size: &[i64],
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_upsample_linear1d`; the single optional
/// `scales` uses the same (NaN, is_none) encoding as the 2d upsample ops.
pub fn f_upsample_linear1d(
&self,
output_size: &[i64],
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_upsample_linear1d_backward`.
pub fn f_upsample_linear1d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of the backward pass: gradient written into `grad_input`.
pub fn f_upsample_linear1d_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_upsample_linear1d`].
pub fn f_upsample_linear1d_out(
&self,
out: &Tensor,
output_size: &[i64],
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_upsample_nearest1d`; optional `scales` is
/// passed as a (NaN, is_none) pair. No `align_corners` for nearest mode.
pub fn f_upsample_nearest1d(
&self,
output_size: &[i64],
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_upsample_nearest1d_backward`.
pub fn f_upsample_nearest1d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of the backward pass: gradient written into `grad_input`.
pub fn f_upsample_nearest1d_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_upsample_nearest1d`].
pub fn f_upsample_nearest1d_out(
&self,
out: &Tensor,
output_size: &[i64],
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper over C `atg_upsample_nearest2d`; optional per-axis scale
/// factors use the (NaN, is_none) encoding.
pub fn f_upsample_nearest2d(
&self,
output_size: &[i64],
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Associated-function wrapper over C `atg_upsample_nearest2d_backward`.
pub fn f_upsample_nearest2d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of the backward pass: gradient written into `grad_input`.
pub fn f_upsample_nearest2d_backward_out(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// `out`-variant of [`Tensor::f_upsample_nearest2d`].
pub fn f_upsample_nearest2d_out(
&self,
out: &Tensor,
output_size: &[i64],
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_upsample_nearest3d(
&self,
output_size: &[i64],
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_upsample_nearest3d_backward(
grad_output: &Tensor,
output_size: &[i64],
input_size: &[i64],
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len() as i32,
input_size.as_ptr(),
input_size.len() as i32,
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper around the C `atg_upsample_nearest3d_backward_out` call,
/// writing into `grad_input`; C-side failures surface as `TchError`.
pub fn f_upsample_nearest3d_backward_out(
    grad_input: &Tensor,
    grad_output: &Tensor,
    output_size: &[i64],
    input_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_nearest3d_backward_out(
        out__.as_mut_ptr(),
        grad_input.c_tensor,
        grad_output.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        input_size.as_ptr(),
        input_size.len() as i32,
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_upsample_nearest3d_out` call, writing
/// into `out`; C-side failures surface as `TchError`.
pub fn f_upsample_nearest3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_nearest3d_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_upsample_trilinear3d` call; C-side
/// failures surface as `TchError`.
pub fn f_upsample_trilinear3d(
    &self,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_trilinear3d(
        out__.as_mut_ptr(),
        self.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        if align_corners { 1 } else { 0 },
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_upsample_trilinear3d_backward` call;
/// C-side failures surface as `TchError`.
pub fn f_upsample_trilinear3d_backward(
    grad_output: &Tensor,
    output_size: &[i64],
    input_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_trilinear3d_backward(
        out__.as_mut_ptr(),
        grad_output.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        input_size.as_ptr(),
        input_size.len() as i32,
        if align_corners { 1 } else { 0 },
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_upsample_trilinear3d_backward_out`
/// call, writing into `grad_input`; C-side failures surface as `TchError`.
pub fn f_upsample_trilinear3d_backward_out(
    grad_input: &Tensor,
    grad_output: &Tensor,
    output_size: &[i64],
    input_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_trilinear3d_backward_out(
        out__.as_mut_ptr(),
        grad_input.c_tensor,
        grad_output.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        input_size.as_ptr(),
        input_size.len() as i32,
        if align_corners { 1 } else { 0 },
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_upsample_trilinear3d_out` call,
/// writing into `out`; C-side failures surface as `TchError`.
pub fn f_upsample_trilinear3d_out(
    &self,
    out: &Tensor,
    output_size: &[i64],
    align_corners: bool,
    scales_d: impl Into<Option<f64>>,
    scales_h: impl Into<Option<f64>>,
    scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    // Optional scales cross the FFI boundary as (value, is_absent) pairs.
    let (scales_d, scales_h, scales_w) = (scales_d.into(), scales_h.into(), scales_w.into());
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_upsample_trilinear3d_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        output_size.as_ptr(),
        output_size.len() as i32,
        if align_corners { 1 } else { 0 },
        scales_d.unwrap_or(std::f64::NAN),
        scales_d.is_none() as i8,
        scales_h.unwrap_or(std::f64::NAN),
        scales_h.is_none() as i8,
        scales_w.unwrap_or(std::f64::NAN),
        scales_w.is_none() as i8
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_value_selecting_reduction_backward`
/// call; C-side failures surface as `TchError`.
pub fn f_value_selecting_reduction_backward(
    grad: &Tensor,
    dim: i64,
    indices: &Tensor,
    sizes: &[i64],
    keepdim: bool,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_value_selecting_reduction_backward(
        out__.as_mut_ptr(),
        grad.c_tensor,
        dim,
        indices.c_tensor,
        sizes.as_ptr(),
        sizes.len() as i32,
        if keepdim { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_values` call; C-side failures
/// surface as `TchError`.
pub fn f_values(&self) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_values(out__.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_vander` call; the optional column
/// count `n` is passed as a (value, is_absent) pair with 0 as placeholder.
pub fn f_vander(
    x: &Tensor,
    n: impl Into<Option<i64>>,
    increasing: bool,
) -> Result<Tensor, TchError> {
    let n: Option<i64> = n.into();
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_vander(
        out__.as_mut_ptr(),
        x.c_tensor,
        n.unwrap_or(0i64),
        n.is_none() as i8,
        if increasing { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_var` call; C-side failures surface
/// as `TchError`.
pub fn f_var(&self, unbiased: bool) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_var(
        out__.as_mut_ptr(),
        self.c_tensor,
        if unbiased { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_var1` call (dimension-wise variant);
/// C-side failures surface as `TchError`.
pub fn f_var1(&self, dim: &[i64], unbiased: bool, keepdim: bool) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_var1(
        out__.as_mut_ptr(),
        self.c_tensor,
        dim.as_ptr(),
        dim.len() as i32,
        if unbiased { 1 } else { 0 },
        if keepdim { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_var_mean` call; returns the two
/// result tensors as a pair, or a `TchError` on C-side failure.
pub fn f_var_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
    // This op fills two output slots.
    let mut out__ = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_var_mean(
        out__.as_mut_ptr(),
        self.c_tensor,
        if unbiased { 1 } else { 0 }
    ));
    Ok((
        Tensor { c_tensor: out__[0] },
        Tensor { c_tensor: out__[1] },
    ))
}
/// Fallible wrapper around the C `atg_var_mean1` call (dimension-wise
/// variant); returns the two result tensors as a pair.
pub fn f_var_mean1(
    &self,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
    // This op fills two output slots.
    let mut out__ = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_var_mean1(
        out__.as_mut_ptr(),
        self.c_tensor,
        dim.as_ptr(),
        dim.len() as i32,
        if unbiased { 1 } else { 0 },
        if keepdim { 1 } else { 0 }
    ));
    Ok((
        Tensor { c_tensor: out__[0] },
        Tensor { c_tensor: out__[1] },
    ))
}
/// Fallible wrapper around the C `atg_var_out` call, writing into `out`;
/// C-side failures surface as `TchError`.
pub fn f_var_out(
    &self,
    out: &Tensor,
    dim: &[i64],
    unbiased: bool,
    keepdim: bool,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_var_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim.as_ptr(),
        dim.len() as i32,
        if unbiased { 1 } else { 0 },
        if keepdim { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_vdot` call; C-side failures surface
/// as `TchError`.
pub fn f_vdot(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_vdot(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_vdot_out` call, writing into `out`;
/// C-side failures surface as `TchError`.
pub fn f_vdot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_vdot_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_view` call; C-side failures surface
/// as `TchError`. (The trailing underscore in the Rust name appears to be a
/// generator naming artifact — the C function has none.)
pub fn f_view_(&self, size: &[i64]) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_view(
        out__.as_mut_ptr(),
        self.c_tensor,
        size.as_ptr(),
        size.len() as i32
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_view1` call (dtype overload); the
/// `Kind` is lowered to its C integer code before the call.
pub fn f_view1(&self, dtype: Kind) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_view1(
        out__.as_mut_ptr(),
        self.c_tensor,
        dtype.c_int()
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_view_as` call; C-side failures
/// surface as `TchError`.
pub fn f_view_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_view_as(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_view_as_complex` call; C-side
/// failures surface as `TchError`.
pub fn f_view_as_complex(&self) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_view_as_complex(out__.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_view_as_real` call; C-side failures
/// surface as `TchError`.
pub fn f_view_as_real(&self) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_view_as_real(out__.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
pub fn f_vstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
pub fn f_vstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor {
c_tensor: c_tensors[0],
})
}
/// Fallible wrapper around the single-argument C `atg_where` call; the C
/// side returns a null-terminated, heap-allocated array of tensor pointers
/// which is walked into a `Vec` and then freed.
pub fn f_where_(condition: &Tensor) -> Result<Vec<Tensor>, TchError> {
    let c_tensors = unsafe_torch_err!(atg_where(condition.c_tensor));
    let mut results = vec![];
    let mut idx = 0;
    // Walk the array until the null terminator.
    loop {
        let ptr = unsafe { *c_tensors.add(idx) };
        if ptr.is_null() {
            break;
        }
        results.push(Tensor { c_tensor: ptr });
        idx += 1;
    }
    // The individual tensors now live in `results`; only the pointer array
    // itself still needs releasing.
    unsafe { libc::free(c_tensors as *mut libc::c_void) }
    Ok(results)
}
/// Fallible wrapper around the C `atg_where1` call (tensor/tensor variant);
/// C-side failures surface as `TchError`.
pub fn f_where1(&self, condition: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_where1(
        out__.as_mut_ptr(),
        condition.c_tensor,
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_where2` call (scalar/tensor variant);
/// C-side failures surface as `TchError`.
pub fn f_where2<S: Into<Scalar>>(
    condition: &Tensor,
    self_scalar: S,
    other: &Tensor,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_where2(
        out__.as_mut_ptr(),
        condition.c_tensor,
        self_scalar.into().c_scalar,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_where3` call (tensor/scalar variant);
/// C-side failures surface as `TchError`.
pub fn f_where3<S: Into<Scalar>>(
    &self,
    condition: &Tensor,
    other: S,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_where3(
        out__.as_mut_ptr(),
        condition.c_tensor,
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_where4` call (scalar/scalar variant);
/// note both scalars share the single type parameter `S`.
pub fn f_where4<S: Into<Scalar>>(
    condition: &Tensor,
    self_scalar: S,
    other: S,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_where4(
        out__.as_mut_ptr(),
        condition.c_tensor,
        self_scalar.into().c_scalar,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy` call (tensor/tensor variant);
/// C-side failures surface as `TchError`.
pub fn f_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy1` call (scalar/tensor variant);
/// C-side failures surface as `TchError`.
pub fn f_xlogy1<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy1(
        out__.as_mut_ptr(),
        self_scalar.into().c_scalar,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy2` call (tensor/scalar variant);
/// C-side failures surface as `TchError`.
pub fn f_xlogy2<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy2(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the in-place C `atg_xlogy_` call; takes
/// `&mut self` following the file's convention for in-place ops.
pub fn f_xlogy_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy_(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the in-place C `atg_xlogy_1` call (scalar
/// variant); takes `&mut self` per the file's in-place convention.
pub fn f_xlogy_1<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy_1(
        out__.as_mut_ptr(),
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy_out` call, writing into `out`;
/// C-side failures surface as `TchError`.
pub fn f_xlogy_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy_out1` call (scalar/tensor
/// variant), writing into `out`; C-side failures surface as `TchError`.
pub fn f_xlogy_out1<S: Into<Scalar>>(
    out: &Tensor,
    self_scalar: S,
    other: &Tensor,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy_out1(
        out__.as_mut_ptr(),
        out.c_tensor,
        self_scalar.into().c_scalar,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_xlogy_out2` call (tensor/scalar
/// variant), writing into `out`; C-side failures surface as `TchError`.
pub fn f_xlogy_out2<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    other: S,
) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_xlogy_out2(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the in-place C `atg_zero_` call; takes
/// `&mut self` per the file's in-place convention.
pub fn f_zero_(&mut self) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_zero_(out__.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_zeros` call; `options` carries the
/// element kind and target device, lowered to their C integer codes.
pub fn f_zeros(size: &[i64], options: (Kind, Device)) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_zeros(
        out__.as_mut_ptr(),
        size.as_ptr(),
        size.len() as i32,
        kind.c_int(),
        device.c_int()
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_zeros_like` call; C-side failures
/// surface as `TchError`.
pub fn f_zeros_like(&self) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_zeros_like(out__.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible wrapper around the C `atg_zeros_out` call, writing into `out`;
/// C-side failures surface as `TchError`.
pub fn f_zeros_out(out: &Tensor, size: &[i64]) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_zeros_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        size.as_ptr(),
        size.len() as i32
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
}