use std::ops::Range;
use burn_backend::{
BoolDType, Distribution, ExecutionError, FloatDType, IntDType, Scalar, Shape, TensorData,
TensorMetadata,
ops::{FloatTensorOps, IntTensorOps},
tensor::IntTensor,
};
use crate::{IntoKind, LibTorch, LibTorchDevice, TchShape, TchTensor, element::TchElement};
use super::TchOps;
/// Integer tensor operations for the LibTorch backend.
///
/// Most operations delegate straight to [`TchOps`] helpers. LibTorch lacks
/// native integer kernels for a few operations (matmul, true division,
/// remainder, mean), so those convert to float, compute, and cast back to
/// the original integer dtype.
impl<E: TchElement> IntTensorOps<Self> for LibTorch<E> {
    /// Creates an int tensor from raw [`TensorData`], dispatching on its dtype.
    fn int_from_data(data: TensorData, device: &LibTorchDevice) -> TchTensor {
        match data.dtype {
            burn_backend::DType::I64 => TchTensor::from_data::<i64>(data, (*device).into()),
            burn_backend::DType::I32 => TchTensor::from_data::<i32>(data, (*device).into()),
            burn_backend::DType::I16 => TchTensor::from_data::<i16>(data, (*device).into()),
            burn_backend::DType::I8 => TchTensor::from_data::<i8>(data, (*device).into()),
            burn_backend::DType::U8 => TchTensor::from_data::<u8>(data, (*device).into()),
            _ => unimplemented!("Unsupported dtype for `int_from_data`: {:?}", data.dtype),
        }
    }

    fn int_repeat_dim(tensor: TchTensor, dim: usize, times: usize) -> TchTensor {
        TchOps::repeat_dim(tensor, dim, times)
    }

    /// Reads the tensor back to host memory as [`TensorData`].
    ///
    /// The tensor is flattened before extraction so the values come out in a
    /// single contiguous run; the original shape is re-attached to the data.
    /// NOTE(review): values are always read as `i64`, so the resulting data is
    /// I64 regardless of the tensor's actual int dtype — confirm this matches
    /// the trait contract for narrower dtypes.
    async fn int_into_data(tensor: TchTensor) -> Result<TensorData, ExecutionError> {
        let shape = tensor.shape();
        // `shape` was captured above, so the tensor can be consumed directly;
        // no clone is needed before the reshape.
        let tensor = Self::int_reshape(tensor, Shape::new([shape.num_elements()]));
        let values: Result<Vec<i64>, tch::TchError> = tensor.tensor.shallow_clone().try_into();
        // NOTE(review): conversion failure panics here instead of surfacing as
        // an `ExecutionError` — verify whether a `From` impl exists to map it.
        Ok(TensorData::new(values.unwrap(), shape))
    }

    fn int_to_device(tensor: TchTensor, device: &LibTorchDevice) -> TchTensor {
        TchOps::to_device(tensor, device)
    }

    fn int_reshape(tensor: TchTensor, shape: Shape) -> TchTensor {
        TchOps::reshape(tensor, shape)
    }

    fn int_device(tensor: &TchTensor) -> LibTorchDevice {
        tensor.tensor.device().into()
    }

    /// Allocates an uninitialized int tensor of the given shape and dtype.
    fn int_empty(shape: Shape, device: &LibTorchDevice, dtype: IntDType) -> TchTensor {
        let tensor = tch::Tensor::empty(
            TchShape::from(shape).dims,
            (dtype.into_kind(), (*device).into()),
        );
        TchTensor::new(tensor)
    }

    fn int_slice(tensor: TchTensor, slices: &[burn_backend::Slice]) -> TchTensor {
        TchOps::slice_with_steps(tensor, slices)
    }

    fn int_slice_assign(
        tensor: TchTensor,
        slices: &[burn_backend::Slice],
        value: TchTensor,
    ) -> TchTensor {
        TchOps::slice_assign(tensor, slices, value)
    }

    fn int_cat(tensors: Vec<TchTensor>, dim: usize) -> TchTensor {
        TchOps::cat(tensors, dim)
    }

    /// Integer matmul, computed via f32 because LibTorch's `matmul` does not
    /// generally support integer operands; the result is cast back to the
    /// original int dtype.
    fn int_matmul(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        let int_dtype = lhs.dtype();
        let lhs = Self::int_into_float(lhs, FloatDType::F32);
        let rhs = Self::int_into_float(rhs, FloatDType::F32);
        let out = lhs.tensor.f_matmul(&rhs.tensor).unwrap();
        Self::float_into_int(TchTensor::new(out), int_dtype.into())
    }

    // Comparisons: LibTorch always produces a bool tensor, so the requested
    // output dtype is ignored.
    fn int_equal(lhs: TchTensor, rhs: TchTensor, _out_dtype: BoolDType) -> TchTensor {
        TchOps::equal(lhs, rhs)
    }

    fn int_equal_elem(lhs: TchTensor, rhs: Scalar, _out_dtype: BoolDType) -> TchTensor {
        TchOps::equal_elem(lhs, rhs.elem::<i64>())
    }

    fn int_greater(lhs: TchTensor, rhs: TchTensor, _out_dtype: BoolDType) -> TchTensor {
        TchOps::greater(lhs, rhs)
    }

    fn int_greater_elem(lhs: TchTensor, rhs: Scalar, _out_dtype: BoolDType) -> TchTensor {
        TchOps::greater_elem(lhs, rhs.elem::<i64>())
    }

    fn int_greater_equal(lhs: TchTensor, rhs: TchTensor, _out_dtype: BoolDType) -> TchTensor {
        TchOps::greater_equal(lhs, rhs)
    }

    fn int_greater_equal_elem(lhs: TchTensor, rhs: Scalar, _out_dtype: BoolDType) -> TchTensor {
        TchOps::greater_equal_elem(lhs, rhs.elem::<i64>())
    }

    fn int_lower(lhs: TchTensor, rhs: TchTensor, _out_dtype: BoolDType) -> TchTensor {
        TchOps::lower(lhs, rhs)
    }

    fn int_lower_elem(lhs: TchTensor, rhs: Scalar, _out_dtype: BoolDType) -> TchTensor {
        TchOps::lower_elem(lhs, rhs.elem::<i64>())
    }

    fn int_lower_equal(lhs: TchTensor, rhs: TchTensor, _out_dtype: BoolDType) -> TchTensor {
        TchOps::lower_equal(lhs, rhs)
    }

    fn int_lower_equal_elem(lhs: TchTensor, rhs: Scalar, _out_dtype: BoolDType) -> TchTensor {
        TchOps::lower_equal_elem(lhs, rhs.elem::<i64>())
    }

    fn int_add(lhs: TchTensor, rhs: TchTensor) -> TchTensor {
        TchOps::add(lhs, rhs)
    }

    // Scalar arithmetic uses `unary_ops` to mutate in place when the tensor
    // is uniquely owned, and fall back to an out-of-place op otherwise.
    fn int_add_scalar(lhs: TchTensor, rhs: Scalar) -> TchTensor {
        lhs.unary_ops(
            |mut tensor| tensor.f_add_scalar_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.f_add_scalar(rhs.elem::<i64>()).unwrap(),
        )
    }

    fn int_sub(lhs: TchTensor, rhs: TchTensor) -> TchTensor {
        TchOps::sub(lhs, rhs)
    }

    fn int_sub_scalar(lhs: TchTensor, rhs: Scalar) -> TchTensor {
        lhs.unary_ops(
            |mut tensor| tensor.f_sub_scalar_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.f_sub_scalar(rhs.elem::<i64>()).unwrap(),
        )
    }

    fn int_mul(lhs: TchTensor, rhs: TchTensor) -> TchTensor {
        TchOps::mul(lhs, rhs)
    }

    fn int_mul_scalar(lhs: TchTensor, rhs: Scalar) -> TchTensor {
        lhs.unary_ops(
            |mut tensor| tensor.f_mul_scalar_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.f_mul_scalar(rhs.elem::<i64>()).unwrap(),
        )
    }

    /// Integer division, computed in float and cast back to the original int
    /// dtype (the float→int cast truncates).
    fn int_div(lhs: TchTensor, rhs: TchTensor) -> TchTensor {
        let dtype = lhs.tensor.kind();
        // `to_dtype(_, non_blocking=true, copy=false)`: allow async transfer,
        // reuse storage when the dtype already matches.
        let copy = false;
        let non_blocking = true;
        let lhs: TchTensor =
            TchTensor::new(lhs.tensor.to_dtype(tch::Kind::Float, non_blocking, copy));
        let rhs: TchTensor =
            TchTensor::new(rhs.tensor.to_dtype(tch::Kind::Float, non_blocking, copy));
        let out = TchOps::div(lhs, rhs);
        TchTensor::new(out.tensor.to_dtype(dtype, non_blocking, copy))
    }

    /// Scalar variant of [`Self::int_div`]; same float round-trip.
    fn int_div_scalar(lhs: TchTensor, rhs: Scalar) -> TchTensor {
        let dtype = lhs.tensor.kind();
        let copy = false;
        let non_blocking = true;
        let lhs: TchTensor =
            TchTensor::new(lhs.tensor.to_dtype(tch::Kind::Float, non_blocking, copy));
        let out: TchTensor = lhs.unary_ops(
            |mut tensor| tensor.f_div_scalar_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.f_div_scalar(rhs.elem::<i64>()).unwrap(),
        );
        TchTensor::new(out.tensor.to_dtype(dtype, non_blocking, copy))
    }

    /// Integer remainder, computed in float and cast back to the original int
    /// dtype.
    fn int_remainder(lhs: TchTensor, rhs: TchTensor) -> TchTensor {
        let dtype = lhs.tensor.kind();
        let copy = false;
        let non_blocking = true;
        let lhs: TchTensor =
            TchTensor::new(lhs.tensor.to_dtype(tch::Kind::Float, non_blocking, copy));
        let rhs: TchTensor =
            TchTensor::new(rhs.tensor.to_dtype(tch::Kind::Float, non_blocking, copy));
        let out = TchOps::remainder(lhs, rhs);
        TchTensor::new(out.tensor.to_dtype(dtype, non_blocking, copy))
    }

    fn int_remainder_scalar(lhs: TchTensor, rhs: Scalar) -> TchTensor {
        lhs.unary_ops(
            // In-place variant for the uniquely-owned branch, consistent with
            // the other scalar ops above.
            |mut tensor| tensor.f_remainder_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.f_remainder(rhs.elem::<i64>()).unwrap(),
        )
    }

    fn int_zeros(shape: Shape, device: &LibTorchDevice, dtype: IntDType) -> TchTensor {
        let shape = TchShape::from(shape);
        let device: tch::Device = (*device).into();
        TchTensor::new(tch::Tensor::zeros(shape.dims, (dtype.into_kind(), device)))
    }

    fn int_ones(shape: Shape, device: &LibTorchDevice, dtype: IntDType) -> TchTensor {
        let shape = TchShape::from(shape);
        let device: tch::Device = (*device).into();
        TchTensor::new(tch::Tensor::ones(shape.dims, (dtype.into_kind(), device)))
    }

    fn int_full(
        shape: Shape,
        fill_value: Scalar,
        device: &LibTorchDevice,
        dtype: IntDType,
    ) -> TchTensor {
        let shape = TchShape::from(shape);
        let device: tch::Device = (*device).into();
        TchTensor::new(tch::Tensor::full(
            shape.dims,
            fill_value.elem::<i64>(),
            (dtype.into_kind(), device),
        ))
    }

    fn int_sum(tensor: TchTensor) -> TchTensor {
        TchOps::sum(tensor)
    }

    fn int_sum_dim(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::sum_dim(tensor, dim)
    }

    fn int_prod(tensor: TchTensor) -> TchTensor {
        TchOps::prod(tensor)
    }

    fn int_prod_dim(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::prod_dim(tensor, dim)
    }

    /// Mean of all elements, computed in float (LibTorch `mean` rejects
    /// integer inputs) and cast back to the original int dtype.
    fn int_mean(tensor: TchTensor) -> TchTensor {
        let dtype = tensor.tensor.kind();
        let tensor: TchTensor =
            TchTensor::new(tensor.tensor.to_dtype(tch::Kind::Float, true, false));
        // `TchOps::mean` already yields a `TchTensor`; no re-wrapping needed.
        let output = TchOps::mean(tensor);
        TchTensor::new(output.tensor.to_dtype(dtype, true, false))
    }

    /// Per-dimension variant of [`Self::int_mean`]; same float round-trip.
    fn int_mean_dim(tensor: TchTensor, dim: usize) -> TchTensor {
        let dtype = tensor.tensor.kind();
        let tensor: TchTensor =
            TchTensor::new(tensor.tensor.to_dtype(tch::Kind::Float, true, false));
        let output = TchOps::mean_dim(tensor, dim);
        TchTensor::new(output.tensor.to_dtype(dtype, true, false))
    }

    fn int_cumsum(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::cumsum(tensor, dim)
    }

    fn int_cumprod(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::cumprod(tensor, dim)
    }

    fn int_cummin(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::cummin(tensor, dim)
    }

    fn int_cummax(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::cummax(tensor, dim)
    }

    fn int_gather(dim: usize, tensor: TchTensor, indices: TchTensor) -> TchTensor {
        TchOps::gather(dim, tensor, indices)
    }

    fn int_scatter_add(
        dim: usize,
        tensor: TchTensor,
        indices: TchTensor,
        value: TchTensor,
    ) -> TchTensor {
        TchOps::scatter(dim, tensor, indices, value)
    }

    fn int_scatter_nd(
        data: TchTensor,
        indices: TchTensor,
        values: TchTensor,
        reduction: burn_backend::tensor::IndexingUpdateOp,
    ) -> TchTensor {
        TchOps::scatter_nd(data, indices, values, reduction)
    }

    fn int_gather_nd(data: TchTensor, indices: TchTensor) -> TchTensor {
        TchOps::gather_nd(data, indices)
    }

    fn int_select(tensor: TchTensor, dim: usize, indices: TchTensor) -> TchTensor {
        TchOps::index_select_dim(tensor, dim, indices)
    }

    fn int_select_add(
        tensor: TchTensor,
        dim: usize,
        indices: TchTensor,
        value: TchTensor,
    ) -> TchTensor {
        TchOps::select_assign(tensor, dim, indices, value)
    }

    /// Selects from `tensor` where `mask` is true, from `source` otherwise.
    /// Note the operands are swapped relative to `where_self`: it is invoked
    /// on `source` with `tensor` as the fallback.
    fn int_mask_where(tensor: TchTensor, mask: TchTensor, source: TchTensor) -> TchTensor {
        TchTensor::binary_ops_tensor(
            tensor,
            source,
            |tensor, source| source.f_where_self(&mask.tensor, tensor).unwrap(),
            |tensor, source| source.f_where_self(&mask.tensor, tensor).unwrap(),
            |tensor, source| source.f_where_self(&mask.tensor, tensor).unwrap(),
        )
    }

    fn int_mask_fill(tensor: TchTensor, mask: TchTensor, value: Scalar) -> TchTensor {
        let value = value.elem::<i64>();
        tensor.unary_ops(
            |mut tensor| tensor.f_masked_fill_(&mask.tensor, value).unwrap(),
            |tensor| tensor.f_masked_fill(&mask.tensor, value).unwrap(),
        )
    }

    fn int_argmax(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::argmax(tensor, dim)
    }

    fn int_argtopk(_tensor: TchTensor, _dim: usize, _k: usize) -> TchTensor {
        panic!("argtopk not implemented for torch")
    }

    fn int_topk(tensor: TchTensor, dim: usize, k: usize) -> TchTensor {
        TchOps::topk(tensor, dim, k)
    }

    fn int_argmin(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::argmin(tensor, dim)
    }

    fn int_max_dim(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::max_dim(tensor, dim)
    }

    fn int_max_dim_with_indices(tensor: TchTensor, dim: usize) -> (TchTensor, TchTensor) {
        TchOps::max_dim_with_indices(tensor, dim)
    }

    fn int_min_dim(tensor: TchTensor, dim: usize) -> TchTensor {
        TchOps::min_dim(tensor, dim)
    }

    fn int_min_dim_with_indices(tensor: TchTensor, dim: usize) -> (TchTensor, TchTensor) {
        TchOps::min_dim_with_indices(tensor, dim)
    }

    fn int_clamp_min(tensor: TchTensor, min: Scalar) -> TchTensor {
        TchOps::clamp_min(tensor, min.elem::<i64>())
    }

    fn int_clamp_max(tensor: TchTensor, max: Scalar) -> TchTensor {
        TchOps::clamp_max(tensor, max.elem::<i64>())
    }

    fn int_clamp(tensor: TchTensor, min: Scalar, max: Scalar) -> TchTensor {
        TchOps::clamp(tensor, min.elem::<i64>(), max.elem::<i64>())
    }

    fn int_abs(tensor: TchTensor) -> TchTensor {
        tensor.unary_ops(|mut tensor| tensor.abs_(), |tensor| tensor.abs())
    }

    fn int_into_float(tensor: TchTensor, out_dtype: FloatDType) -> TchTensor {
        let tensor = tensor.tensor.to_kind(out_dtype.into_kind());
        TchTensor::new(tensor)
    }

    fn int_swap_dims(tensor: IntTensor<Self>, dim1: usize, dim2: usize) -> IntTensor<Self> {
        TchOps::swap_dims(tensor, dim1, dim2)
    }

    /// Samples a random int tensor.
    ///
    /// * `Default` draws uniform integers in `[0, 255)`.
    /// * `Uniform(from, to)` draws in `[from, to)` (the f64 bounds are
    ///   truncated to i64).
    /// * `Bernoulli` / `Normal` sample in place into an empty tensor.
    fn int_random(
        shape: Shape,
        distribution: Distribution,
        device: &LibTorchDevice,
        dtype: IntDType,
    ) -> TchTensor {
        match distribution {
            Distribution::Default => TchTensor::new(tch::Tensor::randint_low(
                0,
                255,
                shape.iter().map(|i| *i as i64).collect::<Vec<_>>(),
                (dtype.into_kind(), (*device).into()),
            )),
            Distribution::Bernoulli(prob) => {
                let mut tensor = TchTensor::empty(shape, *device, dtype.into());
                tensor
                    .mut_ops(|tensor| tensor.f_bernoulli_float_(prob).unwrap())
                    .unwrap()
            }
            Distribution::Uniform(from, to) => TchTensor::new(tch::Tensor::randint_low(
                from as i64,
                to as i64,
                shape.iter().map(|i| *i as i64).collect::<Vec<_>>(),
                (dtype.into_kind(), (*device).into()),
            )),
            Distribution::Normal(mean, std) => {
                let mut tensor = TchTensor::empty(shape, *device, dtype.into());
                tensor.mut_ops(|tensor| tensor.normal_(mean, std)).unwrap()
            }
        }
    }

    /// Builds `[start, end)`: an arange of length `end - start` starting at
    /// zero, shifted by `start` with a single in-place add when needed.
    fn int_arange(range: Range<i64>, device: &LibTorchDevice, dtype: IntDType) -> TchTensor {
        let device: tch::Device = (*device).into();
        let mut tensor = tch::Tensor::arange(range.end - range.start, (dtype.into_kind(), device));
        if range.start != 0 {
            tensor = tensor.f_add_scalar_(range.start).unwrap();
        }
        TchTensor::new(tensor)
    }

    fn int_permute(tensor: IntTensor<Self>, axes: &[usize]) -> IntTensor<Self> {
        TchOps::permute(tensor, axes)
    }

    fn int_flip(tensor: IntTensor<Self>, axes: &[usize]) -> IntTensor<Self> {
        TchOps::flip(tensor, axes)
    }

    fn int_sign(tensor: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::sign(tensor)
    }

    fn int_expand(tensor: IntTensor<Self>, shape: Shape) -> IntTensor<Self> {
        TchOps::expand(tensor, shape)
    }

    fn int_sort(tensor: IntTensor<Self>, dim: usize, descending: bool) -> IntTensor<Self> {
        TchOps::sort(tensor, dim, descending)
    }

    fn int_argsort(tensor: IntTensor<Self>, dim: usize, descending: bool) -> IntTensor<Self> {
        TchOps::argsort(tensor, dim, descending)
    }

    fn bitwise_and(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_and(lhs, rhs)
    }

    fn bitwise_or(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_or(lhs, rhs)
    }

    fn bitwise_xor(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_xor(lhs, rhs)
    }

    fn bitwise_not(tensor: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_not(tensor)
    }

    fn bitwise_and_scalar(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        TchOps::bitwise_and_scalar(lhs, rhs.elem::<i64>())
    }

    fn bitwise_or_scalar(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        TchOps::bitwise_or_scalar(lhs, rhs.elem::<i64>())
    }

    fn bitwise_xor_scalar(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        TchOps::bitwise_xor_scalar(lhs, rhs.elem::<i64>())
    }

    fn bitwise_left_shift(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_left_shift(lhs, rhs)
    }

    fn bitwise_right_shift(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::bitwise_right_shift(lhs, rhs)
    }

    fn bitwise_left_shift_scalar(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        TchOps::bitwise_left_shift_scalar(lhs, rhs.elem::<i64>())
    }

    fn bitwise_right_shift_scalar(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        TchOps::bitwise_right_shift_scalar(lhs, rhs.elem::<i64>())
    }

    /// Casts to another int dtype; a no-op when the kind already matches.
    fn int_cast(tensor: IntTensor<Self>, dtype: IntDType) -> IntTensor<Self> {
        let kind = dtype.into_kind();
        if tensor.tensor.kind() == kind {
            tensor
        } else {
            TchTensor::new(tensor.tensor.to_kind(kind))
        }
    }

    fn int_unfold(
        tensor: IntTensor<Self>,
        dim: usize,
        size: usize,
        step: usize,
    ) -> IntTensor<Self> {
        TchOps::unfold(tensor, dim, size, step)
    }

    fn int_powi(lhs: IntTensor<Self>, rhs: IntTensor<Self>) -> IntTensor<Self> {
        TchOps::pow(lhs, rhs)
    }

    fn int_powi_scalar_impl(lhs: IntTensor<Self>, rhs: Scalar) -> IntTensor<Self> {
        lhs.unary_ops(
            |mut tensor| tensor.f_pow_(rhs.elem::<i64>()).unwrap(),
            |tensor| tensor.pow_tensor_scalar(rhs.elem::<i64>()),
        )
    }
}