use super::Tensor;
use crate::errors::{TrustformersError, Result};
pub use crate::tensor::math_ops::*;
use crate::tensor::math_ops;
/// Math-operation facade for [`Tensor`].
///
/// Every method here is a thin delegating wrapper around the corresponding
/// free function in [`math_ops`] (`arithmetic`, `linear_algebra`,
/// `mathematical`, `statistical`, `utilities`), so callers can use method
/// syntax (`a.add(&b)?`) instead of module paths. No computation happens in
/// this block itself.
impl Tensor {
    // ---- arithmetic ----

    /// Element-wise binary op: delegates to [`math_ops::arithmetic::add`].
    pub fn add(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::arithmetic::add(self, other)
    }

    /// Delegates to [`math_ops::arithmetic::sub`].
    pub fn sub(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::arithmetic::sub(self, other)
    }

    /// Delegates to [`math_ops::arithmetic::mul`].
    pub fn mul(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::arithmetic::mul(self, other)
    }

    /// Delegates to [`math_ops::arithmetic::div`].
    pub fn div(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::arithmetic::div(self, other)
    }

    /// Delegates to [`math_ops::arithmetic::scalar_mul`].
    pub fn scalar_mul(&self, scalar: f32) -> Result<Tensor> {
        math_ops::arithmetic::scalar_mul(self, scalar)
    }

    /// Delegates to [`math_ops::arithmetic::scalar_div`].
    pub fn scalar_div(&self, scalar: f32) -> Result<Tensor> {
        math_ops::arithmetic::scalar_div(self, scalar)
    }

    /// Delegates to [`math_ops::arithmetic::add_scalar`].
    pub fn add_scalar(&self, scalar: f32) -> Result<Tensor> {
        math_ops::arithmetic::add_scalar(self, scalar)
    }

    /// Delegates to [`math_ops::arithmetic::sub_scalar`].
    pub fn sub_scalar(&self, scalar: f32) -> Result<Tensor> {
        math_ops::arithmetic::sub_scalar(self, scalar)
    }

    /// Naming-convention alias for [`Tensor::scalar_div`].
    pub fn div_scalar(&self, scalar: f32) -> Result<Tensor> {
        self.scalar_div(scalar)
    }

    /// Naming-convention alias for [`Tensor::scalar_mul`].
    pub fn mul_scalar(&self, scalar: f32) -> Result<Tensor> {
        self.scalar_mul(scalar)
    }

    // ---- linear algebra ----

    /// Matrix multiplication: delegates to [`math_ops::linear_algebra::matmul`].
    pub fn matmul(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::linear_algebra::matmul(self, other)
    }

    /// Delegates to [`math_ops::linear_algebra::norm`]; returns a scalar `f32`.
    pub fn norm(&self) -> Result<f32> {
        math_ops::linear_algebra::norm(self)
    }

    /// Delegates to [`math_ops::linear_algebra::norm_squared`].
    pub fn norm_squared(&self) -> Result<Tensor> {
        math_ops::linear_algebra::norm_squared(self)
    }

    /// Delegates to [`math_ops::linear_algebra::clip_grad_norm`].
    pub fn clip_grad_norm(&self, max_norm: f32) -> Result<Tensor> {
        math_ops::linear_algebra::clip_grad_norm(self, max_norm)
    }

    // ---- element-wise mathematical functions ----

    /// Delegates to [`math_ops::mathematical::pow`].
    pub fn pow(&self, exponent: f32) -> Result<Tensor> {
        math_ops::mathematical::pow(self, exponent)
    }

    /// Delegates to [`math_ops::mathematical::sqrt`].
    pub fn sqrt(&self) -> Result<Tensor> {
        math_ops::mathematical::sqrt(self)
    }

    /// Delegates to [`math_ops::mathematical::rsqrt`].
    pub fn rsqrt(&self) -> Result<Tensor> {
        math_ops::mathematical::rsqrt(self)
    }

    /// Delegates to [`math_ops::mathematical::square`].
    pub fn square(&self) -> Result<Tensor> {
        math_ops::mathematical::square(self)
    }

    /// Delegates to [`math_ops::mathematical::reciprocal`].
    pub fn reciprocal(&self) -> Result<Tensor> {
        math_ops::mathematical::reciprocal(self)
    }

    /// Delegates to [`math_ops::mathematical::exp`].
    pub fn exp(&self) -> Result<Tensor> {
        math_ops::mathematical::exp(self)
    }

    /// Delegates to [`math_ops::mathematical::log`].
    pub fn log(&self) -> Result<Tensor> {
        math_ops::mathematical::log(self)
    }

    /// Delegates to [`math_ops::mathematical::sin`].
    pub fn sin(&self) -> Result<Tensor> {
        math_ops::mathematical::sin(self)
    }

    /// Delegates to [`math_ops::mathematical::cos`].
    pub fn cos(&self) -> Result<Tensor> {
        math_ops::mathematical::cos(self)
    }

    /// Delegates to [`math_ops::mathematical::tan`].
    pub fn tan(&self) -> Result<Tensor> {
        math_ops::mathematical::tan(self)
    }

    /// Delegates to [`math_ops::mathematical::asin`].
    pub fn asin(&self) -> Result<Tensor> {
        math_ops::mathematical::asin(self)
    }

    /// Delegates to [`math_ops::mathematical::acos`].
    pub fn acos(&self) -> Result<Tensor> {
        math_ops::mathematical::acos(self)
    }

    /// Delegates to [`math_ops::mathematical::atan`].
    pub fn atan(&self) -> Result<Tensor> {
        math_ops::mathematical::atan(self)
    }

    /// Delegates to [`math_ops::mathematical::abs`].
    pub fn abs(&self) -> Result<Tensor> {
        math_ops::mathematical::abs(self)
    }

    /// Delegates to [`math_ops::mathematical::neg`].
    pub fn neg(&self) -> Result<Tensor> {
        math_ops::mathematical::neg(self)
    }

    /// Delegates to [`math_ops::mathematical::sign`].
    pub fn sign(&self) -> Result<Tensor> {
        math_ops::mathematical::sign(self)
    }

    /// Delegates to [`math_ops::mathematical::round`].
    pub fn round(&self) -> Result<Tensor> {
        math_ops::mathematical::round(self)
    }

    /// Delegates to [`math_ops::mathematical::floor`].
    pub fn floor(&self) -> Result<Tensor> {
        math_ops::mathematical::floor(self)
    }

    /// Delegates to [`math_ops::mathematical::ceil`].
    pub fn ceil(&self) -> Result<Tensor> {
        math_ops::mathematical::ceil(self)
    }

    /// Delegates to [`math_ops::mathematical::trunc`].
    pub fn trunc(&self) -> Result<Tensor> {
        math_ops::mathematical::trunc(self)
    }

    /// Delegates to [`math_ops::mathematical::isnan`].
    pub fn isnan(&self) -> Result<Tensor> {
        math_ops::mathematical::isnan(self)
    }

    /// Delegates to [`math_ops::mathematical::isinf`].
    pub fn isinf(&self) -> Result<Tensor> {
        math_ops::mathematical::isinf(self)
    }

    /// Delegates to [`math_ops::mathematical::isfinite`].
    pub fn isfinite(&self) -> Result<Tensor> {
        math_ops::mathematical::isfinite(self)
    }

    /// Delegates to [`math_ops::mathematical::pow_scalar`]. Note the `f64`
    /// exponent, unlike [`Tensor::pow`] which takes `f32`.
    pub fn pow_scalar(&self, exponent: f64) -> Result<Tensor> {
        math_ops::mathematical::pow_scalar(self, exponent)
    }

    // ---- reductions / statistics ----

    /// Delegates to [`math_ops::statistical::sum`]; `axes: None` and
    /// `keepdims` semantics are defined by that function.
    pub fn sum(&self, axes: Option<Vec<usize>>, keepdims: bool) -> Result<Tensor> {
        math_ops::statistical::sum(self, axes, keepdims)
    }

    /// Delegates to [`math_ops::statistical::sum_axes`].
    pub fn sum_axes(&self, axes: &[usize]) -> Result<Tensor> {
        math_ops::statistical::sum_axes(self, axes)
    }

    /// Delegates to [`math_ops::statistical::sum_axis`].
    pub fn sum_axis(&self, axis: usize) -> Result<Tensor> {
        math_ops::statistical::sum_axis(self, axis)
    }

    /// Delegates to [`math_ops::statistical::mean`].
    pub fn mean(&self) -> Result<Tensor> {
        math_ops::statistical::mean(self)
    }

    /// Delegates to [`math_ops::statistical::mean_axes`].
    pub fn mean_axes(&self, axes: &[usize]) -> Result<Tensor> {
        math_ops::statistical::mean_axes(self, axes)
    }

    /// Delegates to [`math_ops::statistical::mean_axis`].
    pub fn mean_axis(&self, axis: usize) -> Result<Tensor> {
        math_ops::statistical::mean_axis(self, axis)
    }

    /// Delegates to [`math_ops::statistical::variance`].
    pub fn variance(&self, axes: Option<&[usize]>, keepdims: bool) -> Result<Tensor> {
        math_ops::statistical::variance(self, axes, keepdims)
    }

    /// Delegates to [`math_ops::statistical::std_dev`].
    pub fn std_dev(&self, axes: Option<&[usize]>, keepdims: bool) -> Result<Tensor> {
        math_ops::statistical::std_dev(self, axes, keepdims)
    }

    /// Delegates to [`math_ops::statistical::std`].
    pub fn std(&self) -> Result<Tensor> {
        math_ops::statistical::std(self)
    }

    /// Delegates to [`math_ops::statistical::min_max`]; returns `(min, max)`.
    pub fn min_max(&self) -> Result<(f32, f32)> {
        math_ops::statistical::min_max(self)
    }

    /// Delegates to [`math_ops::statistical::max_value`].
    pub fn max_value(&self) -> Result<Tensor> {
        math_ops::statistical::max_value(self)
    }

    /// Binary (two-tensor) max: delegates to [`math_ops::statistical::max`].
    pub fn max(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::statistical::max(self, other)
    }

    /// Delegates to [`math_ops::statistical::argmax`].
    pub fn argmax(&self, axis: i32) -> Result<Tensor> {
        math_ops::statistical::argmax(self, axis)
    }

    // ---- utilities ----

    /// Delegates to [`math_ops::utilities::scale`].
    pub fn scale(&self, factor: f32) -> Result<Tensor> {
        math_ops::utilities::scale(self, factor)
    }

    /// Delegates to [`math_ops::utilities::clamp`].
    pub fn clamp(&self, min_val: f32, max_val: f32) -> Result<Tensor> {
        math_ops::utilities::clamp(self, min_val, max_val)
    }

    /// Delegates to [`math_ops::utilities::broadcast_to`].
    pub fn broadcast_to(&self, shape: &[usize]) -> Result<Tensor> {
        math_ops::utilities::broadcast_to(self, shape)
    }

    /// Delegates to [`math_ops::utilities::get_scalar`].
    pub fn get_scalar(&self, indices: &[usize]) -> Result<f32> {
        math_ops::utilities::get_scalar(self, indices)
    }

    /// Delegates to [`math_ops::utilities::set_scalar`]. Takes `&self` and
    /// returns a new tensor rather than mutating in place.
    pub fn set_scalar(&self, indices: &[usize], value: f32) -> Result<Tensor> {
        math_ops::utilities::set_scalar(self, indices, value)
    }

    /// Delegates to [`math_ops::utilities::greater`].
    pub fn greater(&self, other: &Tensor) -> Result<Tensor> {
        math_ops::utilities::greater(self, other)
    }

    /// Delegates to [`math_ops::utilities::lerp`].
    pub fn lerp(&self, other: &Tensor, weight: f32) -> Result<Tensor> {
        math_ops::utilities::lerp(self, other, weight)
    }

    /// Delegates to [`math_ops::utilities::sub_scaled`].
    pub fn sub_scaled(&self, other: &Tensor, factor: f32) -> Result<Tensor> {
        math_ops::utilities::sub_scaled(self, other, factor)
    }

    /// Delegates to [`math_ops::utilities::add_scaled`].
    pub fn add_scaled(&self, other: &Tensor, factor: f32) -> Result<Tensor> {
        math_ops::utilities::add_scaled(self, other, factor)
    }

    // ---- not yet ported to the modular structure ----
    // Parameters are `_`-prefixed so these stubs compile without
    // `unused_variables` warnings; remove the prefixes when implementing.

    /// Unimplemented: always returns a `tensor_op_error`.
    pub fn layer_norm(&self, _axis: i32, _epsilon: f32) -> Result<Tensor> {
        Err(TrustformersError::tensor_op_error(
            "Layer norm not yet implemented in modular structure",
            "layer_norm",
        ))
    }

    /// Unimplemented: always returns a `tensor_op_error`.
    pub fn cross_entropy(&self, _targets: &Tensor, _reduction: &str) -> Result<Tensor> {
        Err(TrustformersError::tensor_op_error(
            "Cross entropy not yet implemented in modular structure",
            "cross_entropy",
        ))
    }

    /// Unimplemented: always returns a `tensor_op_error`.
    pub fn cosine_similarity(&self, _other: &Tensor, _dim: i32, _eps: f32) -> Result<Tensor> {
        Err(TrustformersError::tensor_op_error(
            "Cosine similarity not yet implemented in modular structure",
            "cosine_similarity",
        ))
    }

    /// Unimplemented: always returns a `tensor_op_error`.
    pub fn log_softmax(&self, _dim: i32) -> Result<Tensor> {
        Err(TrustformersError::tensor_op_error(
            "Log softmax not yet implemented in modular structure",
            "log_softmax",
        ))
    }
}
pub use math_ops::broadcasting::shapes_are_broadcastable;
pub use math_ops::stability::{
is_stable_f32, is_stable_f64, stabilize_f32, stabilize_f64,
STABILITY_EPSILON_F32, STABILITY_EPSILON_F64, MAX_SAFE_VALUE_F32, MAX_SAFE_VALUE_F64
};