
Struct Tensor 

Source
pub struct Tensor<T = f32>
where T: TensorElement,
{ /* private fields */ }

The main Tensor type for ToRSh

A tensor implementation with automatic memory mapping for large tensors and efficient, reference-counted views

Implementations§

Source§

impl<T> Tensor<T>
where T: FloatElement + Copy,

Source

pub fn scalar(value: T) -> Result<Tensor<T>, TorshError>

Create a 0-dimensional tensor (scalar) from a single value
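
§Examples
A minimal sketch (illustrative, not taken from the crate's own docs), assuming `ndim` and `item` behave as documented elsewhere on this page:

let s = Tensor::<f32>::scalar(2.5).unwrap();
assert_eq!(s.ndim(), 0);                 // 0-dimensional
assert_eq!(s.item().unwrap(), 2.5);      // the single value can be extracted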

Source

pub fn max( &self, dim: Option<usize>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>

Maximum of tensor elements, optionally reduced along a single dimension

Source

pub fn max_dim(&self, dim: i32, keepdim: bool) -> Result<Tensor<T>, TorshError>

Maximum along specified dimension

Source

pub fn min_dim(&self, dim: i32, keepdim: bool) -> Result<Tensor<T>, TorshError>

Minimum along specified dimension

Source§

impl<T> Tensor<T>

Boolean reduction operations for tensors

Source

pub fn all(&self) -> Result<Tensor<bool>, TorshError>

Check if all elements are non-zero (true)

Source

pub fn any(&self) -> Result<Tensor<bool>, TorshError>

Check if any element is non-zero (true)

Source

pub fn all_dim( &self, dim: i32, keepdim: bool, ) -> Result<Tensor<bool>, TorshError>

Check if all elements along dimension are non-zero (true)

Source

pub fn any_dim( &self, dim: i32, keepdim: bool, ) -> Result<Tensor<bool>, TorshError>

Check if any element along dimension is non-zero (true)

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn sum(&self) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Zero,

Compute sum of all elements

Source

pub fn sum_dim( &self, dims: &[i32], keepdim: bool, ) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Zero,

Compute sum along specified dimensions

Source

pub fn mean( &self, dims: Option<&[usize]>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Div<Output = T> + Zero + One + FromPrimitive,

Compute mean along specified dimensions
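
§Examples
An illustrative sketch combining `sum`, `sum_dim`, and `mean`; it assumes `from_data` stores values in row-major order, as the examples further down this page suggest:

let t = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu).unwrap();
assert_eq!(t.sum().unwrap().item().unwrap(), 10.0);
// Reduce over dimension 0 (rows), dropping the reduced dimension:
let col_sums = t.sum_dim(&[0], false).unwrap();
assert_eq!(col_sums.to_vec().unwrap(), vec![4.0, 6.0]);
// Mean over all elements:
assert_eq!(t.mean(None, false).unwrap().item().unwrap(), 2.5);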

Source

pub fn cumprod(&self, dim: i32) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T> + One + Copy,

Compute cumulative product along specified dimension

Source

pub fn matmul(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Float + Sum,

Matrix multiplication
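
§Examples
A small sketch multiplying a 2×2 matrix by a matrix of ones (values chosen so the result is exact in f32); row-major storage is assumed:

let a = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu).unwrap();
let b = Tensor::<f32>::ones(&[2, 2], DeviceType::Cpu).unwrap();
let c = a.matmul(&b).unwrap();
// Each output element equals the corresponding row sum of `a`:
assert_eq!(c.to_vec().unwrap(), vec![3.0, 3.0, 7.0, 7.0]);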

Source

pub fn sort( &self, _dim: Option<i32>, _descending: bool, ) -> Result<(Tensor<T>, Tensor<T>), TorshError>

Sort tensor along specified dimension

Source

pub fn min(&self) -> Result<Tensor<T>, TorshError>
where T: PartialOrd + Copy,

Min reduction method without trait bounds (for Iterator compatibility)

Source

pub fn t(&self) -> Result<Tensor<T>, TorshError>
where T: Copy + Zero,

Transpose operation (2D tensor)

Source

pub fn shares_storage(&self, other: &Tensor<T>) -> bool

Check if two tensors share the same underlying storage

Source

pub fn data(&self) -> Result<Vec<T>, TorshError>
where T: Copy,

Get data as a vector (backward compatibility method)

Source

pub fn data_mut_apply<F>(&mut self, func: F) -> Result<(), TorshError>
where F: FnMut(&mut T), T: Copy,

Apply a function to all elements in-place using direct storage access

Source

pub fn clone_data(&self) -> Tensor<T>
where T: Copy,

Clone the tensor with independent data (deep copy)

Source

pub fn make_unique(&mut self) -> Result<(), TorshError>

Ensure tensor has unique data (copy-on-write semantics)

Source

pub fn apply_<F>(&mut self, func: F) -> Result<(), TorshError>
where F: Fn(T) -> T, T: Copy,

Apply function in-place

Source

pub fn map<F>(&self, func: F) -> Result<Tensor<T>, TorshError>
where F: Fn(T) -> T, T: Copy,

Apply function element-wise to create new tensor

Source

pub fn item(&self) -> Result<T, TorshError>
where T: Copy,

Extract a scalar value from a single-element tensor

Source

pub fn cat(tensors: &[&Tensor<T>], dim: i32) -> Result<Tensor<T>, TorshError>
where T: Copy,

Concatenate tensors along a dimension
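
§Examples
A hedged sketch concatenating two 1-D tensors along dimension 0:

let a = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu).unwrap();
let b = Tensor::from_data(vec![3.0f32, 4.0], vec![2], DeviceType::Cpu).unwrap();
let c = Tensor::cat(&[&a, &b], 0).unwrap();
assert_eq!(c.to_vec().unwrap(), vec![1.0, 2.0, 3.0, 4.0]);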

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy + Float,

Source

pub fn norm(&self) -> Result<Tensor<T>, TorshError>

Compute the L2 norm of the tensor

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn matmul_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Float + Zero + One + Sum,

Use SciRS2 backend for optimized matrix multiplication

Source

pub fn sum_scirs2(&self) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Zero,

Use SciRS2 backend for optimized sum reduction

Source

pub fn mean_scirs2(&self) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Div<Output = T> + Zero + From<usize> + FromPrimitive,

Use SciRS2 backend for optimized mean reduction

Source

pub fn relu_scirs2(&self) -> Result<Tensor<T>, TorshError>
where T: PartialOrd + Zero,

Use SciRS2 backend for optimized ReLU activation

Source

pub fn sigmoid_scirs2(&self) -> Result<Tensor<T>, TorshError>
where T: Float,

Use SciRS2 backend for optimized sigmoid activation

Source

pub fn tanh_scirs2(&self) -> Result<Tensor<T>, TorshError>
where T: Float,

Use SciRS2 backend for optimized tanh activation

Source

pub fn softmax(&self, dim: i32) -> Result<Tensor<T>, TorshError>
where T: FloatElement + Copy + Sub<Output = T> + Div<Output = T>,

Softmax activation along the specified dimension. Computes softmax(x_i) = exp(x_i) / sum_j(exp(x_j)).
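
§Examples
A sketch of softmax over a 1-D tensor; equal logits should map to equal probabilities. The check is approximate, since the numerical details are not specified on this page:

let x = Tensor::from_data(vec![0.0f32, 0.0], vec![2], DeviceType::Cpu).unwrap();
let p = x.softmax(0).unwrap().to_vec().unwrap();
assert!((p[0] - 0.5).abs() < 1e-6 && (p[1] - 0.5).abs() < 1e-6);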

Source

pub fn log_softmax(&self, dim: i32) -> Result<Tensor<T>, TorshError>
where T: FloatElement<Output = T> + Copy + Sub,

Log softmax activation along the specified dimension. Computes log_softmax(x_i) = log(softmax(x_i)).

Source

pub fn cumsum(&self, dim: i32) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Zero + Copy,

Computes cumulative sum along a dimension

Source

pub fn argmin(&self, dim: Option<i32>) -> Result<Tensor<i64>, TorshError>
where T: PartialOrd + Copy,

Find the indices of minimum values along a dimension

Source

pub fn argmax(&self, dim: Option<i32>) -> Result<Tensor<i64>, TorshError>
where T: PartialOrd + Copy,

Find the indices of maximum values along a dimension
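
§Examples
Illustrative use of `argmax` with `dim = None`, which is assumed here to return the flat index of the global maximum:

let t = Tensor::from_data(vec![1.0f32, 5.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
let idx = t.argmax(None).unwrap();
assert_eq!(idx.item().unwrap(), 1);      // index of the value 5.0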

Source

pub fn topk( &self, k: usize, dim: Option<i32>, largest: bool, sorted: bool, ) -> Result<(Tensor<T>, Tensor<i64>), TorshError>
where T: PartialOrd + Copy + Zero,

Returns the k largest (or smallest, when largest is false) elements along a dimension, together with their indices

Source§

impl<T> Tensor<T>
where T: ComplexElement + Copy,

Source

pub fn complex_conj(&self) -> Result<Tensor<T>, TorshError>
where T: Copy,

Complex conjugate for complex tensors

Source

pub fn real(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>

Get real part of complex tensor

Source

pub fn imag(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>

Get imaginary part of complex tensor

Source

pub fn abs(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>

Get magnitude (absolute value) of complex tensor

Source

pub fn angle(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>

Get phase (argument) of complex tensor

Source

pub fn complex( real: &Tensor<<T as ComplexElement>::Real>, imag: &Tensor<<T as ComplexElement>::Real>, ) -> Result<Tensor<T>, TorshError>

Create complex tensor from real and imaginary parts

Source

pub fn polar( magnitude: &Tensor<<T as ComplexElement>::Real>, phase: &Tensor<<T as ComplexElement>::Real>, ) -> Result<Tensor<T>, TorshError>

Create complex tensor from polar representation (magnitude and phase)

Source

pub fn backward_complex(&self) -> Result<(), TorshError>
where T: Copy + Default + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Div<Output = T>,

Backward pass for complex tensors (compute gradients)

Complex autograd follows PyTorch’s approach where gradients are computed treating complex numbers as 2D vectors of real numbers.

Source

pub fn complex_mul(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T> + Add<Output = T> + Sub<Output = T>,

Element-wise complex multiplication with proper gradient tracking

Source

pub fn complex_add(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T>,

Element-wise complex addition with proper gradient tracking

Source

pub fn is_real(&self) -> Result<bool, TorshError>
where <T as ComplexElement>::Real: PartialEq + Zero,

Check if all elements in the tensor are real (imaginary part is zero)

Source

pub fn is_complex(&self) -> Result<bool, TorshError>
where <T as ComplexElement>::Real: PartialEq + Zero,

Check if any elements in the tensor are complex (imaginary part is non-zero)

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn cleanup_operation_refs(&mut self)

Clean up dead weak references in custom operations to improve memory efficiency

Source

pub fn from_data( data: Vec<T>, shape: Vec<usize>, device: DeviceType, ) -> Result<Tensor<T>, TorshError>

Create from raw data

Source

pub fn from_data_with_storage( data: Vec<T>, shape: Vec<usize>, device: DeviceType, use_memory_mapping: bool, ) -> Result<Tensor<T>, TorshError>

Create from raw data with explicit storage type

Source

pub fn from_data_memory_mapped( data: Vec<T>, shape: Vec<usize>, device: DeviceType, file_path: PathBuf, ) -> Result<Tensor<T>, TorshError>

Create from raw data with specified memory-mapped file path

Source

pub fn zeros( shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>

Create a tensor filled with zeros

Source

pub fn ones( shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>

Create a tensor filled with ones

Source

pub fn shape(&self) -> Shape

Get the shape of the tensor

Source

pub fn ndim(&self) -> usize

Get the number of dimensions

Source

pub fn numel(&self) -> usize

Get the total number of elements
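
§Examples
A short sketch tying the creation helpers to the shape queries above:

let z = Tensor::<f32>::zeros(&[2, 3], DeviceType::Cpu).unwrap();
assert_eq!(z.ndim(), 2);
assert_eq!(z.numel(), 6);
assert_eq!(z.size(0).unwrap(), 2);       // `size` is documented further down this page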

Source

pub fn dtype(&self) -> DType

Get the data type

Source

pub fn to_dtype(&self, dtype: DType) -> Result<Tensor<T>, TorshError>

Convert tensor to a different data type

Source

pub fn device(&self) -> DeviceType

Get the device

Source

pub fn get(&self, indices: &[usize]) -> Result<T, TorshError>
where T: Copy,

Get element at multi-dimensional index

Source

pub fn get_flat(&self, index: usize) -> Result<T, TorshError>
where T: Copy,

Get element at single flat index

Source

pub fn set(&self, indices: &[usize], value: T) -> Result<(), TorshError>
where T: Copy,

Set element at index (requires multi-dimensional indices for views)
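
§Examples
A hedged sketch of element access; note that `set` is documented as taking `&self`, so no mutable binding is used here:

let t = Tensor::<f32>::zeros(&[2, 2], DeviceType::Cpu).unwrap();
t.set(&[0, 1], 5.0).unwrap();
assert_eq!(t.get(&[0, 1]).unwrap(), 5.0);
assert_eq!(t.get(&[1, 0]).unwrap(), 0.0);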

Source

pub fn get_slice(&self, start: usize, len: usize) -> Result<Vec<T>, TorshError>
where T: Copy,

Get slice of elements

Source

pub fn set_slice(&self, start: usize, values: &[T]) -> Result<(), TorshError>
where T: Copy,

Set slice of elements

Source

pub fn to_vec(&self) -> Result<Vec<T>, TorshError>
where T: Copy,

Get all data as a vector (may be expensive for large memory-mapped tensors). For views, extracts only the data visible to this view.

Source

pub fn storage_type(&self) -> &'static str

Get storage type information

Source

pub fn memory_usage(&self) -> usize

Get estimated memory usage in bytes

Source

pub fn is_memory_mapped(&self) -> bool

Check if tensor uses memory mapping

Source

pub fn is_view(&self) -> bool

Check if this tensor is a view of another tensor

Source

pub fn strides(&self) -> Vec<usize>

Get the strides for this tensor (either custom strides for views or default contiguous strides)

Source

pub fn ones_like(&self) -> Result<Tensor<T>, TorshError>

Create a tensor of ones with the same shape as this tensor

Source

pub fn zeros_like(&self) -> Result<Tensor<T>, TorshError>

Create a tensor of zeros with the same shape as this tensor

Source

pub fn requires_grad_(self, requires_grad: bool) -> Tensor<T>

Set whether this tensor requires gradients

Source

pub fn requires_grad(&self) -> bool

Get whether this tensor requires gradients

Source

pub fn set_grad(&self, grad: Option<Tensor<T>>)

Set gradient tensor

Source

pub fn grad_mut(&mut self) -> Option<&mut Tensor<T>>

Get mutable access to gradient

Source

pub fn to<D>(self, device: D) -> Result<Tensor<T>, TorshError>
where D: Into<DeviceType>,

🚀 Enhanced device transfer with multi-backend GPU support. Automatically selects the optimal transfer strategy and backend.

Source

pub fn distribute_multi_gpu( &self, gpu_count: usize, ) -> Result<Vec<Tensor<T>>, TorshError>

🚀 Advanced multi-GPU distribution for parallel processing. Automatically distributes the tensor across multiple GPUs with an optimal strategy.

Source

pub fn get_optimal_gpu_backend() -> Option<GpuBackendType>

🚀 Get optimal GPU backend for current hardware

Source

pub fn zeros_gpu(shape: &[usize]) -> Result<Tensor<T>, TorshError>

🚀 Create tensor on optimal GPU device

Source

pub fn ones_gpu(shape: &[usize]) -> Result<Tensor<T>, TorshError>

🚀 Create tensor on optimal GPU device filled with ones

Source

pub fn detach(&self) -> Tensor<T>

Detach from the computation graph

Source

pub fn grad(&self) -> Option<Tensor<T>>

Get the gradient of this tensor (if it exists)

Source

pub fn has_grad(&self) -> bool

Check if this tensor has a gradient

Source

pub fn zero_grad(&mut self)

Zero the gradient

Source

pub fn backward(&self) -> Result<(), TorshError>
where T: FloatElement + Copy + Default + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Div<Output = T> + Clone + Debug, f32: From<T>,

Backward pass (compute gradients) - integrated with autograd system

Source

pub fn backward_with_grad( &self, _gradient: Option<&Tensor<T>>, ) -> Result<(), TorshError>
where T: FloatElement + Copy + Default + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Div<Output = T> + Clone + Debug, f32: From<T>,

Backward pass with gradient - integrated with autograd system

Source§

impl<T> Tensor<T>

Comparison operations for tensors

Source

pub fn gt(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise greater than comparison

Source

pub fn lt(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise less than comparison

Source

pub fn ge(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise greater than or equal comparison

Source

pub fn le(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise less than or equal comparison

Source

pub fn eq(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise equality comparison

Source

pub fn ne(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>

Element-wise inequality comparison

Source

pub fn eq_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialEq + Copy,

Scalar comparison methods. Element-wise equality comparison with a scalar.

Source

pub fn ne_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialEq + Copy,

Element-wise inequality comparison with scalar

Source

pub fn gt_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialOrd + Copy,

Element-wise greater than comparison with scalar
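
§Examples
A sketch combining a scalar comparison with boolean masking (`masked_select` is documented in the indexing section below):

let t = Tensor::from_data(vec![1.0f32, 4.0, 2.0, 5.0], vec![4], DeviceType::Cpu).unwrap();
let mask = t.gt_scalar(3.0).unwrap();
assert_eq!(mask.to_vec().unwrap(), vec![false, true, false, true]);
let picked = t.masked_select(&mask).unwrap();
assert_eq!(picked.to_vec().unwrap(), vec![4.0, 5.0]);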

Source

pub fn lt_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialOrd + Copy,

Element-wise less than comparison with scalar

Source

pub fn le_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialOrd + Copy,

Element-wise less than or equal comparison with scalar

Source

pub fn ge_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
where T: PartialOrd + Copy,

Element-wise greater than or equal comparison with scalar

Source§

impl<T> Tensor<T>
where T: TensorElement,

Shape manipulation operations for tensors

Source

pub fn flatten(&self) -> Result<Tensor<T>, TorshError>

Flatten tensor to 1D

Source

pub fn broadcast_to(&self, shape: &Shape) -> Result<Tensor<T>, TorshError>

Broadcast tensor to specified shape

Source

pub fn where_tensor( &self, condition: &Tensor<bool>, other: &Tensor<T>, ) -> Result<Tensor<T>, TorshError>

Conditional tensor selection - where condition is true, select from self, otherwise from other
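
§Examples
An illustrative sketch: where the condition is true the value comes from `self`, otherwise from `other`:

let a = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu).unwrap();
let b = Tensor::from_data(vec![10.0f32, 20.0], vec![2], DeviceType::Cpu).unwrap();
let cond = a.gt_scalar(1.5).unwrap();              // [false, true]
let out = a.where_tensor(&cond, &b).unwrap();
assert_eq!(out.to_vec().unwrap(), vec![10.0, 2.0]);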

Source

pub fn add_bias(&self, bias: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T>,

Add bias vector to tensor (element-wise addition)

Source§

impl Tensor<bool>

Logical operations for boolean tensors

Source

pub fn logical_and( &self, other: &Tensor<bool>, ) -> Result<Tensor<bool>, TorshError>

Element-wise logical AND operation

Source

pub fn logical_or( &self, other: &Tensor<bool>, ) -> Result<Tensor<bool>, TorshError>

Element-wise logical OR operation

Source

pub fn logical_xor( &self, other: &Tensor<bool>, ) -> Result<Tensor<bool>, TorshError>

Element-wise logical XOR operation

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn from_scalar( value: T, shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>
where T: Copy,

Create tensor from a scalar value repeated to fill the shape

Source

pub fn fill_(&mut self, value: T) -> Result<(), TorshError>
where T: Copy,

Fill tensor with a single value (in-place)

Source

pub fn zero_(&mut self) -> Result<(), TorshError>
where T: Copy,

Zero out the tensor (in-place)

Source

pub fn ones_(&mut self) -> Result<(), TorshError>
where T: Copy,

Fill with ones (in-place)

Source

pub fn copy_(&mut self, other: &Tensor<T>) -> Result<(), TorshError>
where T: Copy,

Copy data from another tensor (in-place)

Source

pub fn get_item(&self, indices: &[usize]) -> Result<T, TorshError>
where T: Copy,

Get an element by multi-dimensional index

Source

pub fn set_item( &mut self, indices: &[usize], value: T, ) -> Result<(), TorshError>
where T: Copy,

Set an element by multi-dimensional index

Source

pub fn get_item_flat(&self, index: usize) -> Result<T, TorshError>
where T: Copy,

Get element by flat index

Source

pub fn set_item_flat( &mut self, index: usize, value: T, ) -> Result<(), TorshError>
where T: Copy,

Set element by flat index

Source

pub fn multi_to_flat_index( &self, indices: &[usize], ) -> Result<usize, TorshError>

Convert multi-dimensional indices to flat index

Source

pub fn gather( &self, dim: usize, indices: &Tensor<i64>, ) -> Result<Tensor<T>, TorshError>

Gather values along an axis using indices
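
§Examples
A hedged 1-D sketch, assuming PyTorch-style gather semantics (output[i] = input[indices[i]] along the chosen dimension):

let t = Tensor::from_data(vec![10.0f32, 20.0, 30.0], vec![3], DeviceType::Cpu).unwrap();
let idx = Tensor::from_data(vec![2i64, 0], vec![2], DeviceType::Cpu).unwrap();
let g = t.gather(0, &idx).unwrap();
assert_eq!(g.to_vec().unwrap(), vec![30.0, 10.0]);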

Source

pub fn scatter( &self, dim: usize, indices: &Tensor<i64>, src: &Tensor<T>, ) -> Result<Tensor<T>, TorshError>

Scatter values along an axis using indices

Source

pub fn repeat(&self, repeats: &[usize]) -> Result<Tensor<T>, TorshError>

Repeat tensor along specified dimensions

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn add_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
where T: Copy + Add<Output = T>,

Add scalar to all elements in-place

Source

pub fn add_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
where T: Copy + Add<Output = T>,

Add scalar to all elements (returns new tensor)

Source

pub fn sub_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
where T: Copy + Sub<Output = T>,

Subtract scalar from all elements in-place

Source

pub fn sub_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
where T: Copy + Sub<Output = T>,

Subtract scalar from all elements (returns new tensor)

Source

pub fn mul_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
where T: Copy + Mul<Output = T>,

Multiply all elements by scalar in-place

Source

pub fn mul_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
where T: Copy + Mul<Output = T>,

Multiply all elements by scalar (returns new tensor)

Source

pub fn div_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
where T: Copy + Div<Output = T>,

Divide all elements by scalar in-place

Source

pub fn div_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
where T: Copy + Div<Output = T>,

Divide all elements by scalar (returns new tensor)

Source

pub fn add(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T>,

Element-wise addition with another tensor (supports broadcasting)
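
§Examples
A sketch of element-wise arithmetic; the operator form relies on the `Add`/`Mul` implementations for `&Tensor<T>` listed under “Trait Implementations”:

let a = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu).unwrap();
let b = Tensor::from_data(vec![3.0f32, 4.0], vec![2], DeviceType::Cpu).unwrap();
assert_eq!(a.add(&b).unwrap().to_vec().unwrap(), vec![4.0, 6.0]);
let prod = &a * &b;                                 // operator form returns a Tensor directly
assert_eq!(prod.to_vec().unwrap(), vec![3.0, 8.0]);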

Source

pub fn sub(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Sub<Output = T>,

Element-wise subtraction with another tensor

Source

pub fn mul(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T>,

Element-wise multiplication with another tensor

Source

pub fn div(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Div<Output = T>,

Element-wise division with another tensor

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy + Float,

Source

pub fn sqrt(&self) -> Result<Tensor<T>, TorshError>

Square root of all elements

Source

pub fn square(&self) -> Result<Tensor<T>, TorshError>

Square of all elements

Source

pub fn rsqrt(&self) -> Result<Tensor<T>, TorshError>

Reciprocal square root of all elements (1/sqrt(x))

Source

pub fn reciprocal(&self) -> Result<Tensor<T>, TorshError>

Reciprocal of all elements (1/x)

Source

pub fn exp(&self) -> Result<Tensor<T>, TorshError>

Exponential of all elements

Source

pub fn ln(&self) -> Result<Tensor<T>, TorshError>

Natural logarithm of all elements

Source

pub fn log10(&self) -> Result<Tensor<T>, TorshError>

Logarithm base 10 of all elements

Source

pub fn log2(&self) -> Result<Tensor<T>, TorshError>

Logarithm base 2 of all elements

Source

pub fn log(&self) -> Result<Tensor<T>, TorshError>

Natural logarithm of all elements

Source

pub fn sin(&self) -> Result<Tensor<T>, TorshError>

Sine of all elements

Source

pub fn cos(&self) -> Result<Tensor<T>, TorshError>

Cosine of all elements

Source

pub fn tan(&self) -> Result<Tensor<T>, TorshError>

Tangent of all elements

Source

pub fn gelu(&self) -> Result<Tensor<T>, TorshError>

GELU (Gaussian Error Linear Unit) activation function with GPU and SIMD optimization

Source

pub fn leaky_relu(&self, negative_slope: T) -> Result<Tensor<T>, TorshError>

Leaky ReLU activation function with negative slope

Source

pub fn asin(&self) -> Result<Tensor<T>, TorshError>

Arcsine of all elements

Source

pub fn acos(&self) -> Result<Tensor<T>, TorshError>

Arccosine of all elements

Source

pub fn atan(&self) -> Result<Tensor<T>, TorshError>

Arctangent of all elements

Source

pub fn sinh(&self) -> Result<Tensor<T>, TorshError>

Hyperbolic sine of all elements

Source

pub fn cosh(&self) -> Result<Tensor<T>, TorshError>

Hyperbolic cosine of all elements

Source

pub fn tanh(&self) -> Result<Tensor<T>, TorshError>

Hyperbolic tangent of all elements

Source

pub fn pow(&self, exponent: T) -> Result<Tensor<T>, TorshError>
where T: TensorElement + Into<f32>,

Power function (element-wise)

Source

pub fn pow_scalar(&self, exponent: T) -> Result<Tensor<T>, TorshError>
where T: TensorElement + Into<f32>,

Power function with scalar exponent (alias for pow)

Source

pub fn pow_tensor(&self, exponent: &Tensor<T>) -> Result<Tensor<T>, TorshError>

Power function with tensor exponents

Source

pub fn floor(&self) -> Result<Tensor<T>, TorshError>

Floor of all elements

Source

pub fn ceil(&self) -> Result<Tensor<T>, TorshError>

Ceiling of all elements

Source

pub fn round(&self) -> Result<Tensor<T>, TorshError>

Round to nearest integer

Source

pub fn trunc(&self) -> Result<Tensor<T>, TorshError>

Truncate to integer part

Source

pub fn fract(&self) -> Result<Tensor<T>, TorshError>

Fractional part

Source

pub fn neg(&self) -> Result<Tensor<T>, TorshError>
where T: Neg<Output = T>,

Negation of all elements

Source

pub fn sign(&self) -> Result<Tensor<T>, TorshError>

Sign of all elements (-1, 0, or 1)

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn add_op(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T>,

Add operation (used by autograd backward pass)

Source

pub fn mul_op(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T>,

Multiply operation (used by autograd backward pass)

Source

pub fn sigmoid(&self) -> Result<Tensor<T>, TorshError>
where T: FloatElement,

Sigmoid activation function with SIMD optimization

Source

pub fn relu(&self) -> Result<Tensor<T>, TorshError>
where T: PartialOrd + Zero,

ReLU activation function (Rectified Linear Unit) with SIMD optimization

Source

pub fn minimum(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: PartialOrd,

Element-wise minimum with another tensor

Source

pub fn maximum(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: PartialOrd,

Element-wise maximum with another tensor

Source

pub fn clamp(&self, min: T, max: T) -> Result<Tensor<T>, TorshError>
where T: PartialOrd + Copy,

Clamp tensor values between min and max bounds
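
§Examples
A short sketch of clamping values to the [0, 1] range:

let t = Tensor::from_data(vec![-1.0f32, 0.5, 2.0], vec![3], DeviceType::Cpu).unwrap();
let c = t.clamp(0.0, 1.0).unwrap();
assert_eq!(c.to_vec().unwrap(), vec![0.0, 0.5, 1.0]);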

Source

pub fn clamp_(&mut self, min: T, max: T) -> Result<(), TorshError>
where T: PartialOrd + Copy,

Clamp tensor values between min and max bounds (in-place)

Source

pub fn dot(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T> + Add<Output = T> + Zero,

Dot product with another tensor (for 1D tensors)
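
§Examples
A sketch of a 1-D dot product; the result is assumed to come back as a scalar tensor readable via `item`:

let a = Tensor::from_data(vec![1.0f32, 2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
let b = Tensor::from_data(vec![4.0f32, 5.0, 6.0], vec![3], DeviceType::Cpu).unwrap();
assert_eq!(a.dot(&b).unwrap().item().unwrap(), 32.0);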

Source§

impl<T> Tensor<T>

Source

pub fn add_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Add<Output = T> + Float,

Use SciRS2 backend for optimized tensor addition

Source

pub fn mul_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Mul<Output = T> + Float,

Use SciRS2 backend for optimized tensor multiplication

Source

pub fn sub_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Sub<Output = T> + Float,

Use SciRS2 backend for optimized tensor subtraction

Source

pub fn div_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
where T: Div<Output = T> + Float,

Use SciRS2 backend for optimized tensor division

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy + Float,

Source

pub fn reduce_memory_efficient<F>(&self, func: F) -> Result<T, TorshError>
where F: Fn(T, T) -> T + Send + Sync,

Memory-efficient reduction using SciRS2 intelligent chunking and lazy evaluation

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn size(&self, dim: i32) -> Result<usize, TorshError>

Get size of a specific dimension

Source

pub fn view(&self, shape: &[i32]) -> Result<Tensor<T>, TorshError>

Reshape the tensor

Source

pub fn view_as(&self, shape: &[usize]) -> Result<Tensor<T>, TorshError>

Create an efficient view with a different shape (shares data, no copying). This is the zero-copy version of view() for compatible shapes.

Source

pub fn slice_tensor( &self, dim: usize, start: usize, end: usize, ) -> Result<Tensor<T>, TorshError>

Create a view of a slice along a dimension (shares data, no copying)

Source

pub fn transpose_view( &self, dim0: usize, dim1: usize, ) -> Result<Tensor<T>, TorshError>

Create a transposed view (shares data, no copying)

Source

pub fn squeeze_tensor(&self, dim: usize) -> Result<Tensor<T>, TorshError>

Squeeze a tensor along a specific dimension (removes dimension of size 1)

Source

pub fn unsqueeze_tensor(&self, dim: usize) -> Result<Tensor<T>, TorshError>

Unsqueeze a tensor at a specific dimension (adds dimension of size 1)

Source

pub fn transpose(&self, dim0: i32, dim1: i32) -> Result<Tensor<T>, TorshError>

Transpose two dimensions (with data copying)

Source

pub fn permute(&self, dims: &[i32]) -> Result<Tensor<T>, TorshError>

Permute dimensions according to the given order

Source

pub fn squeeze(&self, dim: i32) -> Result<Tensor<T>, TorshError>

Squeeze dimension with size 1

Source

pub fn squeeze_all(&self) -> Result<Tensor<T>, TorshError>

Squeeze all dimensions with size 1

Source

pub fn unsqueeze(&self, dim: i32) -> Result<Tensor<T>, TorshError>

Add a dimension of size 1 at the specified position

Source

pub fn reshape(&self, shape: &[i32]) -> Result<Tensor<T>, TorshError>

Reshape tensor to new shape
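
§Examples
An illustrative reshape of a 2×3 tensor into 3×2; the element count must be preserved:

let t = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3], DeviceType::Cpu).unwrap();
let r = t.reshape(&[3, 2]).unwrap();
assert_eq!(r.ndim(), 2);
assert_eq!(r.size(0).unwrap(), 3);
assert_eq!(r.size(1).unwrap(), 2);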

Source

pub fn is_contiguous(&self) -> bool

Check if tensor is contiguous in memory

Source

pub fn contiguous(&self) -> Result<Tensor<T>, TorshError>

Make tensor contiguous if it isn’t already

Source

pub fn expand(&self, shape: &[usize]) -> Result<Tensor<T>, TorshError>

Expand tensor to a larger size

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn to_device( &self, target_device: DeviceType, ) -> Result<Tensor<T>, TorshError>

Transfer tensor to another device with optimization

Source

pub fn synchronize_devices( &self, devices: &[DeviceType], ) -> Result<(), TorshError>

Synchronize operations across devices

Source

pub fn can_transfer_efficiently(&self, target_device: DeviceType) -> bool

Check if tensor can be efficiently transferred to target device

Source

pub fn get_transfer_strategy( &self, target_device: DeviceType, ) -> TransferStrategy

Get optimal transfer strategy for device pair

Source§

impl Tensor<bf16>

Specialized bf16 arithmetic operations with proper rounding

Source

pub fn add_with_rounding( &self, other: &Tensor<bf16>, mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>

Add two bf16 tensors with specified rounding mode

Source

pub fn mul_with_rounding( &self, other: &Tensor<bf16>, mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>

Multiply two bf16 tensors with specified rounding mode

Source

pub fn fma_with_rounding( &self, other: &Tensor<bf16>, addend: &Tensor<bf16>, mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>

Fused multiply-add with proper bf16 rounding

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn optimize_cache_layout(&mut self) -> Result<(), TorshError>

Memory layout optimization for cache efficiency. Analyzes and optimizes the tensor’s memory layout to improve cache performance.

Source

pub fn analyze_cache_performance(&self) -> CacheAnalysisReport

Analyze memory access patterns and provide optimization recommendations

Source

pub fn to_cache_optimized(&self) -> Result<Tensor<T>, TorshError>

Create a cache-optimized copy of the tensor

Source

pub fn memory_stats(&self) -> MemoryStats

Get memory usage statistics for the tensor

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy + Default,

Source

pub fn optimize_memory_layout( &mut self, numa_hint: Option<NumaAllocationHint>, ) -> Result<(), TorshError>

Advanced memory optimization with NUMA awareness

Source

pub fn create_memory_mapped_optimized( data: Vec<T>, shape: Vec<usize>, numa_hint: Option<NumaAllocationHint>, ) -> Result<Tensor<T>, TorshError>

Memory-mapped tensor creation with optimization hints

Source

pub fn prefetch_data(&self) -> Result<(), TorshError>

Prefetch memory pages for better performance

Source§

impl<T> Tensor<T>
where T: FloatElement,

Source

pub fn conv1d( &self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: usize, padding: usize, dilation: usize, groups: usize, ) -> Result<Tensor<T>, TorshError>

1D convolution operation

Source

pub fn conv2d( &self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize), groups: usize, ) -> Result<Tensor<T>, TorshError>

2D convolution operation

Source

pub fn conv3d( &self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize, usize), padding: (usize, usize, usize), dilation: (usize, usize, usize), groups: usize, ) -> Result<Tensor<T>, TorshError>

3D convolution operation

Source

pub fn depthwise_conv2d( &self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize), ) -> Result<Tensor<T>, TorshError>

Depthwise 2D convolution operation. Each input channel is convolved with its own kernel independently.

Source

pub fn separable_conv2d( &self, depthwise_weight: &Tensor<T>, pointwise_weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize), ) -> Result<Tensor<T>, TorshError>

Separable 2D convolution operation. Factorized into a depthwise convolution followed by a pointwise (1x1) convolution.

Source

pub fn conv_transpose2d( &self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), output_padding: (usize, usize), dilation: (usize, usize), groups: usize, ) -> Result<Tensor<T>, TorshError>

Transposed (deconvolution) 2D convolution operation

Source

pub fn xcorr1d( &self, other: &Tensor<T>, mode: CorrelationMode, ) -> Result<Tensor<T>, TorshError>

1D cross-correlation operation. Computes the cross-correlation between two 1D signals.

Source

pub fn autocorr1d( &self, max_lag: Option<usize>, ) -> Result<Tensor<T>, TorshError>

1D auto-correlation operation. Computes the auto-correlation of a 1D signal.

Source

pub fn xcorr2d( &self, other: &Tensor<T>, mode: CorrelationMode, ) -> Result<Tensor<T>, TorshError>

2D cross-correlation operation. Computes the 2D cross-correlation between two signals.

Source

pub fn median_filter1d( &self, window_size: usize, ) -> Result<Tensor<T>, TorshError>

1D median filter. Applies a median filter with the specified window size.

Source

pub fn median_filter2d( &self, window_size: (usize, usize), ) -> Result<Tensor<T>, TorshError>

2D median filter. Applies a 2D median filter with the specified window size.

Source

pub fn gaussian_filter1d( &self, sigma: f32, kernel_size: Option<usize>, ) -> Result<Tensor<T>, TorshError>

1D Gaussian filter. Applies a Gaussian filter with the specified sigma (standard deviation).

Source

pub fn gaussian_filter2d( &self, sigma: (f32, f32), kernel_size: Option<(usize, usize)>, ) -> Result<Tensor<T>, TorshError>

2D Gaussian filter. Applies a 2D Gaussian filter with the specified sigma values.

Source§

impl<T> Tensor<T>
where T: TensorElement,

Indexing implementation

Source

pub fn index(&self, indices: &[TensorIndex]) -> Result<Tensor<T>, TorshError>

Index into the tensor

Source

pub fn get_1d(&self, index: usize) -> Result<T, TorshError>

Get a single element (1D indexing)

Source

pub fn get_2d(&self, row: usize, col: usize) -> Result<T, TorshError>

Get a single element (2D indexing)

Source

pub fn get_3d(&self, x: usize, y: usize, z: usize) -> Result<T, TorshError>

Get a single element (3D indexing)

Source

pub fn set_1d(&mut self, index: usize, value: T) -> Result<(), TorshError>

Set a single element (1D indexing)

Source

pub fn set_2d( &mut self, row: usize, col: usize, value: T, ) -> Result<(), TorshError>

Set a single element (2D indexing)

Source

pub fn set_3d( &mut self, x: usize, y: usize, z: usize, value: T, ) -> Result<(), TorshError>

Set a single element (3D indexing)

Source

pub fn select(&self, dim: i32, index: i64) -> Result<Tensor<T>, TorshError>

Select along a dimension
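
§Examples
A hedged sketch selecting one row of a 2×3 tensor (row-major storage assumed):

let t = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3], DeviceType::Cpu).unwrap();
let row = t.select(0, 1).unwrap();                 // second row
assert_eq!(row.to_vec().unwrap(), vec![4.0, 5.0, 6.0]);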

Source

pub fn slice_with_step( &self, dim: i32, start: Option<i64>, end: Option<i64>, step: Option<i64>, ) -> Result<Tensor<T>, TorshError>

Slice along a dimension with PyTorch-style parameters

Source

pub fn narrow( &self, dim: i32, start: i64, length: usize, ) -> Result<Tensor<T>, TorshError>

Narrow along a dimension

Source

pub fn masked_select( &self, mask: &Tensor<bool>, ) -> Result<Tensor<T>, TorshError>

Boolean indexing (masking)

Source

pub fn take(&self, indices: &Tensor<i64>) -> Result<Tensor<T>, TorshError>

Take values at indices

Source

pub fn put( &self, indices: &Tensor<i64>, values: &Tensor<T>, ) -> Result<Tensor<T>, TorshError>

Put values at indices

Source

pub fn index_select( &self, dim: i32, index: &Tensor<i64>, ) -> Result<Tensor<T>, TorshError>

Select indices along a dimension

Source§

impl<T> Tensor<T>
where T: TensorElement,

Convenient indexing syntax

Source

pub fn index_with_list( &self, dim: i32, indices: &[i64], ) -> Result<Tensor<T>, TorshError>

Advanced indexing with list of indices (fancy indexing)

Source

pub fn index_with_mask( &self, dim: i32, mask: &Tensor<bool>, ) -> Result<Tensor<T>, TorshError>

Boolean mask indexing for a specific dimension

Source

pub fn mask_select(&self, mask: &Tensor<bool>) -> Result<Tensor<T>, TorshError>

Global boolean mask indexing (flattens to 1D result)

Source

pub fn where_condition<F>( &self, condition: F, ) -> Result<Tensor<bool>, TorshError>
where F: Fn(&T) -> bool, T: Clone,

Create boolean mask from condition
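
§Examples
A sketch of building a boolean mask from a predicate closure:

let t = Tensor::from_data(vec![1.0f32, -2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
let mask = t.where_condition(|x| *x > 0.0).unwrap();
assert_eq!(mask.to_vec().unwrap(), vec![true, false, true]);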

Source

pub fn scatter_indexed( &self, dim: i32, index: &Tensor<i64>, src: &Tensor<T>, ) -> Result<Tensor<T>, TorshError>

Scatter values along an axis using indices (indexing version)

Source§

impl<T> Tensor<T>
where T: TensorElement,

✅ Enhanced Tensor creation interface with SciRS2 memory optimization

Source

pub fn create_efficient( shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>
where T: Clone + Default,

Create memory-efficient tensor with automatic strategy selection

Source

pub fn lazy( shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>
where T: Clone + Default,

Create lazy tensor that defers allocation until first access

Source

pub fn memory_mapped( shape: &[usize], device: DeviceType, ) -> Result<Tensor<T>, TorshError>
where T: Clone + Default,

✅ SciRS2 memory-mapped tensor for very large datasets (the zero-copy view of an existing tensor is disabled due to a conflict with shape_ops)

Source

pub fn chunked( shape: &[usize], chunk_size: usize, device: DeviceType, ) -> Result<Tensor<T>, TorshError>
where T: Clone + Default,

✅ SciRS2 Chunked Tensor for cache-efficient large data processing

Source

pub fn disk_backed( shape: &[usize], device: DeviceType, file_path: Option<&str>, ) -> Result<Tensor<T>, TorshError>
where T: Clone + Default,

✅ SciRS2 Disk-Backed Tensor for datasets larger than RAM

Source

pub fn process_chunked<F, R>( &self, chunk_size: usize, processor: F, ) -> Result<Vec<R>, TorshError>
where F: FnMut(&[T]) -> Result<R, TorshError>, T: Clone,

Process tensor in memory-efficient chunks
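
§Examples
A hedged sketch of chunked processing: each chunk is reduced to a partial sum and the partial results are combined afterwards:

let t = Tensor::<f32>::ones(&[8], DeviceType::Cpu).unwrap();
let partial_sums = t
    .process_chunked(4, |chunk| Ok(chunk.iter().copied().sum::<f32>()))
    .unwrap();
assert_eq!(partial_sums.iter().sum::<f32>(), 8.0);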

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy + Default,

Convenient functions for creating pooled tensors

Source

pub fn pooled( shape: &[usize], device: DeviceType, ) -> Result<PooledTensor<T>, TorshError>

Create a tensor using the memory pool

Source

pub fn temporary( shape: &[usize], device: DeviceType, ) -> Result<PooledTensor<T>, TorshError>

Create temporary tensor for intermediate calculations

Source§

impl<T> Tensor<T>

NaN/Inf detection utilities for tensors

Source

pub fn has_nan_inf(&self) -> bool

Quick check if tensor contains any NaN or infinite values (optimized fast path)

This is the fastest check: it returns true if any issues are found and false if the tensor is clean. No detailed information is provided.

§Examples
let clean = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
assert!(!clean.has_nan_inf());

let dirty = Tensor::from_data(vec![1.0, f32::NAN, 3.0], vec![3], DeviceType::Cpu).unwrap();
assert!(dirty.has_nan_inf());
Source

pub fn has_nan(&self) -> bool

Check for NaN values only

Source

pub fn has_inf(&self) -> bool

Check for infinite values only

Source

pub fn count_nan_inf(&self) -> NanInfStats

Count NaN and infinite values

Source

pub fn check_nan_inf_with_config(&self, config: &NanInfConfig) -> NanInfReport

Comprehensive NaN/Inf detection with detailed reporting

§Examples
let tensor = Tensor::from_data(
    vec![1.0, f32::NAN, f32::INFINITY, -f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();

let config = NanInfConfig::detailed();
let report = tensor.check_nan_inf_with_config(&config);

assert_eq!(report.stats.nan_count, 1);
assert_eq!(report.stats.pos_inf_count, 1);
assert_eq!(report.stats.neg_inf_count, 1);
assert_eq!(report.locations.len(), 3);
Source

pub fn assert_finite(&self)

Assert that tensor contains no NaN or infinite values

§Panics

Panics if any NaN or infinite values are found

§Examples
let tensor = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
tensor.assert_finite(); // OK

// This would panic:
// let bad = Tensor::from_data(vec![1.0, f32::NAN], vec![2], DeviceType::Cpu).unwrap();
// bad.assert_finite(); // Panics!
Source

pub fn replace_nan_inf( &self, nan_replacement: T, pos_inf_replacement: T, neg_inf_replacement: T, ) -> Result<Tensor<T>, TorshError>

Replace NaN and infinite values with specified replacements

§Examples
let mut tensor = Tensor::from_data(
    vec![1.0, f32::NAN, f32::INFINITY, -f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();

let cleaned = tensor.replace_nan_inf(0.0, 1e6, -1e6).unwrap();
assert!(!cleaned.has_nan_inf());
Source

pub fn nan_inf_mask(&self) -> Result<Tensor<bool>, TorshError>

Create a boolean mask indicating locations of NaN/Inf values

§Examples
let tensor = Tensor::from_data(
    vec![1.0, f32::NAN, 3.0, f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();

let mask = tensor.nan_inf_mask().unwrap();
let mask_data = mask.to_vec().unwrap();
assert_eq!(mask_data, vec![false, true, false, true]);
Source§

impl<T> Tensor<T>
where T: TensorElement + Into<f64> + From<f64>,

FFT operations for tensors

Source

pub fn fft(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Compute 1D FFT along the last dimension

Source

pub fn fft_with_plan( &self, plan: Option<&FFTPlan>, ) -> Result<Tensor<Complex<f64>>, TorshError>

Compute 1D FFT with a precomputed plan

Source

pub fn ifft(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute 1D inverse FFT along the last dimension

Source

pub fn fft2(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Compute 2D FFT on the last two dimensions

Source

pub fn ifft2(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute 2D inverse FFT on the last two dimensions

Source

pub fn fft_along_dim_real( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>

Compute FFT along a specific dimension for real tensors

Source

pub fn rfft(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Real-to-complex FFT (more efficient for real inputs)

Source

pub fn irfft(&self, output_size: Option<usize>) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Complex-to-real inverse FFT

Source

pub fn power_spectrum(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute power spectral density

Source

pub fn magnitude_spectrum(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute magnitude spectrum

Source

pub fn phase_spectrum(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute phase spectrum

Source§

impl<T> Tensor<T>
where T: TensorElement,

General tensor operations that don’t require Into

This impl block contains no items.

Source§

impl Tensor<Complex<f64>>

Operations specific to complex tensors

Source

pub fn from_complex_data( data: Vec<Complex<f64>>, shape: Vec<usize>, device: DeviceType, ) -> Result<Tensor<Complex<f64>>, TorshError>

Create tensor from complex data

Source

pub fn to_real<T>(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Convert complex tensor to real by taking the real part

Source

pub fn power_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute power spectrum from FFT result

Source

pub fn magnitude_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute magnitude spectrum from FFT result

Source

pub fn phase_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
where T: TensorElement + From<f64>,

Compute phase spectrum from FFT result

Source

pub fn fft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Compute FFT for complex data

Source

pub fn ifft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Compute inverse FFT for complex data

Source

pub fn ifft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>

Compute 2D inverse FFT for complex data

Source

pub fn ifft_along_dim( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>

Compute inverse FFT along a specific dimension

Source

pub fn fft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>

2D FFT for complex tensors

Source

pub fn fft_along_dim( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>

Compute FFT along a specific dimension for complex tensors

Source

pub fn fft_along_dim_complex( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>

Internal implementation of FFT along dimension for complex tensors

Source

pub fn slice_last_dim_complex( &self, start: usize, size: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>

Slice along the last dimension for complex tensors

Source§

impl<T> Tensor<T>
where T: Default + Add<Output = T> + AddAssign + Sub<Output = T> + TensorElement + Mul<Output = T> + FloatElement<Output = T> + MulAssign + Div + Copy + PartialOrd + FromPrimitive + Sum,

Statistical operations for tensors

Source

pub fn mean_stats( &self, dims: Option<&[usize]>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>

Compute mean along specified dimensions (legacy stats implementation)

Source

pub fn var( &self, dims: Option<&[usize]>, keepdim: bool, mode: StatMode, ) -> Result<Tensor<T>, TorshError>

Compute variance along specified dimensions

Source

pub fn std( &self, dims: Option<&[usize]>, keepdim: bool, mode: StatMode, ) -> Result<Tensor<T>, TorshError>

Compute standard deviation along specified dimensions

Source

pub fn percentile( &self, q: f64, dim: Option<usize>, _keepdim: bool, ) -> Result<Tensor<T>, TorshError>

Compute percentile along the last dimension

Source

pub fn median( &self, dim: Option<usize>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>

Compute median (50th percentile)

Source

pub fn quantile( &self, q: &[f64], dim: Option<usize>, keepdim: bool, ) -> Result<Vec<Tensor<T>>, TorshError>

Compute quantiles at specified levels

Source

pub fn histogram( &self, config: &HistogramConfig, ) -> Result<Histogram, TorshError>

Create histogram of tensor values

Source

pub fn correlation( &self, other: &Tensor<T>, method: CorrelationMethod, ) -> Result<T, TorshError>

Compute correlation coefficient with another tensor

Source

pub fn describe(&self) -> Result<StatSummary, TorshError>

Generate comprehensive statistical summary

Source

pub fn cov(&self, mode: StatMode) -> Result<Tensor<T>, TorshError>

Compute covariance matrix for 2D tensor (each column is a variable)

Source

pub fn corrcoef(&self) -> Result<Tensor<T>, TorshError>

Compute correlation matrix for 2D tensor

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Source

pub fn calculate_strides(&self) -> Vec<usize>

Calculate strides for current tensor shape

Source

pub fn create_view( &self, new_shape: &[usize], ) -> Result<TensorView<T>, TorshError>

Create a view of this tensor with a new shape (must have same number of elements)

Source

pub fn view_with_strides( &self, new_shape: &[usize], strides: &[usize], ) -> Result<TensorView<T>, TorshError>

Create a view with custom strides (advanced usage)

Source

pub fn slice( &self, dim: usize, start: usize, end: usize, ) -> Result<TensorView<T>, TorshError>

Create a slice view of the tensor along a specific dimension

Source

pub fn alias(&self) -> TensorAlias<T>

Create an alias (shared reference) to this tensor

Source

pub fn alias_mut(&mut self) -> TensorAlias<T>

Create a mutable alias to this tensor

Source§

impl Tensor

f32 tensor conversions with SIMD optimization

Source

pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>

Convert to f64 tensor with SIMD optimization

Source

pub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>

Convert to i32 tensor with SIMD optimization (with bounds checking)

Source§

impl Tensor<i32>

i32 tensor conversions with SIMD optimization

Source

pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>

Convert to f32 tensor with SIMD optimization

Source

pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>

Convert to f64 tensor with SIMD optimization

Source

pub fn to_i64_simd(&self) -> Result<Tensor<i64>, TorshError>

Convert to i64 tensor with SIMD optimization

Source§

impl Tensor<i64>

i64 tensor conversions with SIMD optimization

Source

pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>

Convert to f32 tensor with SIMD optimization

Source

pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>

Convert to f64 tensor with SIMD optimization

Source

pub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>

Convert to i32 tensor with SIMD optimization (with bounds checking)

Source§

impl Tensor<f64>

f64 tensor conversions with SIMD optimization

Source

pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>

Convert to f32 tensor with SIMD optimization

Source§

impl<T> Tensor<T>
where T: TensorElement + Copy,

Generic SIMD conversion implementations

Source

pub fn convert_with_optimal_simd<U>(&self) -> Result<Tensor<U>, TorshError>
where U: TensorElement + Copy + From<T>,

Convert tensor using optimal SIMD strategy for this system

Source

pub fn convert_with_strategy<U>( &self, strategy: SIMDStrategy, ) -> Result<Tensor<U>, TorshError>
where U: TensorElement + Copy + From<T>,

Convert tensor using specific SIMD strategy

Source§

impl<T> Tensor<T>
where T: TensorElement,

Source

pub fn from_vec(data: Vec<T>, shape: &[usize]) -> Result<Tensor<T>, TorshError>
where T: Copy,

Create from vec with shape (convenience method)
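
§Examples
A minimal sketch of the convenience constructor:

let t = Tensor::from_vec(vec![1.0f32, 2.0, 3.0, 4.0], &[2, 2]).unwrap();
assert_eq!(t.numel(), 4);
assert_eq!(t.ndim(), 2);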

Trait Implementations§

Source§

impl<T> Add for &Tensor<T>
where T: TensorElement + Copy + Add<Output = T>,

Source§

type Output = Tensor<T>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<T>) -> <&Tensor<T> as Add>::Output

Performs the + operation. Read more
Source§

impl BFloat16TensorOps<bf16> for Tensor<bf16>

Source§

fn to_bf16_with_rounding( &self, _mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>

Convert tensor to bf16 with specified rounding mode
Source§

fn to_f32(&self) -> Result<Tensor, TorshError>

Convert from bf16 tensor to higher precision
Source§

fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
where F: Fn(&Tensor) -> Result<Tensor, TorshError>,

Perform operation in higher precision then round back to bf16
Source§

impl BFloat16TensorOps<f32> for Tensor

Source§

fn to_bf16_with_rounding( &self, mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>

Convert tensor to bf16 with specified rounding mode
Source§

fn to_f32(&self) -> Result<Tensor, TorshError>

Convert from bf16 tensor to higher precision
Source§

fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
where F: Fn(&Tensor) -> Result<Tensor, TorshError>,

Perform operation in higher precision then round back to bf16
Source§

impl<T> Clone for Tensor<T>
where T: Clone + TensorElement,

Source§

fn clone(&self) -> Tensor<T>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<T> Debug for Tensor<T>
where T: TensorElement,

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
Source§

impl<T> Div for &Tensor<T>
where T: TensorElement + Copy + Div<Output = T>,

Source§

type Output = Tensor<T>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<T>) -> <&Tensor<T> as Div>::Output

Performs the / operation. Read more
Source§

impl<T> Mul for &Tensor<T>
where T: TensorElement + Copy + Mul<Output = T>,

Source§

type Output = Tensor<T>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<T>) -> <&Tensor<T> as Mul>::Output

Performs the * operation. Read more
Source§

impl<T> Neg for &Tensor<T>
where T: TensorElement + Copy + Neg<Output = T>,

Source§

type Output = Tensor<T>

The resulting type after applying the - operator.
Source§

fn neg(self) -> <&Tensor<T> as Neg>::Output

Performs the unary - operation. Read more
Source§

impl<T> Sub for &Tensor<T>
where T: TensorElement + Copy + Sub<Output = T>,

Source§

type Output = Tensor<T>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<T>) -> <&Tensor<T> as Sub>::Output

Performs the - operation. Read more
Source§

impl<T> TensorConvenience<T> for Tensor<T>

Source§

fn T(&self) -> Result<Tensor<T>, TorshError>

Transpose shortcut (equivalent to .transpose()) Read more
Source§

fn mT(&self) -> Result<Tensor<T>, TorshError>

Matrix transpose (alias for .T())
Source§

fn H(&self) -> Result<Tensor<T>, TorshError>

Hermitian transpose (conjugate transpose for complex numbers)
Source§

fn t(&self) -> Result<Tensor<T>, TorshError>

Transpose shortcut (snake_case version)
Source§

fn m_t(&self) -> Result<Tensor<T>, TorshError>

Matrix transpose (snake_case version)
Source§

fn h(&self) -> Result<Tensor<T>, TorshError>

Hermitian transpose (snake_case version)
Source§

fn detach(&self) -> Tensor<T>

Detach tensor from computational graph (creates a new tensor without gradients)
Source§

fn clone_tensor(&self) -> Result<Tensor<T>, TorshError>

Clone tensor data (creates a deep copy)
Source§

fn is_contiguous(&self) -> bool

Check if tensor is contiguous in memory
Source§

fn contiguous(&self) -> Result<Tensor<T>, TorshError>

Make tensor contiguous (reorganize memory layout)
Source§

fn numel(&self) -> usize

Get number of elements in tensor
Source§

fn size(&self) -> Vec<usize>

Get tensor size (alias for shape().dims())
Source§

fn is_empty(&self) -> bool

Check if tensor is empty (has zero elements)
Source§

fn is_scalar(&self) -> bool

Check if tensor is scalar (zero dimensions)
Source§

fn item(&self) -> T

Get tensor item as scalar (only works for scalar tensors)
Source§

fn to_scalar(&self) -> Result<T, TorshError>

Convert tensor to scalar (squeezes all dimensions of size 1 first)
Source§

impl<T> TensorCustomOps<T> for Tensor<T>
where T: TensorElement + 'static,

Source§

fn apply_custom_op( &self, op_name: &str, other_inputs: &[&Tensor<T>], params: &OperationParams, ) -> Result<Vec<Tensor<T>>, TorshError>

Apply a custom operation to this tensor Read more
Source§

fn apply_custom_op_with_registry( &self, registry: &CustomOperationRegistry, op_name: &str, other_inputs: &[&Tensor<T>], params: &OperationParams, ) -> Result<Vec<Tensor<T>>, TorshError>

Apply a custom operation using a specific registry
Source§

impl<T> TensorExpressionOps<T> for Tensor<T>
where T: TensorElement,

Source§

fn build_expression_graph(&self) -> ExpressionGraph

Build an expression graph from tensor operations
Source§

fn optimize_expressions( &self, config: OptimizerConfig, ) -> Result<OptimizationStats, TorshError>

Optimize tensor expressions using the expression optimizer
Source§

impl<T> TensorFluentExt<T> for Tensor<T>
where T: TensorElement,

Source§

fn fluent(self) -> FluentTensor<T>

Start fluent chaining
Source§

impl<T> TensorShapeConvenience<T> for Tensor<T>
where T: TensorElement + Copy,

Source§

fn unsqueeze_at(&self, dim: i32) -> Result<Tensor<T>, TorshError>

Add singleton dimension at specified position
Source§

fn squeeze_all(&self) -> Result<Tensor<T>, TorshError>

Remove all singleton dimensions
Source§

fn flatten(&self) -> Result<Tensor<T>, TorshError>

Flatten tensor to 1D (preserving total number of elements)
Source§

fn flatten_from(&self, start_dim: i32) -> Result<Tensor<T>, TorshError>

Flatten tensor starting from specified dimension
Source§

fn unflatten(&self, dim: i32, sizes: &[usize]) -> Result<Tensor<T>, TorshError>

Unflatten tensor back to specified shape

Auto Trait Implementations§

§

impl<T> Freeze for Tensor<T>

§

impl<T> RefUnwindSafe for Tensor<T>

§

impl<T> Send for Tensor<T>

§

impl<T> Sync for Tensor<T>

§

impl<T> Unpin for Tensor<T>

§

impl<T> UnwindSafe for Tensor<T>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V