pub struct Tensor<T = f32>
where
    T: TensorElement,
{ /* private fields */ }

The main Tensor type for ToRSh.

A tensor implementation with automatic memory mapping for large tensors and efficient views with reference counting.

Implementations

impl<T> Tensor<T> where T: FloatElement + Copy

pub fn scalar(value: T) -> Result<Tensor<T>, TorshError>
Create a 0-dimensional tensor (scalar) from a single value

pub fn max(&self, dim: Option<usize>, keepdim: bool) -> Result<Tensor<T>, TorshError>
Maximum element in the tensor, optionally along dimension dim (keepdim retains the reduced dimension)

impl<T> Tensor<T>
Boolean reduction operations for tensors

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn sum(&self) -> Result<Tensor<T>, TorshError>
Compute sum of all elements

pub fn sum_dim(&self, dims: &[i32], keepdim: bool) -> Result<Tensor<T>, TorshError>
Compute sum along specified dimensions

pub fn mean(&self, dims: Option<&[usize]>, keepdim: bool) -> Result<Tensor<T>, TorshError>
Compute mean along specified dimensions

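A brief usage sketch of the reduction methods above; the data, the [2, 2] row-major layout, and the CPU device are illustrative assumptions:

let x = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu).unwrap();
let total = x.sum().unwrap();                      // expected 10.0 as a 0-dim tensor
let col_sums = x.sum_dim(&[0], false).unwrap();    // shape [2]: [4.0, 6.0]
let row_means = x.mean(Some(&[1]), true).unwrap(); // shape [2, 1]: [[1.5], [3.5]]
assert_eq!(total.item().unwrap(), 10.0);
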
pub fn cumprod(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Compute cumulative product along specified dimension

pub fn sort(&self, _dim: Option<i32>, _descending: bool) -> Result<(Tensor<T>, Tensor<T>), TorshError>
Sort tensor along specified dimension

pub fn min(&self) -> Result<Tensor<T>, TorshError> where T: PartialOrd + Copy
Min reduction method without trait bounds (for Iterator compatibility)

pub fn t(&self) -> Result<Tensor<T>, TorshError>
Transpose operation (2D tensor)

Check if two tensors share the same underlying storage

pub fn data(&self) -> Result<Vec<T>, TorshError> where T: Copy
Get data as a vector (backward-compatibility method)

pub fn data_mut_apply<F>(&mut self, func: F) -> Result<(), TorshError>
Apply a function to all elements in-place using direct storage access

pub fn clone_data(&self) -> Tensor<T> where T: Copy
Clone the tensor with independent data (deep copy)

pub fn make_unique(&mut self) -> Result<(), TorshError>
Ensure the tensor has unique data (copy-on-write semantics)

pub fn apply_<F>(&mut self, func: F) -> Result<(), TorshError>
Apply a function in-place

pub fn map<F>(&self, func: F) -> Result<Tensor<T>, TorshError>
Apply a function element-wise to create a new tensor

pub fn item(&self) -> Result<T, TorshError> where T: Copy
Extract a scalar value from a single-element tensor

impl<T> Tensor<T>

pub fn norm(&self) -> Result<Tensor<T>, TorshError>
Compute the L2 norm of the tensor

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn matmul_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized matrix multiplication

pub fn sum_scirs2(&self) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized sum reduction

pub fn mean_scirs2(&self) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized mean reduction

pub fn relu_scirs2(&self) -> Result<Tensor<T>, TorshError> where T: PartialOrd + Zero
Use SciRS2 backend for optimized ReLU activation

pub fn sigmoid_scirs2(&self) -> Result<Tensor<T>, TorshError> where T: Float
Use SciRS2 backend for optimized sigmoid activation

pub fn tanh_scirs2(&self) -> Result<Tensor<T>, TorshError> where T: Float
Use SciRS2 backend for optimized tanh activation

pub fn softmax(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Softmax activation along the specified dimension. Computes softmax(x_i) = exp(x_i) / sum_j exp(x_j).

pub fn log_softmax(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Log-softmax activation along the specified dimension. Computes log_softmax(x_i) = log(softmax(x_i)).

pub fn cumsum(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Compute cumulative sum along a dimension

pub fn argmin(&self, dim: Option<i32>) -> Result<Tensor<i64>, TorshError> where T: PartialOrd + Copy
Find the indices of minimum values along a dimension

pub fn argmax(&self, dim: Option<i32>) -> Result<Tensor<i64>, TorshError> where T: PartialOrd + Copy
Find the indices of maximum values along a dimension

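A short sketch of the activation and index-reduction methods above; the input values and the [2, 3] row-major layout are illustrative assumptions:

let logits = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0, 3.0, 2.0, 1.0], vec![2, 3], DeviceType::Cpu).unwrap();
let probs = logits.softmax(1).unwrap();         // each row sums to 1.0
let log_probs = logits.log_softmax(1).unwrap(); // numerically stabler than probs.ln()
let best = logits.argmax(Some(1)).unwrap();     // Tensor<i64>, expected per-row indices [2, 0]
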
impl<T> Tensor<T> where T: ComplexElement + Copy

pub fn complex_conj(&self) -> Result<Tensor<T>, TorshError> where T: Copy
Complex conjugate for complex tensors

pub fn real(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>
Get the real part of a complex tensor

pub fn imag(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>
Get the imaginary part of a complex tensor

pub fn abs(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>
Get the magnitude (absolute value) of a complex tensor

pub fn angle(&self) -> Result<Tensor<<T as ComplexElement>::Real>, TorshError>
Get the phase (argument) of a complex tensor

pub fn complex(real: &Tensor<<T as ComplexElement>::Real>, imag: &Tensor<<T as ComplexElement>::Real>) -> Result<Tensor<T>, TorshError>
Create a complex tensor from real and imaginary parts

pub fn polar(magnitude: &Tensor<<T as ComplexElement>::Real>, phase: &Tensor<<T as ComplexElement>::Real>) -> Result<Tensor<T>, TorshError>
Create a complex tensor from a polar representation (magnitude and phase)

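A minimal sketch of building and decomposing a complex tensor with the constructors above, assuming Complex<f64> implements ComplexElement with Real = f64:

let re = Tensor::<f64>::from_data(vec![1.0, 0.0], vec![2], DeviceType::Cpu).unwrap();
let im = Tensor::<f64>::from_data(vec![0.0, 1.0], vec![2], DeviceType::Cpu).unwrap();
let z = Tensor::<Complex<f64>>::complex(&re, &im).unwrap(); // [1+0i, 0+1i]
let mag = z.abs().unwrap();     // [1.0, 1.0]
let phase = z.angle().unwrap(); // [0.0, pi/2]
let z2 = Tensor::<Complex<f64>>::polar(&mag, &phase).unwrap(); // should round-trip to z up to rounding
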
pub fn backward_complex(&self) -> Result<(), TorshError>
Backward pass for complex tensors (compute gradients).
Complex autograd follows PyTorch's approach, where gradients are computed by treating complex numbers as 2D vectors of real numbers.

pub fn complex_mul(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Element-wise complex multiplication with proper gradient tracking

pub fn complex_add(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: Add<Output = T>
Element-wise complex addition with proper gradient tracking

pub fn is_real(&self) -> Result<bool, TorshError>
Check if all elements in the tensor are real (imaginary part is zero)

pub fn is_complex(&self) -> Result<bool, TorshError>
Check if any elements in the tensor are complex (imaginary part is non-zero)

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn cleanup_operation_refs(&mut self)
Clean up dead weak references in custom operations to improve memory efficiency

pub fn from_data(data: Vec<T>, shape: Vec<usize>, device: DeviceType) -> Result<Tensor<T>, TorshError>
Create a tensor from raw data

pub fn from_data_with_storage(data: Vec<T>, shape: Vec<usize>, device: DeviceType, use_memory_mapping: bool) -> Result<Tensor<T>, TorshError>
Create a tensor from raw data with an explicit storage type

pub fn from_data_memory_mapped(data: Vec<T>, shape: Vec<usize>, device: DeviceType, file_path: PathBuf) -> Result<Tensor<T>, TorshError>
Create a tensor from raw data with a specified memory-mapped file path

pub fn zeros(shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError>
Create a tensor filled with zeros

pub fn ones(shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError>
Create a tensor filled with ones

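A construction sketch using the creation APIs above together with the element-wise addition listed later on this page; the values and shapes are illustrative assumptions:

let a = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu).unwrap();
let z = Tensor::<f32>::zeros(&[2, 2], DeviceType::Cpu).unwrap();
let o = Tensor::<f32>::ones(&[2, 2], DeviceType::Cpu).unwrap();
let b = a.add(&o).unwrap(); // element-wise: [[2.0, 3.0], [4.0, 5.0]]
assert_eq!(z.to_vec().unwrap(), vec![0.0; 4]);
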
pub fn to_dtype(&self, dtype: DType) -> Result<Tensor<T>, TorshError>
Convert the tensor to a different data type

pub fn device(&self) -> DeviceType
Get the device

pub fn get(&self, indices: &[usize]) -> Result<T, TorshError> where T: Copy
Get the element at a multi-dimensional index

pub fn get_flat(&self, index: usize) -> Result<T, TorshError> where T: Copy
Get the element at a single flat index

pub fn set(&self, indices: &[usize], value: T) -> Result<(), TorshError> where T: Copy
Set the element at an index (requires multi-dimensional indices for views)

pub fn get_slice(&self, start: usize, len: usize) -> Result<Vec<T>, TorshError> where T: Copy
Get a slice of elements

pub fn set_slice(&self, start: usize, values: &[T]) -> Result<(), TorshError> where T: Copy
Set a slice of elements

pub fn to_vec(&self) -> Result<Vec<T>, TorshError> where T: Copy
Get all data as a vector (may be expensive for large memory-mapped tensors). For views, extracts only the data visible to this view.

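An element-access sketch with get, get_flat, set, and to_vec; the [2, 3] row-major layout is an assumption, and set takes &self exactly as listed above:

let t = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3], DeviceType::Cpu).unwrap();
assert_eq!(t.get(&[1, 2]).unwrap(), 6.0); // row 1, column 2
assert_eq!(t.get_flat(4).unwrap(), 5.0);  // flat index into contiguous storage
t.set(&[0, 0], 10.0).unwrap();            // note: set takes &self in this API
assert_eq!(t.to_vec().unwrap()[0], 10.0);
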
pub fn storage_type(&self) -> &'static str
Get storage type information

pub fn memory_usage(&self) -> usize
Get estimated memory usage in bytes

pub fn is_memory_mapped(&self) -> bool
Check if tensor uses memory mapping

pub fn strides(&self) -> Vec<usize>
Get the strides for this tensor (either custom strides for views or default contiguous strides)

pub fn ones_like(&self) -> Result<Tensor<T>, TorshError>
Create a tensor of ones with the same shape as this tensor

pub fn zeros_like(&self) -> Result<Tensor<T>, TorshError>
Create a tensor of zeros with the same shape as this tensor

pub fn requires_grad_(self, requires_grad: bool) -> Tensor<T>
Set whether this tensor requires gradients

pub fn requires_grad(&self) -> bool
Get whether this tensor requires gradients

pub fn to<D>(self, device: D) -> Result<Tensor<T>, TorshError> where D: Into<DeviceType>
🚀 Enhanced device transfer with multi-backend GPU support. Automatically selects the optimal transfer strategy and backend.

pub fn distribute_multi_gpu(&self, gpu_count: usize) -> Result<Vec<Tensor<T>>, TorshError>
🚀 Advanced multi-GPU distribution for parallel processing. Automatically distributes the tensor across multiple GPUs with an optimal strategy.

pub fn get_optimal_gpu_backend() -> Option<GpuBackendType>
🚀 Get the optimal GPU backend for the current hardware

pub fn zeros_gpu(shape: &[usize]) -> Result<Tensor<T>, TorshError>
🚀 Create a tensor on the optimal GPU device

pub fn ones_gpu(shape: &[usize]) -> Result<Tensor<T>, TorshError>
🚀 Create a tensor on the optimal GPU device filled with ones

pub fn backward(&self) -> Result<(), TorshError>
Backward pass (compute gradients), integrated with the autograd system

pub fn backward_with_grad(&self, _gradient: Option<&Tensor<T>>) -> Result<(), TorshError>
Backward pass with an explicit gradient, integrated with the autograd system

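A minimal autograd sketch with requires_grad_ and backward; it assumes gradients are accumulated internally by the autograd system (gradient retrieval is not among the methods listed on this page):

let x = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0], vec![3], DeviceType::Cpu)
    .unwrap()
    .requires_grad_(true);
let y = x.mul_scalar(2.0).unwrap(); // y = 2x
let loss = y.sum().unwrap();        // scalar output
loss.backward().unwrap();           // triggers gradient computation through the autograd graph
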
impl<T> Tensor<T>
Comparison operations for tensors

pub fn gt(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise greater than comparison

pub fn lt(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise less than comparison

pub fn ge(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise greater than or equal comparison

pub fn le(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise less than or equal comparison

pub fn eq(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise equality comparison

pub fn ne(&self, other: &Tensor<T>) -> Result<Tensor<bool>, TorshError>
Element-wise inequality comparison

pub fn eq_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
Element-wise equality comparison with a scalar

pub fn ne_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError>
Element-wise inequality comparison with a scalar

pub fn gt_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError> where T: PartialOrd + Copy
Element-wise greater than comparison with a scalar

pub fn lt_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError> where T: PartialOrd + Copy
Element-wise less than comparison with a scalar

pub fn le_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError> where T: PartialOrd + Copy
Element-wise less than or equal comparison with a scalar

pub fn ge_scalar(&self, value: T) -> Result<Tensor<bool>, TorshError> where T: PartialOrd + Copy
Element-wise greater than or equal comparison with a scalar

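A comparison sketch combining tensor-tensor and tensor-scalar comparisons with the boolean masking listed later on this page; the values are illustrative assumptions:

let a = Tensor::<f32>::from_data(vec![1.0, 5.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
let b = Tensor::<f32>::from_data(vec![2.0, 2.0, 2.0], vec![3], DeviceType::Cpu).unwrap();
let mask = a.gt(&b).unwrap();              // Tensor<bool>: [false, true, true]
let big = a.masked_select(&mask).unwrap(); // 1D tensor: [5.0, 3.0]
let is_three = a.eq_scalar(3.0).unwrap();  // [false, false, true]
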
impl<T> Tensor<T> where T: TensorElement
Shape manipulation operations for tensors

pub fn flatten(&self) -> Result<Tensor<T>, TorshError>
Flatten the tensor to 1D

pub fn broadcast_to(&self, shape: &Shape) -> Result<Tensor<T>, TorshError>
Broadcast the tensor to the specified shape

pub fn where_tensor(&self, condition: &Tensor<bool>, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Conditional tensor selection: where the condition is true, select from self, otherwise from other

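A conditional-selection sketch with where_tensor; the inputs are illustrative, and the element-wise semantics follow the description above:

let x = Tensor::<f32>::from_data(vec![1.0, -2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
let zeros = x.zeros_like().unwrap();
let positive = x.gt(&zeros).unwrap();                       // [true, false, true]
let rectified = x.where_tensor(&positive, &zeros).unwrap(); // [1.0, 0.0, 3.0], i.e. a manual ReLU
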
impl Tensor<bool>
Logical operations for boolean tensors

pub fn logical_and(&self, other: &Tensor<bool>) -> Result<Tensor<bool>, TorshError>
Element-wise logical AND operation

pub fn logical_or(&self, other: &Tensor<bool>) -> Result<Tensor<bool>, TorshError>
Element-wise logical OR operation

pub fn logical_xor(&self, other: &Tensor<bool>) -> Result<Tensor<bool>, TorshError>
Element-wise logical XOR operation

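A boolean-tensor sketch combining scalar comparisons with the logical operations above; the thresholds and data are illustrative assumptions:

let v = Tensor::<f32>::from_data(vec![0.5, 1.5, 2.5, 3.5], vec![4], DeviceType::Cpu).unwrap();
let above_one = v.gt_scalar(1.0).unwrap();   // [false, true, true, true]
let below_three = v.lt_scalar(3.0).unwrap(); // [true, true, true, false]
let in_range = above_one.logical_and(&below_three).unwrap(); // [false, true, true, false]
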
impl<T> Tensor<T> where T: TensorElement + Copy

pub fn from_scalar(value: T, shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError> where T: Copy
Create a tensor from a scalar value repeated to fill the shape

pub fn fill_(&mut self, value: T) -> Result<(), TorshError> where T: Copy
Fill the tensor with a single value (in-place)

pub fn copy_(&mut self, other: &Tensor<T>) -> Result<(), TorshError> where T: Copy
Copy data from another tensor (in-place)

pub fn get_item(&self, indices: &[usize]) -> Result<T, TorshError> where T: Copy
Get an element by multi-dimensional index

pub fn set_item(&mut self, indices: &[usize], value: T) -> Result<(), TorshError> where T: Copy
Set an element by multi-dimensional index

pub fn get_item_flat(&self, index: usize) -> Result<T, TorshError> where T: Copy
Get an element by flat index

pub fn set_item_flat(&mut self, index: usize, value: T) -> Result<(), TorshError> where T: Copy
Set an element by flat index

pub fn multi_to_flat_index(&self, indices: &[usize]) -> Result<usize, TorshError>
Convert multi-dimensional indices to a flat index

pub fn gather(&self, dim: usize, indices: &Tensor<i64>) -> Result<Tensor<T>, TorshError>
Gather values along an axis using indices

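A gather sketch; the expected output assumes PyTorch-style semantics (out[i][j] = input[i][indices[i][j]] along dim 1), which is an interpretation of the one-line description above rather than something this page guarantees:

let src = Tensor::<f32>::from_data(vec![10.0, 20.0, 30.0, 40.0, 50.0, 60.0], vec![2, 3], DeviceType::Cpu).unwrap();
let idx = Tensor::<i64>::from_data(vec![2, 0, 1, 1], vec![2, 2], DeviceType::Cpu).unwrap();
let picked = src.gather(1, &idx).unwrap(); // expected [[30.0, 10.0], [50.0, 50.0]] under those semantics
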
impl<T> Tensor<T> where T: TensorElement + Copy

pub fn add_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
Add a scalar to all elements in-place

pub fn add_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
Add a scalar to all elements (returns a new tensor)

pub fn sub_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
Subtract a scalar from all elements in-place

pub fn sub_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
Subtract a scalar from all elements (returns a new tensor)

pub fn mul_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
Multiply all elements by a scalar in-place

pub fn mul_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
Multiply all elements by a scalar (returns a new tensor)

pub fn div_scalar_(&mut self, scalar: T) -> Result<(), TorshError>
Divide all elements by a scalar in-place

pub fn div_scalar(&self, scalar: T) -> Result<Tensor<T>, TorshError>
Divide all elements by a scalar (returns a new tensor)

pub fn add(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: Add<Output = T>
Element-wise addition with another tensor (supports broadcasting)

pub fn sub(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: Sub<Output = T>
Element-wise subtraction with another tensor

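An arithmetic sketch combining the scalar and element-wise operations above, including broadcasting; shapes and values are illustrative assumptions:

let m = Tensor::<f32>::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu).unwrap();
let row = Tensor::<f32>::from_data(vec![10.0, 20.0], vec![1, 2], DeviceType::Cpu).unwrap();
let shifted = m.add_scalar(1.0).unwrap();      // [[2.0, 3.0], [4.0, 5.0]]
let scaled = shifted.mul_scalar(0.5).unwrap(); // [[1.0, 1.5], [2.0, 2.5]]
let summed = m.add(&row).unwrap();             // broadcasts [1, 2] over [2, 2]
let diff = summed.sub(&m).unwrap();            // recovers the broadcast row values
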
impl<T> Tensor<T>

pub fn sqrt(&self) -> Result<Tensor<T>, TorshError>
Square root of all elements

pub fn square(&self) -> Result<Tensor<T>, TorshError>
Square of all elements

pub fn rsqrt(&self) -> Result<Tensor<T>, TorshError>
Reciprocal square root of all elements (1/sqrt(x))

pub fn reciprocal(&self) -> Result<Tensor<T>, TorshError>
Reciprocal of all elements (1/x)

pub fn exp(&self) -> Result<Tensor<T>, TorshError>
Exponential of all elements

pub fn ln(&self) -> Result<Tensor<T>, TorshError>
Natural logarithm of all elements

pub fn log10(&self) -> Result<Tensor<T>, TorshError>
Logarithm base 10 of all elements

pub fn log2(&self) -> Result<Tensor<T>, TorshError>
Logarithm base 2 of all elements

pub fn log(&self) -> Result<Tensor<T>, TorshError>
Natural logarithm of all elements

pub fn sin(&self) -> Result<Tensor<T>, TorshError>
Sine of all elements

pub fn cos(&self) -> Result<Tensor<T>, TorshError>
Cosine of all elements

pub fn tan(&self) -> Result<Tensor<T>, TorshError>
Tangent of all elements

pub fn gelu(&self) -> Result<Tensor<T>, TorshError>
GELU (Gaussian Error Linear Unit) activation function with GPU and SIMD optimization

pub fn leaky_relu(&self, negative_slope: T) -> Result<Tensor<T>, TorshError>
Leaky ReLU activation function with negative slope

pub fn asin(&self) -> Result<Tensor<T>, TorshError>
Arcsine of all elements

pub fn acos(&self) -> Result<Tensor<T>, TorshError>
Arccosine of all elements

pub fn atan(&self) -> Result<Tensor<T>, TorshError>
Arctangent of all elements

pub fn sinh(&self) -> Result<Tensor<T>, TorshError>
Hyperbolic sine of all elements

pub fn cosh(&self) -> Result<Tensor<T>, TorshError>
Hyperbolic cosine of all elements

pub fn tanh(&self) -> Result<Tensor<T>, TorshError>
Hyperbolic tangent of all elements

pub fn pow(&self, exponent: T) -> Result<Tensor<T>, TorshError>
Power function (element-wise)

pub fn pow_scalar(&self, exponent: T) -> Result<Tensor<T>, TorshError>
Power function with scalar exponent (alias for pow)

pub fn pow_tensor(&self, exponent: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Power function with tensor exponents

pub fn floor(&self) -> Result<Tensor<T>, TorshError>
Floor of all elements

pub fn ceil(&self) -> Result<Tensor<T>, TorshError>
Ceiling of all elements

pub fn round(&self) -> Result<Tensor<T>, TorshError>
Round to the nearest integer

pub fn trunc(&self) -> Result<Tensor<T>, TorshError>
Truncate to the integer part

pub fn fract(&self) -> Result<Tensor<T>, TorshError>
Fractional part of all elements

pub fn neg(&self) -> Result<Tensor<T>, TorshError> where T: Neg<Output = T>
Negation of all elements

pub fn sign(&self) -> Result<Tensor<T>, TorshError>
Sign of all elements (-1, 0, or 1)

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn add_op(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: Add<Output = T>
Add operation (used by the autograd backward pass)

pub fn mul_op(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: Mul<Output = T>
Multiply operation (used by the autograd backward pass)

pub fn sigmoid(&self) -> Result<Tensor<T>, TorshError> where T: FloatElement
Sigmoid activation function with SIMD optimization

pub fn relu(&self) -> Result<Tensor<T>, TorshError> where T: PartialOrd + Zero
ReLU activation function (Rectified Linear Unit) with SIMD optimization

pub fn minimum(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: PartialOrd
Element-wise minimum with another tensor

pub fn maximum(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError> where T: PartialOrd
Element-wise maximum with another tensor

pub fn clamp(&self, min: T, max: T) -> Result<Tensor<T>, TorshError> where T: PartialOrd + Copy
Clamp tensor values between min and max bounds

pub fn clamp_(&mut self, min: T, max: T) -> Result<(), TorshError> where T: PartialOrd + Copy
Clamp tensor values between min and max bounds (in-place)

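An activation sketch with the element-wise nonlinearities and clamping above; the input values are illustrative assumptions:

let x = Tensor::<f32>::from_data(vec![-2.0, -0.5, 0.0, 0.5, 2.0], vec![5], DeviceType::Cpu).unwrap();
let r = x.relu().unwrap();            // [0.0, 0.0, 0.0, 0.5, 2.0]
let s = x.sigmoid().unwrap();         // values in (0, 1)
let c = x.clamp(-1.0, 1.0).unwrap();  // [-1.0, -0.5, 0.0, 0.5, 1.0]
let lr = x.leaky_relu(0.01).unwrap(); // negative inputs scaled by 0.01
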
impl<T> Tensor<T>

pub fn add_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized tensor addition

pub fn mul_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized tensor multiplication

pub fn sub_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized tensor subtraction

pub fn div_scirs2(&self, other: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Use SciRS2 backend for optimized tensor division

impl<T> Tensor<T>

pub fn reduce_memory_efficient<F>(&self, func: F) -> Result<T, TorshError>
Memory-efficient reduction using SciRS2 intelligent chunking and lazy evaluation

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn view_as(&self, shape: &[usize]) -> Result<Tensor<T>, TorshError>
Create an efficient view with a different shape (shares data, no copying). This is the zero-copy version of view() for compatible shapes.

pub fn slice_tensor(&self, dim: usize, start: usize, end: usize) -> Result<Tensor<T>, TorshError>
Create a view of a slice along a dimension (shares data, no copying)

pub fn transpose_view(&self, dim0: usize, dim1: usize) -> Result<Tensor<T>, TorshError>
Create a transposed view (shares data, no copying)

pub fn squeeze_tensor(&self, dim: usize) -> Result<Tensor<T>, TorshError>
Squeeze the tensor along a specific dimension (removes a dimension of size 1)

pub fn unsqueeze_tensor(&self, dim: usize) -> Result<Tensor<T>, TorshError>
Unsqueeze the tensor at a specific dimension (adds a dimension of size 1)

pub fn transpose(&self, dim0: i32, dim1: i32) -> Result<Tensor<T>, TorshError>
Transpose two dimensions (with data copying)

pub fn permute(&self, dims: &[i32]) -> Result<Tensor<T>, TorshError>
Permute dimensions according to the given order

pub fn squeeze_all(&self) -> Result<Tensor<T>, TorshError>
Squeeze all dimensions with size 1

pub fn unsqueeze(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Add a dimension of size 1 at the specified position

pub fn reshape(&self, shape: &[i32]) -> Result<Tensor<T>, TorshError>
Reshape the tensor to a new shape

pub fn is_contiguous(&self) -> bool
Check if the tensor is contiguous in memory

pub fn contiguous(&self) -> Result<Tensor<T>, TorshError>
Make the tensor contiguous if it isn't already

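A shape-manipulation sketch contrasting zero-copy views with copying operations, as described above; the shapes are illustrative assumptions:

let t = Tensor::<f32>::from_data((0..24).map(|i| i as f32).collect(), vec![2, 3, 4], DeviceType::Cpu).unwrap();
let flat = t.view_as(&[24]).unwrap();          // zero-copy view over the same storage
let swapped = t.transpose_view(0, 2).unwrap(); // shape [4, 3, 2], strided view (typically non-contiguous)
let packed = swapped.contiguous().unwrap();    // materializes a contiguous copy if needed
let r = t.reshape(&[6, 4]).unwrap();           // shape [6, 4]
let batched = t.unsqueeze(0).unwrap();         // shape [1, 2, 3, 4]
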
impl<T> Tensor<T> where T: TensorElement + Copy

pub fn to_device(&self, target_device: DeviceType) -> Result<Tensor<T>, TorshError>
Transfer the tensor to another device with optimization

pub fn synchronize_devices(&self, devices: &[DeviceType]) -> Result<(), TorshError>
Synchronize operations across devices

pub fn can_transfer_efficiently(&self, target_device: DeviceType) -> bool
Check if the tensor can be efficiently transferred to the target device

pub fn get_transfer_strategy(&self, target_device: DeviceType) -> TransferStrategy
Get the optimal transfer strategy for a device pair

impl Tensor<bf16>
Specialized bf16 arithmetic operations with proper rounding

pub fn add_with_rounding(&self, other: &Tensor<bf16>, mode: BF16RoundingMode) -> Result<Tensor<bf16>, TorshError>
Add two bf16 tensors with the specified rounding mode

pub fn mul_with_rounding(&self, other: &Tensor<bf16>, mode: BF16RoundingMode) -> Result<Tensor<bf16>, TorshError>
Multiply two bf16 tensors with the specified rounding mode

pub fn fma_with_rounding(&self, other: &Tensor<bf16>, addend: &Tensor<bf16>, mode: BF16RoundingMode) -> Result<Tensor<bf16>, TorshError>
Fused multiply-add with proper bf16 rounding

impl<T> Tensor<T> where T: TensorElement + Copy

pub fn optimize_cache_layout(&mut self) -> Result<(), TorshError>
Memory layout optimization for cache efficiency. Analyzes and optimizes the tensor's memory layout to improve cache performance.

pub fn analyze_cache_performance(&self) -> CacheAnalysisReport
Analyze memory access patterns and provide optimization recommendations

pub fn to_cache_optimized(&self) -> Result<Tensor<T>, TorshError>
Create a cache-optimized copy of the tensor

pub fn memory_stats(&self) -> MemoryStats
Get memory usage statistics for the tensor

impl<T> Tensor<T>

pub fn optimize_memory_layout(&mut self, numa_hint: Option<NumaAllocationHint>) -> Result<(), TorshError>
Advanced memory optimization with NUMA awareness

pub fn create_memory_mapped_optimized(data: Vec<T>, shape: Vec<usize>, numa_hint: Option<NumaAllocationHint>) -> Result<Tensor<T>, TorshError>
Memory-mapped tensor creation with optimization hints

pub fn prefetch_data(&self) -> Result<(), TorshError>
Prefetch memory pages for better performance

impl<T> Tensor<T> where T: FloatElement

pub fn conv1d(&self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: usize, padding: usize, dilation: usize, groups: usize) -> Result<Tensor<T>, TorshError>
1D convolution operation

pub fn conv2d(&self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize), groups: usize) -> Result<Tensor<T>, TorshError>
2D convolution operation

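A conv2d sketch assuming NCHW input layout and OIHW weight layout; the layout convention is an assumption, since this page only lists the parameter types:

// Input: batch 1, 1 channel, 5x5 image. Weight: 1 output channel, 1 input channel, 3x3 kernel.
let input = Tensor::<f32>::ones(&[1, 1, 5, 5], DeviceType::Cpu).unwrap();
let weight = Tensor::<f32>::ones(&[1, 1, 3, 3], DeviceType::Cpu).unwrap();
let out = input.conv2d(&weight, None, (1, 1), (1, 1), (1, 1), 1).unwrap();
// With stride 1 and padding 1 the spatial size is preserved: expected shape [1, 1, 5, 5].
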
pub fn conv3d(&self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize, usize), padding: (usize, usize, usize), dilation: (usize, usize, usize), groups: usize) -> Result<Tensor<T>, TorshError>
3D convolution operation

pub fn depthwise_conv2d(&self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize)) -> Result<Tensor<T>, TorshError>
Depthwise 2D convolution operation. Each input channel is convolved with its own kernel independently.

pub fn separable_conv2d(&self, depthwise_weight: &Tensor<T>, pointwise_weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), dilation: (usize, usize)) -> Result<Tensor<T>, TorshError>
Separable 2D convolution operation, factorized into a depthwise convolution followed by a pointwise (1x1) convolution

pub fn conv_transpose2d(&self, weight: &Tensor<T>, bias: Option<&Tensor<T>>, stride: (usize, usize), padding: (usize, usize), output_padding: (usize, usize), dilation: (usize, usize), groups: usize) -> Result<Tensor<T>, TorshError>
Transposed 2D convolution (deconvolution) operation

pub fn xcorr1d(&self, other: &Tensor<T>, mode: CorrelationMode) -> Result<Tensor<T>, TorshError>
1D cross-correlation operation. Computes the cross-correlation between two 1D signals.

pub fn autocorr1d(&self, max_lag: Option<usize>) -> Result<Tensor<T>, TorshError>
1D auto-correlation operation. Computes the auto-correlation of a 1D signal.

pub fn xcorr2d(&self, other: &Tensor<T>, mode: CorrelationMode) -> Result<Tensor<T>, TorshError>
2D cross-correlation operation. Computes the 2D cross-correlation between two signals.

pub fn median_filter1d(&self, window_size: usize) -> Result<Tensor<T>, TorshError>
1D median filter. Applies a median filter with the specified window size.

pub fn median_filter2d(&self, window_size: (usize, usize)) -> Result<Tensor<T>, TorshError>
2D median filter. Applies a 2D median filter with the specified window size.

pub fn gaussian_filter1d(&self, sigma: f32, kernel_size: Option<usize>) -> Result<Tensor<T>, TorshError>
1D Gaussian filter. Applies a Gaussian filter with the specified sigma (standard deviation).

pub fn gaussian_filter2d(&self, sigma: (f32, f32), kernel_size: Option<(usize, usize)>) -> Result<Tensor<T>, TorshError>
2D Gaussian filter. Applies a 2D Gaussian filter with the specified sigma values.

impl<T> Tensor<T> where T: TensorElement
Indexing implementation

pub fn index(&self, indices: &[TensorIndex]) -> Result<Tensor<T>, TorshError>
Index into the tensor

pub fn get_1d(&self, index: usize) -> Result<T, TorshError>
Get a single element (1D indexing)

pub fn get_2d(&self, row: usize, col: usize) -> Result<T, TorshError>
Get a single element (2D indexing)

pub fn get_3d(&self, x: usize, y: usize, z: usize) -> Result<T, TorshError>
Get a single element (3D indexing)

pub fn set_1d(&mut self, index: usize, value: T) -> Result<(), TorshError>
Set a single element (1D indexing)

pub fn set_2d(&mut self, row: usize, col: usize, value: T) -> Result<(), TorshError>
Set a single element (2D indexing)

pub fn set_3d(&mut self, x: usize, y: usize, z: usize, value: T) -> Result<(), TorshError>
Set a single element (3D indexing)

pub fn select(&self, dim: i32, index: i64) -> Result<Tensor<T>, TorshError>
Select along a dimension

pub fn slice_with_step(&self, dim: i32, start: Option<i64>, end: Option<i64>, step: Option<i64>) -> Result<Tensor<T>, TorshError>
Slice along a dimension with PyTorch-style parameters

pub fn narrow(&self, dim: i32, start: i64, length: usize) -> Result<Tensor<T>, TorshError>
Narrow along a dimension

pub fn masked_select(&self, mask: &Tensor<bool>) -> Result<Tensor<T>, TorshError>
Boolean indexing (masking)

pub fn take(&self, indices: &Tensor<i64>) -> Result<Tensor<T>, TorshError>

pub fn put(&self, indices: &Tensor<i64>, values: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Put values at indices

pub fn index_select(&self, dim: i32, index: &Tensor<i64>) -> Result<Tensor<T>, TorshError>
Select indices along a dimension

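An indexing sketch with select, narrow, slice_with_step, and index_select; the expected results assume row-major layout and PyTorch-style semantics:

let t = Tensor::<f32>::from_data((0..12).map(|i| i as f32).collect(), vec![3, 4], DeviceType::Cpu).unwrap();
let row1 = t.select(0, 1).unwrap();                                // shape [4]: [4.0, 5.0, 6.0, 7.0]
let mid_cols = t.narrow(1, 1, 2).unwrap();                         // shape [3, 2], columns 1..3
let evens = t.slice_with_step(1, Some(0), None, Some(2)).unwrap(); // columns 0 and 2
let idx = Tensor::<i64>::from_data(vec![2, 0], vec![2], DeviceType::Cpu).unwrap();
let picked_rows = t.index_select(0, &idx).unwrap();                // rows 2 and 0
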
impl<T> Tensor<T> where T: TensorElement
Convenient indexing syntax

pub fn index_with_list(&self, dim: i32, indices: &[i64]) -> Result<Tensor<T>, TorshError>
Advanced indexing with a list of indices (fancy indexing)

pub fn index_with_mask(&self, dim: i32, mask: &Tensor<bool>) -> Result<Tensor<T>, TorshError>
Boolean mask indexing for a specific dimension

pub fn mask_select(&self, mask: &Tensor<bool>) -> Result<Tensor<T>, TorshError>
Global boolean mask indexing (flattens to a 1D result)

pub fn where_condition<F>(&self, condition: F) -> Result<Tensor<bool>, TorshError>
Create a boolean mask from a condition

pub fn scatter_indexed(&self, dim: i32, index: &Tensor<i64>, src: &Tensor<T>) -> Result<Tensor<T>, TorshError>
Scatter values along an axis using indices (indexing version)

impl<T> Tensor<T> where T: TensorElement
✅ Enhanced Tensor creation interface with SciRS2 memory optimization

pub fn create_efficient(shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError>
Create a memory-efficient tensor with automatic strategy selection

pub fn lazy(shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError>
Create a lazy tensor that defers allocation until first access

pub fn memory_mapped(shape: &[usize], device: DeviceType) -> Result<Tensor<T>, TorshError>
✅ SciRS2 memory-mapped tensor for very large datasets (a zero-copy view constructor is disabled here due to a conflict with shape_ops)

pub fn chunked(shape: &[usize], chunk_size: usize, device: DeviceType) -> Result<Tensor<T>, TorshError>
✅ SciRS2 chunked tensor for cache-efficient large data processing

pub fn disk_backed(shape: &[usize], device: DeviceType, file_path: Option<&str>) -> Result<Tensor<T>, TorshError>
✅ SciRS2 disk-backed tensor for datasets larger than RAM

pub fn process_chunked<F, R>(&self, chunk_size: usize, processor: F) -> Result<Vec<R>, TorshError>
Process the tensor in memory-efficient chunks

impl<T> Tensor<T>
Convenience functions for creating pooled tensors

pub fn pooled(shape: &[usize], device: DeviceType) -> Result<PooledTensor<T>, TorshError>
Create a tensor using the memory pool

pub fn temporary(shape: &[usize], device: DeviceType) -> Result<PooledTensor<T>, TorshError>
Create a temporary tensor for intermediate calculations

impl<T> Tensor<T> where T: TensorElement + FloatElement
NaN/Inf detection utilities for tensors

pub fn has_nan_inf(&self) -> bool
Quick check if the tensor contains any NaN or infinite values (optimized fast path).
This is the fastest check: it returns true if any issues are found and false if the tensor is clean. No detailed information is provided.
§Examples
let clean = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
assert!(!clean.has_nan_inf());
let dirty = Tensor::from_data(vec![1.0, f32::NAN, 3.0], vec![3], DeviceType::Cpu).unwrap();
assert!(dirty.has_nan_inf());

pub fn count_nan_inf(&self) -> NanInfStats
Count NaN and infinite values

pub fn check_nan_inf_with_config(&self, config: &NanInfConfig) -> NanInfReport
Comprehensive NaN/Inf detection with detailed reporting
§Examples
let tensor = Tensor::from_data(
    vec![1.0, f32::NAN, f32::INFINITY, -f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();
let config = NanInfConfig::detailed();
let report = tensor.check_nan_inf_with_config(&config);
assert_eq!(report.stats.nan_count, 1);
assert_eq!(report.stats.pos_inf_count, 1);
assert_eq!(report.stats.neg_inf_count, 1);
assert_eq!(report.locations.len(), 3);

pub fn assert_finite(&self)
Assert that tensor contains no NaN or infinite values
§Panics
Panics if any NaN or infinite values are found
§Examples
let tensor = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3], DeviceType::Cpu).unwrap();
tensor.assert_finite(); // OK
// This would panic:
// let bad = Tensor::from_data(vec![1.0, f32::NAN], vec![2], DeviceType::Cpu).unwrap();
// bad.assert_finite(); // Panics!

pub fn replace_nan_inf(&self, nan_replacement: T, pos_inf_replacement: T, neg_inf_replacement: T) -> Result<Tensor<T>, TorshError>
Replace NaN and infinite values with specified replacements
§Examples
let tensor = Tensor::from_data(
    vec![1.0, f32::NAN, f32::INFINITY, -f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();
let cleaned = tensor.replace_nan_inf(0.0, 1e6, -1e6).unwrap();
assert!(!cleaned.has_nan_inf());

pub fn nan_inf_mask(&self) -> Result<Tensor<bool>, TorshError>
Create a boolean mask indicating the locations of NaN/Inf values
§Examples
let tensor = Tensor::from_data(
    vec![1.0, f32::NAN, 3.0, f32::INFINITY],
    vec![4],
    DeviceType::Cpu
).unwrap();
let mask = tensor.nan_inf_mask().unwrap();
let mask_data = mask.to_vec().unwrap();
assert_eq!(mask_data, vec![false, true, false, true]);

impl<T> Tensor<T>
FFT operations for tensors

pub fn fft(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Compute the 1D FFT along the last dimension

pub fn fft_with_plan(&self, plan: Option<&FFTPlan>) -> Result<Tensor<Complex<f64>>, TorshError>
Compute the 1D FFT with a precomputed plan

pub fn ifft(&self) -> Result<Tensor<T>, TorshError>
Compute the 1D inverse FFT along the last dimension

pub fn fft2(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Compute the 2D FFT on the last two dimensions

pub fn ifft2(&self) -> Result<Tensor<T>, TorshError>
Compute the 2D inverse FFT on the last two dimensions

pub fn fft_along_dim_real(&self, dim: usize) -> Result<Tensor<Complex<f64>>, TorshError>
Compute the FFT along a specific dimension for real tensors

pub fn rfft(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Real-to-complex FFT (more efficient for real inputs)

pub fn irfft(&self, output_size: Option<usize>) -> Result<Tensor<T>, TorshError>
Complex-to-real inverse FFT

pub fn power_spectrum(&self) -> Result<Tensor<T>, TorshError>
Compute the power spectral density

pub fn magnitude_spectrum(&self) -> Result<Tensor<T>, TorshError>
Compute the magnitude spectrum

pub fn phase_spectrum(&self) -> Result<Tensor<T>, TorshError>
Compute the phase spectrum

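A signal-processing sketch with the real-input FFT helpers above; the signal values are illustrative, and the output-length conventions are assumed to follow the usual FFT behavior:

let signal = Tensor::<f64>::from_data(vec![0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0], vec![8], DeviceType::Cpu).unwrap();
let spectrum = signal.fft().unwrap();            // Tensor<Complex<f64>> with 8 frequency bins
let power = signal.power_spectrum().unwrap();    // energy per frequency bin
let restored = spectrum.ifft_complex().unwrap(); // inverse FFT of the complex spectrum
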
impl<T> Tensor<T> where T: TensorElement
General tensor operations that don't require Into

impl Tensor<Complex<f64>>
Operations specific to complex tensors

Sourcepub fn from_complex_data(
data: Vec<Complex<f64>>,
shape: Vec<usize>,
device: DeviceType,
) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn from_complex_data( data: Vec<Complex<f64>>, shape: Vec<usize>, device: DeviceType, ) -> Result<Tensor<Complex<f64>>, TorshError>
Create tensor from complex data
Sourcepub fn to_real<T>(&self) -> Result<Tensor<T>, TorshError>
pub fn to_real<T>(&self) -> Result<Tensor<T>, TorshError>
Convert complex tensor to real by taking the real part
Sourcepub fn power_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
pub fn power_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
Compute power spectrum from FFT result
Sourcepub fn magnitude_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
pub fn magnitude_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
Compute magnitude spectrum from FFT result
Sourcepub fn phase_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
pub fn phase_spectrum_from_fft<T>(&self) -> Result<Tensor<T>, TorshError>
Compute phase spectrum from FFT result
Sourcepub fn fft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn fft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Compute FFT for complex data
Sourcepub fn ifft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn ifft_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Compute inverse FFT for complex data
Sourcepub fn ifft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn ifft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
Compute 2D inverse FFT for complex data
Sourcepub fn ifft_along_dim(
&self,
dim: usize,
) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn ifft_along_dim( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>
Compute inverse FFT along a specific dimension
Sourcepub fn fft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn fft2_complex(&self) -> Result<Tensor<Complex<f64>>, TorshError>
2D FFT for complex tensors
Sourcepub fn fft_along_dim(
&self,
dim: usize,
) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn fft_along_dim( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>
Compute FFT along a specific dimension for complex tensors
Sourcepub fn fft_along_dim_complex(
&self,
dim: usize,
) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn fft_along_dim_complex( &self, dim: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>
Internal implementation of FFT along dimension for complex tensors
Sourcepub fn slice_last_dim_complex(
&self,
start: usize,
size: usize,
) -> Result<Tensor<Complex<f64>>, TorshError>
pub fn slice_last_dim_complex( &self, start: usize, size: usize, ) -> Result<Tensor<Complex<f64>>, TorshError>
Slice along the last dimension for complex tensors
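Example (sketch): building a complex tensor and running a forward/inverse complex FFT with the constructors listed above. The import paths, the num_complex::Complex type, and the DeviceType::Cpu variant name are assumptions.

use num_complex::Complex;
use torsh::{DeviceType, Tensor, TorshError};

fn complex_round_trip() -> Result<(), TorshError> {
    // A small 1-D complex signal.
    let data: Vec<Complex<f64>> = (0..8).map(|i| Complex::new(i as f64, 0.0)).collect();

    // DeviceType::Cpu is an assumed variant name.
    let t = Tensor::<Complex<f64>>::from_complex_data(data, vec![8], DeviceType::Cpu)?;

    // Forward and inverse complex FFT.
    let freq = t.fft_complex()?;
    let back = freq.ifft_complex()?;

    // Drop the (near-zero) imaginary parts to get a real tensor back.
    let real: Tensor<f64> = back.to_real()?;
    let _ = real;
    Ok(())
}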
Source§impl<T> Tensor<T>where
T: Default + Add<Output = T> + AddAssign + Sub<Output = T> + TensorElement + Mul<Output = T> + FloatElement<Output = T> + MulAssign + Div + Copy + PartialOrd + FromPrimitive + Sum,
Statistical operations for tensors
impl<T> Tensor<T>where
T: Default + Add<Output = T> + AddAssign + Sub<Output = T> + TensorElement + Mul<Output = T> + FloatElement<Output = T> + MulAssign + Div + Copy + PartialOrd + FromPrimitive + Sum,
Statistical operations for tensors
Sourcepub fn mean_stats(
&self,
dims: Option<&[usize]>,
keepdim: bool,
) -> Result<Tensor<T>, TorshError>
pub fn mean_stats( &self, dims: Option<&[usize]>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>
Compute mean along specified dimensions (legacy stats implementation)
Sourcepub fn var(
&self,
dims: Option<&[usize]>,
keepdim: bool,
mode: StatMode,
) -> Result<Tensor<T>, TorshError>
pub fn var( &self, dims: Option<&[usize]>, keepdim: bool, mode: StatMode, ) -> Result<Tensor<T>, TorshError>
Compute variance along specified dimensions
Sourcepub fn std(
&self,
dims: Option<&[usize]>,
keepdim: bool,
mode: StatMode,
) -> Result<Tensor<T>, TorshError>
pub fn std( &self, dims: Option<&[usize]>, keepdim: bool, mode: StatMode, ) -> Result<Tensor<T>, TorshError>
Compute standard deviation along specified dimensions
Sourcepub fn percentile(
&self,
q: f64,
dim: Option<usize>,
_keepdim: bool,
) -> Result<Tensor<T>, TorshError>
pub fn percentile( &self, q: f64, dim: Option<usize>, _keepdim: bool, ) -> Result<Tensor<T>, TorshError>
Compute percentile along the last dimension
Sourcepub fn median(
&self,
dim: Option<usize>,
keepdim: bool,
) -> Result<Tensor<T>, TorshError>
pub fn median( &self, dim: Option<usize>, keepdim: bool, ) -> Result<Tensor<T>, TorshError>
Compute median (50th percentile)
Sourcepub fn quantile(
&self,
q: &[f64],
dim: Option<usize>,
keepdim: bool,
) -> Result<Vec<Tensor<T>>, TorshError>
pub fn quantile( &self, q: &[f64], dim: Option<usize>, keepdim: bool, ) -> Result<Vec<Tensor<T>>, TorshError>
Compute quantiles at specified levels
Sourcepub fn histogram(
&self,
config: &HistogramConfig,
) -> Result<Histogram, TorshError>
pub fn histogram( &self, config: &HistogramConfig, ) -> Result<Histogram, TorshError>
Create histogram of tensor values
Sourcepub fn correlation(
&self,
other: &Tensor<T>,
method: CorrelationMethod,
) -> Result<T, TorshError>
pub fn correlation( &self, other: &Tensor<T>, method: CorrelationMethod, ) -> Result<T, TorshError>
Compute correlation coefficient with another tensor
Sourcepub fn describe(&self) -> Result<StatSummary, TorshError>
pub fn describe(&self) -> Result<StatSummary, TorshError>
Generate comprehensive statistical summary
Sourcepub fn cov(&self, mode: StatMode) -> Result<Tensor<T>, TorshError>
pub fn cov(&self, mode: StatMode) -> Result<Tensor<T>, TorshError>
Compute covariance matrix for 2D tensor (each column is a variable)
Sourcepub fn corrcoef(&self) -> Result<Tensor<T>, TorshError>
pub fn corrcoef(&self) -> Result<Tensor<T>, TorshError>
Compute correlation matrix for 2D tensor
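Example (sketch): the statistical methods above applied to f32 tensors, assuming f32 satisfies the listed trait bounds. The import paths and the StatMode::Sample / CorrelationMethod::Pearson variant names are assumptions; the call shapes follow the signatures shown.

use torsh::{CorrelationMethod, StatMode, Tensor, TorshError};

fn summarise(x: &Tensor<f32>, y: &Tensor<f32>) -> Result<(), TorshError> {
    // Dispersion along dimension 0, dropping the reduced axis (keepdim = false).
    let _variance = x.var(Some(&[0]), false, StatMode::Sample)?;
    let _std_dev = x.std(Some(&[0]), false, StatMode::Sample)?;

    // Order statistics over the whole tensor (dim = None).
    let _median = x.median(None, false)?;
    let _quartiles = x.quantile(&[0.25, 0.5, 0.75], None, false)?;

    // Scalar correlation with a second tensor of the same shape.
    let _r: f32 = x.correlation(y, CorrelationMethod::Pearson)?;

    // A full summary, plus covariance and correlation matrices; the matrix
    // helpers expect a 2-D tensor whose columns are variables.
    let _summary = x.describe()?;
    let _cov = x.cov(StatMode::Sample)?;
    let _corr = x.corrcoef()?;
    Ok(())
}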
Source§impl<T> Tensor<T>where
T: TensorElement + Copy,
impl<T> Tensor<T>where
T: TensorElement + Copy,
Sourcepub fn calculate_strides(&self) -> Vec<usize>
pub fn calculate_strides(&self) -> Vec<usize>
Calculate strides for current tensor shape
Sourcepub fn create_view(
&self,
new_shape: &[usize],
) -> Result<TensorView<T>, TorshError>
pub fn create_view( &self, new_shape: &[usize], ) -> Result<TensorView<T>, TorshError>
Create a view of this tensor with a new shape (must have same number of elements)
Sourcepub fn view_with_strides(
&self,
new_shape: &[usize],
strides: &[usize],
) -> Result<TensorView<T>, TorshError>
pub fn view_with_strides( &self, new_shape: &[usize], strides: &[usize], ) -> Result<TensorView<T>, TorshError>
Create a view with custom strides (advanced usage)
Sourcepub fn slice(
&self,
dim: usize,
start: usize,
end: usize,
) -> Result<TensorView<T>, TorshError>
pub fn slice( &self, dim: usize, start: usize, end: usize, ) -> Result<TensorView<T>, TorshError>
Create a slice view of the tensor along a specific dimension
Sourcepub fn alias(&self) -> TensorAlias<T>
pub fn alias(&self) -> TensorAlias<T>
Create an alias (shared reference) to this tensor
Sourcepub fn alias_mut(&mut self) -> TensorAlias<T>
pub fn alias_mut(&mut self) -> TensorAlias<T>
Create a mutable alias to this tensor
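Example (sketch): strides, a reshaped view, and a dimension slice, per the signatures above. Import paths are assumptions, and the slice end bound is assumed to be exclusive.

use torsh::{Tensor, TorshError};

fn views(x: &Tensor<f32>) -> Result<(), TorshError> {
    // Row-major strides for the current shape.
    let _strides = x.calculate_strides();

    // Reinterpret a 12-element tensor as 3 x 4; element counts must match.
    let view = x.create_view(&[3, 4])?;
    let _ = view;

    // Rows 1..3 along dimension 0 (end bound assumed exclusive).
    let rows = x.slice(0, 1, 3)?;
    let _ = rows;
    Ok(())
}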
Source§impl Tensor
f32 tensor conversions with SIMD optimization
impl Tensor
f32 tensor conversions with SIMD optimization
Sourcepub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
Convert to f64 tensor with SIMD optimization
Sourcepub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>
pub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>
Convert to i32 tensor with SIMD optimization (with bounds checking)
Source§impl Tensor<i32>
i32 tensor conversions with SIMD optimization
impl Tensor<i32>
i32 tensor conversions with SIMD optimization
Sourcepub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
Convert to f32 tensor with SIMD optimization
Sourcepub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
Convert to f64 tensor with SIMD optimization
Sourcepub fn to_i64_simd(&self) -> Result<Tensor<i64>, TorshError>
pub fn to_i64_simd(&self) -> Result<Tensor<i64>, TorshError>
Convert to i64 tensor with SIMD optimization
Source§impl Tensor<i64>
i64 tensor conversions with SIMD optimization
impl Tensor<i64>
i64 tensor conversions with SIMD optimization
Sourcepub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
Convert to f32 tensor with SIMD optimization
Sourcepub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
pub fn to_f64_simd(&self) -> Result<Tensor<f64>, TorshError>
Convert to f64 tensor with SIMD optimization
Sourcepub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>
pub fn to_i32_simd(&self) -> Result<Tensor<i32>, TorshError>
Convert to i32 tensor with SIMD optimization (with bounds checking)
Source§impl Tensor<f64>
f64 tensor conversions with SIMD optimization
impl Tensor<f64>
f64 tensor conversions with SIMD optimization
Sourcepub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
pub fn to_f32_simd(&self) -> Result<Tensor, TorshError>
Convert to f32 tensor with SIMD optimization
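Example (sketch): chaining the SIMD-accelerated dtype conversions listed for the f32, f64, and i32 impls above. Import paths are assumptions.

use torsh::{Tensor, TorshError};

fn widen_then_narrow(x: &Tensor<f32>) -> Result<Tensor<i32>, TorshError> {
    // f32 -> f64 widening conversion.
    let as_f64: Tensor<f64> = x.to_f64_simd()?;

    // f64 -> f32, then down to i32 with bounds checking, as noted above.
    let as_f32: Tensor<f32> = as_f64.to_f32_simd()?;
    as_f32.to_i32_simd()
}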
Source§impl<T> Tensor<T>where
T: TensorElement + Copy,
Generic SIMD conversion implementations
impl<T> Tensor<T>where
T: TensorElement + Copy,
Generic SIMD conversion implementations
Sourcepub fn convert_with_optimal_simd<U>(&self) -> Result<Tensor<U>, TorshError>
pub fn convert_with_optimal_simd<U>(&self) -> Result<Tensor<U>, TorshError>
Convert tensor using optimal SIMD strategy for this system
Sourcepub fn convert_with_strategy<U>(
&self,
strategy: SIMDStrategy,
) -> Result<Tensor<U>, TorshError>
pub fn convert_with_strategy<U>( &self, strategy: SIMDStrategy, ) -> Result<Tensor<U>, TorshError>
Convert tensor using specific SIMD strategy
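Example (sketch): the generic conversion path, letting the crate pick a SIMD strategy. The import path is an assumption, f64 is assumed to be a supported target element type, and SIMDStrategy is not used here because its variant names are not shown in this listing.

use torsh::{Tensor, TorshError};

fn convert_generic(x: &Tensor<f32>) -> Result<Tensor<f64>, TorshError> {
    // The best strategy for the current machine is selected automatically;
    // the f64 target type is inferred from the return type.
    x.convert_with_optimal_simd()
}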
Trait Implementations§
Source§impl BFloat16TensorOps<bf16> for Tensor<bf16>
impl BFloat16TensorOps<bf16> for Tensor<bf16>
Source§fn to_bf16_with_rounding(
&self,
_mode: BF16RoundingMode,
) -> Result<Tensor<bf16>, TorshError>
fn to_bf16_with_rounding( &self, _mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>
Source§fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
Source§impl BFloat16TensorOps<f32> for Tensor
impl BFloat16TensorOps<f32> for Tensor
Source§fn to_bf16_with_rounding(
&self,
mode: BF16RoundingMode,
) -> Result<Tensor<bf16>, TorshError>
fn to_bf16_with_rounding( &self, mode: BF16RoundingMode, ) -> Result<Tensor<bf16>, TorshError>
Source§fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
fn bf16_high_precision_op<F>(&self, op: F) -> Result<Tensor<bf16>, TorshError>
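Example (sketch): lowering an f32 tensor to bf16 through the trait above. The bf16 type is assumed to come from the half crate (or a re-export), BF16RoundingMode::Nearest is an assumed variant name, and the trait must be in scope for the method to resolve.

use half::bf16;
use torsh::{BF16RoundingMode, BFloat16TensorOps, Tensor, TorshError};

fn to_half_precision(x: &Tensor<f32>) -> Result<Tensor<bf16>, TorshError> {
    // Rounding-mode variant name is an assumption.
    x.to_bf16_with_rounding(BF16RoundingMode::Nearest)
}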
Source§impl<T> Debug for Tensor<T>where
T: TensorElement,
impl<T> Debug for Tensor<T>where
T: TensorElement,
Source§impl<T> TensorConvenience<T> for Tensor<T>
impl<T> TensorConvenience<T> for Tensor<T>
Source§fn T(&self) -> Result<Tensor<T>, TorshError>
fn T(&self) -> Result<Tensor<T>, TorshError>
Source§fn H(&self) -> Result<Tensor<T>, TorshError>
fn H(&self) -> Result<Tensor<T>, TorshError>
Source§fn detach(&self) -> Tensor<T>
fn detach(&self) -> Tensor<T>
Source§fn clone_tensor(&self) -> Result<Tensor<T>, TorshError>
fn clone_tensor(&self) -> Result<Tensor<T>, TorshError>
Source§fn is_contiguous(&self) -> bool
fn is_contiguous(&self) -> bool
Source§fn contiguous(&self) -> Result<Tensor<T>, TorshError>
fn contiguous(&self) -> Result<Tensor<T>, TorshError>
Source§fn to_scalar(&self) -> Result<T, TorshError>
fn to_scalar(&self) -> Result<T, TorshError>
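Example (sketch): the convenience trait in use. Method names follow the listing; import paths are assumptions and the trait must be imported for the methods to resolve.

use torsh::{Tensor, TensorConvenience, TorshError};

fn transpose_and_read(x: &Tensor<f32>) -> Result<f32, TorshError> {
    // NumPy-style transpose shorthand from the convenience trait.
    let xt = x.T()?;

    // Materialise a contiguous copy only if the transpose left a strided view.
    let dense = if xt.is_contiguous() { xt } else { xt.contiguous()? };

    // Reduce to a single element, then pull it out as a plain f32.
    dense.sum()?.to_scalar()
}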
Source§impl<T> TensorCustomOps<T> for Tensor<T>where
T: TensorElement + 'static,
impl<T> TensorCustomOps<T> for Tensor<T>where
T: TensorElement + 'static,
Source§fn apply_custom_op(
&self,
op_name: &str,
other_inputs: &[&Tensor<T>],
params: &OperationParams,
) -> Result<Vec<Tensor<T>>, TorshError>
fn apply_custom_op( &self, op_name: &str, other_inputs: &[&Tensor<T>], params: &OperationParams, ) -> Result<Vec<Tensor<T>>, TorshError>
Source§fn apply_custom_op_with_registry(
&self,
registry: &CustomOperationRegistry,
op_name: &str,
other_inputs: &[&Tensor<T>],
params: &OperationParams,
) -> Result<Vec<Tensor<T>>, TorshError>
fn apply_custom_op_with_registry( &self, registry: &CustomOperationRegistry, op_name: &str, other_inputs: &[&Tensor<T>], params: &OperationParams, ) -> Result<Vec<Tensor<T>>, TorshError>
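Example (sketch): dispatching a registered custom operation by name. The "my_scale" op name is purely illustrative, Default on OperationParams is an assumption about that type, and import paths are assumptions; only the call shape follows the listing.

use torsh::{OperationParams, Tensor, TensorCustomOps, TorshError};

fn run_custom(x: &Tensor<f32>) -> Result<Vec<Tensor<f32>>, TorshError> {
    // Assumed: OperationParams implements Default.
    let params = OperationParams::default();

    // No extra input tensors; the op is resolved in the default registry.
    x.apply_custom_op("my_scale", &[], &params)
}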
Source§impl<T> TensorExpressionOps<T> for Tensor<T>where
T: TensorElement,
impl<T> TensorExpressionOps<T> for Tensor<T>where
T: TensorElement,
Source§fn build_expression_graph(&self) -> ExpressionGraph
fn build_expression_graph(&self) -> ExpressionGraph
Source§fn optimize_expressions(
&self,
config: OptimizerConfig,
) -> Result<OptimizationStats, TorshError>
fn optimize_expressions( &self, config: OptimizerConfig, ) -> Result<OptimizationStats, TorshError>
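Example (sketch): inspecting and optimising the expression graph behind a tensor. Default on OptimizerConfig and the import paths are assumptions; the method signatures follow the listing above.

use torsh::{OptimizerConfig, Tensor, TensorExpressionOps, TorshError};

fn optimise(x: &Tensor<f32>) -> Result<(), TorshError> {
    // Inspect the expression graph recorded for this tensor.
    let _graph = x.build_expression_graph();

    // Run the optimiser and collect its statistics.
    let _stats = x.optimize_expressions(OptimizerConfig::default())?;
    Ok(())
}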
Source§impl<T> TensorFluentExt<T> for Tensor<T>where
T: TensorElement,
impl<T> TensorFluentExt<T> for Tensor<T>where
T: TensorElement,
Source§fn fluent(self) -> FluentTensor<T>
fn fluent(self) -> FluentTensor<T>
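Example (sketch): entering the fluent wrapper. The FluentTensor import path is an assumption, and its own API is not shown in this listing, so the example stops at the conversion itself.

use torsh::{FluentTensor, Tensor, TensorFluentExt};

fn into_fluent(x: Tensor<f32>) -> FluentTensor<f32> {
    // Consumes the tensor and wraps it for chained, builder-style calls.
    x.fluent()
}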
Source§impl<T> TensorShapeConvenience<T> for Tensor<T>where
T: TensorElement + Copy,
impl<T> TensorShapeConvenience<T> for Tensor<T>where
T: TensorElement + Copy,
Source§fn unsqueeze_at(&self, dim: i32) -> Result<Tensor<T>, TorshError>
fn unsqueeze_at(&self, dim: i32) -> Result<Tensor<T>, TorshError>
Source§fn squeeze_all(&self) -> Result<Tensor<T>, TorshError>
fn squeeze_all(&self) -> Result<Tensor<T>, TorshError>
Source§fn flatten(&self) -> Result<Tensor<T>, TorshError>
fn flatten(&self) -> Result<Tensor<T>, TorshError>
Source§fn flatten_from(&self, start_dim: i32) -> Result<Tensor<T>, TorshError>
fn flatten_from(&self, start_dim: i32) -> Result<Tensor<T>, TorshError>
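Example (sketch): the shape helpers from the trait above. Import paths are assumptions, and negative dims are assumed to count from the end, as the i32 parameters suggest.

use torsh::{Tensor, TensorShapeConvenience, TorshError};

fn reshape_helpers(x: &Tensor<f32>) -> Result<Tensor<f32>, TorshError> {
    // Add a trailing size-1 axis, then strip every size-1 axis again.
    let expanded = x.unsqueeze_at(-1)?;
    let squeezed = expanded.squeeze_all()?;

    // Collapse the remaining axes into a single dimension.
    squeezed.flatten()
}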
Auto Trait Implementations§
impl<T> Freeze for Tensor<T>
impl<T> RefUnwindSafe for Tensor<T>
impl<T> Send for Tensor<T>
impl<T> Sync for Tensor<T>
impl<T> Unpin for Tensor<T>
impl<T> UnwindSafe for Tensor<T>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more