pub struct Tensor<T: WithDType>(/* private fields */);Implementations§
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
Sourcepub fn new<A: ToTensor<T>>(array: A) -> Result<Self>
pub fn new<A: ToTensor<T>>(array: A) -> Result<Self>
Creates a new Tensor from any supported Rust array or slice.
use lumen_core::Tensor;
let a = Tensor::new(&[1, 2, 3]).unwrap();
println!("{}", a.shape());Sourcepub fn full<S: Into<Shape>>(shape: S, value: T) -> Result<Self>
pub fn full<S: Into<Shape>>(shape: S, value: T) -> Result<Self>
Creates an array filled with a constant value.
use lumen_core::Tensor;
let a = Tensor::full((2, 2), 7).unwrap();
println!("{}", a);Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
Sourcepub fn zeros<S: Into<Shape>>(shape: S) -> Result<Self>
pub fn zeros<S: Into<Shape>>(shape: S) -> Result<Self>
Creates an array of zeros with the given shape.
use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
println!("{}", a);Sourcepub fn zeros_like(&self) -> Result<Self>
pub fn zeros_like(&self) -> Result<Self>
Creates a zero-filled array with the same shape as self.
use lumen_core::Tensor;
let a = Tensor::<i32>::ones((2, 2)).unwrap();
let b = a.zeros_like().unwrap();
println!("{}", b);Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
Source§impl<F: FloatDType> Tensor<F>
impl<F: FloatDType> Tensor<F>
Source§impl<F: FloatDType> Tensor<F>
impl<F: FloatDType> Tensor<F>
Sourcepub fn randn<S: Into<Shape>>(mean: F, std: F, shape: S) -> Result<Self>
pub fn randn<S: Into<Shape>>(mean: F, std: F, shape: S) -> Result<Self>
Creates an array with normally distributed random values
with the given mean and std.
use lumen_core::Tensor;
let a = Tensor::<f64>::randn(0.0, 1.0, (2, 2)).unwrap();
println!("{}", a);Sourcepub fn randn_like(&self, mean: F, std: F) -> Result<Self>
pub fn randn_like(&self, mean: F, std: F) -> Result<Self>
Creates a normal-distributed random array with the same shape as self.
Source§impl<T: FloatDType> Tensor<T>
impl<T: FloatDType> Tensor<T>
pub fn new_var<A: ToTensor<T>>(array: A) -> Result<Self>
pub fn empty_var<S: Into<Shape>>(shape: S) -> Result<Self>
pub fn meta_var<S: Into<Shape>>(shape: S) -> Result<Self>
pub fn full_var<S: Into<Shape>>(shape: S, value: T) -> Result<Self>
pub fn zeros_var<S: Into<Shape>>(shape: S) -> Result<Self>
pub fn zeros_like_var(&self) -> Result<Self>
pub fn ones_var<S: Into<Shape>>(shape: S) -> Result<Self>
pub fn ones_like_var(&self) -> Result<Self>
pub fn arange_var(start: T, end: T) -> Result<Self>
pub fn from_vec_var<V: Into<Vec<T>>, S: Into<Shape>>( vec: V, shape: S, ) -> Result<Self>
pub fn eye_var(size: usize) -> Result<Self>
pub fn tril_var(size: usize, diagonal: bool) -> Result<Self>
pub fn triu_var(size: usize, diagonal: bool) -> Result<Self>
pub fn diag_var(diag: &[T]) -> Result<Self>
pub fn linspace_var(start: T, stop: T, num: usize) -> Result<Self>
pub fn randn_var<S: Into<Shape>>(mean: T, std: T, shape: S) -> Result<Self>
pub fn rand_var<S: Into<Shape>>(min: T, max: T, shape: S) -> Result<Self>
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
pub fn indexes(&self, indexers: &[Indexer]) -> Result<Self>
Sourcepub fn get(&self, i: usize) -> Result<Self>
pub fn get(&self, i: usize) -> Result<Self>
Returns the sub-tensor fixing the index at i on the first dimension.
use lumen_core::Tensor;
let tensor = Tensor::<f32>::new(&[[0f32, 1.], [2., 3.], [4., 5.]]).unwrap();
let t = tensor.get(0).unwrap();
assert_eq!(t.to_vec().unwrap(), &[0., 1.]);
let t = tensor.get(1).unwrap();
assert_eq!(t.to_vec().unwrap(), &[2., 3.]);pub fn index_select<D: Dim>( &self, indexes: impl Into<IntTensor>, dim: D, ) -> Result<Self>
Sourcepub fn gather<D: Dim>(
&self,
indexes: impl Into<IntTensor>,
dim: D,
) -> Result<Self>
pub fn gather<D: Dim>( &self, indexes: impl Into<IntTensor>, dim: D, ) -> Result<Self>
Gather values across the target dimension.
§Arguments
self - The input tensor. indexes - The indices of elements to gather; this should have the same number of dimensions as self, and indexes.dims()[d] <= self.dims()[d] must hold for all dimensions d != dim. dim - the target dimension.
The resulting tensor has the same shape as indexes and use values from self indexed on
dimension dim by the values in indexes.
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
pub fn matrix_get(&self, row: usize, col: usize) -> Result<T>
pub fn matrix_set(&self, row: usize, col: usize, val: T) -> Result<()>
pub fn vector_get(&self, n: usize) -> Result<T>
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
Sourcepub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self>
pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self>
Creates a new tensor with the specified dimension removed if its size was one.
use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3, 1)).unwrap();
let c = a.squeeze(2).unwrap();
assert_eq!(c.shape().dims(), &[2, 3]);Sourcepub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self>
pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self>
Creates a new tensor with a dimension of size one inserted at the specified position.
use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let c = a.unsqueeze(0).unwrap();
assert_eq!(c.shape().dims(), &[1, 2, 3]);
let c = a.unsqueeze(D::Minus1).unwrap();
assert_eq!(c.shape().dims(), &[2, 3, 1]);Sourcepub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self>
pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self>
Returns a new tensor that is a narrowed version of the input, the dimension dim
ranges from start to start + len.
use lumen_core::Tensor;
let a = Tensor::new(&[
[0f32, 1., 2.],
[3. , 4., 5.],
[6. , 7., 8.]
]).unwrap();
let b = a.narrow(0, 1, 2).unwrap();
assert_eq!(b.shape().dims(), &[2, 3]);
let c = a.narrow(1, 1, 1).unwrap();
assert_eq!(c.shape().dims(), &[3, 1]);Sourcepub fn slice<D: Dim>(&self, dim: D, slice: &Slice) -> Result<Self>
pub fn slice<D: Dim>(&self, dim: D, slice: &Slice) -> Result<Self>
Returns a new tensor that is a narrowed version of the input, the dimension dim
is sliced according to start : end : step.
use lumen_core::{Tensor, DType, s, Slice};
let a = Tensor::<i32>::zeros((5, 5, 5)).unwrap();
let b = a.narrow(0, 1, 2).unwrap();
assert_eq!(b.shape().dims(), &[2, 5, 5]);
let c = a.slice(1, &s!(::2)).unwrap();
assert_eq!(c.shape().dims(), &[5, 3, 5]);Sourcepub fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self>
pub fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self>
Reshape returns a tensor with the target shape provided that the number of elements of the original tensor is the same. If the input tensor is contiguous, this is a view on the original data. Otherwise this uses a new storage and copies the data over, the returned tensor is always contiguous.
The shape can be specified using a tuple of usize and at most one () in which case
the behavior is the same as when using -1 in PyTorch: this dimension size is adjusted so
as to match the number of elements in the tensor.
use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let c = a.reshape((1, 6)).unwrap();
assert_eq!(c.shape().dims(), &[1, 6]);Sourcepub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Self>
pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Self>
Returns a Tensor that is a transposed version of the input, the given dimensions are
pub fn transpose_last(&self) -> Result<Self>
Sourcepub fn permute<D: Dims>(&self, dims: D) -> Result<Self>
pub fn permute<D: Dims>(&self, dims: D) -> Result<Self>
Returns a tensor with the same data as the input where the dimensions have been permuted. dims must be a permutation, i.e. include each dimension index exactly once.
use lumen_core::Tensor;
let tensor = Tensor::<u32>::arange(0u32, 120u32).unwrap().reshape((2, 3, 4, 5)).unwrap();
assert_eq!(tensor.dims(), &[2, 3, 4, 5]);
let tensor = tensor.permute((2, 3, 1, 0)).unwrap();
assert_eq!(tensor.dims(), &[4, 5, 3, 2]);Sourcepub fn cat<A: AsRef<Tensor<T>>, D: Dim>(arrs: &[A], dim: D) -> Result<Self>
pub fn cat<A: AsRef<Tensor<T>>, D: Dim>(arrs: &[A], dim: D) -> Result<Self>
Concatenates two or more tensors along a particular dimension.
All tensors must be of the same rank, and the output will have the same rank
use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let b = Tensor::<f32>::zeros((2, 3)).unwrap();
let c = Tensor::cat(&[&a, &b], 0).unwrap();
assert_eq!(c.dims(), &[4, 3]);
let c = Tensor::cat(&[&a, &b], 1).unwrap();
assert_eq!(c.dims(), &[2, 6]);Sourcepub fn stack<A: AsRef<Tensor<T>>, D: Dim>(args: &[A], dim: D) -> Result<Self>
pub fn stack<A: AsRef<Tensor<T>>, D: Dim>(args: &[A], dim: D) -> Result<Self>
Stacks two or more tensors along a particular dimension.
All tensors must have the same rank, and the output has one additional rank
use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let b = Tensor::<f32>::zeros((2, 3)).unwrap();
let c = Tensor::stack(&[&a, &b], 0).unwrap();
assert_eq!(c.dims(), &[2, 2, 3]);
let c = Tensor::stack(&[&a, &b], 2).unwrap();
assert_eq!(c.dims(), &[2, 3, 2]);Sourcepub fn split<D: Dim>(&self, dim: D) -> Result<Vec<Self>>
pub fn split<D: Dim>(&self, dim: D) -> Result<Vec<Self>>
Splits a tensor along a specified dimension into multiple sub-tensors.
The tensor is split along the given dim into as many sub-tensors as
the size of that dimension. Each sub-tensor has the same shape as the
original tensor, except the size along dim becomes 1.
use lumen_core::Tensor;
let a = Tensor::new(&[[1, 2], [3, 4], [5, 6]]).unwrap();
// Split along axis 0 (rows)
let splits = a.split(0).unwrap();
assert_eq!(splits.len(), 3);
assert_eq!(splits[0].to_vec().unwrap(), [1, 2]);
assert_eq!(splits[1].to_vec().unwrap(), [3, 4]);
assert_eq!(splits[2].to_vec().unwrap(), [5, 6]);
// Split along axis 1 (columns)
let splits = a.split(1).unwrap();
assert_eq!(splits.len(), 2);
assert_eq!(splits[0].to_vec().unwrap(), [1, 3, 5]);
assert_eq!(splits[1].to_vec().unwrap(), [2, 4, 6]);
// 1D array
let b = Tensor::new(&[10, 20, 30]).unwrap();
let splits = b.split(0).unwrap();
assert_eq!(splits.len(), 3);
assert_eq!(splits[0].to_vec().unwrap(), [10]);
assert_eq!(splits[1].to_vec().unwrap(), [20]);
assert_eq!(splits[2].to_vec().unwrap(), [30]);Sourcepub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>>
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>>
Splits a tensor into the specified number of chunks; this may return fewer chunks than specified.
Sourcepub fn flatten<D1: Dim, D2: Dim>(
&self,
start_dim: D1,
end_dim: D2,
) -> Result<Self>
pub fn flatten<D1: Dim, D2: Dim>( &self, start_dim: D1, end_dim: D2, ) -> Result<Self>
Flattens the input tensor on the dimension indexes from start_dim to end_dim (both
inclusive).
Sourcepub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Self>
pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Self>
Flattens the input tensor on the dimension indexes from 0 to end_dim (inclusive).
Sourcepub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Self>
pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Self>
Flattens the input tensor on the dimension indexes from start_dim (inclusive) to the last
dimension.
Sourcepub fn flatten_all(&self) -> Result<Self>
pub fn flatten_all(&self) -> Result<Self>
Flattens the input tensor by reshaping it into a one dimension tensor.
use lumen_core::Tensor;
let arr = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]]).unwrap();
let arr = arr.flatten_all().unwrap();
let len = arr.dims1().unwrap();
assert_eq!(len, 6);
assert_eq!(arr.to_vec().unwrap(), [0., 1., 2., 3., 4., 5.]);Sourcepub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Self>
pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Self>
Repeat this tensor along the specified dimensions.
Sourcepub fn repeat_dim<D: Dim>(&self, dim: D, times: usize) -> Result<Self>
pub fn repeat_dim<D: Dim>(&self, dim: D, times: usize) -> Result<Self>
Repeat this tensor along the specified dimension with specified times
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
pub fn add_tensor(&self, rhs: &Self) -> Result<Self>
pub fn add_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_add(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn add(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn mul_tensor(&self, rhs: &Self) -> Result<Self>
pub fn mul_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_mul(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn mul(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn sub_tensor(&self, rhs: &Self) -> Result<Self>
pub fn sub_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_sub(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn sub(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn div_tensor(&self, rhs: &Self) -> Result<Self>
pub fn div_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_div(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn div(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn minimum_tensor(&self, rhs: &Self) -> Result<Self>
pub fn minimum_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_minimum(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn minimum(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn maximum_tensor(&self, rhs: &Self) -> Result<Self>
pub fn maximum_scalar(&self, rhs: T) -> Result<Self>
pub fn scalar_maximum(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>
pub fn maximum(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn clamp(&self, min: T, max: T) -> Result<Self>
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
pub fn add_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn sub_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn mul_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
pub fn div_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
pub fn eq(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn ne(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn le(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn ge(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn lt(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn gt(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>
pub fn cmp( &self, rhs: impl Into<TensorOrScalar<T>>, op: CmpOp, ) -> Result<Tensor<bool>>
Source§impl Tensor<bool>
impl Tensor<bool>
pub fn and(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>
pub fn or(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>
pub fn xor(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>
pub fn not(&self) -> Result<Tensor<bool>>
Source§impl<F: FloatDType> Tensor<F>
impl<F: FloatDType> Tensor<F>
pub fn floor(&self) -> Result<Self>
pub fn ceil(&self) -> Result<Self>
pub fn round(&self) -> Result<Self>
pub fn exp(&self) -> Result<Self>
pub fn ln(&self) -> Result<Self>
pub fn sin(&self) -> Result<Self>
pub fn cos(&self) -> Result<Self>
pub fn tanh(&self) -> Result<Self>
pub fn sqrt(&self) -> Result<Self>
pub fn sqr(&self) -> Result<Self>
pub fn abs(&self) -> Result<Self>
pub fn recip(&self) -> Result<Self>
pub fn gelu(&self) -> Result<Self>
pub fn gelu_erf(&self) -> Result<Self>
pub fn erf(&self) -> Result<Self>
pub fn relu(&self) -> Result<Self>
pub fn silu(&self) -> Result<Self>
pub fn sigmoid(&self) -> Result<Self>
pub fn leaky_relu(&self, negative_slope: F) -> Result<Self>
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
Sourcepub fn matmul(&self, rhs: &Self) -> Result<Self>
pub fn matmul(&self, rhs: &Self) -> Result<Self>
Returns the matrix-multiplication of the input tensor with the other provided tensor.
§Arguments
self - A tensor with dimensions b1, b2, ..., bi, m, k. rhs - A tensor with dimensions b1, b2, ..., bi, k, n.
The resulting tensor has dimensions b1, b2, ..., bi, m, n.
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
pub fn sum<D: Dim>(&self, axis: D) -> Result<Self>
pub fn sum_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn sum_all(&self) -> Result<Self>
pub fn min<D: Dim>(&self, axis: D) -> Result<Self>
pub fn min_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn min_all(&self) -> Result<Self>
pub fn max<D: Dim>(&self, axis: D) -> Result<Self>
pub fn max_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn max_all(&self) -> Result<Self>
pub fn mean<D: Dim>(&self, axis: D) -> Result<Self>
pub fn mean_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn mean_all(&self) -> Result<Self>
pub fn var_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn var_unbiased_keepdim<D: Dim>(&self, axis: D) -> Result<Self>
pub fn var<D: Dim>(&self, axis: D) -> Result<Self>
pub fn var_unbiased<D: Dim>(&self, axis: D) -> Result<Self>
pub fn var_all(&self) -> Result<Self>
pub fn var_unbiased_all(&self) -> Result<Self>
pub fn argmin_keepdim<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>
pub fn argmin<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>
pub fn argmax_keepdim<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>
pub fn argmax<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
Sourcepub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self>
pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self>
Broadcast the input tensor to the target shape. This returns an error if the input shape is not compatible with the target shape.
If the input shape is i_1, i_2, ... i_k, the target shape has to have k dimensions or
more and shape j_1, ..., j_l, t_1, t_2, ..., t_k. The dimensions j_1 to j_l can have
any value, the dimension t_a must be equal to i_a if i_a is different from 1. If
i_a is equal to 1, any value can be used.
Source§impl<T: NumDType> Tensor<T>
impl<T: NumDType> Tensor<T>
pub fn broadcast_add(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_mul(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_sub(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_div(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_maximum(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_minimum(&self, rhs: &Self) -> Result<Self>
pub fn broadcast_eq(&self, rhs: &Self) -> Result<Tensor<bool>>
pub fn broadcast_ne(&self, rhs: &Self) -> Result<Tensor<bool>>
pub fn broadcast_lt(&self, rhs: &Self) -> Result<Tensor<bool>>
pub fn broadcast_le(&self, rhs: &Self) -> Result<Tensor<bool>>
pub fn broadcast_gt(&self, rhs: &Self) -> Result<Tensor<bool>>
pub fn broadcast_ge(&self, rhs: &Self) -> Result<Tensor<bool>>
Source§impl Tensor<bool>
impl Tensor<bool>
pub fn if_else<T: WithDType>( &self, true_val: impl Into<TensorOrScalar<T>>, false_val: impl Into<TensorOrScalar<T>>, ) -> Result<Tensor<T>>
pub fn true_count(&self) -> Result<usize>
pub fn false_count(&self) -> Result<usize>
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
pub fn masked_fill( &self, mask: &Tensor<bool>, value: impl Into<TensorOrScalar<T>>, ) -> Result<Tensor<T>>
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
pub fn is_scalar(&self) -> bool
pub fn check_scalar(&self) -> Result<()>
pub fn to_scalar(&self) -> Result<T>
pub fn set_scalar(&self, val: T) -> Result<()>
pub fn storage_ref<'a>( &'a self, start_offset: usize, ) -> Result<StorageRef<'a, T>>
pub fn storage_mut<'a>( &'a self, start_offset: usize, ) -> Result<StorageMut<'a, T>>
pub fn storage_ptr(&self, start_offset: usize) -> Result<*mut T>
pub fn is_meta(&self) -> bool
Source§impl<T: WithDType> Tensor<T>
impl<T: WithDType> Tensor<T>
pub fn id(&self) -> TensorId
pub fn shape(&self) -> &Shape
pub fn dtype(&self) -> DType
pub fn layout(&self) -> &Layout
pub fn dims(&self) -> &[usize]
pub fn dim<D: Dim>(&self, dim: D) -> Result<usize>
pub fn storage_read(&self) -> Result<RwLockReadGuard<'_, Storage<T>>>
pub fn storage_write(&self) -> Result<RwLockWriteGuard<'_, Storage<T>>>
pub fn element_count(&self) -> usize
pub fn is_contiguous(&self) -> bool
pub fn rank(&self) -> usize
pub fn to_vec(&self) -> Result<Vec<T>>
Sourcepub fn storage_indices(&self) -> StorageIndices<'_> ⓘ
pub fn storage_indices(&self) -> StorageIndices<'_> ⓘ
Returns an iterator over storage indices.
This iterator yields the linear (flat) indices as they are laid out in the underlying storage buffer. The order depends on the memory layout (e.g., row-major / column-major / with strides).
Example for shape = (2, 2) in row-major layout:
yields: 0, 1, 2, 3
Sourcepub fn dim_coordinates(&self) -> DimCoordinates ⓘ
pub fn dim_coordinates(&self) -> DimCoordinates ⓘ
Returns an iterator over dimension coordinates.
This iterator yields the multi-dimensional coordinates
(e.g., [i, j, k, ...]) of each element in the array, independent
of the physical storage layout.
Example for shape = (2, 2):
yields: [0, 0], [0, 1], [1, 0], [1, 1]