Skip to main content

Tensor

Struct Tensor 

Source
pub struct Tensor<T: WithDType>(/* private fields */);

Implementations§

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims0(&self) -> Result<()>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims1(&self) -> Result<usize>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims2(&self) -> Result<(usize, usize)>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims3(&self) -> Result<(usize, usize, usize)>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims4(&self) -> Result<(usize, usize, usize, usize)>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn dims5(&self) -> Result<(usize, usize, usize, usize, usize)>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn new<A: ToTensor<T>>(array: A) -> Result<Self>

Creates a new Tensor from any supported Rust array or slice.

use lumen_core::Tensor;

let a = Tensor::new(&[1, 2, 3]).unwrap();
println!("{}", a.shape());
Source

pub fn full<S: Into<Shape>>(shape: S, value: T) -> Result<Self>

Creates an array full with a constant value.

use lumen_core::Tensor;

let a = Tensor::full((2, 2), 7).unwrap();
println!("{}", a);
Source

pub fn empty<S: Into<Shape>>(shape: S) -> Result<Self>

Creates a new Tensor with uninitialized values.

Source

pub fn meta<S: Into<Shape>>(shape: S) -> Result<Self>

Creates a new Tensor with no storage attached.

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn zeros<S: Into<Shape>>(shape: S) -> Result<Self>

Creates an array of zeros with the given shape.

use lumen_core::Tensor;

let a = Tensor::<f32>::zeros((2, 3)).unwrap();
println!("{}", a);
Source

pub fn zeros_like(&self) -> Result<Self>

Creates a zero-filled array with the same shape as self.

use lumen_core::Tensor;

let a = Tensor::<i32>::ones((2, 2)).unwrap();
let b = a.zeros_like().unwrap();
println!("{}", b);
Source

pub fn ones<S: Into<Shape>>(shape: S) -> Result<Self>

Creates an array of ones with the given shape.

use lumen_core::Tensor;

let a = Tensor::<f64>::ones((3, 3)).unwrap();
println!("{}", a);
Source

pub fn ones_like(&self) -> Result<Self>

Creates a one-filled array with the same shape as self.

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn arange(start: T, end: T) -> Result<Self>

Creates a 1-D array with values from start up to (but not including) end.

use lumen_core::Tensor;

let a = Tensor::arange(0., 5.).unwrap();
println!("{}", a);
Source§

impl<T: WithDType> Tensor<T>

Source

pub fn from_vec<V: Into<Vec<T>>, S: Into<Shape>>( vec: V, shape: S, ) -> Result<Self>

Creates an array from a flat Vec<T> and explicit shape.

use lumen_core::Tensor;

let a = Tensor::from_vec(vec![1, 2, 3, 4], (2, 2)).unwrap();
println!("{}", a);
Source

pub fn diag(diag: &[T]) -> Result<Self>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn rand<S: Into<Shape>>(min: T, max: T, shape: S) -> Result<Self>

Creates an array with uniformly distributed random values in [min, max).

use lumen_core::Tensor;

let a = Tensor::<f32>::rand(0., 1., (2, 3)).unwrap();
println!("{}", a);
Source

pub fn rand_like(&self, min: T, max: T) -> Result<Self>

Creates a random array with the same shape as self.

Source§

impl<F: FloatDType> Tensor<F>

Source

pub fn linspace(start: F, stop: F, num: usize) -> Result<Self>

Generate a 1-D Tensor of num evenly spaced values over the interval [start, stop).

§Example
let arr = Tensor::linspace(0.0, 1.0, 5).unwrap();
assert_eq!(arr.to_vec().unwrap(), [0.0, 0.2, 0.4, 0.6000000000000001, 0.8]);
Source§

impl<F: FloatDType> Tensor<F>

Source

pub fn randn<S: Into<Shape>>(mean: F, std: F, shape: S) -> Result<Self>

Creates an array with normally distributed random values with given mean and std.

use lumen_core::Tensor;

let a = Tensor::<f64>::randn(0.0, 1.0, (2, 2)).unwrap();
println!("{}", a);
Source

pub fn randn_like(&self, mean: F, std: F) -> Result<Self>

Creates a normal-distributed random array with the same shape as self.

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn eye(size: usize) -> Result<Self>

Source

pub fn tril(size: usize, diagonal: bool) -> Result<Self>

Source

pub fn triu(size: usize, diagonal: bool) -> Result<Self>

Source§

impl<T: FloatDType> Tensor<T>

Source

pub fn new_var<A: ToTensor<T>>(array: A) -> Result<Self>

Source

pub fn empty_var<S: Into<Shape>>(shape: S) -> Result<Self>

Source

pub fn meta_var<S: Into<Shape>>(shape: S) -> Result<Self>

Source

pub fn full_var<S: Into<Shape>>(shape: S, value: T) -> Result<Self>

Source

pub fn zeros_var<S: Into<Shape>>(shape: S) -> Result<Self>

Source

pub fn zeros_like_var(&self) -> Result<Self>

Source

pub fn ones_var<S: Into<Shape>>(shape: S) -> Result<Self>

Source

pub fn ones_like_var(&self) -> Result<Self>

Source

pub fn arange_var(start: T, end: T) -> Result<Self>

Source

pub fn from_vec_var<V: Into<Vec<T>>, S: Into<Shape>>( vec: V, shape: S, ) -> Result<Self>

Source

pub fn eye_var(size: usize) -> Result<Self>

Source

pub fn tril_var(size: usize, diagonal: bool) -> Result<Self>

Source

pub fn triu_var(size: usize, diagonal: bool) -> Result<Self>

Source

pub fn diag_var(diag: &[T]) -> Result<Self>

Source

pub fn linspace_var(start: T, stop: T, num: usize) -> Result<Self>

Source

pub fn randn_var<S: Into<Shape>>(mean: T, std: T, shape: S) -> Result<Self>

Source

pub fn rand_var<S: Into<Shape>>(min: T, max: T, shape: S) -> Result<Self>

Source§

impl Tensor<bool>

Source

pub fn trues<S: Into<Shape>>(shape: S) -> Result<Self>

Creates a boolean array filled with true.

use lumen_core::Tensor;

let a = Tensor::trues((2, 2)).unwrap();
println!("{}", a);
Source

pub fn falses<S: Into<Shape>>(shape: S) -> Result<Self>

Creates a boolean array filled with false.

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn indexes(&self, indexers: &[Indexer]) -> Result<Self>

Source

pub fn get(&self, i: usize) -> Result<Self>

Returns the sub-tensor fixing the index at i on the first dimension.

use lumen_core::Tensor;
let tensor = Tensor::<f32>::new(&[[0f32, 1.], [2., 3.], [4., 5.]]).unwrap();
let t = tensor.get(0).unwrap();
assert_eq!(t.to_vec().unwrap(), &[0., 1.]);
let t = tensor.get(1).unwrap();
assert_eq!(t.to_vec().unwrap(), &[2., 3.]);
Source

pub fn index_select<D: Dim>( &self, indexes: impl Into<IntTensor>, dim: D, ) -> Result<Self>

Source

pub fn gather<D: Dim>( &self, indexes: impl Into<IntTensor>, dim: D, ) -> Result<Self>

Gather values across the target dimension.

§Arguments
  • self - The input tensor.
  • indexes - The indices of elements to gather; this should have the same number of dimensions as self, and indexes.dims()[d] <= self.dims()[d] for all dimensions d != dim
  • dim - the target dimension.

The resulting tensor has the same shape as indexes and use values from self indexed on dimension dim by the values in indexes.

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn index_add<D: Dim>( &self, indexes: impl Into<IntTensor>, source: &Tensor<T>, dim: D, ) -> Result<Self>

Source

pub fn scatter_add<D: Dim>( &self, indexes: impl Into<IntTensor>, source: &Self, dim: D, ) -> Result<Self>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn matrix_get(&self, row: usize, col: usize) -> Result<T>

Source

pub fn matrix_set(&self, row: usize, col: usize, val: T) -> Result<()>

Source

pub fn vector_get(&self, n: usize) -> Result<T>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn iter(&self) -> Result<TensorIter<'_, T>>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self>

Creates a new tensor with the specified dimension removed if its size was one.

use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3, 1)).unwrap();

let c = a.squeeze(2).unwrap();
assert_eq!(c.shape().dims(), &[2, 3]);
Source

pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self>

Creates a new tensor with a dimension of size one inserted at the specified position.

use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3)).unwrap();

let c = a.unsqueeze(0).unwrap();
assert_eq!(c.shape().dims(), &[1, 2, 3]);

let c = a.unsqueeze(D::Minus1).unwrap();
assert_eq!(c.shape().dims(), &[2, 3, 1]);
Source

pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self>

Returns a new tensor that is a narrowed version of the input, the dimension dim ranges from start to start + len.

use lumen_core::Tensor;
let a = Tensor::new(&[
    [0f32, 1., 2.],
    [3.  , 4., 5.],
    [6.  , 7., 8.]
]).unwrap();

let b = a.narrow(0, 1, 2).unwrap();
assert_eq!(b.shape().dims(), &[2, 3]);

let c = a.narrow(1, 1, 1).unwrap();
assert_eq!(c.shape().dims(), &[3, 1]);
Source

pub fn slice<D: Dim>(&self, dim: D, slice: &Slice) -> Result<Self>

Returns a new tensor that is a narrowed version of the input, where the dimension dim is sliced according to the start : end : step specification.

use lumen_core::{Tensor, DType, s, Slice};
let a = Tensor::<i32>::zeros((5, 5, 5)).unwrap();

let b = a.narrow(0, 1, 2).unwrap();
assert_eq!(b.shape().dims(), &[2, 5, 5]);

let c = a.slice(1, &s!(::2)).unwrap();
assert_eq!(c.shape().dims(), &[5, 3, 5]);
Source

pub fn reshape<S: Into<Shape>>(&self, shape: S) -> Result<Self>

Reshape returns a tensor with the target shape provided that the number of elements of the original tensor is the same. If the input tensor is contiguous, this is a view on the original data. Otherwise this uses a new storage and copies the data over, the returned tensor is always contiguous.

The shape can be specified using a tuple of usize and at most one () in which case the behavior is the same as when using -1 in PyTorch: this dimension size is adjusted so as to match the number of elements in the tensor.

use lumen_core::{Tensor, DType, D};
let a = Tensor::<f32>::zeros((2, 3)).unwrap();

let c = a.reshape((1, 6)).unwrap();
assert_eq!(c.shape().dims(), &[1, 6]);
Source

pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Self>

Returns a Tensor that is a transposed version of the input, with the given dimensions dim1 and dim2 swapped.

Source

pub fn transpose_last(&self) -> Result<Self>

Source

pub fn permute<D: Dims>(&self, dims: D) -> Result<Self>

Returns a tensor with the same data as the input where the dimensions have been permuted. dims must be a permutation, i.e. include each dimension index exactly once.

use lumen_core::Tensor;
let tensor = Tensor::<u32>::arange(0u32, 120u32).unwrap().reshape((2, 3, 4, 5)).unwrap();
assert_eq!(tensor.dims(), &[2, 3, 4, 5]);
let tensor = tensor.permute((2, 3, 1, 0)).unwrap();
assert_eq!(tensor.dims(), &[4, 5, 3, 2]);
Source

pub fn cat<A: AsRef<Tensor<T>>, D: Dim>(arrs: &[A], dim: D) -> Result<Self>

Concatenates two or more tensors along a particular dimension.

All tensors must be of the same rank, and the output will have the same rank.

use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let b = Tensor::<f32>::zeros((2, 3)).unwrap();

let c = Tensor::cat(&[&a, &b], 0).unwrap();
assert_eq!(c.dims(), &[4, 3]);

let c = Tensor::cat(&[&a, &b], 1).unwrap();
assert_eq!(c.dims(), &[2, 6]);
Source

pub fn stack<A: AsRef<Tensor<T>>, D: Dim>(args: &[A], dim: D) -> Result<Self>

Stacks two or more tensors along a particular dimension.

All tensors must have the same rank, and the output has one additional rank

use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 3)).unwrap();
let b = Tensor::<f32>::zeros((2, 3)).unwrap();

let c = Tensor::stack(&[&a, &b], 0).unwrap();
assert_eq!(c.dims(), &[2, 2, 3]);

let c = Tensor::stack(&[&a, &b], 2).unwrap();
assert_eq!(c.dims(), &[2, 3, 2]);
Source

pub fn split<D: Dim>(&self, dim: D) -> Result<Vec<Self>>

Splits a tensor along a specified dimension into multiple sub-tensors.

The tensor is split along the given dim into as many sub-tensors as the size of that dimension. Each sub-tensor has the same shape as the original tensor, except the size along dim becomes 1.

use lumen_core::Tensor;

let a = Tensor::new(&[[1, 2], [3, 4], [5, 6]]).unwrap();

// Split along axis 0 (rows)
let splits = a.split(0).unwrap();
assert_eq!(splits.len(), 3);
assert_eq!(splits[0].to_vec().unwrap(), [1, 2]);
assert_eq!(splits[1].to_vec().unwrap(), [3, 4]);
assert_eq!(splits[2].to_vec().unwrap(), [5, 6]);

// Split along axis 1 (columns)
let splits = a.split(1).unwrap();
assert_eq!(splits.len(), 2);
assert_eq!(splits[0].to_vec().unwrap(), [1, 3, 5]);
assert_eq!(splits[1].to_vec().unwrap(), [2, 4, 6]);

// 1D array
let b = Tensor::new(&[10, 20, 30]).unwrap();
let splits = b.split(0).unwrap();
assert_eq!(splits.len(), 3);
assert_eq!(splits[0].to_vec().unwrap(), [10]);
assert_eq!(splits[1].to_vec().unwrap(), [20]);
assert_eq!(splits[2].to_vec().unwrap(), [30]);
Source

pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>>

Split a tensor into the specified number of chunks; this may return fewer chunks than specified.

Source

pub fn flatten<D1: Dim, D2: Dim>( &self, start_dim: D1, end_dim: D2, ) -> Result<Self>

Flattens the input tensor on the dimension indexes from start_dim to end_dim (both inclusive).

Source

pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Self>

Flattens the input tensor on the dimension indexes from 0 to end_dim (inclusive).

Source

pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Self>

Flattens the input tensor on the dimension indexes from start_dim (inclusive) to the last dimension.

Source

pub fn flatten_all(&self) -> Result<Self>

Flattens the input tensor by reshaping it into a one dimension tensor.

use lumen_core::Tensor;
let arr = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]]).unwrap();
let arr = arr.flatten_all().unwrap();
let len = arr.dims1().unwrap();
assert_eq!(len, 6);
assert_eq!(arr.to_vec().unwrap(), [0., 1., 2., 3., 4., 5.]);
Source

pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Self>

Repeat this tensor along the specified dimensions.

Source

pub fn repeat_dim<D: Dim>(&self, dim: D, times: usize) -> Result<Self>

Repeat this tensor along the specified dimension with specified times

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn add_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn add_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_add(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn add(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn mul_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn mul_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_mul(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn mul(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn sub_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn sub_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_sub(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn sub(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn div_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn div_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_div(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn div(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn minimum_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn minimum_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_minimum(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn minimum(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn maximum_tensor(&self, rhs: &Self) -> Result<Self>

Source

pub fn maximum_scalar(&self, rhs: T) -> Result<Self>

Source

pub fn scalar_maximum(lhs: T, rhs: &Tensor<T>) -> Result<Tensor<T>>

Source

pub fn maximum(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn clamp(&self, min: T, max: T) -> Result<Self>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn add_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn sub_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn mul_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source

pub fn div_(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Self>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn eq(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn ne(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn le(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn ge(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn lt(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn gt(&self, rhs: impl Into<TensorOrScalar<T>>) -> Result<Tensor<bool>>

Source

pub fn cmp( &self, rhs: impl Into<TensorOrScalar<T>>, op: CmpOp, ) -> Result<Tensor<bool>>

Source§

impl Tensor<bool>

Source

pub fn and(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>

Source

pub fn or(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>

Source

pub fn xor(&self, rhs: impl Into<TensorOrScalar<bool>>) -> Result<Tensor<bool>>

Source

pub fn not(&self) -> Result<Tensor<bool>>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn affine(&self, mul: T, add: T) -> Result<Self>

Source

pub fn affine_assign(&self, mul: T, add: T) -> Result<()>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn map<F, O>(&self, f: F) -> Result<Tensor<O>>
where O: WithDType, F: Fn(T) -> O,

Source

pub fn map_assign<F>(&self, f: F) -> Result<()>
where F: Fn(T) -> T,

Source§

impl<T: NumDType + Neg<Output = T>> Tensor<T>

Source

pub fn neg(&self) -> Result<Self>

Source§

impl<F: FloatDType> Tensor<F>

Source

pub fn floor(&self) -> Result<Self>

Source

pub fn ceil(&self) -> Result<Self>

Source

pub fn round(&self) -> Result<Self>

Source

pub fn exp(&self) -> Result<Self>

Source

pub fn ln(&self) -> Result<Self>

Source

pub fn sin(&self) -> Result<Self>

Source

pub fn cos(&self) -> Result<Self>

Source

pub fn tanh(&self) -> Result<Self>

Source

pub fn sqrt(&self) -> Result<Self>

Source

pub fn sqr(&self) -> Result<Self>

Source

pub fn abs(&self) -> Result<Self>

Source

pub fn recip(&self) -> Result<Self>

Source

pub fn gelu(&self) -> Result<Self>

Source

pub fn gelu_erf(&self) -> Result<Self>

Source

pub fn erf(&self) -> Result<Self>

Source

pub fn relu(&self) -> Result<Self>

Source

pub fn silu(&self) -> Result<Self>

Source

pub fn sigmoid(&self) -> Result<Self>

Source

pub fn leaky_relu(&self, negative_slope: F) -> Result<Self>

Source§

impl<F: FloatDType> Tensor<F>

Source

pub fn pow(&self, e: F) -> Result<Self>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn matmul(&self, rhs: &Self) -> Result<Self>

Returns the matrix-multiplication of the input tensor with the other provided tensor.

§Arguments
  • self - A tensor with dimensions b1, b2, ..., bi, m, k.
  • rhs - A tensor with dimensions b1, b2, ..., bi, k, n.

The resulting tensor has dimensions b1, b2, ..., bi, m, n.

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn sum<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn sum_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn sum_all(&self) -> Result<Self>

Source

pub fn min<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn min_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn min_all(&self) -> Result<Self>

Source

pub fn max<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn max_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn max_all(&self) -> Result<Self>

Source

pub fn mean<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn mean_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn mean_all(&self) -> Result<Self>

Source

pub fn var_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn var_unbiased_keepdim<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn var<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn var_unbiased<D: Dim>(&self, axis: D) -> Result<Self>

Source

pub fn var_all(&self) -> Result<Self>

Source

pub fn var_unbiased_all(&self) -> Result<Self>

Source

pub fn argmin_keepdim<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>

Source

pub fn argmin<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>

Source

pub fn argmax_keepdim<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>

Source

pub fn argmax<D: Dim>(&self, axis: D) -> Result<Tensor<u32>>

Source§

impl Tensor<bool>

Source

pub fn all(&self) -> Result<bool>

Source

pub fn any(&self) -> Result<bool>

Source

pub fn all_axis<D: Dim>(&self, axis: D) -> Result<Tensor<bool>>

Source

pub fn any_axis<D: Dim>(&self, axis: D) -> Result<Tensor<bool>>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self>

Broadcast the input tensor to the target shape. This returns an error if the input shape is not compatible with the target shape.

If the input shape is i_1, i_2, ... i_k, the target shape has to have k dimensions or more and shape j_1, ..., j_l, t_1, t_2, ..., t_k. The dimensions j_1 to j_l can have any value, the dimension t_a must be equal to i_a if i_a is different from 1. If i_a is equal to 1, any value can be used.

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn broadcast_add(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_mul(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_sub(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_div(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_maximum(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_minimum(&self, rhs: &Self) -> Result<Self>

Source

pub fn broadcast_eq(&self, rhs: &Self) -> Result<Tensor<bool>>

Source

pub fn broadcast_ne(&self, rhs: &Self) -> Result<Tensor<bool>>

Source

pub fn broadcast_lt(&self, rhs: &Self) -> Result<Tensor<bool>>

Source

pub fn broadcast_le(&self, rhs: &Self) -> Result<Tensor<bool>>

Source

pub fn broadcast_gt(&self, rhs: &Self) -> Result<Tensor<bool>>

Source

pub fn broadcast_ge(&self, rhs: &Self) -> Result<Tensor<bool>>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn contiguous(&self) -> Result<Tensor<T>>

Source

pub fn copy(&self) -> Result<Self>

Source

pub fn copy_from(&self, source: &Self) -> Result<()>

Source

pub fn assign(&self, source: impl Into<TensorOrScalar<T>>) -> Result<()>

Source§

impl<From: WithDType> Tensor<From>

Source

pub fn cast<To: WithDType>(&self) -> Result<Tensor<To>>
where From: DTypeConvert<To>,

Source§

impl Tensor<bool>

Source

pub fn if_else<T: WithDType>( &self, true_val: impl Into<TensorOrScalar<T>>, false_val: impl Into<TensorOrScalar<T>>, ) -> Result<Tensor<T>>

Source

pub fn true_count(&self) -> Result<usize>

Source

pub fn false_count(&self) -> Result<usize>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn masked_fill( &self, mask: &Tensor<bool>, value: impl Into<TensorOrScalar<T>>, ) -> Result<Tensor<T>>

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn is_scalar(&self) -> bool

Source

pub fn check_scalar(&self) -> Result<()>

Source

pub fn to_scalar(&self) -> Result<T>

Source

pub fn set_scalar(&self, val: T) -> Result<()>

Source

pub fn storage_ref<'a>( &'a self, start_offset: usize, ) -> Result<StorageRef<'a, T>>

Source

pub fn storage_mut<'a>( &'a self, start_offset: usize, ) -> Result<StorageMut<'a, T>>

Source

pub fn storage_ptr(&self, start_offset: usize) -> Result<*mut T>

Source

pub fn is_meta(&self) -> bool

Source§

impl<T: WithDType> Tensor<T>

Source

pub fn id(&self) -> TensorId

Source

pub fn shape(&self) -> &Shape

Source

pub fn dtype(&self) -> DType

Source

pub fn layout(&self) -> &Layout

Source

pub fn dims(&self) -> &[usize]

Source

pub fn dim<D: Dim>(&self, dim: D) -> Result<usize>

Source

pub fn storage_read(&self) -> Result<RwLockReadGuard<'_, Storage<T>>>

Source

pub fn storage_write(&self) -> Result<RwLockWriteGuard<'_, Storage<T>>>

Source

pub fn element_count(&self) -> usize

Source

pub fn is_contiguous(&self) -> bool

Source

pub fn rank(&self) -> usize

Source

pub fn to_vec(&self) -> Result<Vec<T>>

Source

pub fn storage_indices(&self) -> StorageIndices<'_>

Returns an iterator over storage indices.

This iterator yields the linear (flat) indices as they are laid out in the underlying storage buffer. The order depends on the memory layout (e.g., row-major / column-major / with strides).

Example for shape = (2, 2) in row-major layout: yields: 0, 1, 2, 3

Source

pub fn dim_coordinates(&self) -> DimCoordinates

Returns an iterator over dimension coordinates.

This iterator yields the multi-dimensional coordinates (e.g., [i, j, k, ...]) of each element in the array, independent of the physical storage layout.

Example for shape = (2, 2): yields: [0, 0], [0, 1], [1, 0], [1, 1]

Source

pub fn dims_coordinates<const N: usize>(&self) -> Result<DimNCoordinates<N>>

Source

pub fn dim2_coordinates(&self) -> Result<DimNCoordinates<2>>

Source

pub fn dim3_coordinates(&self) -> Result<DimNCoordinates<3>>

Source

pub fn dim4_coordinates(&self) -> Result<DimNCoordinates<4>>

Source

pub fn dim5_coordinates(&self) -> Result<DimNCoordinates<5>>

Source§

impl<T: NumDType> Tensor<T>

Source

pub fn allclose(&self, other: &Self, rtol: f64, atol: f64) -> Result<bool>

Source§

impl<T: FloatDType> Tensor<T>

Source

pub fn detach(&self) -> Self

Source

pub fn requires_grad(&self) -> bool

Source

pub fn set_requires_grad(&self, mode: bool)

Source

pub fn op(&self) -> Option<&Op<T>>

Source

pub fn is_leaf(&self) -> bool

Source§

impl<T: FloatDType> Tensor<T>

Source

pub fn backward(&self) -> Result<GradStore<T>>

Source

pub fn sorted_nodes(&self) -> Vec<&Tensor<T>>

Trait Implementations§

Source§

impl Add<&Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<f32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<&Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<f64>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<&Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<i32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<&Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<u32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<&Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: &Tensor<u8>) -> Self::Output

Performs the + operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Add<R> for &Tensor<T>

Add

Source§

type Output = Tensor<T>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: R) -> Self::Output

Performs the + operation. Read more
Source§

impl<'a, T: NumDType, R> Add<R> for Tensor<T>
where R: Into<TensorOrScalar<T>>,

Source§

type Output = Tensor<T>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: R) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<f32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<f64>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<i32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<u32>) -> Self::Output

Performs the + operation. Read more
Source§

impl Add<Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Tensor<u8>) -> Self::Output

Performs the + operation. Read more
Source§

impl<T: WithDType> AsRef<Tensor<T>> for Tensor<T>

Source§

fn as_ref(&self) -> &Tensor<T>

Converts this type into a shared reference of the (usually inferred) input type.
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitAnd<R> for &Tensor<bool>

Bool

Source§

type Output = Tensor<bool>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: R) -> Self::Output

Performs the & operation. Read more
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitAnd<R> for Tensor<bool>

Source§

type Output = Tensor<bool>

The resulting type after applying the & operator.
Source§

fn bitand(self, rhs: R) -> Self::Output

Performs the & operation. Read more
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitOr<R> for &Tensor<bool>

Source§

type Output = Tensor<bool>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: R) -> Self::Output

Performs the | operation. Read more
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitOr<R> for Tensor<bool>

Source§

type Output = Tensor<bool>

The resulting type after applying the | operator.
Source§

fn bitor(self, rhs: R) -> Self::Output

Performs the | operation. Read more
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitXor<R> for &Tensor<bool>

Source§

type Output = Tensor<bool>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: R) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<'a, R: Into<TensorOrScalar<bool>>> BitXor<R> for Tensor<bool>

Source§

type Output = Tensor<bool>

The resulting type after applying the ^ operator.
Source§

fn bitxor(self, rhs: R) -> Self::Output

Performs the ^ operation. Read more
Source§

impl<T: Clone + WithDType> Clone for Tensor<T>

Source§

fn clone(&self) -> Tensor<T>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<T: WithDType> Debug for Tensor<T>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl<T: WithDType> Display for Tensor<T>
where DisplayOp: Display<T>,

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Div<&Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<f32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<&Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<f64>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<&Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<i32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<&Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<u32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<&Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: &Tensor<u8>) -> Self::Output

Performs the / operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Div<R> for &Tensor<T>

Div

Source§

type Output = Tensor<T>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: R) -> Self::Output

Performs the / operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Div<R> for Tensor<T>

Source§

type Output = Tensor<T>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: R) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<f32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<f64>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<i32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<u32>) -> Self::Output

Performs the / operation. Read more
Source§

impl Div<Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Tensor<u8>) -> Self::Output

Performs the / operation. Read more
Source§

impl<T: WithDType> From<&Tensor<T>> for TensorOrScalar<T>

Source§

fn from(value: &Tensor<T>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<bool>> for DynTensor

Source§

fn from(t: &Tensor<bool>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<bool>> for Indexer

Source§

fn from(value: &Tensor<bool>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<f32>> for DynTensor

Source§

fn from(t: &Tensor<f32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<f32>> for FloatTensor

Source§

fn from(value: &Tensor<f32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<f64>> for DynTensor

Source§

fn from(t: &Tensor<f64>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<f64>> for FloatTensor

Source§

fn from(value: &Tensor<f64>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<i32>> for DynTensor

Source§

fn from(t: &Tensor<i32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<i32>> for IntTensor

Source§

fn from(value: &Tensor<i32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<u32>> for DynTensor

Source§

fn from(t: &Tensor<u32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<u32>> for IntTensor

Source§

fn from(value: &Tensor<u32>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<u8>> for DynTensor

Source§

fn from(t: &Tensor<u8>) -> Self

Converts to this type from the input type.
Source§

impl From<&Tensor<u8>> for IntTensor

Source§

fn from(value: &Tensor<u8>) -> Self

Converts to this type from the input type.
Source§

impl<T: WithDType> From<Tensor<T>> for TensorOrScalar<T>

Source§

fn from(value: Tensor<T>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<bool>> for DynTensor

Source§

fn from(t: Tensor<bool>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<bool>> for Indexer

Source§

fn from(value: Tensor<bool>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<f32>> for DynTensor

Source§

fn from(t: Tensor<f32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<f32>> for FloatTensor

Source§

fn from(value: Tensor<f32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<f64>> for DynTensor

Source§

fn from(t: Tensor<f64>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<f64>> for FloatTensor

Source§

fn from(value: Tensor<f64>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<i32>> for DynTensor

Source§

fn from(t: Tensor<i32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<i32>> for IntTensor

Source§

fn from(value: Tensor<i32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<u32>> for DynTensor

Source§

fn from(t: Tensor<u32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<u32>> for IntTensor

Source§

fn from(value: Tensor<u32>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<u8>> for DynTensor

Source§

fn from(t: Tensor<u8>) -> Self

Converts to this type from the input type.
Source§

impl From<Tensor<u8>> for IntTensor

Source§

fn from(value: Tensor<u8>) -> Self

Converts to this type from the input type.
Source§

impl<T: WithDType> Hash for Tensor<T>

Source§

fn hash<H: Hasher>(&self, state: &mut H)

Feeds this value into the given Hasher. Read more
1.3.0 · Source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
Source§

impl<T: FloatDType> Index<&Tensor<T>> for GradStore<T>

Source§

type Output = Tensor<T>

The returned type after indexing.
Source§

fn index(&self, index: &Tensor<T>) -> &Self::Output

Performs the indexing (container[index]) operation. Read more
Source§

impl<I: Into<Indexer>, D: WithDType> IndexOp<(I,), D> for Tensor<D>

Source§

fn index(&self, (index): (I,)) -> Result<Tensor<D>>

Source§

impl<I1, I2, D: WithDType> IndexOp<(I1, I2), D> for Tensor<D>
where I1: Into<Indexer>, I2: Into<Indexer>,

Source§

fn index(&self, (I1, I2): (I1, I2)) -> Result<Tensor<D>>

Source§

impl<I1, I2, I3, D: WithDType> IndexOp<(I1, I2, I3), D> for Tensor<D>
where I1: Into<Indexer>, I2: Into<Indexer>, I3: Into<Indexer>,

Source§

fn index(&self, (I1, I2, I3): (I1, I2, I3)) -> Result<Tensor<D>>

Source§

impl<I1, I2, I3, I4, D: WithDType> IndexOp<(I1, I2, I3, I4), D> for Tensor<D>
where I1: Into<Indexer>, I2: Into<Indexer>, I3: Into<Indexer>, I4: Into<Indexer>,

Source§

fn index(&self, (I1, I2, I3, I4): (I1, I2, I3, I4)) -> Result<Tensor<D>>

Source§

impl<I1, I2, I3, I4, I5, D: WithDType> IndexOp<(I1, I2, I3, I4, I5), D> for Tensor<D>
where I1: Into<Indexer>, I2: Into<Indexer>, I3: Into<Indexer>, I4: Into<Indexer>, I5: Into<Indexer>,

Source§

fn index(&self, (I1, I2, I3, I4, I5): (I1, I2, I3, I4, I5)) -> Result<Tensor<D>>

Source§

impl<I: Into<Indexer>, D: WithDType> IndexOp<I, D> for Tensor<D>

Source§

fn index(&self, index: I) -> Result<Tensor<D>>

Source§

impl<I: Into<Indexer>, D: WithDType> IndexOp<Vec<I>, D> for Tensor<D>

Source§

fn index(&self, index: Vec<I>) -> Result<Tensor<D>>

Source§

impl Mul<&Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<f32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<&Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<f64>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<&Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<i32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<&Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<u32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<&Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: &Tensor<u8>) -> Self::Output

Performs the * operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Mul<R> for &Tensor<T>

Mul

Source§

type Output = Tensor<T>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: R) -> Self::Output

Performs the * operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Mul<R> for Tensor<T>

Source§

type Output = Tensor<T>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: R) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<f32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<f64>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<i32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<u32>) -> Self::Output

Performs the * operation. Read more
Source§

impl Mul<Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Tensor<u8>) -> Self::Output

Performs the * operation. Read more
Source§

impl<T: WithDType> PartialEq for Tensor<T>

Source§

fn eq(&self, other: &Self) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · Source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
Source§

impl Sub<&Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<f32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<&Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<f64>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<&Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<i32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<&Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<u32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<&Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: &Tensor<u8>) -> Self::Output

Performs the - operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Sub<R> for &Tensor<T>

Sub

Source§

type Output = Tensor<T>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: R) -> Self::Output

Performs the - operation. Read more
Source§

impl<'a, T: NumDType, R: Into<TensorOrScalar<T>>> Sub<R> for Tensor<T>

Source§

type Output = Tensor<T>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: R) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<Tensor<f32>> for f32

Source§

type Output = Tensor<f32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<f32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<Tensor<f64>> for f64

Source§

type Output = Tensor<f64>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<f64>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<Tensor<i32>> for i32

Source§

type Output = Tensor<i32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<i32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<Tensor<u32>> for u32

Source§

type Output = Tensor<u32>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<u32>) -> Self::Output

Performs the - operation. Read more
Source§

impl Sub<Tensor<u8>> for u8

Source§

type Output = Tensor<u8>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Tensor<u8>) -> Self::Output

Performs the - operation. Read more
Source§

impl TryFrom<DynTensor> for Tensor<bool>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl TryFrom<DynTensor> for Tensor<f32>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl TryFrom<DynTensor> for Tensor<f64>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl TryFrom<DynTensor> for Tensor<i32>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl TryFrom<DynTensor> for Tensor<u32>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl TryFrom<DynTensor> for Tensor<u8>

Source§

type Error = Error

The type returned in the event of a conversion error.
Source§

fn try_from(value: DynTensor) -> Result<Self, Self::Error>

Performs the conversion.
Source§

impl<T: WithDType> Eq for Tensor<T>

Auto Trait Implementations§

§

impl<T> Freeze for Tensor<T>

§

impl<T> RefUnwindSafe for Tensor<T>

§

impl<T> Send for Tensor<T>

§

impl<T> Sync for Tensor<T>

§

impl<T> Unpin for Tensor<T>

§

impl<T> UnsafeUnpin for Tensor<T>

§

impl<T> UnwindSafe for Tensor<T>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T> ToString for T
where T: Display + ?Sized,

Source§

fn to_string(&self) -> String

Converts the given value to a String. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V