pub struct Tensor<B, const D: usize, K = Float>
where
    B: Backend,
    K: TensorKind<B>,
{ /* private fields */ }

Implementations

impl<B, const D: usize, K> Tensor<B, D, K>
where
    B: Backend,
    K: TensorKind<B>,

pub fn new(primitive: <K as TensorKind<B>>::Primitive<D>) -> Tensor<B, D, K>
Constructs a new Tensor.

impl<B, const D: usize, K> Tensor<B, D, K>
where
    B: Backend,
    K: BasicOps<B>,

pub fn empty<S>(shape: S) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create an empty tensor of the given shape.

pub fn empty_device<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create an empty tensor of the given shape on the given device.

pub fn dims(&self) -> [usize; D]
Returns the dimensions of the current tensor.
Equivalent to tensor.shape().dims.

pub fn reshape<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K>
where
    S: Into<Shape<D2>>,
Reshape the tensor to have the given shape.
Panics
If the tensor cannot be reshaped to the given shape.
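Example
A minimal sketch (not from the original docs): a (2, 3, 4) tensor is viewed as a (4, 6) matrix; the element count must stay the same.
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 4]));
    // 2 * 3 * 4 = 24 elements can be viewed as a (4, 6) matrix.
    let reshaped: Tensor<B, 2> = tensor.reshape([4, 6]);
    println!("{:?}", reshaped.shape());
    // Shape { dims: [4, 6] }
}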

pub fn flatten<const D2: usize>(self, start_dim: usize, end_dim: usize) -> Tensor<B, D2, K>
Flatten the tensor along a given range of dimensions.
This function collapses the specified range of dimensions into a single dimension, effectively flattening the tensor in that range.
Arguments
- start_dim: The starting dimension of the range to be flattened.
- end_dim: The ending dimension of the range to be flattened (inclusive).
Type Parameters
- D2: The resulting number of dimensions in the flattened tensor.
Returns
A new Tensor<B, D2, K> instance with the specified range of dimensions flattened.
Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 4]));
    // Given a 3D tensor with dimensions (2, 3, 4), flatten the dimensions between indices 1 and 2:
    let flattened_tensor: Tensor<B, 2> = tensor.flatten(1, 2);
    // The resulting tensor will have dimensions (2, 12).
    println!("{:?}", flattened_tensor.shape());
}
sourcepub fn unsqueeze<const D2: usize>(self) -> Tensor<B, D2, K>

pub fn unsqueeze<const D2: usize>(self) -> Tensor<B, D2, K>
Unsqueeze the current tensor, creating new leading dimensions of size one to fit the requested size.
Panics
If the current number of dimensions is higher than the requested output size D2.
Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]));
    let tensor = tensor.unsqueeze::<4>();
    println!("{:?}", tensor.shape());
    // Shape { dims: [1, 1, 3, 3] }
}

pub fn index<const D2: usize>(self, indexes: [Range<usize>; D2]) -> Tensor<B, D, K>
Returns a tensor containing the elements selected from the given ranges.
Panics
If a range exceeds the number of elements on a dimension.
Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]));
    let tensor_indexed = tensor.index([0..1, 0..3, 1..2]);
    println!("{:?}", tensor_indexed.shape());
    // Shape { dims: [1, 3, 1] }
}

pub fn index_assign<const D2: usize>(self, indexes: [Range<usize>; D2], values: Tensor<B, D, K>) -> Tensor<B, D, K>
Returns a copy of the current tensor with the selected elements changed to the new ones at the selected indexes.
Panics
- If a range exceeds the number of elements on a dimension.
- If the given values don’t match the given ranges.
Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones([2, 3, 3]);
    let values = Tensor::<B, 3>::zeros([1, 1, 1]);
    let tensor_indexed = tensor.index_assign([0..1, 0..1, 0..1], values);
    println!("{:?}", tensor_indexed.shape());
    // Shape { dims: [2, 3, 3] }
}

pub fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>
Returns a new tensor on the given device.
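Example
A minimal sketch, assuming the backend's Device type implements Default for the primary device:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones([2, 2]);
    let device = <B as Backend>::Device::default();
    let tensor = tensor.to_device(&device);
}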

pub fn into_data(self) -> Data<<K as BasicOps<B>>::Elem, D>
Returns the data of the current tensor.

pub fn to_data(&self) -> Data<<K as BasicOps<B>>::Elem, D>
Returns the data of the current tensor without taking ownership.

pub fn from_data<T>(data: T) -> Tensor<B, D, K>
where
    T: Into<Data<<K as BasicOps<B>>::Elem, D>>,
Create a tensor from the given data.
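Example
A minimal round-trip sketch: pull the values out with to_data, then rebuild a tensor from them.
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]]);
    let data = tensor.to_data();
    let rebuilt = Tensor::<B, 2>::from_data(data);
}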

pub fn from_data_device<T>(data: T, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where
    T: Into<Data<<K as BasicOps<B>>::Elem, D>>,
Create a tensor from the given data on the given device.

pub fn equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise equality comparison and returns a boolean tensor.
Panics
If the two tensors don’t have the same shape.
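Example
A minimal sketch comparing two float tensors element by element:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let a = Tensor::<B, 1>::from_floats([1.0, 2.0, 3.0]);
    let b = Tensor::<B, 1>::from_floats([1.0, 5.0, 3.0]);
    let mask = a.equal(b);
    println!("{}", mask.to_data());
    // [true, false, true]
}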

impl<B, const D: usize> Tensor<B, D, Bool>
where
    B: Backend,

impl<const D: usize, B> Tensor<B, D, Float>
where
    B: Backend,

pub fn into_primitive(self) -> <B as Backend>::TensorPrimitive<D>
Returns the underlying backend tensor primitive.

pub fn from_primitive(tensor: <B as Backend>::TensorPrimitive<D>) -> Tensor<B, D, Float>
Creates a tensor from a backend tensor primitive.

pub fn inplace<F>(&mut self, func: F)
where
    F: FnOnce(Tensor<B, D, Float>) -> Tensor<B, D, Float>,
Executes an operation on the tensor and modifies its value.
Notes
This won't necessarily reuse the same tensor data/buffer, but it should if there is no other reference pointing to the same tensor.
Wrapping operations with inplace is not an optimization; it is mainly there if you want to mutate a tensor using owned operations. A plausible usage would be updating the weights of a mutable model reference.
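Example
A minimal sketch of mutating a tensor binding through an owned operation:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let mut tensor = Tensor::<B, 2>::ones([2, 2]);
    // `add_scalar` takes ownership; `inplace` feeds the tensor in and stores the result back.
    tensor.inplace(|t| t.add_scalar(1.0));
}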

pub fn log(self) -> Tensor<B, D, Float>
Applies the element-wise natural logarithm ln.
y = log(x)

pub fn log1p(self) -> Tensor<B, D, Float>
Applies the natural logarithm of one plus the input tensor, element-wise.
y = log(x+1)

pub fn erf(self) -> Tensor<B, D, Float>
Applies the error function element-wise.
y = erf(x)

pub fn powf(self, value: f32) -> Tensor<B, D, Float>
Applies the element-wise power operation.
y = x^a
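Example
A minimal sketch chaining two of the element-wise operations above, computing y = log(x^2 + 1):
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 1>::from_floats([1.0, 2.0, 3.0]);
    let y = tensor.powf(2.0).log1p();
}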

pub fn from_floats<A>(floats: A) -> Tensor<B, D, Float>
where
    A: Into<Data<f32, D>>,
Create a tensor from floats (f32).
Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let _ = Tensor::<B, 1>::from_floats([1.0, 2.0]);
    let _ = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]]);
}

pub fn zeros_like(&self) -> Tensor<B, D, Float>
Returns a new tensor with the same shape and device as the current tensor filled with zeros.

pub fn ones_like(&self) -> Tensor<B, D, Float>
Returns a new tensor with the same shape and device as the current tensor filled with ones.
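Example
A minimal sketch of the *_like constructors:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]]);
    let zeros = tensor.zeros_like(); // same shape and device, all 0.0
    let ones = tensor.ones_like(); // same shape and device, all 1.0
}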

pub fn random_like(&self, distribution: Distribution<<B as Backend>::FloatElem>) -> Tensor<B, D, Float>
Returns a new tensor with the same shape and device as the current tensor, filled with random values sampled from the given distribution.

pub fn one_hot(index: usize, num_classes: usize) -> Tensor<B, D, Float>
Create a one-hot tensor.
Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let one_hot = Tensor::<B, 1>::one_hot(2, 10);
    println!("{}", one_hot.to_data());
    // [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
}

pub fn transpose(self) -> Tensor<B, D, Float>
Applies the transpose operation.
On matrices and higher-dimensional tensors, it swaps the last two dimensions.
Panics
If the tensor has 1 dimension or less.
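Example
A minimal sketch showing the last two dimensions being swapped:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones([2, 3, 4]);
    let transposed = tensor.transpose();
    println!("{:?}", transposed.shape());
    // Shape { dims: [2, 4, 3] }
}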

pub fn matmul(self, other: Tensor<B, D, Float>) -> Tensor<B, D, Float>
Applies the matrix multiplication operation.
C = AB
Panics
If the two tensors don't have a compatible shape.
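Example
A minimal sketch: a (2, 3) matrix times a (3, 4) matrix yields a (2, 4) matrix.
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let a = Tensor::<B, 2>::ones([2, 3]);
    let b = Tensor::<B, 2>::ones([3, 4]);
    let c = a.matmul(b);
    println!("{:?}", c.shape());
    // Shape { dims: [2, 4] }
}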

pub fn var(self, dim: usize) -> Tensor<B, D, Float>
Calculate the variance along the given dimension.

pub fn var_bias(self, dim: usize) -> Tensor<B, D, Float>
Calculate the variance along the given dimension without applying Bessel's correction.

pub fn var_mean(self, dim: usize) -> (Tensor<B, D, Float>, Tensor<B, D, Float>)
Calculate the variance along the given dimension and also return the mean.

pub fn var_mean_bias(self, dim: usize) -> (Tensor<B, D, Float>, Tensor<B, D, Float>)
Calculate the variance along the given dimension without applying Bessel's correction and also return the mean.
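Example
A minimal sketch; given the Tensor<B, D, Float> return type, the reduced dimension is kept with size 1:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones([4, 3]);
    let (var, mean) = tensor.var_mean(1);
    println!("{:?}", var.shape());
    // Shape { dims: [4, 1] }
}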

pub fn random<S>(shape: S, distribution: Distribution<<B as Backend>::FloatElem>) -> Tensor<B, D, Float>
where
    S: Into<Shape<D>>,
Create a random tensor of the given shape where each element is sampled from the given distribution.
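Example
A minimal sketch, assuming the Distribution enum exposes a Standard variant for uniform sampling; the variant name may differ across versions, so check your version's Distribution enum.
use burn_tensor::backend::Backend;
use burn_tensor::{Distribution, Tensor};
fn example<B: Backend>() {
    // Distribution::Standard is assumed here (uniform values in [0, 1)).
    let tensor = Tensor::<B, 2>::random([2, 3], Distribution::Standard);
}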

pub fn to_full_precision(&self) -> Tensor<<B as Backend>::FullPrecisionBackend, D, Float>
Returns a tensor with full precision based on the selected backend.

pub fn from_full_precision(tensor: Tensor<<B as Backend>::FullPrecisionBackend, D, Float>) -> Tensor<B, D, Float>
Returns a tensor on the selected backend from a full precision tensor.

pub fn argmax(self, dim: usize) -> Tensor<B, D, Int>
Applies the argmax function along the given dimension and returns an integer tensor.
Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]));
    let tensor = tensor.argmax(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn argmin(self, dim: usize) -> Tensor<B, D, Int>
Applies the argmin function along the given dimension and returns an integer tensor.
Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]));
    let tensor = tensor.argmin(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn detach(self) -> Tensor<B, D, Float>
Detach the current tensor from the autodiff graph. This function does nothing when autodiff is not enabled. It can be used in batchers or elsewhere to ensure that previous operations are not considered in the autodiff graph.

pub fn require_grad(self) -> Tensor<B, D, Float>
Mark the tensor to keep gradients during the backward pass. This function does nothing when autodiff is not enabled.

pub fn is_require_grad(&self) -> bool
Returns true if the tensor requires gradients during the backward pass.

pub fn set_require_grad(self, require_grad: bool) -> Tensor<B, D, Float>
Mark the tensor as tracked or untracked depending on the require grad argument. When tracked, the gradients will be available after the backward pass.
This function does nothing when autodiff is not enabled.
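Example
A minimal sketch; an ADBackend bound is used so the flag is actually tracked (on a plain backend these calls are no-ops):
use burn_tensor::backend::ADBackend;
use burn_tensor::Tensor;
fn example<B: ADBackend>() {
    let tensor = Tensor::<B, 2>::ones([2, 2]).require_grad();
    assert!(tensor.is_require_grad());
    let tensor = tensor.set_require_grad(false);
    assert!(!tensor.is_require_grad());
}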

impl<const D: usize, B> Tensor<B, D, Float>
where
    B: ADBackend,

pub fn backward(&self) -> <B as ADBackend>::Gradients
Runs the backward pass and returns the gradients computed for the tracked tensors.

pub fn grad(&self, grads: &<B as ADBackend>::Gradients) -> Option<Tensor<<B as ADBackend>::InnerBackend, D, Float>>
Get the gradient of a tensor if it exists.
Returns a new reference to the same tensor, so the same gradient tensor can be accessed multiple times. If you only need to read the gradient once, consider using grad_remove for better performance.

pub fn grad_remove(&self, grads: &mut <B as ADBackend>::Gradients) -> Option<Tensor<<B as ADBackend>::InnerBackend, D, Float>>
Remove the gradient tensor from the grads struct and return it.

pub fn inner(self) -> Tensor<<B as ADBackend>::InnerBackend, D, Float>
Returns the inner backend tensor, without autodiff tracking.

pub fn from_inner(inner: Tensor<<B as ADBackend>::InnerBackend, D, Float>) -> Tensor<B, D, Float>
Creates an autodiff tensor from an inner backend tensor.
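Example
A minimal end-to-end sketch of the autodiff API on this page:
use burn_tensor::backend::ADBackend;
use burn_tensor::Tensor;
fn example<B: ADBackend>() {
    let x = Tensor::<B, 2>::ones([2, 2]).require_grad();
    let y = x.clone().mul(x.clone()).sum();
    let mut grads = y.backward();
    // d(sum(x * x))/dx = 2x, so every gradient element is 2.0.
    let grad_x = x.grad_remove(&mut grads);
}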

impl<B> Tensor<B, 1, Int>
where
    B: Backend,

impl<B, const D: usize, K> Tensor<B, D, K>
where
    B: Backend,
    K: Numeric<B>,
    <K as BasicOps<B>>::Elem: Element,

pub fn into_scalar(self) -> <K as BasicOps<B>>::Elem
Convert the tensor into a scalar.

pub fn add(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element-wise addition.
y = x2 + x1

pub fn add_scalar<E>(self, other: E) -> Tensor<B, D, K>
where
    E: ElementConversion,
Applies element-wise addition with a scalar.
y = x + s

pub fn sub(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element-wise subtraction.
y = x2 - x1

pub fn sub_scalar<E>(self, other: E) -> Tensor<B, D, K>
where
    E: ElementConversion,
Applies element-wise subtraction with a scalar.
y = x - s

pub fn div(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element-wise division.
y = x2 / x1

pub fn div_scalar<E>(self, other: E) -> Tensor<B, D, K>
where
    E: ElementConversion,
Applies element-wise division with a scalar.
y = x / s

pub fn mul(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element-wise multiplication.
y = x2 * x1

pub fn mul_scalar<E>(self, other: E) -> Tensor<B, D, K>
where
    E: ElementConversion,
Applies element-wise multiplication with a scalar.
y = x * s
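Example
A minimal sketch chaining the arithmetic operations; the std Add operator (+) is also implemented, see the trait implementations below.
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let a = Tensor::<B, 2>::ones([2, 2]);
    let b = Tensor::<B, 2>::ones([2, 2]);
    // Each element: (1 + 1) * 2 - 1 = 3
    let y = a.add(b).mul_scalar(2.0).sub_scalar(1.0);
}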

pub fn zeros<S>(shape: S) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create a tensor of the given shape where each element is zero.

pub fn zeros_device<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create a tensor of the given shape on the given device where each element is zero.

pub fn ones<S>(shape: S) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create a tensor of the given shape where each element is one.

pub fn ones_device<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where
    S: Into<Shape<D>>,
Create a tensor of the given shape on the given device where each element is one.

pub fn mean(self) -> Tensor<B, 1, K>
Aggregate all elements in the tensor with the mean operation.

pub fn sum(self) -> Tensor<B, 1, K>
Aggregate all elements in the tensor with the sum operation.

pub fn mean_dim(self, dim: usize) -> Tensor<B, D, K>
Aggregate all elements along the given dimension or axis in the tensor with the mean operation.

pub fn sum_dim(self, dim: usize) -> Tensor<B, D, K>
Aggregate all elements along the given dimension or axis in the tensor with the sum operation.
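Example
A minimal sketch; consistent with the argmax example below, the reduced dimension is kept with size 1:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 3>::ones([2, 3, 4]);
    let summed = tensor.sum_dim(1);
    println!("{:?}", summed.shape());
    // Shape { dims: [2, 1, 4] }
}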

pub fn greater(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise greater comparison and returns a boolean tensor.
Panics
If the two tensors don’t have the same shape.

pub fn greater_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise greater-equal comparison and returns a boolean tensor.
Panics
If the two tensors don’t have the same shape.

pub fn lower(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise lower comparison and returns a boolean tensor.
Panics
If the two tensors don’t have the same shape.

pub fn lower_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise lower-equal comparison and returns a boolean tensor.
Panics
If the two tensors don’t have the same shape.

pub fn greater_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where
    E: ElementConversion,
Applies element-wise greater comparison against a scalar and returns a boolean tensor.

pub fn greater_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where
    E: ElementConversion,
Applies element-wise greater-equal comparison against a scalar and returns a boolean tensor.

pub fn lower_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where
    E: ElementConversion,
Applies element-wise lower comparison against a scalar and returns a boolean tensor.

pub fn lower_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where
    E: ElementConversion,
Applies element-wise lower-equal comparison against a scalar and returns a boolean tensor.
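Example
A minimal sketch of the scalar comparisons:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones([2, 2]);
    // Every element equals 1.0, so every mask element is true.
    let mask = tensor.greater_elem(0.5);
}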

pub fn mask_scatter(self, mask: Tensor<B, D, Bool>, source: Tensor<B, D, K>) -> Tensor<B, D, K>
Fill elements with values from the given source tensor where the mask is true.

pub fn mask_fill<E>(self, mask: Tensor<B, D, Bool>, value: E) -> Tensor<B, D, K>
where
    E: ElementConversion,
Fill each element where the mask is true with the given value.
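Example
A minimal sketch; all elements are below 2.0, so the whole tensor is filled:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones([2, 2]);
    let mask = tensor.clone().lower_elem(2.0);
    let filled = tensor.mask_fill(mask, 3.0);
    // Every element is now 3.0.
}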

pub fn index_select(self, indexes: Tensor<B, D, Int>) -> Tensor<B, D, K>
Select the tensor elements corresponding to the given indexes.
Notes
The index tensor should have the same shape as the original tensor except for the last dimension.

pub fn index_select_assign(self, indexes: Tensor<B, D, Int>, values: Tensor<B, D, K>) -> Tensor<B, D, K>
Assign the selected elements corresponding to the given indexes from the value tensor to the original tensor using sum reduction.
Notes
The index tensor should have the same shape as the original tensor except for the last dimension. The value and index tensors should have the same shape.

pub fn index_select_dim(self, dim: usize, indexes: Tensor<B, 1, Int>) -> Tensor<B, D, K>
Select the tensor elements along the given dimension corresponding to the given indexes.

pub fn index_select_dim_assign<const D2: usize>(self, dim: usize, indexes: Tensor<B, 1, Int>, values: Tensor<B, D2, K>) -> Tensor<B, D, K>
Assign the selected elements along the given dimension corresponding to the given indexes from the value tensor to the original tensor using sum reduction.
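Example
A minimal sketch of index_select_dim, assuming Data::from and Data::convert can build an integer index tensor; the exact construction may differ across versions.
use burn_tensor::backend::Backend;
use burn_tensor::{Data, Int, Tensor};
fn example<B: Backend>() {
    let tensor = Tensor::<B, 2>::ones([3, 4]);
    // Pick rows 0 and 2 along dimension 0.
    let indexes = Tensor::<B, 1, Int>::from_data(Data::from([0i64, 2]).convert());
    let selected = tensor.index_select_dim(0, indexes);
    println!("{:?}", selected.shape());
    // Shape { dims: [2, 4] }
}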

Trait Implementations

impl<E, const D: usize, B, K> Add<E> for Tensor<B, D, K>
where
    E: ElementConversion,
    B: Backend,
    K: Numeric<B>,
    <K as BasicOps<B>>::Elem: Element,

impl<B, const D: usize, K> Add<Tensor<B, D, K>> for Tensor<B, D, K>
where
    B: Backend,
    K: Numeric<B>,
    <K as BasicOps<B>>::Elem: Element,

impl<B, const D: usize, K> Clone for Tensor<B, D, K>
where
    B: Clone + Backend,
    K: Clone + TensorKind<B>,
    <K as TensorKind<B>>::Primitive<D>: Clone,

impl<B, const D: usize, K> Debug for Tensor<B, D, K>
where
    B: Debug + Backend,
    K: Debug + TensorKind<B>,
    <K as TensorKind<B>>::Primitive<D>: Debug,

impl<B, const D: usize, K> Display for Tensor<B, D, K>
where
    B: Backend,
    <B as Backend>::IntElem: Display,
    K: BasicOps<B>,
    <K as BasicOps<B>>::Elem: Debug,
Pretty-prints tensors.