pub struct Tensor<B, const D: usize, K = Float>
where
    B: Backend,
    K: TensorKind<B>,
{ /* private fields */ }
A tensor with a given backend, shape and data type.
§Indexing
Indexing a tensor can be done using slice for all tensor types, or select for numeric types.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
use burn_tensor::Int;
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2>::from_data(
[
[3.0, 4.9, 2.0],
[2.0, 1.9, 3.0],
[6.0, 1.5, 7.0],
[3.0, 4.9, 9.0],
],
&device,
);
// Slice the tensor to get the second and third rows:
// [[2.0, 1.9, 3.0], [6.0, 1.5, 7.0]]
// The resulting tensor will have dimensions [2, 3].
let slice = tensor.clone().slice([1..3]);
println!("{slice}");
// Slice the tensor to get the first two rows and the first 2 columns:
// [[3.0, 4.9], [2.0, 1.9]]
// The resulting tensor will have dimensions [2, 2].
let slice = tensor.clone().slice([0..2, 0..2]);
println!("{slice}");
// Index the tensor along the dimension 1 to get the elements 0 and 2:
// [[3.0, 2.0], [2.0, 3.0], [6.0, 7.0], [3.0, 9.0]]
// The resulting tensor will have dimensions [4, 2]
let indices = Tensor::<B, 1, Int>::from_data([0, 2], &device);
let indexed = tensor.select(1, indices);
println!("{indexed}");
}
§Implementations
impl<const D: usize, B> Tensor<B, D> where B: AutodiffBackend
pub fn backward(&self) -> <B as AutodiffBackend>::Gradients
Backward pass of the tensor.
pub fn grad( &self, grads: &<B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>
Get the gradients of a tensor if they exist.
Returns a new reference to the same tensor. Therefore the same grad tensor can be accessed multiple times. If you only need to get the gradients one time, consider using grad_remove for better performance.
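§Example
A minimal sketch of the typical flow, assuming a float tensor marked with require_grad on an autodiff backend:
use burn_tensor::backend::AutodiffBackend;
use burn_tensor::Tensor;
fn example<B: AutodiffBackend>() {
    let device = Default::default();
    // Track gradients for this tensor.
    let tensor = Tensor::<B, 2>::ones([2, 3], &device).require_grad();
    // Run some computation, then the backward pass.
    let grads = tensor.clone().sum().backward();
    // Read the gradient with respect to `tensor`, if it was tracked.
    let grad = tensor.grad(&grads);
    println!("{grad:?}");
}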
pub fn grad_remove( &self, grads: &mut <B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>
Remove the grad tensor from the grads struct, returning the result.
pub fn grad_replace( &self, grads: &mut <B as AutodiffBackend>::Gradients, grad: Tensor<<B as AutodiffBackend>::InnerBackend, D>, )
Replace the grad tensor in the grads struct with the provided gradient.
impl<const D: usize, B, K> Tensor<B, D, K> where B: AutodiffBackend, K: BasicAutodiffOps<B>
pub fn inner( self, ) -> Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>
Returns the inner tensor without the autodiff information.
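§Example
A minimal sketch, assuming an autodiff backend; from_inner (below) performs the inverse conversion:
use burn_tensor::backend::AutodiffBackend;
use burn_tensor::Tensor;
fn example<B: AutodiffBackend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Drop the autodiff information to run untracked operations.
    let inner = tensor.inner();
    // Wrap the result back into the autodiff backend.
    let tensor: Tensor<B, 2> = Tensor::from_inner(inner);
}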
pub fn from_inner( inner: Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>, ) -> Tensor<B, D, K>
Wraps an inner backend tensor back into a tensor with autodiff support.
impl<B, const D: usize, K> Tensor<B, D, K> where B: Backend, K: TensorKind<B>
pub fn new(primitive: <K as TensorKind<B>>::Primitive) -> Tensor<B, D, K>
Constructs a new Tensor.
impl<B, const D: usize, K> Tensor<B, D, K>
pub fn into_primitive(self) -> <K as TensorKind<B>>::Primitive
Converts the tensor into a primitive tensor.
pub fn from_primitive( tensor: <K as TensorKind<B>>::Primitive, ) -> Tensor<B, D, K>
Converts from a primitive tensor into a tensor.
pub fn dtype(&self) -> DType
Returns the tensor primitive data type.
§Note
Some element types are encoded in different primitive types depending on the backend (e.g., bool could be encoded as u8 or u32).
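§Example
A small sketch; the exact data type reported depends on the backend's default float element:
use burn_tensor::backend::Backend;
use burn_tensor::{DType, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    let dtype: DType = tensor.dtype();
    println!("{dtype:?}"); // e.g., F32 on most backends
}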
pub fn empty<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
Create an empty tensor of the given shape.
§Arguments
- shape: The shape of the tensor.
- device: The device where the tensor will be created.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create an empty tensor with dimensions [2, 3, 4].
let tensor = Tensor::<B, 3>::empty([2, 3, 4], &device);
}
pub fn dims(&self) -> [usize; D]
Returns the dimensions of the current tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
let dims = tensor.dims(); // [2, 3, 4]
println!("{dims:?}");
}
pub fn shape(&self) -> Shape
Returns the shape of the current tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
// Shape { dims: [2, 3, 4] }
let shape = tensor.shape();
}
pub fn reshape<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K> where S: ReshapeArgs<D2>
Reshape the tensor to have the given shape.
The tensor has the same data and number of elements as the input.
A -1 in the shape is used to infer the remaining dimensions, e.g.: [2, -1] will reshape a tensor with [2, 3, 4] dimensions to [2, 12].
A 0 in the shape instructs to keep the current dimension from the original tensor, e.g.: [2, 0, 4] will reshape a tensor with [2, 3, 4] dimensions to [2, 3, 4]. This is useful when reshaping tensors with unknown dimensions, and it can be combined with -1 to infer the remaining dimensions, e.g. [0, -1] will reshape a tensor with [1, 3, 4] dimensions to [1, 12].
§Arguments
shape: The new shape of the tensor.
§Panics
- If the shape contains more than one -1.
- If the shape contains negative values other than -1.
- If the shape does not match the number of elements of the original shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a tensor with dimensions [2, 3, 4]
let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
// Reshape it to [2, 12], where 12 is inferred from the number of elements.
let reshaped = tensor.reshape([2, -1]);
println!("{reshaped}");
}
pub fn transpose(self) -> Tensor<B, D, K>
Transpose the tensor.
For a 2D tensor, this is the standard matrix transpose. For D > 2, the transpose is applied on the last two dimensions. For example, the transpose of a tensor with shape [1, 2, 3, 4] will have shape [1, 2, 4, 3].
See also permute.
§Arguments
tensor - The tensor to transpose.
§Returns
The transposed tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor of shape [2, 3]
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
// Transpose the tensor:
// [[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]]
// The resulting tensor will have dimensions [3, 2].
let transposed = tensor.transpose();
println!("{transposed}");
}
pub fn swap_dims(self, dim1: usize, dim2: usize) -> Tensor<B, D, K>
Swaps two dimensions of a tensor.
§Arguments
- tensor - The tensor to swap the dimensions of.
- dim1 - The first dimension to swap.
- dim2 - The second dimension to swap.
§Returns
The tensor with the dimensions swapped.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor of shape [2, 3]
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
// Swap the dimensions 0 and 1 (equivalent to `tensor.transpose()`):
// [[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]]
// The resulting tensor will have dimensions [3, 2].
let swapped = tensor.swap_dims(0, 1);
println!("{swapped}");
}
pub fn permute(self, axes: [isize; D]) -> Tensor<B, D, K>
Permute the dimensions of the tensor.
§Arguments
axes - The new order of the dimensions. The length of the axes must be equal to the number of dimensions of the tensor. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns
The tensor with the dimensions permuted.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor of shape [3, 2]
let tensor = Tensor::<B, 2>::from_data([[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]], &device);
// Permute the dimensions 1 and 0:
// [[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]]
// The resulting tensor will have dimensions [2, 3].
let permuted = tensor.permute([1, 0]);
println!("{permuted}");
}
pub fn movedim<S1, S2>(self, src: S1, dst: S2) -> Tensor<B, D, K> where S1: MovedimArgs, S2: MovedimArgs
Moves the dimension(s) of input at the position(s) in source to the position(s) in destination.
Other dimensions of input that are not explicitly moved remain in their original order and appear at the positions not specified in destination.
§Arguments
- src - The dimension(s) to move. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
- dst - Destination positions for each of the original dims. These must also be unique.
§Panics
- If the source and destination dimensions are not of the same length.
- If the source and destination vectors contain duplicate values.
- If the source and destination vectors contain values that are out of bounds.
§Returns
The tensor with the dimensions moved.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 3D tensor of shape [3, 2, 1]
let tensor = Tensor::<B, 3>::from_data([[[1.0], [5.0]], [[-2.0], [9.0]], [[3.0], [6.0]]], &device);
// Move the dimensions 0 and 1:
// [[[1.0], [-2.0], [3.0]], [[5.0], [9.0], [6.0]]]
// The resulting tensor will have dimensions [2, 3, 1].
let moved = tensor.movedim(1, 0);
println!("{moved}");
}
§Note
This is syntactic sugar for permute. It is used widely enough that we define a separate Op for it.
pub fn flip<const N: usize>(self, axes: [isize; N]) -> Tensor<B, D, K>
Reverse the order of elements in the tensor along the given dimensions.
§Arguments
axes - The dimensions to reverse. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns
The tensor with the axes flipped.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [4, 3]
let tensor = Tensor::<B, 2>::from_data(
[
[3.0, 4.9, 2.0],
[2.0, 1.9, 3.0],
[4.0, 5.9, 8.0],
[1.4, 5.8, 6.0],
],
&device,
);
// Flip the elements in dimensions 0 and 1:
// [[6.0, 5.8, 1.4],
// [8.0, 5.9, 4.0],
// [3.0, 1.9, 2.0],
// [2.0, 4.9, 3.0]]
// The resulting tensor will have dimensions [4, 3].
let flipped = tensor.flip([0, 1]);
println!("{flipped}");
}
pub fn flatten<const D2: usize>( self, start_dim: usize, end_dim: usize, ) -> Tensor<B, D2, K>
Flatten the tensor along a given range of dimensions.
This function collapses the specified range of dimensions into a single dimension, effectively flattening the tensor in that range.
§Arguments
- start_dim: The starting dimension of the range to be flattened.
- end_dim: The ending dimension of the range to be flattened (inclusive).
§Type Parameters
D2: The resulting number of dimensions in the flattened tensor.
§Returns
A new Tensor<B, D2, K> instance with the specified range of dimensions flattened.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 3D tensor with dimensions [2, 3, 4]
let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 4]), &device);
// Flatten the tensor from dimensions 1 to 2 (inclusive).
// The resulting tensor will have dimensions [2, 12]
let flattened: Tensor<B, 2> = tensor.flatten(1, 2);
println!("{flattened}");
}
pub fn squeeze<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>
Squeeze the tensor along the given dimension, removing the specified dimension of size one, and effectively reducing the rank of the tensor by one.
§Arguments
dim: The dimension to be squeezed.
§Type Parameters
D2: The resulting number of dimensions in the squeezed tensor.
§Panics
If the size in the squeezed dimension is not 1.
§Returns
A new Tensor<B, D2, K> instance with the specified dimension removed.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 3D tensor with dimensions [3, 1, 3]
let tensor = Tensor::<B, 3>::from_data(
[[[3.0, 4.9, 2.0]], [[2.0, 1.9, 3.0]], [[4.0, 5.9, 8.0]]],
&device,
);
// Squeeze the dimension 1.
// The resulting tensor will have dimensions [3, 3].
let squeezed = tensor.squeeze::<2>(1);
println!("{squeezed}");
}
pub fn squeeze_dims<const D2: usize>(self, dims: &[isize]) -> Tensor<B, D2, K>
Removes specified dimensions of size 1 from a tensor's shape.
This function takes a tensor and an array of dimensions (dims) to be squeezed. If dims is provided, only the dimensions specified in this array will be removed. Each dimension in dims should correspond to a size of 1 in the tensor; otherwise, the dimension will not be squeezed. If dims is empty, all single-dimensional entries in the tensor will be removed. If entries in dims are negative, then dimensions will be counted from the back.
§Arguments
dims: The dimension(s) to be squeezed.
§Type Parameters
D2: The resulting number of dimensions in the squeezed tensor.
§Returns
A new Tensor<B, D2, K> instance with the specified dimensions removed.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 4D tensor with dimensions [2, 1, 4, 1]
let tensor = Tensor::<B, 4>::ones(Shape::new([2, 1, 4, 1]), &device);
// Squeeze the dimensions 1 and 3.
// The resulting tensor will have dimensions [2, 4].
let squeezed: Tensor<B, 2> = tensor.squeeze_dims(&[1, 3]);
println!("{squeezed}");
}
pub fn unsqueeze<const D2: usize>(self) -> Tensor<B, D2, K>
Unsqueeze the current tensor, creating new leading dimensions to fit the given size.
§Type Parameters
D2: The resulting number of dimensions in the unsqueezed tensor.
§Panics
If the output size D2 is smaller than the current number of dimensions.
§Returns
A new Tensor<B, D2, K> instance with the specified dimensions added.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [3, 3]
let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
// Unsqueeze the tensor up to 4 dimensions.
// The resulting tensor will have dimensions [1, 1, 3, 3].
let unsqueezed = tensor.unsqueeze::<4>();
println!("{unsqueezed}");
}
pub fn unsqueeze_dim<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>
Creates a new tensor with a dimension of size one inserted at the specified position.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [3, 3]
let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
// Unsqueeze the dimension 1.
// The resulting tensor will have dimensions [3, 1, 3].
let unsqueezed: Tensor<B, 3> = tensor.unsqueeze_dim(1);
println!("{unsqueezed}");
}
pub fn unsqueeze_dims<const D2: usize>(self, axes: &[isize]) -> Tensor<B, D2, K>
Creates a new tensor with added dimensions of size one inserted at the specified indices. The indices can be negative, in which case they are counted from the last to the first dimension. The axes can contain duplicates, in which case the number of dimensions inserted at the index is the number of duplicates.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
// Create a 3D tensor with dimensions [3, 4, 5]
let tensor = Tensor::<B, 3>::ones(Shape::new([3, 4, 5]), &device);
// Unsqueeze the leading dimension (0) once and the trailing dimension (-1) twice.
// The resulting tensor will have dimensions [1, 3, 4, 5, 1, 1].
let unsqueezed: Tensor<B, 6> = tensor.unsqueeze_dims(&[0, -1, -1]);
println!("{unsqueezed}");
}
pub fn slice<const D2: usize, R>(self, ranges: R) -> Tensor<B, D, K> where R: RangesArg<D2>
Returns a tensor containing the elements selected from the given ranges.
For more complex indexing with different slice ranges, see also the slice macro s!.
§Arguments
ranges - A type implementing the RangesArg trait, which can be:
- A single range (slices the first dimension)
- A single index (slices the first dimension)
- An array of ranges
§Behavior
- Supports partial and full slicing in any number of dimensions.
- Missing ranges are treated as full slices if D > D2.
- Handles negative indices by wrapping around from the end of the dimension.
- Clamps ranges to the tensor’s dimensions if they exceed the bounds.
§Panics
- If the number of ranges provided exceeds the tensor’s dimensions.
- If a range is descending (e.g., 2..1) or empty (e.g., 1..1).
§Examples
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, s};
fn example<B: Backend>() {
let device = B::Device::default();
// 1D slicing
let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
let slice = tensor.slice([1..4]);
assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);
// 2D slicing
let tensor = Tensor::<B, 2>::ones(Shape::new([3, 4]), &device);
let slice = tensor.slice([1..3, 0..2]);
assert_eq!(slice.dims(), [2, 2]);
// Using negative indices
let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
let slice = tensor.slice([1..-1]); // Equivalent to 1..4
assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);
// Using the slice macro to select different ranges
let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..12, &device).reshape([3, 4]);
let slice = tensor.slice(s![1.., ..]); // Select rows 1 and 2, all columns
assert_eq!(slice.dims(), [2, 4]);
let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..16, &device).reshape([2, 4, 2]);
let slice = tensor.slice(s![1.., 1..=3, -1]);
assert_eq!(slice.dims(), [1, 3, 1]);
}
§Note
This function uses the RangesArg trait for flexible range specification. The trait handles the conversion of various range formats and applies clamping and negative index handling internally.
pub fn slice_assign<const D2: usize>( self, ranges: [Range<usize>; D2], values: Tensor<B, D, K>, ) -> Tensor<B, D, K>
Returns a copy of the current tensor with the selected elements changed to the new ones at the selected indices.
§Panics
- If a range exceeds the number of elements on a dimension.
- If the given values don’t match the given ranges.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 3>::ones([2, 3, 3], &device);
let values = Tensor::<B, 3>::zeros([1, 1, 1], &device);
let tensor_sliced = tensor.slice_assign([0..1, 0..1, 0..1], values);
println!("{:?}", tensor_sliced.dims()); // [2, 3, 3]
}
pub fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>
Move the tensor to the given device.
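§Example
A minimal sketch using the default device twice; in practice the source and target devices would differ:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Move (or copy) the tensor to the target device.
    let tensor = tensor.to_device(&device);
}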
pub fn into_data(self) -> TensorData
Converts the data of the current tensor.
§Note
For better performance, prefer using a Transaction when reading multiple tensors at once. This may improve laziness, especially if executed on a different thread in native environments.
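§Example
A minimal sketch, assuming the backend's float element is f32:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_data([1.0, 2.0, 3.0], &device);
    // Read the tensor back to the host.
    let data = tensor.into_data();
    let values = data.to_vec::<f32>().unwrap(); // fails if the element type differs
    println!("{values:?}"); // [1.0, 2.0, 3.0]
}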
pub fn to_data(&self) -> TensorData
Converts the data of the current tensor.
§Note
For better performance, prefer using a Transaction when reading multiple tensors at once. This may improve laziness, especially if executed on a different thread in native environments.
pub async fn into_data_async(self) -> TensorData
Returns the data of the current tensor.
pub async fn to_data_async(&self) -> TensorData
Returns the data of the current tensor.
pub fn from_data<T>(data: T, device: &<B as Backend>::Device) -> Tensor<B, D, K> where T: Into<TensorData>
Create a tensor from the given data on the given device.
pub fn from_data_dtype<T>( data: T, device: &<B as Backend>::Device, dtype: DType, ) -> Tensor<B, D, K> where T: Into<TensorData>
Create a tensor from the given data on the given device enforcing the given data type.
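§Example
A minimal sketch of both constructors; DType::F32 is one possible choice of element type:
use burn_tensor::backend::Backend;
use burn_tensor::{DType, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    // Shape [2, 2] is inferred from the nested arrays.
    let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0], [3.0, 4.0]], &device);
    // Same data, but with the element type forced instead of using the backend default.
    let tensor = Tensor::<B, 2>::from_data_dtype([[1.0, 2.0], [3.0, 4.0]], &device, DType::F32);
}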
pub fn repeat_dim(self, dim: usize, times: usize) -> Tensor<B, D, K>
Repeat the tensor along the given dimension.
The output tensor has the same shape, except along the given dimension.
§Arguments
- dim: The dimension to repeat.
- times: The number of times to repeat the tensor along the given dimension in the new tensor.
§Returns
A new tensor with the given dimension repeated times times.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [3, 2]
let tensor = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
// Repeat the tensor along the dimension 0 twice.
// [[3.0, 4.9], [2.0, 1.9], [4.0, 5.9], [3.0, 4.9], [2.0, 1.9], [4.0, 5.9]]
// The resulting tensor will have dimensions [6, 2].
let repeated = tensor.repeat_dim(0, 2);
println!("{repeated}");
}
pub fn repeat(self, sizes: &[usize]) -> Tensor<B, D, K>
Repeat the tensor along the given dimensions.
§Arguments
sizes: Borrowed slice of the number of times to repeat each dimension.
§Returns
A new tensor with each dimension repeated the given number of times.
§Panics
If sizes contains more elements than the number of dimensions.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [3, 2]
let tensor = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
// Repeat the tensor along the dimension 0 twice and the dimension 1 once.
// [[3.0, 4.9], [2.0, 1.9], [4.0, 5.9], [3.0, 4.9], [2.0, 1.9], [4.0, 5.9]]
// The resulting tensor will have dimensions [6, 2].
let repeated = tensor.repeat(&[2, 1]);
println!("{repeated}");
}
pub fn equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise equal comparison.
§Returns
A boolean tensor that is true where input is equal to other and false elsewhere.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let t1 = Tensor::<B, 2>::from_data([[2.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
let t2 = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
// Compare the elements of the two 2D tensors with dimensions [3, 2].
// [[false, true], [true, true], [true, true]]
let equal = t1.equal(t2);
println!("{equal}");
}
pub fn not_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element-wise non-equality comparison.
§Returns
A boolean tensor that is true where input is not equal to other and false elsewhere.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let t1 = Tensor::<B, 2>::from_data([[2.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
let t2 = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
// Compare the elements of the two 2D tensors for inequality.
// [[true, false], [false, false], [false, false]]
let not_equal = t1.not_equal(t2);
println!("{not_equal}");
}
pub fn cat(tensors: Vec<Tensor<B, D, K>>, dim: usize) -> Tensor<B, D, K>
Concatenates all tensors into a new one along the given dimension.
§Panics
- If dim is higher than the rank.
- If tensors is an empty vector.
- If all tensors don't have the same shape (the dimension dim is ignored).
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let t1 = Tensor::<B, 2>::from_data([[3.0, 4.9, 2.0, 1.0], [2.0, 1.9, 3.0, 1.0]], &device);
let t2 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);
// Concatenate the two tensors with shapes [2, 4] and [2, 3] along the dimension 1.
// [[3.0, 4.9, 2.0, 1.0, 4.0, 5.9, 8.0], [2.0, 1.9, 3.0, 1.0, 1.4, 5.8, 6.0]]
// The resulting tensor will have shape [2, 7].
let concat = Tensor::cat(vec![t1, t2], 1);
println!("{concat}");
}
pub fn stack<const D2: usize>( tensors: Vec<Tensor<B, D, K>>, dim: usize, ) -> Tensor<B, D2, K>
Concatenates all tensors into a new one along a new dimension.
§Panics
- If all tensors don't have the same shape.
- If the given dimension is not within the range of 0..D2.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let t1 = Tensor::<B, 2>::from_data([[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]], &device);
let t2 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);
let t3 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);
// Concatenate the three tensors with shape [2, 3] along a new dimension, 0.
// [[[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]],
// [[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]],
// [[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]]]
// The resulting tensor will have shape [3, 2, 3].
let stacked = Tensor::stack::<3>(vec![t1, t2, t3], 0);
println!("{stacked}");
}
pub fn iter_dim(self, dim: usize) -> DimIter<B, D, K>
Iterate over slices of tensors along a given dimension.
§Panics
If the given dimension is greater than or equal to the tensor rank.
§Returns
A tensor iterator.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B,2>::from_data([[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]], &device);
// Given a 2D tensor with dimensions [2, 3], iterate over slices of tensors along the dimension 0.
let iter = tensor.iter_dim(0);
for (i, tensor) in iter.enumerate() {
println!("Tensor {}: {}", i, tensor);
// Tensor 0: Tensor { data: [[3.0, 4.9, 2.0]], ... }
// Tensor 1: Tensor { data: [[2.0, 1.9, 3.0]], ... }
}
}
pub fn narrow(self, dim: usize, start: usize, length: usize) -> Tensor<B, D, K>
Returns a new tensor with the given dimension narrowed to the given range.
§Panics
- If the dimension is greater than the number of dimensions of the tensor.
- If the given range exceeds the number of elements on the given dimension.
§Returns
A new tensor with the given dimension narrowed to the given range.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [4, 3]
let tensor = Tensor::<B, 2>::from_data(
[
[3.0, 4.9, 2.0],
[2.0, 1.9, 3.0],
[6.0, 1.5, 7.0],
[3.0, 4.9, 9.0],
],
&device,
);
// Narrow the tensor along the dimension 0, keeping 3 elements starting from index 1.
// [[2.0, 1.9, 3.0], [6.0, 1.5, 7.0], [3.0, 4.9, 9.0]]
// The resulting tensor will have dimensions [3, 3].
let narrowed = tensor.narrow(0, 1, 3);
println!("{narrowed}");
}
pub fn chunk(self, chunks: usize, dim: usize) -> Vec<Tensor<B, D, K>>
Attempts to split the tensor into a specified number of chunks along a given dimension. May return fewer chunks than requested if the tensor size is not divisible by the number of chunks.
When the given dimension is evenly divisible by the number of chunks, the chunks will be of equal size. Otherwise, all chunks will be of equal size except for the last one.
§Panics
If the dimension is greater than the number of dimensions of the tensor.
§Returns
A vector of tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [4, 3]
let tensor = Tensor::<B, 2>::from_data(
[
[3.0, 4.9, 2.0],
[2.0, 1.9, 3.0],
[6.0, 1.5, 7.0],
[3.0, 4.9, 9.0],
],
&device,
);
// Split the tensor along the dimension 1 into 2 chunks.
// The first chunk will have shape [4, 2]:
// [[3.0, 4.9], [2.0, 1.9], [6.0, 1.5], [3.0, 4.9]]
// The second chunk will have shape [4, 1]:
// [[2.0], [3.0], [7.0], [9.0]]
let chunks = tensor.chunk(2, 1);
println!("{chunks:?}");
}
pub fn split(self, split_size: usize, dim: usize) -> Vec<Tensor<B, D, K>>
Splits the tensor into chunks of a specified size along a given dimension. Each chunk is a view of the original tensor.
If the tensor size along the given dimension is not divisible by split_size, then the last chunk will be smaller.
§Panics
If the specified dimension to split along is greater than the number of dimensions of the tensor.
§Returns
A vector of tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 1D tensor with 5 elements
let tensor = Tensor::<B, 1>::from_data([0.0, 1.0, 2.0, 3.0, 4.0], &device);
// Split the tensor into chunks of size 2 along dimension 0
let chunks = tensor.split(2, 0);
// The result is a vector of tensors:
// [Tensor([0.0, 1.0]), Tensor([2.0, 3.0]), Tensor([4.0])]
println!("{:?}", chunks);
}
pub fn split_with_sizes( self, split_sizes: Vec<usize>, dim: usize, ) -> Vec<Tensor<B, D, K>>
Splits the tensor into chunks with the specified sizes along a given dimension. Each chunk is a view of the original tensor.
The sizes of the chunks are specified in the split_sizes vector. The sum of the sizes in split_sizes must equal the size of the tensor along the specified dimension.
§Panics
If the specified dimension to split along is greater than the number of dimensions of the tensor, or if the sum of split_sizes does not equal the size of the tensor along dim.
§Returns
A vector of tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 1D tensor with 5 elements
let tensor = Tensor::<B, 1>::from_data([0.0, 1.0, 2.0, 3.0, 4.0], &device);
// Split the tensor into chunks with sizes [2, 3] along dimension 0
let chunks = tensor.split_with_sizes(vec![2, 3], 0);
// The result is a vector of tensors:
// [Tensor([0.0, 1.0]), Tensor([2.0, 3.0, 4.0])]
println!("{:?}", chunks);
}
pub fn any(self) -> Tensor<B, 1, Bool>
Tests if any element in the tensor evaluates to True.
§Arguments
tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns
A boolean tensor Tensor<B, 1, Bool> containing a single element, True if any element in the input tensor evaluates to True, False otherwise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B,2, Bool>::from_data([[true,false,true],[false,true,false]], &device);
let tensor_two = Tensor::<B,2, Bool>::from_data([[false,false,false],[false,false,false]], &device);
// Given a 2D tensor with dimensions [2, 3], test if any element in the tensor evaluates to True.
let any_tensor = tensor.any();
println!("{}", any_tensor);
// Tensor { data: [true], ... }
// Given a 2D tensor with dimensions [2, 3], test if any element in the tensor evaluates to True.
let any_tensor_two = tensor_two.any();
println!("{}", any_tensor_two);
// Tensor { data: [false], ... }
}
pub fn any_dim(self, dim: usize) -> Tensor<B, D, Bool>
Tests if any element in the tensor evaluates to True along a given dimension dim.
§Arguments
- tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
- dim - The axis along which to test.
§Returns
A boolean tensor Tensor<B, D, Bool> with the same shape as the input tensor, except in the dim axis where the size is 1. The element in the dim axis is True if any element along this dim in the input evaluates to True, False otherwise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let device = Default::default();
let tensor =
Tensor::<B, 2, Bool>::from_data([[true, false, false], [false, true, false]], &device);
// Check if any element in the tensor evaluates to True along the dimension 1.
// [[true], [true]],
let any_dim = tensor.clone().any_dim(1);
println!("{any_dim}");
}
pub fn all(self) -> Tensor<B, 1, Bool>
Tests if all elements in the tensor evaluate to True.
§Arguments
tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns
A boolean tensor Tensor<B, 1, Bool> with a single element, True if all elements in the input tensor evaluate to True, False otherwise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let device = Default::default();
let tensor =
Tensor::<B, 2, Bool>::from_data([[true, false, true], [true, true, true]], &device);
// Check if all elements in the tensor evaluate to True (which is not the case).
// [false]
let all = tensor.all();
println!("{all}");
}
pub fn all_dim(self, dim: usize) -> Tensor<B, D, Bool>
Tests if all elements in the tensor evaluate to True along a given dimension dim.
§Arguments
- tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
- dim - The axis along which to test.
§Returns
A boolean tensor Tensor<B, D, Bool> with the same shape as the input tensor, except in the dim axis where the size is 1. The element in the dim axis is True if all elements along this dim in the input evaluate to True, False otherwise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let device = Default::default();
let tensor =
Tensor::<B, 2, Bool>::from_data([[true, true, false], [true, true, true]], &device);
// Check if all elements in the tensor evaluate to True along the dimension 0.
// [[true, true, false]]
let all_dim = tensor.clone().all_dim(0);
println!("{all_dim}");
}
pub fn into_scalar(self) -> <K as BasicOps<B>>::Elem
Convert the tensor into a scalar.
§Panics
- If the tensor doesn’t have one element.
- If the backend fails to read the tensor data synchronously.
§Returns
The scalar value of the tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2>::from_data([[3.0]], &device);
// Convert the tensor with a single element into a scalar.
let scalar = tensor.into_scalar();
println!("{scalar}");
}
pub async fn into_scalar_async(self) -> <K as BasicOps<B>>::Elem
Convert the tensor into a scalar asynchronously.
pub fn expand<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K> where S: BroadcastArgs<D, D2>
Broadcast the tensor to the given shape.
Only singleton dimensions can be expanded to a larger size. Other dimensions must have the same size (which can be inferred with -1).
§Arguments
shape - The shape to broadcast the tensor to. Can contain -1 for dimensions that should be inferred. The number of elements in the shape must be greater than or equal to the number of dimensions of the tensor.
§Panics
If the tensor cannot be broadcasted to the given shape.
§Returns
A new tensor with the given shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
// Create a 2D tensor with dimensions [3, 1]
let tensor = Tensor::<B, 2>::from_data([[1.], [2.], [3.]], &device);
// Expand the tensor to a new shape [3, 4]
// [[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0], [3.0, 3.0, 3.0, 3.0]]
let expanded = tensor.expand([3, 4]);
println!("{}", expanded);
}
impl<B, const D: usize> Tensor<B, D, Bool> where B: Backend
pub fn from_bool( data: TensorData, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
Create a boolean tensor from data on the given device.
pub fn bool_and(self, rhs: Tensor<B, D, Bool>) -> Tensor<B, D, Bool>
Performs logical and (&&) on two boolean tensors.
pub fn bool_or(self, rhs: Tensor<B, D, Bool>) -> Tensor<B, D, Bool>
Performs logical or (||) on two boolean tensors.
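§Example
A minimal sketch of both operations:
use burn_tensor::backend::Backend;
use burn_tensor::{Bool, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 1, Bool>::from_data([true, true, false], &device);
    let b = Tensor::<B, 1, Bool>::from_data([true, false, false], &device);
    let and = a.clone().bool_and(b.clone()); // [true, false, false]
    let or = a.bool_or(b); // [true, true, false]
    println!("{and} {or}");
}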
pub fn nonzero(self) -> Vec<Tensor<B, 1, Int>>
Compute the indices of the elements that are non-zero.
§Returns
A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.
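§Example
A minimal sketch; note that one index tensor is returned per dimension:
use burn_tensor::backend::Backend;
use burn_tensor::{Bool, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2, Bool>::from_data([[true, false], [false, true]], &device);
    // Two tensors: row indices [0, 1] and column indices [0, 1].
    let indices = tensor.nonzero();
    println!("{indices:?}");
}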
pub async fn nonzero_async(self) -> Vec<Tensor<B, 1, Int>>
Compute the indices of the elements that are non-zero.
§Returns
A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.
pub fn argwhere(self) -> Tensor<B, 2, Int>
Compute the indices of the elements that are true, grouped by element.
§Returns
A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.
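§Example
A minimal sketch; each row of the result holds the coordinates of one true element:
use burn_tensor::backend::Backend;
use burn_tensor::{Bool, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2, Bool>::from_data([[true, false], [false, true]], &device);
    // [[0, 0], [1, 1]] with shape [2, 2].
    let coords = tensor.argwhere();
    println!("{coords}");
}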
pub async fn argwhere_async(self) -> Tensor<B, 2, Int>
Compute the indices of the elements that are true, grouped by element.
§Returns
A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.
pub fn triu_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
Creates a mask for the upper triangle of a matrix, which can be used to fill the specified area with a value.
This function generates a boolean tensor representing the mask of the upper triangle of a matrix.
§Arguments
- shape: The shape of the matrix.
- offset: The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
- device: The device on which the tensor will be allocated.
§Returns
Returns a boolean tensor where false indicates the elements of the matrix that are part of the upper triangle, taking into account the specified offset. All other elements are true.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let mask = Tensor::<B, 2, Bool>::triu_mask([3, 3], 0, &Default::default());
println!("{mask}");
// [[false, false, false],
// [true, false, false],
// [true, true, false]]
}
pub fn tril_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
Creates a mask for the lower triangle of a matrix, which can be used to fill the specified area with a value.
This function generates a boolean tensor representing the mask of the lower triangle of a matrix.
§Arguments
- shape: The shape of the matrix.
- offset: The offset from the diagonal, where 0 means the diagonal, and negative values shift towards the lower triangle.
- device: The device on which the tensor will be allocated.
§Returns
Returns a boolean tensor where false indicates the elements of the matrix that are part of the lower triangle, taking into account the specified offset. All other elements are true.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let mask = Tensor::<B, 2, Bool>::tril_mask([3, 3], 0, &Default::default());
println!("{mask}");
// [[false, true, true],
// [false, false, true],
// [false, false, false]]
}
pub fn diag_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
Creates a mask for the diagonal of a matrix, which can be used to fill the specified area with a value.
This function generates a boolean tensor representing the mask of the diagonal of a matrix.
§Arguments
- shape: The shape of the matrix.
- offset: The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
- device: The device on which the tensor will be allocated.
§Returns
Returns a boolean tensor where false indicates the elements of the matrix that are part of the diagonal. All other elements are true.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};
fn example<B: Backend>() {
let mask = Tensor::<B, 2, Bool>::diag_mask([3, 3], 0, &Default::default());
println!("{mask}");
// [[false, true, true],
// [true, false, true],
// [true, true, false]]
}
impl<const D: usize, B> Tensor<B, D> where B: Backend
pub fn inplace<F>(&mut self, func: F)
Executes an operation on the tensor and modifies its value.
§Notes
This won’t necessarily reuse the same tensor data/buffer, but it should if there is no other reference pointing to the same tensor.
Wrapping operations with inplace is not an optimization, it’s mainly there if you want to mutate a tensor by using owned operations. A plausible usage would be to update the weights of a mutable model reference.
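§Example
A minimal sketch, assuming the closure receives and returns the owned tensor:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    let mut tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Mutate the tensor in place using an owned operation.
    tensor.inplace(|t| t.add_scalar(1.0));
    println!("{tensor}"); // all elements are now 2.0
}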
pub fn log1p(self) -> Tensor<B, D>
Applies the natural logarithm of one plus the input tensor, element-wise.
y = log(x+1)
pub fn erf(self) -> Tensor<B, D>
Applies the error function element wise.
y = erf(x)
pub fn recip(self) -> Tensor<B, D>
Applies reciprocal operation (or multiplicative inverse) element wise.
y = 1/x
pub fn round(self) -> Tensor<B, D>
Applies element wise round operation.
This function implements the round half to even strategy, with halfway cases rounded to the nearest even integer value.
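§Example
A minimal sketch showing the round-half-to-even behavior:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_data([1.5, 2.5, 3.5, 2.4], &device);
    // Halfway cases go to the nearest even value: [2.0, 2.0, 4.0, 2.0]
    let rounded = tensor.round();
    println!("{rounded}");
}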
pub fn from_floats<A>( floats: A, device: &<B as Backend>::Device, ) -> Tensor<B, D> where A: Into<TensorData>
Create a tensor from floats (f32) on a given device.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = B::Device::default();
let _ = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
let _ = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
}
pub fn int(self) -> Tensor<B, D, Int>
Returns a new tensor with the same shape and device as the current tensor and the data cast to Integer.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = Default::default();
let float_tensor = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
let int_tensor = float_tensor.int();
}
pub fn random_like(&self, distribution: Distribution) -> Tensor<B, D>
Returns a new tensor with the same shape and device as the current tensor, filled with random values sampled from the given distribution.
pub fn matmul(self, other: Tensor<B, D>) -> Tensor<B, D>
Applies the matrix multiplication operation.
C = AB
§Panics
If the two tensors don’t have a compatible shape.
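§Example
A minimal sketch; multiplying by the identity leaves the matrix unchanged:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 2>::from_data([[1.0, 2.0], [3.0, 4.0]], &device);
    let identity = Tensor::<B, 2>::from_data([[1.0, 0.0], [0.0, 1.0]], &device);
    // [[1.0, 2.0], [3.0, 4.0]] with shape [2, 2].
    let c = a.matmul(identity);
    println!("{c}");
}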
pub fn var_bias(self, dim: usize) -> Tensor<B, D>
Calculate the variance along the given dimension without applying Bessel's correction.
pub fn var_mean(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)
Calculate the variance along the given dimension and also returns the mean.
pub fn var_mean_bias(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)
Calculate the variance along the given dimension without applying Bessel's correction, and also return the mean.
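§Example
A minimal sketch of var_mean; the variance uses Bessel's correction (divides by n - 1):
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [4.0, 6.0, 8.0]], &device);
    // var: [[1.0], [4.0]], mean: [[2.0], [6.0]]
    let (var, mean) = tensor.var_mean(1);
    println!("{var} {mean}");
}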
pub fn cast<F>(self, dtype: F) -> Tensor<B, D> where F: Into<FloatDType>
Converts a tensor to the specified floating point data type.
§Warning
Most backends don't have automatic type promotion at this time, so make sure that all tensors have the same floating point precision data type for operations with multiple input tensors (e.g., binary ops).
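§Example
A minimal sketch, assuming the target backend supports an f16 float element (FloatDType::F16):
use burn_tensor::backend::Backend;
use burn_tensor::{FloatDType, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Cast to half precision.
    let tensor = tensor.cast(FloatDType::F16);
    println!("{:?}", tensor.dtype());
}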
pub fn detach(self) -> Tensor<B, D>
Detach the current tensor from the autodiff graph.
This function does nothing when autodiff is not enabled. This can be used in batchers or elsewhere to ensure that previous operations are not considered in the autodiff graph.
pub fn require_grad(self) -> Tensor<B, D>
Mark the tensor to keep gradients during the backward pass.
This function does nothing when autodiff is not enabled.
pub fn is_require_grad(&self) -> bool
Returns true if the tensor requires gradients during the backward pass.
pub fn set_require_grad(self, require_grad: bool) -> Tensor<B, D>
Mark the tensor as tracked or untracked depending on the require_grad argument. When tracked, the gradients will be available after the backward pass.
This function does nothing when autodiff is not enabled.
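§Example
A minimal sketch; on a non-autodiff backend these calls are no-ops:
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
    let device = Default::default();
    // Mark the tensor as tracked.
    let tensor = Tensor::<B, 2>::ones([2, 3], &device).require_grad();
    println!("{}", tensor.is_require_grad());
    // Cut the tensor out of the autodiff graph before further operations.
    let _detached = tensor.detach();
}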
pub fn cov(self, dim: usize, correction_factor: usize) -> Tensor<B, D>
Calculate the covariance matrix between different entries along a given dimension.
§Arguments
- dim: The dimension along which the covariance is computed.
- correction_factor: Usually 1 for samples and 0 for population.
pub fn quantize( self, scheme: &QuantizationScheme, qparams: QParams<Tensor<B, 1>, Tensor<B, 1, Int>>, ) -> Tensor<B, D>
Convert the tensor to a lower precision data type based on the quantization scheme and the given quantization parameters.
pub fn quantize_dynamic(self, scheme: &QuantizationScheme) -> Tensor<B, D>
Dynamically convert the tensor to a lower precision data type based on the quantization scheme.
§Arguments
scheme
- The quantization scheme.
§Returns
The quantized tensor.
§Notes
This uses min-max calibration.
pub fn dequantize(self) -> Tensor<B, D>
Convert the tensor back to a higher precision data type.
If the tensor is not quantized, its value is simply returned.
§Returns
The dequantized tensor.
impl<B> Tensor<B, 1, Int> where B: Backend
impl<const D: usize, B> Tensor<B, D, Int> where B: Backend
pub fn from_ints<A>( ints: A, device: &<B as Backend>::Device, ) -> Tensor<B, D, Int> where A: Into<TensorData>
Create a tensor from integers (i32), placing it on a given device.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};
fn example<B: Backend>() {
let device = B::Device::default();
let _x: Tensor<B, 1, Int> = Tensor::from_ints([1, 2], &device);
let _y: Tensor<B, 2, Int> = Tensor::from_ints([[1, 2], [3, 4]], &device);
}
pub fn float(self) -> Tensor<B, D>
Returns a new tensor with the same shape and device as the current tensor and the data cast to Float.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let int_tensor = Tensor::<B, 1, Int>::arange(0..5, &device);
let float_tensor = int_tensor.float();
}
pub fn cartesian_grid<S, const D2: usize>( shape: S, device: &<B as Backend>::Device, ) -> Tensor<B, D2, Int>
Generates a cartesian grid for the given tensor shape on the specified device.
The generated tensor is of dimension D2 = D + 1, where each element at dimension D contains the cartesian grid coordinates for that element.
§Arguments
- shape - The shape specifying the dimensions of the tensor.
- device - The device to create the tensor on.
§Panics
Panics if D2 is not equal to D + 1.
§Examples
use burn_tensor::Int;
use burn_tensor::{backend::Backend, Shape, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let result: Tensor<B, 3, _> = Tensor::<B, 2, Int>::cartesian_grid([2, 3], &device);
println!("{}", result);
}
pub fn bitwise_and(self, other: Tensor<B, D, Int>) -> Tensor<B, D, Int>
Applies the bitwise logical and operation with each bit representing the integer.
pub fn bitwise_or(self, other: Tensor<B, D, Int>) -> Tensor<B, D, Int>
Applies the bitwise logical or operation with another tensor.
pub fn bitwise_xor(self, other: Tensor<B, D, Int>) -> Tensor<B, D, Int>
Applies the bitwise logical xor operation with another tensor.
pub fn bitwise_not(self) -> Tensor<B, D, Int>
Applies the bitwise logical not operation.
pub fn bitwise_and_scalar( self, other: <B as Backend>::IntElem, ) -> Tensor<B, D, Int>
Applies the bitwise logical and operation with each bit in the scalar and the integers in the tensor.
pub fn bitwise_or_scalar( self, other: <B as Backend>::IntElem, ) -> Tensor<B, D, Int>
Applies the bitwise logical or operation with each bit in the scalar and the integers in the tensor.
pub fn bitwise_xor_scalar( self, other: <B as Backend>::IntElem, ) -> Tensor<B, D, Int>
Applies bitwise logical xor operation with each bit in the scalar and the integers in the tensor.
pub fn bitwise_left_shift(self, other: Tensor<B, D, Int>) -> Tensor<B, D, Int>
Applies the bitwise left shift operation with the integers in the tensor.
pub fn bitwise_right_shift(self, other: Tensor<B, D, Int>) -> Tensor<B, D, Int>
Applies the bitwise right shift operation with the integers in the tensor.
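§Example
A minimal sketch of a few bitwise operations on integer tensors:
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 1, Int>::from_ints([0b1100, 0b1010], &device);
    let b = Tensor::<B, 1, Int>::from_ints([0b1010, 0b0110], &device);
    let and = a.clone().bitwise_and(b.clone()); // [0b1000, 0b0010]
    let or = a.clone().bitwise_or(b); // [0b1110, 0b1110]
    // Shift each element left by the corresponding amount.
    let shifted = a.bitwise_left_shift(Tensor::from_ints([1, 2], &device)); // [0b11000, 0b101000]
    println!("{and} {or} {shifted}");
}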
impl<B, const D: usize, K> Tensor<B, D, K>
pub fn add(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise addition operation.
y = x2 + x1
§Arguments
other - The tensor to add.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1 + tensor2;
println!("{tensor}");
// [[3.0, 1.0, 7.0], [6.0, 11.0, 9.0]]
}
pub fn add_scalar<E>(self, other: E) -> Tensor<B, D, K> where E: ElementConversion
Applies element wise addition operation with a scalar.
y = x + s
§Arguments
other - The scalar to add, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let scalar = 2.0;
let tensor = tensor + scalar;
println!("{tensor}");
// [[3.0, 0.0, 5.0], [7.0, 11.0, 8.0]]
}
pub fn sub(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise subtraction operation.
y = x2 - x1
§Arguments
other - The tensor to subtract.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1 - tensor2;
println!("{tensor}");
// [[-1.0, -5.0, -1.0], [4.0, 7.0, 3.0]]
}
Sourcepub fn sub_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn sub_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies element wise subtraction operation with a scalar.
y = x - s
§Arguments
other
- The scalar to subtract, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let scalar = 2.0;
let tensor = tensor - scalar;
println!("{tensor}");
// [[-1.0, -4.0, 1.0], [3.0, 7.0, 4.0]]
}
Sourcepub fn div(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn div(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise division operation.
y = x2 / x1
§Arguments
other
- The tensor to divide by.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1 / tensor2;
println!("{tensor}");
// [[0.5, -0.6666667, 0.75], [5.0, 4.5, 2.0]]
}
Sourcepub fn div_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn div_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies element wise division operation with a scalar.
y = x / s
§Arguments
other
- The scalar to divide by, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let scalar = 2.0;
let tensor = tensor / scalar;
println!("{tensor}");
// [[0.5, -1.0, 1.5], [2.5, 4.5, 3.0]]
}
Sourcepub fn remainder(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn remainder(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies the element wise remainder operation with another tensor.
y = x2 % x1
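No example accompanies remainder above; here is a minimal sketch in the style of the surrounding examples (values chosen for illustration):
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[3.0, 7.0, 8.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 3.0], [2.0, 4.0, 4.0]], &device);
let tensor = tensor1.remainder(tensor2);
println!("{tensor}");
// 3 % 2 = 1, 7 % 3 = 1, 8 % 3 = 2, 5 % 2 = 1, 9 % 4 = 1, 6 % 4 = 2
// [[1.0, 1.0, 2.0], [1.0, 1.0, 2.0]]
}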
Sourcepub fn remainder_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn remainder_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies the element wise remainder operation with a scalar.
y = x % s
§Arguments
other
- The scalar to compute the remainder with, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let scalar = 2.0;
let tensor = tensor1 % scalar;
println!("{tensor}");
// [[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]
}
Sourcepub fn mul(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn mul(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise multiplication operation.
y = x2 * x1
§Arguments
other
- The tensor to multiply.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1 * tensor2;
println!("{tensor}");
// [[2.0, -6.0, 12.0], [5.0, 18.0, 18.0]]
}
Sourcepub fn mul_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn mul_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies element wise multiplication operation with a scalar.
y = x * s
§Arguments
other
- The scalar to multiply, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let scalar = 2.0;
let tensor = tensor * scalar;
println!("{tensor}");
// [[2.0, -4.0, 6.0], [10.0, 18.0, 12.0]]
}
Sourcepub fn neg(self) -> Tensor<B, D, K>
pub fn neg(self) -> Tensor<B, D, K>
Switch sign of each element in the tensor.
y = -x
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = -tensor;
println!("{tensor}");
// [[-1.0, 2.0, -3.0], [-5.0, -9.0, -6.0]]
}
Sourcepub fn sign(self) -> Tensor<B, D, K>
pub fn sign(self) -> Tensor<B, D, K>
Returns the signs of the elements of the input tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.sign();
println!("{tensor}");
// [[1.0, -1.0, 1.0], [1.0, 1.0, 1.0]]
}
Sourcepub fn zeros<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
pub fn zeros<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
Create a tensor of the given shape where each element is zero.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::zeros(Shape::new([2, 3]), &device);
println!("{tensor}");
// [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
}
Sourcepub fn zeros_like(&self) -> Tensor<B, D, K>
pub fn zeros_like(&self) -> Tensor<B, D, K>
Returns a new tensor with the same shape and device as the current tensor filled with zeros.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.zeros_like();
println!("{tensor}");
// [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
}
Sourcepub fn ones<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
pub fn ones<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
Create a tensor of the given shape where each element is one.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::ones(Shape::new([2, 3]), &device);
println!("{tensor}");
// [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
}
Sourcepub fn ones_like(&self) -> Tensor<B, D, K>
pub fn ones_like(&self) -> Tensor<B, D, K>
Returns a new tensor with the same shape and device as the current tensor filled with ones.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.ones_like();
println!("{tensor}");
// [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
}
Sourcepub fn full<S, E>(
shape: S,
fill_value: E,
device: &<B as Backend>::Device,
) -> Tensor<B, D, K>
pub fn full<S, E>( shape: S, fill_value: E, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>
Create a tensor of the given shape where each element is equal to the provided value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::full(Shape::new([2, 3]), 5.0, &device);
println!("{tensor}");
// [[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]]
}
Sourcepub fn full_like<E>(&self, fill_value: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn full_like<E>(&self, fill_value: E) -> Tensor<B, D, K>where
E: ElementConversion,
Returns a new tensor with the same shape and device as the current tensor filled with the provided value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.full_like(5.0);
println!("{tensor}");
// [[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]]
}
Sourcepub fn mean(self) -> Tensor<B, 1, K>
pub fn mean(self) -> Tensor<B, 1, K>
Aggregate all elements in the tensor with the mean operation.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.mean();
println!("{tensor}");
// [3.6666667]
}
Sourcepub fn sum(self) -> Tensor<B, 1, K>
pub fn sum(self) -> Tensor<B, 1, K>
Aggregate all elements in the tensor with the sum operation.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.sum();
println!("{tensor}");
// [22.0]
}
Sourcepub fn mean_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn mean_dim(self, dim: usize) -> Tensor<B, D, K>
Aggregate all elements along the given dimension or axis in the tensor with the mean operation.
§Arguments
dim
- The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor_dim0 = tensor.clone().mean_dim(0);
println!("{tensor_dim0}");
// [[3.0, 3.5, 4.5]]
let tensor_dim1 = tensor.mean_dim(1);
println!("{tensor_dim1}");
// [[0.6666667], [6.6666665]]
}
Sourcepub fn sum_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn sum_dim(self, dim: usize) -> Tensor<B, D, K>
Aggregate all elements along the given dimension or axis in the tensor with the sum operation.
§Arguments
dim
- The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor_dim0 = tensor.clone().sum_dim(0);
println!("{tensor_dim0}");
// [[6.0, 7.0, 9.0]]
let tensor_dim1 = tensor.sum_dim(1);
println!("{tensor_dim1}");
// [[2.0], [20.0]]
}
Sourcepub fn prod(self) -> Tensor<B, 1, K>
pub fn prod(self) -> Tensor<B, 1, K>
Aggregate all elements in the tensor with the product operation.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.prod();
println!("{tensor}");
// [-1620.0]
}
Sourcepub fn prod_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn prod_dim(self, dim: usize) -> Tensor<B, D, K>
Aggregate all elements along the given dimension or axis in the tensor with the product operation.
§Arguments
dim
- The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor_dim0 = tensor.clone().prod_dim(0);
println!("{tensor_dim0}");
// [[5.0, -18.0, 18.0]]
let tensor_dim1 = tensor.prod_dim(1);
println!("{tensor_dim1}");
// [[-6.0], [270.0]]
}
Sourcepub fn equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: Element,
pub fn equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: Element,
Applies element wise equal comparison and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.equal_elem(3.0);
println!("{tensor}");
// [[false, false, true], [false, false, false]]
}
Sourcepub fn not_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: Element,
pub fn not_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: Element,
Applies element wise non-equality comparison and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.not_equal_elem(3.0);
println!("{tensor}");
// [[true, true, false], [true, true, true]]
}
Sourcepub fn greater(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
pub fn greater(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element wise greater comparison and returns a boolean tensor.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.greater(tensor2);
println!("{tensor}");
// [[false, false, false], [true, true, true]]
}
Sourcepub fn greater_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
pub fn greater_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element wise greater-equal comparison and returns a boolean tensor.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.greater_equal(tensor2);
println!("{tensor}");
// [[true, false, false], [true, true, true]]
}
Sourcepub fn lower(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
pub fn lower(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element wise lower comparison and returns a boolean tensor.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.lower(tensor2);
println!("{tensor}");
// [[false, true, true], [false, false, false]]
}
Sourcepub fn lower_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
pub fn lower_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>
Applies element wise lower-equal comparison and returns a boolean tensor.
§Panics
If the two tensors don’t have the same shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.lower_equal(tensor2);
println!("{tensor}");
// [[true, true, true], [false, false, false]]
}
Sourcepub fn greater_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
pub fn greater_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
Applies element wise greater-than comparison with other and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.greater_elem(3.0);
println!("{tensor}");
// [[false, false, false], [true, true, true]]
}
Sourcepub fn greater_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
pub fn greater_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
Applies element wise greater-or-equal comparison with other and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.greater_equal_elem(3.0);
println!("{tensor}");
// [[false, false, true], [true, true, true]]
}
Sourcepub fn lower_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
pub fn lower_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
Applies element wise lower-than comparison with other and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.lower_elem(3.0);
println!("{tensor}");
// [[true, true, false], [false, false, false]]
}
Sourcepub fn lower_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
pub fn lower_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>where
E: ElementConversion,
Applies element wise lower-or-equal comparison with other and returns a boolean tensor.
§Arguments
other
- The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.lower_equal_elem(3.0);
println!("{tensor}");
// [[true, true, true], [false, false, false]]
}
Sourcepub fn mask_where(
self,
mask: Tensor<B, D, Bool>,
value: Tensor<B, D, K>,
) -> Tensor<B, D, K>
pub fn mask_where( self, mask: Tensor<B, D, Bool>, value: Tensor<B, D, K>, ) -> Tensor<B, D, K>
Update the given tensor with the value tensor where the mask is true.
This is similar to mask_fill, except the value is a tensor rather than a scalar.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Bool};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let mask = Tensor::<B, 2, Bool>::from_data([[true, false, true], [false, true, false]], &device);
let value = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor.mask_where(mask, value);
println!("{tensor}");
// [[2.0, -2.0, 4.0], [5.0, 2.0, 6.0]]
}
Sourcepub fn mask_fill<E>(self, mask: Tensor<B, D, Bool>, value: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn mask_fill<E>(self, mask: Tensor<B, D, Bool>, value: E) -> Tensor<B, D, K>where
E: ElementConversion,
Update the given tensor with the value where the mask is true.
This is similar to mask_where, except the value is a scalar rather than a tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Bool};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let mask = Tensor::<B, 2, Bool>::from_data([[true, false, true], [false, true, false]], &device);
let tensor = tensor.mask_fill(mask, 3.0);
println!("{tensor}");
// [[3.0, -2.0, 3.0], [5.0, 3.0, 6.0]]
}
Sourcepub fn gather(self, dim: usize, indices: Tensor<B, D, Int>) -> Tensor<B, D, K>
pub fn gather(self, dim: usize, indices: Tensor<B, D, Int>) -> Tensor<B, D, K>
Gather tensor elements corresponding to the given indices from the specified dim.
Example using a 3D tensor:
output[i, j, k] = input[indices[i, j, k], j, k]; // dim = 0
output[i, j, k] = input[i, indices[i, j, k], k]; // dim = 1
output[i, j, k] = input[i, j, indices[i, j, k]]; // dim = 2
§Notes
The index tensor should have the same shape as the original tensor except for the dim specified.
§Warning
Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
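gather has no example above; the following minimal sketch (index values chosen for illustration) shows the dim = 1 case from the formulas:
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
// output[i, j] = input[i, indices[i, j]] for dim = 1
let indices = Tensor::<B, 2, Int>::from_data([[0, 2, 1], [2, 0, 0]], &device);
let gathered = tensor.gather(1, indices);
println!("{gathered}");
// [[1.0, 3.0, -2.0], [6.0, 5.0, 5.0]]
}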
Sourcepub fn scatter(
self,
dim: usize,
indices: Tensor<B, D, Int>,
values: Tensor<B, D, K>,
) -> Tensor<B, D, K>
pub fn scatter( self, dim: usize, indices: Tensor<B, D, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>
Assign the gathered elements corresponding to the given indices along the specified dimension from the value tensor to the original tensor using sum reduction.
Example using a 3D tensor:
input[indices[i, j, k], j, k] += values[i, j, k]; // dim = 0
input[i, indices[i, j, k], k] += values[i, j, k]; // dim = 1
input[i, j, indices[i, j, k]] += values[i, j, k]; // dim = 2
§Notes
The index tensor should have the same shape as the original tensor except for the specified dimension. The value and index tensors should have the same shape.
Other references to the input tensor will not be modified by this operation.
§Warning
Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
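Likewise, a minimal sketch of scatter with dim = 1, showing the sum-reduction semantics (values chosen for illustration):
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], &device);
let indices = Tensor::<B, 2, Int>::from_data([[0, 0, 1], [1, 2, 2]], &device);
let values = Tensor::<B, 2>::from_data([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], &device);
// input[i, indices[i, j]] += values[i, j] for dim = 1
let tensor = tensor.scatter(1, indices, values);
println!("{tensor}");
// Row 0: 1 + 10 + 20 = 31 at column 0, 2 + 30 = 32 at column 1.
// [[31.0, 32.0, 3.0], [4.0, 45.0, 116.0]]
}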
Sourcepub fn select(self, dim: usize, indices: Tensor<B, 1, Int>) -> Tensor<B, D, K>
pub fn select(self, dim: usize, indices: Tensor<B, 1, Int>) -> Tensor<B, D, K>
Select the tensor elements along the given dimension corresponding to the given indices.
Example using a 3D tensor:
output[i, j, k] = input[indices[i], j, k]; // dim = 0
output[i, j, k] = input[i, indices[j], k]; // dim = 1
output[i, j, k] = input[i, j, indices[k]]; // dim = 2
§Warning
Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let indices = Tensor::<B, 1, Int>::from_data([0], &device);
let tensor = tensor.select(0, indices);
println!("{tensor}");
// [[1.0, -2.0, 3.0]]
}
Sourcepub fn select_assign(
self,
dim: usize,
indices: Tensor<B, 1, Int>,
values: Tensor<B, D, K>,
) -> Tensor<B, D, K>
pub fn select_assign( self, dim: usize, indices: Tensor<B, 1, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>
Assign the selected elements along the given dimension corresponding to the given indices from the value tensor to the original tensor using sum reduction.
Example using a 3D tensor:
input[indices[i], j, k] += values[i, j, k]; // dim = 0
input[i, indices[j], k] += values[i, j, k]; // dim = 1
input[i, j, indices[k]] += values[i, j, k]; // dim = 2
§Warning
Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
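A minimal sketch of select_assign (values chosen for illustration); both value rows accumulate into row 0 of the input:
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], &device);
let indices = Tensor::<B, 1, Int>::from_data([0, 0], &device);
let values = Tensor::<B, 2>::from_data([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], &device);
// input[indices[i], j] += values[i, j] for dim = 0
let tensor = tensor.select_assign(0, indices, values);
println!("{tensor}");
// [[51.0, 72.0, 93.0], [4.0, 5.0, 6.0]]
}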
Sourcepub fn argmax(self, dim: usize) -> Tensor<B, D, Int>
pub fn argmax(self, dim: usize) -> Tensor<B, D, Int>
Applies the argmax function along the given dimension and returns an integer tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
let tensor = tensor.argmax(1);
println!("{:?}", tensor.shape());
// Shape { dims: [2, 1, 3] }
}
Sourcepub fn max(self) -> Tensor<B, 1, K>
pub fn max(self) -> Tensor<B, 1, K>
Find the maximum value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.max();
println!("{tensor}");
// [9.0]
}
Sourcepub fn max_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn max_dim(self, dim: usize) -> Tensor<B, D, K>
Find the maximum value along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.max_dim(0);
println!("{tensor}");
// [[5.0, 9.0, 6.0]]
}
Sourcepub fn max_dim_with_indices(
self,
dim: usize,
) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
pub fn max_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
Find the maximum value along the given dimension.
Also returns the indices.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let (tensor, index) = tensor.max_dim_with_indices(0);
println!("{tensor}");
// [[5.0, 9.0, 6.0]]
println!("{index}");
// [[1, 1, 1]]
}
Sourcepub fn max_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn max_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Finds the maximum pair wise values with another tensor.
§Arguments
other
- Other tensor to find maximum elements with
§Returns
A tensor with the same shape as the input tensors, containing the element wise maximum of the two input tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.max_pair(tensor2);
println!("{tensor}");
// [[2.0, 3.0, 4.0], [5.0, 9.0, 6.0]]
}
Sourcepub fn max_abs(self) -> Tensor<B, 1, K>
pub fn max_abs(self) -> Tensor<B, 1, K>
Find the maximum absolute value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -7.0, 3.0], [5.0, -1.0, 6.0]], &device);
let tensor = tensor.max_abs();
println!("{tensor}");
// [7.0]
}
Sourcepub fn max_abs_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn max_abs_dim(self, dim: usize) -> Tensor<B, D, K>
Find the maximum absolute value along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.max_abs_dim(0);
println!("{tensor}");
// [[5.0, 9.0, 6.0]]
}
Sourcepub fn argmin(self, dim: usize) -> Tensor<B, D, Int>
pub fn argmin(self, dim: usize) -> Tensor<B, D, Int>
Applies the argmin function along the given dimension and returns an integer tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
let tensor = tensor.argmin(1);
println!("{:?}", tensor.shape());
// Shape { dims: [2, 1, 3] }
}
Sourcepub fn min(self) -> Tensor<B, 1, K>
pub fn min(self) -> Tensor<B, 1, K>
Find the minimum value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.min();
println!("{tensor}");
// [-2.0]
}
Sourcepub fn min_dim(self, dim: usize) -> Tensor<B, D, K>
pub fn min_dim(self, dim: usize) -> Tensor<B, D, K>
Find the minimum value along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.min_dim(0);
println!("{tensor}");
// [[1.0, -2.0, 3.0]]
}
Sourcepub fn min_dim_with_indices(
self,
dim: usize,
) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
pub fn min_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
Find the minimum value along the given dimension.
Also returns the indices.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[7.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let (tensor, index) = tensor.min_dim_with_indices(0);
println!("{tensor}");
// [[5.0, -2.0, 3.0]]
println!("{}", index);
// [[1, 0, 0]]
}
Sourcepub fn min_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn min_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Finds the minimum pair wise values with another tensor.
§Arguments
other
- Other tensor to find minimum elements with
§Returns
A tensor with the same shape as the input tensors containing the minimum value found between each element of the two source tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.min_pair(tensor2);
println!("{tensor}");
// [[1.0, -2.0, 3.0], [1.0, 2.0, 3.0]]
}
Sourcepub fn clamp<E>(self, min: E, max: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn clamp<E>(self, min: E, max: E) -> Tensor<B, D, K>where
E: ElementConversion,
Clamp element wise between the given min and max values.
§Arguments
min
- The minimum value.
max
- The maximum value.
§Returns
A new tensor with the values clamped between the given min and max values.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints(
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
],
&device);
let tensor = tensor.clamp(2, 6);
println!("{tensor}");
// [[2, 2, 3], [4, 5, 6], [6, 6, 6]]
}
Sourcepub fn clamp_min<E>(self, min: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn clamp_min<E>(self, min: E) -> Tensor<B, D, K>where
E: ElementConversion,
Clamp element wise so that no value is below the given minimum.
§Arguments
tensor
- The tensor to clamp.
min
- The minimum value.
§Returns
A new tensor with the values clamped under the given min value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
&device);
let tensor = tensor.clamp_min(4);
println!("{tensor}");
// [[4, 4, 4], [4, 5, 6], [7, 8, 9]]
}
Sourcepub fn clamp_max<E>(self, max: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn clamp_max<E>(self, max: E) -> Tensor<B, D, K>where
E: ElementConversion,
Clamp element wise so that no value exceeds the given maximum.
§Arguments
tensor
- The tensor to clamp.
max
- The maximum value.
§Returns
A new tensor with the values clamped over the given max value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
&device);
let tensor = tensor.clamp_max(5);
println!("{tensor}");
// [[1, 2, 3], [4, 5, 5], [5, 5, 5]]
}
Sourcepub fn abs(self) -> Tensor<B, D, K>
pub fn abs(self) -> Tensor<B, D, K>
Apply element wise absolute value operation.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [4, -5, 6], [7, -8, 9]], &device);
let tensor = tensor.abs();
println!("{tensor}");
// [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
}
Sourcepub fn triu(self, diagonal: i64) -> Tensor<B, D, K>
pub fn triu(self, diagonal: i64) -> Tensor<B, D, K>
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices; the other elements of the result tensor are set to 0.
See also triu_mask.
§Arguments
diagonal
- The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints(
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
],
&device
);
let tensor = tensor.triu(1);
println!("{tensor}");
// [
// [0, 2, 3],
// [0, 0, 6],
// [0, 0, 0]
// ]
}
Sourcepub fn tril(self, diagonal: i64) -> Tensor<B, D, K>
pub fn tril(self, diagonal: i64) -> Tensor<B, D, K>
Returns the lower triangular part of a matrix (2-D tensor) or batch of matrices; the other elements of the result tensor are set to 0.
See also tril_mask.
§Arguments
diagonal
- The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};
fn example<B: Backend>() {
let device = Default::default();
let tensor = Tensor::<B, 2, Int>::from_ints(
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
],
&device
);
let tensor = tensor.tril(-1);
println!("{tensor}");
// [
// [0, 0, 0],
// [4, 0, 0],
// [7, 8, 0]
// ]
}
Sourcepub fn powf(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn powf(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise power operation with a float Tensor
§Arguments
other
- The tensor to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
let tensor = tensor1.powf(tensor2);
println!("{tensor}");
// [[1.0, 8.0, 81.0], [5.0, 81.0, 216.0]]
}
Sourcepub fn powf_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn powf_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies element wise power operation with a float scalar
§Arguments
other
- The scalar to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.powf_scalar(2.0);
println!("{tensor}");
// [[1.0, 4.0, 9.0], [25.0, 81.0, 36.0]]
}
Sourcepub fn powi(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
pub fn powi(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>
Applies element wise power operation with an integer Tensor.
§Arguments
other
- The tensor to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [5, 9, 6]], &device);
let tensor2 = Tensor::<B, 2, Int>::from_ints([[2, 3, 4], [1, 2, 3]], &device);
let tensor = tensor1.powi(tensor2);
println!("{tensor}");
// [[1, -8, 81], [5, 81, 216]]
}
Sourcepub fn powi_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn powi_scalar<E>(self, other: E) -> Tensor<B, D, K>where
E: ElementConversion,
Applies element wise power operation with an integer scalar.
§Arguments
other
- The scalar to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [5, 9, 6]], &device);
let tensor = tensor.powi_scalar(2);
println!("{tensor}");
// [[1, 4, 9], [25, 81, 36]]
let tensor = Tensor::<B, 2>::from_data([[1.5, -2., 3.], [5., 9., 6.]], &device);
let tensor = tensor.powi_scalar(2);
println!("{tensor}");
// [[2.25, 4., 9.], [25., 81., 36.]]
}
Sourcepub fn is_close(
self,
other: Tensor<B, D, K>,
rtol: Option<f64>,
atol: Option<f64>,
) -> Tensor<B, D, Bool>
pub fn is_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> Tensor<B, D, Bool>
Checks element wise if the tensor is close to another tensor.
The tolerance is defined by the following equation:
abs(a - b) <= (atol + rtol * abs(b))
where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.
§Arguments
other
- The tensor to compare with.
rtol
- Optional relative tolerance. Default is 1e-5; see DEFAULT_RTOL.
atol
- Optional absolute tolerance. Default is 1e-8; see DEFAULT_ATOL.
§Returns
A boolean tensor with the same shape as the input tensors.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor1.is_close(tensor2, None, None);
println!("{tensor}");
// [[true, true, true], [true, true, true]]
}
Sourcepub fn all_close(
self,
other: Tensor<B, D, K>,
rtol: Option<f64>,
atol: Option<f64>,
) -> bool
pub fn all_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> bool
Checks if all elements are close to another tensor.
The tolerance is defined by the following equation:
abs(a - b) <= (atol + rtol * abs(b))
where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.
§Arguments
other
- The tensor to compare with.
rtol
- Optional relative tolerance. Default is 1e-5; see DEFAULT_RTOL.
atol
- Optional absolute tolerance. Default is 1e-8; see DEFAULT_ATOL.
§Returns
A boolean scalar.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor2 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let result = tensor1.all_close(tensor2, None, None);
println!("{}", result);
// true
}
Sourcepub fn bool(self) -> Tensor<B, D, Bool>
pub fn bool(self) -> Tensor<B, D, Bool>
Converts the tensor to a boolean tensor by checking if the elements are non-zero.
§Returns
A boolean tensor with the same shape as the input tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [0.0, 9.0, 6.0]], &device);
let tensor = tensor.bool();
println!("{tensor}");
// [
// [true, true, true],
// [false, true, true]
// ]
}
Sourcepub fn random<S>(
shape: S,
distribution: Distribution,
device: &<B as Backend>::Device,
) -> Tensor<B, D, K>
pub fn random<S>( shape: S, distribution: Distribution, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>
Create a random tensor of the given shape on the given device where each element is sampled from the given distribution.
See also random_like.
§Arguments
shape
- The shape of the tensor.
distribution
- The distribution to sample from.
device
- The device to create the tensor on.
§Returns
A new tensor with the given shape and elements sampled from the given distribution.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Distribution};
fn example<B: Backend>() {
let device = B::Device::default();
let distribution = Distribution::Uniform(0.0, 1.0); // Any random value between 0.0 and 1.0
let tensor = Tensor::<B, 2>::random(Shape::new([2, 3]), distribution, &device);
println!("{tensor}");
// [
// [0.08347523, 0.70498955, 0.60332155],
// [0.08173251, 0.18028641, 0.97942924]
// ]
}
Sourcepub fn sort(self, dim: usize) -> Tensor<B, D, K>
pub fn sort(self, dim: usize) -> Tensor<B, D, K>
Sort the elements by value in ascending order along a given dimension.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Returns
A new tensor with the elements sorted in ascending order along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let sorted_dim0 = tensor.clone().sort(0);
println!("{sorted_dim0}");
// [[5.0, -2.0, 3.0], [12.0, 3.0, 6.0]]
let sorted_dim1 = tensor.sort(1);
println!("{sorted_dim1}");
// [[-2.0, 3.0, 12.0], [3.0, 5.0, 6.0]]
}
Sourcepub fn sort_descending(self, dim: usize) -> Tensor<B, D, K>
pub fn sort_descending(self, dim: usize) -> Tensor<B, D, K>
Sort the elements by value in descending order along a given dimension.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Returns
A new tensor with the elements sorted in descending order along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let sorted_dim0 = tensor.clone().sort_descending(0);
println!("{sorted_dim0}");
// [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
let sorted_dim1 = tensor.sort_descending(1);
println!("{sorted_dim1}");
// [[12.0, 3.0, -2.0], [6.0, 5.0, 3.0]]
}
Sourcepub fn sort_with_indices(
self,
dim: usize,
) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
pub fn sort_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
Sort the elements by value in ascending order along a given dimension. Also returns the indices.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Returns
A tuple containing the sorted tensor and the indices tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let (tensor, indices) = tensor.sort_with_indices(0);
println!("{tensor}");
// [[5.0, -2.0, 3.0], [12.0, 3.0, 6.0]]
println!("{}", indices);
// [[1, 0, 0], [0, 1, 1]]
}
Sourcepub fn sort_descending_with_indices(
self,
dim: usize,
) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
pub fn sort_descending_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
Sort the elements by value in descending order along a given dimension. Also returns the indices.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let (tensor, indices) = tensor.sort_descending_with_indices(0);
println!("{tensor}");
// [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
println!("{}", indices);
// [[0, 1, 1], [1, 0, 0]]
}
Sourcepub fn argsort(self, dim: usize) -> Tensor<B, D, Int>
pub fn argsort(self, dim: usize) -> Tensor<B, D, Int>
Returns the indices that sort the elements by value in ascending order along a given dimension.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let tensor = tensor.argsort(0);
println!("{tensor}");
// [[1, 0, 0], [0, 1, 1]]
}
Sourcepub fn argsort_descending(self, dim: usize) -> Tensor<B, D, Int>
pub fn argsort_descending(self, dim: usize) -> Tensor<B, D, Int>
Returns the indices that sort the elements by value in descending order along a given dimension.
This sort is unstable (i.e., may reorder equal elements).
§Arguments
dim
- The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let indices_dim0 = tensor.clone().argsort_descending(0);
println!("{indices_dim0}");
// [[0, 1, 1], [1, 0, 0]]
let indices_dim1 = tensor.argsort_descending(1);
println!("{indices_dim1}");
// [[0, 2, 1], [2, 0, 1]]
}
Sourcepub fn topk(self, k: usize, dim: usize) -> Tensor<B, D, K>
pub fn topk(self, k: usize, dim: usize) -> Tensor<B, D, K>
Returns the k largest elements of the given input tensor along a given dimension.
§Arguments
k
- The number of elements to return.
§Returns
A new tensor with the k largest elements along the given dimension.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let top_dim0 = tensor.clone().topk(2, 0);
println!("{top_dim0}");
// [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
let top_dim1 = tensor.topk(1, 1);
println!("{top_dim1}");
// [[12.0], [6.0]]
}
Sourcepub fn topk_with_indices(
self,
k: usize,
dim: usize,
) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
pub fn topk_with_indices( self, k: usize, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)
Returns the k largest elements of the given input tensor along a given dimension.
Also returns the indices.
§Arguments
k
- The number of elements to return.dim
- The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let (values_dim0, indices_dim0) = tensor.clone().topk_with_indices(2, 0);
println!("{values_dim0}");
// [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
println!("{indices_dim0}");
// [[0, 1, 1], [1, 0, 0]]
let (values_dim1, indices_dim1) = tensor.topk_with_indices(1, 1);
println!("{values_dim1}");
// [[12.0], [6.0]]
println!("{indices_dim1}");
// [[0], [2]]
}
Sourcepub fn pad<E>(
self,
padding: (usize, usize, usize, usize),
value: E,
) -> Tensor<B, D, K>where
E: ElementConversion,
pub fn pad<E>(
self,
padding: (usize, usize, usize, usize),
value: E,
) -> Tensor<B, D, K>where
E: ElementConversion,
Pad the tensor of rank two or higher with the given value on the last two dimensions.
§Arguments
padding
- A tuple of four integers representing the padding on the left, right, top, and bottom.
value
- The value to pad the tensor with.
§Returns
A new tensor with the given padding.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};
fn example<B: Backend<FloatElem: From<f32>>>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
let tensor = tensor.pad((1, 1, 1, 1), 0.0);
println!("{tensor}");
// [
// [0.0, 0.0, 0.0, 0.0, 0.0],
// [0.0, 12.0, -2.0, 3.0, 0.0],
// [0.0, 5.0, 3.0, 6.0, 0.0],
// [0.0, 0.0, 0.0, 0.0, 0.0]
// ]
}
Sourcepub fn one_hot<const D2: usize>(self, num_classes: usize) -> Tensor<B, D2, K>
pub fn one_hot<const D2: usize>(self, num_classes: usize) -> Tensor<B, D2, K>
Create a one hot tensor.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>(){
let device = Default::default();
let indices: Tensor<B, 1> = Tensor::from_floats([0.0, 1.0, 2.0, 3.0], &device);
let one_hot: Tensor<B, 2> = indices.one_hot(4);
println!("{}", one_hot.to_data());
// [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
}
Sourcepub fn one_hot_fill<const D2: usize>(
self,
num_classes: usize,
on_value: f32,
off_value: f32,
axis: i64,
) -> Tensor<B, D2, K>
pub fn one_hot_fill<const D2: usize>( self, num_classes: usize, on_value: f32, off_value: f32, axis: i64, ) -> Tensor<B, D2, K>
Create a one-hot encoded tensor with configurable num_classes, on_value, off_value, and axis, including for high-rank tensors.
§Arguments
num_classes
: The number of classes for the one-hot encoding, which defines the size of the one-hot dimension.
on_value
: The value to assign for active positions (corresponding to indices).
off_value
: The value to assign for inactive positions.
axis
: The axis along which the one-hot dimension is added. Supports negative indexing.
§Returns
A tensor with one additional dimension for the one-hot encoding, where active positions are filled with on_value and others with off_value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Float};
fn example<B: Backend<FloatElem: From<f32>>>() {
let device = B::Device::default();
let indices: Tensor<B, 2, Float> = Tensor::from_floats([[0., 2.], [1., -1.]], &device);
// One-hot encoding
let tensor: Tensor<B, 3, Float> = indices.one_hot_fill(3, 5.0.into(), 0.0.into(), -1);
println!("{tensor}");
// [[[5.0, 0.0, 0.0],
// [0.0, 0.0, 5.0]],
// [[0.0, 5.0, 0.0],
// [0.0, 0.0, 5.0]]]
}
Sourcepub fn is_nan(&self) -> Tensor<B, D, Bool>
pub fn is_nan(&self) -> Tensor<B, D, Bool>
Returns a new tensor with boolean elements indicating whether each element of the input is NaN.
§Returns
A boolean tensor where true indicates NaN and false indicates a non-NaN value.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, f64::NAN, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.is_nan();
println!("{tensor}");
// [[false, true, false], [false, false, false]]
}
Sourcepub fn contains_nan(&self) -> Tensor<B, 1, Bool>
pub fn contains_nan(&self) -> Tensor<B, 1, Bool>
Checks if the tensor contains any NaN values.
§Returns
A boolean tensor with a single element indicating whether the tensor contains any NaN values.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool, Shape};
fn example<B: Backend>() {
let device = B::Device::default();
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [f64::NAN, 9.0, 6.0]], &device);
let tensor = tensor.contains_nan();
println!("{tensor}");
// [true]
let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
let tensor = tensor.contains_nan();
println!("{tensor}");
// [false]
}
Trait Implementations§
Source§impl<const D: usize, B: AutodiffBackend, K: BasicAutodiffOps<B>> AutodiffModule<B> for Tensor<B, D, K>
impl<const D: usize, B: AutodiffBackend, K: BasicAutodiffOps<B>> AutodiffModule<B> for Tensor<B, D, K>
Source§type InnerModule = Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>
type InnerModule = Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>
Source§fn valid(&self) -> Self::InnerModule
fn valid(&self) -> Self::InnerModule
Source§impl<'de, B, const D: usize, K> Deserialize<'de> for Tensor<B, D, K>
impl<'de, B, const D: usize, K> Deserialize<'de> for Tensor<B, D, K>
Source§fn deserialize<De>(
deserializer: De,
) -> Result<Tensor<B, D, K>, <De as Deserializer<'de>>::Error>where
De: Deserializer<'de>,
fn deserialize<De>(
deserializer: De,
) -> Result<Tensor<B, D, K>, <De as Deserializer<'de>>::Error>where
De: Deserializer<'de>,
Source§impl<const D: usize, B: Backend, K: BasicOps<B>> Module<B> for Tensor<B, D, K>
impl<const D: usize, B: Backend, K: BasicOps<B>> Module<B> for Tensor<B, D, K>
Source§type Record = ConstantRecord
type Record = ConstantRecord
Source§fn visit<V: ModuleVisitor<B>>(&self, _visitor: &mut V)
fn visit<V: ModuleVisitor<B>>(&self, _visitor: &mut V)
Source§fn map<M: ModuleMapper<B>>(self, _mapper: &mut M) -> Self
fn map<M: ModuleMapper<B>>(self, _mapper: &mut M) -> Self
Source§fn into_record(self) -> Self::Record
fn into_record(self) -> Self::Record
Source§fn load_record(self, _record: Self::Record) -> Self
fn load_record(self, _record: Self::Record) -> Self
Source§fn to_device(self, device: &B::Device) -> Self
fn to_device(self, device: &B::Device) -> Self
Source§fn fork(self, device: &B::Device) -> Self
fn fork(self, device: &B::Device) -> Self
Source§fn collect_devices(&self, devices: Devices<B>) -> Devices<B>
fn collect_devices(&self, devices: Devices<B>) -> Devices<B>
Source§fn devices(&self) -> Devices<B>
fn devices(&self) -> Devices<B>
Source§fn num_params(&self) -> usize
fn num_params(&self) -> usize
Source§fn save_file<FR, PB>(
self,
file_path: PB,
recorder: &FR,
) -> Result<(), RecorderError>
fn save_file<FR, PB>( self, file_path: PB, recorder: &FR, ) -> Result<(), RecorderError>
Available on crate feature std only.
Source§fn load_file<FR, PB>(
self,
file_path: PB,
recorder: &FR,
device: &B::Device,
) -> Result<Self, RecorderError>
fn load_file<FR, PB>( self, file_path: PB, recorder: &FR, device: &B::Device, ) -> Result<Self, RecorderError>
Available on crate feature std only.
Source§fn quantize_weights(self, quantizer: &mut Quantizer) -> Self
fn quantize_weights(self, quantizer: &mut Quantizer) -> Self
Source§impl<const D: usize, B: Backend, K: BasicOps<B>> ModuleDisplay for Tensor<B, D, K>
impl<const D: usize, B: Backend, K: BasicOps<B>> ModuleDisplay for Tensor<B, D, K>
Source§fn format(&self, passed_settings: DisplaySettings) -> String
fn format(&self, passed_settings: DisplaySettings) -> String
Source§fn custom_settings(&self) -> Option<DisplaySettings>
fn custom_settings(&self) -> Option<DisplaySettings>
Source§impl<B: Backend, const D: usize> Record<B> for Tensor<B, D>
impl<B: Backend, const D: usize> Record<B> for Tensor<B, D>
Source§type Item<S: PrecisionSettings> = FloatTensorSerde<S>
type Item<S: PrecisionSettings> = FloatTensorSerde<S>
Source§impl<B: Backend, const D: usize> Record<B> for Tensor<B, D, Bool>
impl<B: Backend, const D: usize> Record<B> for Tensor<B, D, Bool>
Source§type Item<S: PrecisionSettings> = BoolTensorSerde
type Item<S: PrecisionSettings> = BoolTensorSerde
Source§impl<B: Backend, const D: usize> Record<B> for Tensor<B, D, Int>
impl<B: Backend, const D: usize> Record<B> for Tensor<B, D, Int>
Source§type Item<S: PrecisionSettings> = IntTensorSerde<S>
type Item<S: PrecisionSettings> = IntTensorSerde<S>
Source§impl<B, const D: usize, K> Serialize for Tensor<B, D, K>
impl<B, const D: usize, K> Serialize for Tensor<B, D, K>
Source§fn serialize<S>(
&self,
serializer: S,
) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>where
S: Serializer,
fn serialize<S>(
&self,
serializer: S,
) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>where
S: Serializer,
Auto Trait Implementations§
impl<B, const D: usize, K> Freeze for Tensor<B, D, K>
impl<B, const D: usize, K> RefUnwindSafe for Tensor<B, D, K>
impl<B, const D: usize, K> Send for Tensor<B, D, K>
impl<B, const D: usize, K> Sync for Tensor<B, D, K>
impl<B, const D: usize, K> Unpin for Tensor<B, D, K>
impl<B, const D: usize, K> UnwindSafe for Tensor<B, D, K>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.