Struct burn_ndarray::NdArrayBackend
pub struct NdArrayBackend<E> { /* private fields */ }
Trait Implementations
impl<E: FloatNdArrayElement> Backend for NdArrayBackend<E>
type Device = NdArrayDevice
Device type.
type FullPrecisionElem = f32
Full precision float element type.
type FullPrecisionBackend = NdArrayBackend<f32>
Pointer to another backend that has a full-precision float element type.
type TensorPrimitive<const D: usize> = NdArrayTensor<E, D>
Tensor primitive to be used for all float operations.
type IntTensorPrimitive<const D: usize> = NdArrayTensor<i64, D>
Tensor primitive to be used for all int operations.
type BoolTensorPrimitive<const D: usize> = NdArrayTensor<bool, D>
Tensor primitive to be used for all bool operations.
fn ad_enabled() -> bool
Returns whether autodiff is enabled.
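Example (illustrative sketch, not from the crate's docs): code written against the generic Backend trait only needs the associated types above, and can then be instantiated with NdArrayBackend<f32>. The import paths (burn_tensor::backend::Backend, burn_ndarray::{NdArrayBackend, NdArrayDevice}) are assumptions that may differ between burn versions.

use burn_ndarray::{NdArrayBackend, NdArrayDevice};
use burn_tensor::backend::Backend;

// Generic code only relies on the associated types declared by `Backend`.
fn describe_backend<B: Backend>(_device: &B::Device) {
    println!("autodiff enabled: {}", B::ad_enabled());
}

fn main() {
    // `NdArrayDevice` is this backend's `Device` associated type.
    let device = NdArrayDevice::Cpu;
    describe_backend::<NdArrayBackend<f32>>(&device);
}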
impl<E: FloatNdArrayElement> BoolTensorOps<NdArrayBackend<E>> for NdArrayBackend<E>
fn bool_from_data<const D: usize>( data: Data<bool, D>, _device: &NdArrayDevice ) -> NdArrayTensor<bool, D>
fn bool_shape<const D: usize>( tensor: &<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> Shape<D>
fn bool_to_data<const D: usize>( tensor: &<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> Data<bool, D>
fn bool_into_data<const D: usize>( tensor: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> Data<bool, D>
fn bool_to_device<const D: usize>( tensor: NdArrayTensor<bool, D>, _device: &NdArrayDevice ) -> NdArrayTensor<bool, D>
fn bool_reshape<const D1: usize, const D2: usize>( tensor: NdArrayTensor<bool, D1>, shape: Shape<D2> ) -> NdArrayTensor<bool, D2>
fn bool_index<const D1: usize, const D2: usize>( tensor: NdArrayTensor<bool, D1>, indexes: [Range<usize>; D2] ) -> NdArrayTensor<bool, D1>
fn bool_into_int<const D: usize>( tensor: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> NdArrayTensor<i64, D>
fn bool_device<const D: usize>( _tensor: &<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> <NdArrayBackend<E> as Backend>::Device
fn bool_empty<const D: usize>( shape: Shape<D>, _device: &<NdArrayBackend<E> as Backend>::Device ) -> <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
fn bool_index_assign<const D1: usize, const D2: usize>( tensor: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D1>, indexes: [Range<usize>; D2], value: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D1> ) -> <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D1>
fn bool_cat<const D: usize>( tensors: Vec<<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>>, dim: usize ) -> <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
fn bool_equal<const D: usize>( lhs: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>, rhs: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D> ) -> <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
fn bool_equal_elem<const D: usize>( lhs: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>, rhs: bool ) -> <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
fn bool_repeat<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, dim: usize, times: usize ) -> <B as Backend>::BoolTensorPrimitive<D>
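Example (illustrative sketch): the bool ops above can be called as associated functions once the trait is in scope. The import paths and the Data::new/Shape::new constructors are assumptions about this burn_tensor version.

use burn_ndarray::{NdArrayBackend, NdArrayDevice};
use burn_tensor::ops::{BoolTensorOps, IntTensorOps};
use burn_tensor::{Data, Shape};

type B = NdArrayBackend<f32>;

fn main() {
    let device = NdArrayDevice::Cpu;

    // A 2x2 bool tensor built from raw data.
    let data = Data::new(vec![true, false, false, true], Shape::new([2, 2]));
    let tensor = B::bool_from_data(data, &device);

    // Convert to an i64 tensor (true -> 1, false -> 0) and read it back.
    let ints = B::bool_into_int(tensor);
    println!("{:?}", B::int_into_data(ints)); // assumes Data derives Debug
}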
impl<E: Clone> Clone for NdArrayBackend<E>
fn clone(&self) -> NdArrayBackend<E>
Returns a copy of the value.
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl<E: Debug> Debug for NdArrayBackend<E>
impl<E: Default> Default for NdArrayBackend<E>
fn default() -> NdArrayBackend<E>
Returns the “default value” for a type.
impl<E: FloatNdArrayElement> IntTensorOps<NdArrayBackend<E>> for NdArrayBackend<E>
fn int_from_data<const D: usize>( data: Data<i64, D>, _device: &NdArrayDevice ) -> NdArrayTensor<i64, D>
fn int_shape<const D: usize>(tensor: &NdArrayTensor<i64, D>) -> Shape<D>
fn int_to_data<const D: usize>(tensor: &NdArrayTensor<i64, D>) -> Data<i64, D>
fn int_into_data<const D: usize>(tensor: NdArrayTensor<i64, D>) -> Data<i64, D>
fn int_to_device<const D: usize>( tensor: NdArrayTensor<i64, D>, _device: &NdArrayDevice ) -> NdArrayTensor<i64, D>
fn int_reshape<const D1: usize, const D2: usize>( tensor: NdArrayTensor<i64, D1>, shape: Shape<D2> ) -> NdArrayTensor<i64, D2>
fn int_index<const D1: usize, const D2: usize>( tensor: NdArrayTensor<i64, D1>, indexes: [Range<usize>; D2] ) -> NdArrayTensor<i64, D1>
fn int_device<const D: usize>( _tensor: &NdArrayTensor<i64, D> ) -> <NdArrayBackend<E> as Backend>::Device
fn int_empty<const D: usize>( shape: Shape<D>, _device: &<NdArrayBackend<E> as Backend>::Device ) -> NdArrayTensor<i64, D>
fn int_index_assign<const D1: usize, const D2: usize>( tensor: NdArrayTensor<i64, D1>, indexes: [Range<usize>; D2], value: NdArrayTensor<i64, D1> ) -> NdArrayTensor<i64, D1>
fn int_cat<const D: usize>( tensors: Vec<NdArrayTensor<i64, D>>, dim: usize ) -> NdArrayTensor<i64, D>
fn int_equal<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<bool, D>
fn int_equal_elem<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<bool, D>
fn int_greater<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<bool, D>
fn int_greater_elem<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<bool, D>
fn int_greater_equal<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<bool, D>
fn int_greater_equal_elem<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<bool, D>
fn int_lower<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<bool, D>
fn int_lower_elem<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<bool, D>
fn int_lower_equal<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<bool, D>
fn int_lower_equal_elem<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<bool, D>
fn int_add<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_add_scalar<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<i64, D>
fn int_sub<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_sub_scalar<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<i64, D>
fn int_mul<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_mul_scalar<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<i64, D>
fn int_div<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_div_scalar<const D: usize>( lhs: NdArrayTensor<i64, D>, rhs: i64 ) -> NdArrayTensor<i64, D>
fn int_neg<const D: usize>( tensor: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_zeros<const D: usize>( shape: Shape<D>, device: &<NdArrayBackend<E> as Backend>::Device ) -> NdArrayTensor<i64, D>
fn int_ones<const D: usize>( shape: Shape<D>, device: &<NdArrayBackend<E> as Backend>::Device ) -> NdArrayTensor<i64, D>
fn int_sum<const D: usize>( tensor: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, 1>
fn int_sum_dim<const D: usize>( tensor: NdArrayTensor<i64, D>, dim: usize ) -> NdArrayTensor<i64, D>
fn int_mean<const D: usize>( tensor: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, 1>
fn int_mean_dim<const D: usize>( tensor: NdArrayTensor<i64, D>, dim: usize ) -> NdArrayTensor<i64, D>
fn int_index_select<const D: usize>( tensor: NdArrayTensor<i64, D>, indexes: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_index_select_assign<const D: usize>( tensor: NdArrayTensor<i64, D>, indexes: NdArrayTensor<i64, D>, value: NdArrayTensor<i64, D> ) -> NdArrayTensor<i64, D>
fn int_index_select_dim<const D: usize>( tensor: NdArrayTensor<i64, D>, dim: usize, indexes: NdArrayTensor<i64, 1> ) -> NdArrayTensor<i64, D>
fn int_index_select_dim_assign<const D1: usize, const D2: usize>( tensor: NdArrayTensor<i64, D1>, dim: usize, indexes: NdArrayTensor<i64, 1>, value: NdArrayTensor<i64, D2> ) -> NdArrayTensor<i64, D1>
fn int_repeat<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, dim: usize, times: usize ) -> <B as Backend>::IntTensorPrimitive<D>
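Example (illustrative sketch): a few of the integer ops above, chained on a small tensor. Import paths and the Data::new/Shape::new constructors are assumptions about this burn_tensor version.

use burn_ndarray::{NdArrayBackend, NdArrayDevice};
use burn_tensor::ops::IntTensorOps;
use burn_tensor::{Data, Shape};

type B = NdArrayBackend<f32>;

fn main() {
    let device = NdArrayDevice::Cpu;

    // A 1x3 i64 tensor: [[1, 2, 3]].
    let a = B::int_from_data(Data::new(vec![1, 2, 3], Shape::new([1, 3])), &device);

    // Element-wise arithmetic with a scalar, then a full reduction.
    let b = B::int_add_scalar(a, 10);          // [[11, 12, 13]]
    let total = B::int_sum(b);                 // rank-1 tensor holding 36
    println!("{:?}", B::int_into_data(total)); // assumes Data derives Debug
}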
impl<E: FloatNdArrayElement> ModuleOps<NdArrayBackend<E>> for NdArrayBackend<E>
fn embedding( weights: NdArrayTensor<E, 2>, indexes: NdArrayTensor<i64, 2> ) -> NdArrayTensor<E, 3>
fn embedding_backward( weights: NdArrayTensor<E, 2>, output: NdArrayTensor<E, 3>, indexes: NdArrayTensor<i64, 2> ) -> NdArrayTensor<E, 2>
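embedding looks up one row of a [vocab_size, d_model] weight matrix per entry of a [batch, seq_len] index tensor, producing a [batch, seq_len, d_model] output; embedding_backward accumulates the output gradient back into the selected rows. A plain-Rust sketch of the forward gather (a reference for the semantics, not the burn-ndarray implementation):

// weights: [vocab, d_model] flattened row-major; indexes: [batch, seq] flattened.
// Output is [batch, seq, d_model] flattened row-major.
fn embedding_ref(weights: &[f32], d_model: usize, indexes: &[i64]) -> Vec<f32> {
    let mut out = Vec::with_capacity(indexes.len() * d_model);
    for &idx in indexes {
        let row = idx as usize * d_model;
        // Copy one embedding row per index.
        out.extend_from_slice(&weights[row..row + d_model]);
    }
    out
}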
fn conv2d( x: NdArrayTensor<E, 4>, weight: NdArrayTensor<E, 4>, bias: Option<NdArrayTensor<E, 1>>, stride: [usize; 2], padding: [usize; 2] ) -> NdArrayTensor<E, 4>
Two-dimensional convolution.
fn max_pool2d( x: NdArrayTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2] ) -> NdArrayTensor<E, 4>
Two-dimensional max pooling.
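Both conv2d and max_pool2d take a stride and padding pair and expose no dilation parameter, so the usual output-size rule with dilation 1 applies to each spatial dimension. A small helper illustrating that rule (not part of the crate):

// Output spatial size for conv2d / max_pool2d given the signatures above
// (no dilation parameter, so dilation is effectively 1).
fn out_size(input: usize, kernel: usize, stride: usize, padding: usize) -> usize {
    (input + 2 * padding - kernel) / stride + 1
}

fn main() {
    // A [batch, channels, 32, 32] input with a 3x3 kernel, stride 1, padding 1
    // keeps its spatial size.
    assert_eq!(out_size(32, 3, 1, 1), 32);
    // max_pool2d with a 2x2 window and stride 2 halves it.
    assert_eq!(out_size(32, 2, 2, 0), 16);
}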
fn max_pool2d_with_indexes( x: NdArrayTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2] ) -> MaxPool2dWithIndexes<NdArrayBackend<E>>
Two-dimensional max pooling with indexes.
fn max_pool2d_with_indexes_backward( x: NdArrayTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], output_grad: NdArrayTensor<E, 4>, indexes: NdArrayTensor<i64, 4> ) -> MaxPool2dBackward<NdArrayBackend<E>>
Backward pass for the max pooling 2d operation.
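The backward pass routes each output gradient to the input position that produced the maximum in the forward pass, as recorded in indexes. A plain-Rust sketch over flattened buffers (semantics only, not the burn-ndarray implementation):

// Each output gradient is added at the flat input position recorded by the
// forward pass; all other input positions receive zero gradient.
fn max_pool_backward_ref(input_len: usize, output_grad: &[f32], indexes: &[i64]) -> Vec<f32> {
    let mut input_grad = vec![0.0f32; input_len];
    for (grad, &idx) in output_grad.iter().zip(indexes) {
        input_grad[idx as usize] += grad;
    }
    input_grad
}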
fn conv2d_backward( x: <B as Backend>::TensorPrimitive<4>, weight: <B as Backend>::TensorPrimitive<4>, bias: Option<<B as Backend>::TensorPrimitive<1>>, stride: [usize; 2], output_grad: <B as Backend>::TensorPrimitive<4> ) -> Conv2dBackward<B>
Backward pass for the conv2d operation.
fn conv1d( x: <B as Backend>::TensorPrimitive<3>, weight: <B as Backend>::TensorPrimitive<3>, bias: Option<<B as Backend>::TensorPrimitive<1>>, stride: usize, padding: usize ) -> <B as Backend>::TensorPrimitive<3>
One-dimensional convolution.
fn conv1d_backward( x: <B as Backend>::TensorPrimitive<3>, weight: <B as Backend>::TensorPrimitive<3>, bias: Option<<B as Backend>::TensorPrimitive<1>>, stride: usize, output_grad: <B as Backend>::TensorPrimitive<3> ) -> Conv1dBackward<B>
Backward pass for the conv1d operation.
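conv1d slides a [channels_out, channels_in, kernel] weight over a [batch, channels_in, length] input with the given stride and padding. A naive plain-Rust reference for a single batch element, illustrating the shapes and zero-padding (not the crate's provided implementation):

// x is [c_in][length], weight is [c_out][c_in][kernel], bias is [c_out].
// Returns [c_out][out_len]; out-of-range input positions act as zeros.
fn conv1d_ref(
    x: &[Vec<f32>],
    weight: &[Vec<Vec<f32>>],
    bias: Option<&[f32]>,
    stride: usize,
    padding: usize,
) -> Vec<Vec<f32>> {
    let length = x[0].len();
    let kernel = weight[0][0].len();
    let out_len = (length + 2 * padding - kernel) / stride + 1;

    weight
        .iter()
        .enumerate()
        .map(|(co, w_co)| {
            (0..out_len)
                .map(|o| {
                    let mut acc = bias.map_or(0.0, |b| b[co]);
                    for (ci, w_ci) in w_co.iter().enumerate() {
                        for (k, &w) in w_ci.iter().enumerate() {
                            // Input position shifted by the padding offset.
                            let pos = o * stride + k;
                            if pos >= padding && pos - padding < length {
                                acc += x[ci][pos - padding] * w;
                            }
                        }
                    }
                    acc
                })
                .collect()
        })
        .collect()
}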