Struct burn_ndarray::NdArrayBackend
pub struct NdArrayBackend<E> { /* private fields */ }

Trait Implementations
source§impl<E: NdArrayElement> Backend for NdArrayBackend<E>
impl<E: NdArrayElement> Backend for NdArrayBackend<E>
type Device = NdArrayDevice
type Elem = E
type FullPrecisionElem = f32
type FullPrecisionBackend = NdArrayBackend<f32>
type IntegerBackend = NdArrayBackend<i64>
type TensorPrimitive<const D: usize> = NdArrayTensor<E, D>
type BoolTensorPrimitive<const D: usize> = NdArrayTensor<bool, D>
fn ad_enabled() -> bool
fn name() -> String
fn seed(seed: u64)
source§impl<E: Clone> Clone for NdArrayBackend<E>
impl<E: Clone> Clone for NdArrayBackend<E>
source§fn clone(&self) -> NdArrayBackend<E>
fn clone(&self) -> NdArrayBackend<E>
Returns a copy of the value. Read more
1.0.0 · fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more

impl<E: Debug> Debug for NdArrayBackend<E>
impl<E: Debug> Debug for NdArrayBackend<E>
source§impl<E: Default> Default for NdArrayBackend<E>
impl<E: Default> Default for NdArrayBackend<E>
source§fn default() -> NdArrayBackend<E>
fn default() -> NdArrayBackend<E>
Returns the “default value” for a type. Read more
source§impl<E: NdArrayElement> ModuleOps<NdArrayBackend<E>> for NdArrayBackend<E>
impl<E: NdArrayElement> ModuleOps<NdArrayBackend<E>> for NdArrayBackend<E>
source§impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E>
impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E>
fn from_data<const D: usize>(
data: Data<E, D>,
_device: NdArrayDevice
) -> NdArrayTensor<E, D>
fn from_data_bool<const D: usize>(
data: Data<bool, D>,
_device: NdArrayDevice
) -> NdArrayTensor<bool, D>
fn random<const D: usize>(
shape: Shape<D>,
distribution: Distribution<E>,
device: NdArrayDevice
) -> NdArrayTensor<E, D>
fn shape<const D: usize>(
tensor: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> &Shape<D>
fn to_data<const D: usize>(
tensor: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> Data<<NdArrayBackend<E> as Backend>::Elem, D>
fn into_data<const D: usize>(
tensor: <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> Data<<NdArrayBackend<E> as Backend>::Elem, D>
fn bool_shape<const D: usize>(
tensor: &<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
) -> &Shape<D>
fn bool_to_data<const D: usize>(
tensor: &<NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
) -> Data<bool, D>
fn bool_into_data<const D: usize>(
tensor: <NdArrayBackend<E> as Backend>::BoolTensorPrimitive<D>
) -> Data<bool, D>
fn bool_to_device<const D: usize>(
tensor: &NdArrayTensor<bool, D>,
_device: NdArrayDevice
) -> NdArrayTensor<bool, D>
fn bool_reshape<const D1: usize, const D2: usize>(
tensor: &NdArrayTensor<bool, D1>,
shape: Shape<D2>
) -> NdArrayTensor<bool, D2>
fn device<const D: usize>(_tensor: &NdArrayTensor<E, D>) -> NdArrayDevice
fn to_device<const D: usize>(
tensor: &NdArrayTensor<E, D>,
_device: NdArrayDevice
) -> NdArrayTensor<E, D>
fn empty<const D: usize>(
shape: Shape<D>,
device: <NdArrayBackend<E> as Backend>::Device
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn add<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn add_scalar<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &E
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn sub<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn sub_scalar<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &E
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn mul<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn mul_scalar<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &E
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn div<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn div_scalar<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &E
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn matmul<const D: usize>(
lhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>,
rhs: &<NdArrayBackend<E> as Backend>::TensorPrimitive<D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn neg<const D: usize>(
tensor: &NdArrayTensor<E, D>
) -> <NdArrayBackend<E> as Backend>::TensorPrimitive<D>
fn swap_dims<const D: usize>(
tensor: &NdArrayTensor<E, D>,
dim1: usize,
dim2: usize
) -> NdArrayTensor<E, D>
fn reshape<const D1: usize, const D2: usize>(
tensor: &NdArrayTensor<E, D1>,
shape: Shape<D2>
) -> NdArrayTensor<E, D2>
fn bool_index<const D1: usize, const D2: usize>(
tensor: &NdArrayTensor<bool, D1>,
indexes: [Range<usize>; D2]
) -> NdArrayTensor<bool, D1>
fn index<const D1: usize, const D2: usize>(
tensor: &NdArrayTensor<E, D1>,
indexes: [Range<usize>; D2]
) -> NdArrayTensor<E, D1>
fn index_assign<const D1: usize, const D2: usize>(
tensor: &NdArrayTensor<E, D1>,
indexes: [Range<usize>; D2],
value: &NdArrayTensor<E, D1>
) -> NdArrayTensor<E, D1>
fn mask_fill<const D: usize>(
tensor: &NdArrayTensor<E, D>,
mask: &NdArrayTensor<bool, D>,
value: E
) -> NdArrayTensor<E, D>
fn equal<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &NdArrayTensor<E, D>
) -> NdArrayTensor<bool, D>
fn equal_scalar<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &E
) -> NdArrayTensor<bool, D>
fn greater<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &NdArrayTensor<E, D>
) -> NdArrayTensor<bool, D>
fn greater_scalar<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &E
) -> NdArrayTensor<bool, D>
fn greater_equal<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &NdArrayTensor<E, D>
) -> NdArrayTensor<bool, D>
fn greater_equal_scalar<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &E
) -> NdArrayTensor<bool, D>
fn lower<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &NdArrayTensor<E, D>
) -> NdArrayTensor<bool, D>
fn lower_scalar<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &E
) -> NdArrayTensor<bool, D>
fn lower_equal<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &NdArrayTensor<E, D>
) -> NdArrayTensor<bool, D>
fn lower_equal_scalar<const D: usize>(
lhs: &NdArrayTensor<E, D>,
rhs: &E
) -> NdArrayTensor<bool, D>
fn detach<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn mean<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, 1>
fn sum<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, 1>
fn mean_dim<const D: usize>(
tensor: &NdArrayTensor<E, D>,
dim: usize
) -> NdArrayTensor<E, D>
fn sum_dim<const D: usize>(
tensor: &NdArrayTensor<E, D>,
dim: usize
) -> NdArrayTensor<E, D>
fn to_full_precision<const D: usize>(
tensor: &NdArrayTensor<E, D>
) -> NdArrayTensor<f32, D>
fn from_full_precision<const D: usize>(
tensor: &NdArrayTensor<f32, D>
) -> NdArrayTensor<E, D>
fn argmax<const D: usize>(
tensor: &NdArrayTensor<E, D>,
dim: usize
) -> NdArrayTensor<i64, D>
fn argmin<const D: usize>(
tensor: &NdArrayTensor<E, D>,
dim: usize
) -> NdArrayTensor<i64, D>
fn exp<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn log<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn powf<const D: usize>(
tensor: &NdArrayTensor<E, D>,
value: f32
) -> NdArrayTensor<E, D>
fn sqrt<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn erf<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn cat<const D: usize>(
tensors: &[NdArrayTensor<E, D>],
dim: usize
) -> NdArrayTensor<E, D>
fn relu<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<E, D>
fn zeros<const D: usize>(
shape: Shape<D>,
device: <B as Backend>::Device
) -> <B as Backend>::TensorPrimitive<D>
fn ones<const D: usize>(
shape: Shape<D>,
device: <B as Backend>::Device
) -> <B as Backend>::TensorPrimitive<D>
fn arange(
range: Range<usize>,
device: <B as Backend>::Device
) -> <<B as Backend>::IntegerBackend as Backend>::TensorPrimitive<1>
fn repeat<const D: usize>(
tensor: &<B as Backend>::TensorPrimitive<D>,
dim: usize,
times: usize
) -> <B as Backend>::TensorPrimitive<D>
fn transpose<const D: usize>(
tensor: &<B as Backend>::TensorPrimitive<D>
) -> <B as Backend>::TensorPrimitive<D>
impl<E: Copy> Copy for NdArrayBackend<E>
Auto Trait Implementations§
impl<E> RefUnwindSafe for NdArrayBackend<E> where E: RefUnwindSafe
impl<E> Send for NdArrayBackend<E> where E: Send
impl<E> Sync for NdArrayBackend<E> where E: Sync
impl<E> Unpin for NdArrayBackend<E> where E: Unpin
impl<E> UnwindSafe for NdArrayBackend<E> where E: UnwindSafe
Blanket Implementations§
impl<SS, SP> SupersetOf<SS> for SP where SS: SubsetOf<SP>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more

fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.