Struct nncombinator::device::DeviceGpu

pub struct DeviceGpu<U> {
    pub memory_pool: Arc<Mutex<MemoryPool>>,
    /* private fields */
}

Implementation of Device whose computations are performed on the GPU.

Fields

memory_pool: Arc<Mutex<MemoryPool>>
Memory pool for CUDA memory allocation.
Implementations

impl<U> DeviceGpu<U>
where
    U: UnitValue<U>,

pub fn new(memory_pool: &Arc<Mutex<MemoryPool>>) -> Result<DeviceGpu<U>, DeviceError>
Creates a DeviceGpu that allocates its device memory from the given shared memory pool.

pub fn cublas(&self) -> &CublasContext
Returns the CublasContext owned by this device.
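
A minimal construction sketch. The MemoryPool constructor and module paths below are assumptions for illustration (the real MemoryPool API may take a size or allocation strategy); only DeviceGpu::new is taken from this page.

use std::sync::{Arc, Mutex};
use nncombinator::cuda::mem::MemoryPool; // assumed path
use nncombinator::device::DeviceGpu;

fn build_device() -> Result<DeviceGpu<f32>, Box<dyn std::error::Error>> {
    // Hypothetical constructor; check your crate version for the actual signature.
    let memory_pool = Arc::new(Mutex::new(MemoryPool::new()?));
    // new() borrows the shared pool, so one pool can back several devices.
    let device = DeviceGpu::<f32>::new(&memory_pool)?;
    Ok(device)
}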
Trait Implementations
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>
where
    U: UnitValue<U>,
    DeviceGpu<U>: Device<U>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, _: &Arr<U, N>, loss: &Arr<U, N>, _: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether the canonical link function can be used.
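
A hedged usage sketch for the Activation impls on this page. The activation constructor and Arr::new() are assumed from the crate's common API shape; exact paths and signatures may differ in your version.

use nncombinator::activation::{Activation, Identity}; // assumed paths
use nncombinator::arr::Arr;
use nncombinator::device::DeviceGpu;

fn run_identity(device: &DeviceGpu<f32>) -> Result<Arr<f32, 4>, Box<dyn std::error::Error>> {
    // Assumed constructor: activations in this crate are typically built from a device reference.
    let activation = Identity::<f32, DeviceGpu<f32>>::new(device);
    // Assumed zero-initialized fixed-size array.
    let input: Arr<f32, 4> = Arr::new();
    // Identity's apply() returns the input values unchanged.
    Ok(activation.apply(device, &input)?)
}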
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, o: &Arr<U, N>, loss: &Arr<U, N>, u: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether the canonical link function can be used.
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, o: &Arr<U, N>, loss: &Arr<U, N>, u: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether the canonical link function can be used.
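
is_canonical_link is the hook a network uses to decide whether the simplified output-layer gradient (actual - expected) applies; sigmoid paired with cross-entropy is the classic canonical pairing. A hedged check, assuming the constructors shown (they are not taken from this page):

use nncombinator::activation::{Activation, Sigmoid}; // assumed paths
use nncombinator::arr::Arr;
use nncombinator::device::DeviceGpu;
use nncombinator::lossfunction::CrossEntropy;

fn canonical_pairing(device: &DeviceGpu<f32>) -> bool {
    let sigmoid = Sigmoid::<f32, DeviceGpu<f32>>::new(device); // assumed constructor
    let loss = CrossEntropy::<f32>::new();                     // assumed constructor
    // The impl is generic over N, so the trait must be named explicitly
    // for the call to resolve. True means the output layer may backpropagate
    // (actual - expected) directly instead of calling derive().
    <Sigmoid<f32, DeviceGpu<f32>> as Activation<f32, Arr<f32, 4>, Arr<f32, 4>, DeviceGpu<f32>>>::is_canonical_link(&sigmoid, &loss)
}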
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    CudaPtr<U>: TryFrom<U, Error = CudaError>,
    SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, o: &Arr<U, N>, loss: &Arr<U, N>, u: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, l: &L) -> bool
Returns whether the canonical link function can be used.
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, o: &Arr<U, N>, loss: &Arr<U, N>, u: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether the canonical link function can be used.
impl<U, const N: usize> Activation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

fn apply(&self, _: &DeviceGpu<U>, input: &Arr<U, N>) -> Result<Arr<U, N>, EvaluateError>
Applies the activation function.

fn derive(&self, _: &DeviceGpu<U>, o: &Arr<U, N>, loss: &Arr<U, N>, u: &Arr<U, N>) -> Result<Arr<U, N>, TrainingError>
Applies the derivative of the activation function.

fn is_canonical_link<L: LossFunction<U>>(&self, _: &L) -> bool
Returns whether the canonical link function can be used.
impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>
where
    U: UnitValue<U>,
    DeviceGpu<U>: Device<U>,

impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    ReLuForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    ReLuBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    SigmoidForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SigmoidBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    CudaPtr<U>: TryFrom<U, Error = CudaError>,
    SoftMaxForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SoftMaxBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    SwishForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    SwishBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

impl<U, const N: usize> BatchActivation<U, Arr<U, N>, Arr<U, N>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    TanhForward<U>: Kernel<Args = ActivationForwardArgs<U>>,
    TanhBackward<U>: Kernel<Args = ActivationBackwardArgs<U>>,

impl<U> BatchLossFunction<U, DeviceGpu<U>> for CrossEntropy<U>
where
    U: Clone + Copy + UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    CudaPtr<U>: TryFrom<U, Error = CudaError>,
    LinearBatchCrossEntropy<U>: Kernel<Args = LinearBatchCrossEntropyArgs<U>>,

impl<U> BatchLossFunction<U, DeviceGpu<U>> for CrossEntropyMulticlass<U>
where
    U: Clone + Copy + UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    CudaPtr<U>: TryFrom<U, Error = CudaError>,
    LinearBatchCrossEntropyMulticlass<U>: Kernel<Args = LinearBatchCrossEntropyMulticlassArgs<U>>,

impl<U> BatchLossFunction<U, DeviceGpu<U>> for Mse<U>
where
    U: Clone + Copy + UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    CudaPtr<U>: TryFrom<U, Error = CudaError>,
    LinearBatchMse<U>: Kernel<Args = LinearBatchMseArgs<U>>,
impl Device<f32> for DeviceGpu<f32>

fn loss_linear<L, const N: usize>(&self, expected: &Arr<f32, N>, actual: &Arr<f32, N>, lossf: &L) -> Arr<f32, N>
where
    L: LossFunction<f32>,
Calculates the loss.

fn loss_linear_by_canonical_link<const N: usize>(&self, expected: &Arr<f32, N>, actual: &Arr<f32, N>) -> Arr<f32, N>
Calculates the loss via the canonical link.

fn loss_linear_total<L: LossFunction<f32>, const N: usize>(&self, exptected: &Arr<f32, N>, actual: &Arr<f32, N>, lossf: &L) -> f32
Calculates the total loss.

fn loss_linear_batch_by_canonical_link<const N: usize>(&self, expected: &VecArr<f32, Arr<f32, N>>, actual: &VecArr<f32, Arr<f32, N>>) -> Result<VecArr<f32, Arr<f32, N>>, TrainingError>
Calculates the loss via the canonical link during batch execution.

fn batch_linear_reduce<const N: usize>(&self, loss: &VecArr<f32, Arr<f32, N>>) -> Result<Arr<f32, N>, TrainingError>
Reduces the per-sample losses in a batch into a single array.

fn batch_loss_linear_total<L: LossFunction<f32>, const N: usize>(&self, exptected: &VecArr<f32, Arr<f32, N>>, actual: &VecArr<f32, Arr<f32, N>>, lossf: &L) -> Result<f32, TrainingError>
where
    f64: From<f32> + FromPrimitive,
Calculates the total loss over the whole batch.
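
A hedged sketch of the scalar loss entry points above. Mse::new() and the module paths are assumptions; the Device trait must be in scope to call these methods.

use nncombinator::arr::Arr;
use nncombinator::device::{Device, DeviceGpu};
use nncombinator::lossfunction::Mse; // assumed path

fn report_loss(device: &DeviceGpu<f32>, expected: &Arr<f32, 10>, actual: &Arr<f32, 10>) -> f32 {
    let lossf = Mse::new(); // assumed constructor
    // Per-element losses: the usual starting point of backpropagation.
    let _per_element = device.loss_linear(expected, actual, &lossf);
    // Scalar total: convenient for logging training progress.
    device.loss_linear_total(expected, actual, &lossf)
}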
impl Device<f64> for DeviceGpu<f64>

fn loss_linear<L, const N: usize>(&self, expected: &Arr<f64, N>, actual: &Arr<f64, N>, lossf: &L) -> Arr<f64, N>
where
    L: LossFunction<f64>,
Calculates the loss.

fn loss_linear_by_canonical_link<const N: usize>(&self, expected: &Arr<f64, N>, actual: &Arr<f64, N>) -> Arr<f64, N>
Calculates the loss via the canonical link.

fn loss_linear_total<L: LossFunction<f64>, const N: usize>(&self, exptected: &Arr<f64, N>, actual: &Arr<f64, N>, lossf: &L) -> f64
Calculates the total loss.

fn loss_linear_batch_by_canonical_link<const N: usize>(&self, expected: &VecArr<f64, Arr<f64, N>>, actual: &VecArr<f64, Arr<f64, N>>) -> Result<VecArr<f64, Arr<f64, N>>, TrainingError>
Calculates the loss via the canonical link during batch execution.

fn batch_linear_reduce<const N: usize>(&self, loss: &VecArr<f64, Arr<f64, N>>) -> Result<Arr<f64, N>, TrainingError>
Reduces the per-sample losses in a batch into a single array.

fn batch_loss_linear_total<L: LossFunction<f64>, const N: usize>(&self, exptected: &VecArr<f64, Arr<f64, N>>, actual: &VecArr<f64, Arr<f64, N>>, lossf: &L) -> Result<f64, TrainingError>
where
    f64: From<f64> + FromPrimitive,
Calculates the total loss over the whole batch.
impl<const NI: usize, const NO: usize> DeviceLinear<f32, CachedTensor<f32, Arr2<f32, NI, NO>>, NI, NO> for DeviceGpu<f32>

fn forward_linear(&self, bias: &Arr<f32, NO>, units: &CachedTensor<f32, Arr2<f32, NI, NO>>, input: &Arr<f32, NI>) -> Result<Arr<f32, NO>, EvaluateError>
Forward propagation calculation.

fn backward_linear(&self, units: &CachedTensor<f32, Arr2<f32, NI, NO>>, input: &Arr<f32, NO>) -> Result<Arr<f32, NI>, TrainingError>
Error backpropagation calculation.

fn backward_weight_gradient(&self, o: &Arr<f32, NI>, loss: &Arr<f32, NO>) -> Result<Arr2<f32, NI, NO>, TrainingError>
Calculates the gradient of the weights.

fn batch_forward_linear(&self, bias: &Arr<f32, NO>, units: &CachedTensor<f32, Arr2<f32, NI, NO>>, input: &VecArr<f32, Arr<f32, NI>>) -> Result<VecArr<f32, Arr<f32, NO>>, TrainingError>
Forward propagation calculation in batch.
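
In terms of the NI/NO shapes above, these methods implement the standard fully connected layer; the relations below are inferred from the signatures rather than stated on this page. Writing x for input, W for units, b for bias, and \ell for loss:

y_j = b_j + \sum_{i=1}^{NI} x_i \, W_{ij} \qquad (j = 1, \dots, NO)

\frac{\partial E}{\partial W_{ij}} = o_i \, \ell_j, \qquad \delta^{\mathrm{in}}_i = \sum_{j=1}^{NO} W_{ij} \, \ell_j

forward_linear computes y, backward_weight_gradient computes the outer product \partial E / \partial W, and backward_linear propagates the loss back through W to produce \delta^{\mathrm{in}}.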
impl<const NI: usize, const NO: usize> DeviceLinear<f64, CachedTensor<f64, Arr2<f64, NI, NO>>, NI, NO> for DeviceGpu<f64>

fn forward_linear(&self, bias: &Arr<f64, NO>, units: &CachedTensor<f64, Arr2<f64, NI, NO>>, input: &Arr<f64, NI>) -> Result<Arr<f64, NO>, EvaluateError>
Forward propagation calculation.

fn backward_linear(&self, units: &CachedTensor<f64, Arr2<f64, NI, NO>>, input: &Arr<f64, NO>) -> Result<Arr<f64, NI>, TrainingError>
Error backpropagation calculation.

fn backward_weight_gradient(&self, o: &Arr<f64, NI>, loss: &Arr<f64, NO>) -> Result<Arr2<f64, NI, NO>, TrainingError>
Calculates the gradient of the weights.

fn batch_forward_linear(&self, bias: &Arr<f64, NO>, units: &CachedTensor<f64, Arr2<f64, NI, NO>>, input: &VecArr<f64, Arr<f64, NI>>) -> Result<VecArr<f64, Arr<f64, NO>>, TrainingError>
Forward propagation calculation in batch.
impl<U> DeviceMemoryPool for DeviceGpu<U>

fn get_memory_pool(&self) -> &Arc<Mutex<MemoryPool>>
Returns the memory pool object owned by this device.
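
Using the pool is plain Arc<Mutex<...>> handling; a short sketch (the trait's module path is an assumption):

use nncombinator::device::{DeviceGpu, DeviceMemoryPool}; // assumed path

fn with_pool<U>(device: &DeviceGpu<U>) {
    // Lock the shared pool; the guard releases it at the end of the scope.
    let _pool = device.get_memory_pool().lock().expect("memory pool mutex poisoned");
    // ... allocate or inspect device memory while holding the guard ...
}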