pub struct CudaVec<U, T> { /* private fields */ }
Implementations
Trait Implementations
impl<U, T> AsCudaMutPtr for CudaVec<U, T>
type Pointer = CudaMemoryPoolPtr<U>
Returned CUDA smart pointer type.
fn as_cuda_mut_ptr<'a>(&'a mut self) -> CudaMutPtr<'a, Self::Pointer>
impl<U, T> AsCudaPtrRef for CudaVec<U, T>
type Pointer = CudaMemoryPoolPtr<U>
Returned CUDA smart pointer type.
fn as_cuda_ptr_ref(&self) -> &Self::Pointer
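Together these two impls expose the device-side smart pointer behind a CudaVec to kernel-launch code. Below is a minimal sketch, not taken from the crate's documentation, that is generic over both traits; imports are omitted because this page does not show module paths, and the caller is assumed to already own a suitable buffer.

```rust
// Sketch only: generic over the two pointer-access traits documented above.
// Imports omitted; bring AsCudaPtrRef and AsCudaMutPtr into scope from this crate.
fn with_cuda_pointers<V>(buffer: &mut V)
where
    V: AsCudaPtrRef + AsCudaMutPtr,
{
    // Shared access to the underlying smart pointer
    // (a CudaMemoryPoolPtr<U> for CudaVec, per the impls above).
    let _shared = buffer.as_cuda_ptr_ref();

    // Mutable access, e.g. for a kernel launch that writes into the buffer.
    let _mutable = buffer.as_cuda_mut_ptr();
}
```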
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for Identity<U, DeviceGpu<U>>
fn batch_apply(
    &self,
    _: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    _: &DeviceGpu<U>,
    _: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    _: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
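The remaining BatchActivation impls on this page (ReLu, Sigmoid, SoftMax, Swish, Tanh below) share exactly this shape, so one sketch covers the forward path for all of them. It is not crate documentation: the generic form of batch_apply is inferred from these impls, the view borrow relies on the TryFrom<&'a CudaVec<U, T>> impl documented further down, and imports plus the construction of device and batch are omitted because this page does not show them.

```rust
// Sketch: one batched forward pass over f32 data of per-sample width N.
// Imports omitted; bring BatchActivation, CudaVec, CudaVecView,
// CudaTensor1dPtr, DeviceGpu and TrainingError into scope from this crate.
fn forward_batch<'a, A, const N: usize>(
    activation: &A,
    device: &DeviceGpu<f32>,
    batch: &'a CudaVec<f32, CudaTensor1dPtr<f32, N>>,
) -> Result<CudaVec<f32, CudaTensor1dPtr<f32, N>>, TrainingError>
where
    A: BatchActivation<
        f32,
        CudaVecView<'a, f32, CudaTensor1dPtr<f32, N>>,
        CudaVec<f32, CudaTensor1dPtr<f32, N>>,
        DeviceGpu<f32>,
    >,
{
    // Borrow a device-side view of the whole mini-batch (no copy),
    // via TryFrom<&'a CudaVec<U, T>> for CudaVecView<'a, U, R>.
    let input = CudaVecView::try_from(batch).expect("borrowing the batch view failed");

    // Run the activation kernel over every sample in the batch.
    activation.batch_apply(device, &input)
}
```

Instantiated with, say, &ReLu<f32, DeviceGpu<f32>>, this resolves to the ReLu impl below, provided its kernel bounds hold for the chosen N.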
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for ReLu<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> ReLuBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> ReLuBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for Sigmoid<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> SigmoidBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> SigmoidBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for SoftMax<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> SoftMaxBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> SoftMaxBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for Swish<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> SwishBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> SwishBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
impl<'a, U, const N: usize> BatchActivation<U, CudaVecView<'a, U, CudaTensor1dPtr<U, N>>, CudaVec<U, CudaTensor1dPtr<U, N>>, DeviceGpu<U>> for Tanh<U, DeviceGpu<U>>
where
    U: UnitValue<U> + DataTypeInfo,
    DeviceGpu<U>: Device<U>,
    for<'b> TanhBatchForward<'b, U, N>: Kernel<Args = ActivationBatchForwardArgs<'b, U, N>>,
    for<'b> TanhBatchBackward<'b, U, N>: Kernel<Args = ActivationBatchBackwardArgs<'b, U, N>>,
fn batch_apply(
    &self,
    device: &DeviceGpu<U>,
    input: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply the activation function.
fn batch_derive(
    &self,
    device: &DeviceGpu<U>,
    o: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    loss: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
    u: &CudaVecView<'a, U, CudaTensor1dPtr<U, N>>,
) -> Result<CudaVec<U, CudaTensor1dPtr<U, N>>, TrainingError>
Apply derivatives of the activation function.
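For the backward path, all six impls above share the batch_derive signature, so one sketch again suffices. The meaning assigned to o, loss and u (forward output, incoming gradient, pre-activation input) follows the usual forward/backward convention and is an assumption; this page only lists the parameter names. Imports and construction of the arguments are omitted as before.

```rust
// Sketch, not crate documentation: one batched backward step over f32 data.
fn backward_batch<'a, A, const N: usize>(
    activation: &A,
    device: &DeviceGpu<f32>,
    o: &'a CudaVec<f32, CudaTensor1dPtr<f32, N>>,    // forward output (assumed meaning)
    loss: &'a CudaVec<f32, CudaTensor1dPtr<f32, N>>, // gradient from the next layer
    u: &'a CudaVec<f32, CudaTensor1dPtr<f32, N>>,    // pre-activation input (assumed meaning)
) -> Result<CudaVec<f32, CudaTensor1dPtr<f32, N>>, TrainingError>
where
    A: BatchActivation<
        f32,
        CudaVecView<'a, f32, CudaTensor1dPtr<f32, N>>,
        CudaVec<f32, CudaTensor1dPtr<f32, N>>,
        DeviceGpu<f32>,
    >,
{
    // Borrow device-side views of the three batches (no copies).
    let o = CudaVecView::try_from(o).expect("view of the forward output");
    let loss = CudaVecView::try_from(loss).expect("view of the incoming gradient");
    let u = CudaVecView::try_from(u).expect("view of the pre-activations");

    // Applies the activation's derivative across the batch and combines it
    // with `loss`, per the batch_derive docs above.
    activation.batch_derive(device, &o, &loss, &u)
}
```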
impl<U, T> IntoConverter for CudaVec<U, T>
type Converter = CudaVecConverter<U, T>
Converter type to go through when converting into the target type.
fn into_converter(self) -> Self::Converter
impl<U, T> PointerElement for CudaVec<U, T>
impl<U, T> ToHost<U> for CudaVec<U, T>
where
    U: Debug + Default + Clone + Copy + Send + UnitValue<U>,
    SerializedVec<U, <T as ToHost<U>>::Output>: TryFrom<Box<[U]>, Error = TypeConvertError>,
    for<'a> <T as ToHost<U>>::Output: SliceSize + MakeView<'a, U>,
    for<'a> T: MemorySize + AsKernelPtr + AsConstKernelPtr + ToHost<U>,
impl<'a, U, T, R> TryFrom<&'a CudaVec<U, T>> for CudaVecView<'a, U, R>
where
    U: UnitValue<U> + Default + Clone + Send,
    T: MemorySize + AsKernelPtr + AsConstKernelPtr,
    R: MemorySize + AsKernelPtr + AsConstKernelPtr + TryFrom<T>,
impl<'a, U, T> TryFrom<&'a CudaVecView<'a, U, T>> for CudaVec<U, T>
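Taken together, the two TryFrom impls above let a read-only device-side view be borrowed from an owned CudaVec and then materialized back into a fresh owned CudaVec. A sketch under the same caveats as before (imports omitted, constructors not shown on this page; whether the second conversion performs a device-to-device copy is an assumption):

```rust
// Sketch: duplicate a batch on the device by going CudaVec -> view -> CudaVec.
fn duplicate_batch<const N: usize>(
    batch: &CudaVec<f32, CudaTensor1dPtr<f32, N>>,
) -> CudaVec<f32, CudaTensor1dPtr<f32, N>> {
    // &CudaVec -> CudaVecView: a borrow, no device copy.
    let view = CudaVecView::try_from(batch).expect("borrowing the view failed");

    // &CudaVecView -> CudaVec: presumably a fresh device-side allocation plus copy.
    CudaVec::try_from(&view).expect("materializing an owned copy failed")
}
```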
impl<U, T, R> TryFrom<CudaVecConverter<U, T>> for CudaVec<U, R>
where
    U: UnitValue<U> + Default + Clone + Send,
    T: MemorySize + AsKernelPtr + AsConstKernelPtr,
    R: MemorySize + AsKernelPtr + AsConstKernelPtr + From<T>,
type Error = TypeConvertError
The type returned in the event of a conversion error.
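Combined with the IntoConverter impl above, this TryFrom impl is the documented route from a CudaVec<U, T> to a CudaVec<U, R> when R: From<T>. The sketch below uses only items listed on this page, but pins the associated types through explicit where-clause bounds because the crate's concrete error plumbing is not shown here.

```rust
// Sketch: CudaVec<U, T> -> CudaVecConverter<U, T> -> CudaVec<U, R>.
// Imports omitted; bring CudaVec, CudaVecConverter, IntoConverter and
// TypeConvertError into scope from this crate.
fn convert_batch<U, T, R>(batch: CudaVec<U, T>) -> Result<CudaVec<U, R>, TypeConvertError>
where
    CudaVec<U, T>: IntoConverter<Converter = CudaVecConverter<U, T>>,
    CudaVec<U, R>: TryFrom<CudaVecConverter<U, T>, Error = TypeConvertError>,
{
    // Go through the intermediate converter type, then fall out the other side.
    <CudaVec<U, R> as TryFrom<CudaVecConverter<U, T>>>::try_from(batch.into_converter())
}
```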
Auto Trait Implementations
impl<U, T> Freeze for CudaVec<U, T>
impl<U, T> RefUnwindSafe for CudaVec<U, T>
where
    T: RefUnwindSafe,
    U: RefUnwindSafe,
impl<U, T> !Send for CudaVec<U, T>
impl<U, T> !Sync for CudaVec<U, T>
impl<U, T> Unpin for CudaVec<U, T>
where
    T: Unpin,
impl<U, T> UnwindSafe for CudaVec<U, T>
where
    T: UnwindSafe,
    U: RefUnwindSafe,
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
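These blanket impls come from the either crate rather than from this crate, so their behavior does not depend on CudaVec at all. A small self-contained sketch with i32 shows the semantics; any owned value, including a CudaVec, behaves the same way.

```rust
use either::{Either, IntoEither};

fn main() {
    // into_either: Left when the flag is true, Right otherwise.
    assert_eq!(5.into_either(true), Either::Left(5));
    assert_eq!(5.into_either(false), Either::Right(5));

    // into_either_with: the closure inspects &self to decide the variant.
    assert_eq!(10.into_either_with(|n| *n > 3), Either::Left(10));
}
```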