pub struct CudaPtr<T> { /* private fields */ }
Expand description
Wrapper to handle cuda device memory
Implementations§
Trait Implementations§
Source§impl<T> AsMutVoidPtr for CudaPtr<T>
impl<T> AsMutVoidPtr for CudaPtr<T>
fn as_mut_void_ptr(&mut self) -> *mut c_void
Source§impl<U, P, OP, I, PI, const N: usize> BatchNormalizationLayerInstantiation<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N> for BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>
impl<U, P, OP, I, PI, const N: usize> BatchNormalizationLayerInstantiation<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N> for BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>
Source§fn with_params<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>(
parent: P,
device: &DeviceGpu<U>,
scale: Arr<U, N>,
bias: Arr<U, N>,
momentum: U,
b: &B,
) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
fn with_params<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>( parent: P, device: &DeviceGpu<U>, scale: Arr<U, N>, bias: Arr<U, N>, momentum: U, b: &B, ) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
Create and return an instance with the specified scale, bias, and momentum. Read more
Source§fn with_momentum<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>(
parent: P,
device: &DeviceGpu<U>,
momentum: U,
b: &B,
) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
fn with_momentum<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>( parent: P, device: &DeviceGpu<U>, momentum: U, b: &B, ) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
Create and return an instance with the momentum. Read more
Source§fn new<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>(
parent: P,
device: &DeviceGpu<U>,
b: &B,
) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
fn new<B: OptimizerBuilder<U, DeviceGpu<U>, Output = OP>>( parent: P, device: &DeviceGpu<U>, b: &B, ) -> Result<BatchNormalizationLayer<U, CudaTensor1dPtr<U, N>, P, OP, DeviceGpu<U>, I, PI, CudaPtr<U>, N>, LayerInstantiationError>
Create and return an instance. Read more
Source§impl<CP> MemoryMoveTo<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>> for CP where
CP: AsCudaPtrRef + PointerElement,
<CP as AsCudaPtrRef>::Pointer: MemoryMoveTo<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>>,
impl<CP> MemoryMoveTo<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>> for CP where
CP: AsCudaPtrRef + PointerElement,
<CP as AsCudaPtrRef>::Pointer: MemoryMoveTo<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>>,
Source§impl<T: Default + Debug> MemoryMoveTo<T, CudaHostPtr<T>> for CudaPtr<T>
impl<T: Default + Debug> MemoryMoveTo<T, CudaHostPtr<T>> for CudaPtr<T>
Source§impl<T: Default + Debug> MemoryMoveTo<T, CudaMemoryPoolPtr<T>> for CudaPtr<T>
impl<T: Default + Debug> MemoryMoveTo<T, CudaMemoryPoolPtr<T>> for CudaPtr<T>
Source§impl<T: Default + Debug> MemoryMoveTo<T, CudaPtr<T>> for CudaHostPtr<T>
impl<T: Default + Debug> MemoryMoveTo<T, CudaPtr<T>> for CudaHostPtr<T>
Source§impl<T: Default + Debug> MemoryMoveTo<T, CudaPtr<T>> for CudaMemoryPoolPtr<T>
impl<T: Default + Debug> MemoryMoveTo<T, CudaPtr<T>> for CudaMemoryPoolPtr<T>
Source§impl<CP> MemoryMoveToAsync<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>> for CP where
CP: AsCudaPtrRef + PointerElement,
<CP as AsCudaPtrRef>::Pointer: MemoryMoveToAsync<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>>,
impl<CP> MemoryMoveToAsync<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>> for CP where
CP: AsCudaPtrRef + PointerElement,
<CP as AsCudaPtrRef>::Pointer: MemoryMoveToAsync<<CP as PointerElement>::Element, CudaPtr<<CP as PointerElement>::Element>>,
Source§fn memcpy_to_async(
&self,
dst: &mut CudaPtr<<CP as PointerElement>::Element>,
len: usize,
stream: cudaStream_t,
) -> Result<usize, Error>
fn memcpy_to_async( &self, dst: &mut CudaPtr<<CP as PointerElement>::Element>, len: usize, stream: cudaStream_t, ) -> Result<usize, Error>
Memory Copy To Read more
Source§impl<T: Default + Debug> MemoryMoveToAsync<T, CudaPtr<T>> for CudaHostPtr<T>
impl<T: Default + Debug> MemoryMoveToAsync<T, CudaPtr<T>> for CudaHostPtr<T>
Source§fn memcpy_to_async(
&self,
dst: &mut CudaPtr<T>,
len: usize,
stream: cudaStream_t,
) -> Result<usize, Error>
fn memcpy_to_async( &self, dst: &mut CudaPtr<T>, len: usize, stream: cudaStream_t, ) -> Result<usize, Error>
Memory Copy To Read more
Auto Trait Implementations§
impl<T> Freeze for CudaPtr<T>
impl<T> RefUnwindSafe for CudaPtr<T> where
T: RefUnwindSafe,
impl<T> !Send for CudaPtr<T>
impl<T> !Sync for CudaPtr<T>
impl<T> Unpin for CudaPtr<T>
impl<T> UnwindSafe for CudaPtr<T> where
T: RefUnwindSafe,
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more