Struct nncombinator::cuda::mem::CachedTensor
pub struct CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
{ /* private fields */ }
Description
Object that collectively manages CUDA memory paired with a value of a specified type.
Implementations
impl<U, T> CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
pub fn new(
value: T,
memory_pool: &Arc<Mutex<MemoryPool>>
) -> Result<CachedTensor<U, T>, CudaError>
pub fn new(
value: T,
memory_pool: &Arc<Mutex<MemoryPool>>
) -> Result<CachedTensor<U, T>, CudaError>
pub fn scoped_mut<'a>(&'a mut self) -> ScopedMut<'a, U, T>
pub fn scoped_mut<'a>(&'a mut self) -> ScopedMut<'a, U, T>
Returns the `ScopedMut` object associated with the value it holds.
Trait Implementations
impl<U, T> AsPtr<U> for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> AsPtr<U> for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> AsVoidPtr for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> AsVoidPtr for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
fn as_void_ptr(&self) -> *const c_void
impl<U, T> Deref for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> Deref for CachedTensor<U, T> where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<const NI: usize, const NO: usize> DeviceLinear<f32, CachedTensor<f32, Arr2<f32, NI, NO>>, NI, NO> for DeviceGpu<f32>
impl<const NI: usize, const NO: usize> DeviceLinear<f32, CachedTensor<f32, Arr2<f32, NI, NO>>, NI, NO> for DeviceGpu<f32>
fn forward_linear(
&self,
bias: &Arr<f32, NO>,
units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
input: &Arr<f32, NI>
) -> Result<Arr<f32, NO>, EvaluateError>
fn forward_linear(
&self,
bias: &Arr<f32, NO>,
units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
input: &Arr<f32, NI>
) -> Result<Arr<f32, NO>, EvaluateError>
Forward propagation calculation. Read more
fn backward_linear(
&self,
units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
input: &Arr<f32, NO>
) -> Result<Arr<f32, NI>, TrainingError>
fn backward_linear(
&self,
units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
input: &Arr<f32, NO>
) -> Result<Arr<f32, NI>, TrainingError>
Error back-propagation calculation. Read more
impl<const NI: usize, const NO: usize> DeviceLinear<f64, CachedTensor<f64, Arr2<f64, NI, NO>>, NI, NO> for DeviceGpu<f64>
impl<const NI: usize, const NO: usize> DeviceLinear<f64, CachedTensor<f64, Arr2<f64, NI, NO>>, NI, NO> for DeviceGpu<f64>
fn forward_linear(
&self,
bias: &Arr<f64, NO>,
units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
input: &Arr<f64, NI>
) -> Result<Arr<f64, NO>, EvaluateError>
fn forward_linear(
&self,
bias: &Arr<f64, NO>,
units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
input: &Arr<f64, NI>
) -> Result<Arr<f64, NO>, EvaluateError>
Forward propagation calculation. Read more
fn backward_linear(
&self,
units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
input: &Arr<f64, NO>
) -> Result<Arr<f64, NI>, TrainingError>
fn backward_linear(
&self,
units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
input: &Arr<f64, NO>
) -> Result<Arr<f64, NI>, TrainingError>
Error back-propagation calculation. Read more
Auto Trait Implementations
impl<U, T> RefUnwindSafe for CachedTensor<U, T> where
T: RefUnwindSafe,
U: RefUnwindSafe,
impl<U, T> !Send for CachedTensor<U, T>
impl<U, T> !Sync for CachedTensor<U, T>
impl<U, T> Unpin for CachedTensor<U, T> where
T: Unpin,
impl<U, T> UnwindSafe for CachedTensor<U, T> where
T: UnwindSafe,
U: RefUnwindSafe,
Blanket Implementations
impl<T> BorrowMut<T> for T where
    T: ?Sized,
impl<T> BorrowMut<T> for T where
    T: ?Sized,
const: unstable · fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more