Struct nncombinator::cuda::mem::CachedTensor
pub struct CachedTensor<U, T>
where
    U: Debug + Default,
    T: AsRawSlice<U>,
{ /* private fields */ }
Expand description
An object that collectively manages CUDA memory paired with a value of a specified type.
Implementations
impl<U, T> CachedTensor<U, T>
where
    U: Debug + Default,
    T: AsRawSlice<U>,
pub fn new(
    value: T,
    memory_pool: &Arc<Mutex<MemoryPool>>
) -> Result<CachedTensor<U, T>, CudaError>
pub fn scoped_mut<'a>(&'a mut self) -> ScopedMut<'a, U, T>
Returns the ScopedMut object associated with the value it holds.
Trait Implementations
impl<U, T> AsPtr<U> for CachedTensor<U, T>
where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<U, T> AsVoidPtr for CachedTensor<U, T>
where
    U: Debug + Default,
    T: AsRawSlice<U>,
fn as_void_ptr(&self) -> *const c_void
impl<U, T> Deref for CachedTensor<U, T>
where
    U: Debug + Default,
    T: AsRawSlice<U>,
impl<const NI: usize, const NO: usize> DeviceLinear<f32, CachedTensor<f32, Arr2<f32, NI, NO>>, NI, NO> for DeviceGpu<f32>
fn forward_linear(
    &self,
    bias: &Arr<f32, NO>,
    units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
    input: &Arr<f32, NI>
) -> Result<Arr<f32, NO>, EvaluateError>
Forward propagation calculation. Read more
fn backward_linear(
    &self,
    units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
    input: &Arr<f32, NO>
) -> Result<Arr<f32, NI>, TrainingError>
Error back propagation calculation. Read more
fn backward_weight_gradient(
    &self,
    o: &Arr<f32, NI>,
    loss: &Arr<f32, NO>
) -> Result<Arr2<f32, NI, NO>, TrainingError>
Calculate the gradient of the weights. Read more
fn batch_forward_linear(
    &self,
    bias: &Arr<f32, NO>,
    units: &CachedTensor<f32, Arr2<f32, NI, NO>>,
    input: &VecArr<f32, Arr<f32, NI>>
) -> Result<VecArr<f32, Arr<f32, NO>>, TrainingError>
Forward propagation calculation in batch. Read more
impl<const NI: usize, const NO: usize> DeviceLinear<f64, CachedTensor<f64, Arr2<f64, NI, NO>>, NI, NO> for DeviceGpu<f64>
fn forward_linear(
    &self,
    bias: &Arr<f64, NO>,
    units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
    input: &Arr<f64, NI>
) -> Result<Arr<f64, NO>, EvaluateError>
Forward propagation calculation. Read more
fn backward_linear(
    &self,
    units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
    input: &Arr<f64, NO>
) -> Result<Arr<f64, NI>, TrainingError>
Error back propagation calculation. Read more
fn backward_weight_gradient(
    &self,
    o: &Arr<f64, NI>,
    loss: &Arr<f64, NO>
) -> Result<Arr2<f64, NI, NO>, TrainingError>
Calculate the gradient of the weights. Read more
fn batch_forward_linear(
    &self,
    bias: &Arr<f64, NO>,
    units: &CachedTensor<f64, Arr2<f64, NI, NO>>,
    input: &VecArr<f64, Arr<f64, NI>>
) -> Result<VecArr<f64, Arr<f64, NO>>, TrainingError>
Forward propagation calculation in batch. Read more