pub struct Tensor<D: Device, T: Scalar> {
pub context: Context,
/* private fields */
}

Fields

context: Context

Implementations

impl<T: Scalar, K: Kind> Tensor<Gpu<K>, T>
pub fn load(&self, host: &TensorCpu<'_, T>) -> Result<(), TensorError>
pub fn load_batch(&self, host: &TensorCpu<'_, T>, batch: usize) -> Result<(), TensorError>
pub fn destroy(self)
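A minimal upload sketch, assuming the crate's tensor types are in scope, that f32 implements Scalar, and that `device` and `host` were created elsewhere with compatible shapes:

fn upload(
    device: &Tensor<Gpu<ReadWrite>, f32>,
    host: &TensorCpu<'_, f32>,
) -> Result<(), TensorError> {
    // Copy the whole host tensor into the GPU buffer; mismatched shapes yield a TensorError.
    device.load(host)?;
    Ok(())
}

load_batch follows the same pattern but writes into a single batch slot instead of the whole tensor.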
impl<'a, T: Scalar> Tensor<Cpu<'a, T>, T>
pub fn map<U: Scalar>(self, f: impl FnMut(&T) -> U) -> TensorCpu<'a, U>
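A sketch of an element-wise type conversion with map, assuming half::f16 also implements Scalar in this crate:

use half::f16;

fn to_half(host: TensorCpu<'_, f32>) -> TensorCpu<'_, f16> {
    // Consumes the source tensor and converts each element.
    host.map(|&x| f16::from_f32(x))
}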
pub fn split(self, axis: usize) -> Result<Vec<Self>, TensorError>
Split the tensor along the highest plural axis.
pub fn stack(batches: Vec<Self>) -> Result<Self, TensorError>
Concatenate a batch of tensors into a single tensor.
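A round-trip sketch with split and stack; the axis index used here is an assumption about which dimension holds the batches:

fn split_and_restack(host: TensorCpu<'_, f32>) -> Result<TensorCpu<'_, f32>, TensorError> {
    // One tensor per index along axis 2 (assumed to be the batch axis).
    let batches = host.split(2)?;
    // Stitch them back together into a single tensor.
    TensorCpu::stack(batches)
}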
pub fn slice(&self, x: impl TensorAxis, y: impl TensorAxis, z: impl TensorAxis, w: impl TensorAxis) -> Result<TensorCpu<'a, T>, TensorError>
pub fn into_slice(self, x: impl TensorAxis, y: impl TensorAxis, z: impl TensorAxis, w: impl TensorAxis) -> Result<Self, TensorError>
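A slicing sketch; using `..` for a full axis and a usize for a single index assumes the crate provides TensorAxis impls for ranges and usize:

fn first_z_plane<'a>(host: &TensorCpu<'a, f32>) -> Result<TensorCpu<'a, f32>, TensorError> {
    // All of x and y, index 0 along z, all of w.
    host.slice(.., .., 0usize, ..)
}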
impl<T: Scalar> Tensor<Gpu<ReadWrite>, T>
pub fn view(&self, x: impl TensorAxis, y: impl TensorAxis, z: impl TensorAxis, w: impl TensorAxis) -> Result<TensorView<'_, T>, TensorError>
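A sketch of building a full view over a read-write GPU tensor, for example to bind it to a compute pass; the `..` arguments again assume range-based TensorAxis impls:

fn whole_view<T: Scalar>(
    tensor: &Tensor<Gpu<ReadWrite>, T>,
) -> Result<TensorView<'_, T>, TensorError> {
    // View covering the full extent of all four axes.
    tensor.view(.., .., .., ..)
}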
Trait Implementations

Auto Trait Implementations
impl<D, T> !RefUnwindSafe for Tensor<D, T>
impl<D, T> Send for Tensor<D, T>
impl<D, T> Sync for Tensor<D, T>
impl<D, T> Unpin for Tensor<D, T>
impl<D, T> !UnwindSafe for Tensor<D, T>
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.