Skip to main content

AccelProvider

Trait AccelProvider 

Source
pub trait AccelProvider: Send + Sync {
Show 202 methods // Required methods fn upload(&self, host: &HostTensorView<'_>) -> Result<GpuTensorHandle>; fn download<'a>(&'a self, h: &'a GpuTensorHandle) -> AccelDownloadFuture<'a>; fn free(&self, h: &GpuTensorHandle) -> Result<()>; fn device_info(&self) -> String; // Provided methods fn device_id(&self) -> u32 { ... } fn export_context( &self, _kind: AccelContextKind, ) -> Option<AccelContextHandle> { ... } fn gather_linear( &self, _source: &GpuTensorHandle, _indices: &[u32], _output_shape: &[usize], ) -> Result<GpuTensorHandle> { ... } fn scatter_linear( &self, _target: &GpuTensorHandle, _indices: &[u32], _values: &GpuTensorHandle, ) -> Result<()> { ... } fn device_info_struct(&self) -> ApiDeviceInfo { ... } fn precision(&self) -> ProviderPrecision { ... } fn read_scalar( &self, _h: &GpuTensorHandle, _linear_index: usize, ) -> Result<f64> { ... } fn zeros(&self, _shape: &[usize]) -> Result<GpuTensorHandle> { ... } fn ones(&self, _shape: &[usize]) -> Result<GpuTensorHandle> { ... } fn zeros_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn fill(&self, shape: &[usize], value: f64) -> Result<GpuTensorHandle> { ... } fn fill_like( &self, prototype: &GpuTensorHandle, value: f64, ) -> Result<GpuTensorHandle> { ... } fn ones_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn eye(&self, _shape: &[usize]) -> Result<GpuTensorHandle> { ... } fn eye_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn meshgrid( &self, _axes: &[MeshgridAxisView<'_>], ) -> Result<ProviderMeshgridResult> { ... } fn diag_from_vector( &self, _vector: &GpuTensorHandle, _offset: isize, ) -> Result<GpuTensorHandle> { ... } fn diag_extract( &self, _matrix: &GpuTensorHandle, _offset: isize, ) -> Result<GpuTensorHandle> { ... } fn tril<'a>( &'a self, _matrix: &'a GpuTensorHandle, _offset: isize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn triu<'a>( &'a self, _matrix: &'a GpuTensorHandle, _offset: isize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn polyval( &self, _coefficients: &GpuTensorHandle, _points: &GpuTensorHandle, _options: &ProviderPolyvalOptions, ) -> Result<GpuTensorHandle> { ... } fn polyfit<'a>( &'a self, _x: &'a GpuTensorHandle, _y: &'a GpuTensorHandle, _degree: usize, _weights: Option<&'a GpuTensorHandle>, ) -> AccelProviderFuture<'a, ProviderPolyfitResult> { ... } fn polyder_single<'a>( &'a self, _polynomial: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn polyder_product<'a>( &'a self, _p: &'a GpuTensorHandle, _q: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn polyder_quotient<'a>( &'a self, _u: &'a GpuTensorHandle, _v: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, ProviderPolyderQuotient> { ... } fn polyint( &self, _polynomial: &GpuTensorHandle, _constant: f64, ) -> Result<GpuTensorHandle> { ... } fn random_uniform(&self, _shape: &[usize]) -> Result<GpuTensorHandle> { ... } fn random_uniform_like( &self, prototype: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn random_normal(&self, _shape: &[usize]) -> Result<GpuTensorHandle> { ... } fn random_normal_like( &self, prototype: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn stochastic_evolution( &self, _state: &GpuTensorHandle, _drift: f64, _scale: f64, _steps: u32, ) -> Result<GpuTensorHandle> { ... } fn set_rng_state(&self, _state: u64) -> Result<()> { ... } fn fspecial(&self, _request: &FspecialRequest) -> Result<GpuTensorHandle> { ... } fn imfilter<'a>( &'a self, _image: &'a GpuTensorHandle, _kernel: &'a GpuTensorHandle, _options: &'a ImfilterOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn random_integer_range( &self, _lower: i64, _upper: i64, _shape: &[usize], ) -> Result<GpuTensorHandle> { ... 
} fn random_integer_like( &self, prototype: &GpuTensorHandle, lower: i64, upper: i64, ) -> Result<GpuTensorHandle> { ... } fn random_permutation( &self, _n: usize, _k: usize, ) -> Result<GpuTensorHandle> { ... } fn random_permutation_like( &self, _prototype: &GpuTensorHandle, n: usize, k: usize, ) -> Result<GpuTensorHandle> { ... } fn covariance<'a>( &'a self, _matrix: &'a GpuTensorHandle, _second: Option<&'a GpuTensorHandle>, _weights: Option<&'a GpuTensorHandle>, _options: &'a CovarianceOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn corrcoef<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: &'a CorrcoefOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn linspace( &self, _start: f64, _stop: f64, _count: usize, ) -> Result<GpuTensorHandle> { ... } fn elem_add<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_mul<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_max<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_min<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_sub<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_div<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_pow<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_hypot<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_ge<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn elem_le<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_lt<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_gt<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_eq<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn elem_ne<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn logical_and( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn logical_or( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn logical_xor( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn logical_not(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn logical_islogical(&self, a: &GpuTensorHandle) -> Result<bool> { ... } fn logical_isreal(&self, _a: &GpuTensorHandle) -> Result<bool> { ... } fn logical_isfinite(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn logical_isnan(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn logical_isinf(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn elem_atan2<'a>( &'a self, _y: &'a GpuTensorHandle, _x: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_sin<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_gamma<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_factorial<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn unary_asinh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_sinh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_cosh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_asin<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_acos<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_acosh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_tan<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_tanh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_atan<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_atanh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_ceil<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_floor<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_round<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_fix<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_cos<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_angle<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_imag<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_real<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn unary_conj<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_abs<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_sign<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_exp<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_expm1<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_log<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_log2<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_log10<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_log1p<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_sqrt<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_double<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_single<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unary_pow2<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn pow2_scale( &self, _mantissa: &GpuTensorHandle, _exponent: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn scalar_rsub( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_rdiv( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_add( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_sub( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... 
} fn scalar_mul( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_max( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_min( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn scalar_div( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle> { ... } fn sort_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _order: SortOrder, _comparison: SortComparison, ) -> AccelProviderFuture<'a, SortResult> { ... } fn sort_rows<'a>( &'a self, _a: &'a GpuTensorHandle, _columns: &'a [SortRowsColumnSpec], _comparison: SortComparison, ) -> AccelProviderFuture<'a, SortResult> { ... } fn matmul<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn syrk(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn pagefun(&self, _request: &PagefunRequest) -> Result<GpuTensorHandle> { ... } fn matmul_epilogue<'a>( &'a self, a: &'a GpuTensorHandle, b: &'a GpuTensorHandle, epilogue: &'a MatmulEpilogue, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn image_normalize<'a>( &'a self, _input: &'a GpuTensorHandle, _desc: &'a ImageNormalizeDescriptor, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn matmul_power_step<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _epilogue: &'a PowerStepEpilogue, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn linsolve<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _options: &'a ProviderLinsolveOptions, ) -> AccelProviderFuture<'a, ProviderLinsolveResult> { ... } fn inv<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: ProviderInvOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn pinv<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: ProviderPinvOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn cond<'a>( &'a self, _matrix: &'a GpuTensorHandle, _norm: ProviderCondNorm, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn norm<'a>( &'a self, _tensor: &'a GpuTensorHandle, _order: ProviderNormOrder, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn rank<'a>( &'a self, _matrix: &'a GpuTensorHandle, _tolerance: Option<f64>, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn rcond<'a>( &'a self, _matrix: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn mldivide<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn mrdivide<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn eig<'a>( &'a self, _a: &'a GpuTensorHandle, _compute_left: bool, ) -> AccelProviderFuture<'a, ProviderEigResult> { ... } fn lu<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, ProviderLuResult> { ... } fn chol<'a>( &'a self, _a: &'a GpuTensorHandle, _lower: bool, ) -> AccelProviderFuture<'a, ProviderCholResult> { ... } fn qr<'a>( &'a self, _a: &'a GpuTensorHandle, _options: ProviderQrOptions, ) -> AccelProviderFuture<'a, ProviderQrResult> { ... } fn take_matmul_sources( &self, _product: &GpuTensorHandle, ) -> Option<(GpuTensorHandle, GpuTensorHandle)> { ... } fn qr_power_iter<'a>( &'a self, product: &'a GpuTensorHandle, _product_lhs: Option<&'a GpuTensorHandle>, q_handle: &'a GpuTensorHandle, options: &'a ProviderQrOptions, ) -> AccelProviderFuture<'a, Option<ProviderQrPowerIterResult>> { ... } fn transpose(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn conv1d( &self, _signal: &GpuTensorHandle, _kernel: &GpuTensorHandle, _options: ProviderConv1dOptions, ) -> Result<GpuTensorHandle> { ... } fn conv2d( &self, _signal: &GpuTensorHandle, _kernel: &GpuTensorHandle, _mode: ProviderConvMode, ) -> Result<GpuTensorHandle> { ... 
} fn iir_filter<'a>( &'a self, _b: &'a GpuTensorHandle, _a: &'a GpuTensorHandle, _x: &'a GpuTensorHandle, _options: ProviderIirFilterOptions, ) -> AccelProviderFuture<'a, ProviderIirFilterResult> { ... } fn permute( &self, _handle: &GpuTensorHandle, _order: &[usize], ) -> Result<GpuTensorHandle> { ... } fn flip( &self, _handle: &GpuTensorHandle, _axes: &[usize], ) -> Result<GpuTensorHandle> { ... } fn circshift( &self, _handle: &GpuTensorHandle, _shifts: &[isize], ) -> Result<GpuTensorHandle> { ... } fn diff_dim( &self, _handle: &GpuTensorHandle, _order: usize, _dim: usize, ) -> Result<GpuTensorHandle> { ... } fn fft_dim<'a>( &'a self, _handle: &'a GpuTensorHandle, _len: Option<usize>, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn ifft_dim<'a>( &'a self, _handle: &'a GpuTensorHandle, _len: Option<usize>, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn unique<'a>( &'a self, _handle: &'a GpuTensorHandle, _options: &'a UniqueOptions, ) -> AccelProviderFuture<'a, UniqueResult> { ... } fn union<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a UnionOptions, ) -> AccelProviderFuture<'a, UnionResult> { ... } fn setdiff<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a SetdiffOptions, ) -> AccelProviderFuture<'a, SetdiffResult> { ... } fn ismember<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a IsMemberOptions, ) -> AccelProviderFuture<'a, IsMemberResult> { ... } fn reshape( &self, handle: &GpuTensorHandle, new_shape: &[usize], ) -> Result<GpuTensorHandle> { ... } fn cat( &self, _dim: usize, _inputs: &[GpuTensorHandle], ) -> Result<GpuTensorHandle> { ... } fn repmat( &self, _handle: &GpuTensorHandle, _reps: &[usize], ) -> Result<GpuTensorHandle> { ... } fn kron( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... 
} fn reduce_sum<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_sum_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn dot<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _dim: Option<usize>, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_nnz<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_nnz_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_prod<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_prod_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_mean<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_mean_nd<'a>( &'a self, _a: &'a GpuTensorHandle, _dims_zero_based: &'a [usize], ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_moments_nd<'a>( &'a self, _a: &'a GpuTensorHandle, _dims_zero_based: &'a [usize], ) -> AccelProviderFuture<'a, ProviderMoments2> { ... } fn reduce_mean_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_std<'a>( &'a self, _a: &'a GpuTensorHandle, _normalization: ProviderStdNormalization, _nan_mode: ProviderNanMode, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_std_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _normalization: ProviderStdNormalization, _nan_mode: ProviderNanMode, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_any<'a>( &'a self, _a: &'a GpuTensorHandle, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... 
} fn reduce_any_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_all<'a>( &'a self, _a: &'a GpuTensorHandle, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_all_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_median<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_median_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_min<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_min_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, ReduceDimResult> { ... } fn reduce_max<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle> { ... } fn reduce_max_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, ReduceDimResult> { ... } fn cumsum_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<GpuTensorHandle> { ... } fn cumprod_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<GpuTensorHandle> { ... } fn cummin_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<ProviderCumminResult> { ... } fn cummax_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<ProviderCummaxResult> { ... } fn find( &self, _a: &GpuTensorHandle, _limit: Option<usize>, _direction: FindDirection, ) -> Result<ProviderFindResult> { ... 
} fn fused_elementwise( &self, _shader: &str, _inputs: &[GpuTensorHandle], _output_shape: &[usize], _len: usize, ) -> Result<GpuTensorHandle> { ... } fn map_nan_to_zero(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn not_nan_mask(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle> { ... } fn fused_reduction( &self, _shader: &str, _inputs: &[GpuTensorHandle], _output_shape: &[usize], _reduce_len: usize, _num_slices: usize, _workgroup_size: u32, _flavor: ReductionFlavor, ) -> Result<GpuTensorHandle> { ... } fn warmup(&self) { ... } fn fused_cache_counters(&self) -> (u64, u64) { ... } fn last_warmup_millis(&self) -> Option<u64> { ... } fn telemetry_snapshot(&self) -> ProviderTelemetry { ... } fn reset_telemetry(&self) { ... } fn default_reduction_workgroup_size(&self) -> u32 { ... } fn two_pass_threshold(&self) -> usize { ... } fn reduction_two_pass_mode(&self) -> ReductionTwoPassMode { ... } fn scatter_column( &self, _matrix: &GpuTensorHandle, _col_index: usize, _values: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn scatter_row( &self, _matrix: &GpuTensorHandle, _row_index: usize, _values: &GpuTensorHandle, ) -> Result<GpuTensorHandle> { ... } fn sub2ind( &self, _dims: &[usize], _strides: &[usize], _inputs: &[&GpuTensorHandle], _scalar_mask: &[bool], _len: usize, _output_shape: &[usize], ) -> Result<GpuTensorHandle> { ... } fn supports_ind2sub(&self) -> bool { ... } fn ind2sub( &self, _dims: &[usize], _strides: &[usize], _indices: &GpuTensorHandle, _total: usize, _len: usize, _output_shape: &[usize], ) -> Result<Vec<GpuTensorHandle>> { ... } fn issymmetric( &self, _matrix: &GpuTensorHandle, _kind: ProviderSymmetryKind, _tolerance: f64, ) -> Result<bool> { ... } fn ishermitian<'a>( &'a self, _matrix: &'a GpuTensorHandle, _kind: ProviderHermitianKind, _tolerance: f64, ) -> AccelProviderFuture<'a, bool> { ... } fn bandwidth(&self, _matrix: &GpuTensorHandle) -> Result<ProviderBandwidth> { ... 
} fn sym_rcm<'a>( &'a self, _matrix: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, Vec<usize>> { ... }
}
Expand description

Device/provider interface that backends implement and register into the runtime layer

Required Methods§

Provided Methods§

Source

fn device_id(&self) -> u32

Source

fn export_context(&self, _kind: AccelContextKind) -> Option<AccelContextHandle>

Export a shared GPU context handle, allowing downstream systems (plotting, visualization) to reuse the same device/queue without copying tensor data back to the host.

Source

fn gather_linear( &self, _source: &GpuTensorHandle, _indices: &[u32], _output_shape: &[usize], ) -> Result<GpuTensorHandle>

Gather elements from source at the provided zero-based linear indices, materialising a dense tensor with the specified output_shape.

Source

fn scatter_linear( &self, _target: &GpuTensorHandle, _indices: &[u32], _values: &GpuTensorHandle, ) -> Result<()>

Scatter the contents of values into target at the provided zero-based linear indices.

The provider must ensure values.len() == indices.len() and update target in place.

Source

fn device_info_struct(&self) -> ApiDeviceInfo

Structured device information (optional to override). Default adapts from device_info().

Source

fn precision(&self) -> ProviderPrecision

Source

fn read_scalar(&self, _h: &GpuTensorHandle, _linear_index: usize) -> Result<f64>

Read a single scalar at linear index from a device tensor, returning it as f64.

Source

fn zeros(&self, _shape: &[usize]) -> Result<GpuTensorHandle>

Allocate a zero-initialised tensor with the provided shape on the device.

Source

fn ones(&self, _shape: &[usize]) -> Result<GpuTensorHandle>

Allocate a one-initialised tensor with the provided shape on the device.

Source

fn zeros_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle>

Allocate a zero-initialised tensor matching the prototype tensor.

Source

fn fill(&self, shape: &[usize], value: f64) -> Result<GpuTensorHandle>

Allocate a tensor filled with a constant value on the device.

Source

fn fill_like( &self, prototype: &GpuTensorHandle, value: f64, ) -> Result<GpuTensorHandle>

Allocate a tensor filled with a constant value, matching a prototype’s residency.

Source

fn ones_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle>

Allocate a one-initialised tensor matching the prototype tensor.

Source

fn eye(&self, _shape: &[usize]) -> Result<GpuTensorHandle>

Allocate an identity tensor with ones along the leading diagonal of the first two axes.

Source

fn eye_like(&self, prototype: &GpuTensorHandle) -> Result<GpuTensorHandle>

Allocate an identity tensor matching the prototype tensor’s shape.

Source

fn meshgrid( &self, _axes: &[MeshgridAxisView<'_>], ) -> Result<ProviderMeshgridResult>

Construct MATLAB-style coordinate grids from axis vectors.

Source

fn diag_from_vector( &self, _vector: &GpuTensorHandle, _offset: isize, ) -> Result<GpuTensorHandle>

Construct a diagonal matrix from a vector-like tensor. offset matches MATLAB semantics.

Source

fn diag_extract( &self, _matrix: &GpuTensorHandle, _offset: isize, ) -> Result<GpuTensorHandle>

Extract a diagonal from a matrix-like tensor. The result is always a column vector.

Source

fn tril<'a>( &'a self, _matrix: &'a GpuTensorHandle, _offset: isize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Apply a lower-triangular mask to the first two dimensions of a tensor.

Source

fn triu<'a>( &'a self, _matrix: &'a GpuTensorHandle, _offset: isize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Apply an upper-triangular mask to the first two dimensions of a tensor.

Source

fn polyval( &self, _coefficients: &GpuTensorHandle, _points: &GpuTensorHandle, _options: &ProviderPolyvalOptions, ) -> Result<GpuTensorHandle>

Evaluate a polynomial expressed by coefficients at each element in points.

Source

fn polyfit<'a>( &'a self, _x: &'a GpuTensorHandle, _y: &'a GpuTensorHandle, _degree: usize, _weights: Option<&'a GpuTensorHandle>, ) -> AccelProviderFuture<'a, ProviderPolyfitResult>

Fit a polynomial of degree degree to (x, y) samples. Optional weights must match x.

Source

fn polyder_single<'a>( &'a self, _polynomial: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Differentiate a polynomial represented as a vector of coefficients.

Source

fn polyder_product<'a>( &'a self, _p: &'a GpuTensorHandle, _q: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Apply the product rule to polynomials p and q.

Source

fn polyder_quotient<'a>( &'a self, _u: &'a GpuTensorHandle, _v: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, ProviderPolyderQuotient>

Apply the quotient rule to polynomials u and v.

Source

fn polyint( &self, _polynomial: &GpuTensorHandle, _constant: f64, ) -> Result<GpuTensorHandle>

Integrate a polynomial represented as a vector of coefficients and append a constant term.

Source

fn random_uniform(&self, _shape: &[usize]) -> Result<GpuTensorHandle>

Allocate a tensor filled with random values drawn from U(0, 1).

Source

fn random_uniform_like( &self, prototype: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Allocate a tensor filled with random values matching the prototype shape.

Source

fn random_normal(&self, _shape: &[usize]) -> Result<GpuTensorHandle>

Allocate a tensor filled with standard normal (mean 0, stddev 1) random values.

Source

fn random_normal_like( &self, prototype: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Allocate a tensor of standard normal values matching a prototype’s shape.

Source

fn stochastic_evolution( &self, _state: &GpuTensorHandle, _drift: f64, _scale: f64, _steps: u32, ) -> Result<GpuTensorHandle>

Source

fn set_rng_state(&self, _state: u64) -> Result<()>

Set the provider RNG state to align with the host RNG.

Source

fn fspecial(&self, _request: &FspecialRequest) -> Result<GpuTensorHandle>

Generate a 2-D correlation kernel matching MATLAB’s fspecial builtin.

Source

fn imfilter<'a>( &'a self, _image: &'a GpuTensorHandle, _kernel: &'a GpuTensorHandle, _options: &'a ImfilterOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Apply an N-D correlation/convolution with padding semantics matching MATLAB’s imfilter.

Source

fn random_integer_range( &self, _lower: i64, _upper: i64, _shape: &[usize], ) -> Result<GpuTensorHandle>

Allocate a tensor filled with random integers over an inclusive range.

Source

fn random_integer_like( &self, prototype: &GpuTensorHandle, lower: i64, upper: i64, ) -> Result<GpuTensorHandle>

Allocate a random integer tensor matching the prototype shape.

Source

fn random_permutation(&self, _n: usize, _k: usize) -> Result<GpuTensorHandle>

Allocate a random permutation of 1..=n, returning the first k elements.

Source

fn random_permutation_like( &self, _prototype: &GpuTensorHandle, n: usize, k: usize, ) -> Result<GpuTensorHandle>

Allocate a random permutation matching the prototype residency.

Source

fn covariance<'a>( &'a self, _matrix: &'a GpuTensorHandle, _second: Option<&'a GpuTensorHandle>, _weights: Option<&'a GpuTensorHandle>, _options: &'a CovarianceOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Compute a covariance matrix across the columns of matrix.

Source

fn corrcoef<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: &'a CorrcoefOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Compute a correlation coefficient matrix across the columns of matrix.

Source

fn linspace( &self, _start: f64, _stop: f64, _count: usize, ) -> Result<GpuTensorHandle>

Source

fn elem_add<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_mul<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_max<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_min<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_sub<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_div<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_pow<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_hypot<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_ge<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_le<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_lt<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_gt<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_eq<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn elem_ne<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn logical_and( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Source

fn logical_or( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Source

fn logical_xor( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Source

fn logical_not(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn logical_islogical(&self, a: &GpuTensorHandle) -> Result<bool>

Source

fn logical_isreal(&self, _a: &GpuTensorHandle) -> Result<bool>

Source

fn logical_isfinite(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn logical_isnan(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn logical_isinf(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn elem_atan2<'a>( &'a self, _y: &'a GpuTensorHandle, _x: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_sin<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_gamma<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_factorial<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_asinh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_sinh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_cosh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_asin<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_acos<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_acosh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_tan<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_tanh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_atan<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_atanh<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_ceil<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_floor<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_round<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_fix<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_cos<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_angle<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_imag<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_real<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_conj<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_abs<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_sign<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_exp<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_expm1<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_log<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_log2<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_log10<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_log1p<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_sqrt<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_double<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_single<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unary_pow2<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn pow2_scale( &self, _mantissa: &GpuTensorHandle, _exponent: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Source

fn scalar_rsub( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_rdiv( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_add( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_sub( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_mul( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_max( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_min( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn scalar_div( &self, _a: &GpuTensorHandle, _scalar: f64, ) -> Result<GpuTensorHandle>

Source

fn sort_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _order: SortOrder, _comparison: SortComparison, ) -> AccelProviderFuture<'a, SortResult>

Source

fn sort_rows<'a>( &'a self, _a: &'a GpuTensorHandle, _columns: &'a [SortRowsColumnSpec], _comparison: SortComparison, ) -> AccelProviderFuture<'a, SortResult>

Source

fn matmul<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn syrk(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn pagefun(&self, _request: &PagefunRequest) -> Result<GpuTensorHandle>

Source

fn matmul_epilogue<'a>( &'a self, a: &'a GpuTensorHandle, b: &'a GpuTensorHandle, epilogue: &'a MatmulEpilogue, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Optional: matrix multiplication with an epilogue applied before store.

The default implementation falls back to matmul when the epilogue is effectively a no-op (alpha=1, beta=0, no row/col scales), and otherwise returns Err.

Source

fn image_normalize<'a>( &'a self, _input: &'a GpuTensorHandle, _desc: &'a ImageNormalizeDescriptor, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn matmul_power_step<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _epilogue: &'a PowerStepEpilogue, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn linsolve<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _options: &'a ProviderLinsolveOptions, ) -> AccelProviderFuture<'a, ProviderLinsolveResult>

Source

fn inv<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: ProviderInvOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn pinv<'a>( &'a self, _matrix: &'a GpuTensorHandle, _options: ProviderPinvOptions, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn cond<'a>( &'a self, _matrix: &'a GpuTensorHandle, _norm: ProviderCondNorm, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn norm<'a>( &'a self, _tensor: &'a GpuTensorHandle, _order: ProviderNormOrder, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn rank<'a>( &'a self, _matrix: &'a GpuTensorHandle, _tolerance: Option<f64>, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn rcond<'a>( &'a self, _matrix: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn mldivide<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn mrdivide<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn eig<'a>( &'a self, _a: &'a GpuTensorHandle, _compute_left: bool, ) -> AccelProviderFuture<'a, ProviderEigResult>

Source

fn lu<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, ProviderLuResult>

Source

fn chol<'a>( &'a self, _a: &'a GpuTensorHandle, _lower: bool, ) -> AccelProviderFuture<'a, ProviderCholResult>

Source

fn qr<'a>( &'a self, _a: &'a GpuTensorHandle, _options: ProviderQrOptions, ) -> AccelProviderFuture<'a, ProviderQrResult>

Source

fn take_matmul_sources( &self, _product: &GpuTensorHandle, ) -> Option<(GpuTensorHandle, GpuTensorHandle)>

Source

fn qr_power_iter<'a>( &'a self, product: &'a GpuTensorHandle, _product_lhs: Option<&'a GpuTensorHandle>, q_handle: &'a GpuTensorHandle, options: &'a ProviderQrOptions, ) -> AccelProviderFuture<'a, Option<ProviderQrPowerIterResult>>

Source

fn transpose(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Source

fn conv1d( &self, _signal: &GpuTensorHandle, _kernel: &GpuTensorHandle, _options: ProviderConv1dOptions, ) -> Result<GpuTensorHandle>

Source

fn conv2d( &self, _signal: &GpuTensorHandle, _kernel: &GpuTensorHandle, _mode: ProviderConvMode, ) -> Result<GpuTensorHandle>

Source

fn iir_filter<'a>( &'a self, _b: &'a GpuTensorHandle, _a: &'a GpuTensorHandle, _x: &'a GpuTensorHandle, _options: ProviderIirFilterOptions, ) -> AccelProviderFuture<'a, ProviderIirFilterResult>

Source

fn permute( &self, _handle: &GpuTensorHandle, _order: &[usize], ) -> Result<GpuTensorHandle>

Reorder tensor dimensions according to order, expressed as zero-based indices.

Source

fn flip( &self, _handle: &GpuTensorHandle, _axes: &[usize], ) -> Result<GpuTensorHandle>

Source

fn circshift( &self, _handle: &GpuTensorHandle, _shifts: &[isize], ) -> Result<GpuTensorHandle>

Source

fn diff_dim( &self, _handle: &GpuTensorHandle, _order: usize, _dim: usize, ) -> Result<GpuTensorHandle>

Source

fn fft_dim<'a>( &'a self, _handle: &'a GpuTensorHandle, _len: Option<usize>, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Perform an in-place FFT along a zero-based dimension, optionally padding/truncating to len.

Source

fn ifft_dim<'a>( &'a self, _handle: &'a GpuTensorHandle, _len: Option<usize>, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn unique<'a>( &'a self, _handle: &'a GpuTensorHandle, _options: &'a UniqueOptions, ) -> AccelProviderFuture<'a, UniqueResult>

Source

fn union<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a UnionOptions, ) -> AccelProviderFuture<'a, UnionResult>

Source

fn setdiff<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a SetdiffOptions, ) -> AccelProviderFuture<'a, SetdiffResult>

Source

fn ismember<'a>( &'a self, _a: &'a GpuTensorHandle, _b: &'a GpuTensorHandle, _options: &'a IsMemberOptions, ) -> AccelProviderFuture<'a, IsMemberResult>

Source

fn reshape( &self, handle: &GpuTensorHandle, new_shape: &[usize], ) -> Result<GpuTensorHandle>

Source

fn cat( &self, _dim: usize, _inputs: &[GpuTensorHandle], ) -> Result<GpuTensorHandle>

Concatenate the provided tensors along the 1-based dimension dim.

Source

fn repmat( &self, _handle: &GpuTensorHandle, _reps: &[usize], ) -> Result<GpuTensorHandle>

Source

fn kron( &self, _a: &GpuTensorHandle, _b: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Compute the Kronecker product of two tensors, matching MATLAB semantics.

Source

fn reduce_sum<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_sum_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn dot<'a>( &'a self, _lhs: &'a GpuTensorHandle, _rhs: &'a GpuTensorHandle, _dim: Option<usize>, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_nnz<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_nnz_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_prod<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_prod_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_mean<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_mean_nd<'a>( &'a self, _a: &'a GpuTensorHandle, _dims_zero_based: &'a [usize], ) -> AccelProviderFuture<'a, GpuTensorHandle>

Reduce mean across multiple zero-based dimensions in one device pass.

Source

fn reduce_moments_nd<'a>( &'a self, _a: &'a GpuTensorHandle, _dims_zero_based: &'a [usize], ) -> AccelProviderFuture<'a, ProviderMoments2>

Reduce moments across multiple zero-based dimensions in one device pass. Returns mean (E[x]) and mean of squares (E[x^2]).

Source

fn reduce_mean_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_std<'a>( &'a self, _a: &'a GpuTensorHandle, _normalization: ProviderStdNormalization, _nan_mode: ProviderNanMode, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_std_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _normalization: ProviderStdNormalization, _nan_mode: ProviderNanMode, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_any<'a>( &'a self, _a: &'a GpuTensorHandle, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_any_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_all<'a>( &'a self, _a: &'a GpuTensorHandle, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_all_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, _omit_nan: bool, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_median<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_median_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_min<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_min_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, ReduceDimResult>

Source

fn reduce_max<'a>( &'a self, _a: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, GpuTensorHandle>

Source

fn reduce_max_dim<'a>( &'a self, _a: &'a GpuTensorHandle, _dim: usize, ) -> AccelProviderFuture<'a, ReduceDimResult>

Source

fn cumsum_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<GpuTensorHandle>

Source

fn cumprod_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<GpuTensorHandle>

Source

fn cummin_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<ProviderCumminResult>

Source

fn cummax_scan( &self, _input: &GpuTensorHandle, _dim: usize, _direction: ProviderScanDirection, _nan_mode: ProviderNanMode, ) -> Result<ProviderCummaxResult>

Source

fn find( &self, _a: &GpuTensorHandle, _limit: Option<usize>, _direction: FindDirection, ) -> Result<ProviderFindResult>

Source

fn fused_elementwise( &self, _shader: &str, _inputs: &[GpuTensorHandle], _output_shape: &[usize], _len: usize, ) -> Result<GpuTensorHandle>

Source

fn map_nan_to_zero(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Build a numeric tensor where NaNs in a are replaced with 0.0 (device side).

Source

fn not_nan_mask(&self, _a: &GpuTensorHandle) -> Result<GpuTensorHandle>

Build a numeric mask tensor with 1.0 where value is not NaN and 0.0 where value is NaN.

Source

fn fused_reduction( &self, _shader: &str, _inputs: &[GpuTensorHandle], _output_shape: &[usize], _reduce_len: usize, _num_slices: usize, _workgroup_size: u32, _flavor: ReductionFlavor, ) -> Result<GpuTensorHandle>

Generic fused reduction entrypoint.

The shader is expected to implement a column-major reduction across reduce_len with num_slices independent slices (e.g., columns). Providers should create a uniform buffer compatible with the expected Params/MParams struct in the shader and dispatch num_slices workgroups with workgroup_size threads, or an equivalent strategy.

Source

fn warmup(&self)

Optionally pre-compile commonly used pipelines to amortize first-dispatch costs.

Source

fn fused_cache_counters(&self) -> (u64, u64)

Returns (cache_hits, cache_misses) for fused pipeline cache, if supported.

Source

fn last_warmup_millis(&self) -> Option<u64>

Returns the duration of the last provider warmup in milliseconds, if known.

Source

fn telemetry_snapshot(&self) -> ProviderTelemetry

Returns a snapshot of provider telemetry counters if supported.

Source

fn reset_telemetry(&self)

Reset all telemetry counters maintained by the provider, if supported.

Source

fn default_reduction_workgroup_size(&self) -> u32

Default reduction workgroup size the provider prefers.

Source

fn two_pass_threshold(&self) -> usize

Threshold above which provider will prefer two-pass reduction.

Source

fn reduction_two_pass_mode(&self) -> ReductionTwoPassMode

Current two-pass mode preference (auto/forced on/off).

Source

fn scatter_column( &self, _matrix: &GpuTensorHandle, _col_index: usize, _values: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Fast-path: write a GPU column in a matrix from a GPU vector, returning a new handle. Expected: values.shape == [rows, 1] (or [rows]) and col_index < cols.

Source

fn scatter_row( &self, _matrix: &GpuTensorHandle, _row_index: usize, _values: &GpuTensorHandle, ) -> Result<GpuTensorHandle>

Fast-path: write a GPU row in a matrix from a GPU vector, returning a new handle. Expected: values.shape == [1, cols] (or [cols]) and row_index < rows.

Source

fn sub2ind( &self, _dims: &[usize], _strides: &[usize], _inputs: &[&GpuTensorHandle], _scalar_mask: &[bool], _len: usize, _output_shape: &[usize], ) -> Result<GpuTensorHandle>

Source

fn supports_ind2sub(&self) -> bool

Returns true if the provider offers a device-side ind2sub implementation.

Source

fn ind2sub( &self, _dims: &[usize], _strides: &[usize], _indices: &GpuTensorHandle, _total: usize, _len: usize, _output_shape: &[usize], ) -> Result<Vec<GpuTensorHandle>>

Convert linear indices into per-dimension subscripts on the device.

Source

fn issymmetric( &self, _matrix: &GpuTensorHandle, _kind: ProviderSymmetryKind, _tolerance: f64, ) -> Result<bool>

Determine if a matrix is symmetric (or skew-symmetric) without gathering it to the host.

Source

fn ishermitian<'a>( &'a self, _matrix: &'a GpuTensorHandle, _kind: ProviderHermitianKind, _tolerance: f64, ) -> AccelProviderFuture<'a, bool>

Determine if a matrix is Hermitian (or skew-Hermitian) without gathering it to the host.

Source

fn bandwidth(&self, _matrix: &GpuTensorHandle) -> Result<ProviderBandwidth>

Inspect the bandwidth of a matrix without gathering it back to the host.

Source

fn sym_rcm<'a>( &'a self, _matrix: &'a GpuTensorHandle, ) -> AccelProviderFuture<'a, Vec<usize>>

Compute the symmetric reverse Cuthill-McKee permutation for the matrix.

Implementations may execute on the device or gather to the host. The permutation should be returned as zero-based indices.

Implementors§