Struct cudnn::API [src]

pub struct API;

Defines the CUDA cuDNN API.

Methods

impl API
[src]

fn activation_forward(handle: cudnnHandle_t, mode: cudnnActivationMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes an activation forward function.

fn activation_backward(handle: cudnnHandle_t, mode: cudnnActivationMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_diff_desc: cudnnTensorDescriptor_t, src_diff_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *const c_void, dest_diff_desc: cudnnTensorDescriptor_t, dest_diff_data: *mut c_void) -> Result<(), Error>

Computes an activation backward function.
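As a usage illustration, here is a minimal sketch of a forward ReLU pass. It assumes the raw cuDNN types (cudnnHandle_t, cudnnTensorDescriptor_t, cudnnActivationMode_t) and libc's c_void are in scope via the crate's FFI re-exports, that the CUDNN_ACTIVATION_RELU variant path follows the C enum, and that the caller supplies valid device pointers to float tensors:

    // Sketch: forward ReLU over a tensor already resident on the device.
    fn relu_forward(handle: cudnnHandle_t,
                    src_desc: cudnnTensorDescriptor_t,
                    src_data: *const c_void,
                    dest_desc: cudnnTensorDescriptor_t,
                    dest_data: *mut c_void) -> Result<(), Error> {
        // Blend factors: dest = alpha * relu(src) + beta * dest (f32 matches float tensors).
        let alpha = 1f32;
        let beta = 0f32;
        API::activation_forward(handle,
                                cudnnActivationMode_t::CUDNN_ACTIVATION_RELU,
                                &alpha as *const f32 as *const c_void,
                                src_desc, src_data,
                                &beta as *const f32 as *const c_void,
                                dest_desc, dest_data)
    }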

impl API
[src]

fn create_filter_descriptor() -> Result<cudnnFilterDescriptor_t, Error>

Creates a generic CUDA cuDNN Filter Descriptor.

fn destroy_filter_descriptor(desc: cudnnFilterDescriptor_t) -> Result<(), Error>

Destroys a CUDA cuDNN Filter Descriptor.

Should be called when freeing a CUDA::Descriptor, so that no stale resources are left on the CUDA device.

fn set_filter_descriptor(desc: cudnnFilterDescriptor_t, data_type: cudnnDataType_t, nb_dims: c_int, filter_dim_a: *const c_int) -> Result<(), Error>

Initializes a generic CUDA cuDNN Filter Descriptor with specific properties.
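A sketch of the create, set, destroy lifecycle of a filter descriptor. The filter dimensions (64 output feature maps, 3 input channels, 3x3 kernel) and the CUDNN_DATA_FLOAT variant path are illustrative assumptions:

    fn make_filter_descriptor() -> Result<cudnnFilterDescriptor_t, Error> {
        // 4-D filter layout: [output maps, input channels, kernel height, kernel width].
        let filter_dims: [c_int; 4] = [64, 3, 3, 3];
        let desc = API::create_filter_descriptor()?;
        API::set_filter_descriptor(desc,
                                   cudnnDataType_t::CUDNN_DATA_FLOAT,
                                   filter_dims.len() as c_int,
                                   filter_dims.as_ptr())?;
        Ok(desc)
    }
    // Later, when the descriptor is no longer needed:
    // API::destroy_filter_descriptor(desc)?;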

fn find_convolution_forward_algorithm(handle: cudnnHandle_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<Vec<cudnnConvolutionFwdAlgoPerf_t>, Error>

cuDNN Convolution Configuration

Returns the available convolution forward algorithms for the given scenario, ordered by measured performance (most performant first).

fn get_convolution_forward_workspace_size(handle: cudnnHandle_t, algo: cudnnConvolutionFwdAlgo_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<usize, Error>

Returns the workspace size in bytes needed for the given convolution algorithm.

fn find_convolution_backward_filter_algorithm(handle: cudnnHandle_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<Vec<cudnnConvolutionBwdFilterAlgoPerf_t>, Error>

Returns the available convolution backward filter algorithms for the given scenario, ordered by measured performance (most performant first).

fn get_convolution_backward_filter_workspace_size(handle: cudnnHandle_t, algo: cudnnConvolutionBwdFilterAlgo_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<usize, Error>

Returns the workspace size in bytes needed for the given convolution algorithm.

fn find_convolution_backward_data_algorithm(handle: cudnnHandle_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<Vec<cudnnConvolutionBwdDataAlgoPerf_t>, Error>

Returns the available convolution backward data algorithms for the given scenario, ordered by measured performance (most performant first).

fn get_convolution_backward_data_workspace_size(handle: cudnnHandle_t, algo: cudnnConvolutionBwdDataAlgo_t, filter_desc: cudnnFilterDescriptor_t, conv_desc: cudnnConvolutionDescriptor_t, src_desc: cudnnTensorDescriptor_t, dest_desc: cudnnTensorDescriptor_t) -> Result<usize, Error>

Returns the workspace size in bytes needed for the given convolution algorithm.
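The find/workspace pairs above are typically used together. A sketch for the forward case: take the best-performing algorithm from the list cuDNN reports, then query the scratch space it needs. The `algo` field name on the perf struct follows the C API and is an assumption here:

    fn forward_algo_and_workspace(handle: cudnnHandle_t,
                                  filter_desc: cudnnFilterDescriptor_t,
                                  conv_desc: cudnnConvolutionDescriptor_t,
                                  src_desc: cudnnTensorDescriptor_t,
                                  dest_desc: cudnnTensorDescriptor_t)
                                  -> Result<(cudnnConvolutionFwdAlgo_t, usize), Error> {
        // Results come back ordered by measured execution time, fastest first.
        let perf = API::find_convolution_forward_algorithm(handle, filter_desc, conv_desc, src_desc, dest_desc)?;
        let algo = perf[0].algo; // field name follows the C perf struct
        let size = API::get_convolution_forward_workspace_size(handle, algo, filter_desc, conv_desc, src_desc, dest_desc)?;
        Ok((algo, size))
    }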

fn create_convolution_descriptor() -> Result<cudnnConvolutionDescriptor_t, Error>

Creates a generic CUDA cuDNN Convolution Descriptor.

fn destroy_convolution_descriptor(desc: cudnnConvolutionDescriptor_t) -> Result<(), Error>

Destroys a CUDA cuDNN Convolution Descriptor.

Should be called when freeing a CUDA::Descriptor, so that no stale resources are left on the CUDA device.

fn set_convolution_descriptor(desc: cudnnConvolutionDescriptor_t, data_type: cudnnDataType_t, mode: cudnnConvolutionMode_t, array_length: c_int, pad_a: *const c_int, filter_stride_a: *const c_int, upscale_a: *const c_int) -> Result<(), Error>

Initializes a generic CUDA cuDNN Convolution Descriptor with specific properties.
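A sketch of configuring a 2-D convolution descriptor with padding 1, stride 1 and no upscaling; the CUDNN_DATA_FLOAT and CUDNN_CROSS_CORRELATION variant paths are assumed to follow the C enums:

    fn make_conv_2d_descriptor() -> Result<cudnnConvolutionDescriptor_t, Error> {
        let desc = API::create_convolution_descriptor()?;
        // One entry per spatial dimension (here: height and width).
        let padding: [c_int; 2] = [1, 1];
        let stride: [c_int; 2] = [1, 1];
        let upscale: [c_int; 2] = [1, 1];
        API::set_convolution_descriptor(desc,
                                        cudnnDataType_t::CUDNN_DATA_FLOAT,
                                        cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION,
                                        2, // array_length: number of spatial dimensions
                                        padding.as_ptr(),
                                        stride.as_ptr(),
                                        upscale.as_ptr())?;
        Ok(desc)
    }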

fn convolution_forward(handle: cudnnHandle_t, algo: cudnnConvolutionFwdAlgo_t, conv_desc: cudnnConvolutionDescriptor_t, work_space: *mut c_void, work_size_in_bytes: size_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, filter_desc: cudnnFilterDescriptor_t, filter_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes a convolution forward function.
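Putting the pieces together, a sketch of the forward call itself. The workspace is assumed to have been allocated on the device beforehand (via the CUDA runtime, outside this API) with at least the size reported by the workspace query, and the f32 scaling factors assume float tensors:

    fn conv_forward(handle: cudnnHandle_t,
                    algo: cudnnConvolutionFwdAlgo_t,
                    conv_desc: cudnnConvolutionDescriptor_t,
                    workspace: *mut c_void,
                    workspace_size: size_t,
                    src_desc: cudnnTensorDescriptor_t,
                    src_data: *const c_void,
                    filter_desc: cudnnFilterDescriptor_t,
                    filter_data: *const c_void,
                    dest_desc: cudnnTensorDescriptor_t,
                    dest_data: *mut c_void) -> Result<(), Error> {
        // Overwrite the destination: dest = 1 * conv(src, filter) + 0 * dest.
        let alpha = 1f32;
        let beta = 0f32;
        API::convolution_forward(handle, algo, conv_desc,
                                 workspace, workspace_size,
                                 &alpha as *const f32 as *const c_void,
                                 src_desc, src_data,
                                 filter_desc, filter_data,
                                 &beta as *const f32 as *const c_void,
                                 dest_desc, dest_data)
    }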

fn convolution_backward_bias(handle: cudnnHandle_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes a convolution backward function w.r.t. the bias.

fn convolution_backward_filter(handle: cudnnHandle_t, algo: cudnnConvolutionBwdFilterAlgo_t, conv_desc: cudnnConvolutionDescriptor_t, work_space: *mut c_void, work_size_in_bytes: size_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, diff_desc: cudnnTensorDescriptor_t, diff_data: *const c_void, beta: *const c_void, grad_desc: cudnnFilterDescriptor_t, grad_data: *mut c_void) -> Result<(), Error>

Computes a convolution backward function w.r.t. the filter coefficients.

fn convolution_backward_data(handle: cudnnHandle_t, algo: cudnnConvolutionBwdDataAlgo_t, conv_desc: cudnnConvolutionDescriptor_t, work_space: *mut c_void, work_size_in_bytes: size_t, alpha: *const c_void, filter_desc: cudnnFilterDescriptor_t, filter_data: *const c_void, diff_desc: cudnnTensorDescriptor_t, diff_data: *const c_void, beta: *const c_void, grad_desc: cudnnTensorDescriptor_t, grad_data: *mut c_void) -> Result<(), Error>

Computes a convolution backward function w.r.t. the data, producing the gradient with respect to the convolution's input tensor.

impl API
[src]

fn create_lrn_descriptor() -> Result<cudnnLRNDescriptor_t, Error>

Creates a generic CUDA cuDNN LRN Descriptor.

fn destroy_lrn_descriptor(desc: cudnnLRNDescriptor_t) -> Result<(), Error>

Destroys a CUDA cuDNN LRN Descriptor.

Should be called when freeing a CUDA::Descriptor, so that no stale resources are left on the CUDA device.

fn set_lrn_descriptor(desc: cudnnLRNDescriptor_t, lrn_n: c_uint, lrn_alpha: c_double, lrn_beta: c_double, lrn_k: c_double) -> Result<(), Error>

Initializes a generic CUDA cuDNN LRN Descriptor with specific properties.
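A sketch of creating and configuring an LRN descriptor; the AlexNet-style parameters (n = 5, alpha = 1e-4, beta = 0.75, k = 2.0) are illustrative values only:

    fn make_lrn_descriptor() -> Result<cudnnLRNDescriptor_t, Error> {
        let desc = API::create_lrn_descriptor()?;
        // Local size 5, alpha 1e-4, beta 0.75, k 2.0 (illustrative values).
        API::set_lrn_descriptor(desc, 5, 1e-4, 0.75, 2.0)?;
        Ok(desc)
    }
    // Pair with API::destroy_lrn_descriptor(desc) once the descriptor is no longer used.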

fn lrn_cross_channel_forward(handle: cudnnHandle_t, norm_desc: cudnnLRNDescriptor_t, mode: cudnnLRNMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes an LRN cross channel forward function.

fn lrn_cross_channel_backward(handle: cudnnHandle_t, norm_desc: cudnnLRNDescriptor_t, mode: cudnnDivNormMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_diff_desc: cudnnTensorDescriptor_t, src_diff_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *const c_void, dest_diff_desc: cudnnTensorDescriptor_t, dest_diff_data: *mut c_void) -> Result<(), Error>

Computes an LRN cross channel backward function.

fn divisive_normalization_forward(handle: cudnnHandle_t, norm_desc: cudnnLRNDescriptor_t, mode: cudnnDivNormMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_means_data: *const c_void, temp_data: *mut c_void, temp_data2: *mut c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes a divisive normalization forward function.

fn divisive_normalization_backward(handle: cudnnHandle_t, norm_desc: cudnnLRNDescriptor_t, mode: cudnnDivNormMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_means_data: *const c_void, src_diff_data: *const c_void, temp_data: *mut c_void, temp_data2: *mut c_void, beta: *const c_void, dest_data_desc: cudnnTensorDescriptor_t, dest_data_diff: *mut c_void, dest_means_diff: *mut c_void) -> Result<(), Error>

Computes a divisive normalization backward function.

impl API
[src]

fn create_pooling_descriptor() -> Result<cudnnPoolingDescriptor_t, Error>

Creates a generic CUDA cuDNN Pooling Descriptor.

fn destroy_pooling_descriptor(desc: cudnnPoolingDescriptor_t) -> Result<(), Error>

Destroys a CUDA cuDNN Pooling Descriptor.

Should be called when freeing a CUDA::Descriptor, so that no stale resources are left on the CUDA device.

fn set_pooling_descriptor(desc: cudnnPoolingDescriptor_t, mode: cudnnPoolingMode_t, nb_dims: c_int, window: *const c_int, padding: *const c_int, stride: *const c_int) -> Result<(), Error>

Initializes a generic CUDA cuDNN Pooling Descriptor with specific properties.

fn get_pooling_descriptor(desc: cudnnPoolingDescriptor_t, nb_dims_requested: c_int, mode: *mut cudnnPoolingMode_t, nb_dims: *mut c_int, window: *mut c_int, padding: *mut c_int, stride: *mut c_int) -> Result<(), Error>

Returns information about a generic CUDA cuDNN Pooling Descriptor.

fn set_pooling_2d_descriptor(desc: cudnnPoolingDescriptor_t, mode: cudnnPoolingMode_t, window_height: c_int, window_width: c_int, vertical_padding: c_int, horizontal_padding: c_int, vertical_stride: c_int, horizontal_stride: c_int) -> Result<(), Error>

Initializes a generic CUDA cuDNN Pooling Descriptor with specific properties.

fn get_pooling_2d_descriptor(desc: cudnnPoolingDescriptor_t, mode: *mut cudnnPoolingMode_t, window_height: *mut c_int, window_width: *mut c_int, vertical_padding: *mut c_int, horizontal_padding: *mut c_int, vertical_stride: *mut c_int, horizontal_stride: *mut c_int) -> Result<(), Error>

Returns information about a generic CUDA cuDNN Pooling Descriptor.

fn get_pooling_forward_output_dim(pooling_desc: cudnnPoolingDescriptor_t, input_desc: cudnnTensorDescriptor_t, nb_dims: c_int, out_dim_a: *mut c_int) -> Result<(), Error>

Returns the dimensions of the output tensor that a pooling forward pass would produce for the given pooling descriptor and input tensor.
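A sketch that configures 2x2 max pooling with stride 2 and then queries the output dimensions it would produce for a given 4-D input; the CUDNN_POOLING_MAX variant path is assumed to follow the C enum:

    fn make_max_pooling_2d(input_desc: cudnnTensorDescriptor_t)
                           -> Result<(cudnnPoolingDescriptor_t, [c_int; 4]), Error> {
        let desc = API::create_pooling_descriptor()?;
        // 2x2 window, no padding, stride 2 in both directions.
        API::set_pooling_2d_descriptor(desc,
                                       cudnnPoolingMode_t::CUDNN_POOLING_MAX,
                                       2, 2,   // window height, width
                                       0, 0,   // vertical, horizontal padding
                                       2, 2)?; // vertical, horizontal stride
        // Query the N, C, H, W the pooling forward pass would produce.
        let mut out_dims: [c_int; 4] = [0; 4];
        API::get_pooling_forward_output_dim(desc, input_desc, 4, out_dims.as_mut_ptr())?;
        Ok((desc, out_dims))
    }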

fn pooling_forward(handle: cudnnHandle_t, pooling_desc: cudnnPoolingDescriptor_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes a pooling forward function.

fn pooling_backward(handle: cudnnHandle_t, pooling_desc: cudnnPoolingDescriptor_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_diff_desc: cudnnTensorDescriptor_t, src_diff_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *const c_void, dest_diff_desc: cudnnTensorDescriptor_t, dest_diff_data: *mut c_void) -> Result<(), Error>

Computes a pooling backward function.

impl API
[src]

fn softmax_forward(handle: cudnnHandle_t, algorithm: cudnnSoftmaxAlgorithm_t, mode: cudnnSoftmaxMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Computes a softmax forward function.

fn softmax_backward(handle: cudnnHandle_t, algorithm: cudnnSoftmaxAlgorithm_t, mode: cudnnSoftmaxMode_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, src_diff_desc: cudnnTensorDescriptor_t, src_diff_data: *const c_void, beta: *const c_void, dest_diff_desc: cudnnTensorDescriptor_t, dest_diff_data: *mut c_void) -> Result<(), Error>

Computes a softmax backward function.
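A sketch of a per-channel softmax forward pass; the CUDNN_SOFTMAX_ACCURATE and CUDNN_SOFTMAX_MODE_CHANNEL variant paths are assumed from the C enums, and the f32 scaling factors assume float tensors:

    fn softmax_channel_forward(handle: cudnnHandle_t,
                               src_desc: cudnnTensorDescriptor_t,
                               src_data: *const c_void,
                               dest_desc: cudnnTensorDescriptor_t,
                               dest_data: *mut c_void) -> Result<(), Error> {
        // dest = 1 * softmax(src) + 0 * dest, computed per channel.
        let alpha = 1f32;
        let beta = 0f32;
        API::softmax_forward(handle,
                             cudnnSoftmaxAlgorithm_t::CUDNN_SOFTMAX_ACCURATE,
                             cudnnSoftmaxMode_t::CUDNN_SOFTMAX_MODE_CHANNEL,
                             &alpha as *const f32 as *const c_void,
                             src_desc, src_data,
                             &beta as *const f32 as *const c_void,
                             dest_desc, dest_data)
    }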

impl API
[src]

fn create_tensor_descriptor() -> Result<cudnnTensorDescriptor_t, Error>

Creates a generic CUDA cuDNN Tensor Descriptor.

fn destroy_tensor_descriptor(tensor_desc: cudnnTensorDescriptor_t) -> Result<(), Error>

Destroys a CUDA cuDNN Tensor Descriptor.

Should be called when freeing a CUDA::Descriptor, so that no stale resources are left on the CUDA device.

fn set_tensor_descriptor(tensor_desc: cudnnTensorDescriptor_t, data_type: cudnnDataType_t, nb_dims: c_int, dim_a: *const c_int, stride_a: *const c_int) -> Result<(), Error>

Initializes a generic CUDA cuDNN Tensor Descriptor with specific properties.
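A sketch of building a fully-packed 4-D NCHW tensor descriptor; the dimensions (n = 1, c = 3, h = 224, w = 224) and the CUDNN_DATA_FLOAT variant path are illustrative assumptions:

    fn packed_nchw_descriptor() -> Result<cudnnTensorDescriptor_t, Error> {
        let dims: [c_int; 4] = [1, 3, 224, 224];
        // Fully-packed strides: each stride is the product of the dimensions after it.
        let strides: [c_int; 4] = [3 * 224 * 224, 224 * 224, 224, 1];
        let desc = API::create_tensor_descriptor()?;
        API::set_tensor_descriptor(desc,
                                   cudnnDataType_t::CUDNN_DATA_FLOAT,
                                   4,
                                   dims.as_ptr(),
                                   strides.as_ptr())?;
        Ok(desc)
    }
    // Pair with API::destroy_tensor_descriptor(desc) when the descriptor is freed.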

fn get_tensor_descriptor(tensor_desc: cudnnTensorDescriptor_t, nb_dims_requested: c_int, data_type: *mut cudnnDataType_t, nb_dims: *mut c_int, dim_a: *mut c_int, stride_a: *mut c_int) -> Result<(), Error>

Returns information about a generic CUDA cuDNN Tensor Descriptor.

fn transform_tensor(handle: cudnnHandle_t, alpha: *const c_void, src_desc: cudnnTensorDescriptor_t, src_data: *const c_void, beta: *const c_void, dest_desc: cudnnTensorDescriptor_t, dest_data: *mut c_void) -> Result<(), Error>

Transforms a CUDA cuDNN Tensor into another Tensor with a different layout.

This function copies the scaled data from one tensor to another tensor with a different layout. Those descriptors need to have the same dimensions but not necessarily the same strides. The input and output tensors must not overlap in any way (i.e., tensors cannot be transformed in place). This function can be used to convert a tensor with an unsupported format to a supported one.

fn add_tensor(handle: cudnnHandle_t, alpha: *const c_void, bias_desc: cudnnTensorDescriptor_t, bias_data: *const c_void, beta: *const c_void, src_dest_desc: cudnnTensorDescriptor_t, src_dest_data: *mut c_void) -> Result<(), Error>

Adds the scaled values of one CUDA cuDNN Tensor to another.

Up to dimension 5, all tensor formats are supported. Beyond those dimensions, this routine is not supported.

This function adds the scaled values of one bias tensor to another tensor. Each dimension of the bias tensor must match the corresponding dimension of the src_dest tensor or must be equal to 1. In the latter case, the same value from the bias tensor for those dimensions will be used to blend into the src_dest tensor.
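A sketch of the common bias case: a 1 x C x 1 x 1 bias tensor blended into an N x C x H x W activation tensor in place. Both descriptors are assumed to have been set up beforehand and to use float data:

    fn add_channel_bias(handle: cudnnHandle_t,
                        bias_desc: cudnnTensorDescriptor_t,     // 1 x C x 1 x 1
                        bias_data: *const c_void,
                        src_dest_desc: cudnnTensorDescriptor_t, // N x C x H x W, updated in place
                        src_dest_data: *mut c_void) -> Result<(), Error> {
        // src_dest = 1 * bias (broadcast over N, H, W) + 1 * src_dest.
        let alpha = 1f32;
        let beta = 1f32;
        API::add_tensor(handle,
                        &alpha as *const f32 as *const c_void,
                        bias_desc, bias_data,
                        &beta as *const f32 as *const c_void,
                        src_dest_desc, src_dest_data)
    }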

fn set_tensor(handle: cudnnHandle_t, src_dest_desc: cudnnTensorDescriptor_t, src_dest_data: *mut c_void, value: *const c_void) -> Result<(), Error>

Sets all elements of a tensor to a given value.

fn scale_tensor(handle: cudnnHandle_t, src_dest_desc: cudnnTensorDescriptor_t, src_dest_data: *mut c_void, alpha: *const c_void) -> Result<(), Error>

Scales all elements of a tensor by a given factor.
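A sketch combining the two calls: fill a float tensor with ones, then halve every element. The value and the scaling factor are passed as pointers to host scalars of the tensor's data type:

    fn fill_and_halve(handle: cudnnHandle_t,
                      desc: cudnnTensorDescriptor_t,
                      data: *mut c_void) -> Result<(), Error> {
        let one = 1f32;
        let half = 0.5f32;
        // Set every element to 1.0, then scale the whole tensor by 0.5.
        API::set_tensor(handle, desc, data, &one as *const f32 as *const c_void)?;
        API::scale_tensor(handle, desc, data, &half as *const f32 as *const c_void)
    }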

impl API
[src]

fn init() -> Result<cudnnHandle_t, Error>

Initialize the CUDA cuDNN API with needed context and resources.

The returned handle must be provided to future CUDA cuDNN API calls. Call this method outside of performance critical routines.

fn destroy(handle: cudnnHandle_t) -> Result<(), Error>

Destroys the CUDA cuDNN context and resources associated with the handle.

Frees up resources and will call cudaDeviceSynchronize internally. Therefore, use this method outside of performance critical routines.

fn get_version() -> usize

Returns the version of the CUDA cuDNN API.
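A sketch of the handle lifecycle: initialize once, reuse the handle for all subsequent calls, and destroy it outside of performance critical code. The closure-based wrapper is only an illustration:

    fn with_cudnn<F>(work: F) -> Result<(), Error>
        where F: FnOnce(cudnnHandle_t) -> Result<(), Error>
    {
        let handle = API::init()?;
        println!("cuDNN version: {}", API::get_version());
        let result = work(handle);
        // Tear down even if `work` failed; destroy() synchronizes the device.
        API::destroy(handle)?;
        result
    }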

Trait Implementations

impl Clone for API
[src]

fn clone(&self) -> API

Returns a copy of the value. Read more

fn clone_from(&mut self, source: &Self)
1.0.0

Performs copy-assignment from source. Read more

impl Copy for API
[src]

impl Debug for API
[src]

fn fmt(&self, __arg_0: &mut Formatter) -> Result

Formats the value using the given formatter.