var searchIndex = {}; searchIndex["collenchyma_nn"] = {"doc":"Provides a [Collenchyma][collenchyma] Plugin to extend Collenchyma with Neural Network-related\noperations such as convolutions, pooling, ReLU, etc. A full list of operations provided by this Plugin\ncan be found at the [provided Operations section](#operations).","items":[[4,"ConvForwardAlgo","collenchyma_nn","Different algorithms to compute the forward convolution.",null,null],[13,"Auto","","Attempt to automatically find the best of the other available algorithms.",0,null],[13,"GEMM","","Compute the convolution as an explicit matrix product.",0,null],[13,"ImplicitGEMM","","Compute the convolution as a matrix product without forming the matrix that holds the input data.",0,null],[13,"ImplicitPrecompiledGEMM","","Similar to `ImplicitGEMM`, but needs some workspace to precompile the implicit indices.",0,null],[13,"FFT","","Compute the convolution as a Fast Fourier Transform.",0,null],[13,"FFTTiling","","Compute the convolution as a Fast Fourier Transform with 32x32 tiles.",0,null],[13,"Direct","","Compute the convolution without implicit or explicit matrix multiplication. **Do not try to use this**.",0,null],[4,"ConvBackwardFilterAlgo","","Different algorithms to compute the gradient with respect to the filter.",null,null],[13,"Auto","","Attempt to automatically find the best of the other available algorithms.",1,null],[13,"ImplicitGEMM","","Compute the convolution as a matrix product without forming the matrix that holds the input data.",1,null],[13,"ImplicitGEMMSum","","Compute the convolution as a sum of matrix products without forming the matrix that holds the input data.",1,null],[13,"ImplicitPrecompiledGEMMSum","","Similar to `ImplicitGEMMSum`, but needs some workspace to precompile the implicit indices.",1,null],[13,"FFT","","Compute the convolution as a Fast Fourier Transform.",1,null],[4,"ConvBackwardDataAlgo","","Different algorithms to compute the gradient with respect to the data.",null,null],[13,"Auto","","Attempt to automatically find the best of the other available algorithms.",2,null],[13,"ImplicitGEMM","","Compute the convolution as a matrix product without forming the matrix that holds the input data.",2,null],[13,"ImplicitGEMMSum","","Compute the convolution as a sum of matrix products without forming the matrix that holds the input data.",2,null],[13,"FFT","","Compute the convolution as a Fast Fourier Transform.",2,null],[13,"FFTTiling","","Compute the convolution as a Fast Fourier Transform with 32x32 tiles.",2,null],[11,"clone","","",0,null],[11,"fmt","","",0,null],[11,"is_auto","","Check if the algorithm should be chosen automatically.",0,null],[11,"clone","","",1,null],[11,"fmt","","",1,null],[11,"is_auto","","Check if the algorithm should be chosen automatically.",1,null],[11,"clone","","",2,null],[11,"fmt","","",2,null],[11,"is_auto","","Check if the algorithm should be chosen automatically.",2,null],[0,"frameworks","","Provides the specific Framework implementations for the Library Operations.",null,null],[0,"native","collenchyma_nn::frameworks","Provides NN for a Native backend.",null,null],[0,"helper","collenchyma_nn::frameworks::native","Provides useful macros for easier NN implementation for native.",null,null],[3,"ConvolutionConfig","collenchyma_nn::frameworks::native::helper","",null,null],[3,"NormalizationConfig","","",null,null],[3,"PoolingConfig","","",null,null],[5,"write_to_memory","","Just a helper function until SharedTensor has a nice interface for writing data",null,{"inputs":[{"name":"memorytype"},{"name":"t"}],"output":null}],[5,"sigmoid","","Computes the Sigmoid Function on the CPU",null,{"inputs":[{"name":"t"}],"output":{"name":"t"}}],[5,"sigmoid_grad","","Computes the Sigmoid Gradient on the CPU",null,{"inputs":[{"name":"t"},{"name":"t"}],"output":{"name":"t"}}],[5,"relu","","Computes the ReLU Function on the CPU",null,{"inputs":[{"name":"t"}],"output":{"name":"t"}}],[5,"relu_grad","","Computes the ReLU Gradient on the CPU",null,{"inputs":[{"name":"t"},{"name":"t"}],"output":{"name":"t"}}],[5,"tanh","","Computes the Tanh Function on the CPU",null,{"inputs":[{"name":"t"}],"output":{"name":"t"}}],[5,"tanh_grad","","Computes the Tanh Gradient on the CPU",null,{"inputs":[{"name":"t"},{"name":"t"}],"output":{"name":"t"}}],[11,"clone","","",3,null],[11,"fmt","","",3,null],[11,"clone","","",4,null],[11,"fmt","","",4,null],[11,"clone","","",5,null],[11,"fmt","","",5,null],[0,"cuda","collenchyma_nn::frameworks","Provides NN for a CUDA backend.",null,null],[0,"helper","collenchyma_nn::frameworks::cuda","Provides useful macros for easier NN implementation for CUDA/cuDNN.",null,null],[5,"receive_memory_ptr","collenchyma_nn::frameworks::cuda::helper","Returns a cuDNN-ready memory pointer from a SharedTensor.",null,{"inputs":[{"name":"sharedtensor"},{"name":"devicetype"}],"output":{"name":"result"}}],[5,"receive_memory_ptr_mut","","Returns a mutable cuDNN-ready memory pointer from a SharedTensor.",null,{"inputs":[{"name":"sharedtensor"},{"name":"devicetype"}],"output":{"name":"result"}}],[8,"ICudnnDesc","collenchyma_nn::frameworks::cuda","",null,null],[10,"cudnn_tensor_desc","","",6,null],[10,"cudnn_tensor_desc_softmax","","Creates a TensorDescriptor similar to `cudnn_tensor_desc`,\nbut will create a fitting 4D tensor if the actual tensor would be 1D-3D.",6,null],[10,"cudnn_tensor_desc_flat","","Creates a TensorDescriptor similar to `cudnn_tensor_desc`,\nbut will create a fitting 3D tensor if the actual tensor would be 1D/2D.",6,null],[10,"cudnn_filter_desc","","",6,null],[10,"cudnn_convolution_desc","","",6,null],[8,"NNOperationConfig","collenchyma_nn","Provides generic NN Operation Config functionality.",null,null],[8,"ConvolutionConfig","","Provides Convolution Config functionality.",null,null],[11,"workspace_size","","Returns the largest workspace size in bytes needed\nfor any of the convolution operations.",7,null],[8,"NN","","Provides the functionality for a backend to support Neural Network-related operations.",null,null],[16,"CC","","The Convolution Operation Config representation for this Plugin.",8,null],[16,"CLRN","","The LRN Operation Config representation for this Plugin.",8,null],[16,"CPOOL","","The Pooling Operation Config representation for this Plugin.",8,null],[10,"init_nn","","Initializes the Plugin.",8,{"inputs":[],"output":null}],[10,"device","","Returns the device on which the Plugin operations will run.",8,null],[8,"Sigmoid","","Provides the functionality for a Backend to support Sigmoid operations.",null,null],[10,"sigmoid","","Computes the [Sigmoid function][sigmoid] over the input Tensor `x` with complete memory management.\n[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function",9,null],[10,"sigmoid_plain","","Computes the Sigmoid function over the input Tensor `x` without any memory management.",9,null],[10,"sigmoid_grad","","Computes the gradient of a [Sigmoid function][sigmoid] over the input Tensor `x` with complete memory management.\n[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function",9,null],[10,"sigmoid_grad_plain","","Computes the gradient of a Sigmoid function over the input Tensor `x` without any memory management.",9,null],[8,"SigmoidPointwise","","Provides the functionality for pointwise Sigmoid operations (overwrites the input with the result of the operation).",null,null],[10,"sigmoid_pointwise","","Computes the [Sigmoid function][sigmoid] over the input Tensor `x` with complete memory management.\n[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function",10,null],[10,"sigmoid_pointwise_plain","","Computes the Sigmoid function over the input Tensor `x` without any memory management.",10,null],[10,"sigmoid_pointwise_grad","","Computes the gradient of a [Sigmoid function][sigmoid] over the input Tensor `x` with complete memory management.\n[sigmoid]: https://en.wikipedia.org/wiki/Sigmoid_function",10,null],[10,"sigmoid_pointwise_grad_plain","","Computes the gradient of a Sigmoid function over the input Tensor `x` without any memory management.",10,null],[8,"Relu","","Provides the functionality for a Backend to support ReLU operations.",null,null],[10,"relu","","Computes the [Rectified linear units][relu] over the input Tensor `x` with complete memory management.\n[relu]: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)",11,null],[10,"relu_plain","","Computes the ReLU over the input Tensor `x` without any memory management.",11,null],[10,"relu_grad","","Computes the gradient of [ReLU][relu] over the input Tensor `x` with complete memory management.\n[relu]: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)",11,null],[10,"relu_grad_plain","","Computes the gradient of ReLU over the input Tensor `x` without any memory management.",11,null],[8,"ReluPointwise","","Provides the functionality for pointwise ReLU operations (overwrites the input with the result of the operation).",null,null],[10,"relu_pointwise","","Computes the [Rectified linear units][relu] over the input Tensor `x` with complete memory management.\n[relu]: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)",12,null],[10,"relu_pointwise_plain","","Computes the ReLU over the input Tensor `x` without any memory management.",12,null],[10,"relu_pointwise_grad","","Computes the gradient of [ReLU][relu] over the input Tensor `x` with complete memory management.\n[relu]: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)",12,null],[10,"relu_pointwise_grad_plain","","Computes the gradient of ReLU over the input Tensor `x` without any memory management.",12,null],[8,"Tanh","","Provides the functionality for a Backend to support TanH operations.",null,null],[10,"tanh","","Computes the [hyperbolic Tangent][tanh] over the input Tensor `x` with complete memory management.\n[tanh]: https://en.wikipedia.org/wiki/Hyperbolic_function",13,null],[10,"tanh_plain","","Computes the tanh over the input Tensor `x` without any memory management.",13,null],[10,"tanh_grad","","Computes the gradient of [tanh][tanh] over the input Tensor `x` with complete memory management.\n[tanh]: https://en.wikipedia.org/wiki/Hyperbolic_function",13,null],[10,"tanh_grad_plain","","Computes the gradient of tanh over the input Tensor `x` without any memory management.",13,null],[8,"TanhPointwise","","Provides the functionality for pointwise Tanh operations (overwrites the input with the result of the operation).",null,null],[10,"tanh_pointwise","","Computes the [hyperbolic Tangent][tanh] over the input Tensor `x` with complete memory management.\n[tanh]: https://en.wikipedia.org/wiki/Hyperbolic_function",14,null],[10,"tanh_pointwise_plain","","Computes the tanh over the input Tensor `x` without any memory management.",14,null],[10,"tanh_pointwise_grad","","Computes the gradient of [tanh][tanh] over the input Tensor `x` with complete memory management.\n[tanh]: https://en.wikipedia.org/wiki/Hyperbolic_function",14,null],[10,"tanh_pointwise_grad_plain","","Computes the gradient of tanh over the input Tensor `x` without any memory management.",14,null],[8,"Convolution","","Provides the functionality for a Backend to support Convolution operations.",null,null],[10,"new_convolution_config","","Creates a new ConvolutionConfig, which needs to be passed to further convolution Operations.",15,null],[10,"convolution","","Computes a [CNN convolution][convolution] over the input Tensor `x` with complete memory management.\n[convolution]: https://en.wikipedia.org/wiki/Convolutional_neural_network",15,null],[10,"convolution_plain","","Computes the convolution over the input Tensor `x` without any memory management.",15,null],[10,"convolution_grad_filter","","Computes the gradient of a [CNN convolution][convolution] with respect to the filter and complete memory management.\n[convolution]: https://en.wikipedia.org/wiki/Convolutional_neural_network",15,null],[10,"convolution_grad_filter_plain","","Computes the gradient of a convolution with respect to the filter and without any memory management.",15,null],[10,"convolution_grad_data","","Computes the gradient of a [CNN convolution][convolution] over the input Tensor `x` with respect to the data and complete memory management.\n[convolution]: https://en.wikipedia.org/wiki/Convolutional_neural_network",15,null],[10,"convolution_grad_data_plain","","Computes the gradient of a convolution over the input Tensor `x` with respect to the data and without any memory management.",15,null],[8,"Softmax","","Provides the functionality for a Backend to support Softmax operations.",null,null],[10,"softmax","","Computes a [Softmax][softmax] over the input Tensor `x` with complete memory management.\n[softmax]: https://en.wikipedia.org/wiki/Softmax_function",16,null],[10,"softmax_plain","","Computes the softmax over the input Tensor `x` without any memory management.",16,null],[10,"softmax_grad","","Computes the gradient of a [Softmax][softmax] over the input Tensor `x` with complete memory management.\n[softmax]: https://en.wikipedia.org/wiki/Softmax_function",16,null],[10,"softmax_grad_plain","","Computes the gradient of a softmax over the input Tensor `x` without any memory management.",16,null],[8,"LogSoftmax","","Provides the functionality for a Backend to support LogSoftmax operations.",null,null],[10,"log_softmax","","Computes a logarithmic softmax over the input Tensor `x` with complete memory management.",17,null],[10,"log_softmax_plain","","Computes the logarithmic softmax over the input Tensor `x` without any memory management.",17,null],[10,"log_softmax_grad","","Computes the gradient of a logarithmic softmax over the input Tensor `x` with complete memory management.",17,null],[10,"log_softmax_grad_plain","","Computes the gradient of a logarithmic softmax over the input Tensor `x` without any memory management.",17,null],[8,"LRN","","Provides the functionality for a Backend to support Local Response Normalization operations.",null,null],[10,"new_lrn_config","","Creates a new LRNConfig (Local Response Normalization), which needs to be passed to further LRN Operations.",18,null],[10,"lrn","","Computes an [LRN][lrn] over the input Tensor `x` with complete memory management.\n[lrn]: https://en.wikipedia.org/wiki/Convolutional_neural_network",18,null],[10,"lrn_plain","","Computes the LRN over the input Tensor `x` without any memory management.",18,null],[10,"lrn_grad","","Computes the gradient of an [LRN][lrn] over the input Tensor `x` with complete memory management.\n[lrn]: https://en.wikipedia.org/wiki/Convolutional_neural_network",18,null],[10,"lrn_grad_plain","","Computes the gradient of an LRN over the input Tensor `x` without any memory management.",18,null],[8,"Pooling","","Provides the functionality for a Backend to support Pooling operations.",null,null],[10,"new_pooling_config","","Creates a new PoolingConfig, which needs to be passed to further pooling Operations.",19,null],[10,"pooling_max","","Computes non-linear down-sampling ([max Pooling][pooling]) over the input Tensor `x` with complete memory management.\n[pooling]: https://en.wikipedia.org/wiki/Convolutional_neural_network#Pooling_layer",19,null],[10,"pooling_max_plain","","Computes the max pooling over the input Tensor `x` without any memory management.",19,null],[10,"pooling_max_grad","","Computes the gradient of [max Pooling][pooling] over the input Tensor `x` with complete memory management.\n[pooling]: https://en.wikipedia.org/wiki/Convolutional_neural_network#Pooling_layer",19,null],[10,"pooling_max_grad_plain","","Computes the gradient of max pooling over the input Tensor `x` without any memory management.",19,null],[14,"impl_ops_sigmoid_for!","","",null,null],[14,"impl_ops_relu_for!","","",null,null],[14,"impl_ops_tanh_for!","","",null,null],[14,"impl_ops_convolution_for!","","",null,null],[14,"impl_ops_softmax_for!","","",null,null],[14,"impl_ops_log_softmax_for!","","",null,null],[14,"impl_ops_lrn_for!","","",null,null],[14,"impl_ops_pooling_for!","","",null,null],[14,"impl_oconf_for_cc!","","",null,null],[14,"impl_oconf_for_clrn!","","",null,null],[14,"impl_oconf_for_pooling!","","",null,null],[14,"impl_ops_sigmoid_for!","","",null,null],[14,"impl_ops_sigmoid_pointwise_for!","","",null,null],[14,"impl_ops_relu_for!","","",null,null],[14,"impl_ops_relu_pointwise_for!","","",null,null],[14,"impl_ops_tanh_for!","","",null,null],[14,"impl_ops_tanh_pointwise_for!","","",null,null],[14,"impl_ops_convolution_for!","","",null,null],[14,"impl_ops_softmax_for!","","",null,null],[14,"impl_ops_log_softmax_for!","","",null,null],[14,"impl_ops_lrn_for!","","",null,null],[14,"impl_ops_pooling_for!","","",null,null],[11,"workspace_size","","Returns the largest workspace size in bytes needed\nfor any of the convolution operations.",7,null]],"paths":[[4,"ConvForwardAlgo"],[4,"ConvBackwardFilterAlgo"],[4,"ConvBackwardDataAlgo"],[3,"ConvolutionConfig"],[3,"NormalizationConfig"],[3,"PoolingConfig"],[8,"ICudnnDesc"],[8,"ConvolutionConfig"],[8,"NN"],[8,"Sigmoid"],[8,"SigmoidPointwise"],[8,"Relu"],[8,"ReluPointwise"],[8,"Tanh"],[8,"TanhPointwise"],[8,"Convolution"],[8,"Softmax"],[8,"LogSoftmax"],[8,"LRN"],[8,"Pooling"]]}; initSearch(searchIndex);
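// Note on the row format (a sketch inferred from the entries above; this is not
// an official rustdoc specification): each element of `items` appears to be
// [itemTypeCode, name, modulePath, docString, pathIndex, searchType], where an
// empty modulePath means "same as the previous row", the numeric codes observed
// here map to 0 = module, 3 = struct, 4 = enum, 5 = function, 8 = trait,
// 10 = required trait method, 11 = method, 13 = enum variant, 14 = macro, and
// 16 = associated type, and pathIndex points into the trailing `paths` array to
// identify the parent item (e.g. 9 -> [8,"Sigmoid"]).
//
// A minimal, hypothetical example of scanning this index directly, e.g. from
// the browser console of a docs page that has loaded this file:
//
//   var hits = searchIndex["collenchyma_nn"].items.filter(function (item) {
//     return item[1].indexOf("sigmoid") !== -1; // item[1] is the item name
//   });
//   hits.forEach(function (item) {
//     console.log(item[1] + " - " + item[3]); // item[3] is the doc string
//   });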