var searchIndex = {}; searchIndex["leaf"] = {"doc":"Leaf is a open, modular and clear-designed Machine Intelligence Framework providing\nstate-of-the-art performance for distributed (Deep|Machine) Learning - sharing concepts from\nTensorflow and Caffe.","items":[[0,"layer","leaf","Provides the generics and interfaces for the specific [Layers][layers].\n[layers]: ../layers/index.html",null,null],[3,"Layer","leaf::layer","The generic Layer",null,null],[12,"name","","Identifies the Network",0,null],[12,"config","","The configuration of the Layer",0,null],[12,"worker","","The [implementation][1] of the Layer.\n[1]: ../layers/index.html",0,null],[12,"weights_data","","The vector that stores shared references to the weights in the form of blobs.",0,null],[12,"weights_gradient","","The vector that stores shared references to the weights in the form of blobs.",0,null],[12,"input_blobs_data","","References to all the input blobs of the layer.",0,null],[12,"input_blobs_gradient","","References to all the input blobs of the layer.",0,null],[12,"input_blob_names","","Names for all the input blobs of the layer.",0,null],[12,"output_blobs_data","","References to all the output blobs of the layer.",0,null],[12,"output_blobs_gradient","","References to all the output blobs of the layer.",0,null],[12,"blob_names","","All the blobs of the layer that can be addressed by name.",0,null],[3,"LayerConfig","","Layer Configuration Struct",null,null],[12,"name","","The name of the Layer",1,null],[12,"layer_type","","The type of the Layer",1,null],[12,"outputs","","The name for each output Blob",1,null],[12,"inputs","","The name for each input Blob",1,null],[12,"params","","Specifies training configuration for each weight blob.",1,null],[12,"propagate_down","","Specifies on which inputs the backpropagation should be skipped.\nThe size must be either 0 or equal to the number of inputs.",1,null],[4,"LayerType","","The Layer Types",null,null],[13,"Convolution","","Convolution Layer",2,null],[13,"Linear","","Linear Layer",2,null],[13,"LogSoftmax","","LogSoftmax Layer",2,null],[13,"Pooling","","Pooling Layer",2,null],[13,"Sequential","","Sequential Layer",2,null],[13,"Softmax","","Softmax Layer",2,null],[13,"ReLU","","ReLU Layer",2,null],[13,"Sigmoid","","Sigmoid Layer",2,null],[13,"NegativeLogLikelihood","","NegativeLogLikelihood Layer",2,null],[13,"Reshape","","Reshape Layer",2,null],[8,"ILayer","","A Layer in a [Neural Network][1] that can handle forward and backward of a computation step.\n[1]: ../network/index.html",null,null],[11,"init","","Initialize the layer for computation.",3,null],[11,"reshape","","Adjust to shapes of the output blobs to fit the shapes of the input blobs.",3,null],[11,"resize_shared_workspace","","Adjust size of shared workspace.",3,null],[11,"forward","","Compute the [feedforward][1] layer output using the provided Backend.\n[1]: https://en.wikipedia.org/wiki/Feedforward_neural_network",3,null],[11,"backward_input","","Compute the [backpropagation][1] input gradient using the provided backend.\n[1]: https://en.wikipedia.org/wiki/Backpropagation",3,null],[11,"backward_parameters","","Compute the [backpropagation][1] parameters gradient using the provided backend.\n[1]: https://en.wikipedia.org/wiki/Backpropagation",3,null],[11,"sync","","Synchronize the blobs before doing a forward or backward operation.",3,null],[11,"auto_output_blobs","","Return whether "anonymous" output blobs are created automatically for the layer.",3,null],[11,"min_output_blobs","","Returns the minimum number of output blobs required by the layer,\nor 0 if no minimum number is required.",3,null],[11,"exact_num_output_blobs","","Returns the exact number of output blobs required by the layer,\nor `None` if no exact number is required.",3,null],[11,"auto_weight_blobs","","Return whether weight blobs are created automatically for the layer.",3,null],[11,"exact_num_input_blobs","","Returns the exact number of input blobs required by the layer,\nor `None` if no exact number is required.",3,null],[11,"allow_force_backward","","Return whether to allow force_backward for a given input blob index.",3,null],[11,"sync_native","","Return wether a simple native backend should be used to [sync][1] instead of the default backend.\n[1]: #method.sync",3,null],[11,"compute_in_place","","Return wether the computations of a layer should be done in-place (the output will be written where the input was read from).",3,null],[11,"is_container","","Return wether the layer is a container.",3,null],[11,"loss_weight","","Return the associated loss weight for a given output blob index.",3,null],[11,"inputs_data","","Return the input tensors of the layer.",3,null],[11,"inputs_gradients","","Return the gradients of the input tensors of the layer.",3,null],[11,"outputs_data","","Return the output tensors of the layer.",3,null],[11,"outputs_gradients","","Return the gradients of the output tensors of the layer.",3,null],[11,"learnable_weights","","Return the learnable weights inside the layer.",3,null],[11,"learnable_weights_gradients","","Return the gradients for the learnable weights inside the layer.",3,null],[11,"learnable_weights_lr","","Return the learning rates for the learnable weights inside the layer.",3,null],[8,"ComputeOutput","","A Layer that can compute the output for a given input.",null,null],[10,"compute_output","","Compute output for given input and write them into `output_data`.",4,null],[8,"ComputeInputGradient","","A Layer that can compute the gradient with respect to its input.",null,null],[10,"compute_input_gradient","","Compute gradients with respect to the inputs and write them into `input_gradients`.",5,null],[8,"ComputeParametersGradient","","A Layer that can compute the gradient with respect to its parameters (= weights, bias, etc.).",null,null],[11,"compute_parameters_gradient","","Compute gradients with respect to the parameters and write them into `parameters_gradients`.",6,null],[11,"fmt","","",0,null],[11,"from_config","","Creates a new Layer from a [LayerConfig][1].\n[1]: ./struct.LayerConfig.html",0,{"inputs":[{"name":"rc"},{"name":"layerconfig"}],"output":{"name":"layer"}}],[11,"connect","","Connect the layer to the other layers in a [Network][1] and set up Blobs.\n[1]: ../network/struct.Network.html",0,null],[11,"init_backprop","","Initializes layer for [backpropagation][1]\n[1]: https://en.wikipedia.org/wiki/Backpropagation",0,null],[11,"init_force_backward","","Set [backpropagation][1] flags to force this layer to backpropagate.\n[1]: https://en.wikipedia.org/wiki/Backpropagation",0,null],[11,"forward","","Uses the underlying layer implementation to compute a forward step.",0,null],[11,"backward","","Uses the underlying layer implementation to compute a backward step.",0,null],[11,"backward_input","","Calculate the gradient w.r.t. input.",0,null],[11,"backward_parameters","","Calculate the gradient w.r.t. parameters.",0,null],[11,"synchronize","","Synchronize the layers backend.",0,null],[11,"update_weights","","Updates the [weights][1] with the weight update computed by the [Solver][2].\n[1]: https://en.wikipedia.org/wiki/Synaptic_weight\n[2]: ../solver/struct.Solver.html",0,null],[11,"clear_weights_gradients","","Clears the [weights][1] gradients and zero-inits them.\n[1]: https://en.wikipedia.org/wiki/Synaptic_weight",0,null],[11,"set_weight_propagate_down","","Sets whether the layer should compute gradients w.r.t. a\nweight at a particular index given by `weight_id`.",0,null],[11,"is_using_in_place","","Returns `true` when the layer is using in-place computation.",0,null],[11,"input_blob_names","","Returns the names of all the input blobs.",0,null],[11,"loss","","Returns the [loss weight][1] associated with the weight blob\nwith id `weight_id`.\n[1]: http://caffe.berkeleyvision.org/tutorial/loss.html",0,null],[11,"learnable_weights_data","","Returns all the learnable weights in the layer.",0,null],[11,"learnable_weights_gradients","","Returns the gradients for all the learnable weights in the layer.",0,null],[11,"learnable_weights_lr","","Returns the learning rate for all the learnable weights in the layer.",0,null],[11,"fmt","","",3,null],[11,"clone","","",1,null],[11,"fmt","","",1,null],[11,"clone","","",2,null],[11,"fmt","","",2,null],[11,"supports_in_place","","Returns wether the LayerType supports in-place operations.",2,null],[11,"new","","Creates a new LayerConfig",1,{"inputs":[{"name":"str"},{"name":"l"}],"output":{"name":"layerconfig"}}],[11,"output","","Returns the Name of the requested output Blob",1,null],[11,"outputs_len","","Returns the number of output Blobs",1,null],[11,"add_output","","Add a output by name",1,null],[11,"input","","Returns the Name of the requested input Blob",1,null],[11,"inputs_len","","Returns the number of input Blobs",1,null],[11,"add_input","","Add a input by name",1,null],[11,"param","","Returns the requested WeightConfig",1,null],[11,"params_len","","Returns the number of params",1,null],[11,"validate","","Check if the configured parameters make sense.",1,null],[0,"layers","leaf","Provides the fundamental units of computation for the [Network][1].\n[1]: ../network/index.html",null,null],[0,"activation","leaf::layers","Provides nonlinear activation methods.",null,null],[0,"relu","leaf::layers::activation","Applies the nonlinear Rectified Linear Unit.",null,null],[3,"ReLU","leaf::layers::activation::relu","ReLU Activation Layer",null,null],[11,"clone","","",7,null],[11,"fmt","","",7,null],[11,"exact_num_output_blobs","","",7,null],[11,"exact_num_input_blobs","","",7,null],[11,"compute_in_place","","",7,null],[11,"reshape","","",7,null],[11,"compute_output","","",7,null],[11,"compute_input_gradient","","",7,null],[0,"sigmoid","leaf::layers::activation","Applies the nonlinear Log-Sigmoid function.",null,null],[3,"Sigmoid","leaf::layers::activation::sigmoid","Sigmoid Activation Layer",null,null],[11,"clone","","",8,null],[11,"fmt","","",8,null],[11,"exact_num_output_blobs","","",8,null],[11,"exact_num_input_blobs","","",8,null],[11,"compute_in_place","","",8,null],[11,"reshape","","",8,null],[11,"compute_output","","",8,null],[11,"compute_input_gradient","","",8,null],[0,"common","leaf::layers","Provides common neural network layers.",null,null],[0,"convolution","leaf::layers::common","Convolves the input tensor.",null,null],[3,"Convolution","leaf::layers::common::convolution","Convolution Layer",null,null],[3,"ConvolutionConfig","","Specifies configuration parameters for a Convolution Layer.",null,null],[12,"num_output","","The number of output feature maps",9,null],[12,"filter_shape","","The size of the kernel",9,null],[12,"stride","","The stride size",9,null],[12,"padding","","The padding size",9,null],[11,"clone","","",10,null],[11,"fmt","","",10,null],[11,"from_config","","Create a Convolution layer from a ConvolutionConfig.",10,{"inputs":[{"name":"convolutionconfig"}],"output":{"name":"convolution"}}],[11,"num_spatial_dims","","Calculates the number of spatial dimensions for the convolution operation.",10,null],[11,"calculate_output_shape","","",10,null],[11,"filter_shape","","",10,null],[11,"stride","","",10,null],[11,"padding","","",10,null],[11,"exact_num_output_blobs","","",10,null],[11,"exact_num_input_blobs","","",10,null],[11,"auto_weight_blobs","","",10,null],[11,"reshape","","",10,null],[11,"resize_shared_workspace","","",10,null],[11,"compute_output","","",10,null],[11,"compute_input_gradient","","",10,null],[11,"compute_parameters_gradient","","",10,null],[11,"clone","","",9,null],[11,"fmt","","",9,null],[11,"into","","",9,null],[0,"linear","leaf::layers::common","Applies a linear transformation to the input data `y = a * x + b`",null,null],[3,"Linear","leaf::layers::common::linear","Linear Layer",null,null],[3,"LinearConfig","","Specifies configuration parameters for a Linear Layer.",null,null],[12,"output_size","","The number of output values",11,null],[11,"fmt","","",12,null],[11,"from_config","","Create a Linear layer from a LinearConfig.",12,{"inputs":[{"name":"linearconfig"}],"output":{"name":"linear"}}],[11,"exact_num_output_blobs","","",12,null],[11,"exact_num_input_blobs","","",12,null],[11,"auto_weight_blobs","","",12,null],[11,"init","","",12,null],[11,"reshape","","",12,null],[11,"compute_output","","",12,null],[11,"compute_input_gradient","","",12,null],[11,"compute_parameters_gradient","","",12,null],[11,"default","","",12,{"inputs":[],"output":{"name":"linear"}}],[11,"clone","","",11,null],[11,"fmt","","",11,null],[11,"into","","",11,null],[0,"log_softmax","leaf::layers::common","Computes the logarithmic softmax of its input.",null,null],[3,"LogSoftmax","leaf::layers::common::log_softmax","LogSoftmax Layer",null,null],[11,"clone","","",13,null],[11,"fmt","","",13,null],[11,"reshape","","",13,null],[11,"compute_output","","",13,null],[11,"compute_input_gradient","","",13,null],[11,"default","","",13,{"inputs":[],"output":{"name":"logsoftmax"}}],[0,"pooling","leaf::layers::common","Applies pooling to the input.",null,null],[3,"Pooling","leaf::layers::common::pooling","[Pooling](./index.html) Layer",null,null],[3,"PoolingConfig","","Specifies configuration parameters for a Pooling Layer.",null,null],[12,"mode","","The PoolingMode to use",14,null],[12,"filter_shape","","The shape of the filter",14,null],[12,"stride","","The stride size",14,null],[12,"padding","","The padding size",14,null],[4,"PoolingMode","","The different modes of pooling that can be calculated.",null,null],[13,"Max","","The maximum value inside the pooling window will be used as result.",15,null],[11,"clone","","",16,null],[11,"fmt","","",16,null],[11,"from_config","","Create a Pooling layer from a PoolingConfig.",16,{"inputs":[{"name":"poolingconfig"}],"output":{"name":"pooling"}}],[11,"num_spatial_dims","","Calculates the number of spatial dimensions for the pooling operation.",16,null],[11,"calculate_output_shape","","",16,null],[11,"filter_shape","","",16,null],[11,"stride","","",16,null],[11,"padding","","",16,null],[11,"exact_num_output_blobs","","",16,null],[11,"exact_num_input_blobs","","",16,null],[11,"reshape","","",16,null],[11,"compute_output","","",16,null],[11,"compute_input_gradient","","",16,null],[11,"clone","","",14,null],[11,"fmt","","",14,null],[11,"into","","",14,null],[11,"clone","","",15,null],[11,"fmt","","",15,null],[0,"sequential","leaf::layers::common","A container layer that runs operations sequentially on the contained layers.",null,null],[3,"Sequential","leaf::layers::common::sequential","Sequential Layer",null,null],[3,"SequentialConfig","","Specifies configuration parameters for a Sequential Layer.",null,null],[12,"layers","","Defines the layers of the container via [LayerConfig][1]s.\n[1]: ../layer/struct.LayerConfig.html",17,null],[12,"inputs","","Defines the names and shapes of the input tensors.",17,null],[12,"force_backward","","Defines if the container will force every layer to do [backpropagation][1].\n[1]: https://en.wikipedia.org/wiki/Backpropagation",17,null],[11,"fmt","","",18,null],[11,"empty","","Create a empty Sequential container layer.",18,{"inputs":[],"output":{"name":"sequential"}}],[11,"from_config","","Create a Sequential layer from a SequentialConfig.",18,{"inputs":[{"name":"rc"},{"name":"sequentialconfig"}],"output":{"name":"sequential"}}],[11,"init_layers","","Initializes a sequential container.",18,null],[11,"is_container","","",18,null],[11,"inputs_data","","",18,null],[11,"inputs_gradients","","",18,null],[11,"outputs_data","","",18,null],[11,"outputs_gradients","","",18,null],[11,"learnable_weights","","",18,null],[11,"learnable_weights_gradients","","",18,null],[11,"resize_shared_workspace","","",18,null],[11,"forward","","",18,null],[11,"backward_input","","",18,null],[11,"backward_parameters","","",18,null],[11,"compute_output","","",18,null],[11,"compute_input_gradient","","",18,null],[11,"compute_parameters_gradient","","",18,null],[11,"clone","","",17,null],[11,"fmt","","",17,null],[11,"find_in_place_output","","Tries to find the output of a previous layer that is usable as in-place output for the n-th layer.",17,null],[11,"add_layer","","Add layer at the end of the sequential container.",17,null],[11,"add_input","","Add a input to the network.",17,null],[11,"into","","",17,null],[11,"default","","",17,{"inputs":[],"output":{"name":"sequentialconfig"}}],[0,"softmax","leaf::layers::common","Computes the softmax of its input.",null,null],[3,"Softmax","leaf::layers::common::softmax","Softmax Layer",null,null],[11,"clone","","",19,null],[11,"fmt","","",19,null],[11,"reshape","","",19,null],[11,"compute_output","","",19,null],[11,"compute_input_gradient","","",19,null],[11,"default","","",19,{"inputs":[],"output":{"name":"softmax"}}],[8,"FilterLayer","leaf::layers::common","Provides common utilities for Layers that utilize a filter with stride and padding.",null,null],[11,"calculate_spatial_output_dims","","Computes the shape of the spatial dimensions.",20,null],[10,"calculate_output_shape","","Calculate output shape based on the shape of filter, padding, stride and input.",20,null],[10,"num_spatial_dims","","Calculates the number of spatial dimensions for the pooling operation.",20,null],[11,"spatial_filter_dims","","Retrievs the spatial dimensions for the filter based on `self.filter_shape()`\nand the number of spatial dimensions.",20,null],[11,"stride_dims","","Retrievs the stride for the convolution based on `self.stride`\nand the number of spatial dimensions.",20,null],[11,"padding_dims","","Retrievs the padding for the convolution based on `self.padding`\nand the number of spatial dimensions.",20,null],[10,"filter_shape","","The filter_shape that will be used by `spatial_filter_dims`.",20,null],[10,"stride","","The stride that will be used by `stride_dims`.",20,null],[10,"padding","","The padding that will be used by `padding_dims`.",20,null],[0,"loss","leaf::layers","Provides methods to calculate the loss (cost) of some output.",null,null],[0,"negative_log_likelihood","leaf::layers::loss","TODO: DOC",null,null],[3,"NegativeLogLikelihood","leaf::layers::loss::negative_log_likelihood","NegativeLogLikelihood Loss Layer",null,null],[3,"NegativeLogLikelihoodConfig","","Specifies configuration parameters for a NegativeLogLikelihood Layer.",null,null],[12,"num_classes","","How many different classes can be classified.",21,null],[11,"clone","","",22,null],[11,"fmt","","",22,null],[11,"from_config","","Create a NegativeLogLikelihood layer from a NegativeLogLikelihoodConfig.",22,{"inputs":[{"name":"negativeloglikelihoodconfig"}],"output":{"name":"negativeloglikelihood"}}],[11,"exact_num_output_blobs","","",22,null],[11,"exact_num_input_blobs","","",22,null],[11,"auto_output_blobs","","",22,null],[11,"loss_weight","","",22,null],[11,"sync_native","","",22,null],[11,"reshape","","",22,null],[11,"compute_output","","",22,null],[11,"compute_input_gradient","","",22,null],[11,"clone","","",21,null],[11,"fmt","","",21,null],[11,"into","","",21,null],[0,"utility","leaf::layers","Provides various helpful layers, which might be not directly related to\nneural networks in general.",null,null],[0,"flatten","leaf::layers::utility","Flattens the bottom Blob into a simpler top Blob.",null,null],[3,"Flatten","leaf::layers::utility::flatten","Flattening Utility Layer",null,null],[11,"clone","","",23,null],[11,"fmt","","",23,null],[0,"reshape","leaf::layers::utility","Utility layer to give a tensor another shape.",null,null],[3,"Reshape","leaf::layers::utility::reshape","Reshape Utility Layer",null,null],[3,"ReshapeConfig","","Specifies configuration parameters for a Reshape Layer.",null,null],[12,"shape","","The target shape that the input should assume.",24,null],[11,"clone","","",25,null],[11,"fmt","","",25,null],[11,"from_config","","Create a Reshape layer from a ReshapeConfig.",25,{"inputs":[{"name":"reshapeconfig"}],"output":{"name":"reshape"}}],[11,"compute_in_place","","",25,null],[11,"auto_output_blobs","","",25,null],[11,"reshape","","",25,null],[11,"compute_output","","",25,null],[11,"compute_input_gradient","","",25,null],[11,"clone","","",24,null],[11,"fmt","","",24,null],[11,"of_shape","","Create a ReshapeConfig that describes a Reshape layer with a provided shape.",24,null],[11,"into","","",24,null],[0,"solver","leaf","Provides the generics and interfaces for the specific [Solvers][solvers].\n[solvers]: ../solvers/index.html",null,null],[3,"Solver","leaf::solver","Solver that optimizes a [Network][1].\n[1]: ../network/struct.Network.html",null,null],[12,"worker","","The implementation of the Solver",26,null],[3,"SolverConfig","","Configuration for a Solver",null,null],[12,"name","","Name of the solver.",27,null],[12,"network","","The [LayerConfig][1] that is used to initialize the network.\n[1]: ../layer/struct.LayerConfig.html",27,null],[12,"objective","","The [LayerConfig][1] that is used to initialize the objective.\n[1]: ../layer/struct.LayerConfig.html",27,null],[12,"solver","","The [Solver implementation][1] to be used.\n[1]: ../solvers/index.html",27,null],[12,"minibatch_size","","Accumulate gradients over `minibatch_size` instances.",27,null],[12,"lr_policy","","The learning rate policy to be used.",27,null],[12,"base_lr","","The base learning rate.",27,null],[12,"gamma","","gamma as used in the calculation of most learning rate policies.",27,null],[12,"stepsize","","The stepsize used in Step and Sigmoid learning policies.",27,null],[12,"clip_gradients","","The threshold for clipping gradients.",27,null],[12,"weight_decay","","The global [weight decay][1] multiplier for [regularization][2].\n[1]: http://www.alglib.net/dataanalysis/improvinggeneralization.php#header3\n[2]: https://cs231n.github.io/neural-networks-2/#reg",27,null],[12,"regularization_method","","The method of [regularization][1] to use.\n[1]: https://cs231n.github.io/neural-networks-2/#reg",27,null],[12,"momentum","","The [momentum][1] multiplier for [SGD solvers][2].\n[1]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum\n[2]: ../solvers/sgd/index.html",27,null],[4,"SolverKind","","All available types of solvers.",null,null],[13,"SGD","","Stochastic Gradient Descent.\nSee [SGDKind][1] for all available SGD solvers.\n[1]: ./enum.SGDKind.html",28,null],[4,"SGDKind","","All available types of Stochastic Gradient Descent solvers.",null,null],[13,"Momentum","","Stochastic Gradient Descent with Momentum. See [implementation][1]\n[1] ../solvers/",29,null],[4,"LRPolicy","","Learning Rate Policy for a [Solver][1]\n[1]: ./struct.Solver.html",null,null],[13,"Fixed","","always return base_lr",30,null],[13,"Step","","learning rate decays every `step` iterations.\nreturn base_lr * gamma ^ (floor(iter / step))",30,null],[13,"Exp","","return base_lr * gamma ^ iter",30,null],[4,"RegularizationMethod","","[Regularization][1] method for a [Solver][2].\n[1]: https://cs231n.github.io/neural-networks-2/#reg\n[2]: ./struct.Solver.html",null,null],[13,"L2","","L2 regularization",31,null],[0,"confusion_matrix","","TODO: DOC",null,null],[3,"ConfusionMatrix","leaf::solver::confusion_matrix","A [ConfusionMatrix][wiki].",null,null],[3,"Sample","","A single prediction Sample.",null,null],[3,"Accuracy","","The accuracy of the predictions in a ConfusionMatrix.",null,null],[11,"fmt","","",32,null],[11,"new","","Create a ConfusionMatrix that analyzes the prediction of `num_classes` classes.",32,{"inputs":[{"name":"usize"}],"output":{"name":"confusionmatrix"}}],[11,"add_sample","","Add a sample by providing the expected `target` class and the `prediction`.",32,null],[11,"add_samples","","Add a batch of samples.",32,null],[11,"get_predictions","","Get the predicted classes from the output of a network.",32,null],[11,"set_capacity","","Set the `capacity` of the ConfusionMatrix",32,null],[11,"samples","","Return all collected samples.",32,null],[11,"accuracy","","Return the accuracy of the collected predictions.",32,null],[11,"clone","","",33,null],[11,"fmt","","",33,null],[11,"correct","","Returns if the prediction is equal to the expected target.",33,null],[11,"fmt","","",33,null],[11,"clone","","",34,null],[11,"fmt","","",34,null],[11,"fmt","","",34,null],[8,"ISolver","leaf::solver","Implementation of a specific Solver.",null,null],[11,"init","","Initialize the solver, setting up any network related data.",35,null],[10,"compute_update","","Update the weights of the net with part of the gradient.",35,null],[10,"backend","","Returns the backend used by the solver.",35,null],[11,"fmt","","",26,null],[11,"from_config","","Create Solver from [SolverConfig][1]\n[1]: ./struct.SolverConfig.html",26,{"inputs":[{"name":"rc"},{"name":"rc"},{"name":"solverconfig"}],"output":{"name":"solver"}}],[11,"train_minibatch","","Train the network with one minibatch",26,null],[11,"network","","Returns the network trained by the solver.",26,null],[11,"mut_network","","Returns the network trained by the solver.",26,null],[11,"fmt","","",35,null],[11,"clone","","",27,null],[11,"fmt","","",27,null],[11,"default","","",27,{"inputs":[],"output":{"name":"solverconfig"}}],[11,"get_learning_rate","","Return the learning rate for a supplied iteration.",27,null],[11,"clone","","",28,null],[11,"fmt","","",28,null],[11,"with_config","","Create a Solver of the specified kind with the supplied SolverConfig.",28,null],[11,"clone","","",29,null],[11,"fmt","","",29,null],[11,"with_config","","Create a Solver of the specified kind with the supplied SolverConfig.",29,null],[11,"clone","","",30,null],[11,"fmt","","",30,null],[11,"clone","","",31,null],[11,"fmt","","",31,null],[0,"solvers","leaf","Provides the trainers for the [Network][network].\n[network]: ../network/index.html",null,null],[0,"sgd","leaf::solvers","Provides [ISolver][1] implementations based on [Stochastic Gradient\nDescent][2].\n[1]: ../solver/trait.ISolver.html\n[2]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent",null,null],[0,"momentum","leaf::solvers::sgd","A [Stochastic Gradient Descent with Momentum][1]\n[1]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum",null,null],[3,"Momentum","leaf::solvers::sgd::momentum","Stochastic Gradient Descent with Momentum.",null,null],[11,"clone","","",36,null],[11,"fmt","","",36,null],[11,"new","","Create a new SGD Momentum solver.",36,{"inputs":[{"name":"rc"}],"output":{"name":"momentum"}}],[11,"init","","Initialize the SGD Momentum solver, allocating memory for its history.",36,null],[11,"compute_update","","",36,null],[11,"backend","","",36,null],[0,"weight","leaf","Provides configuration of weights and their initialization.",null,null],[3,"WeightConfig","leaf::weight","Specifies training configuration for a weight blob.",null,null],[12,"name","","The name of the weight blob -- useful for sharing weights among\nlayers, but never required otherwise. To share a weight between two\nlayers, give it a (non-empty) name.",37,null],[12,"share_mode","","Whether to require shared weights to have the same shape, or just the same\ncount",37,null],[12,"lr_mult","","The multiplier on the global learning rate for this parameter.",37,null],[12,"decay_mult","","The multiplier on the global weight decay for this parameter.",37,null],[12,"filler","","The filler that initializes the weights in the weight blob.",37,null],[4,"DimCheckMode","","Enum for specifing the shared weights behaviour",null,null],[13,"Strict","","Strict requires that shapes match.",38,null],[13,"Permissive","","Permissive requires only the count of weights to match.",38,null],[4,"FillerType","","Enum for specifing the type of Filler.",null,null],[13,"Constant","","Fills the weight blob with a constant `value` (all values are the same).",39,null],[12,"value","leaf::weight::FillerType","The value that will be used to fill the blob.",39,null],[13,"Glorot","leaf::weight","Fills the weight blobs based on the paper:",39,null],[12,"input_size","leaf::weight::FillerType","Number of input nodes for each output.",39,null],[12,"output_size","","Number of output nodes for each input.",39,null],[11,"clone","leaf::weight","",37,null],[11,"fmt","","",37,null],[11,"default","","",37,{"inputs":[],"output":{"name":"weightconfig"}}],[11,"check_dimensions","","Checks dimensions of two blobs according to the `share_mode`.\nReturns an error if there is a count/shape mismatch.",37,null],[11,"lr_mult","","The multiplier on the global learning rate for this weight blob.",37,null],[11,"decay_mult","","The multiplier on the global weight decay for this weight blob.",37,null],[11,"clone","","",38,null],[11,"fmt","","",38,null],[11,"clone","","",39,null],[11,"fmt","","",39,null],[11,"fill","","Uses a filler as specified by this FillerType to fill the values in a SharedTensor",39,null],[11,"fill_constant","","Directly use the [Constant Filler](#variant.Constant).",39,{"inputs":[{"name":"sharedtensor"},{"name":"f32"}],"output":null}],[11,"fill_glorot","","Directly use the [Glorot Filler](#variant.Glorot).",39,{"inputs":[{"name":"sharedtensor"},{"name":"usize"},{"name":"usize"}],"output":null}],[0,"util","leaf","Provides common utility functions",null,null],[5,"native_backend","leaf::util","Create a simple native backend.",null,{"inputs":[],"output":{"name":"backend"}}],[5,"write_to_memory","","Write into a native Collenchyma Memory.",null,null],[5,"write_to_memory_offset","","Write into a native Collenchyma Memory with a offset.",null,null],[5,"write_batch_sample","","Write the `i`th sample of a batch into a SharedTensor.",null,null],[5,"native_scalar","","Create a Collenchyma SharedTensor for a scalar value.",null,{"inputs":[{"name":"t"}],"output":{"name":"sharedtensor"}}],[5,"cast_vec_usize_to_i32","","Casts a Vec<usize> to as Vec<i32>",null,{"inputs":[{"name":"vec"}],"output":{"name":"vec"}}],[6,"ArcLock","","Shared Lock used for our tensors",null,null],[8,"Axpby","","Extends IBlas with Axpby",null,null],[11,"axpby","","Performs the operation y := a*x + b*y .",40,null],[11,"axpby_plain","","Performs the operation y := a*x + b*y .",40,null],[8,"SolverOps","","Encapsulates all traits required by Solvers.",null,null],[8,"LayerOps","","Encapsulates all traits used in Layers.",null,null],[14,"impl_ilayer_activation!","leaf","",null,null],[14,"impl_ilayer_common!","","",null,null],[14,"impl_ilayer_loss!","","",null,null],[14,"impl_isolver_sgd!","","Implement [ISolver][1] for [SGD solvers][2].\n[1]: ./solver/trait.ISolver.html\n[2]: ./solvers/sgd/index.html",null,null]],"paths":[[3,"Layer"],[3,"LayerConfig"],[4,"LayerType"],[8,"ILayer"],[8,"ComputeOutput"],[8,"ComputeInputGradient"],[8,"ComputeParametersGradient"],[3,"ReLU"],[3,"Sigmoid"],[3,"ConvolutionConfig"],[3,"Convolution"],[3,"LinearConfig"],[3,"Linear"],[3,"LogSoftmax"],[3,"PoolingConfig"],[4,"PoolingMode"],[3,"Pooling"],[3,"SequentialConfig"],[3,"Sequential"],[3,"Softmax"],[8,"FilterLayer"],[3,"NegativeLogLikelihoodConfig"],[3,"NegativeLogLikelihood"],[3,"Flatten"],[3,"ReshapeConfig"],[3,"Reshape"],[3,"Solver"],[3,"SolverConfig"],[4,"SolverKind"],[4,"SGDKind"],[4,"LRPolicy"],[4,"RegularizationMethod"],[3,"ConfusionMatrix"],[3,"Sample"],[3,"Accuracy"],[8,"ISolver"],[3,"Momentum"],[3,"WeightConfig"],[4,"DimCheckMode"],[4,"FillerType"],[8,"Axpby"]]}; initSearch(searchIndex);