Struct leaf::layers::common::softmax::Softmax
[−]
[src]
pub struct Softmax;
Softmax Layer
Trait Implementations
impl Debug for Softmax
[src]
impl Clone for Softmax
[src]
fn clone(&self) -> Softmax
Returns a copy of the value. Read more
fn clone_from(&mut self, source: &Self)
1.0.0
Performs copy-assignment from `source`. Read more
impl<B: IBackend + Softmax<f32>> ILayer<B> for Softmax
[src]
fn reshape(
&mut self,
backend: Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>
)
&mut self,
backend: Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>
)
Adjust to shapes of the output blobs to fit the shapes of the input blobs. Read more
fn init(&mut self, backend: Rc<B>)
Initialize the layer for computation. Read more
fn resize_shared_workspace(&mut self, backend: Rc<B>, workspace: Option<ArcLock<SharedTensor<u8>>>) -> Option<ArcLock<SharedTensor<u8>>>
Adjust size of shared workspace. Read more
fn forward(
&self,
backend: &B,
input_data: &[ArcLock<SharedTensor<f32>>],
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
input_data: &[ArcLock<SharedTensor<f32>>],
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [feedforward][1] layer output using the provided Backend. [1]: https://en.wikipedia.org/wiki/Feedforward_neural_network Read more
fn backward_input(
&self,
backend: &B,
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [backpropagation][1] input gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
fn backward_parameters(
&self,
backend: &B,
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [backpropagation][1] parameters gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
fn sync(
&self,
backend: &B,
input_data: &mut [ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>],
weights_data: &mut [ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>],
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>
)
&self,
backend: &B,
input_data: &mut [ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>],
weights_data: &mut [ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>],
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>
)
Synchronize the blobs before doing a forward or backward operation. Read more
fn auto_output_blobs(&self) -> bool
Return whether "anonymous" output blobs are created automatically for the layer. Read more
fn min_output_blobs(&self) -> usize
Returns the minimum number of output blobs required by the layer, or 0 if no minimum number is required. Read more
fn exact_num_output_blobs(&self) -> Option<usize>
Returns the exact number of output blobs required by the layer, or `None` if no exact number is required. Read more
fn auto_weight_blobs(&self) -> bool
Return whether weight blobs are created automatically for the layer. Read more
fn exact_num_input_blobs(&self) -> Option<usize>
Returns the exact number of input blobs required by the layer, or `None` if no exact number is required. Read more
fn allow_force_backward(&self, input_id: usize) -> bool
Return whether to allow force_backward for a given input blob index. Read more
fn sync_native(&self) -> bool
Return whether a simple native backend should be used to [sync][1] instead of the default backend. [1]: #method.sync Read more
fn compute_in_place(&self) -> bool
Return whether the computations of a layer should be done in-place (the output will be written where the input was read from). Read more
fn is_container(&self) -> bool
Return whether the layer is a container. Read more
fn loss_weight(&self, output_id: usize) -> Option<f32>
Return the associated loss weight for a given output blob index. Read more
fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the input tensors of the layer. Read more
fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients of the input tensors of the layer. Read more
fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the output tensors of the layer. Read more
fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients of the output tensors of the layer. Read more
fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the learnable weights inside the layer. Read more
fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients for the learnable weights inside the layer. Read more
fn learnable_weights_names(&self) -> Option<Vec<String>>
Return the names of the learnable weights inside the layer. Read more
fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>
Return the learning rates for the learnable weights inside the layer. Read more
impl<B: IBackend + Softmax<f32>> ComputeOutput<f32, B> for Softmax
[src]
fn compute_output(
&self,
backend: &B,
_weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
_weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]
)
Compute output for given input and write them into `output_data`.
impl<B: IBackend + Softmax<f32>> ComputeInputGradient<f32, B> for Softmax
[src]
fn compute_input_gradient(
&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]
)
Compute gradients with respect to the inputs and write them into `input_gradients`.
impl<B: IBackend + Softmax<f32>> ComputeParametersGradient<f32, B> for Softmax
[src]
fn compute_parameters_gradient(
&self,
backend: &B,
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
parameters_gradients: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
parameters_gradients: &mut [&mut SharedTensor<f32>]
)
Compute gradients with respect to the parameters and write them into `parameters_gradients`.