Struct leaf::layers::container::sequential::Sequential
[−]
[src]
pub struct Sequential<B: IBackend + LayerOps<f32>> { /* fields omitted */ }
Sequential Layer
Methods
impl<B: IBackend + LayerOps<f32> + 'static> Sequential<B>
[src]
fn empty() -> Sequential<B>
Create an empty Sequential container layer.
fn from_config(backend: Rc<B>, config: &SequentialConfig) -> Sequential<B>
Create a Sequential layer from a SequentialConfig.
fn init_layers(&mut self, backend: Rc<B>, in_config: &SequentialConfig)
Initializes a sequential container.
Sets up the structure of the sequential container. It reads the supplied SequentialConfig, connects the input and output blobs of each layer and determines if the backpropagation has to be executed for each tensor and layer.
Trait Implementations
impl<B: Debug + IBackend + LayerOps<f32>> Debug for Sequential<B>
[src]
impl<B: IBackend + LayerOps<f32> + 'static> ILayer<B> for Sequential<B>
[src]
fn is_container(&self) -> bool
Return whether the layer is a container. Read more
fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the input tensors of the layer. Read more
fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients of the input tensors of the layer. Read more
fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the output tensors of the layer. Read more
fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients of the output tensors of the layer. Read more
fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the learnable weights inside the layer. Read more
fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>
Return the gradients for the learnable weights inside the layer. Read more
fn learnable_weights_names(&self) -> Option<Vec<String>>
Return the names of the learnable weights inside the layer. Read more
Adjust size of shared workspace. Read more
fn forward(
&self,
backend: &B,
input_data: &[ArcLock<SharedTensor<f32>>],
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
input_data: &[ArcLock<SharedTensor<f32>>],
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [feedforward][1] layer output using the provided Backend. [1]: https://en.wikipedia.org/wiki/Feedforward_neural_network Read more
fn backward_input(
&self,
backend: &B,
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
weights_data: &[ArcLock<SharedTensor<f32>>],
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [backpropagation][1] input gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
fn backward_parameters(
&self,
backend: &B,
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
&self,
backend: &B,
output_data: &[ArcLock<SharedTensor<f32>>],
output_gradients: &[ArcLock<SharedTensor<f32>>],
input_data: &[ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>]
)
Compute the [backpropagation][1] parameters gradient using the provided backend. [1]: https://en.wikipedia.org/wiki/Backpropagation Read more
fn init(&mut self, backend: Rc<B>)
Initialize the layer for computation. Read more
fn reshape(
&mut self,
backend: Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>
)
&mut self,
backend: Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>
)
Adjust the shapes of the output blobs to fit the shapes of the input blobs. Read more
fn sync(
&self,
backend: &B,
input_data: &mut [ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>],
weights_data: &mut [ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>],
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>
)
&self,
backend: &B,
input_data: &mut [ArcLock<SharedTensor<f32>>],
input_gradients: &mut [ArcLock<SharedTensor<f32>>],
weights_data: &mut [ArcLock<SharedTensor<f32>>],
weights_gradients: &mut [ArcLock<SharedTensor<f32>>],
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>
)
Synchronize the blobs before doing a forward or backward operation. Read more
fn auto_output_blobs(&self) -> bool
Return whether "anonymous" output blobs are created automatically for the layer. Read more
fn min_output_blobs(&self) -> usize
Returns the minimum number of output blobs required by the layer, or 0 if no minimum number is required. Read more
fn exact_num_output_blobs(&self) -> Option<usize>
Returns the exact number of output blobs required by the layer, or None
if no exact number is required. Read more
fn auto_weight_blobs(&self) -> bool
Return whether weight blobs are created automatically for the layer. Read more
fn exact_num_input_blobs(&self) -> Option<usize>
Returns the exact number of input blobs required by the layer, or None
if no exact number is required. Read more
fn allow_force_backward(&self, input_id: usize) -> bool
Return whether to allow force_backward for a given input blob index. Read more
fn sync_native(&self) -> bool
Return whether a simple native backend should be used to [sync][1] instead of the default backend. [1]: #method.sync Read more
fn compute_in_place(&self) -> bool
Return whether the computations of a layer should be done in-place (the output will be written where the input was read from). Read more
fn loss_weight(&self, output_id: usize) -> Option<f32>
Return the associated loss weight for a given output blob index. Read more
fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>
Return the learning rates for the learnable weights inside the layer. Read more
impl<B: IBackend + LayerOps<f32> + 'static> ComputeOutput<f32, B> for Sequential<B>
[src]
fn compute_output(
&self,
backend: &B,
weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]
)
Compute output for given input and write them into output_data
.
impl<B: IBackend + LayerOps<f32> + 'static> ComputeInputGradient<f32, B> for Sequential<B>
[src]
fn compute_input_gradient(
&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]
)
Compute gradients with respect to the inputs and write them into input_gradients
.
impl<B: IBackend + LayerOps<f32> + 'static> ComputeParametersGradient<f32, B> for Sequential<B>
[src]
fn compute_parameters_gradient(
&self,
backend: &B,
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
parameters_gradients: &mut [&mut SharedTensor<f32>]
)
&self,
backend: &B,
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
parameters_gradients: &mut [&mut SharedTensor<f32>]
)
Compute gradients with respect to the parameters and write them into parameters_gradients
.