Trait opencv::dnn::prelude::LayerTrait [−][src]
pub trait LayerTrait: AlgorithmTrait + LayerTraitConst {
Show 27 methods
fn as_raw_mut_Layer(&mut self) -> *mut c_void;
fn set_blobs(&mut self, val: Vector<Mat>) { ... }
fn set_name(&mut self, val: &str) { ... }
fn set_type(&mut self, val: &str) { ... }
fn set_preferable_target(&mut self, val: i32) { ... }
fn finalize(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray
) -> Result<()> { ... }
fn forward_mat(
&mut self,
input: &mut Vector<Mat>,
output: &mut Vector<Mat>,
internals: &mut Vector<Mat>
) -> Result<()> { ... }
fn forward(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()> { ... }
fn try_quantize(
&mut self,
scales: &Vector<Vector<f32>>,
zeropoints: &Vector<Vector<i32>>,
params: &mut LayerParams
) -> Result<bool> { ... }
fn forward_fallback(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()> { ... }
fn finalize_mat_to(
&mut self,
inputs: &Vector<Mat>,
outputs: &mut Vector<Mat>
) -> Result<()> { ... }
fn finalize_mat(&mut self, inputs: &Vector<Mat>) -> Result<Vector<Mat>> { ... }
fn run(
&mut self,
inputs: &Vector<Mat>,
outputs: &mut Vector<Mat>,
internals: &mut Vector<Mat>
) -> Result<()> { ... }
fn input_name_to_index(&mut self, input_name: &str) -> Result<i32> { ... }
fn output_name_to_index(&mut self, output_name: &str) -> Result<i32> { ... }
fn support_backend(&mut self, backend_id: i32) -> Result<bool> { ... }
fn init_halide(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>> { ... }
fn init_inf_engine(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>> { ... }
fn init_ngraph(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>,
nodes: &Vector<Ptr<BackendNode>>
) -> Result<Ptr<BackendNode>> { ... }
fn init_vk_com(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>> { ... }
unsafe fn init_cuda(
&mut self,
context: *mut c_void,
inputs: &Vector<Ptr<dyn BackendWrapper>>,
outputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>> { ... }
fn try_attach(
&mut self,
node: &Ptr<BackendNode>
) -> Result<Ptr<BackendNode>> { ... }
fn set_activation(&mut self, layer: &Ptr<ActivationLayer>) -> Result<bool> { ... }
fn try_fuse(&mut self, top: &mut Ptr<Layer>) -> Result<bool> { ... }
fn unset_attached(&mut self) -> Result<()> { ... }
fn update_memory_shapes(
&mut self,
inputs: &Vector<MatShape>
) -> Result<bool> { ... }
fn set_params_from(&mut self, params: &LayerParams) -> Result<()> { ... }
}
Required methods
fn as_raw_mut_Layer(&mut self) -> *mut c_void
Provided methods
List of learned parameters must be stored here to allow reading them by using Net::getParam().
Name of the layer instance, can be used for logging or other internal purposes.
fn set_preferable_target(&mut self, val: i32)
fn set_preferable_target(&mut self, val: i32)
Preferred target for layer forwarding.
fn finalize(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray
) -> Result<()>
fn finalize(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray
) -> Result<()>
Computes and sets internal parameters according to inputs, outputs and blobs.
Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs and before inferencing.
Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
Given the @p input blobs, computes the output @p blobs.
Deprecated: Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
Parameters
- input: the input blobs.
- output:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
fn forward(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()>
fn forward(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()>
Given the @p input blobs, computes the output @p blobs.
Parameters
- inputs: the input blobs.
- outputs:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
Tries to quantize the given layer and compute the quantization parameters required for fixed point implementation.
Parameters
- scales: input and output scales.
- zeropoints: input and output zeropoints.
- params:[out] Quantized parameters required for fixed point implementation of that layer.
Returns
True if layer can be quantized.
fn forward_fallback(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()>
fn forward_fallback(
&mut self,
inputs: &dyn ToInputArray,
outputs: &mut dyn ToOutputArray,
internals: &mut dyn ToOutputArray
) -> Result<()>
Given the @p input blobs, computes the output @p blobs.
Parameters
- inputs: the input blobs.
- outputs:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
@brief Computes and sets internal parameters according to inputs, outputs and blobs.
Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs and before inferencing.
Overloaded parameters
Deprecated: Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
@brief Computes and sets internal parameters according to inputs, outputs and blobs.
Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs and before inferencing.
Overloaded parameters
Deprecated: Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
This method will be removed in a future release.
Allocates layer and computes output.
Deprecated: This method will be removed in a future release.
fn input_name_to_index(&mut self, input_name: &str) -> Result<i32>
fn input_name_to_index(&mut self, input_name: &str) -> Result<i32>
Returns index of input blob into the input array.
Parameters
- inputName: label of input blob
Each layer input and output can be labeled to easily identify them using "<layer_name>[.output_name]" notation. This method maps the label of an input blob to its index in the input vector.
fn output_name_to_index(&mut self, output_name: &str) -> Result<i32>
fn output_name_to_index(&mut self, output_name: &str) -> Result<i32>
fn support_backend(&mut self, backend_id: i32) -> Result<bool>
fn support_backend(&mut self, backend_id: i32) -> Result<bool>
Ask layer if it support specific backend for doing computations.
Parameters
- backendId: computation backend identifier.
See also
Backend
fn init_halide(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
fn init_halide(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
Returns Halide backend node.
Parameters
- inputs: Input Halide buffers.
See also
BackendNode, BackendWrapper
Input buffers should be exactly the same that will be used in forward invocations. Despite we can use Halide::ImageParam based on input shape only, it helps prevent some memory management issues (if something wrong, Halide tests will be failed).
fn init_inf_engine(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
fn init_ngraph(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>,
nodes: &Vector<Ptr<BackendNode>>
) -> Result<Ptr<BackendNode>>
fn init_vk_com(
&mut self,
inputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
unsafe fn init_cuda(
&mut self,
context: *mut c_void,
inputs: &Vector<Ptr<dyn BackendWrapper>>,
outputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
unsafe fn init_cuda(
&mut self,
context: *mut c_void,
inputs: &Vector<Ptr<dyn BackendWrapper>>,
outputs: &Vector<Ptr<dyn BackendWrapper>>
) -> Result<Ptr<BackendNode>>
Returns a CUDA backend node
Parameters
- context: void pointer to CSLContext object
- inputs: layer inputs
- outputs: layer outputs
fn try_attach(&mut self, node: &Ptr<BackendNode>) -> Result<Ptr<BackendNode>>
fn try_attach(&mut self, node: &Ptr<BackendNode>) -> Result<Ptr<BackendNode>>
Implement layers fusing.
Parameters
- node: Backend node of bottom layer.
See also
BackendNode
Actual for graph-based backends. If layer attached successfully, returns non-empty cv::Ptr to node of the same backend. Fuse only over the last function.
fn set_activation(&mut self, layer: &Ptr<ActivationLayer>) -> Result<bool>
fn set_activation(&mut self, layer: &Ptr<ActivationLayer>) -> Result<bool>
Tries to attach to the layer the subsequent activation layer, i.e. do the layer fusion in a partial case.
Parameters
- layer: The subsequent activation layer.
Returns true if the activation layer has been attached successfully.
Try to fuse current layer with a next one
Parameters
- top: Next layer to be fused.
Returns
True if fusion was performed.
fn unset_attached(&mut self) -> Result<()>
fn unset_attached(&mut self) -> Result<()>
"Detaches" all the layers attached to a particular layer.