pub trait LayerTrait: AlgorithmTrait + LayerTraitConst {
    // Required method
    fn as_raw_mut_Layer(&mut self) -> *mut c_void;

    // Provided methods
    fn set_blobs(&mut self, val: Vector<Mat>) { ... }
    fn set_name(&mut self, val: &str) { ... }
    fn set_type(&mut self, val: &str) { ... }
    fn set_preferable_target(&mut self, val: i32) { ... }
    fn finalize(
        &mut self,
        inputs: &impl ToInputArray,
        outputs: &mut impl ToOutputArray,
    ) -> Result<()> { ... }
    fn forward_mat(
        &mut self,
        input: &mut Vector<Mat>,
        output: &mut Vector<Mat>,
        internals: &mut Vector<Mat>,
    ) -> Result<()> { ... }
    fn forward(
        &mut self,
        inputs: &impl ToInputArray,
        outputs: &mut impl ToOutputArray,
        internals: &mut impl ToOutputArray,
    ) -> Result<()> { ... }
    fn try_quantize(
        &mut self,
        scales: &Vector<Vector<f32>>,
        zeropoints: &Vector<Vector<i32>>,
        params: &mut impl LayerParamsTrait,
    ) -> Result<bool> { ... }
    fn forward_fallback(
        &mut self,
        inputs: &impl ToInputArray,
        outputs: &mut impl ToOutputArray,
        internals: &mut impl ToOutputArray,
    ) -> Result<()> { ... }
    fn finalize_mat_to(
        &mut self,
        inputs: &Vector<Mat>,
        outputs: &mut Vector<Mat>,
    ) -> Result<()> { ... }
    fn finalize_mat(&mut self, inputs: &Vector<Mat>) -> Result<Vector<Mat>> { ... }
    fn run(
        &mut self,
        inputs: &Vector<Mat>,
        outputs: &mut Vector<Mat>,
        internals: &mut Vector<Mat>,
    ) -> Result<()> { ... }
    fn input_name_to_index(&mut self, input_name: &str) -> Result<i32> { ... }
    fn output_name_to_index(&mut self, output_name: &str) -> Result<i32> { ... }
    fn support_backend(&mut self, backend_id: i32) -> Result<bool> { ... }
    fn init_halide(
        &mut self,
        inputs: &Vector<Ptr<BackendWrapper>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn init_ngraph(
        &mut self,
        inputs: &Vector<Ptr<BackendWrapper>>,
        nodes: &Vector<Ptr<BackendNode>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn init_vk_com(
        &mut self,
        inputs: &Vector<Ptr<BackendWrapper>>,
        outputs: &mut Vector<Ptr<BackendWrapper>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn init_webnn(
        &mut self,
        inputs: &Vector<Ptr<BackendWrapper>>,
        nodes: &Vector<Ptr<BackendNode>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    unsafe fn init_cuda(
        &mut self,
        context: *mut c_void,
        inputs: &Vector<Ptr<BackendWrapper>>,
        outputs: &Vector<Ptr<BackendWrapper>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    unsafe fn init_tim_vx(
        &mut self,
        tim_vx_info: *mut c_void,
        inputs_wrapper: &Vector<Ptr<BackendWrapper>>,
        outputs_wrapper: &Vector<Ptr<BackendWrapper>>,
        is_last: bool,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn init_cann(
        &mut self,
        inputs: &Vector<Ptr<BackendWrapper>>,
        outputs: &Vector<Ptr<BackendWrapper>>,
        nodes: &Vector<Ptr<BackendNode>>,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn try_attach(
        &mut self,
        node: &Ptr<BackendNode>,
    ) -> Result<Ptr<BackendNode>> { ... }
    fn set_activation(&mut self, layer: &Ptr<ActivationLayer>) -> Result<bool> { ... }
    fn try_fuse(&mut self, top: &mut Ptr<Layer>) -> Result<bool> { ... }
    fn unset_attached(&mut self) -> Result<()> { ... }
    fn update_memory_shapes(
        &mut self,
        inputs: &Vector<MatShape>,
    ) -> Result<bool> { ... }
    fn set_params_from(
        &mut self,
        params: &impl LayerParamsTraitConst,
    ) -> Result<()> { ... }
}
Mutable methods for crate::dnn::Layer
Required Methods§
fn as_raw_mut_Layer(&mut self) -> *mut c_void
Provided Methods§
fn set_blobs(&mut self, val: Vector<Mat>)
The list of learned parameters must be stored here so that they can be read using Net::getParam().
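As a rough illustration of that relationship, here is a minimal sketch that reads a layer's blobs and the corresponding Net::getParam value from the Rust side. It is not tested code: the model path and layer id are placeholders, and the exact get_layer/get_param signatures are assumed from the C++ Net API and may differ between opencv crate versions.

```rust
// Hedged sketch: "model.onnx" and layer id 1 are placeholders, and the
// get_layer / get_param calls are assumed to mirror the C++ Net API.
use opencv::{core::{Mat, Vector}, dnn, prelude::*, Result};

fn inspect_learned_blobs() -> Result<()> {
    let net = dnn::read_net_from_onnx("model.onnx")?; // hypothetical model file
    let layer = net.get_layer(1)?;                    // assumed id of some loaded layer
    // Blobs stored on the layer (e.g. via set_blobs) ...
    let blobs: Vector<Mat> = layer.blobs();
    println!("layer `{}` holds {} learned blob(s)", layer.name(), blobs.len());
    // ... are the tensors that Net::getParam exposes per layer.
    let w = net.get_param(1, 0)?; // assumed signature: (layer_id, param_index)
    println!("first parameter has {} dims", w.dims());
    Ok(())
}
```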
fn set_name(&mut self, val: &str)
Name of the layer instance; it can be used for logging or other internal purposes.
fn set_type(&mut self, val: &str)
Type name that was used to create the layer via the layer factory.
fn set_preferable_target(&mut self, val: i32)
Preferred target for layer forwarding.
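For orientation, the target id comes from the DNN_TARGET_* integer constants in the dnn module. Setting the target directly on a layer is uncommon (the usual entry point is Net::set_preferable_target), so the sketch below is purely illustrative and assumes a Layer handle is already at hand.

```rust
// Sketch only: assumes a mutable Layer handle is already available.
use opencv::{dnn, prelude::*};

fn prefer_cpu(layer: &mut dnn::Layer) {
    // DNN_TARGET_CPU is one of the dnn::DNN_TARGET_* integer constants.
    layer.set_preferable_target(dnn::DNN_TARGET_CPU);
}
```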
fn finalize(&mut self, inputs: &impl ToInputArray, outputs: &mut impl ToOutputArray) -> Result<()>
Computes and sets internal parameters according to inputs, outputs and blobs.
§Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs, and before inference starts.
fn forward_mat(&mut self, input: &mut Vector<Mat>, output: &mut Vector<Mat>, internals: &mut Vector<Mat>) -> Result<()>
Given the input blobs, computes the output blobs.
Deprecated: Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
§Parameters
- input: the input blobs.
- output:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
fn forward(&mut self, inputs: &impl ToInputArray, outputs: &mut impl ToOutputArray, internals: &mut impl ToOutputArray) -> Result<()>
Given the input blobs, computes the output blobs.
§Parameters
- inputs: the input blobs.
- outputs:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
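A minimal sketch of driving a layer by hand, with Vector<Mat> standing in for the input/output array arguments. Normally Net pre-allocates the output blobs from the layer's reported shapes and calls finalize/forward itself, so starting from empty output vectors here is an assumption made purely for illustration.

```rust
// Sketch only: assumes the caller already has a Layer and correctly shaped
// input blobs; Net normally performs these calls internally.
use opencv::{core::{Mat, Vector}, dnn, prelude::*, Result};

fn run_layer_once(layer: &mut dnn::Layer, inputs: &Vector<Mat>) -> Result<Vector<Mat>> {
    let mut outputs = Vector::<Mat>::new();
    let mut internals = Vector::<Mat>::new();
    // Vector<Mat> acts as InputArrayOfArrays / OutputArrayOfArrays here.
    layer.finalize(inputs, &mut outputs)?;                // set internal parameters
    layer.forward(inputs, &mut outputs, &mut internals)?; // compute the outputs
    Ok(outputs)
}
```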
fn try_quantize(&mut self, scales: &Vector<Vector<f32>>, zeropoints: &Vector<Vector<i32>>, params: &mut impl LayerParamsTrait) -> Result<bool>
Tries to quantize the given layer and computes the quantization parameters required for a fixed-point implementation.
§Parameters
- scales: input and output scales.
- zeropoints: input and output zeropoints.
- params:[out] Quantized parameters required for fixed point implementation of that layer.
§Returns
True if layer can be quantized.
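For shape orientation only, a hedged sketch of assembling the nested scale/zero-point vectors. The numeric values are placeholders, the [input scales, output scales] layout of the outer vectors is an assumption taken from the parameter description above, and LayerParams is assumed to expose a Default constructor.

```rust
// Sketch only: values and the [inputs, outputs] layout are assumptions.
use opencv::{core::Vector, dnn::{self, LayerParams}, prelude::*, Result};

fn quantize_layer(layer: &mut dnn::Layer) -> Result<bool> {
    let mut scales = Vector::<Vector<f32>>::new();
    scales.push(Vector::from_slice(&[0.02_f32]));  // assumed input scale(s)
    scales.push(Vector::from_slice(&[0.05_f32]));  // assumed output scale(s)

    let mut zeropoints = Vector::<Vector<i32>>::new();
    zeropoints.push(Vector::from_slice(&[0_i32])); // assumed input zero point(s)
    zeropoints.push(Vector::from_slice(&[0_i32])); // assumed output zero point(s)

    let mut params = LayerParams::default();       // receives quantized params on success
    layer.try_quantize(&scales, &zeropoints, &mut params)
}
```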
fn forward_fallback(&mut self, inputs: &impl ToInputArray, outputs: &mut impl ToOutputArray, internals: &mut impl ToOutputArray) -> Result<()>
Given the input blobs, computes the output blobs.
§Parameters
- inputs: the input blobs.
- outputs:[out] allocated output blobs, which will store results of the computation.
- internals:[out] allocated internal blobs
fn finalize_mat_to(&mut self, inputs: &Vector<Mat>, outputs: &mut Vector<Mat>) -> Result<()>
Computes and sets internal parameters according to inputs, outputs and blobs.
§Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs, and before inference starts.
§Overloaded parameters
Deprecated: Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
fn finalize_mat(&mut self, inputs: &Vector<Mat>) -> Result<Vector<Mat>>
Computes and sets internal parameters according to inputs, outputs and blobs.
§Parameters
- inputs: vector of already allocated input blobs
- outputs:[out] vector of already allocated output blobs
This method is called after the network has allocated all memory for input and output blobs, and before inference starts.
§Overloaded parameters
Deprecated: Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
fn run(&mut self, inputs: &Vector<Mat>, outputs: &mut Vector<Mat>, internals: &mut Vector<Mat>) -> Result<()>
Allocates layer and computes output.
Deprecated: This method will be removed in a future release.
fn input_name_to_index(&mut self, input_name: &str) -> Result<i32>
Returns the index of an input blob in the input array.
§Parameters
- inputName: label of input blob
Each layer input and output can be labeled for easy identification using the “<layer_name>[.output_name]” notation. This method maps the label of an input blob to its index in the input vector.
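A small sketch of the labeling notation; the label string below is purely hypothetical and only follows the “<layer_name>[.output_name]” pattern described above.

```rust
// Sketch only: "conv1.output0" is a made-up label in the
// "<layer_name>[.output_name]" notation.
use opencv::{dnn, prelude::*, Result};

fn locate_input(layer: &mut dnn::Layer) -> Result<()> {
    let idx = layer.input_name_to_index("conv1.output0")?;
    println!("labeled input resolves to index {idx}");
    Ok(())
}
```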
fn output_name_to_index(&mut self, output_name: &str) -> Result<i32>
fn support_backend(&mut self, backend_id: i32) -> Result<bool>
fn init_halide(&mut self, inputs: &Vector<Ptr<BackendWrapper>>) -> Result<Ptr<BackendNode>>
Returns Halide backend node.
§Parameters
- inputs: Input Halide buffers.
§See also
BackendNode, BackendWrapper
Input buffers should be exactly the same ones that will be used in forward invocations. Although a Halide::ImageParam could be constructed from the input shape alone, using the actual buffers helps prevent some memory management issues (if something is wrong, the Halide tests will fail).
fn init_ngraph(&mut self, inputs: &Vector<Ptr<BackendWrapper>>, nodes: &Vector<Ptr<BackendNode>>) -> Result<Ptr<BackendNode>>
fn init_vk_com(&mut self, inputs: &Vector<Ptr<BackendWrapper>>, outputs: &mut Vector<Ptr<BackendWrapper>>) -> Result<Ptr<BackendNode>>
fn init_webnn(&mut self, inputs: &Vector<Ptr<BackendWrapper>>, nodes: &Vector<Ptr<BackendNode>>) -> Result<Ptr<BackendNode>>
unsafe fn init_cuda(&mut self, context: *mut c_void, inputs: &Vector<Ptr<BackendWrapper>>, outputs: &Vector<Ptr<BackendWrapper>>) -> Result<Ptr<BackendNode>>
Returns a CUDA backend node
§Parameters
- context: void pointer to CSLContext object
- inputs: layer inputs
- outputs: layer outputs
unsafe fn init_tim_vx(&mut self, tim_vx_info: *mut c_void, inputs_wrapper: &Vector<Ptr<BackendWrapper>>, outputs_wrapper: &Vector<Ptr<BackendWrapper>>, is_last: bool) -> Result<Ptr<BackendNode>>
Returns a TimVX backend node
§Parameters
- timVxInfo: void pointer to CSLContext object
- inputsWrapper: layer inputs
- outputsWrapper: layer outputs
- isLast: if the node is the last one of the TimVX Graph.
fn init_cann(&mut self, inputs: &Vector<Ptr<BackendWrapper>>, outputs: &Vector<Ptr<BackendWrapper>>, nodes: &Vector<Ptr<BackendNode>>) -> Result<Ptr<BackendNode>>
Returns a CANN backend node
§Parameters
- inputs: input tensors of CANN operator
- outputs: output tensors of CANN operator
- nodes: nodes of input tensors
fn try_attach(&mut self, node: &Ptr<BackendNode>) -> Result<Ptr<BackendNode>>
fn set_activation(&mut self, layer: &Ptr<ActivationLayer>) -> Result<bool>
Tries to attach the subsequent activation layer to this layer, i.e. performs a partial layer fusion.
§Parameters
- layer: The subsequent activation layer.
Returns true if the activation layer has been attached successfully.
fn unset_attached(&mut self) -> Result<()>
“Detaches” all the layers attached to the particular layer.