Trait opencv::hub_prelude::LayerTrait

pub trait LayerTrait: AlgorithmTrait {
    fn as_raw_Layer(&self) -> *mut c_void;

    fn blobs(&mut self) -> VectorOfMat { ... }
fn set_blobs(&mut self, val: VectorOfMat) { ... }
fn name(&self) -> String { ... }
fn set_name(&mut self, val: &str) { ... }
fn typ(&self) -> String { ... }
fn set_type(&mut self, val: &str) { ... }
fn preferable_target(&self) -> i32 { ... }
fn set_preferable_target(&mut self, val: i32) { ... }
fn finalize(
        &mut self,
        inputs: &dyn ToInputArray,
        outputs: &mut dyn ToOutputArray
    ) -> Result<()> { ... }
fn forward_mat(
        &mut self,
        input: &mut VectorOfMat,
        output: &mut VectorOfMat,
        internals: &mut VectorOfMat
    ) -> Result<()> { ... }
fn forward(
        &mut self,
        inputs: &dyn ToInputArray,
        outputs: &mut dyn ToOutputArray,
        internals: &mut dyn ToOutputArray
    ) -> Result<()> { ... }
fn forward_fallback(
        &mut self,
        inputs: &dyn ToInputArray,
        outputs: &mut dyn ToOutputArray,
        internals: &mut dyn ToOutputArray
    ) -> Result<()> { ... }
fn finalize_mat_to(
        &mut self,
        inputs: &VectorOfMat,
        outputs: &mut VectorOfMat
    ) -> Result<()> { ... }
fn finalize_mat(&mut self, inputs: &VectorOfMat) -> Result<VectorOfMat> { ... }
fn run(
        &mut self,
        inputs: &VectorOfMat,
        outputs: &mut VectorOfMat,
        internals: &mut VectorOfMat
    ) -> Result<()> { ... }
fn input_name_to_index(&mut self, input_name: &str) -> Result<i32> { ... }
fn output_name_to_index(&mut self, output_name: &str) -> Result<i32> { ... }
fn support_backend(&mut self, backend_id: i32) -> Result<bool> { ... }
fn init_halide(
        &mut self,
        inputs: &VectorOfPtrOfBackendWrapper
    ) -> Result<PtrOfBackendNode> { ... }
fn init_inf_engine(
        &mut self,
        inputs: &VectorOfPtrOfBackendWrapper
    ) -> Result<PtrOfBackendNode> { ... }
fn init_ngraph(
        &mut self,
        inputs: &VectorOfPtrOfBackendWrapper,
        nodes: &VectorOfPtrOfBackendNode
    ) -> Result<PtrOfBackendNode> { ... }
fn init_vk_com(
        &mut self,
        inputs: &VectorOfPtrOfBackendWrapper
    ) -> Result<PtrOfBackendNode> { ... }
fn init_cuda(
        &mut self,
        context: *mut c_void,
        inputs: &VectorOfPtrOfBackendWrapper,
        outputs: &VectorOfPtrOfBackendWrapper
    ) -> Result<PtrOfBackendNode> { ... }
fn apply_halide_scheduler(
        &self,
        node: &mut PtrOfBackendNode,
        inputs: &VectorOfMat,
        outputs: &VectorOfMat,
        target_id: i32
    ) -> Result<()> { ... }
fn try_attach(
        &mut self,
        node: &PtrOfBackendNode
    ) -> Result<PtrOfBackendNode> { ... }
fn set_activation(&mut self, layer: &PtrOfActivationLayer) -> Result<bool> { ... }
fn try_fuse(&mut self, top: &mut PtrOfLayer) -> Result<bool> { ... }
fn get_scale_shift(&self, scale: &mut Mat, shift: &mut Mat) -> Result<()> { ... }
fn unset_attached(&mut self) -> Result<()> { ... }
fn get_memory_shapes(
        &self,
        inputs: &VectorOfMatShape,
        required_outputs: i32,
        outputs: &mut VectorOfMatShape,
        internals: &mut VectorOfMatShape
    ) -> Result<bool> { ... }
fn get_flops(
        &self,
        inputs: &VectorOfMatShape,
        outputs: &VectorOfMatShape
    ) -> Result<i64> { ... }
fn set_params_from(&mut self, params: &LayerParams) -> Result<()> { ... }
}

This interface class allows building new Layers, the building blocks of networks.

Each class derived from Layer must implement the allocate() methods to declare its own outputs and forward() to compute them. Before using the new layer in a network, you must also register it using one of the LayerFactory macros (see dnnLayerFactory).
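
From Rust this trait is normally used on an already constructed layer (for example one retrieved from a dnn::Net); registering new layer types through the LayerFactory macros is a C++-side mechanism and is not shown. A minimal sketch, assuming LayerTrait is in scope via opencv::prelude:

    use opencv::prelude::*;

    // Print basic metadata exposed by LayerTrait; `layer` can be any
    // implementor listed at the bottom of this page.
    fn describe(layer: &impl LayerTrait) {
        println!(
            "layer `{}` of type `{}`, preferable target {}",
            layer.name(),
            layer.typ(),
            layer.preferable_target()
        );
    }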

Required methods

fn as_raw_Layer(&self) -> *mut c_void

Provided methods

fn blobs(&mut self) -> VectorOfMat

The list of learned parameters must be stored here to allow reading them with Net::getParam().

fn set_blobs(&mut self, val: VectorOfMat)

The list of learned parameters must be stored here to allow reading them with Net::getParam().
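
A minimal sketch of inspecting the learned blobs through this accessor, assuming the usual Mat accessors from the prelude; how the layer itself is obtained is left to the caller:

    use opencv::prelude::*;

    fn dump_blobs(layer: &mut impl LayerTrait) -> opencv::Result<()> {
        let blobs = layer.blobs();
        for i in 0..blobs.len() {
            // Blobs are usually N-dimensional Mats (e.g. NCHW weight tensors).
            let blob = blobs.get(i)?;
            println!("blob {}: {} dims", i, blob.dims());
        }
        Ok(())
    }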

fn name(&self) -> String

Name of the layer instance; can be used for logging or other internal purposes.

fn set_name(&mut self, val: &str)

Name of the layer instance; can be used for logging or other internal purposes.

fn typ(&self) -> String

Type name that was used to create the layer via the layer factory.

fn set_type(&mut self, val: &str)

Type name that was used to create the layer via the layer factory.

fn preferable_target(&self) -> i32

Preferred target for layer forwarding.

fn set_preferable_target(&mut self, val: i32)

Preferred target for layer forwarding.

fn finalize(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray
) -> Result<()>

Computes and sets internal parameters according to inputs, outputs and blobs.

Parameters

  • inputs: vector of already allocated input blobs
  • outputs:[out] vector of already allocated output blobs

This method is called after the network has allocated all memory for input and output blobs and before inferencing.
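
A hedged sketch of calling finalize from Rust; it assumes VectorOfMat is accepted where InputArrayOfArrays/OutputArrayOfArrays is expected (as in the C++ overload) and that the caller has already allocated both blob vectors:

    use opencv::{prelude::*, types::VectorOfMat};

    fn finalize_layer(
        layer: &mut impl LayerTrait,
        inputs: &VectorOfMat,
        outputs: &mut VectorOfMat,
    ) -> opencv::Result<()> {
        // Both vectors are expected to hold already allocated blobs, as noted above.
        layer.finalize(inputs, outputs)?;
        Ok(())
    }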

fn forward_mat(
    &mut self,
    input: &mut VectorOfMat,
    output: &mut VectorOfMat,
    internals: &mut VectorOfMat
) -> Result<()>

Deprecated:

Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead

Given the input blobs, computes the output blobs.

Parameters

  • input: the input blobs.
  • output:[out] allocated output blobs, which will store results of the computation.
  • internals:[out] allocated internal blobs

fn forward(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray,
    internals: &mut dyn ToOutputArray
) -> Result<()>

Given the input blobs, computes the output blobs.

Parameters

  • inputs: the input blobs.
  • outputs:[out] allocated output blobs, which will store results of the computation.
  • internals:[out] allocated internal blobs
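
A hedged sketch of one forward pass through a single layer; it assumes VectorOfMat is accepted as the array-of-arrays argument and that the output and internal vectors have been allocated beforehand (normally the Net does this based on get_memory_shapes):

    use opencv::{prelude::*, types::VectorOfMat};

    fn forward_once(
        layer: &mut impl LayerTrait,
        inputs: &VectorOfMat,
        outputs: &mut VectorOfMat,
        internals: &mut VectorOfMat,
    ) -> opencv::Result<()> {
        // The layer writes its results into `outputs`; `internals` holds any
        // scratch blobs the layer needs during computation.
        layer.forward(inputs, outputs, internals)?;
        println!("forward wrote {} output blobs", outputs.len());
        Ok(())
    }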

fn forward_fallback(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray,
    internals: &mut dyn ToOutputArray
) -> Result<()>

Given the input blobs, computes the output blobs.

Parameters

  • inputs: the input blobs.
  • outputs:[out] allocated output blobs, which will store results of the computation.
  • internals:[out] allocated internal blobs

fn finalize_mat_to(
    &mut self,
    inputs: &VectorOfMat,
    outputs: &mut VectorOfMat
) -> Result<()>

Deprecated:

Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead

Computes and sets internal parameters according to inputs, outputs and blobs.

Parameters

  • inputs: vector of already allocated input blobs
  • outputs:[out] vector of already allocated output blobs

This method is called after the network has allocated all memory for input and output blobs and before inferencing.


fn finalize_mat(&mut self, inputs: &VectorOfMat) -> Result<VectorOfMat>

Deprecated:

Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead

Computes and sets internal parameters according to inputs, outputs and blobs.

Parameters

  • inputs: vector of already allocated input blobs
  • outputs:[out] vector of already allocated output blobs

This method is called after the network has allocated all memory for input and output blobs and before inferencing.


fn run(
    &mut self,
    inputs: &VectorOfMat,
    outputs: &mut VectorOfMat,
    internals: &mut VectorOfMat
) -> Result<()>

Deprecated:

This method will be removed in the future release.

Allocates the layer and computes the output.

fn input_name_to_index(&mut self, input_name: &str) -> Result<i32>

Returns the index of the input blob in the input array.

Parameters

  • inputName: label of input blob

Each layer input and output can be labeled to identify them easily using the "<layer_name>[.output_name]" notation. This method maps the label of an input blob to its index in the input vector.
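
A short sketch of the label-to-index mapping described above; the labels "data" and "prob" are hypothetical and depend on the network:

    use opencv::prelude::*;

    fn find_io_indices(layer: &mut impl LayerTrait) -> opencv::Result<()> {
        // Both labels are placeholders; real names depend on the loaded model.
        let in_idx = layer.input_name_to_index("data")?;
        let out_idx = layer.output_name_to_index("prob")?;
        println!("input #{}, output #{}", in_idx, out_idx);
        Ok(())
    }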

fn output_name_to_index(&mut self, output_name: &str) -> Result<i32>

Returns the index of the output blob in the output array.

See also

inputNameToIndex()

fn support_backend(&mut self, backend_id: i32) -> Result<bool>

Asks the layer whether it supports a specific backend for computations.

Parameters

  • backendId: computation backend identifier.

See also

Backend
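
A sketch of querying backend support, assuming the DNN_BACKEND_* integer constants exported by this crate's dnn module:

    use opencv::{dnn, prelude::*};

    fn pick_backend(layer: &mut impl LayerTrait) -> opencv::Result<i32> {
        // Prefer CUDA when the layer supports it, otherwise fall back to the
        // default OpenCV backend.
        if layer.support_backend(dnn::DNN_BACKEND_CUDA)? {
            Ok(dnn::DNN_BACKEND_CUDA)
        } else {
            Ok(dnn::DNN_BACKEND_OPENCV)
        }
    }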

fn init_halide(
    &mut self,
    inputs: &VectorOfPtrOfBackendWrapper
) -> Result<PtrOfBackendNode>

Returns Halide backend node.

Parameters

  • inputs: Input Halide buffers.

See also

BackendNode, BackendWrapper

The input buffers should be exactly the same ones that will be used in forward invocations. Although a Halide::ImageParam could be built from the input shape alone, passing the actual buffers helps prevent some memory-management issues (if something is wrong, the Halide tests will fail).

fn init_inf_engine(
    &mut self,
    inputs: &VectorOfPtrOfBackendWrapper
) -> Result<PtrOfBackendNode>

fn init_ngraph(
    &mut self,
    inputs: &VectorOfPtrOfBackendWrapper,
    nodes: &VectorOfPtrOfBackendNode
) -> Result<PtrOfBackendNode>

fn init_vk_com(
    &mut self,
    inputs: &VectorOfPtrOfBackendWrapper
) -> Result<PtrOfBackendNode>

fn init_cuda(
    &mut self,
    context: *mut c_void,
    inputs: &VectorOfPtrOfBackendWrapper,
    outputs: &VectorOfPtrOfBackendWrapper
) -> Result<PtrOfBackendNode>

Returns a CUDA backend node.

Parameters

  • context: void pointer to CSLContext object
  • inputs: layer inputs
  • outputs: layer outputs

fn apply_halide_scheduler(
    &self,
    node: &mut PtrOfBackendNode,
    inputs: &VectorOfMat,
    outputs: &VectorOfMat,
    target_id: i32
) -> Result<()>

Automatic Halide scheduling based on layer hyper-parameters.

Parameters

  • node: Backend node with Halide functions.
  • inputs: Blobs that will be used in forward invocations.
  • outputs: Blobs that will be used in forward invocations.
  • targetId: Target identifier

See also

BackendNode, Target

Layers do not use their own Halide::Func members because layer fusing may have been applied, in which case the fused function should be scheduled instead.

fn try_attach(&mut self, node: &PtrOfBackendNode) -> Result<PtrOfBackendNode>

Implements layer fusing.

Parameters

  • node: Backend node of bottom layer.

See also

BackendNode

Relevant for graph-based backends. If the layer is attached successfully, returns a non-empty cv::Ptr to a node of the same backend. Fusing happens only over the last function.

fn set_activation(&mut self, layer: &PtrOfActivationLayer) -> Result<bool>

Tries to attach the subsequent activation layer to this layer, i.e. performs a partial layer fusion.

Parameters

  • layer: The subsequent activation layer.

Returns true if the activation layer has been attached successfully.

fn try_fuse(&mut self, top: &mut PtrOfLayer) -> Result<bool>

Tries to fuse the current layer with the next one.

Parameters

  • top: Next layer to be fused.

Returns

True if fusion was performed.

fn get_scale_shift(&self, scale: &mut Mat, shift: &mut Mat) -> Result<()>

Returns parameters of layers with channel-wise multiplication and addition.

Parameters

  • scale:[out] Channel-wise multipliers. Total number of values should be equal to number of channels.
  • shift:[out] Channel-wise offsets. Total number of values should be equal to number of channels.

Some layers can fuse their transformations with further layers, for example convolution + batch normalization. In that case the base layer uses weights from the layer that follows it and the fused layer is skipped. By default, scale and shift are empty, which means the layer has no element-wise multiplications or additions.
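
A hedged sketch of querying these fused parameters from Rust; the caller passes freshly created (empty) Mats:

    use opencv::{core::Mat, prelude::*};

    fn query_scale_shift(
        layer: &impl LayerTrait,
        scale: &mut Mat,
        shift: &mut Mat,
    ) -> opencv::Result<()> {
        // The Mats stay empty (0 rows) when the layer has no channel-wise
        // multiplication or addition to report.
        layer.get_scale_shift(scale, shift)?;
        println!("scale rows: {}, shift rows: {}", scale.rows(), shift.rows());
        Ok(())
    }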

fn unset_attached(&mut self) -> Result<()>

Detaches all layers attached to this particular layer.

fn get_memory_shapes(
    &self,
    inputs: &VectorOfMatShape,
    required_outputs: i32,
    outputs: &mut VectorOfMatShape,
    internals: &mut VectorOfMatShape
) -> Result<bool>

fn get_flops(
    &self,
    inputs: &VectorOfMatShape,
    outputs: &VectorOfMatShape
) -> Result<i64>

fn set_params_from(&mut self, params: &LayerParams) -> Result<()>


Implementors

impl LayerTrait for BaseConvolutionLayer[src]

impl LayerTrait for BlankLayer[src]

impl LayerTrait for ConcatLayer[src]

impl LayerTrait for ConstLayer[src]

impl LayerTrait for ConvolutionLayer[src]

impl LayerTrait for CropAndResizeLayer[src]

impl LayerTrait for CropLayer[src]

impl LayerTrait for DeconvolutionLayer[src]

impl LayerTrait for DetectionOutputLayer[src]

impl LayerTrait for EltwiseLayer[src]

impl LayerTrait for FlattenLayer[src]

impl LayerTrait for InnerProductLayer[src]

impl LayerTrait for InterpLayer[src]

impl LayerTrait for LRNLayer[src]

impl LayerTrait for Layer[src]

impl LayerTrait for MVNLayer[src]

impl LayerTrait for MaxUnpoolLayer[src]

impl LayerTrait for NormalizeBBoxLayer[src]

impl LayerTrait for PaddingLayer[src]

impl LayerTrait for PermuteLayer[src]

impl LayerTrait for PoolingLayer[src]

impl LayerTrait for PriorBoxLayer[src]

impl LayerTrait for ProposalLayer[src]

impl LayerTrait for RegionLayer[src]

impl LayerTrait for ReorgLayer[src]

impl LayerTrait for ReshapeLayer[src]

impl LayerTrait for ResizeLayer[src]

impl LayerTrait for ScaleLayer[src]

impl LayerTrait for ShiftLayer[src]

impl LayerTrait for ShuffleChannelLayer[src]

impl LayerTrait for SliceLayer[src]

impl LayerTrait for SoftmaxLayer[src]

impl LayerTrait for SplitLayer[src]

impl LayerTrait for PtrOfAbsLayer[src]

impl LayerTrait for PtrOfActivationLayer[src]

impl LayerTrait for PtrOfBNLLLayer[src]

impl LayerTrait for PtrOfBaseConvolutionLayer[src]

impl LayerTrait for PtrOfBatchNormLayer[src]

impl LayerTrait for PtrOfConcatLayer[src]

impl LayerTrait for PtrOfDetectionOutputLayer[src]

impl LayerTrait for PtrOfELULayer[src]

impl LayerTrait for PtrOfEltwiseLayer[src]

impl LayerTrait for PtrOfFlattenLayer[src]

impl LayerTrait for PtrOfInnerProductLayer[src]

impl LayerTrait for PtrOfLRNLayer[src]

impl LayerTrait for PtrOfLSTMLayer[src]

impl LayerTrait for PtrOfLayer[src]

impl LayerTrait for PtrOfMVNLayer[src]

impl LayerTrait for PtrOfMaxUnpoolLayer[src]

impl LayerTrait for PtrOfMishLayer[src]

impl LayerTrait for PtrOfNormalizeBBoxLayer[src]

impl LayerTrait for PtrOfPaddingLayer[src]

impl LayerTrait for PtrOfPermuteLayer[src]

impl LayerTrait for PtrOfPoolingLayer[src]

impl LayerTrait for PtrOfPowerLayer[src]

impl LayerTrait for PtrOfPriorBoxLayer[src]

impl LayerTrait for PtrOfProposalLayer[src]

impl LayerTrait for PtrOfRNNLayer[src]

impl LayerTrait for PtrOfReLU6Layer[src]

impl LayerTrait for PtrOfReLULayer[src]

impl LayerTrait for PtrOfRegionLayer[src]

impl LayerTrait for PtrOfReorgLayer[src]

impl LayerTrait for PtrOfReshapeLayer[src]

impl LayerTrait for PtrOfResizeLayer[src]

impl LayerTrait for PtrOfScaleLayer[src]

impl LayerTrait for PtrOfSigmoidLayer[src]

impl LayerTrait for PtrOfSliceLayer[src]

impl LayerTrait for PtrOfSoftmaxLayer[src]

impl LayerTrait for PtrOfSplitLayer[src]

impl LayerTrait for PtrOfSwishLayer[src]

impl LayerTrait for PtrOfTanHLayer[src]
