pub trait NetTrait: NetTraitConst {
Show 27 methods
// Required method
fn as_raw_mut_Net(&mut self) -> *mut c_void;
// Provided methods
fn dump(&mut self) -> Result<String> { ... }
fn dump_to_file(&mut self, path: &str) -> Result<()> { ... }
fn add_layer_type(
&mut self,
name: &str,
typ: &str,
dtype: &i32,
params: &mut LayerParams
) -> Result<i32> { ... }
fn add_layer(
&mut self,
name: &str,
typ: &str,
params: &mut LayerParams
) -> Result<i32> { ... }
fn add_layer_to_prev_type(
&mut self,
name: &str,
typ: &str,
dtype: &i32,
params: &mut LayerParams
) -> Result<i32> { ... }
fn add_layer_to_prev(
&mut self,
name: &str,
typ: &str,
params: &mut LayerParams
) -> Result<i32> { ... }
fn connect_first_second(
&mut self,
out_pin: &str,
inp_pin: &str
) -> Result<()> { ... }
fn connect(
&mut self,
out_layer_id: i32,
out_num: i32,
inp_layer_id: i32,
inp_num: i32
) -> Result<()> { ... }
fn register_output(
&mut self,
output_name: &str,
layer_id: i32,
output_port: i32
) -> Result<i32> { ... }
fn set_inputs_names(
&mut self,
input_blob_names: &Vector<String>
) -> Result<()> { ... }
fn set_input_shape(
&mut self,
input_name: &str,
shape: &MatShape
) -> Result<()> { ... }
fn forward_single(&mut self, output_name: &str) -> Result<Mat> { ... }
fn forward_async(&mut self, output_name: &str) -> Result<AsyncArray> { ... }
fn forward_layer(
&mut self,
output_blobs: &mut dyn ToOutputArray,
output_name: &str
) -> Result<()> { ... }
fn forward(
&mut self,
output_blobs: &mut dyn ToOutputArray,
out_blob_names: &Vector<String>
) -> Result<()> { ... }
fn forward_and_retrieve(
&mut self,
output_blobs: &mut Vector<Vector<Mat>>,
out_blob_names: &Vector<String>
) -> Result<()> { ... }
fn quantize(
&mut self,
calib_data: &dyn ToInputArray,
inputs_dtype: i32,
outputs_dtype: i32,
per_channel: bool
) -> Result<Net> { ... }
fn set_halide_scheduler(&mut self, scheduler: &str) -> Result<()> { ... }
fn set_preferable_backend(&mut self, backend_id: i32) -> Result<()> { ... }
fn set_preferable_target(&mut self, target_id: i32) -> Result<()> { ... }
fn set_input(
&mut self,
blob: &dyn ToInputArray,
name: &str,
scalefactor: f64,
mean: Scalar
) -> Result<()> { ... }
fn set_param(
&mut self,
layer: i32,
num_param: i32,
blob: &Mat
) -> Result<()> { ... }
fn set_param_1(
&mut self,
layer_name: &str,
num_param: i32,
blob: &Mat
) -> Result<()> { ... }
fn enable_fusion(&mut self, fusion: bool) -> Result<()> { ... }
fn enable_winograd(&mut self, use_winograd: bool) -> Result<()> { ... }
fn get_perf_profile(&mut self, timings: &mut Vector<f64>) -> Result<i64> { ... }
}
Expand description
Mutable methods for crate::dnn::Net
Required Methods§
fn as_raw_mut_Net(&mut self) -> *mut c_void
Provided Methods§
sourcefn dump(&mut self) -> Result<String>
fn dump(&mut self) -> Result<String>
Dump net to String
Returns
String with structure, hyperparameters, backend, target and fusion Call method after setInput(). To see correct backend, target and fusion run after forward().
sourcefn dump_to_file(&mut self, path: &str) -> Result<()>
fn dump_to_file(&mut self, path: &str) -> Result<()>
Dump net structure, hyperparameters, backend, target and fusion to dot file
Parameters
- path: path to output file with .dot extension
See also
dump()
sourcefn add_layer_type(
&mut self,
name: &str,
typ: &str,
dtype: &i32,
params: &mut LayerParams
) -> Result<i32>
fn add_layer_type( &mut self, name: &str, typ: &str, dtype: &i32, params: &mut LayerParams ) -> Result<i32>
Adds new layer to the net.
Parameters
- name: unique name of the adding layer.
- type: typename of the adding layer (type must be registered in LayerRegister).
- dtype: datatype of output blobs.
- params: parameters which will be used to initialize the creating layer.
Returns
unique identifier of the created layer, or -1 on failure.
sourcefn add_layer(
&mut self,
name: &str,
typ: &str,
params: &mut LayerParams
) -> Result<i32>
fn add_layer( &mut self, name: &str, typ: &str, params: &mut LayerParams ) -> Result<i32>
Adds new layer to the net.
Parameters
- name: unique name of the adding layer.
- type: typename of the adding layer (type must be registered in LayerRegister).
- dtype: datatype of output blobs.
- params: parameters which will be used to initialize the creating layer.
Returns
unique identifier of the created layer, or -1 on failure.
Overloaded parameters
Datatype of output blobs set to default CV_32F
sourcefn add_layer_to_prev_type(
&mut self,
name: &str,
typ: &str,
dtype: &i32,
params: &mut LayerParams
) -> Result<i32>
fn add_layer_to_prev_type( &mut self, name: &str, typ: &str, dtype: &i32, params: &mut LayerParams ) -> Result<i32>
Adds new layer and connects its first input to the first output of previously added layer.
See also
addLayer()
sourcefn add_layer_to_prev(
&mut self,
name: &str,
typ: &str,
params: &mut LayerParams
) -> Result<i32>
fn add_layer_to_prev( &mut self, name: &str, typ: &str, params: &mut LayerParams ) -> Result<i32>
Adds new layer and connects its first input to the first output of previously added layer.
See also
addLayer()
Overloaded parameters
sourcefn connect_first_second(&mut self, out_pin: &str, inp_pin: &str) -> Result<()>
fn connect_first_second(&mut self, out_pin: &str, inp_pin: &str) -> Result<()>
Connects output of the first layer to input of the second layer.
Parameters
- outPin: descriptor of the first layer output.
- inpPin: descriptor of the second layer input.
Descriptors have the following template <layer_name>[.input_number]:
- the first part of the template layer_name is string name of the added layer. If this part is empty then the network input pseudo layer will be used;
- the second optional part of the template input_number is either the number of the layer input or its label. If this part is omitted then the first layer input will be used.
See also
setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
sourcefn connect(
&mut self,
out_layer_id: i32,
out_num: i32,
inp_layer_id: i32,
inp_num: i32
) -> Result<()>
fn connect( &mut self, out_layer_id: i32, out_num: i32, inp_layer_id: i32, inp_num: i32 ) -> Result<()>
Connects the `outNum` output of the first layer to the `inpNum` input of the second layer.
Parameters
- outLayerId: identifier of the first layer
- outNum: number of the first layer output
- inpLayerId: identifier of the second layer
- inpNum: number of the second layer input
sourcefn register_output(
&mut self,
output_name: &str,
layer_id: i32,
output_port: i32
) -> Result<i32>
fn register_output( &mut self, output_name: &str, layer_id: i32, output_port: i32 ) -> Result<i32>
Registers network output with name
Function may create additional ‘Identity’ layer.
Parameters
- outputName: identifier of the output
- layerId: identifier of the layer that produces the output
- outputPort: number of the output port of that layer
Returns
index of bound layer (the same as layerId or newly created)
sourcefn set_inputs_names(&mut self, input_blob_names: &Vector<String>) -> Result<()>
fn set_inputs_names(&mut self, input_blob_names: &Vector<String>) -> Result<()>
Sets outputs names of the network input pseudo layer.
Each net always has its own special network input pseudo-layer with id=0. This layer stores the user blobs only and doesn't make any computations. In fact, this layer provides the only way to pass user data into the network. As any other layer, this layer can label its outputs, and this function provides an easy way to do this.
sourcefn set_input_shape(&mut self, input_name: &str, shape: &MatShape) -> Result<()>
fn set_input_shape(&mut self, input_name: &str, shape: &MatShape) -> Result<()>
Specify shape of network input.
sourcefn forward_single(&mut self, output_name: &str) -> Result<Mat>
fn forward_single(&mut self, output_name: &str) -> Result<Mat>
Runs forward pass to compute the output of the layer with name `outputName`.
Parameters
- outputName: name for layer which output is needed to get
Returns
blob for the first output of the specified layer. By default runs forward pass for the whole network.
C++ default parameters
- output_name: String()
sourcefn forward_async(&mut self, output_name: &str) -> Result<AsyncArray>
fn forward_async(&mut self, output_name: &str) -> Result<AsyncArray>
Runs forward pass to compute the output of the layer with name `outputName`.
Parameters
- outputName: name for the layer which output is needed to get. By default runs forward pass for the whole network.
This is an asynchronous version of forward(const String&). dnn::DNN_BACKEND_INFERENCE_ENGINE backend is required.
C++ default parameters
- output_name: String()
sourcefn forward_layer(
&mut self,
output_blobs: &mut dyn ToOutputArray,
output_name: &str
) -> Result<()>
fn forward_layer( &mut self, output_blobs: &mut dyn ToOutputArray, output_name: &str ) -> Result<()>
Runs forward pass to compute the output of the layer with name `outputName`.
Parameters
- outputBlobs: contains all output blobs for specified layer.
- outputName: name for the layer which output is needed to get. If `outputName` is empty, runs forward pass for the whole network.
C++ default parameters
- output_name: String()
sourcefn forward(
&mut self,
output_blobs: &mut dyn ToOutputArray,
out_blob_names: &Vector<String>
) -> Result<()>
fn forward( &mut self, output_blobs: &mut dyn ToOutputArray, out_blob_names: &Vector<String> ) -> Result<()>
Runs forward pass to compute outputs of layers listed in @p outBlobNames.
Parameters
- outputBlobs: contains blobs for first outputs of specified layers.
- outBlobNames: names for layers which outputs are needed to get
sourcefn forward_and_retrieve(
&mut self,
output_blobs: &mut Vector<Vector<Mat>>,
out_blob_names: &Vector<String>
) -> Result<()>
fn forward_and_retrieve( &mut self, output_blobs: &mut Vector<Vector<Mat>>, out_blob_names: &Vector<String> ) -> Result<()>
Runs forward pass to compute outputs of layers listed in @p outBlobNames.
Parameters
- outputBlobs: contains all output blobs for each layer specified in @p outBlobNames.
- outBlobNames: names for layers which outputs are needed to get
sourcefn quantize(
&mut self,
calib_data: &dyn ToInputArray,
inputs_dtype: i32,
outputs_dtype: i32,
per_channel: bool
) -> Result<Net>
fn quantize( &mut self, calib_data: &dyn ToInputArray, inputs_dtype: i32, outputs_dtype: i32, per_channel: bool ) -> Result<Net>
Returns a quantized Net from a floating-point Net.
Parameters
- calibData: Calibration data to compute the quantization parameters.
- inputsDtype: Datatype of quantized net’s inputs. Can be CV_32F or CV_8S.
- outputsDtype: Datatype of quantized net’s outputs. Can be CV_32F or CV_8S.
- perChannel: Quantization granularity of quantized Net. The default is true, that means quantize model in per-channel way (channel-wise). Set it false to quantize model in per-tensor way (or tensor-wise).
C++ default parameters
- per_channel: true
sourcefn set_halide_scheduler(&mut self, scheduler: &str) -> Result<()>
fn set_halide_scheduler(&mut self, scheduler: &str) -> Result<()>
Compile Halide layers.
Parameters
- scheduler: Path to YAML file with scheduling directives.
See also
setPreferableBackend
Schedule layers that support Halide backend. Then compile them for specific target. For layers that not represented in scheduling file or if no manual scheduling used at all, automatic scheduling will be applied.
sourcefn set_preferable_backend(&mut self, backend_id: i32) -> Result<()>
fn set_preferable_backend(&mut self, backend_id: i32) -> Result<()>
Ask network to use specific computation backend where it supported.
Parameters
- backendId: backend identifier.
See also
Backend
If OpenCV is compiled with Intel’s Inference Engine library, DNN_BACKEND_DEFAULT means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals to DNN_BACKEND_OPENCV.
sourcefn set_preferable_target(&mut self, target_id: i32) -> Result<()>
fn set_preferable_target(&mut self, target_id: i32) -> Result<()>
Ask network to make computations on specific target device.
Parameters
- targetId: target identifier.
See also
Target
List of supported combinations backend / target:

|                        | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE | DNN_BACKEND_CUDA |
|------------------------|--------------------|------------------------------|--------------------|------------------|
| DNN_TARGET_CPU         | +                  | +                            | +                  |                  |
| DNN_TARGET_OPENCL      | +                  | +                            | +                  |                  |
| DNN_TARGET_OPENCL_FP16 | +                  | +                            |                    |                  |
| DNN_TARGET_MYRIAD      |                    | +                            |                    |                  |
| DNN_TARGET_FPGA        |                    | +                            |                    |                  |
| DNN_TARGET_CUDA        |                    |                              |                    | +                |
| DNN_TARGET_CUDA_FP16   |                    |                              |                    | +                |
| DNN_TARGET_HDDL        |                    | +                            |                    |                  |
sourcefn set_input(
&mut self,
blob: &dyn ToInputArray,
name: &str,
scalefactor: f64,
mean: Scalar
) -> Result<()>
fn set_input( &mut self, blob: &dyn ToInputArray, name: &str, scalefactor: f64, mean: Scalar ) -> Result<()>
Sets the new input value for the network
Parameters
- blob: A new blob. Should have CV_32F or CV_8U depth.
- name: A name of input layer.
- scalefactor: An optional normalization scale.
- mean: An optional mean subtraction values.
See also
connect(String, String) to know the format of the descriptor.
If scale or mean values are specified, a final input blob is computed as:
input(n, c, h, w) = scalefactor × (blob(n, c, h, w) − mean_c)
C++ default parameters
- name: “”
- scalefactor: 1.0
- mean: Scalar()
sourcefn set_param(&mut self, layer: i32, num_param: i32, blob: &Mat) -> Result<()>
fn set_param(&mut self, layer: i32, num_param: i32, blob: &Mat) -> Result<()>
Sets the new value for the learned param of the layer.
Parameters
- layer: name or id of the layer.
- numParam: index of the layer parameter in the Layer::blobs array.
- blob: the new value.
See also
Layer::blobs
Note: If shape of the new blob differs from the previous shape, then the following forward pass may fail.
fn set_param_1( &mut self, layer_name: &str, num_param: i32, blob: &Mat ) -> Result<()>
sourcefn enable_fusion(&mut self, fusion: bool) -> Result<()>
fn enable_fusion(&mut self, fusion: bool) -> Result<()>
Enables or disables layer fusion in the network.
Parameters
- fusion: true to enable the fusion, false to disable. The fusion is enabled by default.
sourcefn enable_winograd(&mut self, use_winograd: bool) -> Result<()>
fn enable_winograd(&mut self, use_winograd: bool) -> Result<()>
Enables or disables the Winograd compute branch. The Winograd compute branch can speed up 3x3 Convolution at a small loss of accuracy.
Parameters
- useWinograd: true to enable the Winograd compute branch. The default is true.
sourcefn get_perf_profile(&mut self, timings: &mut Vector<f64>) -> Result<i64>
fn get_perf_profile(&mut self, timings: &mut Vector<f64>) -> Result<i64>
Returns overall time for inference and timings (in ticks) for layers.
Indexes in the returned vector correspond to layer ids. Some layers can be fused with others, in which case a zero tick count will be returned for those skipped layers. Supported by DNN_BACKEND_OPENCV on DNN_TARGET_CPU only.
Parameters
- timings:[out] vector for tick timings for all layers.
Returns
overall ticks for model inference.