pub trait TextDetectionModelTraitConst: ModelTraitConst {
    // Required method
    fn as_raw_TextDetectionModel(&self) -> *const c_void;

    // Provided methods
    fn detect_with_confidences(
        &self,
        frame: &impl ToInputArray,
        detections: &mut Vector<Vector<Point>>,
        confidences: &mut Vector<f32>,
    ) -> Result<()> { ... }
    fn detect(
        &self,
        frame: &impl ToInputArray,
        detections: &mut Vector<Vector<Point>>,
    ) -> Result<()> { ... }
    fn detect_text_rectangles(
        &self,
        frame: &impl ToInputArray,
        detections: &mut Vector<RotatedRect>,
        confidences: &mut Vector<f32>,
    ) -> Result<()> { ... }
    fn detect_text_rectangles_1(
        &self,
        frame: &impl ToInputArray,
        detections: &mut Vector<RotatedRect>,
    ) -> Result<()> { ... }
}
Constant methods for crate::dnn::TextDetectionModel
Required Methods
fn as_raw_TextDetectionModel(&self) -> *const c_void
Provided Methods
fn detect_with_confidences(
    &self,
    frame: &impl ToInputArray,
    detections: &mut Vector<Vector<Point>>,
    confidences: &mut Vector<f32>,
) -> Result<()>
Performs detection.

Given the input frame, prepares the network input, runs network inference, post-processes the network output and returns the resulting detections.
Each result is a quadrangle's 4 points in this order:
- bottom-left
- top-left
- top-right
- bottom-right
Use the cv::getPerspectiveTransform function to retrieve the image region without perspective transformations.

Note: If the DL model doesn't support that kind of output then the result may be derived from the detectTextRectangles() output.

Parameters
- frame: The input image
- detections: [out] array with detections' quadrangles (4 points per result)
- confidences: [out] array with detection confidences
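For illustration, here is a minimal sketch of calling this method generically over the trait, assuming the caller has already constructed and configured a text detection model (model construction and input preprocessing setup are left out). The helper name `detect_confident_quads` and the confidence threshold are hypothetical, not part of the crate:

```rust
use opencv::{
    core::{Mat, Point, Vector},
    dnn::TextDetectionModelTraitConst,
    Result,
};

// Hypothetical helper: runs detect_with_confidences on `frame` and keeps only
// quadrangles whose confidence is at least `min_conf`.
fn detect_confident_quads(
    model: &impl TextDetectionModelTraitConst,
    frame: &Mat,
    min_conf: f32,
) -> Result<Vector<Vector<Point>>> {
    let mut detections = Vector::<Vector<Point>>::new();
    let mut confidences = Vector::<f32>::new();
    model.detect_with_confidences(frame, &mut detections, &mut confidences)?;

    let mut kept = Vector::<Vector<Point>>::new();
    for (quad, conf) in detections.iter().zip(confidences.iter()) {
        // Each `quad` holds 4 points: bottom-left, top-left, top-right, bottom-right.
        if conf >= min_conf {
            kept.push(quad);
        }
    }
    Ok(kept)
}
```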
fn detect(
    &self,
    frame: &impl ToInputArray,
    detections: &mut Vector<Vector<Point>>,
) -> Result<()>
Performs detection.

Given the input frame, prepares the network input, runs network inference, post-processes the network output and returns the resulting detections.
Each result is a quadrangle's 4 points in this order:
- bottom-left
- top-left
- top-right
- bottom-right
Use the cv::getPerspectiveTransform function to retrieve the image region without perspective transformations (see the sketch after the parameter list).

Note: If the DL model doesn't support that kind of output then the result may be derived from the detectTextRectangles() output.

Parameters
- frame: The input image
- detections: [out] array with detections' quadrangles (4 points per result)

Overloaded parameters
This overload of detect_with_confidences omits the confidences output array.
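As a sketch of the cv::getPerspectiveTransform workflow mentioned above, the snippet below rectifies the first detected quadrangle into an upright crop. It assumes the opencv crate's convention of making C++ default arguments explicit (DECOMP_LU, INTER_LINEAR, BORDER_CONSTANT, a default border value), so exact signatures may differ slightly between crate versions; the helper name `crop_first_detection` and the output-size heuristic are illustrative only:

```rust
use opencv::{
    core::{self, Mat, Point, Point2f, Scalar, Size, Vector},
    dnn::TextDetectionModelTraitConst,
    imgproc, Result,
};

// Illustrative: detect text and rectify the first quadrangle into an upright crop.
fn crop_first_detection(
    model: &impl TextDetectionModelTraitConst,
    frame: &Mat,
) -> Result<Option<Mat>> {
    let mut detections = Vector::<Vector<Point>>::new();
    model.detect(frame, &mut detections)?;
    let Some(quad) = detections.iter().next() else {
        return Ok(None);
    };

    // Points arrive as bottom-left, top-left, top-right, bottom-right.
    let mut src = Vector::<Point2f>::new();
    for p in quad.iter() {
        src.push(Point2f::new(p.x as f32, p.y as f32));
    }

    // Rough output size taken from the quadrangle's edges (illustrative only).
    let dist = |a: Point2f, b: Point2f| ((a.x - b.x).powi(2) + (a.y - b.y).powi(2)).sqrt();
    let w = dist(src.get(1)?, src.get(2)?).max(1.0) as i32; // top edge
    let h = dist(src.get(0)?, src.get(1)?).max(1.0) as i32; // left edge

    // Destination corners in the same order as the documented source corners.
    let mut dst = Vector::<Point2f>::new();
    dst.push(Point2f::new(0.0, (h - 1) as f32)); // bottom-left
    dst.push(Point2f::new(0.0, 0.0)); // top-left
    dst.push(Point2f::new((w - 1) as f32, 0.0)); // top-right
    dst.push(Point2f::new((w - 1) as f32, (h - 1) as f32)); // bottom-right

    let m = imgproc::get_perspective_transform(&src, &dst, core::DECOMP_LU)?;
    let mut crop = Mat::default();
    imgproc::warp_perspective(
        frame,
        &mut crop,
        &m,
        Size::new(w, h),
        imgproc::INTER_LINEAR,
        core::BORDER_CONSTANT,
        Scalar::default(),
    )?;
    Ok(Some(crop))
}
```

The destination corners are listed in the same bottom-left, top-left, top-right, bottom-right order as the detections, so the perspective transform maps the detected quadrangle onto an axis-aligned rectangle of the estimated size.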
fn detect_text_rectangles(
    &self,
    frame: &impl ToInputArray,
    detections: &mut Vector<RotatedRect>,
    confidences: &mut Vector<f32>,
) -> Result<()>
Performs detection.

Given the input frame, prepares the network input, runs network inference, post-processes the network output and returns the resulting detections.
Each result is a rotated rectangle.

Note: The result may be inaccurate in case of strong perspective transformations.

Parameters
- frame: the input image
- detections: [out] array with detections' RotatedRect results
- confidences: [out] array with detection confidences
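A short sketch of pairing each RotatedRect with its confidence to keep only the strongest detection; the helper name `best_rotated_rect` is hypothetical:

```rust
use opencv::{
    core::{Mat, RotatedRect, Vector},
    dnn::TextDetectionModelTraitConst,
    Result,
};

// Illustrative: return the rotated rectangle with the highest confidence, if any.
fn best_rotated_rect(
    model: &impl TextDetectionModelTraitConst,
    frame: &Mat,
) -> Result<Option<(RotatedRect, f32)>> {
    let mut detections = Vector::<RotatedRect>::new();
    let mut confidences = Vector::<f32>::new();
    model.detect_text_rectangles(frame, &mut detections, &mut confidences)?;

    let mut best: Option<(RotatedRect, f32)> = None;
    for (rect, conf) in detections.iter().zip(confidences.iter()) {
        if best.as_ref().map_or(true, |(_, c)| conf > *c) {
            best = Some((rect, conf));
        }
    }
    Ok(best)
}
```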
fn detect_text_rectangles_1(
    &self,
    frame: &impl ToInputArray,
    detections: &mut Vector<RotatedRect>,
) -> Result<()>
Performs detection.

Given the input frame, prepares the network input, runs network inference, post-processes the network output and returns the resulting detections.
Each result is a rotated rectangle.

Note: The result may be inaccurate in case of strong perspective transformations.

Parameters
- frame: the input image
- detections: [out] array with detections' RotatedRect results

Overloaded parameters
This overload of detect_text_rectangles omits the confidences output array.
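Finally, a minimal sketch of the confidence-free overload, which only needs the detections vector; the helper name `count_text_regions` is hypothetical:

```rust
use opencv::{
    core::{Mat, RotatedRect, Vector},
    dnn::TextDetectionModelTraitConst,
    Result,
};

// Illustrative: run the overload without confidences and report how many
// text regions were found.
fn count_text_regions(
    model: &impl TextDetectionModelTraitConst,
    frame: &Mat,
) -> Result<usize> {
    let mut detections = Vector::<RotatedRect>::new();
    model.detect_text_rectangles_1(frame, &mut detections)?;
    Ok(detections.len())
}
```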