pub trait ImageProcessorTrait {
// Required methods
fn convert(
&mut self,
src: &TensorDyn,
dst: &mut TensorDyn,
rotation: Rotation,
flip: Flip,
crop: Crop,
) -> Result<()>;
fn draw_decoded_masks(
&mut self,
dst: &mut TensorDyn,
detect: &[DetectBox],
segmentation: &[Segmentation],
overlay: MaskOverlay<'_>,
) -> Result<()>;
fn draw_proto_masks(
&mut self,
dst: &mut TensorDyn,
detect: &[DetectBox],
proto_data: &ProtoData,
overlay: MaskOverlay<'_>,
) -> Result<()>;
fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}Required Methods§
fn convert(
    &mut self,
    src: &TensorDyn,
    dst: &mut TensorDyn,
    rotation: Rotation,
    flip: Flip,
    crop: Crop,
) -> Result<()>
Converts the source image to the destination image format and size. The image is cropped first, then flipped, then rotated
§Arguments
- `dst` — The destination image to be converted to.
- `src` — The source image to convert from.
- `rotation` — The rotation to apply to the destination image.
- `flip` — Flips the image.
- `crop` — An optional rectangle specifying the area to crop from the source image.
§Returns
A Result indicating success or failure of the conversion.
fn draw_decoded_masks(
    &mut self,
    dst: &mut TensorDyn,
    detect: &[DetectBox],
    segmentation: &[Segmentation],
    overlay: MaskOverlay<'_>,
) -> Result<()>
Draw pre-decoded detection boxes and segmentation masks onto dst.
Supports two segmentation modes based on the mask channel count:
- Instance segmentation (`C=1`): one `Segmentation` per detection; `segmentation` and `detect` are zipped.
- Semantic segmentation (`C>1`): a single `Segmentation` covering all classes; only the first element is used.
§Format requirements
- CPU backend: `dst` must be `RGBA` or `RGB`.
- OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
- G2D backend: not implemented (returns `NotImplemented`).
An empty segmentation slice is valid — only bounding boxes are drawn.
overlay controls compositing: background replaces dst’s base
content; opacity scales mask alpha. Use MaskOverlay::default()
for backward-compatible behaviour.
fn draw_proto_masks(
    &mut self,
    dst: &mut TensorDyn,
    detect: &[DetectBox],
    proto_data: &ProtoData,
    overlay: MaskOverlay<'_>,
) -> Result<()>
Draw masks from proto data onto image (fused decode+draw).
For YOLO segmentation models, this avoids materializing intermediate
Array3<u8> masks. The ProtoData contains mask coefficients and the
prototype tensor; the renderer computes mask_coeff @ protos directly
at the output resolution using bilinear sampling.
detect and proto_data.mask_coefficients must have the same length
(enforced by zip — excess entries are silently ignored). An empty
detect slice is valid and returns immediately after drawing nothing.
§Format requirements
Same as draw_decoded_masks. G2D returns NotImplemented.
overlay controls compositing — see draw_decoded_masks.