pub trait ImageProcessorTrait {
// Required methods
fn convert(
&mut self,
src: &TensorImage,
dst: &mut TensorImage,
rotation: Rotation,
flip: Flip,
crop: Crop,
) -> Result<()>;
fn convert_ref(
&mut self,
src: &TensorImage,
dst: &mut TensorImageRef<'_>,
rotation: Rotation,
flip: Flip,
crop: Crop,
) -> Result<()>;
fn render_to_image(
&mut self,
dst: &mut TensorImage,
detect: &[DetectBox],
segmentation: &[Segmentation],
) -> Result<()>;
fn render_from_protos(
&mut self,
dst: &mut TensorImage,
detect: &[DetectBox],
proto_data: &ProtoData,
) -> Result<()>;
fn render_masks_from_protos(
&mut self,
detect: &[DetectBox],
proto_data: ProtoData,
output_width: usize,
output_height: usize,
) -> Result<Vec<MaskResult>>;
fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}Required Methods§
fn convert(
    &mut self,
    src: &TensorImage,
    dst: &mut TensorImage,
    rotation: Rotation,
    flip: Flip,
    crop: Crop,
) -> Result<()>
Converts the source image to the destination image format and size. The image is cropped first, then flipped, then rotated
§Arguments
- `dst` — The destination image to be converted to.
- `src` — The source image to convert from.
- `rotation` — The rotation to apply to the destination image.
- `flip` — Flips the image.
- `crop` — An optional rectangle specifying the area to crop from the source image.
§Returns
A Result indicating success or failure of the conversion.
fn convert_ref(
    &mut self,
    src: &TensorImage,
    dst: &mut TensorImageRef<'_>,
    rotation: Rotation,
    flip: Flip,
    crop: Crop,
) -> Result<()>
Converts the source image to a borrowed destination tensor for zero-copy preprocessing.
This variant accepts a TensorImageRef as the destination, enabling
direct writes into external buffers (e.g., model input tensors) without
intermediate copies.
§Arguments
- `src` — The source image to convert from.
- `dst` — A borrowed tensor image wrapping the destination buffer.
- `rotation` — The rotation to apply to the destination image.
- `flip` — Flips the image.
- `crop` — An optional rectangle specifying the area to crop from the source image.
§Returns
A Result indicating success or failure of the conversion.
fn render_to_image(
    &mut self,
    dst: &mut TensorImage,
    detect: &[DetectBox],
    segmentation: &[Segmentation],
) -> Result<()>
fn render_from_protos(
    &mut self,
    dst: &mut TensorImage,
    detect: &[DetectBox],
    proto_data: &ProtoData,
) -> Result<()>
Renders detection boxes and segmentation masks from raw prototype data.
For YOLO segmentation models, this avoids materializing intermediate
Array3<u8> masks. The ProtoData contains mask coefficients and the
prototype tensor; the renderer computes mask_coeff @ protos directly.
Phase 1 implementation materializes masks internally and delegates to existing render paths. Phase 2 will compute masks in the GPU shader.
fn render_masks_from_protos(
    &mut self,
    detect: &[DetectBox],
    proto_data: ProtoData,
    output_width: usize,
    output_height: usize,
) -> Result<Vec<MaskResult>>
Renders per-instance grayscale masks from raw prototype data at full output resolution.
Each mask is rendered at the detection’s bounding-box region using
sigmoid(mask_coeff @ protos) without thresholding, producing
continuous [0,255] values suitable for soft IoU computation.
Returns one MaskResult per detection with the bbox-cropped pixels.