Skip to main content

edgefirst_image/
lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate uses [`TensorDyn`] from `edgefirst_tensor` to represent images,
21with [`PixelFormat`] metadata describing the pixel layout. The
22[`ImageProcessor`] struct manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
29# use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
30# fn main() -> Result<(), edgefirst_image::Error> {
31let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
32let src = load_image(image, Some(PixelFormat::Rgba), None)?;
33let mut converter = ImageProcessor::new()?;
34let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
35converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
36# Ok(())
37# }
38```
39
40## Environment Variables
41The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
42following environment variables:
43- `EDGEFIRST_FORCE_BACKEND`: When set to `cpu`, `g2d`, or `opengl` (case-insensitive),
44  only that single backend is initialized and no fallback chain is used. If the
45  forced backend fails to initialize, an error is returned immediately. This is
46  useful for benchmarking individual backends in isolation. When this variable is
47  set, the `EDGEFIRST_DISABLE_*` variables are ignored.
48- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
49  conversion, forcing the use of CPU or other available hardware methods.
50- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
51  conversion, forcing the use of CPU or other available hardware methods.
52- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
53  conversion, forcing the use of hardware acceleration methods. If no hardware
54  acceleration methods are available, an error will be returned when attempting
55  to create an `ImageProcessor`.
56
Additionally, the `TensorMemory` used for default allocations can be controlled with the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor allocations
use system memory. This disables the use of specialized memory regions for tensors and
hardware acceleration, but typically improves the performance of the CPU converter.
61*/
62#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
63
64use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
65use edgefirst_tensor::{
66    DType, PixelFormat, PixelLayout, Tensor, TensorDyn, TensorMemory, TensorTrait as _,
67};
68use enum_dispatch::enum_dispatch;
69use std::{fmt::Display, time::Instant};
70use zune_jpeg::{
71    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
72    JpegDecoder,
73};
74use zune_png::PngDecoder;
75
76pub use cpu::CPUProcessor;
77pub use error::{Error, Result};
78#[cfg(target_os = "linux")]
79pub use g2d::G2DProcessor;
80#[cfg(target_os = "linux")]
81#[cfg(feature = "opengl")]
82pub use opengl_headless::GLProcessorThreaded;
83#[cfg(target_os = "linux")]
84#[cfg(feature = "opengl")]
85pub use opengl_headless::Int8InterpolationMode;
86#[cfg(target_os = "linux")]
87#[cfg(feature = "opengl")]
88pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
89
90mod cpu;
91mod error;
92mod g2d;
93#[path = "gl/mod.rs"]
94mod opengl_headless;
95
96// Use `edgefirst_tensor::PixelFormat` variants (Rgb, Rgba, Grey, etc.) and
97// `TensorDyn` / `Tensor<u8>` with `.format()` metadata instead.
98
99/// Flips the image data, then rotates it. Returns a new `TensorDyn`.
100fn rotate_flip_to_dyn(
101    src: &Tensor<u8>,
102    src_fmt: PixelFormat,
103    rotation: Rotation,
104    flip: Flip,
105    memory: Option<TensorMemory>,
106) -> Result<TensorDyn, Error> {
107    let src_w = src.width().unwrap();
108    let src_h = src.height().unwrap();
109    let channels = src_fmt.channels();
110
111    let (dst_w, dst_h) = match rotation {
112        Rotation::None | Rotation::Rotate180 => (src_w, src_h),
113        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src_h, src_w),
114    };
115
116    let dst = Tensor::<u8>::image(dst_w, dst_h, src_fmt, memory)?;
117    let src_map = src.map()?;
118    let mut dst_map = dst.map()?;
119
120    CPUProcessor::flip_rotate_ndarray_pf(
121        &src_map,
122        &mut dst_map,
123        dst_w,
124        dst_h,
125        channels,
126        rotation,
127        flip,
128    )?;
129    drop(dst_map);
130    drop(src_map);
131
132    Ok(TensorDyn::from(dst))
133}
134
/// Rotation applied to an image, in 90° clockwise increments.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Creates a Rotation enum from an angle in degrees. The angle must be a
    /// multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        // Normalize into [0, 360) first, then map onto a quadrant. For an
        // unsigned angle the plain remainder is equivalent to rem_euclid.
        let normalized = angle % 360;
        match normalized {
            0 => Self::None,
            90 => Self::Clockwise90,
            180 => Self::Rotate180,
            270 => Self::CounterClockwise90,
            _ => panic!("rotation angle is not a multiple of 90"),
        }
    }
}
165
/// Flip (mirror) applied to an image.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flip.
    None = 0,
    /// Vertical flip.
    Vertical = 1,
    /// Horizontal flip.
    Horizontal = 2,
}
172
/// Controls how the color palette index is chosen for each detected object.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ColorMode {
    /// Color is chosen by object class label (`det.label`). Default.
    ///
    /// Preserves backward compatibility and is correct for semantic
    /// segmentation where colors carry class meaning.
    #[default]
    Class,
    /// Color is chosen by instance order (loop index, zero-based).
    ///
    /// Each detected object gets a unique color regardless of class,
    /// useful for instance segmentation.
    Instance,
    /// Color is chosen by track ID (future use; currently behaves like
    /// [`Instance`](Self::Instance)).
    Track,
}

impl ColorMode {
    /// Return the palette index for a detection given its loop index and label.
    ///
    /// `Class` keys the palette on the label; `Instance` and `Track` key it
    /// on the zero-based loop index.
    #[inline]
    pub fn index(self, idx: usize, label: usize) -> usize {
        if matches!(self, ColorMode::Class) {
            label
        } else {
            idx
        }
    }
}
202
/// Options for mask overlay rendering.
///
/// Controls how segmentation masks are composited onto the destination image:
/// - `background`: when set, the background image is drawn first and masks
///   are composited over it (result written to `dst`). When `None`, masks
///   are composited directly over `dst`'s existing content.
/// - `opacity`: scales the alpha of rendered mask colors. `1.0` (default)
///   preserves the class color's alpha unchanged; `0.5` makes masks
///   semi-transparent.
/// - `color_mode`: controls whether colors are assigned by class label,
///   instance index, or track ID. Defaults to [`ColorMode::Class`].
#[derive(Debug, Clone, Copy)]
pub struct MaskOverlay<'a> {
    /// Optional background composited beneath the masks; must match `dst`
    /// in shape and pixel format (validated before blitting).
    pub background: Option<&'a TensorDyn>,
    /// Alpha scale for rendered mask colors, clamped to `[0.0, 1.0]` when
    /// set through the builder.
    pub opacity: f32,
    /// Normalized letterbox region `[xmin, ymin, xmax, ymax]` in model-input
    /// space that contains actual image content (the rest is padding).
    ///
    /// When set, bounding boxes and mask coordinates from the decoder (which
    /// are in model-input normalized space) are mapped back to the original
    /// image coordinate space before rendering.
    ///
    /// Use [`with_letterbox_crop`](Self::with_letterbox_crop) to compute this
    /// from the [`Crop`] that was used in the model input [`convert`](crate::ImageProcessorTrait::convert) call.
    pub letterbox: Option<[f32; 4]>,
    /// Palette-index assignment strategy; see [`ColorMode`].
    pub color_mode: ColorMode,
}
230
231impl Default for MaskOverlay<'_> {
232    fn default() -> Self {
233        Self {
234            background: None,
235            opacity: 1.0,
236            letterbox: None,
237            color_mode: ColorMode::Class,
238        }
239    }
240}
241
242impl<'a> MaskOverlay<'a> {
243    pub fn new() -> Self {
244        Self::default()
245    }
246
247    pub fn with_background(mut self, bg: &'a TensorDyn) -> Self {
248        self.background = Some(bg);
249        self
250    }
251
252    pub fn with_opacity(mut self, opacity: f32) -> Self {
253        self.opacity = opacity.clamp(0.0, 1.0);
254        self
255    }
256
257    pub fn with_color_mode(mut self, mode: ColorMode) -> Self {
258        self.color_mode = mode;
259        self
260    }
261
262    /// Set the letterbox transform from the [`Crop`] used when preparing the
263    /// model input, so that bounding boxes and masks are correctly mapped back
264    /// to the original image coordinate space during rendering.
265    ///
266    /// Pass the same `crop` that was given to
267    /// [`convert`](crate::ImageProcessorTrait::convert) along with the model
268    /// input dimensions (`model_w` × `model_h`).
269    ///
270    /// Has no effect when `crop.dst_rect` is `None` (no letterbox applied).
271    pub fn with_letterbox_crop(mut self, crop: &Crop, model_w: usize, model_h: usize) -> Self {
272        if let Some(r) = crop.dst_rect {
273            self.letterbox = Some([
274                r.left as f32 / model_w as f32,
275                r.top as f32 / model_h as f32,
276                (r.left + r.width) as f32 / model_w as f32,
277                (r.top + r.height) as f32 / model_h as f32,
278            ]);
279        }
280        self
281    }
282
283    /// Blit background into dst (if set) and return an overlay with
284    /// background cleared so backends don't need to handle it.
285    fn apply_background(&self, dst: &mut TensorDyn) -> Result<MaskOverlay<'static>> {
286        use edgefirst_tensor::TensorMapTrait;
287        if let Some(bg) = self.background {
288            if bg.shape() != dst.shape() {
289                return Err(Error::InvalidShape(
290                    "background shape does not match dst".into(),
291                ));
292            }
293            if bg.format() != dst.format() {
294                return Err(Error::InvalidShape(
295                    "background pixel format does not match dst".into(),
296                ));
297            }
298            let bg_u8 = bg.as_u8().ok_or(Error::NotAnImage)?;
299            let dst_u8 = dst.as_u8_mut().ok_or(Error::NotAnImage)?;
300            let bg_map = bg_u8.map()?;
301            let mut dst_map = dst_u8.map()?;
302            let bg_slice = bg_map.as_slice();
303            let dst_slice = dst_map.as_mut_slice();
304            if bg_slice.len() != dst_slice.len() {
305                return Err(Error::InvalidShape(
306                    "background buffer size does not match dst".into(),
307                ));
308            }
309            dst_slice.copy_from_slice(bg_slice);
310        }
311        Ok(MaskOverlay {
312            background: None,
313            opacity: self.opacity.clamp(0.0, 1.0),
314            letterbox: self.letterbox,
315            color_mode: self.color_mode,
316        })
317    }
318}
319
320/// Apply the inverse letterbox transform to a bounding box.
321///
322/// `letterbox` is `[lx0, ly0, lx1, ly1]` — the normalized region of the model
323/// input that contains actual image content (output of
324/// [`MaskOverlay::with_letterbox_crop`]).
325///
326/// Converts model-input-normalized coords to output-image-normalized coords,
327/// clamped to `[0.0, 1.0]`. Also canonicalises the bbox (ensures xmin ≤ xmax).
328#[inline]
329fn unletter_bbox(bbox: DetectBox, lb: [f32; 4]) -> DetectBox {
330    let b = bbox.bbox.to_canonical();
331    let [lx0, ly0, lx1, ly1] = lb;
332    let inv_w = if lx1 > lx0 { 1.0 / (lx1 - lx0) } else { 1.0 };
333    let inv_h = if ly1 > ly0 { 1.0 / (ly1 - ly0) } else { 1.0 };
334    DetectBox {
335        bbox: edgefirst_decoder::BoundingBox {
336            xmin: ((b.xmin - lx0) * inv_w).clamp(0.0, 1.0),
337            ymin: ((b.ymin - ly0) * inv_h).clamp(0.0, 1.0),
338            xmax: ((b.xmax - lx0) * inv_w).clamp(0.0, 1.0),
339            ymax: ((b.ymax - ly0) * inv_h).clamp(0.0, 1.0),
340        },
341        ..bbox
342    }
343}
344
/// Source and destination crop configuration for an image conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from (`None` = whole image).
    pub src_rect: Option<Rect>,
    /// Region of the destination image to write into (`None` = whole image).
    pub dst_rect: Option<Rect>,
    /// Fill color (RGBA bytes) for destination areas outside the cropped
    /// region.
    pub dst_color: Option<[u8; 4]>,
}
351
352impl Default for Crop {
353    fn default() -> Self {
354        Crop::new()
355    }
356}
357impl Crop {
358    // Creates a new Crop with default values (no cropping).
359    pub fn new() -> Self {
360        Crop {
361            src_rect: None,
362            dst_rect: None,
363            dst_color: None,
364        }
365    }
366
367    // Sets the source rectangle for cropping.
368    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
369        self.src_rect = src_rect;
370        self
371    }
372
373    // Sets the destination rectangle for cropping.
374    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
375        self.dst_rect = dst_rect;
376        self
377    }
378
379    // Sets the destination color for areas outside the cropped region.
380    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
381        self.dst_color = dst_color;
382        self
383    }
384
385    // Creates a new Crop with no cropping.
386    pub fn no_crop() -> Self {
387        Crop::new()
388    }
389
390    /// Validate crop rectangles against explicit dimensions.
391    pub(crate) fn check_crop_dims(
392        &self,
393        src_w: usize,
394        src_h: usize,
395        dst_w: usize,
396        dst_h: usize,
397    ) -> Result<(), Error> {
398        let src_ok = self
399            .src_rect
400            .is_none_or(|r| r.left + r.width <= src_w && r.top + r.height <= src_h);
401        let dst_ok = self
402            .dst_rect
403            .is_none_or(|r| r.left + r.width <= dst_w && r.top + r.height <= dst_h);
404        match (src_ok, dst_ok) {
405            (true, true) => Ok(()),
406            (true, false) => Err(Error::CropInvalid(format!(
407                "Dest crop invalid: {:?}",
408                self.dst_rect
409            ))),
410            (false, true) => Err(Error::CropInvalid(format!(
411                "Src crop invalid: {:?}",
412                self.src_rect
413            ))),
414            (false, false) => Err(Error::CropInvalid(format!(
415                "Dest and Src crop invalid: {:?} {:?}",
416                self.dst_rect, self.src_rect
417            ))),
418        }
419    }
420
421    /// Validate crop rectangles against TensorDyn source and destination.
422    pub fn check_crop_dyn(
423        &self,
424        src: &edgefirst_tensor::TensorDyn,
425        dst: &edgefirst_tensor::TensorDyn,
426    ) -> Result<(), Error> {
427        self.check_crop_dims(
428            src.width().unwrap_or(0),
429            src.height().unwrap_or(0),
430            dst.width().unwrap_or(0),
431            dst.height().unwrap_or(0),
432        )
433    }
434}
435
/// Axis-aligned rectangle in pixel coordinates.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// Horizontal offset of the left edge, in pixels.
    pub left: usize,
    /// Vertical offset of the top edge, in pixels.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
443
444impl Rect {
445    // Creates a new Rect with the specified left, top, width, and height.
446    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
447        Self {
448            left,
449            top,
450            width,
451            height,
452        }
453    }
454
455    // Checks if the rectangle is valid for the given TensorDyn image.
456    pub fn check_rect_dyn(&self, image: &TensorDyn) -> bool {
457        let w = image.width().unwrap_or(0);
458        let h = image.height().unwrap_or(0);
459        self.left + self.width <= w && self.top + self.height <= h
460    }
461}
462
/// Common interface implemented by every image-conversion backend and
/// dispatched through the [`ImageProcessor`] enum via `enum_dispatch`.
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draw pre-decoded detection boxes and segmentation masks onto `dst`.
    ///
    /// Supports two segmentation modes based on the mask channel count:
    /// - **Instance segmentation** (`C=1`): one `Segmentation` per detection,
    ///   `segmentation` and `detect` are zipped.
    /// - **Semantic segmentation** (`C>1`): a single `Segmentation` covering
    ///   all classes; only the first element is used.
    ///
    /// # Format requirements
    ///
    /// - CPU backend: `dst` must be `RGBA` or `RGB`.
    /// - OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
    /// - G2D backend: not implemented (returns `NotImplemented`).
    ///
    /// An empty `segmentation` slice is valid — only bounding boxes are drawn.
    ///
    /// `overlay` controls compositing: `background` replaces dst's base
    /// content; `opacity` scales mask alpha. Use `MaskOverlay::default()`
    /// for backward-compatible behaviour.
    fn draw_decoded_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly
    /// at the output resolution using bilinear sampling.
    ///
    /// `detect` and `proto_data.mask_coefficients` must have the same length
    /// (enforced by zip — excess entries are silently ignored). An empty
    /// `detect` slice is valid and returns immediately after drawing nothing.
    ///
    /// # Format requirements
    ///
    /// Same as [`draw_decoded_masks`](Self::draw_decoded_masks). G2D returns `NotImplemented`.
    ///
    /// `overlay` controls compositing — see [`draw_decoded_masks`](Self::draw_decoded_masks).
    fn draw_proto_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;

    /// Sets the colors used for rendering segmentation masks. Up to 20 colors
    /// can be set.
    ///
    /// NOTE(review): the behavior when more than 20 colors are supplied
    /// (truncation vs. error) is backend-specific and not visible here —
    /// confirm against each backend implementation.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
544
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection and backend selection. The default configuration
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,

    /// Preferred compute backend.
    ///
    /// When set to a specific backend (not [`ComputeBackend::Auto`]), the
    /// processor initializes that backend — plus CPU as a fallback, except
    /// for `Cpu`, which is CPU only — and skips the other hardware backends.
    /// This takes precedence over `EDGEFIRST_FORCE_BACKEND` and the
    /// `EDGEFIRST_DISABLE_*` environment variables.
    ///
    /// - [`ComputeBackend::OpenGl`]: init OpenGL + CPU, skip G2D
    /// - [`ComputeBackend::G2d`]: init G2D + CPU, skip OpenGL
    /// - [`ComputeBackend::Cpu`]: init CPU only
    /// - [`ComputeBackend::Auto`]: existing env-var-driven selection
    pub backend: ComputeBackend,
}
576
/// Compute backend selection for [`ImageProcessor`].
///
/// Use with [`ImageProcessorConfig::backend`] to select which backend the
/// processor should prefer. When a specific hardware backend is selected,
/// the processor initializes that backend plus CPU as a fallback (`Cpu`
/// initializes CPU only). When `Auto` is used, the existing
/// environment-variable-driven selection applies.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ComputeBackend {
    /// Auto-detect based on available hardware and environment variables.
    #[default]
    Auto,
    /// CPU-only processing (no hardware acceleration).
    Cpu,
    /// Prefer G2D hardware blitter (+ CPU fallback).
    G2d,
    /// Prefer OpenGL ES (+ CPU fallback).
    OpenGl,
}
595
/// Backend forced via the `EDGEFIRST_FORCE_BACKEND` environment variable
/// or [`ImageProcessorConfig::backend`].
///
/// When set, the [`ImageProcessor`] only initializes and dispatches to the
/// selected backend — no fallback chain is used.
///
/// NOTE(review): in `with_config`, the config-driven selection paths set
/// `forced_backend: None`; only the environment-variable path produces
/// `Some(ForcedBackend)`. The mention of `ImageProcessorConfig::backend`
/// above may be stale — confirm the intended behavior.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    Cpu,
    G2d,
    OpenGl,
}
607
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
///
/// Construct with [`ImageProcessor::new`] (auto-detection) or
/// [`ImageProcessor::with_config`] (explicit backend/display selection).
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,

    /// When set, only the specified backend is used — no fallback chain.
    pub(crate) forced_backend: Option<ForcedBackend>,
}
631
// SAFETY: NOTE(review): these manual impls assert that every contained
// backend (CPU, G2D, threaded GL) can be safely moved and shared across
// threads. That property is not provable from this file alone — confirm
// against each backend's internals before relying on it.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
634
635impl ImageProcessor {
636    /// Creates a new `ImageProcessor` instance, initializing available
637    /// hardware converters based on the system capabilities and environment
638    /// variables.
639    ///
640    /// # Examples
641    /// ```rust
642    /// # use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
643    /// # use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
644    /// # fn main() -> Result<(), edgefirst_image::Error> {
645    /// let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
646    /// let src = load_image(image, Some(PixelFormat::Rgba), None)?;
647    /// let mut converter = ImageProcessor::new()?;
648    /// let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
649    /// converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
650    /// # Ok(())
651    /// # }
652    /// ```
653    pub fn new() -> Result<Self> {
654        Self::with_config(ImageProcessorConfig::default())
655    }
656
657    /// Creates a new `ImageProcessor` with the given configuration.
658    ///
659    /// When [`ImageProcessorConfig::backend`] is set to a specific backend,
660    /// environment variables are ignored and the processor initializes the
661    /// requested backend plus CPU as a fallback.
662    ///
663    /// When `Auto`, the existing `EDGEFIRST_FORCE_BACKEND` and
664    /// `EDGEFIRST_DISABLE_*` environment variables apply.
665    #[allow(unused_variables)]
666    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
667        // ── Config-driven backend selection ──────────────────────────
668        // When the caller explicitly requests a backend via the config,
669        // skip all environment variable logic.
670        match config.backend {
671            ComputeBackend::Cpu => {
672                log::info!("ComputeBackend::Cpu — CPU only");
673                return Ok(Self {
674                    cpu: Some(CPUProcessor::new()),
675                    #[cfg(target_os = "linux")]
676                    g2d: None,
677                    #[cfg(target_os = "linux")]
678                    #[cfg(feature = "opengl")]
679                    opengl: None,
680                    forced_backend: None,
681                });
682            }
683            ComputeBackend::G2d => {
684                log::info!("ComputeBackend::G2d — G2D + CPU fallback");
685                #[cfg(target_os = "linux")]
686                {
687                    let g2d = match G2DProcessor::new() {
688                        Ok(g) => Some(g),
689                        Err(e) => {
690                            log::warn!("G2D requested but failed to initialize: {e:?}");
691                            None
692                        }
693                    };
694                    return Ok(Self {
695                        cpu: Some(CPUProcessor::new()),
696                        g2d,
697                        #[cfg(feature = "opengl")]
698                        opengl: None,
699                        forced_backend: None,
700                    });
701                }
702                #[cfg(not(target_os = "linux"))]
703                {
704                    log::warn!("G2D requested but not available on this platform, using CPU");
705                    return Ok(Self {
706                        cpu: Some(CPUProcessor::new()),
707                        forced_backend: None,
708                    });
709                }
710            }
711            ComputeBackend::OpenGl => {
712                log::info!("ComputeBackend::OpenGl — OpenGL + CPU fallback");
713                #[cfg(target_os = "linux")]
714                {
715                    #[cfg(feature = "opengl")]
716                    let opengl = match GLProcessorThreaded::new(config.egl_display) {
717                        Ok(gl) => Some(gl),
718                        Err(e) => {
719                            log::warn!("OpenGL requested but failed to initialize: {e:?}");
720                            None
721                        }
722                    };
723                    return Ok(Self {
724                        cpu: Some(CPUProcessor::new()),
725                        g2d: None,
726                        #[cfg(feature = "opengl")]
727                        opengl,
728                        forced_backend: None,
729                    });
730                }
731                #[cfg(not(target_os = "linux"))]
732                {
733                    log::warn!("OpenGL requested but not available on this platform, using CPU");
734                    return Ok(Self {
735                        cpu: Some(CPUProcessor::new()),
736                        forced_backend: None,
737                    });
738                }
739            }
740            ComputeBackend::Auto => { /* fall through to env-var logic below */ }
741        }
742
743        // ── EDGEFIRST_FORCE_BACKEND ──────────────────────────────────
744        // When set, only the requested backend is initialised and no
745        // fallback chain is used. Accepted values (case-insensitive):
746        //   "cpu", "g2d", "opengl"
747        if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
748            let val_lower = val.to_lowercase();
749            let forced = match val_lower.as_str() {
750                "cpu" => ForcedBackend::Cpu,
751                "g2d" => ForcedBackend::G2d,
752                "opengl" => ForcedBackend::OpenGl,
753                other => {
754                    return Err(Error::ForcedBackendUnavailable(format!(
755                        "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
756                    )));
757                }
758            };
759
760            log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");
761
762            return match forced {
763                ForcedBackend::Cpu => Ok(Self {
764                    cpu: Some(CPUProcessor::new()),
765                    #[cfg(target_os = "linux")]
766                    g2d: None,
767                    #[cfg(target_os = "linux")]
768                    #[cfg(feature = "opengl")]
769                    opengl: None,
770                    forced_backend: Some(ForcedBackend::Cpu),
771                }),
772                ForcedBackend::G2d => {
773                    #[cfg(target_os = "linux")]
774                    {
775                        let g2d = G2DProcessor::new().map_err(|e| {
776                            Error::ForcedBackendUnavailable(format!(
777                                "g2d forced but failed to initialize: {e:?}"
778                            ))
779                        })?;
780                        Ok(Self {
781                            cpu: None,
782                            g2d: Some(g2d),
783                            #[cfg(feature = "opengl")]
784                            opengl: None,
785                            forced_backend: Some(ForcedBackend::G2d),
786                        })
787                    }
788                    #[cfg(not(target_os = "linux"))]
789                    {
790                        Err(Error::ForcedBackendUnavailable(
791                            "g2d backend is only available on Linux".into(),
792                        ))
793                    }
794                }
795                ForcedBackend::OpenGl => {
796                    #[cfg(target_os = "linux")]
797                    #[cfg(feature = "opengl")]
798                    {
799                        let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
800                            Error::ForcedBackendUnavailable(format!(
801                                "opengl forced but failed to initialize: {e:?}"
802                            ))
803                        })?;
804                        Ok(Self {
805                            cpu: None,
806                            g2d: None,
807                            opengl: Some(opengl),
808                            forced_backend: Some(ForcedBackend::OpenGl),
809                        })
810                    }
811                    #[cfg(not(all(target_os = "linux", feature = "opengl")))]
812                    {
813                        Err(Error::ForcedBackendUnavailable(
814                            "opengl backend requires Linux with the 'opengl' feature enabled"
815                                .into(),
816                        ))
817                    }
818                }
819            };
820        }
821
822        // ── Existing DISABLE logic (unchanged) ──────────────────────
823        #[cfg(target_os = "linux")]
824        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
825            .map(|x| x != "0" && x.to_lowercase() != "false")
826            .unwrap_or(false)
827        {
828            log::debug!("EDGEFIRST_DISABLE_G2D is set");
829            None
830        } else {
831            match G2DProcessor::new() {
832                Ok(g2d_converter) => Some(g2d_converter),
833                Err(err) => {
834                    log::warn!("Failed to initialize G2D converter: {err:?}");
835                    None
836                }
837            }
838        };
839
840        #[cfg(target_os = "linux")]
841        #[cfg(feature = "opengl")]
842        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
843            .map(|x| x != "0" && x.to_lowercase() != "false")
844            .unwrap_or(false)
845        {
846            log::debug!("EDGEFIRST_DISABLE_GL is set");
847            None
848        } else {
849            match GLProcessorThreaded::new(config.egl_display) {
850                Ok(gl_converter) => Some(gl_converter),
851                Err(err) => {
852                    log::warn!("Failed to initialize GL converter: {err:?}");
853                    None
854                }
855            }
856        };
857
858        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
859            .map(|x| x != "0" && x.to_lowercase() != "false")
860            .unwrap_or(false)
861        {
862            log::debug!("EDGEFIRST_DISABLE_CPU is set");
863            None
864        } else {
865            Some(CPUProcessor::new())
866        };
867        Ok(Self {
868            cpu,
869            #[cfg(target_os = "linux")]
870            g2d,
871            #[cfg(target_os = "linux")]
872            #[cfg(feature = "opengl")]
873            opengl,
874            forced_backend: None,
875        })
876    }
877
878    /// Sets the interpolation mode for int8 proto textures on the OpenGL
879    /// backend. No-op if OpenGL is not available.
880    #[cfg(target_os = "linux")]
881    #[cfg(feature = "opengl")]
882    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
883        if let Some(ref mut gl) = self.opengl {
884            gl.set_int8_interpolation_mode(mode)?;
885        }
886        Ok(())
887    }
888
    /// Create a [`TensorDyn`] image with the best available memory backend.
    ///
    /// Priority: DMA-buf → PBO (byte-sized types: u8, i8) → system memory.
    ///
    /// Use this method instead of [`TensorDyn::image()`] when the tensor will
    /// be used with [`ImageProcessor::convert()`]. It selects the optimal
    /// memory backing (including PBO for GPU zero-copy) which direct
    /// allocation cannot achieve.
    ///
    /// This method is on [`ImageProcessor`] rather than [`ImageProcessorTrait`]
    /// because optimal allocation requires knowledge of the active compute
    /// backends (e.g. the GL context handle for PBO allocation). Individual
    /// backend implementations ([`CPUProcessor`], etc.) do not have this
    /// cross-backend visibility.
    ///
    /// # Arguments
    ///
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format
    /// * `dtype` - Element data type (e.g. `DType::U8`, `DType::I8`)
    /// * `memory` - Optional memory type override; when `None`, the best
    ///   available backend is selected automatically.
    ///
    /// # Returns
    ///
    /// A [`TensorDyn`] backed by the highest-performance memory type
    /// available on this system.
    ///
    /// # Errors
    ///
    /// Returns an error if all allocation strategies fail. Note that a DMA or
    /// PBO allocation failure is NOT an error by itself: each is logged (PBO)
    /// or silently skipped (DMA) and the next strategy is tried; only the
    /// final `Mem` allocation propagates its error.
    pub fn create_image(
        &self,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> Result<TensorDyn> {
        // If an explicit memory type is requested, honour it directly.
        // No fallback is attempted in this case — the caller asked for a
        // specific backing, so its failure propagates as-is.
        if let Some(mem) = memory {
            return Ok(TensorDyn::image(width, height, format, dtype, Some(mem))?);
        }

        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
        // as the preferred transfer path (PBO is better than DMA in that case).
        #[cfg(target_os = "linux")]
        {
            // With the `opengl` feature off, there is no GL backend that could
            // prefer PBO, so DMA is always attempted first.
            #[cfg(feature = "opengl")]
            let gl_uses_pbo = self
                .opengl
                .as_ref()
                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
            #[cfg(not(feature = "opengl"))]
            let gl_uses_pbo = false;

            if !gl_uses_pbo {
                // Failure here (e.g. no DMA heap available) falls through to
                // the PBO / Mem strategies below.
                if let Ok(img) = TensorDyn::image(
                    width,
                    height,
                    format,
                    dtype,
                    Some(edgefirst_tensor::TensorMemory::Dma),
                ) {
                    return Ok(img);
                }
            }
        }

        // Try PBO (if GL available).
        // PBO buffers are u8-sized; the int8 shader emulates i8 output via
        // XOR 0x80 on the same underlying buffer, so both U8 and I8 work.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if dtype.size() == 1 {
            if let Some(gl) = &self.opengl {
                match gl.create_pbo_image(width, height, format) {
                    Ok(t) => {
                        if dtype == DType::I8 {
                            // SAFETY: Tensor<u8> and Tensor<i8> are layout-
                            // identical (same element size, no T-dependent
                            // drop glue). The int8 shader applies XOR 0x80
                            // on the same PBO buffer. Same rationale as
                            // gl::processor::tensor_i8_as_u8_mut.
                            // Invariant: PBO tensors never have chroma
                            // (create_pbo_image → Tensor::wrap sets it None).
                            debug_assert!(
                                t.chroma().is_none(),
                                "PBO i8 transmute requires chroma == None"
                            );
                            let t_i8: Tensor<i8> = unsafe { std::mem::transmute(t) };
                            return Ok(TensorDyn::from(t_i8));
                        }
                        return Ok(TensorDyn::from(t));
                    }
                    Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
                }
            }
        }

        // Fallback to Mem: plain system memory always works (or its error is
        // the one worth reporting, since every faster strategy was exhausted).
        Ok(TensorDyn::image(
            width,
            height,
            format,
            dtype,
            Some(edgefirst_tensor::TensorMemory::Mem),
        )?)
    }
999
    /// Import an external DMA-BUF image.
    ///
    /// Each [`PlaneDescriptor`] owns an already-duped fd; this method
    /// consumes the descriptors and takes ownership of those fds (whether
    /// the call succeeds or fails).
    ///
    /// The caller must ensure the DMA-BUF allocation is large enough for the
    /// specified width, height, format, and any stride/offset on the plane
    /// descriptors. No buffer-size validation is performed; an undersized
    /// buffer may cause GPU faults or EGL import failure.
    ///
    /// # Arguments
    ///
    /// * `image` - Plane descriptor for the primary (or only) plane
    /// * `chroma` - Optional plane descriptor for the UV chroma plane
    ///   (required for multiplane NV12)
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format of the buffer
    /// * `dtype` - Element data type (e.g. `DType::U8`)
    ///
    /// # Returns
    ///
    /// A `TensorDyn` configured as an image.
    ///
    /// # Errors
    ///
    /// * [`Error::NotSupported`] if `chroma` is `Some` for a non-semi-planar
    ///   format, or multiplane NV16 (not yet supported), or the fd is not
    ///   DMA-backed
    /// * [`Error::InvalidShape`] if NV12 height is odd
    ///
    /// # Platform
    ///
    /// Linux only.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use edgefirst_tensor::PlaneDescriptor;
    ///
    /// // Single-plane RGBA
    /// let pd = PlaneDescriptor::new(fd.as_fd())?;
    /// let src = proc.import_image(pd, None, 1920, 1080, PixelFormat::Rgba, DType::U8)?;
    ///
    /// // Multi-plane NV12 with stride
    /// let y_pd = PlaneDescriptor::new(y_fd.as_fd())?.with_stride(2048);
    /// let uv_pd = PlaneDescriptor::new(uv_fd.as_fd())?.with_stride(2048);
    /// let src = proc.import_image(y_pd, Some(uv_pd), 1920, 1080,
    ///                             PixelFormat::Nv12, DType::U8)?;
    /// ```
    #[cfg(target_os = "linux")]
    pub fn import_image(
        &self,
        image: edgefirst_tensor::PlaneDescriptor,
        chroma: Option<edgefirst_tensor::PlaneDescriptor>,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
    ) -> Result<TensorDyn> {
        use edgefirst_tensor::{Tensor, TensorMemory};

        // Capture stride/offset from descriptors before consuming them
        // (into_fd below takes the descriptors by value, so these accessors
        // are unreachable afterwards).
        let image_stride = image.stride();
        let image_offset = image.offset();
        let chroma_stride = chroma.as_ref().and_then(|c| c.stride());
        let chroma_offset = chroma.as_ref().and_then(|c| c.offset());

        if let Some(chroma_pd) = chroma {
            // ── Multiplane path ──────────────────────────────────────
            // Multiplane tensors are backed by Tensor<u8> (or transmuted to
            // Tensor<i8>). Reject other dtypes to avoid silently returning a
            // tensor with the wrong element type.
            if dtype != DType::U8 && dtype != DType::I8 {
                return Err(Error::NotSupported(format!(
                    "multiplane import only supports U8/I8, got {dtype:?}"
                )));
            }
            if format.layout() != PixelLayout::SemiPlanar {
                return Err(Error::NotSupported(format!(
                    "import_image with chroma requires a semi-planar format, got {format:?}"
                )));
            }

            // Height (in rows) of the separate chroma plane for this format.
            let chroma_h = match format {
                PixelFormat::Nv12 => {
                    // NV12 chroma is vertically subsampled 2:1, so an odd
                    // luma height cannot be represented.
                    if !height.is_multiple_of(2) {
                        return Err(Error::InvalidShape(format!(
                            "NV12 requires even height, got {height}"
                        )));
                    }
                    height / 2
                }
                // NV16 multiplane will be supported in a future release;
                // the GL backend currently only handles NV12 plane1 attributes.
                PixelFormat::Nv16 => {
                    return Err(Error::NotSupported(
                        "multiplane NV16 is not yet supported; use contiguous NV16 instead".into(),
                    ))
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported semi-planar format: {format:?}"
                    )))
                }
            };

            let luma = Tensor::<u8>::from_fd(image.into_fd(), &[height, width], Some("luma"))?;
            if luma.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "luma fd must be DMA-backed, got {:?}",
                    luma.memory()
                )));
            }

            let chroma_tensor =
                Tensor::<u8>::from_fd(chroma_pd.into_fd(), &[chroma_h, width], Some("chroma"))?;
            if chroma_tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "chroma fd must be DMA-backed, got {:?}",
                    chroma_tensor.memory()
                )));
            }

            // from_planes creates the combined tensor with format set,
            // preserving luma's row_stride (currently None since luma was raw).
            let mut tensor = Tensor::<u8>::from_planes(luma, chroma_tensor, format)?;

            // Apply stride/offset to the combined tensor (luma plane)
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }

            // Apply stride/offset to the chroma sub-tensor.
            // The chroma tensor is a raw 2D [chroma_h, width] tensor without
            // format metadata, so we validate stride manually rather than
            // using set_row_stride (which requires format).
            if let Some(chroma_ref) = tensor.chroma_mut() {
                if let Some(s) = chroma_stride {
                    // Manual bound check replaces the format-aware validation
                    // that set_row_stride would normally perform.
                    if s < width {
                        return Err(Error::InvalidShape(format!(
                            "chroma stride {s} < minimum {width} for {format:?}"
                        )));
                    }
                    chroma_ref.set_row_stride_unchecked(s);
                }
                if let Some(o) = chroma_offset {
                    chroma_ref.set_plane_offset(o);
                }
            }

            if dtype == DType::I8 {
                // SAFETY: Tensor<u8> and Tensor<i8> have identical layout because
                // the struct contains only type-erased storage (OwnedFd, shape, name),
                // no inline T values. This assertion catches layout drift at compile time.
                const {
                    assert!(std::mem::size_of::<Tensor<u8>>() == std::mem::size_of::<Tensor<i8>>());
                    assert!(
                        std::mem::align_of::<Tensor<u8>>() == std::mem::align_of::<Tensor<i8>>()
                    );
                }
                let tensor_i8: Tensor<i8> = unsafe { std::mem::transmute(tensor) };
                return Ok(TensorDyn::from(tensor_i8));
            }
            Ok(TensorDyn::from(tensor))
        } else {
            // ── Single-plane path ────────────────────────────────────
            // Shape follows the layout convention: HWC for packed, CHW for
            // planar, and a single [total_h, width] byte plane for semi-planar
            // (luma rows followed by interleaved chroma rows).
            let shape = match format.layout() {
                PixelLayout::Packed => vec![height, width, format.channels()],
                PixelLayout::Planar => vec![format.channels(), height, width],
                PixelLayout::SemiPlanar => {
                    let total_h = match format {
                        PixelFormat::Nv12 => {
                            if !height.is_multiple_of(2) {
                                return Err(Error::InvalidShape(format!(
                                    "NV12 requires even height, got {height}"
                                )));
                            }
                            // NV12: full-height luma + half-height chroma.
                            height * 3 / 2
                        }
                        // NV16: full-height luma + full-height chroma.
                        PixelFormat::Nv16 => height * 2,
                        _ => {
                            return Err(Error::InvalidShape(format!(
                                "unknown semi-planar height multiplier for {format:?}"
                            )))
                        }
                    };
                    vec![total_h, width]
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported pixel layout for import_image: {:?}",
                        format.layout()
                    )));
                }
            };
            let tensor = TensorDyn::from_fd(image.into_fd(), &shape, dtype, None)?;
            if tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "import_image requires DMA-backed fd, got {:?}",
                    tensor.memory()
                )));
            }
            let mut tensor = tensor.with_format(format)?;
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }
            Ok(tensor)
        }
    }
1217
1218    /// Decode model outputs and draw segmentation masks onto `dst`.
1219    ///
1220    /// This is the primary mask rendering API. The processor decodes via the
1221    /// provided [`Decoder`], selects the optimal rendering path (hybrid
1222    /// CPU+GL or fused GPU), and composites masks onto `dst`.
1223    ///
1224    /// Returns the detected bounding boxes.
1225    pub fn draw_masks(
1226        &mut self,
1227        decoder: &edgefirst_decoder::Decoder,
1228        outputs: &[&TensorDyn],
1229        dst: &mut TensorDyn,
1230        overlay: MaskOverlay<'_>,
1231    ) -> Result<Vec<DetectBox>> {
1232        let mut output_boxes = Vec::with_capacity(100);
1233
1234        // Try proto path first (fused rendering without materializing masks)
1235        let proto_result = decoder
1236            .decode_proto(outputs, &mut output_boxes)
1237            .map_err(|e| Error::Internal(format!("decode_proto: {e:#?}")))?;
1238
1239        if let Some(proto_data) = proto_result {
1240            self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
1241        } else {
1242            // Detection-only or unsupported model: full decode + render
1243            let mut output_masks = Vec::with_capacity(100);
1244            decoder
1245                .decode(outputs, &mut output_boxes, &mut output_masks)
1246                .map_err(|e| Error::Internal(format!("decode: {e:#?}")))?;
1247            self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
1248        }
1249        Ok(output_boxes)
1250    }
1251
1252    /// Decode tracked model outputs and draw segmentation masks onto `dst`.
1253    ///
1254    /// Like [`draw_masks`](Self::draw_masks) but integrates a tracker for
1255    /// maintaining object identities across frames. The tracker runs after
1256    /// NMS but before mask extraction.
1257    ///
1258    /// Returns detected boxes and track info.
1259    #[cfg(feature = "tracker")]
1260    pub fn draw_masks_tracked<TR: edgefirst_tracker::Tracker<DetectBox>>(
1261        &mut self,
1262        decoder: &edgefirst_decoder::Decoder,
1263        tracker: &mut TR,
1264        timestamp: u64,
1265        outputs: &[&TensorDyn],
1266        dst: &mut TensorDyn,
1267        overlay: MaskOverlay<'_>,
1268    ) -> Result<(Vec<DetectBox>, Vec<edgefirst_tracker::TrackInfo>)> {
1269        let mut output_boxes = Vec::with_capacity(100);
1270        let mut output_tracks = Vec::new();
1271
1272        let proto_result = decoder
1273            .decode_proto_tracked(
1274                tracker,
1275                timestamp,
1276                outputs,
1277                &mut output_boxes,
1278                &mut output_tracks,
1279            )
1280            .map_err(|e| Error::Internal(format!("decode_proto_tracked: {e:#?}")))?;
1281
1282        if let Some(proto_data) = proto_result {
1283            self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
1284        } else {
1285            // Note: decode_proto_tracked returns None for detection-only/ModelPack
1286            // models WITHOUT calling the tracker. The else branch below is the
1287            // first (and only) tracker call for those model types.
1288            let mut output_masks = Vec::with_capacity(100);
1289            decoder
1290                .decode_tracked(
1291                    tracker,
1292                    timestamp,
1293                    outputs,
1294                    &mut output_boxes,
1295                    &mut output_masks,
1296                    &mut output_tracks,
1297                )
1298                .map_err(|e| Error::Internal(format!("decode_tracked: {e:#?}")))?;
1299            self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
1300        }
1301        Ok((output_boxes, output_tracks))
1302    }
1303
1304    /// Materialize per-instance segmentation masks from raw prototype data.
1305    ///
1306    /// Computes `mask_coeff @ protos` with sigmoid activation for each detection,
1307    /// producing compact masks at prototype resolution (e.g., 160×160 crops).
1308    /// Mask values are continuous sigmoid confidence outputs quantized to u8
1309    /// (0 = background, 255 = full confidence), NOT binary thresholded.
1310    ///
1311    /// The returned [`Vec<Segmentation>`] can be:
1312    /// - Inspected or exported for analytics, IoU computation, etc.
1313    /// - Passed directly to [`ImageProcessorTrait::draw_decoded_masks`] for
1314    ///   GPU-interpolated rendering.
1315    ///
1316    /// # Performance Note
1317    ///
1318    /// Calling `materialize_masks` + `draw_decoded_masks` separately prevents
1319    /// the HAL from using its internal fused optimization path. For render-only
1320    /// use cases, prefer [`ImageProcessorTrait::draw_proto_masks`] which selects
1321    /// the fastest path automatically (currently 1.6×–27× faster on tested
1322    /// platforms). Use this method when you need access to the intermediate masks.
1323    ///
1324    /// # Errors
1325    ///
1326    /// Returns [`Error::NoConverter`] if the CPU backend is not available.
1327    pub fn materialize_masks(
1328        &self,
1329        detect: &[DetectBox],
1330        proto_data: &ProtoData,
1331        letterbox: Option<[f32; 4]>,
1332    ) -> Result<Vec<Segmentation>> {
1333        let cpu = self.cpu.as_ref().ok_or(Error::NoConverter)?;
1334        cpu.materialize_segmentations(detect, proto_data, letterbox)
1335    }
1336}
1337
1338impl ImageProcessorTrait for ImageProcessor {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// Prefer hardware accelerators when available, falling back to CPU if
    /// necessary. When a backend was forced via `EDGEFIRST_FORCE_BACKEND`
    /// (`self.forced_backend` is `Some`), only that backend is tried and its
    /// result — success or failure — is returned with no fallback.
    ///
    /// # Errors
    ///
    /// * [`Error::ForcedBackendUnavailable`] if the forced backend was not
    ///   initialized (or is compiled out on this platform).
    /// * The last backend's error when every enabled backend declines; or
    /// * [`Error::NoConverter`] when no backend at all is available.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        // Timing + format snapshot captured up front so every trace line can
        // report the same conversion identity and elapsed time.
        let start = Instant::now();
        let src_fmt = src.format();
        let dst_fmt = dst.format();
        log::trace!(
            "convert: {src_fmt:?}({:?}/{:?}) → {dst_fmt:?}({:?}/{:?}), \
             rotation={rotation:?}, flip={flip:?}, backend={:?}",
            src.dtype(),
            src.memory(),
            dst.dtype(),
            dst.memory(),
            self.forced_backend,
        );

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        let r = cpu.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=cpu result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => {
                    // On non-Linux builds the cfg strips this if-let, leaving
                    // only the Err below.
                    #[cfg(target_os = "linux")]
                    if let Some(g2d) = self.g2d.as_mut() {
                        let r = g2d.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=g2d result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("g2d".into()))
                }
                ForcedBackend::OpenGl => {
                    // Same pattern: stripped unless Linux + `opengl` feature.
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        let r = opengl.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=opengl result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // ── Auto fallback chain: OpenGL → G2D → CPU ──────────────────
        // Each accelerator's failure is only traced (not returned): a backend
        // may legitimately decline a format combination it does not support.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            match opengl.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=opengl for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto opengl declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        #[cfg(target_os = "linux")]
        if let Some(g2d) = self.g2d.as_mut() {
            match g2d.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=g2d for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto g2d declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        // CPU is the last resort: its error (unlike the accelerators') is
        // propagated to the caller.
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=cpu for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto cpu failed {src_fmt:?}→{dst_fmt:?}: {e}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }
1461
1462    fn draw_decoded_masks(
1463        &mut self,
1464        dst: &mut TensorDyn,
1465        detect: &[DetectBox],
1466        segmentation: &[Segmentation],
1467        overlay: MaskOverlay<'_>,
1468    ) -> Result<()> {
1469        let start = Instant::now();
1470
1471        if detect.is_empty() && segmentation.is_empty() {
1472            return Ok(());
1473        }
1474
1475        // Un-letterbox detect boxes and segmentation bboxes for rendering when
1476        // a letterbox was applied to prepare the model input.
1477        let lb_boxes: Vec<DetectBox>;
1478        let lb_segs: Vec<Segmentation>;
1479        let (detect, segmentation) = if let Some(lb) = overlay.letterbox {
1480            lb_boxes = detect.iter().map(|&d| unletter_bbox(d, lb)).collect();
1481            // Keep segmentation bboxes in sync with the transformed detect boxes
1482            // when we have a 1:1 correspondence (instance segmentation).
1483            lb_segs = if segmentation.len() == lb_boxes.len() {
1484                segmentation
1485                    .iter()
1486                    .zip(lb_boxes.iter())
1487                    .map(|(s, d)| Segmentation {
1488                        xmin: d.bbox.xmin,
1489                        ymin: d.bbox.ymin,
1490                        xmax: d.bbox.xmax,
1491                        ymax: d.bbox.ymax,
1492                        segmentation: s.segmentation.clone(),
1493                    })
1494                    .collect()
1495            } else {
1496                segmentation.to_vec()
1497            };
1498            (lb_boxes.as_slice(), lb_segs.as_slice())
1499        } else {
1500            (detect, segmentation)
1501        };
1502
1503        // ── Forced backend: no fallback chain ────────────────────────
1504        if let Some(forced) = self.forced_backend {
1505            return match forced {
1506                ForcedBackend::Cpu => {
1507                    // CPU needs background pre-blitted
1508                    let overlay = overlay.apply_background(dst)?;
1509                    if let Some(cpu) = self.cpu.as_mut() {
1510                        return cpu.draw_decoded_masks(dst, detect, segmentation, overlay);
1511                    }
1512                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1513                }
1514                ForcedBackend::G2d => Err(Error::NotSupported(
1515                    "g2d does not support draw_decoded_masks".into(),
1516                )),
1517                ForcedBackend::OpenGl => {
1518                    // GL handles background natively via GPU blit
1519                    #[cfg(target_os = "linux")]
1520                    #[cfg(feature = "opengl")]
1521                    if let Some(opengl) = self.opengl.as_mut() {
1522                        return opengl.draw_decoded_masks(dst, detect, segmentation, overlay);
1523                    }
1524                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1525                }
1526            };
1527        }
1528
1529        // skip G2D as it doesn't support rendering to image
1530
1531        // GL path: pass overlay with background — GL will GPU-blit if DMA-BUF
1532        #[cfg(target_os = "linux")]
1533        #[cfg(feature = "opengl")]
1534        if let Some(opengl) = self.opengl.as_mut() {
1535            log::trace!(
1536                "draw_decoded_masks started with opengl in {:?}",
1537                start.elapsed()
1538            );
1539            match opengl.draw_decoded_masks(dst, detect, segmentation, overlay) {
1540                Ok(_) => {
1541                    log::trace!("draw_decoded_masks with opengl in {:?}", start.elapsed());
1542                    return Ok(());
1543                }
1544                Err(e) => {
1545                    log::trace!("draw_decoded_masks didn't work with opengl: {e:?}")
1546                }
1547            }
1548        }
1549
1550        // CPU fallback: blit background via memcpy before rendering
1551        let overlay = overlay.apply_background(dst)?;
1552        log::trace!(
1553            "draw_decoded_masks started with cpu in {:?}",
1554            start.elapsed()
1555        );
1556        if let Some(cpu) = self.cpu.as_mut() {
1557            match cpu.draw_decoded_masks(dst, detect, segmentation, overlay) {
1558                Ok(_) => {
1559                    log::trace!("draw_decoded_masks with cpu in {:?}", start.elapsed());
1560                    return Ok(());
1561                }
1562                Err(e) => {
1563                    log::trace!("draw_decoded_masks didn't work with cpu: {e:?}");
1564                    return Err(e);
1565                }
1566            }
1567        }
1568        Err(Error::NoConverter)
1569    }
1570
    /// Render instance masks from proto-style model output (per-box mask
    /// coefficients in `detect` plus the prototype tensor in `proto_data`)
    /// onto `dst`.
    ///
    /// Backend selection mirrors the other draw methods: a forced backend
    /// (from `EDGEFIRST_FORCE_BACKEND`) is honored with no fallback;
    /// otherwise a hybrid CPU-materialize + OpenGL-overlay path is tried
    /// first, falling back to a CPU-only path.  G2D never supports this
    /// operation.
    fn draw_proto_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
        overlay: MaskOverlay<'_>,
    ) -> Result<()> {
        let start = Instant::now();

        // Nothing to draw; leave `dst` untouched (note: the background, if
        // any, is NOT blitted in this case).
        if detect.is_empty() {
            return Ok(());
        }

        // Un-letterbox detect boxes for rendering when a letterbox was applied
        // to prepare the model input.  The original `detect` coords are still
        // passed to `materialize_segmentations` (which needs model-space coords
        // to correctly crop the proto tensor) alongside `overlay.letterbox` so
        // it can emit `Segmentation` structs in output-image space.
        let lb_boxes: Vec<DetectBox>;
        let render_detect = if let Some(lb) = overlay.letterbox {
            lb_boxes = detect.iter().map(|&d| unletter_bbox(d, lb)).collect();
            lb_boxes.as_slice()
        } else {
            detect
        };

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    // CPU cannot blit the background on the GPU, so it must
                    // be applied to `dst` up front.
                    let overlay = overlay.apply_background(dst)?;
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_proto_masks(dst, render_detect, proto_data, overlay);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_proto_masks".into(),
                )),
                ForcedBackend::OpenGl => {
                    // When the GL backend is compiled out, only the error
                    // below remains after cfg-stripping.
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_proto_masks(dst, render_detect, proto_data, overlay);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        // Hybrid path: CPU materialize + GL overlay (benchmarked faster than
        // full-GPU draw_proto_masks on all tested platforms: 27× on imx8mp,
        // 4× on imx95, 2.5× on rpi5, 1.6× on x86).
        // GL handles background natively via GPU blit.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            let Some(cpu) = self.cpu.as_ref() else {
                return Err(Error::Internal(
                    "draw_proto_masks requires CPU backend for hybrid path".into(),
                ));
            };
            log::trace!(
                "draw_proto_masks started with hybrid (cpu+opengl) in {:?}",
                start.elapsed()
            );
            // Model-space `detect` here (not `render_detect`): the proto
            // tensor is cropped in model coordinates.
            let segmentation =
                cpu.materialize_segmentations(detect, proto_data, overlay.letterbox)?;
            match opengl.draw_decoded_masks(dst, render_detect, &segmentation, overlay) {
                Ok(_) => {
                    log::trace!(
                        "draw_proto_masks with hybrid (cpu+opengl) in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_proto_masks hybrid path failed, falling back to cpu: {e:?}");
                }
            }
        }

        // CPU-only fallback: blit background via memcpy
        let overlay = overlay.apply_background(dst)?;
        let Some(cpu) = self.cpu.as_mut() else {
            return Err(Error::Internal(
                "draw_proto_masks requires CPU backend for fallback path".into(),
            ));
        };
        log::trace!("draw_proto_masks started with cpu in {:?}", start.elapsed());
        cpu.draw_proto_masks(dst, render_detect, proto_data, overlay)
    }
1665
1666    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
1667        let start = Instant::now();
1668
1669        // ── Forced backend: no fallback chain ────────────────────────
1670        if let Some(forced) = self.forced_backend {
1671            return match forced {
1672                ForcedBackend::Cpu => {
1673                    if let Some(cpu) = self.cpu.as_mut() {
1674                        return cpu.set_class_colors(colors);
1675                    }
1676                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1677                }
1678                ForcedBackend::G2d => Err(Error::NotSupported(
1679                    "g2d does not support set_class_colors".into(),
1680                )),
1681                ForcedBackend::OpenGl => {
1682                    #[cfg(target_os = "linux")]
1683                    #[cfg(feature = "opengl")]
1684                    if let Some(opengl) = self.opengl.as_mut() {
1685                        return opengl.set_class_colors(colors);
1686                    }
1687                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1688                }
1689            };
1690        }
1691
1692        // skip G2D as it doesn't support rendering to image
1693
1694        #[cfg(target_os = "linux")]
1695        #[cfg(feature = "opengl")]
1696        if let Some(opengl) = self.opengl.as_mut() {
1697            log::trace!("image started with opengl in {:?}", start.elapsed());
1698            match opengl.set_class_colors(colors) {
1699                Ok(_) => {
1700                    log::trace!("colors set with opengl in {:?}", start.elapsed());
1701                    return Ok(());
1702                }
1703                Err(e) => {
1704                    log::trace!("colors didn't set with opengl: {e:?}")
1705                }
1706            }
1707        }
1708        log::trace!("image started with cpu in {:?}", start.elapsed());
1709        if let Some(cpu) = self.cpu.as_mut() {
1710            match cpu.set_class_colors(colors) {
1711                Ok(_) => {
1712                    log::trace!("colors set with cpu in {:?}", start.elapsed());
1713                    return Ok(());
1714                }
1715                Err(e) => {
1716                    log::trace!("colors didn't set with cpu: {e:?}");
1717                    return Err(e);
1718                }
1719            }
1720        }
1721        Err(Error::NoConverter)
1722    }
1723}
1724
1725// ---------------------------------------------------------------------------
1726// Image loading / saving helpers
1727// ---------------------------------------------------------------------------
1728
1729/// Read EXIF orientation from raw EXIF bytes and return (Rotation, Flip).
1730fn read_exif_orientation(exif_bytes: &[u8]) -> (Rotation, Flip) {
1731    let exifreader = exif::Reader::new();
1732    let Ok(exif_) = exifreader.read_raw(exif_bytes.to_vec()) else {
1733        return (Rotation::None, Flip::None);
1734    };
1735    let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
1736        return (Rotation::None, Flip::None);
1737    };
1738    match orientation.value.get_uint(0) {
1739        Some(1) => (Rotation::None, Flip::None),
1740        Some(2) => (Rotation::None, Flip::Horizontal),
1741        Some(3) => (Rotation::Rotate180, Flip::None),
1742        Some(4) => (Rotation::Rotate180, Flip::Horizontal),
1743        Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
1744        Some(6) => (Rotation::Clockwise90, Flip::None),
1745        Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
1746        Some(8) => (Rotation::CounterClockwise90, Flip::None),
1747        Some(v) => {
1748            log::warn!("broken orientation EXIF value: {v}");
1749            (Rotation::None, Flip::None)
1750        }
1751        None => (Rotation::None, Flip::None),
1752    }
1753}
1754
1755/// Map a [`PixelFormat`] to the zune-jpeg `ColorSpace` for decoding.
1756/// Returns `None` for formats that the JPEG decoder cannot output directly.
1757fn pixelfmt_to_colorspace(fmt: PixelFormat) -> Option<ColorSpace> {
1758    match fmt {
1759        PixelFormat::Rgb => Some(ColorSpace::RGB),
1760        PixelFormat::Rgba => Some(ColorSpace::RGBA),
1761        PixelFormat::Grey => Some(ColorSpace::Luma),
1762        _ => None,
1763    }
1764}
1765
1766/// Map a zune-jpeg `ColorSpace` to a [`PixelFormat`].
1767fn colorspace_to_pixelfmt(cs: ColorSpace) -> Option<PixelFormat> {
1768    match cs {
1769        ColorSpace::RGB => Some(PixelFormat::Rgb),
1770        ColorSpace::RGBA => Some(PixelFormat::Rgba),
1771        ColorSpace::Luma => Some(PixelFormat::Grey),
1772        _ => None,
1773    }
1774}
1775
/// Load a JPEG image from raw bytes and return a [`TensorDyn`].
///
/// `format` requests a specific output pixel format (must be one of the
/// formats the decoder supports directly: RGB, RGBA, or GREY — see
/// [`pixelfmt_to_colorspace`]); `None` decodes to RGB.  `memory` selects
/// the tensor backing store for the final result.  EXIF orientation, when
/// present, is applied via `rotate_flip_to_dyn`.
fn load_jpeg(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    // Ask the decoder for the requested colorspace (or RGB by default).
    let colour = match format {
        Some(f) => pixelfmt_to_colorspace(f)
            .ok_or_else(|| Error::NotSupported(format!("Unsupported image format {f:?}")))?,
        None => ColorSpace::RGB,
    };
    let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
    let mut decoder = JpegDecoder::new_with_options(image, options);
    decoder.decode_headers()?;

    let image_info = decoder.info().ok_or(Error::Internal(
        "JPEG did not return decoded image info".to_string(),
    ))?;

    // The decoder may not honor the requested colorspace exactly; query
    // what it will actually emit.
    let converted_cs = decoder
        .get_output_colorspace()
        .ok_or(Error::Internal("No output colorspace".to_string()))?;

    let converted_fmt = colorspace_to_pixelfmt(converted_cs).ok_or(Error::NotSupported(
        "Unsupported JPEG decoder output".to_string(),
    ))?;

    let dest_fmt = format.unwrap_or(converted_fmt);

    let (rotation, flip) = decoder
        .exif()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));

    let w = image_info.width as usize;
    let h = image_info.height as usize;

    // Fast path: no orientation transform needed — decode straight into
    // the destination tensor (converting formats via a scratch buffer
    // only if the decoder output differs from the requested format).
    if (rotation, flip) == (Rotation::None, Flip::None) {
        let mut img = Tensor::<u8>::image(w, h, dest_fmt, memory)?;

        if converted_fmt != dest_fmt {
            let tmp = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
            decoder.decode_into(&mut tmp.map()?)?;
            CPUProcessor::convert_format_pf(&tmp, &mut img, converted_fmt, dest_fmt)?;
            return Ok(TensorDyn::from(img));
        }
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }

    // Orientation transform required: decode (and convert if necessary)
    // into a plain-memory scratch tensor first…
    let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;

    if converted_fmt != dest_fmt {
        let tmp2 = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
        decoder.decode_into(&mut tmp2.map()?)?;
        CPUProcessor::convert_format_pf(&tmp2, &mut tmp, converted_fmt, dest_fmt)?;
    } else {
        decoder.decode_into(&mut tmp.map()?)?;
    }

    // …then rotate/flip into the final tensor with the requested memory.
    rotate_flip_to_dyn(&tmp, dest_fmt, rotation, flip, memory)
}
1838
1839/// Load a PNG image from raw bytes and return a [`TensorDyn`].
1840fn load_png(
1841    image: &[u8],
1842    format: Option<PixelFormat>,
1843    memory: Option<TensorMemory>,
1844) -> Result<TensorDyn> {
1845    let fmt = format.unwrap_or(PixelFormat::Rgb);
1846    let alpha = match fmt {
1847        PixelFormat::Rgb => false,
1848        PixelFormat::Rgba => true,
1849        _ => {
1850            return Err(Error::NotImplemented(
1851                "Unsupported image format".to_string(),
1852            ));
1853        }
1854    };
1855
1856    let options = DecoderOptions::default()
1857        .png_set_add_alpha_channel(alpha)
1858        .png_set_decode_animated(false);
1859    let mut decoder = PngDecoder::new_with_options(image, options);
1860    decoder.decode_headers()?;
1861    let image_info = decoder.get_info().ok_or(Error::Internal(
1862        "PNG did not return decoded image info".to_string(),
1863    ))?;
1864
1865    let (rotation, flip) = image_info
1866        .exif
1867        .as_ref()
1868        .map(|x| read_exif_orientation(x))
1869        .unwrap_or((Rotation::None, Flip::None));
1870
1871    if (rotation, flip) == (Rotation::None, Flip::None) {
1872        let img = Tensor::<u8>::image(image_info.width, image_info.height, fmt, memory)?;
1873        decoder.decode_into(&mut img.map()?)?;
1874        return Ok(TensorDyn::from(img));
1875    }
1876
1877    let tmp = Tensor::<u8>::image(
1878        image_info.width,
1879        image_info.height,
1880        fmt,
1881        Some(TensorMemory::Mem),
1882    )?;
1883    decoder.decode_into(&mut tmp.map()?)?;
1884
1885    rotate_flip_to_dyn(&tmp, fmt, rotation, flip, memory)
1886}
1887
1888/// Load an image from raw bytes (JPEG or PNG) and return a [`TensorDyn`].
1889///
1890/// The optional `format` specifies the desired output pixel format (e.g.,
1891/// [`PixelFormat::Rgb`], [`PixelFormat::Rgba`]); if `None`, the native
1892/// format of the file is used (typically RGB for JPEG).
1893///
1894/// # Examples
1895/// ```rust
1896/// use edgefirst_image::load_image;
1897/// use edgefirst_tensor::PixelFormat;
1898/// # fn main() -> Result<(), edgefirst_image::Error> {
1899/// let jpeg = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
1900/// let img = load_image(jpeg, Some(PixelFormat::Rgb), None)?;
1901/// assert_eq!(img.width(), Some(1280));
1902/// assert_eq!(img.height(), Some(720));
1903/// # Ok(())
1904/// # }
1905/// ```
1906pub fn load_image(
1907    image: &[u8],
1908    format: Option<PixelFormat>,
1909    memory: Option<TensorMemory>,
1910) -> Result<TensorDyn> {
1911    if let Ok(i) = load_jpeg(image, format, memory) {
1912        return Ok(i);
1913    }
1914    if let Ok(i) = load_png(image, format, memory) {
1915        return Ok(i);
1916    }
1917    Err(Error::NotSupported(
1918        "Could not decode as jpeg or png".to_string(),
1919    ))
1920}
1921
1922/// Save a [`TensorDyn`] image as a JPEG file.
1923///
1924/// Only packed RGB and RGBA formats are supported.
1925pub fn save_jpeg(tensor: &TensorDyn, path: impl AsRef<std::path::Path>, quality: u8) -> Result<()> {
1926    let t = tensor.as_u8().ok_or(Error::UnsupportedFormat(
1927        "save_jpeg requires u8 tensor".to_string(),
1928    ))?;
1929    let fmt = t.format().ok_or(Error::NotAnImage)?;
1930    if fmt.layout() != PixelLayout::Packed {
1931        return Err(Error::NotImplemented(
1932            "Saving planar images is not supported".to_string(),
1933        ));
1934    }
1935
1936    let colour = match fmt {
1937        PixelFormat::Rgb => jpeg_encoder::ColorType::Rgb,
1938        PixelFormat::Rgba => jpeg_encoder::ColorType::Rgba,
1939        _ => {
1940            return Err(Error::NotImplemented(
1941                "Unsupported image format for saving".to_string(),
1942            ));
1943        }
1944    };
1945
1946    let w = t.width().ok_or(Error::NotAnImage)?;
1947    let h = t.height().ok_or(Error::NotAnImage)?;
1948    let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
1949    let tensor_map = t.map()?;
1950
1951    encoder.encode(&tensor_map, w as u16, h as u16, colour)?;
1952
1953    Ok(())
1954}
1955
1956pub(crate) struct FunctionTimer<T: Display> {
1957    name: T,
1958    start: std::time::Instant,
1959}
1960
1961impl<T: Display> FunctionTimer<T> {
1962    pub fn new(name: T) -> Self {
1963        Self {
1964            name,
1965            start: std::time::Instant::now(),
1966        }
1967    }
1968}
1969
1970impl<T: Display> Drop for FunctionTimer<T> {
1971    fn drop(&mut self) {
1972        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
1973    }
1974}
1975
/// Default mask-overlay palette: 20 RGBA colors as normalized `[0, 1]`
/// floats, each with alpha 0.7.  Converted to bytes at compile time as
/// `DEFAULT_COLORS_U8` via `denorm`.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1998
1999const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
2000    let mut result = [[0; M]; N];
2001    let mut i = 0;
2002    while i < N {
2003        let mut j = 0;
2004        while j < M {
2005            result[i][j] = (a[i][j] * 255.0).round() as u8;
2006            j += 1;
2007        }
2008        i += 1;
2009    }
2010    result
2011}
2012
2013const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
2014
2015#[cfg(test)]
2016#[cfg_attr(coverage_nightly, coverage(off))]
2017mod image_tests {
2018    use super::*;
2019    use crate::{CPUProcessor, Rotation};
2020    #[cfg(target_os = "linux")]
2021    use edgefirst_tensor::is_dma_available;
2022    use edgefirst_tensor::{TensorMapTrait, TensorMemory, TensorTrait};
2023    use image::buffer::ConvertBuffer;
2024
2025    /// Test helper: call `ImageProcessorTrait::convert()` on two `TensorDyn`s
2026    /// by going through the `TensorDyn` API.
2027    ///
2028    /// Returns the `(src_image, dst_image)` reconstructed from the TensorDyn
2029    /// round-trip so the caller can feed them to `compare_images` etc.
2030    fn convert_img(
2031        proc: &mut dyn ImageProcessorTrait,
2032        src: TensorDyn,
2033        dst: TensorDyn,
2034        rotation: Rotation,
2035        flip: Flip,
2036        crop: Crop,
2037    ) -> (Result<()>, TensorDyn, TensorDyn) {
2038        let src_fourcc = src.format().unwrap();
2039        let dst_fourcc = dst.format().unwrap();
2040        let src_dyn = src;
2041        let mut dst_dyn = dst;
2042        let result = proc.convert(&src_dyn, &mut dst_dyn, rotation, flip, crop);
2043        let src_back = {
2044            let mut __t = src_dyn.into_u8().unwrap();
2045            __t.set_format(src_fourcc).unwrap();
2046            TensorDyn::from(__t)
2047        };
2048        let dst_back = {
2049            let mut __t = dst_dyn.into_u8().unwrap();
2050            __t.set_format(dst_fourcc).unwrap();
2051            TensorDyn::from(__t)
2052        };
2053        (result, src_back, dst_back)
2054    }
2055
    /// One-time test-process setup (runs before any test via `ctor`):
    /// installs an env_logger that defaults to `info` unless overridden
    /// through the `RUST_LOG` environment variable.
    #[ctor::ctor]
    fn init() {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }
2060
    /// Expands to the name of the enclosing function, without its module
    /// path.  Works by taking `std::any::type_name` of a nested item `f`
    /// (which includes the enclosing function's path), trimming the
    /// trailing `::f` (3 chars) and everything up to the last `:`.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
2076
2077    #[test]
2078    fn test_invalid_crop() {
2079        let src = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
2080        let dst = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
2081
2082        let crop = Crop::new()
2083            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
2084            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
2085
2086        let result = crop.check_crop_dyn(&src, &dst);
2087        assert!(matches!(
2088            result,
2089            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
2090        ));
2091
2092        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
2093        let result = crop.check_crop_dyn(&src, &dst);
2094        assert!(matches!(
2095            result,
2096            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
2097        ));
2098
2099        let crop = crop
2100            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
2101            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
2102        let result = crop.check_crop_dyn(&src, &dst);
2103        assert!(matches!(
2104            result,
2105            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
2106        ));
2107
2108        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
2109
2110        let result = crop.check_crop_dyn(&src, &dst);
2111        assert!(result.is_ok());
2112    }
2113
2114    #[test]
2115    fn test_invalid_tensor_format() -> Result<(), Error> {
2116        // 4D tensor cannot be set to a 3-channel pixel format
2117        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4, 1], None, None)?;
2118        let result = tensor.set_format(PixelFormat::Rgb);
2119        assert!(result.is_err(), "4D tensor should reject set_format");
2120
2121        // Tensor with wrong channel count for the format
2122        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4], None, None)?;
2123        let result = tensor.set_format(PixelFormat::Rgb);
2124        assert!(result.is_err(), "4-channel tensor should reject RGB format");
2125
2126        Ok(())
2127    }
2128
2129    #[test]
2130    fn test_invalid_image_file() -> Result<(), Error> {
2131        let result = crate::load_image(&[123; 5000], None, None);
2132        assert!(matches!(
2133            result,
2134            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
2135
2136        Ok(())
2137    }
2138
2139    #[test]
2140    fn test_invalid_jpeg_format() -> Result<(), Error> {
2141        let result = crate::load_image(&[123; 5000], Some(PixelFormat::Yuyv), None);
2142        assert!(matches!(
2143            result,
2144            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
2145
2146        Ok(())
2147    }
2148
2149    #[test]
2150    fn test_load_resize_save() {
2151        let file = include_bytes!(concat!(
2152            env!("CARGO_MANIFEST_DIR"),
2153            "/../../testdata/zidane.jpg"
2154        ));
2155        let img = crate::load_image(file, Some(PixelFormat::Rgba), None).unwrap();
2156        assert_eq!(img.width(), Some(1280));
2157        assert_eq!(img.height(), Some(720));
2158
2159        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None).unwrap();
2160        let mut converter = CPUProcessor::new();
2161        let (result, _img, dst) = convert_img(
2162            &mut converter,
2163            img,
2164            dst,
2165            Rotation::None,
2166            Flip::None,
2167            Crop::no_crop(),
2168        );
2169        result.unwrap();
2170        assert_eq!(dst.width(), Some(640));
2171        assert_eq!(dst.height(), Some(360));
2172
2173        crate::save_jpeg(&dst, "zidane_resized.jpg", 80).unwrap();
2174
2175        let file = std::fs::read("zidane_resized.jpg").unwrap();
2176        let img = crate::load_image(&file, None, None).unwrap();
2177        assert_eq!(img.width(), Some(640));
2178        assert_eq!(img.height(), Some(360));
2179        assert_eq!(img.format().unwrap(), PixelFormat::Rgb);
2180    }
2181
2182    #[test]
2183    fn test_from_tensor_planar() -> Result<(), Error> {
2184        let mut tensor = Tensor::new(&[3, 720, 1280], None, None)?;
2185        tensor.map()?.copy_from_slice(include_bytes!(concat!(
2186            env!("CARGO_MANIFEST_DIR"),
2187            "/../../testdata/camera720p.8bps"
2188        )));
2189        let planar = {
2190            tensor
2191                .set_format(PixelFormat::PlanarRgb)
2192                .map_err(|e| crate::Error::Internal(e.to_string()))?;
2193            TensorDyn::from(tensor)
2194        };
2195
2196        let rbga = load_bytes_to_tensor(
2197            1280,
2198            720,
2199            PixelFormat::Rgba,
2200            None,
2201            include_bytes!(concat!(
2202                env!("CARGO_MANIFEST_DIR"),
2203                "/../../testdata/camera720p.rgba"
2204            )),
2205        )?;
2206        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
2207
2208        Ok(())
2209    }
2210
2211    #[test]
2212    fn test_from_tensor_invalid_format() {
2213        // PixelFormat::from_fourcc_str returns None for unknown FourCC codes.
2214        // Since there's no "TEST" pixel format, this validates graceful handling.
2215        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2216    }
2217
2218    #[test]
2219    #[should_panic(expected = "Failed to save planar RGB image")]
2220    fn test_save_planar() {
2221        let planar_img = load_bytes_to_tensor(
2222            1280,
2223            720,
2224            PixelFormat::PlanarRgb,
2225            None,
2226            include_bytes!(concat!(
2227                env!("CARGO_MANIFEST_DIR"),
2228                "/../../testdata/camera720p.8bps"
2229            )),
2230        )
2231        .unwrap();
2232
2233        let save_path = "/tmp/planar_rgb.jpg";
2234        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save planar RGB image");
2235    }
2236
2237    #[test]
2238    #[should_panic(expected = "Failed to save YUYV image")]
2239    fn test_save_yuyv() {
2240        let planar_img = load_bytes_to_tensor(
2241            1280,
2242            720,
2243            PixelFormat::Yuyv,
2244            None,
2245            include_bytes!(concat!(
2246                env!("CARGO_MANIFEST_DIR"),
2247                "/../../testdata/camera720p.yuyv"
2248            )),
2249        )
2250        .unwrap();
2251
2252        let save_path = "/tmp/yuyv.jpg";
2253        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save YUYV image");
2254    }
2255
2256    #[test]
2257    fn test_rotation_angle() {
2258        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
2259        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
2260        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
2261        assert_eq!(
2262            Rotation::from_degrees_clockwise(270),
2263            Rotation::CounterClockwise90
2264        );
2265        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
2266        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
2267        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
2268        assert_eq!(
2269            Rotation::from_degrees_clockwise(630),
2270            Rotation::CounterClockwise90
2271        );
2272    }
2273
    /// `from_degrees_clockwise` accepts only multiples of 90; any other
    /// angle is a programming error and must panic with this message.
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        Rotation::from_degrees_clockwise(361);
    }
2279
    /// Verify the `EDGEFIRST_DISABLE_*` environment variables suppress
    /// each backend at `ImageProcessor::new()` time, and that disabling
    /// all of them makes `convert` fail with `Error::NoConverter`.
    ///
    /// NOTE(review): this test mutates process-global env vars with
    /// save/restore pairs; it is racy against any other test that reads
    /// the same variables concurrently (it already works around
    /// test_force_backend_cpu by clearing EDGEFIRST_FORCE_BACKEND).
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        // EDGEFIRST_FORCE_BACKEND takes precedence over EDGEFIRST_DISABLE_*,
        // so clear it for the duration of this test to avoid races with
        // test_force_backend_cpu running in parallel.
        let saved_force = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };

        // Disabling G2D alone leaves the processor without a g2d backend.
        #[cfg(target_os = "linux")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        // Disabling GL alone leaves the processor without an opengl backend.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        // Disabling CPU alone leaves the processor without a cpu backend.
        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // With all backends disabled, convert() must report NoConverter.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None)?;
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None)?;
        let (result, _src, _dst) = convert_img(
            &mut converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        assert!(matches!(result, Err(Error::NoConverter)));

        // Restore every variable to its pre-test value.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }
        match saved_force {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
        }

        Ok(())
    }
2361
2362    #[test]
2363    fn test_unsupported_conversion() {
2364        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2365        let dst = TensorDyn::image(640, 360, PixelFormat::Nv12, DType::U8, None).unwrap();
2366        let mut converter = ImageProcessor::new().unwrap();
2367        let (result, _src, _dst) = convert_img(
2368            &mut converter,
2369            src,
2370            dst,
2371            Rotation::None,
2372            Flip::None,
2373            Crop::no_crop(),
2374        );
2375        log::debug!("result: {:?}", result);
2376        assert!(matches!(
2377            result,
2378            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2379        ));
2380    }
2381
2382    #[test]
2383    fn test_load_grey() {
2384        let grey_img = crate::load_image(
2385            include_bytes!(concat!(
2386                env!("CARGO_MANIFEST_DIR"),
2387                "/../../testdata/grey.jpg"
2388            )),
2389            Some(PixelFormat::Rgba),
2390            None,
2391        )
2392        .unwrap();
2393
2394        let grey_but_rgb_img = crate::load_image(
2395            include_bytes!(concat!(
2396                env!("CARGO_MANIFEST_DIR"),
2397                "/../../testdata/grey-rgb.jpg"
2398            )),
2399            Some(PixelFormat::Rgba),
2400            None,
2401        )
2402        .unwrap();
2403
2404        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2405    }
2406
2407    #[test]
2408    fn test_new_nv12() {
2409        let nv12 = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2410        assert_eq!(nv12.height(), Some(720));
2411        assert_eq!(nv12.width(), Some(1280));
2412        assert_eq!(nv12.format().unwrap(), PixelFormat::Nv12);
2413        // PixelFormat::Nv12.channels() returns 1 (luma plane channel count)
2414        assert_eq!(nv12.format().unwrap().channels(), 1);
2415        assert!(nv12.format().is_some_and(
2416            |f| f.layout() == PixelLayout::Planar || f.layout() == PixelLayout::SemiPlanar
2417        ))
2418    }
2419
2420    #[test]
2421    #[cfg(target_os = "linux")]
2422    fn test_new_image_converter() {
2423        let dst_width = 640;
2424        let dst_height = 360;
2425        let file = include_bytes!(concat!(
2426            env!("CARGO_MANIFEST_DIR"),
2427            "/../../testdata/zidane.jpg"
2428        ))
2429        .to_vec();
2430        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2431
2432        let mut converter = ImageProcessor::new().unwrap();
2433        let converter_dst = converter
2434            .create_image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
2435            .unwrap();
2436        let (result, src, converter_dst) = convert_img(
2437            &mut converter,
2438            src,
2439            converter_dst,
2440            Rotation::None,
2441            Flip::None,
2442            Crop::no_crop(),
2443        );
2444        result.unwrap();
2445
2446        let cpu_dst =
2447            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2448        let mut cpu_converter = CPUProcessor::new();
2449        let (result, _src, cpu_dst) = convert_img(
2450            &mut cpu_converter,
2451            src,
2452            cpu_dst,
2453            Rotation::None,
2454            Flip::None,
2455            Crop::no_crop(),
2456        );
2457        result.unwrap();
2458
2459        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
2460    }
2461
2462    #[test]
2463    #[cfg(target_os = "linux")]
2464    fn test_create_image_dtype_i8() {
2465        let mut converter = ImageProcessor::new().unwrap();
2466
2467        // I8 image should allocate successfully via create_image
2468        let dst = converter
2469            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
2470            .unwrap();
2471        assert_eq!(dst.dtype(), DType::I8);
2472        assert!(dst.width() == Some(320));
2473        assert!(dst.height() == Some(240));
2474        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
2475
2476        // U8 for comparison
2477        let dst_u8 = converter
2478            .create_image(320, 240, PixelFormat::Rgb, DType::U8, None)
2479            .unwrap();
2480        assert_eq!(dst_u8.dtype(), DType::U8);
2481
2482        // Convert into I8 dst should succeed
2483        let file = include_bytes!(concat!(
2484            env!("CARGO_MANIFEST_DIR"),
2485            "/../../testdata/zidane.jpg"
2486        ))
2487        .to_vec();
2488        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2489        let mut dst_i8 = converter
2490            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
2491            .unwrap();
2492        converter
2493            .convert(
2494                &src,
2495                &mut dst_i8,
2496                Rotation::None,
2497                Flip::None,
2498                Crop::no_crop(),
2499            )
2500            .unwrap();
2501    }
2502
2503    #[test]
2504    #[ignore] // Hangs on desktop platforms where DMA-buf is unavailable and PBO
2505              // fallback triggers a GPU driver hang during SHM→texture upload (e.g.,
2506              // NVIDIA without /dev/dma_heap permissions). Works on embedded targets.
2507    fn test_crop_skip() {
2508        let file = include_bytes!(concat!(
2509            env!("CARGO_MANIFEST_DIR"),
2510            "/../../testdata/zidane.jpg"
2511        ))
2512        .to_vec();
2513        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2514
2515        let mut converter = ImageProcessor::new().unwrap();
2516        let converter_dst = converter
2517            .create_image(1280, 720, PixelFormat::Rgba, DType::U8, None)
2518            .unwrap();
2519        let crop = Crop::new()
2520            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
2521            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
2522        let (result, src, converter_dst) = convert_img(
2523            &mut converter,
2524            src,
2525            converter_dst,
2526            Rotation::None,
2527            Flip::None,
2528            crop,
2529        );
2530        result.unwrap();
2531
2532        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
2533        let mut cpu_converter = CPUProcessor::new();
2534        let (result, _src, cpu_dst) = convert_img(
2535            &mut cpu_converter,
2536            src,
2537            cpu_dst,
2538            Rotation::None,
2539            Flip::None,
2540            crop,
2541        );
2542        result.unwrap();
2543
2544        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
2545    }
2546
2547    #[test]
2548    fn test_invalid_pixel_format() {
2549        // PixelFormat::from_fourcc returns None for unknown formats,
2550        // so TensorDyn::image cannot be called with an invalid format.
2551        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2552    }
2553
    // Helper function to check if G2D library is available (Linux/i.MX8 only)
    // The probe result is memoized so only the first caller pays the cost of
    // attempting to construct a G2DProcessor; subsequent tests reuse it.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    // Returns true when a G2DProcessor can be constructed on this machine.
    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2562
2563    #[cfg(target_os = "linux")]
2564    #[cfg(feature = "opengl")]
2565    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2566
2567    #[cfg(target_os = "linux")]
2568    #[cfg(feature = "opengl")]
2569    // Helper function to check if OpenGL is available
2570    fn is_opengl_available() -> bool {
2571        #[cfg(all(target_os = "linux", feature = "opengl"))]
2572        {
2573            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2574        }
2575
2576        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2577        {
2578            false
2579        }
2580    }
2581
    #[test]
    fn test_load_jpeg_with_exif() {
        // Loading a JPEG with an EXIF orientation tag should apply the
        // rotation automatically: the rotated file loads as 720x1280
        // (portrait) even though zidane.jpg itself is 1280x720 (landscape).
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif.jpg"
        ))
        .to_vec();
        let loaded = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        assert_eq!(loaded.height(), Some(1280));
        assert_eq!(loaded.width(), Some(720));

        // Build the expected result from the unrotated original.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Width/height are deliberately swapped: rotating the landscape
        // source 90° clockwise yields a portrait-shaped destination.
        let (dst_width, dst_height) = (cpu_src.height().unwrap(), cpu_src.width().unwrap());

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // Rotate the original 90° on the CPU and compare with the
        // EXIF-auto-rotated load.
        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Clockwise90,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2619
2620    #[test]
2621    fn test_load_png_with_exif() {
2622        let file = include_bytes!(concat!(
2623            env!("CARGO_MANIFEST_DIR"),
2624            "/../../testdata/zidane_rotated_exif_180.png"
2625        ))
2626        .to_vec();
2627        let loaded = crate::load_png(&file, Some(PixelFormat::Rgba), None).unwrap();
2628
2629        assert_eq!(loaded.height(), Some(720));
2630        assert_eq!(loaded.width(), Some(1280));
2631
2632        let file = include_bytes!(concat!(
2633            env!("CARGO_MANIFEST_DIR"),
2634            "/../../testdata/zidane.jpg"
2635        ))
2636        .to_vec();
2637        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2638
2639        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
2640        let mut cpu_converter = CPUProcessor::new();
2641
2642        let (result, _cpu_src, cpu_dst) = convert_img(
2643            &mut cpu_converter,
2644            cpu_src,
2645            cpu_dst,
2646            Rotation::Rotate180,
2647            Flip::None,
2648            Crop::no_crop(),
2649        );
2650        result.unwrap();
2651
2652        compare_images(&loaded, &cpu_dst, 0.98, function!());
2653    }
2654
2655    #[test]
2656    #[cfg(target_os = "linux")]
2657    fn test_g2d_resize() {
2658        if !is_g2d_available() {
2659            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
2660            return;
2661        }
2662        if !is_dma_available() {
2663            eprintln!(
2664                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2665            );
2666            return;
2667        }
2668
2669        let dst_width = 640;
2670        let dst_height = 360;
2671        let file = include_bytes!(concat!(
2672            env!("CARGO_MANIFEST_DIR"),
2673            "/../../testdata/zidane.jpg"
2674        ))
2675        .to_vec();
2676        let src =
2677            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
2678
2679        let g2d_dst = TensorDyn::image(
2680            dst_width,
2681            dst_height,
2682            PixelFormat::Rgba,
2683            DType::U8,
2684            Some(TensorMemory::Dma),
2685        )
2686        .unwrap();
2687        let mut g2d_converter = G2DProcessor::new().unwrap();
2688        let (result, src, g2d_dst) = convert_img(
2689            &mut g2d_converter,
2690            src,
2691            g2d_dst,
2692            Rotation::None,
2693            Flip::None,
2694            Crop::no_crop(),
2695        );
2696        result.unwrap();
2697
2698        let cpu_dst =
2699            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2700        let mut cpu_converter = CPUProcessor::new();
2701        let (result, _src, cpu_dst) = convert_img(
2702            &mut cpu_converter,
2703            src,
2704            cpu_dst,
2705            Rotation::None,
2706            Flip::None,
2707            Crop::no_crop(),
2708        );
2709        result.unwrap();
2710
2711        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2712    }
2713
2714    #[test]
2715    #[cfg(target_os = "linux")]
2716    #[cfg(feature = "opengl")]
2717    fn test_opengl_resize() {
2718        if !is_opengl_available() {
2719            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2720            return;
2721        }
2722
2723        let dst_width = 640;
2724        let dst_height = 360;
2725        let file = include_bytes!(concat!(
2726            env!("CARGO_MANIFEST_DIR"),
2727            "/../../testdata/zidane.jpg"
2728        ))
2729        .to_vec();
2730        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2731
2732        let cpu_dst =
2733            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2734        let mut cpu_converter = CPUProcessor::new();
2735        let (result, src, cpu_dst) = convert_img(
2736            &mut cpu_converter,
2737            src,
2738            cpu_dst,
2739            Rotation::None,
2740            Flip::None,
2741            Crop::no_crop(),
2742        );
2743        result.unwrap();
2744
2745        let mut src = src;
2746        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2747
2748        for _ in 0..5 {
2749            let gl_dst =
2750                TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
2751                    .unwrap();
2752            let (result, src_back, gl_dst) = convert_img(
2753                &mut gl_converter,
2754                src,
2755                gl_dst,
2756                Rotation::None,
2757                Flip::None,
2758                Crop::no_crop(),
2759            );
2760            result.unwrap();
2761            src = src_back;
2762
2763            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2764        }
2765    }
2766
2767    #[test]
2768    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2769    #[cfg(target_os = "linux")]
2770    #[cfg(feature = "opengl")]
2771    fn test_opengl_10_threads() {
2772        if !is_opengl_available() {
2773            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2774            return;
2775        }
2776
2777        let handles: Vec<_> = (0..10)
2778            .map(|i| {
2779                std::thread::Builder::new()
2780                    .name(format!("Thread {i}"))
2781                    .spawn(test_opengl_resize)
2782                    .unwrap()
2783            })
2784            .collect();
2785        handles.into_iter().for_each(|h| {
2786            if let Err(e) = h.join() {
2787                std::panic::resume_unwind(e)
2788            }
2789        });
2790    }
2791
2792    #[test]
2793    #[cfg(target_os = "linux")]
2794    #[cfg(feature = "opengl")]
2795    fn test_opengl_grey() {
2796        if !is_opengl_available() {
2797            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2798            return;
2799        }
2800
2801        let img = crate::load_image(
2802            include_bytes!(concat!(
2803                env!("CARGO_MANIFEST_DIR"),
2804                "/../../testdata/grey.jpg"
2805            )),
2806            Some(PixelFormat::Grey),
2807            None,
2808        )
2809        .unwrap();
2810
2811        let gl_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
2812        let cpu_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
2813
2814        let mut converter = CPUProcessor::new();
2815
2816        let (result, img, cpu_dst) = convert_img(
2817            &mut converter,
2818            img,
2819            cpu_dst,
2820            Rotation::None,
2821            Flip::None,
2822            Crop::no_crop(),
2823        );
2824        result.unwrap();
2825
2826        let mut gl = GLProcessorThreaded::new(None).unwrap();
2827        let (result, _img, gl_dst) = convert_img(
2828            &mut gl,
2829            img,
2830            gl_dst,
2831            Rotation::None,
2832            Flip::None,
2833            Crop::no_crop(),
2834        );
2835        result.unwrap();
2836
2837        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2838    }
2839
2840    #[test]
2841    #[cfg(target_os = "linux")]
2842    fn test_g2d_src_crop() {
2843        if !is_g2d_available() {
2844            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
2845            return;
2846        }
2847        if !is_dma_available() {
2848            eprintln!(
2849                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2850            );
2851            return;
2852        }
2853
2854        let dst_width = 640;
2855        let dst_height = 640;
2856        let file = include_bytes!(concat!(
2857            env!("CARGO_MANIFEST_DIR"),
2858            "/../../testdata/zidane.jpg"
2859        ))
2860        .to_vec();
2861        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2862
2863        let cpu_dst =
2864            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2865        let mut cpu_converter = CPUProcessor::new();
2866        let crop = Crop {
2867            src_rect: Some(Rect {
2868                left: 0,
2869                top: 0,
2870                width: 640,
2871                height: 360,
2872            }),
2873            dst_rect: None,
2874            dst_color: None,
2875        };
2876        let (result, src, cpu_dst) = convert_img(
2877            &mut cpu_converter,
2878            src,
2879            cpu_dst,
2880            Rotation::None,
2881            Flip::None,
2882            crop,
2883        );
2884        result.unwrap();
2885
2886        let g2d_dst =
2887            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2888        let mut g2d_converter = G2DProcessor::new().unwrap();
2889        let (result, _src, g2d_dst) = convert_img(
2890            &mut g2d_converter,
2891            src,
2892            g2d_dst,
2893            Rotation::None,
2894            Flip::None,
2895            crop,
2896        );
2897        result.unwrap();
2898
2899        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2900    }
2901
2902    #[test]
2903    #[cfg(target_os = "linux")]
2904    fn test_g2d_dst_crop() {
2905        if !is_g2d_available() {
2906            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
2907            return;
2908        }
2909        if !is_dma_available() {
2910            eprintln!(
2911                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2912            );
2913            return;
2914        }
2915
2916        let dst_width = 640;
2917        let dst_height = 640;
2918        let file = include_bytes!(concat!(
2919            env!("CARGO_MANIFEST_DIR"),
2920            "/../../testdata/zidane.jpg"
2921        ))
2922        .to_vec();
2923        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2924
2925        let cpu_dst =
2926            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2927        let mut cpu_converter = CPUProcessor::new();
2928        let crop = Crop {
2929            src_rect: None,
2930            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2931            dst_color: None,
2932        };
2933        let (result, src, cpu_dst) = convert_img(
2934            &mut cpu_converter,
2935            src,
2936            cpu_dst,
2937            Rotation::None,
2938            Flip::None,
2939            crop,
2940        );
2941        result.unwrap();
2942
2943        let g2d_dst =
2944            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2945        let mut g2d_converter = G2DProcessor::new().unwrap();
2946        let (result, _src, g2d_dst) = convert_img(
2947            &mut g2d_converter,
2948            src,
2949            g2d_dst,
2950            Rotation::None,
2951            Flip::None,
2952            crop,
2953        );
2954        result.unwrap();
2955
2956        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2957    }
2958
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_all_rgba() {
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        // Compare G2D against the CPU reference with both src and dst crop
        // rectangles set, across the rotation x flip matrix.
        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let src_dyn = src;

        let mut cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let mut g2d_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();

        let crop = Crop {
            src_rect: Some(Rect::new(50, 120, 1024, 576)),
            dst_rect: Some(Rect::new(100, 100, 512, 288)),
            dst_color: None,
        };

        for rot in [
            Rotation::None,
            Rotation::Clockwise90,
            Rotation::Rotate180,
            Rotation::CounterClockwise90,
        ] {
            // Pre-fill both destinations with 114 so the area outside
            // dst_rect is identical in both images before comparing.
            cpu_dst
                .as_u8()
                .unwrap()
                .map()
                .unwrap()
                .as_mut_slice()
                .fill(114);
            g2d_dst
                .as_u8()
                .unwrap()
                .map()
                .unwrap()
                .as_mut_slice()
                .fill(114);
            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
                // NOTE(review): `rot` and `flip` only appear in the failure
                // label below — both convert calls pass Rotation::None /
                // Flip::None. This looks like the loop variables were meant
                // to be forwarded to convert(); confirm intent.
                //
                // The dst tensor is moved into a temporary, converted, then
                // rebuilt via into_u8()/set_format()/from() so the typed
                // tensor can be reused on the next iteration.
                let mut cpu_dst_dyn = cpu_dst;
                cpu_converter
                    .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
                    .unwrap();
                cpu_dst = {
                    let mut __t = cpu_dst_dyn.into_u8().unwrap();
                    __t.set_format(PixelFormat::Rgba).unwrap();
                    TensorDyn::from(__t)
                };

                let mut g2d_dst_dyn = g2d_dst;
                g2d_converter
                    .convert(&src_dyn, &mut g2d_dst_dyn, Rotation::None, Flip::None, crop)
                    .unwrap();
                g2d_dst = {
                    let mut __t = g2d_dst_dyn.into_u8().unwrap();
                    __t.set_format(PixelFormat::Rgba).unwrap();
                    TensorDyn::from(__t)
                };

                compare_images(
                    &g2d_dst,
                    &cpu_dst,
                    0.98,
                    &format!("{} {:?} {:?}", function!(), rot, flip),
                );
            }
        }
    }
3046
3047    #[test]
3048    #[cfg(target_os = "linux")]
3049    #[cfg(feature = "opengl")]
3050    fn test_opengl_src_crop() {
3051        if !is_opengl_available() {
3052            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3053            return;
3054        }
3055
3056        let dst_width = 640;
3057        let dst_height = 360;
3058        let file = include_bytes!(concat!(
3059            env!("CARGO_MANIFEST_DIR"),
3060            "/../../testdata/zidane.jpg"
3061        ))
3062        .to_vec();
3063        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
3064        let crop = Crop {
3065            src_rect: Some(Rect {
3066                left: 320,
3067                top: 180,
3068                width: 1280 - 320,
3069                height: 720 - 180,
3070            }),
3071            dst_rect: None,
3072            dst_color: None,
3073        };
3074
3075        let cpu_dst =
3076            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3077        let mut cpu_converter = CPUProcessor::new();
3078        let (result, src, cpu_dst) = convert_img(
3079            &mut cpu_converter,
3080            src,
3081            cpu_dst,
3082            Rotation::None,
3083            Flip::None,
3084            crop,
3085        );
3086        result.unwrap();
3087
3088        let gl_dst =
3089            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3090        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3091        let (result, _src, gl_dst) = convert_img(
3092            &mut gl_converter,
3093            src,
3094            gl_dst,
3095            Rotation::None,
3096            Flip::None,
3097            crop,
3098        );
3099        result.unwrap();
3100
3101        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3102    }
3103
3104    #[test]
3105    #[cfg(target_os = "linux")]
3106    #[cfg(feature = "opengl")]
3107    fn test_opengl_dst_crop() {
3108        if !is_opengl_available() {
3109            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3110            return;
3111        }
3112
3113        let dst_width = 640;
3114        let dst_height = 640;
3115        let file = include_bytes!(concat!(
3116            env!("CARGO_MANIFEST_DIR"),
3117            "/../../testdata/zidane.jpg"
3118        ))
3119        .to_vec();
3120        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
3121
3122        let cpu_dst =
3123            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3124        let mut cpu_converter = CPUProcessor::new();
3125        let crop = Crop {
3126            src_rect: None,
3127            dst_rect: Some(Rect::new(100, 100, 512, 288)),
3128            dst_color: None,
3129        };
3130        let (result, src, cpu_dst) = convert_img(
3131            &mut cpu_converter,
3132            src,
3133            cpu_dst,
3134            Rotation::None,
3135            Flip::None,
3136            crop,
3137        );
3138        result.unwrap();
3139
3140        let gl_dst =
3141            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3142        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3143        let (result, _src, gl_dst) = convert_img(
3144            &mut gl_converter,
3145            src,
3146            gl_dst,
3147            Rotation::None,
3148            Flip::None,
3149            crop,
3150        );
3151        result.unwrap();
3152
3153        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3154    }
3155
3156    #[test]
3157    #[cfg(target_os = "linux")]
3158    #[cfg(feature = "opengl")]
3159    fn test_opengl_all_rgba() {
3160        if !is_opengl_available() {
3161            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3162            return;
3163        }
3164
3165        let dst_width = 640;
3166        let dst_height = 640;
3167        let file = include_bytes!(concat!(
3168            env!("CARGO_MANIFEST_DIR"),
3169            "/../../testdata/zidane.jpg"
3170        ))
3171        .to_vec();
3172
3173        let mut cpu_converter = CPUProcessor::new();
3174
3175        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3176
3177        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
3178        if is_dma_available() {
3179            mem.push(Some(TensorMemory::Dma));
3180        }
3181        let crop = Crop {
3182            src_rect: Some(Rect::new(50, 120, 1024, 576)),
3183            dst_rect: Some(Rect::new(100, 100, 512, 288)),
3184            dst_color: None,
3185        };
3186        for m in mem {
3187            let src = crate::load_image(&file, Some(PixelFormat::Rgba), m).unwrap();
3188            let src_dyn = src;
3189
3190            for rot in [
3191                Rotation::None,
3192                Rotation::Clockwise90,
3193                Rotation::Rotate180,
3194                Rotation::CounterClockwise90,
3195            ] {
3196                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
3197                    let cpu_dst =
3198                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
3199                            .unwrap();
3200                    let gl_dst =
3201                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
3202                            .unwrap();
3203                    cpu_dst
3204                        .as_u8()
3205                        .unwrap()
3206                        .map()
3207                        .unwrap()
3208                        .as_mut_slice()
3209                        .fill(114);
3210                    gl_dst
3211                        .as_u8()
3212                        .unwrap()
3213                        .map()
3214                        .unwrap()
3215                        .as_mut_slice()
3216                        .fill(114);
3217
3218                    let mut cpu_dst_dyn = cpu_dst;
3219                    cpu_converter
3220                        .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
3221                        .unwrap();
3222                    let cpu_dst = {
3223                        let mut __t = cpu_dst_dyn.into_u8().unwrap();
3224                        __t.set_format(PixelFormat::Rgba).unwrap();
3225                        TensorDyn::from(__t)
3226                    };
3227
3228                    let mut gl_dst_dyn = gl_dst;
3229                    gl_converter
3230                        .convert(&src_dyn, &mut gl_dst_dyn, Rotation::None, Flip::None, crop)
3231                        .map_err(|e| {
3232                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
3233                            e
3234                        })
3235                        .unwrap();
3236                    let gl_dst = {
3237                        let mut __t = gl_dst_dyn.into_u8().unwrap();
3238                        __t.set_format(PixelFormat::Rgba).unwrap();
3239                        TensorDyn::from(__t)
3240                    };
3241
3242                    compare_images(
3243                        &gl_dst,
3244                        &cpu_dst,
3245                        0.98,
3246                        &format!("{} {:?} {:?}", function!(), rot, flip),
3247                    );
3248                }
3249            }
3250        }
3251    }
3252
3253    #[test]
3254    #[cfg(target_os = "linux")]
3255    fn test_cpu_rotate() {
3256        for rot in [
3257            Rotation::Clockwise90,
3258            Rotation::Rotate180,
3259            Rotation::CounterClockwise90,
3260        ] {
3261            test_cpu_rotate_(rot);
3262        }
3263    }
3264
3265    #[cfg(target_os = "linux")]
3266    fn test_cpu_rotate_(rot: Rotation) {
3267        // This test rotates the image 4 times and checks that the image was returned to
3268        // be the same Currently doesn't check if rotations actually rotated in
3269        // right direction
3270        let file = include_bytes!(concat!(
3271            env!("CARGO_MANIFEST_DIR"),
3272            "/../../testdata/zidane.jpg"
3273        ))
3274        .to_vec();
3275
3276        let unchanged_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
3277        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
3278
3279        let (dst_width, dst_height) = match rot {
3280            Rotation::None | Rotation::Rotate180 => (src.width().unwrap(), src.height().unwrap()),
3281            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
3282                (src.height().unwrap(), src.width().unwrap())
3283            }
3284        };
3285
3286        let cpu_dst =
3287            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3288        let mut cpu_converter = CPUProcessor::new();
3289
3290        // After rotating 4 times, the image should be the same as the original
3291
3292        let (result, src, cpu_dst) = convert_img(
3293            &mut cpu_converter,
3294            src,
3295            cpu_dst,
3296            rot,
3297            Flip::None,
3298            Crop::no_crop(),
3299        );
3300        result.unwrap();
3301
3302        let (result, cpu_dst, src) = convert_img(
3303            &mut cpu_converter,
3304            cpu_dst,
3305            src,
3306            rot,
3307            Flip::None,
3308            Crop::no_crop(),
3309        );
3310        result.unwrap();
3311
3312        let (result, src, cpu_dst) = convert_img(
3313            &mut cpu_converter,
3314            src,
3315            cpu_dst,
3316            rot,
3317            Flip::None,
3318            Crop::no_crop(),
3319        );
3320        result.unwrap();
3321
3322        let (result, _cpu_dst, src) = convert_img(
3323            &mut cpu_converter,
3324            cpu_dst,
3325            src,
3326            rot,
3327            Flip::None,
3328            Crop::no_crop(),
3329        );
3330        result.unwrap();
3331
3332        compare_images(&src, &unchanged_src, 0.98, function!());
3333    }
3334
3335    #[test]
3336    #[cfg(target_os = "linux")]
3337    #[cfg(feature = "opengl")]
3338    fn test_opengl_rotate() {
3339        if !is_opengl_available() {
3340            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3341            return;
3342        }
3343
3344        let size = (1280, 720);
3345        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
3346
3347        if is_dma_available() {
3348            mem.push(Some(TensorMemory::Dma));
3349        }
3350        for m in mem {
3351            for rot in [
3352                Rotation::Clockwise90,
3353                Rotation::Rotate180,
3354                Rotation::CounterClockwise90,
3355            ] {
3356                test_opengl_rotate_(size, rot, m);
3357            }
3358        }
3359    }
3360
3361    #[cfg(target_os = "linux")]
3362    #[cfg(feature = "opengl")]
3363    fn test_opengl_rotate_(
3364        size: (usize, usize),
3365        rot: Rotation,
3366        tensor_memory: Option<TensorMemory>,
3367    ) {
3368        let (dst_width, dst_height) = match rot {
3369            Rotation::None | Rotation::Rotate180 => size,
3370            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3371        };
3372
3373        let file = include_bytes!(concat!(
3374            env!("CARGO_MANIFEST_DIR"),
3375            "/../../testdata/zidane.jpg"
3376        ))
3377        .to_vec();
3378        let src = crate::load_image(&file, Some(PixelFormat::Rgba), tensor_memory).unwrap();
3379
3380        let cpu_dst =
3381            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3382        let mut cpu_converter = CPUProcessor::new();
3383
3384        let (result, mut src, cpu_dst) = convert_img(
3385            &mut cpu_converter,
3386            src,
3387            cpu_dst,
3388            rot,
3389            Flip::None,
3390            Crop::no_crop(),
3391        );
3392        result.unwrap();
3393
3394        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3395
3396        for _ in 0..5 {
3397            let gl_dst = TensorDyn::image(
3398                dst_width,
3399                dst_height,
3400                PixelFormat::Rgba,
3401                DType::U8,
3402                tensor_memory,
3403            )
3404            .unwrap();
3405            let (result, src_back, gl_dst) = convert_img(
3406                &mut gl_converter,
3407                src,
3408                gl_dst,
3409                rot,
3410                Flip::None,
3411                Crop::no_crop(),
3412            );
3413            result.unwrap();
3414            src = src_back;
3415            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3416        }
3417    }
3418
3419    #[test]
3420    #[cfg(target_os = "linux")]
3421    fn test_g2d_rotate() {
3422        if !is_g2d_available() {
3423            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3424            return;
3425        }
3426        if !is_dma_available() {
3427            eprintln!(
3428                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3429            );
3430            return;
3431        }
3432
3433        let size = (1280, 720);
3434        for rot in [
3435            Rotation::Clockwise90,
3436            Rotation::Rotate180,
3437            Rotation::CounterClockwise90,
3438        ] {
3439            test_g2d_rotate_(size, rot);
3440        }
3441    }
3442
3443    #[cfg(target_os = "linux")]
3444    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
3445        let (dst_width, dst_height) = match rot {
3446            Rotation::None | Rotation::Rotate180 => size,
3447            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3448        };
3449
3450        let file = include_bytes!(concat!(
3451            env!("CARGO_MANIFEST_DIR"),
3452            "/../../testdata/zidane.jpg"
3453        ))
3454        .to_vec();
3455        let src =
3456            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
3457
3458        let cpu_dst =
3459            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3460        let mut cpu_converter = CPUProcessor::new();
3461
3462        let (result, src, cpu_dst) = convert_img(
3463            &mut cpu_converter,
3464            src,
3465            cpu_dst,
3466            rot,
3467            Flip::None,
3468            Crop::no_crop(),
3469        );
3470        result.unwrap();
3471
3472        let g2d_dst = TensorDyn::image(
3473            dst_width,
3474            dst_height,
3475            PixelFormat::Rgba,
3476            DType::U8,
3477            Some(TensorMemory::Dma),
3478        )
3479        .unwrap();
3480        let mut g2d_converter = G2DProcessor::new().unwrap();
3481
3482        let (result, _src, g2d_dst) = convert_img(
3483            &mut g2d_converter,
3484            src,
3485            g2d_dst,
3486            rot,
3487            Flip::None,
3488            Crop::no_crop(),
3489        );
3490        result.unwrap();
3491
3492        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3493    }
3494
3495    #[test]
3496    fn test_rgba_to_yuyv_resize_cpu() {
3497        let src = load_bytes_to_tensor(
3498            1280,
3499            720,
3500            PixelFormat::Rgba,
3501            None,
3502            include_bytes!(concat!(
3503                env!("CARGO_MANIFEST_DIR"),
3504                "/../../testdata/camera720p.rgba"
3505            )),
3506        )
3507        .unwrap();
3508
3509        let (dst_width, dst_height) = (640, 360);
3510
3511        let dst =
3512            TensorDyn::image(dst_width, dst_height, PixelFormat::Yuyv, DType::U8, None).unwrap();
3513
3514        let dst_through_yuyv =
3515            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3516        let dst_direct =
3517            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3518
3519        let mut cpu_converter = CPUProcessor::new();
3520
3521        let (result, src, dst) = convert_img(
3522            &mut cpu_converter,
3523            src,
3524            dst,
3525            Rotation::None,
3526            Flip::None,
3527            Crop::no_crop(),
3528        );
3529        result.unwrap();
3530
3531        let (result, _dst, dst_through_yuyv) = convert_img(
3532            &mut cpu_converter,
3533            dst,
3534            dst_through_yuyv,
3535            Rotation::None,
3536            Flip::None,
3537            Crop::no_crop(),
3538        );
3539        result.unwrap();
3540
3541        let (result, _src, dst_direct) = convert_img(
3542            &mut cpu_converter,
3543            src,
3544            dst_direct,
3545            Rotation::None,
3546            Flip::None,
3547            Crop::no_crop(),
3548        );
3549        result.unwrap();
3550
3551        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
3552    }
3553
3554    #[test]
3555    #[cfg(target_os = "linux")]
3556    #[cfg(feature = "opengl")]
3557    #[ignore = "opengl doesn't support rendering to PixelFormat::Yuyv texture"]
3558    fn test_rgba_to_yuyv_resize_opengl() {
3559        if !is_opengl_available() {
3560            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3561            return;
3562        }
3563
3564        if !is_dma_available() {
3565            eprintln!(
3566                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3567                function!()
3568            );
3569            return;
3570        }
3571
3572        let src = load_bytes_to_tensor(
3573            1280,
3574            720,
3575            PixelFormat::Rgba,
3576            None,
3577            include_bytes!(concat!(
3578                env!("CARGO_MANIFEST_DIR"),
3579                "/../../testdata/camera720p.rgba"
3580            )),
3581        )
3582        .unwrap();
3583
3584        let (dst_width, dst_height) = (640, 360);
3585
3586        let dst = TensorDyn::image(
3587            dst_width,
3588            dst_height,
3589            PixelFormat::Yuyv,
3590            DType::U8,
3591            Some(TensorMemory::Dma),
3592        )
3593        .unwrap();
3594
3595        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3596
3597        let (result, src, dst) = convert_img(
3598            &mut gl_converter,
3599            src,
3600            dst,
3601            Rotation::None,
3602            Flip::None,
3603            Crop::new()
3604                .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
3605                .with_dst_color(Some([255, 255, 255, 255])),
3606        );
3607        result.unwrap();
3608
3609        std::fs::write(
3610            "rgba_to_yuyv_opengl.yuyv",
3611            dst.as_u8().unwrap().map().unwrap().as_slice(),
3612        )
3613        .unwrap();
3614        let cpu_dst = TensorDyn::image(
3615            dst_width,
3616            dst_height,
3617            PixelFormat::Yuyv,
3618            DType::U8,
3619            Some(TensorMemory::Dma),
3620        )
3621        .unwrap();
3622        let (result, _src, cpu_dst) = convert_img(
3623            &mut CPUProcessor::new(),
3624            src,
3625            cpu_dst,
3626            Rotation::None,
3627            Flip::None,
3628            Crop::no_crop(),
3629        );
3630        result.unwrap();
3631
3632        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
3633    }
3634
3635    #[test]
3636    #[cfg(target_os = "linux")]
3637    fn test_rgba_to_yuyv_resize_g2d() {
3638        if !is_g2d_available() {
3639            eprintln!(
3640                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3641            );
3642            return;
3643        }
3644        if !is_dma_available() {
3645            eprintln!(
3646                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3647            );
3648            return;
3649        }
3650
3651        let src = load_bytes_to_tensor(
3652            1280,
3653            720,
3654            PixelFormat::Rgba,
3655            Some(TensorMemory::Dma),
3656            include_bytes!(concat!(
3657                env!("CARGO_MANIFEST_DIR"),
3658                "/../../testdata/camera720p.rgba"
3659            )),
3660        )
3661        .unwrap();
3662
3663        let (dst_width, dst_height) = (1280, 720);
3664
3665        let cpu_dst = TensorDyn::image(
3666            dst_width,
3667            dst_height,
3668            PixelFormat::Yuyv,
3669            DType::U8,
3670            Some(TensorMemory::Dma),
3671        )
3672        .unwrap();
3673
3674        let g2d_dst = TensorDyn::image(
3675            dst_width,
3676            dst_height,
3677            PixelFormat::Yuyv,
3678            DType::U8,
3679            Some(TensorMemory::Dma),
3680        )
3681        .unwrap();
3682
3683        let mut g2d_converter = G2DProcessor::new().unwrap();
3684        let crop = Crop {
3685            src_rect: None,
3686            dst_rect: Some(Rect::new(100, 100, 2, 2)),
3687            dst_color: None,
3688        };
3689
3690        g2d_dst
3691            .as_u8()
3692            .unwrap()
3693            .map()
3694            .unwrap()
3695            .as_mut_slice()
3696            .fill(128);
3697        let (result, src, g2d_dst) = convert_img(
3698            &mut g2d_converter,
3699            src,
3700            g2d_dst,
3701            Rotation::None,
3702            Flip::None,
3703            crop,
3704        );
3705        result.unwrap();
3706
3707        let cpu_dst_img = cpu_dst;
3708        cpu_dst_img
3709            .as_u8()
3710            .unwrap()
3711            .map()
3712            .unwrap()
3713            .as_mut_slice()
3714            .fill(128);
3715        let (result, _src, cpu_dst) = convert_img(
3716            &mut CPUProcessor::new(),
3717            src,
3718            cpu_dst_img,
3719            Rotation::None,
3720            Flip::None,
3721            crop,
3722        );
3723        result.unwrap();
3724
3725        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3726    }
3727
3728    #[test]
3729    fn test_yuyv_to_rgba_cpu() {
3730        let file = include_bytes!(concat!(
3731            env!("CARGO_MANIFEST_DIR"),
3732            "/../../testdata/camera720p.yuyv"
3733        ))
3734        .to_vec();
3735        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3736        src.as_u8()
3737            .unwrap()
3738            .map()
3739            .unwrap()
3740            .as_mut_slice()
3741            .copy_from_slice(&file);
3742
3743        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3744        let mut cpu_converter = CPUProcessor::new();
3745
3746        let (result, _src, dst) = convert_img(
3747            &mut cpu_converter,
3748            src,
3749            dst,
3750            Rotation::None,
3751            Flip::None,
3752            Crop::no_crop(),
3753        );
3754        result.unwrap();
3755
3756        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3757        target_image
3758            .as_u8()
3759            .unwrap()
3760            .map()
3761            .unwrap()
3762            .as_mut_slice()
3763            .copy_from_slice(include_bytes!(concat!(
3764                env!("CARGO_MANIFEST_DIR"),
3765                "/../../testdata/camera720p.rgba"
3766            )));
3767
3768        compare_images(&dst, &target_image, 0.98, function!());
3769    }
3770
3771    #[test]
3772    fn test_yuyv_to_rgb_cpu() {
3773        let file = include_bytes!(concat!(
3774            env!("CARGO_MANIFEST_DIR"),
3775            "/../../testdata/camera720p.yuyv"
3776        ))
3777        .to_vec();
3778        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3779        src.as_u8()
3780            .unwrap()
3781            .map()
3782            .unwrap()
3783            .as_mut_slice()
3784            .copy_from_slice(&file);
3785
3786        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3787        let mut cpu_converter = CPUProcessor::new();
3788
3789        let (result, _src, dst) = convert_img(
3790            &mut cpu_converter,
3791            src,
3792            dst,
3793            Rotation::None,
3794            Flip::None,
3795            Crop::no_crop(),
3796        );
3797        result.unwrap();
3798
3799        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3800        target_image
3801            .as_u8()
3802            .unwrap()
3803            .map()
3804            .unwrap()
3805            .as_mut_slice()
3806            .as_chunks_mut::<3>()
3807            .0
3808            .iter_mut()
3809            .zip(
3810                include_bytes!(concat!(
3811                    env!("CARGO_MANIFEST_DIR"),
3812                    "/../../testdata/camera720p.rgba"
3813                ))
3814                .as_chunks::<4>()
3815                .0,
3816            )
3817            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3818
3819        compare_images(&dst, &target_image, 0.98, function!());
3820    }
3821
3822    #[test]
3823    #[cfg(target_os = "linux")]
3824    fn test_yuyv_to_rgba_g2d() {
3825        if !is_g2d_available() {
3826            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3827            return;
3828        }
3829        if !is_dma_available() {
3830            eprintln!(
3831                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3832            );
3833            return;
3834        }
3835
3836        let src = load_bytes_to_tensor(
3837            1280,
3838            720,
3839            PixelFormat::Yuyv,
3840            None,
3841            include_bytes!(concat!(
3842                env!("CARGO_MANIFEST_DIR"),
3843                "/../../testdata/camera720p.yuyv"
3844            )),
3845        )
3846        .unwrap();
3847
3848        let dst = TensorDyn::image(
3849            1280,
3850            720,
3851            PixelFormat::Rgba,
3852            DType::U8,
3853            Some(TensorMemory::Dma),
3854        )
3855        .unwrap();
3856        let mut g2d_converter = G2DProcessor::new().unwrap();
3857
3858        let (result, _src, dst) = convert_img(
3859            &mut g2d_converter,
3860            src,
3861            dst,
3862            Rotation::None,
3863            Flip::None,
3864            Crop::no_crop(),
3865        );
3866        result.unwrap();
3867
3868        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3869        target_image
3870            .as_u8()
3871            .unwrap()
3872            .map()
3873            .unwrap()
3874            .as_mut_slice()
3875            .copy_from_slice(include_bytes!(concat!(
3876                env!("CARGO_MANIFEST_DIR"),
3877                "/../../testdata/camera720p.rgba"
3878            )));
3879
3880        compare_images(&dst, &target_image, 0.98, function!());
3881    }
3882
3883    #[test]
3884    #[cfg(target_os = "linux")]
3885    #[cfg(feature = "opengl")]
3886    fn test_yuyv_to_rgba_opengl() {
3887        if !is_opengl_available() {
3888            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3889            return;
3890        }
3891        if !is_dma_available() {
3892            eprintln!(
3893                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3894                function!()
3895            );
3896            return;
3897        }
3898
3899        let src = load_bytes_to_tensor(
3900            1280,
3901            720,
3902            PixelFormat::Yuyv,
3903            Some(TensorMemory::Dma),
3904            include_bytes!(concat!(
3905                env!("CARGO_MANIFEST_DIR"),
3906                "/../../testdata/camera720p.yuyv"
3907            )),
3908        )
3909        .unwrap();
3910
3911        let dst = TensorDyn::image(
3912            1280,
3913            720,
3914            PixelFormat::Rgba,
3915            DType::U8,
3916            Some(TensorMemory::Dma),
3917        )
3918        .unwrap();
3919        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3920
3921        let (result, _src, dst) = convert_img(
3922            &mut gl_converter,
3923            src,
3924            dst,
3925            Rotation::None,
3926            Flip::None,
3927            Crop::no_crop(),
3928        );
3929        result.unwrap();
3930
3931        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3932        target_image
3933            .as_u8()
3934            .unwrap()
3935            .map()
3936            .unwrap()
3937            .as_mut_slice()
3938            .copy_from_slice(include_bytes!(concat!(
3939                env!("CARGO_MANIFEST_DIR"),
3940                "/../../testdata/camera720p.rgba"
3941            )));
3942
3943        compare_images(&dst, &target_image, 0.98, function!());
3944    }
3945
3946    #[test]
3947    #[cfg(target_os = "linux")]
3948    fn test_yuyv_to_rgb_g2d() {
3949        if !is_g2d_available() {
3950            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3951            return;
3952        }
3953        if !is_dma_available() {
3954            eprintln!(
3955                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3956            );
3957            return;
3958        }
3959
3960        let src = load_bytes_to_tensor(
3961            1280,
3962            720,
3963            PixelFormat::Yuyv,
3964            None,
3965            include_bytes!(concat!(
3966                env!("CARGO_MANIFEST_DIR"),
3967                "/../../testdata/camera720p.yuyv"
3968            )),
3969        )
3970        .unwrap();
3971
3972        let g2d_dst = TensorDyn::image(
3973            1280,
3974            720,
3975            PixelFormat::Rgb,
3976            DType::U8,
3977            Some(TensorMemory::Dma),
3978        )
3979        .unwrap();
3980        let mut g2d_converter = G2DProcessor::new().unwrap();
3981
3982        let (result, src, g2d_dst) = convert_img(
3983            &mut g2d_converter,
3984            src,
3985            g2d_dst,
3986            Rotation::None,
3987            Flip::None,
3988            Crop::no_crop(),
3989        );
3990        result.unwrap();
3991
3992        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3993        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3994
3995        let (result, _src, cpu_dst) = convert_img(
3996            &mut cpu_converter,
3997            src,
3998            cpu_dst,
3999            Rotation::None,
4000            Flip::None,
4001            Crop::no_crop(),
4002        );
4003        result.unwrap();
4004
4005        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4006    }
4007
4008    #[test]
4009    #[cfg(target_os = "linux")]
4010    fn test_yuyv_to_yuyv_resize_g2d() {
4011        if !is_g2d_available() {
4012            eprintln!(
4013                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
4014            );
4015            return;
4016        }
4017        if !is_dma_available() {
4018            eprintln!(
4019                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4020            );
4021            return;
4022        }
4023
4024        let src = load_bytes_to_tensor(
4025            1280,
4026            720,
4027            PixelFormat::Yuyv,
4028            None,
4029            include_bytes!(concat!(
4030                env!("CARGO_MANIFEST_DIR"),
4031                "/../../testdata/camera720p.yuyv"
4032            )),
4033        )
4034        .unwrap();
4035
4036        let g2d_dst = TensorDyn::image(
4037            600,
4038            400,
4039            PixelFormat::Yuyv,
4040            DType::U8,
4041            Some(TensorMemory::Dma),
4042        )
4043        .unwrap();
4044        let mut g2d_converter = G2DProcessor::new().unwrap();
4045
4046        let (result, src, g2d_dst) = convert_img(
4047            &mut g2d_converter,
4048            src,
4049            g2d_dst,
4050            Rotation::None,
4051            Flip::None,
4052            Crop::no_crop(),
4053        );
4054        result.unwrap();
4055
4056        let cpu_dst = TensorDyn::image(600, 400, PixelFormat::Yuyv, DType::U8, None).unwrap();
4057        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4058
4059        let (result, _src, cpu_dst) = convert_img(
4060            &mut cpu_converter,
4061            src,
4062            cpu_dst,
4063            Rotation::None,
4064            Flip::None,
4065            Crop::no_crop(),
4066        );
4067        result.unwrap();
4068
4069        // TODO: compare PixelFormat::Yuyv and PixelFormat::Yuyv images without having to convert them to PixelFormat::Rgb
4070        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
4071    }
4072
4073    #[test]
4074    fn test_yuyv_to_rgba_resize_cpu() {
4075        let src = load_bytes_to_tensor(
4076            1280,
4077            720,
4078            PixelFormat::Yuyv,
4079            None,
4080            include_bytes!(concat!(
4081                env!("CARGO_MANIFEST_DIR"),
4082                "/../../testdata/camera720p.yuyv"
4083            )),
4084        )
4085        .unwrap();
4086
4087        let (dst_width, dst_height) = (960, 540);
4088
4089        let dst =
4090            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
4091        let mut cpu_converter = CPUProcessor::new();
4092
4093        let (result, _src, dst) = convert_img(
4094            &mut cpu_converter,
4095            src,
4096            dst,
4097            Rotation::None,
4098            Flip::None,
4099            Crop::no_crop(),
4100        );
4101        result.unwrap();
4102
4103        let dst_target =
4104            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
4105        let src_target = load_bytes_to_tensor(
4106            1280,
4107            720,
4108            PixelFormat::Rgba,
4109            None,
4110            include_bytes!(concat!(
4111                env!("CARGO_MANIFEST_DIR"),
4112                "/../../testdata/camera720p.rgba"
4113            )),
4114        )
4115        .unwrap();
4116        let (result, _src_target, dst_target) = convert_img(
4117            &mut cpu_converter,
4118            src_target,
4119            dst_target,
4120            Rotation::None,
4121            Flip::None,
4122            Crop::no_crop(),
4123        );
4124        result.unwrap();
4125
4126        compare_images(&dst, &dst_target, 0.98, function!());
4127    }
4128
4129    #[test]
4130    #[cfg(target_os = "linux")]
4131    fn test_yuyv_to_rgba_crop_flip_g2d() {
4132        if !is_g2d_available() {
4133            eprintln!(
4134                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
4135            );
4136            return;
4137        }
4138        if !is_dma_available() {
4139            eprintln!(
4140                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4141            );
4142            return;
4143        }
4144
4145        let src = load_bytes_to_tensor(
4146            1280,
4147            720,
4148            PixelFormat::Yuyv,
4149            Some(TensorMemory::Dma),
4150            include_bytes!(concat!(
4151                env!("CARGO_MANIFEST_DIR"),
4152                "/../../testdata/camera720p.yuyv"
4153            )),
4154        )
4155        .unwrap();
4156
4157        let (dst_width, dst_height) = (640, 640);
4158
4159        let dst_g2d = TensorDyn::image(
4160            dst_width,
4161            dst_height,
4162            PixelFormat::Rgba,
4163            DType::U8,
4164            Some(TensorMemory::Dma),
4165        )
4166        .unwrap();
4167        let mut g2d_converter = G2DProcessor::new().unwrap();
4168        let crop = Crop {
4169            src_rect: Some(Rect {
4170                left: 20,
4171                top: 15,
4172                width: 400,
4173                height: 300,
4174            }),
4175            dst_rect: None,
4176            dst_color: None,
4177        };
4178
4179        let (result, src, dst_g2d) = convert_img(
4180            &mut g2d_converter,
4181            src,
4182            dst_g2d,
4183            Rotation::None,
4184            Flip::Horizontal,
4185            crop,
4186        );
4187        result.unwrap();
4188
4189        let dst_cpu = TensorDyn::image(
4190            dst_width,
4191            dst_height,
4192            PixelFormat::Rgba,
4193            DType::U8,
4194            Some(TensorMemory::Dma),
4195        )
4196        .unwrap();
4197        let mut cpu_converter = CPUProcessor::new();
4198
4199        let (result, _src, dst_cpu) = convert_img(
4200            &mut cpu_converter,
4201            src,
4202            dst_cpu,
4203            Rotation::None,
4204            Flip::Horizontal,
4205            crop,
4206        );
4207        result.unwrap();
4208        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
4209    }
4210
4211    #[test]
4212    #[cfg(target_os = "linux")]
4213    #[cfg(feature = "opengl")]
4214    fn test_yuyv_to_rgba_crop_flip_opengl() {
4215        if !is_opengl_available() {
4216            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4217            return;
4218        }
4219
4220        if !is_dma_available() {
4221            eprintln!(
4222                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4223                function!()
4224            );
4225            return;
4226        }
4227
4228        let src = load_bytes_to_tensor(
4229            1280,
4230            720,
4231            PixelFormat::Yuyv,
4232            Some(TensorMemory::Dma),
4233            include_bytes!(concat!(
4234                env!("CARGO_MANIFEST_DIR"),
4235                "/../../testdata/camera720p.yuyv"
4236            )),
4237        )
4238        .unwrap();
4239
4240        let (dst_width, dst_height) = (640, 640);
4241
4242        let dst_gl = TensorDyn::image(
4243            dst_width,
4244            dst_height,
4245            PixelFormat::Rgba,
4246            DType::U8,
4247            Some(TensorMemory::Dma),
4248        )
4249        .unwrap();
4250        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4251        let crop = Crop {
4252            src_rect: Some(Rect {
4253                left: 20,
4254                top: 15,
4255                width: 400,
4256                height: 300,
4257            }),
4258            dst_rect: None,
4259            dst_color: None,
4260        };
4261
4262        let (result, src, dst_gl) = convert_img(
4263            &mut gl_converter,
4264            src,
4265            dst_gl,
4266            Rotation::None,
4267            Flip::Horizontal,
4268            crop,
4269        );
4270        result.unwrap();
4271
4272        let dst_cpu = TensorDyn::image(
4273            dst_width,
4274            dst_height,
4275            PixelFormat::Rgba,
4276            DType::U8,
4277            Some(TensorMemory::Dma),
4278        )
4279        .unwrap();
4280        let mut cpu_converter = CPUProcessor::new();
4281
4282        let (result, _src, dst_cpu) = convert_img(
4283            &mut cpu_converter,
4284            src,
4285            dst_cpu,
4286            Rotation::None,
4287            Flip::Horizontal,
4288            crop,
4289        );
4290        result.unwrap();
4291        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
4292    }
4293
4294    #[test]
4295    fn test_vyuy_to_rgba_cpu() {
4296        let file = include_bytes!(concat!(
4297            env!("CARGO_MANIFEST_DIR"),
4298            "/../../testdata/camera720p.vyuy"
4299        ))
4300        .to_vec();
4301        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
4302        src.as_u8()
4303            .unwrap()
4304            .map()
4305            .unwrap()
4306            .as_mut_slice()
4307            .copy_from_slice(&file);
4308
4309        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4310        let mut cpu_converter = CPUProcessor::new();
4311
4312        let (result, _src, dst) = convert_img(
4313            &mut cpu_converter,
4314            src,
4315            dst,
4316            Rotation::None,
4317            Flip::None,
4318            Crop::no_crop(),
4319        );
4320        result.unwrap();
4321
4322        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4323        target_image
4324            .as_u8()
4325            .unwrap()
4326            .map()
4327            .unwrap()
4328            .as_mut_slice()
4329            .copy_from_slice(include_bytes!(concat!(
4330                env!("CARGO_MANIFEST_DIR"),
4331                "/../../testdata/camera720p.rgba"
4332            )));
4333
4334        compare_images(&dst, &target_image, 0.98, function!());
4335    }
4336
    #[test]
    fn test_vyuy_to_rgb_cpu() {
        // Raw 1280x720 VYUY frame used as the conversion source.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/camera720p.vyuy"
        ))
        .to_vec();
        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
        src.as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&file);

        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // Full-frame VYUY -> RGB conversion on the CPU path.
        let (result, _src, dst) = convert_img(
            &mut cpu_converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Build the RGB reference by dropping the alpha channel from the
        // pre-converted RGBA reference frame: zip 3-byte destination pixels
        // with 4-byte source pixels and copy only R, G, B.
        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
        target_image
            .as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .as_chunks_mut::<3>()
            .0
            .iter_mut()
            .zip(
                include_bytes!(concat!(
                    env!("CARGO_MANIFEST_DIR"),
                    "/../../testdata/camera720p.rgba"
                ))
                .as_chunks::<4>()
                .0,
            )
            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);

        compare_images(&dst, &target_image, 0.98, function!());
    }
4387
4388    #[test]
4389    #[cfg(target_os = "linux")]
4390    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4391    fn test_vyuy_to_rgba_g2d() {
4392        if !is_g2d_available() {
4393            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
4394            return;
4395        }
4396        if !is_dma_available() {
4397            eprintln!(
4398                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4399            );
4400            return;
4401        }
4402
4403        let src = load_bytes_to_tensor(
4404            1280,
4405            720,
4406            PixelFormat::Vyuy,
4407            None,
4408            include_bytes!(concat!(
4409                env!("CARGO_MANIFEST_DIR"),
4410                "/../../testdata/camera720p.vyuy"
4411            )),
4412        )
4413        .unwrap();
4414
4415        let dst = TensorDyn::image(
4416            1280,
4417            720,
4418            PixelFormat::Rgba,
4419            DType::U8,
4420            Some(TensorMemory::Dma),
4421        )
4422        .unwrap();
4423        let mut g2d_converter = G2DProcessor::new().unwrap();
4424
4425        let (result, _src, dst) = convert_img(
4426            &mut g2d_converter,
4427            src,
4428            dst,
4429            Rotation::None,
4430            Flip::None,
4431            Crop::no_crop(),
4432        );
4433        match result {
4434            Err(Error::G2D(_)) => {
4435                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support PixelFormat::Vyuy format");
4436                return;
4437            }
4438            r => r.unwrap(),
4439        }
4440
4441        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4442        target_image
4443            .as_u8()
4444            .unwrap()
4445            .map()
4446            .unwrap()
4447            .as_mut_slice()
4448            .copy_from_slice(include_bytes!(concat!(
4449                env!("CARGO_MANIFEST_DIR"),
4450                "/../../testdata/camera720p.rgba"
4451            )));
4452
4453        compare_images(&dst, &target_image, 0.98, function!());
4454    }
4455
4456    #[test]
4457    #[cfg(target_os = "linux")]
4458    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4459    fn test_vyuy_to_rgb_g2d() {
4460        if !is_g2d_available() {
4461            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
4462            return;
4463        }
4464        if !is_dma_available() {
4465            eprintln!(
4466                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4467            );
4468            return;
4469        }
4470
4471        let src = load_bytes_to_tensor(
4472            1280,
4473            720,
4474            PixelFormat::Vyuy,
4475            None,
4476            include_bytes!(concat!(
4477                env!("CARGO_MANIFEST_DIR"),
4478                "/../../testdata/camera720p.vyuy"
4479            )),
4480        )
4481        .unwrap();
4482
4483        let g2d_dst = TensorDyn::image(
4484            1280,
4485            720,
4486            PixelFormat::Rgb,
4487            DType::U8,
4488            Some(TensorMemory::Dma),
4489        )
4490        .unwrap();
4491        let mut g2d_converter = G2DProcessor::new().unwrap();
4492
4493        let (result, src, g2d_dst) = convert_img(
4494            &mut g2d_converter,
4495            src,
4496            g2d_dst,
4497            Rotation::None,
4498            Flip::None,
4499            Crop::no_crop(),
4500        );
4501        match result {
4502            Err(Error::G2D(_)) => {
4503                eprintln!(
4504                    "SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support PixelFormat::Vyuy format"
4505                );
4506                return;
4507            }
4508            r => r.unwrap(),
4509        }
4510
4511        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4512        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4513
4514        let (result, _src, cpu_dst) = convert_img(
4515            &mut cpu_converter,
4516            src,
4517            cpu_dst,
4518            Rotation::None,
4519            Flip::None,
4520            Crop::no_crop(),
4521        );
4522        result.unwrap();
4523
4524        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4525    }
4526
4527    #[test]
4528    #[cfg(target_os = "linux")]
4529    #[cfg(feature = "opengl")]
4530    fn test_vyuy_to_rgba_opengl() {
4531        if !is_opengl_available() {
4532            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4533            return;
4534        }
4535        if !is_dma_available() {
4536            eprintln!(
4537                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4538                function!()
4539            );
4540            return;
4541        }
4542
4543        let src = load_bytes_to_tensor(
4544            1280,
4545            720,
4546            PixelFormat::Vyuy,
4547            Some(TensorMemory::Dma),
4548            include_bytes!(concat!(
4549                env!("CARGO_MANIFEST_DIR"),
4550                "/../../testdata/camera720p.vyuy"
4551            )),
4552        )
4553        .unwrap();
4554
4555        let dst = TensorDyn::image(
4556            1280,
4557            720,
4558            PixelFormat::Rgba,
4559            DType::U8,
4560            Some(TensorMemory::Dma),
4561        )
4562        .unwrap();
4563        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4564
4565        let (result, _src, dst) = convert_img(
4566            &mut gl_converter,
4567            src,
4568            dst,
4569            Rotation::None,
4570            Flip::None,
4571            Crop::no_crop(),
4572        );
4573        match result {
4574            Err(Error::NotSupported(_)) => {
4575                eprintln!(
4576                    "SKIPPED: {} - OpenGL does not support PixelFormat::Vyuy DMA format",
4577                    function!()
4578                );
4579                return;
4580            }
4581            r => r.unwrap(),
4582        }
4583
4584        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4585        target_image
4586            .as_u8()
4587            .unwrap()
4588            .map()
4589            .unwrap()
4590            .as_mut_slice()
4591            .copy_from_slice(include_bytes!(concat!(
4592                env!("CARGO_MANIFEST_DIR"),
4593                "/../../testdata/camera720p.rgba"
4594            )));
4595
4596        compare_images(&dst, &target_image, 0.98, function!());
4597    }
4598
4599    #[test]
4600    fn test_nv12_to_rgba_cpu() {
4601        let file = include_bytes!(concat!(
4602            env!("CARGO_MANIFEST_DIR"),
4603            "/../../testdata/zidane.nv12"
4604        ))
4605        .to_vec();
4606        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4607        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4608            .copy_from_slice(&file);
4609
4610        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4611        let mut cpu_converter = CPUProcessor::new();
4612
4613        let (result, _src, dst) = convert_img(
4614            &mut cpu_converter,
4615            src,
4616            dst,
4617            Rotation::None,
4618            Flip::None,
4619            Crop::no_crop(),
4620        );
4621        result.unwrap();
4622
4623        let target_image = crate::load_image(
4624            include_bytes!(concat!(
4625                env!("CARGO_MANIFEST_DIR"),
4626                "/../../testdata/zidane.jpg"
4627            )),
4628            Some(PixelFormat::Rgba),
4629            None,
4630        )
4631        .unwrap();
4632
4633        compare_images(&dst, &target_image, 0.98, function!());
4634    }
4635
4636    #[test]
4637    fn test_nv12_to_rgb_cpu() {
4638        let file = include_bytes!(concat!(
4639            env!("CARGO_MANIFEST_DIR"),
4640            "/../../testdata/zidane.nv12"
4641        ))
4642        .to_vec();
4643        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4644        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4645            .copy_from_slice(&file);
4646
4647        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4648        let mut cpu_converter = CPUProcessor::new();
4649
4650        let (result, _src, dst) = convert_img(
4651            &mut cpu_converter,
4652            src,
4653            dst,
4654            Rotation::None,
4655            Flip::None,
4656            Crop::no_crop(),
4657        );
4658        result.unwrap();
4659
4660        let target_image = crate::load_image(
4661            include_bytes!(concat!(
4662                env!("CARGO_MANIFEST_DIR"),
4663                "/../../testdata/zidane.jpg"
4664            )),
4665            Some(PixelFormat::Rgb),
4666            None,
4667        )
4668        .unwrap();
4669
4670        compare_images(&dst, &target_image, 0.98, function!());
4671    }
4672
4673    #[test]
4674    fn test_nv12_to_grey_cpu() {
4675        let file = include_bytes!(concat!(
4676            env!("CARGO_MANIFEST_DIR"),
4677            "/../../testdata/zidane.nv12"
4678        ))
4679        .to_vec();
4680        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4681        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4682            .copy_from_slice(&file);
4683
4684        let dst = TensorDyn::image(1280, 720, PixelFormat::Grey, DType::U8, None).unwrap();
4685        let mut cpu_converter = CPUProcessor::new();
4686
4687        let (result, _src, dst) = convert_img(
4688            &mut cpu_converter,
4689            src,
4690            dst,
4691            Rotation::None,
4692            Flip::None,
4693            Crop::no_crop(),
4694        );
4695        result.unwrap();
4696
4697        let target_image = crate::load_image(
4698            include_bytes!(concat!(
4699                env!("CARGO_MANIFEST_DIR"),
4700                "/../../testdata/zidane.jpg"
4701            )),
4702            Some(PixelFormat::Grey),
4703            None,
4704        )
4705        .unwrap();
4706
4707        compare_images(&dst, &target_image, 0.98, function!());
4708    }
4709
4710    #[test]
4711    fn test_nv12_to_yuyv_cpu() {
4712        let file = include_bytes!(concat!(
4713            env!("CARGO_MANIFEST_DIR"),
4714            "/../../testdata/zidane.nv12"
4715        ))
4716        .to_vec();
4717        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4718        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4719            .copy_from_slice(&file);
4720
4721        let dst = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
4722        let mut cpu_converter = CPUProcessor::new();
4723
4724        let (result, _src, dst) = convert_img(
4725            &mut cpu_converter,
4726            src,
4727            dst,
4728            Rotation::None,
4729            Flip::None,
4730            Crop::no_crop(),
4731        );
4732        result.unwrap();
4733
4734        let target_image = crate::load_image(
4735            include_bytes!(concat!(
4736                env!("CARGO_MANIFEST_DIR"),
4737                "/../../testdata/zidane.jpg"
4738            )),
4739            Some(PixelFormat::Rgb),
4740            None,
4741        )
4742        .unwrap();
4743
4744        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4745    }
4746
    #[test]
    fn test_cpu_resize_planar_rgb() {
        // Source: a 4x4 RGBA image (16 pixels, 4 bytes each) with distinct
        // per-pixel colors so placement errors are detectable.
        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        // Destination: 5x5 planar RGB (three 25-byte planes, 75 bytes total).
        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // Place the 4x4 source at offset (1, 1) inside the 5x5 destination
        // and fill the uncovered border with the color (114, 114, 114).
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::new()
                .with_dst_rect(Some(Rect {
                    left: 1,
                    top: 1,
                    width: 4,
                    height: 4,
                }))
                .with_dst_color(Some([114, 114, 114, 255])),
        );
        result.unwrap();

        // Expected output: one 25-byte row per plane (R, then G, then B).
        // Each plane's first row and first column hold the 114 fill color;
        // the remaining 4x4 region holds the corresponding source channel.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
        ];

        assert_eq!(
            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
            &expected_dst
        );
    }
4796
    #[test]
    fn test_cpu_resize_planar_rgba() {
        // Source: a 4x4 RGBA image (16 pixels, 4 bytes each) with distinct
        // per-pixel colors so placement errors are detectable.
        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        // Destination: 5x5 planar RGBA (four 25-byte planes, 100 bytes total).
        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // Place the 4x4 source at offset (1, 1) inside the 5x5 destination
        // and fill the uncovered border with the color (114, 114, 114, 255).
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::new()
                .with_dst_rect(Some(Rect {
                    left: 1,
                    top: 1,
                    width: 4,
                    height: 4,
                }))
                .with_dst_color(Some([114, 114, 114, 255])),
        );
        result.unwrap();

        // Expected output: one 25-byte row per plane (R, G, B, then A).
        // Each plane's first row and first column hold the fill color
        // (114 for R/G/B, 255 for alpha); the remaining 4x4 region holds
        // the corresponding source channel.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
        ];

        assert_eq!(
            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
            &expected_dst
        );
    }
4847
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize_planar_rgb() {
        // The OpenGL path needs both a usable GL context and DMA-BUF memory;
        // skip gracefully when either is missing.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        // Decode the reference JPEG to RGBA as the common source image.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/test_image.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // First pass: full-frame CPU conversion into cpu_dst.
        // NOTE(review): this buffer is immediately overwritten by the
        // letterboxed pass below — presumably the initial pass exercises
        // re-use of an already-written destination; confirm intent.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        // Letterbox: scale into a centered 440x440 region of the 640x640
        // frame, padding the border with color 114.
        let crop_letterbox = Crop::new()
            .with_dst_rect(Some(Rect {
                left: 102,
                top: 102,
                width: 440,
                height: 440,
            }))
            .with_dst_color(Some([114, 114, 114, 114]));
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();

        // Run the same letterboxed conversion through the GL path and
        // require it to closely match the CPU reference.
        let gl_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
4931
4932    #[test]
4933    fn test_cpu_resize_nv16() {
4934        let file = include_bytes!(concat!(
4935            env!("CARGO_MANIFEST_DIR"),
4936            "/../../testdata/zidane.jpg"
4937        ))
4938        .to_vec();
4939        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
4940
4941        let cpu_nv16_dst = TensorDyn::image(640, 640, PixelFormat::Nv16, DType::U8, None).unwrap();
4942        let cpu_rgb_dst = TensorDyn::image(640, 640, PixelFormat::Rgb, DType::U8, None).unwrap();
4943        let mut cpu_converter = CPUProcessor::new();
4944        let crop = Crop::new()
4945            .with_dst_rect(Some(Rect {
4946                left: 20,
4947                top: 140,
4948                width: 600,
4949                height: 360,
4950            }))
4951            .with_dst_color(Some([255, 128, 0, 255]));
4952
4953        let (result, src, cpu_nv16_dst) = convert_img(
4954            &mut cpu_converter,
4955            src,
4956            cpu_nv16_dst,
4957            Rotation::None,
4958            Flip::None,
4959            crop,
4960        );
4961        result.unwrap();
4962
4963        let (result, _src, cpu_rgb_dst) = convert_img(
4964            &mut cpu_converter,
4965            src,
4966            cpu_rgb_dst,
4967            Rotation::None,
4968            Flip::None,
4969            crop,
4970        );
4971        result.unwrap();
4972        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4973    }
4974
4975    fn load_bytes_to_tensor(
4976        width: usize,
4977        height: usize,
4978        format: PixelFormat,
4979        memory: Option<TensorMemory>,
4980        bytes: &[u8],
4981    ) -> Result<TensorDyn, Error> {
4982        let src = TensorDyn::image(width, height, format, DType::U8, memory)?;
4983        src.as_u8()
4984            .unwrap()
4985            .map()?
4986            .as_mut_slice()
4987            .copy_from_slice(bytes);
4988        Ok(src)
4989    }
4990
4991    fn compare_images(img1: &TensorDyn, img2: &TensorDyn, threshold: f64, name: &str) {
4992        assert_eq!(img1.height(), img2.height(), "Heights differ");
4993        assert_eq!(img1.width(), img2.width(), "Widths differ");
4994        assert_eq!(
4995            img1.format().unwrap(),
4996            img2.format().unwrap(),
4997            "PixelFormat differ"
4998        );
4999        assert!(
5000            matches!(
5001                img1.format().unwrap(),
5002                PixelFormat::Rgb | PixelFormat::Rgba | PixelFormat::Grey | PixelFormat::PlanarRgb
5003            ),
5004            "format must be Rgb or Rgba for comparison"
5005        );
5006
5007        let image1 = match img1.format().unwrap() {
5008            PixelFormat::Rgb => image::RgbImage::from_vec(
5009                img1.width().unwrap() as u32,
5010                img1.height().unwrap() as u32,
5011                img1.as_u8().unwrap().map().unwrap().to_vec(),
5012            )
5013            .unwrap(),
5014            PixelFormat::Rgba => image::RgbaImage::from_vec(
5015                img1.width().unwrap() as u32,
5016                img1.height().unwrap() as u32,
5017                img1.as_u8().unwrap().map().unwrap().to_vec(),
5018            )
5019            .unwrap()
5020            .convert(),
5021            PixelFormat::Grey => image::GrayImage::from_vec(
5022                img1.width().unwrap() as u32,
5023                img1.height().unwrap() as u32,
5024                img1.as_u8().unwrap().map().unwrap().to_vec(),
5025            )
5026            .unwrap()
5027            .convert(),
5028            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
5029                img1.width().unwrap() as u32,
5030                (img1.height().unwrap() * 3) as u32,
5031                img1.as_u8().unwrap().map().unwrap().to_vec(),
5032            )
5033            .unwrap()
5034            .convert(),
5035            _ => return,
5036        };
5037
5038        let image2 = match img2.format().unwrap() {
5039            PixelFormat::Rgb => image::RgbImage::from_vec(
5040                img2.width().unwrap() as u32,
5041                img2.height().unwrap() as u32,
5042                img2.as_u8().unwrap().map().unwrap().to_vec(),
5043            )
5044            .unwrap(),
5045            PixelFormat::Rgba => image::RgbaImage::from_vec(
5046                img2.width().unwrap() as u32,
5047                img2.height().unwrap() as u32,
5048                img2.as_u8().unwrap().map().unwrap().to_vec(),
5049            )
5050            .unwrap()
5051            .convert(),
5052            PixelFormat::Grey => image::GrayImage::from_vec(
5053                img2.width().unwrap() as u32,
5054                img2.height().unwrap() as u32,
5055                img2.as_u8().unwrap().map().unwrap().to_vec(),
5056            )
5057            .unwrap()
5058            .convert(),
5059            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
5060                img2.width().unwrap() as u32,
5061                (img2.height().unwrap() * 3) as u32,
5062                img2.as_u8().unwrap().map().unwrap().to_vec(),
5063            )
5064            .unwrap()
5065            .convert(),
5066            _ => return,
5067        };
5068
5069        let similarity = image_compare::rgb_similarity_structure(
5070            &image_compare::Algorithm::RootMeanSquared,
5071            &image1,
5072            &image2,
5073        )
5074        .expect("Image Comparison failed");
5075        if similarity.score < threshold {
5076            // image1.save(format!("{name}_1.png"));
5077            // image2.save(format!("{name}_2.png"));
5078            similarity
5079                .image
5080                .to_color_map()
5081                .save(format!("{name}.png"))
5082                .unwrap();
5083            panic!(
5084                "{name}: converted image and target image have similarity score too low: {} < {}",
5085                similarity.score, threshold
5086            )
5087        }
5088    }
5089
5090    fn compare_images_convert_to_rgb(
5091        img1: &TensorDyn,
5092        img2: &TensorDyn,
5093        threshold: f64,
5094        name: &str,
5095    ) {
5096        assert_eq!(img1.height(), img2.height(), "Heights differ");
5097        assert_eq!(img1.width(), img2.width(), "Widths differ");
5098
5099        let mut img_rgb1 = TensorDyn::image(
5100            img1.width().unwrap(),
5101            img1.height().unwrap(),
5102            PixelFormat::Rgb,
5103            DType::U8,
5104            Some(TensorMemory::Mem),
5105        )
5106        .unwrap();
5107        let mut img_rgb2 = TensorDyn::image(
5108            img1.width().unwrap(),
5109            img1.height().unwrap(),
5110            PixelFormat::Rgb,
5111            DType::U8,
5112            Some(TensorMemory::Mem),
5113        )
5114        .unwrap();
5115        let mut __cv = CPUProcessor::default();
5116        let r1 = __cv.convert(
5117            img1,
5118            &mut img_rgb1,
5119            crate::Rotation::None,
5120            crate::Flip::None,
5121            crate::Crop::default(),
5122        );
5123        let r2 = __cv.convert(
5124            img2,
5125            &mut img_rgb2,
5126            crate::Rotation::None,
5127            crate::Flip::None,
5128            crate::Crop::default(),
5129        );
5130        if r1.is_err() || r2.is_err() {
5131            // Fallback: compare raw bytes as greyscale strip
5132            let w = img1.width().unwrap() as u32;
5133            let data1 = img1.as_u8().unwrap().map().unwrap().to_vec();
5134            let data2 = img2.as_u8().unwrap().map().unwrap().to_vec();
5135            let h1 = (data1.len() as u32) / w;
5136            let h2 = (data2.len() as u32) / w;
5137            let g1 = image::GrayImage::from_vec(w, h1, data1).unwrap();
5138            let g2 = image::GrayImage::from_vec(w, h2, data2).unwrap();
5139            let similarity = image_compare::gray_similarity_structure(
5140                &image_compare::Algorithm::RootMeanSquared,
5141                &g1,
5142                &g2,
5143            )
5144            .expect("Image Comparison failed");
5145            if similarity.score < threshold {
5146                panic!(
5147                    "{name}: converted image and target image have similarity score too low: {} < {}",
5148                    similarity.score, threshold
5149                )
5150            }
5151            return;
5152        }
5153
5154        let image1 = image::RgbImage::from_vec(
5155            img_rgb1.width().unwrap() as u32,
5156            img_rgb1.height().unwrap() as u32,
5157            img_rgb1.as_u8().unwrap().map().unwrap().to_vec(),
5158        )
5159        .unwrap();
5160
5161        let image2 = image::RgbImage::from_vec(
5162            img_rgb2.width().unwrap() as u32,
5163            img_rgb2.height().unwrap() as u32,
5164            img_rgb2.as_u8().unwrap().map().unwrap().to_vec(),
5165        )
5166        .unwrap();
5167
5168        let similarity = image_compare::rgb_similarity_structure(
5169            &image_compare::Algorithm::RootMeanSquared,
5170            &image1,
5171            &image2,
5172        )
5173        .expect("Image Comparison failed");
5174        if similarity.score < threshold {
5175            // image1.save(format!("{name}_1.png"));
5176            // image2.save(format!("{name}_2.png"));
5177            similarity
5178                .image
5179                .to_color_map()
5180                .save(format!("{name}.png"))
5181                .unwrap();
5182            panic!(
5183                "{name}: converted image and target image have similarity score too low: {} < {}",
5184                similarity.score, threshold
5185            )
5186        }
5187    }
5188
5189    // =========================================================================
5190    // PixelFormat::Nv12 Format Tests
5191    // =========================================================================
5192
5193    #[test]
5194    fn test_nv12_image_creation() {
5195        let width = 640;
5196        let height = 480;
5197        let img = TensorDyn::image(width, height, PixelFormat::Nv12, DType::U8, None).unwrap();
5198
5199        assert_eq!(img.width(), Some(width));
5200        assert_eq!(img.height(), Some(height));
5201        assert_eq!(img.format().unwrap(), PixelFormat::Nv12);
5202        // PixelFormat::Nv12 uses shape [H*3/2, W] to store Y plane + UV plane
5203        assert_eq!(img.as_u8().unwrap().shape(), &[height * 3 / 2, width]);
5204    }
5205
5206    #[test]
5207    fn test_nv12_channels() {
5208        let img = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
5209        // PixelFormat::Nv12.channels() returns 1 (luma plane)
5210        assert_eq!(img.format().unwrap().channels(), 1);
5211    }
5212
5213    // =========================================================================
5214    // Tensor Format Metadata Tests
5215    // =========================================================================
5216
5217    #[test]
5218    fn test_tensor_set_format_planar() {
5219        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
5220        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
5221        assert_eq!(tensor.format(), Some(PixelFormat::PlanarRgb));
5222        assert_eq!(tensor.width(), Some(640));
5223        assert_eq!(tensor.height(), Some(480));
5224    }
5225
5226    #[test]
5227    fn test_tensor_set_format_interleaved() {
5228        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
5229        tensor.set_format(PixelFormat::Rgba).unwrap();
5230        assert_eq!(tensor.format(), Some(PixelFormat::Rgba));
5231        assert_eq!(tensor.width(), Some(640));
5232        assert_eq!(tensor.height(), Some(480));
5233    }
5234
5235    #[test]
5236    fn test_tensordyn_image_rgb() {
5237        let img = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
5238        assert_eq!(img.width(), Some(640));
5239        assert_eq!(img.height(), Some(480));
5240        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5241    }
5242
5243    #[test]
5244    fn test_tensordyn_image_planar_rgb() {
5245        let img = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
5246        assert_eq!(img.width(), Some(640));
5247        assert_eq!(img.height(), Some(480));
5248        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5249    }
5250
5251    #[test]
5252    fn test_rgb_int8_format() {
5253        // Int8 variant: same PixelFormat::Rgb but with DType::I8
5254        let img = TensorDyn::image(
5255            1280,
5256            720,
5257            PixelFormat::Rgb,
5258            DType::I8,
5259            Some(TensorMemory::Mem),
5260        )
5261        .unwrap();
5262        assert_eq!(img.width(), Some(1280));
5263        assert_eq!(img.height(), Some(720));
5264        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5265        assert_eq!(img.dtype(), DType::I8);
5266    }
5267
5268    #[test]
5269    fn test_planar_rgb_int8_format() {
5270        let img = TensorDyn::image(
5271            1280,
5272            720,
5273            PixelFormat::PlanarRgb,
5274            DType::I8,
5275            Some(TensorMemory::Mem),
5276        )
5277        .unwrap();
5278        assert_eq!(img.width(), Some(1280));
5279        assert_eq!(img.height(), Some(720));
5280        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5281        assert_eq!(img.dtype(), DType::I8);
5282    }
5283
5284    #[test]
5285    fn test_rgb_from_tensor() {
5286        let mut tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
5287        tensor.set_format(PixelFormat::Rgb).unwrap();
5288        let img = TensorDyn::from(tensor);
5289        assert_eq!(img.width(), Some(1280));
5290        assert_eq!(img.height(), Some(720));
5291        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5292    }
5293
5294    #[test]
5295    fn test_planar_rgb_from_tensor() {
5296        let mut tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
5297        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
5298        let img = TensorDyn::from(tensor);
5299        assert_eq!(img.width(), Some(1280));
5300        assert_eq!(img.height(), Some(720));
5301        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5302    }
5303
5304    #[test]
5305    fn test_dtype_determines_int8() {
5306        // DType::I8 indicates int8 data
5307        let u8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::U8, None).unwrap();
5308        let i8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::I8, None).unwrap();
5309        assert_eq!(u8_img.dtype(), DType::U8);
5310        assert_eq!(i8_img.dtype(), DType::I8);
5311    }
5312
5313    #[test]
5314    fn test_pixel_layout_packed_vs_planar() {
5315        // Packed vs planar layout classification
5316        assert_eq!(PixelFormat::Rgb.layout(), PixelLayout::Packed);
5317        assert_eq!(PixelFormat::Rgba.layout(), PixelLayout::Packed);
5318        assert_eq!(PixelFormat::PlanarRgb.layout(), PixelLayout::Planar);
5319        assert_eq!(PixelFormat::Nv12.layout(), PixelLayout::SemiPlanar);
5320    }
5321
    /// Integration test that exercises the PBO-to-PBO convert path.
    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
    /// then converts between them. Skipped when GL is unavailable or the
    /// backend is not PBO (e.g. DMA-buf systems).
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[test]
    fn test_convert_pbo_to_pbo() {
        let mut converter = ImageProcessor::new().unwrap();

        // Skip if GL is not available or backend is not PBO
        let is_pbo = converter
            .opengl
            .as_ref()
            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
        if !is_pbo {
            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
            return;
        }

        // Downscale 640x480 -> 320x240 so the convert performs real work.
        let src_w = 640;
        let src_h = 480;
        let dst_w = 320;
        let dst_h = 240;

        // Create PBO-backed source image
        let pbo_src = converter
            .create_image(src_w, src_h, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        assert_eq!(
            pbo_src.as_u8().unwrap().memory(),
            TensorMemory::Pbo,
            "create_image should produce a PBO tensor"
        );

        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let jpeg_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Resize JPEG into a Mem temp of the right size, then copy into PBO
        let mem_src = TensorDyn::image(
            src_w,
            src_h,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        // convert_img takes ownership of both tensors and hands them back so
        // they can be reused after the conversion completes.
        let (result, _jpeg_src, mem_src) = convert_img(
            &mut CPUProcessor::new(),
            jpeg_src,
            mem_src,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Copy pixel data into the PBO source by mapping it
        // (scoped so both mappings are dropped before the convert below).
        {
            let src_data = mem_src.as_u8().unwrap().map().unwrap();
            let mut pbo_map = pbo_src.as_u8().unwrap().map().unwrap();
            pbo_map.copy_from_slice(&src_data);
        }

        // Create PBO-backed destination image
        let pbo_dst = converter
            .create_image(dst_w, dst_h, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        assert_eq!(pbo_dst.as_u8().unwrap().memory(), TensorMemory::Pbo);

        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
        let mut pbo_dst = pbo_dst;
        let result = converter.convert(
            &pbo_src,
            &mut pbo_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Verify: compare with CPU-only conversion of the same input
        let cpu_dst = TensorDyn::image(
            dst_w,
            dst_h,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let (result, _mem_src, cpu_dst) = convert_img(
            &mut CPUProcessor::new(),
            mem_src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Re-tag the PBO result with its pixel format so the comparison
        // helper can interpret the raw bytes.
        let pbo_dst_img = {
            let mut __t = pbo_dst.into_u8().unwrap();
            __t.set_format(PixelFormat::Rgba).unwrap();
            TensorDyn::from(__t)
        };
        // 0.95 threshold tolerates minor GPU/CPU filtering differences while
        // still catching layout or channel-order bugs.
        compare_images(&pbo_dst_img, &cpu_dst, 0.95, function!());
        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
    }
5435
5436    #[test]
5437    fn test_image_bgra() {
5438        let img = TensorDyn::image(
5439            640,
5440            480,
5441            PixelFormat::Bgra,
5442            DType::U8,
5443            Some(edgefirst_tensor::TensorMemory::Mem),
5444        )
5445        .unwrap();
5446        assert_eq!(img.width(), Some(640));
5447        assert_eq!(img.height(), Some(480));
5448        assert_eq!(img.format().unwrap().channels(), 4);
5449        assert_eq!(img.format().unwrap(), PixelFormat::Bgra);
5450    }
5451
5452    // ========================================================================
5453    // Tests for EDGEFIRST_FORCE_BACKEND env var
5454    // ========================================================================
5455
5456    #[test]
5457    fn test_force_backend_cpu() {
5458        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5459        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5460        let result = ImageProcessor::new();
5461        match original {
5462            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5463            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5464        }
5465        let converter = result.unwrap();
5466        assert!(converter.cpu.is_some());
5467        assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
5468    }
5469
5470    #[test]
5471    fn test_force_backend_invalid() {
5472        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5473        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
5474        let result = ImageProcessor::new();
5475        match original {
5476            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5477            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5478        }
5479        assert!(
5480            matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
5481            "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
5482        );
5483    }
5484
5485    #[test]
5486    fn test_force_backend_unset() {
5487        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5488        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
5489        let result = ImageProcessor::new();
5490        match original {
5491            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5492            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5493        }
5494        let converter = result.unwrap();
5495        assert!(converter.forced_backend.is_none());
5496    }
5497
5498    // ========================================================================
5499    // Tests for hybrid mask path error handling
5500    // ========================================================================
5501
5502    #[test]
5503    fn test_draw_proto_masks_no_cpu_returns_error() {
5504        // Disable CPU backend to trigger the error path
5505        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
5506        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
5507        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
5508        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
5509        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
5510        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
5511
5512        let result = ImageProcessor::new();
5513
5514        match original_cpu {
5515            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
5516            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
5517        }
5518        match original_gl {
5519            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
5520            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
5521        }
5522        match original_g2d {
5523            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
5524            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
5525        }
5526
5527        let mut converter = result.unwrap();
5528        assert!(converter.cpu.is_none(), "CPU should be disabled");
5529
5530        let dst = TensorDyn::image(
5531            640,
5532            480,
5533            PixelFormat::Rgba,
5534            DType::U8,
5535            Some(TensorMemory::Mem),
5536        )
5537        .unwrap();
5538        let mut dst_dyn = dst;
5539        let det = [DetectBox {
5540            bbox: edgefirst_decoder::BoundingBox {
5541                xmin: 0.1,
5542                ymin: 0.1,
5543                xmax: 0.5,
5544                ymax: 0.5,
5545            },
5546            score: 0.9,
5547            label: 0,
5548        }];
5549        let proto_data = ProtoData {
5550            mask_coefficients: vec![vec![0.5; 4]],
5551            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5552        };
5553        let result =
5554            converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
5555        assert!(
5556            matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
5557            "draw_proto_masks without CPU should return Internal error: {result:?}"
5558        );
5559    }
5560
5561    #[test]
5562    fn test_draw_proto_masks_cpu_fallback_works() {
5563        // Force CPU-only backend to ensure the CPU fallback path executes
5564        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5565        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5566        let result = ImageProcessor::new();
5567        match original {
5568            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5569            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5570        }
5571
5572        let mut converter = result.unwrap();
5573        assert!(converter.cpu.is_some());
5574
5575        let dst = TensorDyn::image(
5576            64,
5577            64,
5578            PixelFormat::Rgba,
5579            DType::U8,
5580            Some(TensorMemory::Mem),
5581        )
5582        .unwrap();
5583        let mut dst_dyn = dst;
5584        let det = [DetectBox {
5585            bbox: edgefirst_decoder::BoundingBox {
5586                xmin: 0.1,
5587                ymin: 0.1,
5588                xmax: 0.5,
5589                ymax: 0.5,
5590            },
5591            score: 0.9,
5592            label: 0,
5593        }];
5594        let proto_data = ProtoData {
5595            mask_coefficients: vec![vec![0.5; 4]],
5596            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5597        };
5598        let result =
5599            converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
5600        assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
5601    }
5602
5603    #[test]
5604    fn test_set_format_then_cpu_convert() {
5605        // Force CPU backend (save/restore to avoid leaking into other tests)
5606        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5607        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5608        let mut processor = ImageProcessor::new().unwrap();
5609        match original {
5610            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5611            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5612        }
5613
5614        // Load a source image
5615        let image = include_bytes!(concat!(
5616            env!("CARGO_MANIFEST_DIR"),
5617            "/../../testdata/zidane.jpg"
5618        ));
5619        let src = load_image(image, Some(PixelFormat::Rgba), None).unwrap();
5620
5621        // Create a raw tensor, then attach format — simulating the from_fd workflow
5622        let mut dst =
5623            TensorDyn::new(&[640, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
5624        dst.set_format(PixelFormat::Rgb).unwrap();
5625
5626        // Convert should work with the set_format-annotated tensor
5627        processor
5628            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
5629            .unwrap();
5630
5631        // Verify format survived conversion
5632        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
5633        assert_eq!(dst.width(), Some(640));
5634        assert_eq!(dst.height(), Some(640));
5635    }
5636}