// edgefirst_image/lib.rs
1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate uses [`TensorDyn`] from `edgefirst_tensor` to represent images,
21with [`PixelFormat`] metadata describing the pixel layout. The
22[`ImageProcessor`] struct manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
29# use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
30# fn main() -> Result<(), edgefirst_image::Error> {
31let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
32let src = load_image(image, Some(PixelFormat::Rgba), None)?;
33let mut converter = ImageProcessor::new()?;
34let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
35converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
36# Ok(())
37# }
38```
39
40## Environment Variables
41The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
42following environment variables:
43- `EDGEFIRST_FORCE_BACKEND`: When set to `cpu`, `g2d`, or `opengl` (case-insensitive),
44  only that single backend is initialized and no fallback chain is used. If the
45  forced backend fails to initialize, an error is returned immediately. This is
46  useful for benchmarking individual backends in isolation. When this variable is
47  set, the `EDGEFIRST_DISABLE_*` variables are ignored.
48- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
49  conversion, forcing the use of CPU or other available hardware methods.
50- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
51  conversion, forcing the use of CPU or other available hardware methods.
52- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
53  conversion, forcing the use of hardware acceleration methods. If no hardware
54  acceleration methods are available, an error will be returned when attempting
55  to create an `ImageProcessor`.
56
Additionally, the `TensorMemory` used by default allocations can be controlled using the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor memory
uses system memory. This disables the use of specialized memory regions for tensors
and hardware acceleration; however, it will increase the performance of the CPU converter.
*/
62#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
63
64use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
65use edgefirst_tensor::{
66    DType, PixelFormat, PixelLayout, Tensor, TensorDyn, TensorMemory, TensorTrait as _,
67};
68use enum_dispatch::enum_dispatch;
69use std::{fmt::Display, time::Instant};
70use zune_jpeg::{
71    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
72    JpegDecoder,
73};
74use zune_png::PngDecoder;
75
76pub use cpu::CPUProcessor;
77pub use error::{Error, Result};
78#[cfg(target_os = "linux")]
79pub use g2d::G2DProcessor;
80#[cfg(target_os = "linux")]
81#[cfg(feature = "opengl")]
82pub use opengl_headless::GLProcessorThreaded;
83#[cfg(target_os = "linux")]
84#[cfg(feature = "opengl")]
85pub use opengl_headless::Int8InterpolationMode;
86#[cfg(target_os = "linux")]
87#[cfg(feature = "opengl")]
88pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
89
/// Result of rendering a single per-instance grayscale mask.
///
/// Contains the bounding-box region in output image coordinates and the
/// raw uint8 pixel data (RED channel only, 0–255 representing sigmoid output).
/// `pixels.len()` is `w * h` (one byte per pixel, row-major).
#[derive(Debug, Clone)]
pub(crate) struct MaskResult {
    /// X offset of the bbox region in the output image.
    pub(crate) x: usize,
    /// Y offset of the bbox region in the output image.
    pub(crate) y: usize,
    /// Width of the bbox region in pixels.
    pub(crate) w: usize,
    /// Height of the bbox region in pixels.
    pub(crate) h: usize,
    /// Grayscale pixel data (w * h bytes, row-major).
    pub(crate) pixels: Vec<u8>,
}
107
/// Region metadata for a single detection within a compact mask atlas.
///
/// The atlas packs padded bounding-box strips vertically.  This struct
/// records where each detection's strip lives in the atlas and how it
/// maps back to the original output coordinate space.
///
/// Produced by [`ImageProcessorTrait::decode_masks_atlas`], one entry per
/// detection, alongside the atlas pixel buffer.
#[must_use]
#[derive(Debug, Clone, Copy)]
pub struct MaskRegion {
    /// Row offset of this detection's strip in the atlas.
    pub atlas_y_offset: usize,
    /// Left edge of the padded bbox in output image coordinates.
    pub padded_x: usize,
    /// Top edge of the padded bbox in output image coordinates.
    pub padded_y: usize,
    /// Width of the padded bbox.
    pub padded_w: usize,
    /// Height of the padded bbox (= number of atlas rows for this strip).
    pub padded_h: usize,
    /// Original (unpadded) bbox left edge in output image coordinates.
    pub bbox_x: usize,
    /// Original (unpadded) bbox top edge in output image coordinates.
    pub bbox_y: usize,
    /// Original (unpadded) bbox width.
    pub bbox_w: usize,
    /// Original (unpadded) bbox height.
    pub bbox_h: usize,
}
135
136mod cpu;
137mod error;
138mod g2d;
139#[path = "gl/mod.rs"]
140mod opengl_headless;
141
142// Use `edgefirst_tensor::PixelFormat` variants (Rgb, Rgba, Grey, etc.) and
143// `TensorDyn` / `Tensor<u8>` with `.format()` metadata instead.
144
145/// Flips the image data, then rotates it. Returns a new `TensorDyn`.
146fn rotate_flip_to_dyn(
147    src: &Tensor<u8>,
148    src_fmt: PixelFormat,
149    rotation: Rotation,
150    flip: Flip,
151    memory: Option<TensorMemory>,
152) -> Result<TensorDyn, Error> {
153    let src_w = src.width().unwrap();
154    let src_h = src.height().unwrap();
155    let channels = src_fmt.channels();
156
157    let (dst_w, dst_h) = match rotation {
158        Rotation::None | Rotation::Rotate180 => (src_w, src_h),
159        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src_h, src_w),
160    };
161
162    let dst = Tensor::<u8>::image(dst_w, dst_h, src_fmt, memory)?;
163    let src_map = src.map()?;
164    let mut dst_map = dst.map()?;
165
166    CPUProcessor::flip_rotate_ndarray_pf(
167        &src_map,
168        &mut dst_map,
169        dst_w,
170        dst_h,
171        channels,
172        rotation,
173        flip,
174    )?;
175    drop(dst_map);
176    drop(src_map);
177
178    Ok(TensorDyn::from(dst))
179}
180
/// Image rotation in quarter-turn steps, measured clockwise.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}

impl Rotation {
    /// Creates a `Rotation` from a clockwise angle in degrees. The angle is
    /// first normalized modulo 360 and must be a multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        let normalized = angle.rem_euclid(360);
        assert!(
            normalized % 90 == 0,
            "rotation angle is not a multiple of 90"
        );
        match normalized {
            0 => Rotation::None,
            90 => Rotation::Clockwise90,
            180 => Rotation::Rotate180,
            _ => Rotation::CounterClockwise90,
        }
    }
}
211
/// Mirror transform applied to the image during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No mirroring.
    None = 0,
    /// Vertical flip.
    // NOTE(review): the exact axis convention (top↔bottom vs. mirror about
    // the vertical axis) is defined by the backend implementations and not
    // visible here — confirm against `CPUProcessor::flip_rotate_ndarray_pf`.
    Vertical = 1,
    /// Horizontal flip (same axis-convention caveat as `Vertical`).
    Horizontal = 2,
}
218
/// Crop configuration for a conversion: an optional source rectangle, an
/// optional destination rectangle, and an optional fill color for the
/// destination area outside the cropped region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from; `None` means the full source.
    pub src_rect: Option<Rect>,
    /// Region of the destination image to write to; `None` means the full
    /// destination.
    pub dst_rect: Option<Rect>,
    /// Color for destination areas outside the cropped region (see
    /// [`Crop::with_dst_color`]); `None` leaves a color unspecified.
    pub dst_color: Option<[u8; 4]>,
}
225
226impl Default for Crop {
227    fn default() -> Self {
228        Crop::new()
229    }
230}
impl Crop {
    /// Creates a new `Crop` with default values (no cropping, no fill color).
    pub fn new() -> Self {
        Crop {
            src_rect: None,
            dst_rect: None,
            dst_color: None,
        }
    }

    /// Sets the source rectangle for cropping (builder style).
    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
        self.src_rect = src_rect;
        self
    }

    /// Sets the destination rectangle for cropping (builder style).
    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
        self.dst_rect = dst_rect;
        self
    }

    /// Sets the destination color for areas outside the cropped region
    /// (builder style).
    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
        self.dst_color = dst_color;
        self
    }

    /// Creates a new `Crop` with no cropping. Alias for [`Crop::new`].
    pub fn no_crop() -> Self {
        Crop::new()
    }

    /// Validate crop rectangles against explicit dimensions.
    ///
    /// A rectangle is valid when it lies entirely within its image
    /// (`left + width <= image width` and `top + height <= image height`);
    /// a `None` rectangle is always valid.
    ///
    /// # Errors
    ///
    /// Returns [`Error::CropInvalid`] naming whichever rectangle(s) exceed
    /// their image bounds.
    pub(crate) fn check_crop_dims(
        &self,
        src_w: usize,
        src_h: usize,
        dst_w: usize,
        dst_h: usize,
    ) -> Result<(), Error> {
        let src_ok = self
            .src_rect
            .is_none_or(|r| r.left + r.width <= src_w && r.top + r.height <= src_h);
        let dst_ok = self
            .dst_rect
            .is_none_or(|r| r.left + r.width <= dst_w && r.top + r.height <= dst_h);
        match (src_ok, dst_ok) {
            (true, true) => Ok(()),
            (true, false) => Err(Error::CropInvalid(format!(
                "Dest crop invalid: {:?}",
                self.dst_rect
            ))),
            (false, true) => Err(Error::CropInvalid(format!(
                "Src crop invalid: {:?}",
                self.src_rect
            ))),
            (false, false) => Err(Error::CropInvalid(format!(
                "Dest and Src crop invalid: {:?} {:?}",
                self.dst_rect, self.src_rect
            ))),
        }
    }

    /// Validate crop rectangles against `TensorDyn` source and destination.
    ///
    /// Tensors without width/height metadata are treated as 0×0, so any
    /// non-`None` rectangle with nonzero extent fails validation.
    ///
    /// # Errors
    ///
    /// Returns [`Error::CropInvalid`] when either rectangle exceeds its
    /// image bounds (see [`Crop::check_crop_dims`]).
    pub fn check_crop_dyn(
        &self,
        src: &edgefirst_tensor::TensorDyn,
        dst: &edgefirst_tensor::TensorDyn,
    ) -> Result<(), Error> {
        self.check_crop_dims(
            src.width().unwrap_or(0),
            src.height().unwrap_or(0),
            dst.width().unwrap_or(0),
            dst.height().unwrap_or(0),
        )
    }
}
309
/// Axis-aligned rectangle in pixel coordinates, with origin at the top-left.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// X coordinate of the left edge.
    pub left: usize,
    /// Y coordinate of the top edge.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
317
impl Rect {
    /// Creates a new `Rect` with the specified left, top, width, and height.
    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
        Self {
            left,
            top,
            width,
            height,
        }
    }

    /// Checks whether the rectangle lies entirely within the given
    /// `TensorDyn` image. Images without width/height metadata are treated
    /// as 0×0, so any rectangle with nonzero extent fails the check.
    pub fn check_rect_dyn(&self, image: &TensorDyn) -> bool {
        let w = image.width().unwrap_or(0);
        let h = image.height().unwrap_or(0);
        self.left + self.width <= w && self.top + self.height <= h
    }
}
336
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size.
    /// The image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image.
    /// * `crop` - Optional source/destination rectangles restricting the
    ///   conversion (see [`Crop`]).
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draw pre-decoded detection boxes and segmentation masks onto `dst`.
    ///
    /// Supports two segmentation modes based on the mask channel count:
    /// - **Instance segmentation** (`C=1`): one `Segmentation` per detection,
    ///   `segmentation` and `detect` are zipped.
    /// - **Semantic segmentation** (`C>1`): a single `Segmentation` covering
    ///   all classes; only the first element is used.
    ///
    /// # Format requirements
    ///
    /// - CPU backend: `dst` must be `RGBA` or `RGB`.
    /// - OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
    /// - G2D backend: not implemented (returns `NotImplemented`).
    ///
    /// An empty `segmentation` slice is valid — only bounding boxes are drawn.
    fn draw_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly
    /// at the output resolution using bilinear sampling.
    ///
    /// `detect` and `proto_data.mask_coefficients` must have the same length
    /// (enforced by zip — excess entries are silently ignored). An empty
    /// `detect` slice is valid and returns immediately after drawing nothing.
    ///
    /// # Format requirements
    ///
    /// Same as [`draw_masks`](Self::draw_masks). G2D returns `NotImplemented`.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()>;

    /// Decode masks into a compact atlas buffer.
    ///
    /// Used internally by the Python/C `decode_masks` APIs. The atlas is a
    /// compact vertical strip where each detection occupies a strip sized to
    /// its padded bounding box (not the full output resolution).
    ///
    /// `output_width` and `output_height` define the coordinate space for
    /// interpreting bounding boxes — individual mask regions are bbox-sized.
    /// Mask pixels are binary: `255` = presence, `0` = background.
    ///
    /// Returns `(atlas_pixels, regions)` where `regions` describes each
    /// detection's location and bbox within the atlas (see [`MaskRegion`]).
    ///
    /// G2D backend returns `NotImplemented`.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)>;

    /// Sets the colors used for rendering segmentation masks. Up to 20 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
432
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection and backend selection. The default configuration
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,

    /// Preferred compute backend.
    ///
    /// When set to a specific backend (not [`ComputeBackend::Auto`]), the
    /// processor initializes that backend plus CPU as a fallback; if the
    /// requested backend fails to initialize, a warning is logged and only
    /// the CPU backend remains available.
    /// This takes precedence over `EDGEFIRST_FORCE_BACKEND` and the
    /// `EDGEFIRST_DISABLE_*` environment variables.
    ///
    /// - [`ComputeBackend::OpenGl`]: init OpenGL + CPU, skip G2D
    /// - [`ComputeBackend::G2d`]: init G2D + CPU, skip OpenGL
    /// - [`ComputeBackend::Cpu`]: init CPU only
    /// - [`ComputeBackend::Auto`]: existing env-var-driven selection
    pub backend: ComputeBackend,
}
464
/// Compute backend selection for [`ImageProcessor`].
///
/// Use with [`ImageProcessorConfig::backend`] to select which backend the
/// processor should prefer. When a specific backend is selected, the
/// processor initializes that backend plus CPU as a fallback. When `Auto`
/// (the `Default`) is used, the existing environment-variable-driven
/// selection applies.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ComputeBackend {
    /// Auto-detect based on available hardware and environment variables.
    #[default]
    Auto,
    /// CPU-only processing (no hardware acceleration).
    Cpu,
    /// Prefer G2D hardware blitter (+ CPU fallback).
    G2d,
    /// Prefer OpenGL ES (+ CPU fallback).
    OpenGl,
}
483
/// Backend forced via the `EDGEFIRST_FORCE_BACKEND` environment variable.
///
/// When set, the [`ImageProcessor`] only initializes and dispatches to the
/// selected backend — no fallback chain is used.
///
/// Note: selecting a backend through [`ImageProcessorConfig::backend`] does
/// NOT set this — those paths keep the CPU fallback and leave
/// `forced_backend` as `None` (see [`ImageProcessor::with_config`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    Cpu,
    G2d,
    OpenGl,
}
495
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
///
/// Construct with [`ImageProcessor::new`] or [`ImageProcessor::with_config`];
/// each backend field is `Some` only when that backend initialized
/// successfully and was not disabled.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,

    /// When set, only the specified backend is used — no fallback chain.
    /// Set exclusively by the `EDGEFIRST_FORCE_BACKEND` environment variable.
    pub(crate) forced_backend: Option<ForcedBackend>,
}
519
// SAFETY: asserts that every backend handle held by ImageProcessor
// (CPUProcessor, G2DProcessor, GLProcessorThreaded) may be sent to and
// shared between threads.
// NOTE(review): this claim is not proven anywhere in this file — these
// blanket impls are unsound if any backend wraps thread-affine raw handles
// (e.g. an EGL context). Confirm each backend's thread-safety, or derive
// Send/Sync automatically by making the backends themselves Send/Sync.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
522
523impl ImageProcessor {
524    /// Creates a new `ImageProcessor` instance, initializing available
525    /// hardware converters based on the system capabilities and environment
526    /// variables.
527    ///
528    /// # Examples
529    /// ```rust
530    /// # use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
531    /// # use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
532    /// # fn main() -> Result<(), edgefirst_image::Error> {
533    /// let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
534    /// let src = load_image(image, Some(PixelFormat::Rgba), None)?;
535    /// let mut converter = ImageProcessor::new()?;
536    /// let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
537    /// converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
538    /// # Ok(())
539    /// # }
540    /// ```
541    pub fn new() -> Result<Self> {
542        Self::with_config(ImageProcessorConfig::default())
543    }
544
    /// Creates a new `ImageProcessor` with the given configuration.
    ///
    /// When [`ImageProcessorConfig::backend`] is set to a specific backend,
    /// environment variables are ignored and the processor initializes the
    /// requested backend plus CPU as a fallback.
    ///
    /// When `Auto`, the existing `EDGEFIRST_FORCE_BACKEND` and
    /// `EDGEFIRST_DISABLE_*` environment variables apply.
    ///
    /// # Errors
    ///
    /// Returns [`Error::ForcedBackendUnavailable`] when
    /// `EDGEFIRST_FORCE_BACKEND` names an unknown backend, or the forced
    /// backend fails to initialize or is unavailable on this platform.
    // `config` is otherwise unused on targets without Linux / the "opengl"
    // feature, hence the allow.
    #[allow(unused_variables)]
    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
        // ── Config-driven backend selection ──────────────────────────
        // When the caller explicitly requests a backend via the config,
        // skip all environment variable logic. Note that these paths keep
        // the CPU fallback and leave `forced_backend` unset.
        match config.backend {
            ComputeBackend::Cpu => {
                log::info!("ComputeBackend::Cpu — CPU only");
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: None,
                });
            }
            ComputeBackend::G2d => {
                log::info!("ComputeBackend::G2d — G2D + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // A G2D init failure is non-fatal here: warn and fall
                    // back to CPU-only.
                    let g2d = match G2DProcessor::new() {
                        Ok(g) => Some(g),
                        Err(e) => {
                            log::warn!("G2D requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d,
                        #[cfg(feature = "opengl")]
                        opengl: None,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("G2D requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::OpenGl => {
                log::info!("ComputeBackend::OpenGl — OpenGL + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // Like G2D above: an OpenGL init failure degrades to
                    // CPU-only rather than returning an error.
                    #[cfg(feature = "opengl")]
                    let opengl = match GLProcessorThreaded::new(config.egl_display) {
                        Ok(gl) => Some(gl),
                        Err(e) => {
                            log::warn!("OpenGL requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d: None,
                        #[cfg(feature = "opengl")]
                        opengl,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("OpenGL requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::Auto => { /* fall through to env-var logic below */ }
        }

        // ── EDGEFIRST_FORCE_BACKEND ──────────────────────────────────
        // When set, only the requested backend is initialised and no
        // fallback chain is used. Accepted values (case-insensitive):
        //   "cpu", "g2d", "opengl"
        if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
            let val_lower = val.to_lowercase();
            let forced = match val_lower.as_str() {
                "cpu" => ForcedBackend::Cpu,
                "g2d" => ForcedBackend::G2d,
                "opengl" => ForcedBackend::OpenGl,
                other => {
                    return Err(Error::ForcedBackendUnavailable(format!(
                        "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
                    )));
                }
            };

            log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");

            // Unlike the config-driven paths above, a forced backend that
            // fails to initialize is a hard error, and CPU is NOT kept as
            // a fallback for g2d/opengl.
            return match forced {
                ForcedBackend::Cpu => Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: Some(ForcedBackend::Cpu),
                }),
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    {
                        let g2d = G2DProcessor::new().map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "g2d forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: Some(g2d),
                            #[cfg(feature = "opengl")]
                            opengl: None,
                            forced_backend: Some(ForcedBackend::G2d),
                        })
                    }
                    #[cfg(not(target_os = "linux"))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "g2d backend is only available on Linux".into(),
                        ))
                    }
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    {
                        let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "opengl forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: None,
                            opengl: Some(opengl),
                            forced_backend: Some(ForcedBackend::OpenGl),
                        })
                    }
                    #[cfg(not(all(target_os = "linux", feature = "opengl")))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "opengl backend requires Linux with the 'opengl' feature enabled"
                                .into(),
                        ))
                    }
                }
            };
        }

        // ── Existing DISABLE logic (unchanged) ──────────────────────
        // Each EDGEFIRST_DISABLE_* variable counts as "set" for any value
        // other than "0" or "false" (case-insensitive).
        #[cfg(target_os = "linux")]
        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_G2D is set");
            None
        } else {
            match G2DProcessor::new() {
                Ok(g2d_converter) => Some(g2d_converter),
                Err(err) => {
                    log::warn!("Failed to initialize G2D converter: {err:?}");
                    None
                }
            }
        };

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_GL is set");
            None
        } else {
            match GLProcessorThreaded::new(config.egl_display) {
                Ok(gl_converter) => Some(gl_converter),
                Err(err) => {
                    log::warn!("Failed to initialize GL converter: {err:?}");
                    None
                }
            }
        };

        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_CPU is set");
            None
        } else {
            Some(CPUProcessor::new())
        };
        Ok(Self {
            cpu,
            #[cfg(target_os = "linux")]
            g2d,
            #[cfg(target_os = "linux")]
            #[cfg(feature = "opengl")]
            opengl,
            forced_backend: None,
        })
    }
765
766    /// Sets the interpolation mode for int8 proto textures on the OpenGL
767    /// backend. No-op if OpenGL is not available.
768    #[cfg(target_os = "linux")]
769    #[cfg(feature = "opengl")]
770    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
771        if let Some(ref mut gl) = self.opengl {
772            gl.set_int8_interpolation_mode(mode)?;
773        }
774        Ok(())
775    }
776
777    /// Create a [`TensorDyn`] image with the best available memory backend.
778    ///
779    /// Priority: DMA-buf → PBO (byte-sized types: u8, i8) → system memory.
780    ///
781    /// Use this method instead of [`TensorDyn::image()`] when the tensor will
782    /// be used with [`ImageProcessor::convert()`]. It selects the optimal
783    /// memory backing (including PBO for GPU zero-copy) which direct
784    /// allocation cannot achieve.
785    ///
786    /// This method is on [`ImageProcessor`] rather than [`ImageProcessorTrait`]
787    /// because optimal allocation requires knowledge of the active compute
788    /// backends (e.g. the GL context handle for PBO allocation). Individual
789    /// backend implementations ([`CPUProcessor`], etc.) do not have this
790    /// cross-backend visibility.
791    ///
792    /// # Arguments
793    ///
794    /// * `width` - Image width in pixels
795    /// * `height` - Image height in pixels
796    /// * `format` - Pixel format
797    /// * `dtype` - Element data type (e.g. `DType::U8`, `DType::I8`)
798    /// * `memory` - Optional memory type override; when `None`, the best
799    ///   available backend is selected automatically.
800    ///
801    /// # Returns
802    ///
803    /// A [`TensorDyn`] backed by the highest-performance memory type
804    /// available on this system.
805    ///
806    /// # Errors
807    ///
808    /// Returns an error if all allocation strategies fail.
    pub fn create_image(
        &self,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> Result<TensorDyn> {
        // If an explicit memory type is requested, honour it directly.
        // Any allocation error is propagated — no fallback is attempted.
        if let Some(mem) = memory {
            return Ok(TensorDyn::image(width, height, format, dtype, Some(mem))?);
        }

        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
        // as the preferred transfer path (PBO is better than DMA in that case).
        #[cfg(target_os = "linux")]
        {
            #[cfg(feature = "opengl")]
            let gl_uses_pbo = self
                .opengl
                .as_ref()
                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
            #[cfg(not(feature = "opengl"))]
            let gl_uses_pbo = false;

            if !gl_uses_pbo {
                // DMA allocation failure is non-fatal: silently fall through
                // to the PBO / system-memory strategies below.
                if let Ok(img) = TensorDyn::image(
                    width,
                    height,
                    format,
                    dtype,
                    Some(edgefirst_tensor::TensorMemory::Dma),
                ) {
                    return Ok(img);
                }
            }
        }

        // Try PBO (if GL available).
        // PBO buffers are u8-sized; the int8 shader emulates i8 output via
        // XOR 0x80 on the same underlying buffer, so both U8 and I8 work.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if dtype.size() == 1 {
            if let Some(gl) = &self.opengl {
                match gl.create_pbo_image(width, height, format) {
                    Ok(t) => {
                        if dtype == DType::I8 {
                            // SAFETY: Tensor<u8> and Tensor<i8> are layout-
                            // identical (same element size, no T-dependent
                            // drop glue). The int8 shader applies XOR 0x80
                            // on the same PBO buffer. Same rationale as
                            // gl::processor::tensor_i8_as_u8_mut.
                            // Invariant: PBO tensors never have chroma
                            // (create_pbo_image → Tensor::wrap sets it None).
                            debug_assert!(
                                t.chroma().is_none(),
                                "PBO i8 transmute requires chroma == None"
                            );
                            let t_i8: Tensor<i8> = unsafe { std::mem::transmute(t) };
                            return Ok(TensorDyn::from(t_i8));
                        }
                        return Ok(TensorDyn::from(t));
                    }
                    // PBO failure is also non-fatal; log and use system memory.
                    Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
                }
            }
        }

        // Fallback to Mem
        // Last resort: plain system memory. Errors here are propagated since
        // there is no further strategy to try.
        Ok(TensorDyn::image(
            width,
            height,
            format,
            dtype,
            Some(edgefirst_tensor::TensorMemory::Mem),
        )?)
    }
887
888    /// Import an external DMA-BUF image.
889    ///
890    /// Each [`PlaneDescriptor`] owns an already-duped fd; this method
891    /// consumes the descriptors and takes ownership of those fds (whether
892    /// the call succeeds or fails).
893    ///
894    /// The caller must ensure the DMA-BUF allocation is large enough for the
895    /// specified width, height, format, and any stride/offset on the plane
896    /// descriptors. No buffer-size validation is performed; an undersized
897    /// buffer may cause GPU faults or EGL import failure.
898    ///
899    /// # Arguments
900    ///
901    /// * `image` - Plane descriptor for the primary (or only) plane
902    /// * `chroma` - Optional plane descriptor for the UV chroma plane
903    ///   (required for multiplane NV12)
904    /// * `width` - Image width in pixels
905    /// * `height` - Image height in pixels
906    /// * `format` - Pixel format of the buffer
907    /// * `dtype` - Element data type (e.g. `DType::U8`)
908    ///
909    /// # Returns
910    ///
911    /// A `TensorDyn` configured as an image.
912    ///
913    /// # Errors
914    ///
915    /// * [`Error::NotSupported`] if `chroma` is `Some` for a non-semi-planar
916    ///   format, or multiplane NV16 (not yet supported), or the fd is not
917    ///   DMA-backed
918    /// * [`Error::InvalidShape`] if NV12 height is odd
919    ///
920    /// # Platform
921    ///
922    /// Linux only.
923    ///
924    /// # Examples
925    ///
926    /// ```rust,ignore
927    /// use edgefirst_tensor::PlaneDescriptor;
928    ///
929    /// // Single-plane RGBA
930    /// let pd = PlaneDescriptor::new(fd.as_fd())?;
931    /// let src = proc.import_image(pd, None, 1920, 1080, PixelFormat::Rgba, DType::U8)?;
932    ///
933    /// // Multi-plane NV12 with stride
934    /// let y_pd = PlaneDescriptor::new(y_fd.as_fd())?.with_stride(2048);
935    /// let uv_pd = PlaneDescriptor::new(uv_fd.as_fd())?.with_stride(2048);
936    /// let src = proc.import_image(y_pd, Some(uv_pd), 1920, 1080,
937    ///                             PixelFormat::Nv12, DType::U8)?;
938    /// ```
    #[cfg(target_os = "linux")]
    pub fn import_image(
        &self,
        image: edgefirst_tensor::PlaneDescriptor,
        chroma: Option<edgefirst_tensor::PlaneDescriptor>,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
    ) -> Result<TensorDyn> {
        use edgefirst_tensor::{Tensor, TensorMemory};

        // Capture stride/offset from descriptors before consuming them
        // (`into_fd()` below takes ownership of the descriptors).
        let image_stride = image.stride();
        let image_offset = image.offset();
        let chroma_stride = chroma.as_ref().and_then(|c| c.stride());
        let chroma_offset = chroma.as_ref().and_then(|c| c.offset());

        if let Some(chroma_pd) = chroma {
            // ── Multiplane path ──────────────────────────────────────
            // Multiplane tensors are backed by Tensor<u8> (or transmuted to
            // Tensor<i8>). Reject other dtypes to avoid silently returning a
            // tensor with the wrong element type.
            if dtype != DType::U8 && dtype != DType::I8 {
                return Err(Error::NotSupported(format!(
                    "multiplane import only supports U8/I8, got {dtype:?}"
                )));
            }
            if format.layout() != PixelLayout::SemiPlanar {
                return Err(Error::NotSupported(format!(
                    "import_image with chroma requires a semi-planar format, got {format:?}"
                )));
            }

            let chroma_h = match format {
                PixelFormat::Nv12 => {
                    // NV12 subsamples chroma 2:1 vertically, so an odd height
                    // cannot be represented.
                    if !height.is_multiple_of(2) {
                        return Err(Error::InvalidShape(format!(
                            "NV12 requires even height, got {height}"
                        )));
                    }
                    height / 2
                }
                // NV16 multiplane will be supported in a future release;
                // the GL backend currently only handles NV12 plane1 attributes.
                PixelFormat::Nv16 => {
                    return Err(Error::NotSupported(
                        "multiplane NV16 is not yet supported; use contiguous NV16 instead".into(),
                    ))
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported semi-planar format: {format:?}"
                    )))
                }
            };

            let luma = Tensor::<u8>::from_fd(image.into_fd(), &[height, width], Some("luma"))?;
            if luma.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "luma fd must be DMA-backed, got {:?}",
                    luma.memory()
                )));
            }

            // Chroma rows span `width` bytes: NV12 interleaves U/V at half
            // horizontal resolution, so the chroma row byte-width matches the
            // luma row.
            let chroma_tensor =
                Tensor::<u8>::from_fd(chroma_pd.into_fd(), &[chroma_h, width], Some("chroma"))?;
            if chroma_tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "chroma fd must be DMA-backed, got {:?}",
                    chroma_tensor.memory()
                )));
            }

            // from_planes creates the combined tensor with format set,
            // preserving luma's row_stride (currently None since luma was raw).
            let mut tensor = Tensor::<u8>::from_planes(luma, chroma_tensor, format)?;

            // Apply stride/offset to the combined tensor (luma plane)
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }

            // Apply stride/offset to the chroma sub-tensor.
            // The chroma tensor is a raw 2D [chroma_h, width] tensor without
            // format metadata, so we validate stride manually rather than
            // using set_row_stride (which requires format).
            if let Some(chroma_ref) = tensor.chroma_mut() {
                if let Some(s) = chroma_stride {
                    if s < width {
                        return Err(Error::InvalidShape(format!(
                            "chroma stride {s} < minimum {width} for {format:?}"
                        )));
                    }
                    chroma_ref.set_row_stride_unchecked(s);
                }
                if let Some(o) = chroma_offset {
                    chroma_ref.set_plane_offset(o);
                }
            }

            if dtype == DType::I8 {
                // SAFETY: Tensor<u8> and Tensor<i8> have identical layout because
                // the struct contains only type-erased storage (OwnedFd, shape, name),
                // no inline T values. This assertion catches layout drift at compile time.
                const {
                    assert!(std::mem::size_of::<Tensor<u8>>() == std::mem::size_of::<Tensor<i8>>());
                    assert!(
                        std::mem::align_of::<Tensor<u8>>() == std::mem::align_of::<Tensor<i8>>()
                    );
                }
                let tensor_i8: Tensor<i8> = unsafe { std::mem::transmute(tensor) };
                return Ok(TensorDyn::from(tensor_i8));
            }
            Ok(TensorDyn::from(tensor))
        } else {
            // ── Single-plane path ────────────────────────────────────
            // Shape depends on the pixel layout: packed stores channels
            // innermost, planar outermost, and semi-planar folds luma+chroma
            // into a single taller 2D plane.
            let shape = match format.layout() {
                PixelLayout::Packed => vec![height, width, format.channels()],
                PixelLayout::Planar => vec![format.channels(), height, width],
                PixelLayout::SemiPlanar => {
                    let total_h = match format {
                        PixelFormat::Nv12 => {
                            if !height.is_multiple_of(2) {
                                return Err(Error::InvalidShape(format!(
                                    "NV12 requires even height, got {height}"
                                )));
                            }
                            height * 3 / 2
                        }
                        PixelFormat::Nv16 => height * 2,
                        _ => {
                            return Err(Error::InvalidShape(format!(
                                "unknown semi-planar height multiplier for {format:?}"
                            )))
                        }
                    };
                    vec![total_h, width]
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported pixel layout for import_image: {:?}",
                        format.layout()
                    )));
                }
            };
            let tensor = TensorDyn::from_fd(image.into_fd(), &shape, dtype, None)?;
            if tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "import_image requires DMA-backed fd, got {:?}",
                    tensor.memory()
                )));
            }
            let mut tensor = tensor.with_format(format)?;
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }
            Ok(tensor)
        }
    }
1105}
1106
impl ImageProcessorTrait for ImageProcessor {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated
    ///
    /// Prefer hardware accelerators when available, falling back to CPU if
    /// necessary.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let start = Instant::now();
        let src_fmt = src.format();
        let dst_fmt = dst.format();
        log::trace!(
            "convert: {src_fmt:?}({:?}/{:?}) → {dst_fmt:?}({:?}/{:?}), \
             rotation={rotation:?}, flip={flip:?}, backend={:?}",
            src.dtype(),
            src.memory(),
            dst.dtype(),
            dst.memory(),
            self.forced_backend,
        );

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        let r = cpu.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=cpu result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                // On non-Linux builds the cfg'd `if let` below compiles away,
                // so forcing g2d always reports ForcedBackendUnavailable.
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    if let Some(g2d) = self.g2d.as_mut() {
                        let r = g2d.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=g2d result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("g2d".into()))
                }
                // Same pattern: without linux + the "opengl" feature this arm
                // always reports ForcedBackendUnavailable.
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        let r = opengl.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=opengl result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // ── Auto fallback chain: OpenGL → G2D → CPU ──────────────────
        // A backend error here just means "this backend declined"; the next
        // backend in the chain is tried.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            match opengl.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=opengl for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto opengl declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        #[cfg(target_os = "linux")]
        if let Some(g2d) = self.g2d.as_mut() {
            match g2d.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=g2d for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto g2d declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        // CPU is the last backend in the chain, so its error is terminal.
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=cpu for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto cpu failed {src_fmt:?}→{dst_fmt:?}: {e}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Draws detection boxes and segmentation overlays into `dst`, trying the
    /// OpenGL backend first and falling back to CPU. G2D cannot render to an
    /// image and is never used for this operation.
    fn draw_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()> {
        let start = Instant::now();

        // Nothing to draw — succeed without touching any backend.
        if detect.is_empty() && segmentation.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("draw_masks started with opengl in {:?}", start.elapsed());
            match opengl.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    // GL decline is non-fatal; fall through to CPU below.
                    log::trace!("draw_masks didn't work with opengl: {e:?}")
                }
            }
        }
        log::trace!("draw_masks started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks didn't work with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Draws proto-coefficient masks into `dst`. When OpenGL is available,
    /// uses a hybrid path (CPU materializes segmentations, GL draws the
    /// overlay); otherwise falls back to the pure-CPU implementation.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()> {
        let start = Instant::now();

        // No detections means no masks to decode or draw.
        if detect.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks_proto".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        // Hybrid path: CPU materialize + GL overlay (benchmarked faster than
        // full-GPU draw_masks_proto on all tested platforms: 27× on imx8mp,
        // 4× on imx95, 2.5× on rpi5, 1.6× on x86).
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            // The hybrid path requires the CPU backend for materialization,
            // even though GL performs the final draw.
            let Some(cpu) = self.cpu.as_ref() else {
                return Err(Error::Internal(
                    "draw_masks_proto requires CPU backend for hybrid path".into(),
                ));
            };
            log::trace!(
                "draw_masks_proto started with hybrid (cpu+opengl) in {:?}",
                start.elapsed()
            );
            let segmentation = cpu.materialize_segmentations(detect, proto_data)?;
            match opengl.draw_masks(dst, detect, &segmentation) {
                Ok(_) => {
                    log::trace!(
                        "draw_masks_proto with hybrid (cpu+opengl) in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks_proto hybrid path failed, falling back to cpu: {e:?}");
                }
            }
        }

        // CPU-only fallback (no OpenGL, or hybrid GL overlay failed)
        let Some(cpu) = self.cpu.as_mut() else {
            return Err(Error::Internal(
                "draw_masks_proto requires CPU backend for fallback path".into(),
            ));
        };
        log::trace!("draw_masks_proto started with cpu in {:?}", start.elapsed());
        cpu.draw_masks_proto(dst, detect, proto_data)
    }

    /// Applies per-class overlay colors to a rendering backend.
    ///
    /// NOTE(review): in the auto path the colors are applied only to the
    /// first backend that accepts them (OpenGL, then CPU). If a later draw
    /// call falls back from GL to CPU, the CPU backend may hold stale
    /// colors — confirm whether both backends should be updated here.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
        let start = Instant::now();

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support set_class_colors".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("image started with opengl in {:?}", start.elapsed());
            match opengl.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with opengl: {e:?}")
                }
            }
        }
        log::trace!("image started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Decodes per-detection masks into a packed atlas buffer plus the
    /// region metadata describing each mask's placement.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)> {
        if detect.is_empty() {
            return Ok((Vec::new(), Vec::new()));
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support decode_masks_atlas".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let has_opengl = self.opengl.is_some();
            if has_opengl {
                let opengl = self.opengl.as_mut().unwrap();
                // `proto_data` is moved into this call, so on GL failure we
                // must return the error — unlike convert/draw_masks there is
                // no CPU retry (the data is already consumed).
                match opengl.decode_masks_atlas(detect, proto_data, output_width, output_height) {
                    Ok(r) => return Ok(r),
                    Err(e) => {
                        log::trace!("decode_masks_atlas didn't work with opengl: {e:?}");
                        return Err(e);
                    }
                }
            }
        }
        // CPU fallback: render per-detection masks and pack into compact atlas
        if let Some(cpu) = self.cpu.as_mut() {
            return cpu.decode_masks_atlas(detect, proto_data, output_width, output_height);
        }
        Err(Error::NoConverter)
    }
}
1499
1500// ---------------------------------------------------------------------------
1501// Image loading / saving helpers
1502// ---------------------------------------------------------------------------
1503
1504/// Read EXIF orientation from raw EXIF bytes and return (Rotation, Flip).
1505fn read_exif_orientation(exif_bytes: &[u8]) -> (Rotation, Flip) {
1506    let exifreader = exif::Reader::new();
1507    let Ok(exif_) = exifreader.read_raw(exif_bytes.to_vec()) else {
1508        return (Rotation::None, Flip::None);
1509    };
1510    let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
1511        return (Rotation::None, Flip::None);
1512    };
1513    match orientation.value.get_uint(0) {
1514        Some(1) => (Rotation::None, Flip::None),
1515        Some(2) => (Rotation::None, Flip::Horizontal),
1516        Some(3) => (Rotation::Rotate180, Flip::None),
1517        Some(4) => (Rotation::Rotate180, Flip::Horizontal),
1518        Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
1519        Some(6) => (Rotation::Clockwise90, Flip::None),
1520        Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
1521        Some(8) => (Rotation::CounterClockwise90, Flip::None),
1522        Some(v) => {
1523            log::warn!("broken orientation EXIF value: {v}");
1524            (Rotation::None, Flip::None)
1525        }
1526        None => (Rotation::None, Flip::None),
1527    }
1528}
1529
1530/// Map a [`PixelFormat`] to the zune-jpeg `ColorSpace` for decoding.
1531/// Returns `None` for formats that the JPEG decoder cannot output directly.
1532fn pixelfmt_to_colorspace(fmt: PixelFormat) -> Option<ColorSpace> {
1533    match fmt {
1534        PixelFormat::Rgb => Some(ColorSpace::RGB),
1535        PixelFormat::Rgba => Some(ColorSpace::RGBA),
1536        PixelFormat::Grey => Some(ColorSpace::Luma),
1537        _ => None,
1538    }
1539}
1540
1541/// Map a zune-jpeg `ColorSpace` to a [`PixelFormat`].
1542fn colorspace_to_pixelfmt(cs: ColorSpace) -> Option<PixelFormat> {
1543    match cs {
1544        ColorSpace::RGB => Some(PixelFormat::Rgb),
1545        ColorSpace::RGBA => Some(PixelFormat::Rgba),
1546        ColorSpace::Luma => Some(PixelFormat::Grey),
1547        _ => None,
1548    }
1549}
1550
/// Load a JPEG image from raw bytes and return a [`TensorDyn`].
///
/// `format` selects the desired output pixel format; RGB, RGBA, and GREY
/// map directly to decoder colorspaces, anything else is rejected. `None`
/// decodes to RGB. `memory` selects the backing memory of the returned
/// tensor. If the file carries an EXIF orientation tag, the decoded image
/// is rotated/flipped accordingly before being returned.
fn load_jpeg(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    // Request the target colorspace from the decoder up front when it can
    // produce it natively; fail early for unsupported formats.
    let colour = match format {
        Some(f) => pixelfmt_to_colorspace(f)
            .ok_or_else(|| Error::NotSupported(format!("Unsupported image format {f:?}")))?,
        None => ColorSpace::RGB,
    };
    let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
    let mut decoder = JpegDecoder::new_with_options(image, options);
    decoder.decode_headers()?;

    let image_info = decoder.info().ok_or(Error::Internal(
        "JPEG did not return decoded image info".to_string(),
    ))?;

    // The decoder reports what it will actually emit, which may differ
    // from the requested colorspace (handled by the conversion below).
    let converted_cs = decoder
        .get_output_colorspace()
        .ok_or(Error::Internal("No output colorspace".to_string()))?;

    let converted_fmt = colorspace_to_pixelfmt(converted_cs).ok_or(Error::NotSupported(
        "Unsupported JPEG decoder output".to_string(),
    ))?;

    // With no explicit format requested, keep whatever the decoder emits.
    let dest_fmt = format.unwrap_or(converted_fmt);

    // EXIF orientation (if present) decides whether a rotate/flip pass is
    // needed after decoding.
    let (rotation, flip) = decoder
        .exif()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));

    let w = image_info.width as usize;
    let h = image_info.height as usize;

    // Fast path: no orientation correction — decode straight into the
    // destination tensor (via a CPU scratch tensor only if the pixel
    // format needs converting).
    if (rotation, flip) == (Rotation::None, Flip::None) {
        let mut img = Tensor::<u8>::image(w, h, dest_fmt, memory)?;

        if converted_fmt != dest_fmt {
            let tmp = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
            decoder.decode_into(&mut tmp.map()?)?;
            CPUProcessor::convert_format_pf(&tmp, &mut img, converted_fmt, dest_fmt)?;
            return Ok(TensorDyn::from(img));
        }
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }

    // Orientation correction required: decode (and convert, if needed)
    // into CPU scratch memory first, then rotate/flip into the final
    // tensor using the caller-requested memory type.
    let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;

    if converted_fmt != dest_fmt {
        let tmp2 = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
        decoder.decode_into(&mut tmp2.map()?)?;
        CPUProcessor::convert_format_pf(&tmp2, &mut tmp, converted_fmt, dest_fmt)?;
    } else {
        decoder.decode_into(&mut tmp.map()?)?;
    }

    rotate_flip_to_dyn(&tmp, dest_fmt, rotation, flip, memory)
}
1613
/// Load a PNG image from raw bytes and return a [`TensorDyn`].
///
/// Only RGB and RGBA outputs are supported (`None` defaults to RGB); for
/// RGBA the decoder is asked to add an alpha channel. An EXIF orientation
/// chunk, if present, is applied to the decoded image.
fn load_png(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    let fmt = format.unwrap_or(PixelFormat::Rgb);
    // The PNG path only toggles the alpha channel, so anything other than
    // RGB/RGBA is rejected up front.
    let alpha = match fmt {
        PixelFormat::Rgb => false,
        PixelFormat::Rgba => true,
        _ => {
            return Err(Error::NotImplemented(
                "Unsupported image format".to_string(),
            ));
        }
    };

    // Animated PNGs are decoded as a single (first) frame.
    let options = DecoderOptions::default()
        .png_set_add_alpha_channel(alpha)
        .png_set_decode_animated(false);
    let mut decoder = PngDecoder::new_with_options(image, options);
    decoder.decode_headers()?;
    let image_info = decoder.get_info().ok_or(Error::Internal(
        "PNG did not return decoded image info".to_string(),
    ))?;

    // Orientation comes from an optional EXIF chunk embedded in the PNG.
    let (rotation, flip) = image_info
        .exif
        .as_ref()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));

    // Fast path: decode directly into the destination tensor.
    if (rotation, flip) == (Rotation::None, Flip::None) {
        let img = Tensor::<u8>::image(image_info.width, image_info.height, fmt, memory)?;
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }

    // Otherwise decode into CPU scratch memory and rotate/flip into the
    // final tensor with the caller-requested memory type.
    let tmp = Tensor::<u8>::image(
        image_info.width,
        image_info.height,
        fmt,
        Some(TensorMemory::Mem),
    )?;
    decoder.decode_into(&mut tmp.map()?)?;

    rotate_flip_to_dyn(&tmp, fmt, rotation, flip, memory)
}
1662
1663/// Load an image from raw bytes (JPEG or PNG) and return a [`TensorDyn`].
1664///
1665/// The optional `format` specifies the desired output pixel format (e.g.,
1666/// [`PixelFormat::Rgb`], [`PixelFormat::Rgba`]); if `None`, the native
1667/// format of the file is used (typically RGB for JPEG).
1668///
1669/// # Examples
1670/// ```rust
1671/// use edgefirst_image::load_image;
1672/// use edgefirst_tensor::PixelFormat;
1673/// # fn main() -> Result<(), edgefirst_image::Error> {
1674/// let jpeg = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
1675/// let img = load_image(jpeg, Some(PixelFormat::Rgb), None)?;
1676/// assert_eq!(img.width(), Some(1280));
1677/// assert_eq!(img.height(), Some(720));
1678/// # Ok(())
1679/// # }
1680/// ```
1681pub fn load_image(
1682    image: &[u8],
1683    format: Option<PixelFormat>,
1684    memory: Option<TensorMemory>,
1685) -> Result<TensorDyn> {
1686    if let Ok(i) = load_jpeg(image, format, memory) {
1687        return Ok(i);
1688    }
1689    if let Ok(i) = load_png(image, format, memory) {
1690        return Ok(i);
1691    }
1692    Err(Error::NotSupported(
1693        "Could not decode as jpeg or png".to_string(),
1694    ))
1695}
1696
1697/// Save a [`TensorDyn`] image as a JPEG file.
1698///
1699/// Only packed RGB and RGBA formats are supported.
1700pub fn save_jpeg(tensor: &TensorDyn, path: impl AsRef<std::path::Path>, quality: u8) -> Result<()> {
1701    let t = tensor.as_u8().ok_or(Error::UnsupportedFormat(
1702        "save_jpeg requires u8 tensor".to_string(),
1703    ))?;
1704    let fmt = t.format().ok_or(Error::NotAnImage)?;
1705    if fmt.layout() != PixelLayout::Packed {
1706        return Err(Error::NotImplemented(
1707            "Saving planar images is not supported".to_string(),
1708        ));
1709    }
1710
1711    let colour = match fmt {
1712        PixelFormat::Rgb => jpeg_encoder::ColorType::Rgb,
1713        PixelFormat::Rgba => jpeg_encoder::ColorType::Rgba,
1714        _ => {
1715            return Err(Error::NotImplemented(
1716                "Unsupported image format for saving".to_string(),
1717            ));
1718        }
1719    };
1720
1721    let w = t.width().ok_or(Error::NotAnImage)?;
1722    let h = t.height().ok_or(Error::NotAnImage)?;
1723    let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
1724    let tensor_map = t.map()?;
1725
1726    encoder.encode(&tensor_map, w as u16, h as u16, colour)?;
1727
1728    Ok(())
1729}
1730
1731pub(crate) struct FunctionTimer<T: Display> {
1732    name: T,
1733    start: std::time::Instant,
1734}
1735
1736impl<T: Display> FunctionTimer<T> {
1737    pub fn new(name: T) -> Self {
1738        Self {
1739            name,
1740            start: std::time::Instant::now(),
1741        }
1742    }
1743}
1744
1745impl<T: Display> Drop for FunctionTimer<T> {
1746    fn drop(&mut self) {
1747        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
1748    }
1749}
1750
/// Default RGBA color palette (normalized 0..1 components, alpha fixed at
/// 0.7) used as the source table for [`DEFAULT_COLORS_U8`].
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1773
1774const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
1775    let mut result = [[0; M]; N];
1776    let mut i = 0;
1777    while i < N {
1778        let mut j = 0;
1779        while j < M {
1780            result[i][j] = (a[i][j] * 255.0).round() as u8;
1781            j += 1;
1782        }
1783        i += 1;
1784    }
1785    result
1786}
1787
/// Byte (0-255) version of [`DEFAULT_COLORS`], computed at compile time.
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
1789
1790#[cfg(test)]
1791#[cfg_attr(coverage_nightly, coverage(off))]
1792mod image_tests {
1793    use super::*;
1794    use crate::{CPUProcessor, Rotation};
1795    #[cfg(target_os = "linux")]
1796    use edgefirst_tensor::is_dma_available;
1797    use edgefirst_tensor::{TensorMapTrait, TensorMemory, TensorTrait};
1798    use image::buffer::ConvertBuffer;
1799
1800    /// Test helper: call `ImageProcessorTrait::convert()` on two `TensorDyn`s
1801    /// by going through the `TensorDyn` API.
1802    ///
1803    /// Returns the `(src_image, dst_image)` reconstructed from the TensorDyn
1804    /// round-trip so the caller can feed them to `compare_images` etc.
1805    fn convert_img(
1806        proc: &mut dyn ImageProcessorTrait,
1807        src: TensorDyn,
1808        dst: TensorDyn,
1809        rotation: Rotation,
1810        flip: Flip,
1811        crop: Crop,
1812    ) -> (Result<()>, TensorDyn, TensorDyn) {
1813        let src_fourcc = src.format().unwrap();
1814        let dst_fourcc = dst.format().unwrap();
1815        let src_dyn = src;
1816        let mut dst_dyn = dst;
1817        let result = proc.convert(&src_dyn, &mut dst_dyn, rotation, flip, crop);
1818        let src_back = {
1819            let mut __t = src_dyn.into_u8().unwrap();
1820            __t.set_format(src_fourcc).unwrap();
1821            TensorDyn::from(__t)
1822        };
1823        let dst_back = {
1824            let mut __t = dst_dyn.into_u8().unwrap();
1825            __t.set_format(dst_fourcc).unwrap();
1826            TensorDyn::from(__t)
1827        };
1828        (result, src_back, dst_back)
1829    }
1830
    // Runs once before any test (via ctor): install env_logger with a
    // default "info" filter unless RUST_LOG overrides it.
    #[ctor::ctor]
    fn init() {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }
1835
    /// Expands to the short name of the enclosing function (module path
    /// stripped), used to label image comparisons in these tests.
    macro_rules! function {
        () => {{
            // type_name of the nested fn `f` is "path::to::enclosing::f";
            // strip the trailing "::f" (3 chars) and the leading path.
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
1851
1852    #[test]
1853    fn test_invalid_crop() {
1854        let src = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
1855        let dst = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
1856
1857        let crop = Crop::new()
1858            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1859            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
1860
1861        let result = crop.check_crop_dyn(&src, &dst);
1862        assert!(matches!(
1863            result,
1864            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
1865        ));
1866
1867        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
1868        let result = crop.check_crop_dyn(&src, &dst);
1869        assert!(matches!(
1870            result,
1871            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
1872        ));
1873
1874        let crop = crop
1875            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1876            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
1877        let result = crop.check_crop_dyn(&src, &dst);
1878        assert!(matches!(
1879            result,
1880            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
1881        ));
1882
1883        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
1884
1885        let result = crop.check_crop_dyn(&src, &dst);
1886        assert!(result.is_ok());
1887    }
1888
1889    #[test]
1890    fn test_invalid_tensor_format() -> Result<(), Error> {
1891        // 4D tensor cannot be set to a 3-channel pixel format
1892        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4, 1], None, None)?;
1893        let result = tensor.set_format(PixelFormat::Rgb);
1894        assert!(result.is_err(), "4D tensor should reject set_format");
1895
1896        // Tensor with wrong channel count for the format
1897        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4], None, None)?;
1898        let result = tensor.set_format(PixelFormat::Rgb);
1899        assert!(result.is_err(), "4-channel tensor should reject RGB format");
1900
1901        Ok(())
1902    }
1903
1904    #[test]
1905    fn test_invalid_image_file() -> Result<(), Error> {
1906        let result = crate::load_image(&[123; 5000], None, None);
1907        assert!(matches!(
1908            result,
1909            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1910
1911        Ok(())
1912    }
1913
1914    #[test]
1915    fn test_invalid_jpeg_format() -> Result<(), Error> {
1916        let result = crate::load_image(&[123; 5000], Some(PixelFormat::Yuyv), None);
1917        assert!(matches!(
1918            result,
1919            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1920
1921        Ok(())
1922    }
1923
1924    #[test]
1925    fn test_load_resize_save() {
1926        let file = include_bytes!(concat!(
1927            env!("CARGO_MANIFEST_DIR"),
1928            "/../../testdata/zidane.jpg"
1929        ));
1930        let img = crate::load_image(file, Some(PixelFormat::Rgba), None).unwrap();
1931        assert_eq!(img.width(), Some(1280));
1932        assert_eq!(img.height(), Some(720));
1933
1934        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None).unwrap();
1935        let mut converter = CPUProcessor::new();
1936        let (result, _img, dst) = convert_img(
1937            &mut converter,
1938            img,
1939            dst,
1940            Rotation::None,
1941            Flip::None,
1942            Crop::no_crop(),
1943        );
1944        result.unwrap();
1945        assert_eq!(dst.width(), Some(640));
1946        assert_eq!(dst.height(), Some(360));
1947
1948        crate::save_jpeg(&dst, "zidane_resized.jpg", 80).unwrap();
1949
1950        let file = std::fs::read("zidane_resized.jpg").unwrap();
1951        let img = crate::load_image(&file, None, None).unwrap();
1952        assert_eq!(img.width(), Some(640));
1953        assert_eq!(img.height(), Some(360));
1954        assert_eq!(img.format().unwrap(), PixelFormat::Rgb);
1955    }
1956
1957    #[test]
1958    fn test_from_tensor_planar() -> Result<(), Error> {
1959        let mut tensor = Tensor::new(&[3, 720, 1280], None, None)?;
1960        tensor.map()?.copy_from_slice(include_bytes!(concat!(
1961            env!("CARGO_MANIFEST_DIR"),
1962            "/../../testdata/camera720p.8bps"
1963        )));
1964        let planar = {
1965            tensor
1966                .set_format(PixelFormat::PlanarRgb)
1967                .map_err(|e| crate::Error::Internal(e.to_string()))?;
1968            TensorDyn::from(tensor)
1969        };
1970
1971        let rbga = load_bytes_to_tensor(
1972            1280,
1973            720,
1974            PixelFormat::Rgba,
1975            None,
1976            include_bytes!(concat!(
1977                env!("CARGO_MANIFEST_DIR"),
1978                "/../../testdata/camera720p.rgba"
1979            )),
1980        )?;
1981        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
1982
1983        Ok(())
1984    }
1985
1986    #[test]
1987    fn test_from_tensor_invalid_format() {
1988        // PixelFormat::from_fourcc_str returns None for unknown FourCC codes.
1989        // Since there's no "TEST" pixel format, this validates graceful handling.
1990        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
1991    }
1992
1993    #[test]
1994    #[should_panic(expected = "Failed to save planar RGB image")]
1995    fn test_save_planar() {
1996        let planar_img = load_bytes_to_tensor(
1997            1280,
1998            720,
1999            PixelFormat::PlanarRgb,
2000            None,
2001            include_bytes!(concat!(
2002                env!("CARGO_MANIFEST_DIR"),
2003                "/../../testdata/camera720p.8bps"
2004            )),
2005        )
2006        .unwrap();
2007
2008        let save_path = "/tmp/planar_rgb.jpg";
2009        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save planar RGB image");
2010    }
2011
2012    #[test]
2013    #[should_panic(expected = "Failed to save YUYV image")]
2014    fn test_save_yuyv() {
2015        let planar_img = load_bytes_to_tensor(
2016            1280,
2017            720,
2018            PixelFormat::Yuyv,
2019            None,
2020            include_bytes!(concat!(
2021                env!("CARGO_MANIFEST_DIR"),
2022                "/../../testdata/camera720p.yuyv"
2023            )),
2024        )
2025        .unwrap();
2026
2027        let save_path = "/tmp/yuyv.jpg";
2028        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save YUYV image");
2029    }
2030
2031    #[test]
2032    fn test_rotation_angle() {
2033        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
2034        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
2035        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
2036        assert_eq!(
2037            Rotation::from_degrees_clockwise(270),
2038            Rotation::CounterClockwise90
2039        );
2040        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
2041        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
2042        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
2043        assert_eq!(
2044            Rotation::from_degrees_clockwise(630),
2045            Rotation::CounterClockwise90
2046        );
2047    }
2048
    // Non-right-angle rotations are unsupported and must panic with the
    // documented message.
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        Rotation::from_degrees_clockwise(361);
    }
2054
    // NOTE(review): this test mutates process-wide environment variables
    // and restores them afterwards; it assumes no other test constructs an
    // ImageProcessor concurrently while a variable is set.
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        // EDGEFIRST_DISABLE_G2D=1 must prevent the G2D backend from being
        // created.
        #[cfg(target_os = "linux")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        // EDGEFIRST_DISABLE_GL=1 must prevent the OpenGL backend from
        // being created.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        // EDGEFIRST_DISABLE_CPU=1 must prevent the CPU backend from being
        // created.
        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // With every backend disabled, a conversion attempt must fail with
        // Error::NoConverter.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None)?;
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None)?;
        let (result, _src, _dst) = convert_img(
            &mut converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        assert!(matches!(result, Err(Error::NoConverter)));

        // Restore the prior environment before finishing.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        Ok(())
    }
2126
2127    #[test]
2128    fn test_unsupported_conversion() {
2129        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2130        let dst = TensorDyn::image(640, 360, PixelFormat::Nv12, DType::U8, None).unwrap();
2131        let mut converter = ImageProcessor::new().unwrap();
2132        let (result, _src, _dst) = convert_img(
2133            &mut converter,
2134            src,
2135            dst,
2136            Rotation::None,
2137            Flip::None,
2138            Crop::no_crop(),
2139        );
2140        log::debug!("result: {:?}", result);
2141        assert!(matches!(
2142            result,
2143            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2144        ));
2145    }
2146
2147    #[test]
2148    fn test_load_grey() {
2149        let grey_img = crate::load_image(
2150            include_bytes!(concat!(
2151                env!("CARGO_MANIFEST_DIR"),
2152                "/../../testdata/grey.jpg"
2153            )),
2154            Some(PixelFormat::Rgba),
2155            None,
2156        )
2157        .unwrap();
2158
2159        let grey_but_rgb_img = crate::load_image(
2160            include_bytes!(concat!(
2161                env!("CARGO_MANIFEST_DIR"),
2162                "/../../testdata/grey-rgb.jpg"
2163            )),
2164            Some(PixelFormat::Rgba),
2165            None,
2166        )
2167        .unwrap();
2168
2169        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2170    }
2171
2172    #[test]
2173    fn test_new_nv12() {
2174        let nv12 = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2175        assert_eq!(nv12.height(), Some(720));
2176        assert_eq!(nv12.width(), Some(1280));
2177        assert_eq!(nv12.format().unwrap(), PixelFormat::Nv12);
2178        // PixelFormat::Nv12.channels() returns 1 (luma plane channel count)
2179        assert_eq!(nv12.format().unwrap().channels(), 1);
2180        assert!(nv12.format().is_some_and(
2181            |f| f.layout() == PixelLayout::Planar || f.layout() == PixelLayout::SemiPlanar
2182        ))
2183    }
2184
2185    #[test]
2186    #[cfg(target_os = "linux")]
2187    fn test_new_image_converter() {
2188        let dst_width = 640;
2189        let dst_height = 360;
2190        let file = include_bytes!(concat!(
2191            env!("CARGO_MANIFEST_DIR"),
2192            "/../../testdata/zidane.jpg"
2193        ))
2194        .to_vec();
2195        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2196
2197        let mut converter = ImageProcessor::new().unwrap();
2198        let converter_dst = converter
2199            .create_image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
2200            .unwrap();
2201        let (result, src, converter_dst) = convert_img(
2202            &mut converter,
2203            src,
2204            converter_dst,
2205            Rotation::None,
2206            Flip::None,
2207            Crop::no_crop(),
2208        );
2209        result.unwrap();
2210
2211        let cpu_dst =
2212            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2213        let mut cpu_converter = CPUProcessor::new();
2214        let (result, _src, cpu_dst) = convert_img(
2215            &mut cpu_converter,
2216            src,
2217            cpu_dst,
2218            Rotation::None,
2219            Flip::None,
2220            Crop::no_crop(),
2221        );
2222        result.unwrap();
2223
2224        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
2225    }
2226
2227    #[test]
2228    #[cfg(target_os = "linux")]
2229    fn test_create_image_dtype_i8() {
2230        let mut converter = ImageProcessor::new().unwrap();
2231
2232        // I8 image should allocate successfully via create_image
2233        let dst = converter
2234            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
2235            .unwrap();
2236        assert_eq!(dst.dtype(), DType::I8);
2237        assert!(dst.width() == Some(320));
2238        assert!(dst.height() == Some(240));
2239        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
2240
2241        // U8 for comparison
2242        let dst_u8 = converter
2243            .create_image(320, 240, PixelFormat::Rgb, DType::U8, None)
2244            .unwrap();
2245        assert_eq!(dst_u8.dtype(), DType::U8);
2246
2247        // Convert into I8 dst should succeed
2248        let file = include_bytes!(concat!(
2249            env!("CARGO_MANIFEST_DIR"),
2250            "/../../testdata/zidane.jpg"
2251        ))
2252        .to_vec();
2253        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2254        let mut dst_i8 = converter
2255            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
2256            .unwrap();
2257        converter
2258            .convert(
2259                &src,
2260                &mut dst_i8,
2261                Rotation::None,
2262                Flip::None,
2263                Crop::no_crop(),
2264            )
2265            .unwrap();
2266    }
2267
    #[test]
    #[ignore] // Hangs on desktop platforms where DMA-buf is unavailable and PBO
              // fallback triggers a GPU driver hang during SHM→texture upload (e.g.,
              // NVIDIA without /dev/dma_heap permissions). Works on embedded targets.
    fn test_crop_skip() {
        // Crop the same 640x640 region with the auto-selected backend and
        // with the CPU backend into identical 1280x720 destinations; with
        // no scaling involved the results must match almost exactly.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let mut converter = ImageProcessor::new().unwrap();
        let converter_dst = converter
            .create_image(1280, 720, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        let crop = Crop::new()
            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
        let (result, src, converter_dst) = convert_img(
            &mut converter,
            src,
            converter_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        // CPU reference pass with the same crop rectangles.
        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
    }
2311
2312    #[test]
2313    fn test_invalid_pixel_format() {
2314        // PixelFormat::from_fourcc returns None for unknown formats,
2315        // so TensorDyn::image cannot be called with an invalid format.
2316        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2317    }
2318
    // Helper function to check if G2D library is available (Linux/i.MX8 only)
    // The probe result is cached so repeated calls do not re-open the
    // G2D library.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2327
2328    #[cfg(target_os = "linux")]
2329    #[cfg(feature = "opengl")]
2330    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2331
2332    #[cfg(target_os = "linux")]
2333    #[cfg(feature = "opengl")]
2334    // Helper function to check if OpenGL is available
2335    fn is_opengl_available() -> bool {
2336        #[cfg(all(target_os = "linux", feature = "opengl"))]
2337        {
2338            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2339        }
2340
2341        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2342        {
2343            false
2344        }
2345    }
2346
    #[test]
    fn test_load_jpeg_with_exif() {
        // Verify that load_image honors the EXIF orientation tag in a JPEG:
        // the tagged file must decode with width/height swapped, and its pixel
        // content must match CPU-rotating the untagged original by 90 degrees.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif.jpg"
        ))
        .to_vec();
        let loaded = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // The untagged source is 1280x720; applying the EXIF rotation yields 720x1280.
        assert_eq!(loaded.height(), Some(1280));
        assert_eq!(loaded.width(), Some(720));

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Intentionally swapped: the reference image is rotated 90 degrees
        // below, so the destination takes the source's height as its width
        // and vice versa.
        let (dst_width, dst_height) = (cpu_src.height().unwrap(), cpu_src.width().unwrap());

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Clockwise90,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2384
2385    #[test]
2386    fn test_load_png_with_exif() {
2387        let file = include_bytes!(concat!(
2388            env!("CARGO_MANIFEST_DIR"),
2389            "/../../testdata/zidane_rotated_exif_180.png"
2390        ))
2391        .to_vec();
2392        let loaded = crate::load_png(&file, Some(PixelFormat::Rgba), None).unwrap();
2393
2394        assert_eq!(loaded.height(), Some(720));
2395        assert_eq!(loaded.width(), Some(1280));
2396
2397        let file = include_bytes!(concat!(
2398            env!("CARGO_MANIFEST_DIR"),
2399            "/../../testdata/zidane.jpg"
2400        ))
2401        .to_vec();
2402        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2403
2404        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
2405        let mut cpu_converter = CPUProcessor::new();
2406
2407        let (result, _cpu_src, cpu_dst) = convert_img(
2408            &mut cpu_converter,
2409            cpu_src,
2410            cpu_dst,
2411            Rotation::Rotate180,
2412            Flip::None,
2413            Crop::no_crop(),
2414        );
2415        result.unwrap();
2416
2417        compare_images(&loaded, &cpu_dst, 0.98, function!());
2418    }
2419
2420    #[test]
2421    #[cfg(target_os = "linux")]
2422    fn test_g2d_resize() {
2423        if !is_g2d_available() {
2424            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
2425            return;
2426        }
2427        if !is_dma_available() {
2428            eprintln!(
2429                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2430            );
2431            return;
2432        }
2433
2434        let dst_width = 640;
2435        let dst_height = 360;
2436        let file = include_bytes!(concat!(
2437            env!("CARGO_MANIFEST_DIR"),
2438            "/../../testdata/zidane.jpg"
2439        ))
2440        .to_vec();
2441        let src =
2442            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
2443
2444        let g2d_dst = TensorDyn::image(
2445            dst_width,
2446            dst_height,
2447            PixelFormat::Rgba,
2448            DType::U8,
2449            Some(TensorMemory::Dma),
2450        )
2451        .unwrap();
2452        let mut g2d_converter = G2DProcessor::new().unwrap();
2453        let (result, src, g2d_dst) = convert_img(
2454            &mut g2d_converter,
2455            src,
2456            g2d_dst,
2457            Rotation::None,
2458            Flip::None,
2459            Crop::no_crop(),
2460        );
2461        result.unwrap();
2462
2463        let cpu_dst =
2464            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2465        let mut cpu_converter = CPUProcessor::new();
2466        let (result, _src, cpu_dst) = convert_img(
2467            &mut cpu_converter,
2468            src,
2469            cpu_dst,
2470            Rotation::None,
2471            Flip::None,
2472            Crop::no_crop(),
2473        );
2474        result.unwrap();
2475
2476        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2477    }
2478
2479    #[test]
2480    #[cfg(target_os = "linux")]
2481    #[cfg(feature = "opengl")]
2482    fn test_opengl_resize() {
2483        if !is_opengl_available() {
2484            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2485            return;
2486        }
2487
2488        let dst_width = 640;
2489        let dst_height = 360;
2490        let file = include_bytes!(concat!(
2491            env!("CARGO_MANIFEST_DIR"),
2492            "/../../testdata/zidane.jpg"
2493        ))
2494        .to_vec();
2495        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2496
2497        let cpu_dst =
2498            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2499        let mut cpu_converter = CPUProcessor::new();
2500        let (result, src, cpu_dst) = convert_img(
2501            &mut cpu_converter,
2502            src,
2503            cpu_dst,
2504            Rotation::None,
2505            Flip::None,
2506            Crop::no_crop(),
2507        );
2508        result.unwrap();
2509
2510        let mut src = src;
2511        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2512
2513        for _ in 0..5 {
2514            let gl_dst =
2515                TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
2516                    .unwrap();
2517            let (result, src_back, gl_dst) = convert_img(
2518                &mut gl_converter,
2519                src,
2520                gl_dst,
2521                Rotation::None,
2522                Flip::None,
2523                Crop::no_crop(),
2524            );
2525            result.unwrap();
2526            src = src_back;
2527
2528            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2529        }
2530    }
2531
2532    #[test]
2533    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2534    #[cfg(target_os = "linux")]
2535    #[cfg(feature = "opengl")]
2536    fn test_opengl_10_threads() {
2537        if !is_opengl_available() {
2538            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2539            return;
2540        }
2541
2542        let handles: Vec<_> = (0..10)
2543            .map(|i| {
2544                std::thread::Builder::new()
2545                    .name(format!("Thread {i}"))
2546                    .spawn(test_opengl_resize)
2547                    .unwrap()
2548            })
2549            .collect();
2550        handles.into_iter().for_each(|h| {
2551            if let Err(e) = h.join() {
2552                std::panic::resume_unwind(e)
2553            }
2554        });
2555    }
2556
2557    #[test]
2558    #[cfg(target_os = "linux")]
2559    #[cfg(feature = "opengl")]
2560    fn test_opengl_grey() {
2561        if !is_opengl_available() {
2562            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2563            return;
2564        }
2565
2566        let img = crate::load_image(
2567            include_bytes!(concat!(
2568                env!("CARGO_MANIFEST_DIR"),
2569                "/../../testdata/grey.jpg"
2570            )),
2571            Some(PixelFormat::Grey),
2572            None,
2573        )
2574        .unwrap();
2575
2576        let gl_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
2577        let cpu_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
2578
2579        let mut converter = CPUProcessor::new();
2580
2581        let (result, img, cpu_dst) = convert_img(
2582            &mut converter,
2583            img,
2584            cpu_dst,
2585            Rotation::None,
2586            Flip::None,
2587            Crop::no_crop(),
2588        );
2589        result.unwrap();
2590
2591        let mut gl = GLProcessorThreaded::new(None).unwrap();
2592        let (result, _img, gl_dst) = convert_img(
2593            &mut gl,
2594            img,
2595            gl_dst,
2596            Rotation::None,
2597            Flip::None,
2598            Crop::no_crop(),
2599        );
2600        result.unwrap();
2601
2602        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2603    }
2604
2605    #[test]
2606    #[cfg(target_os = "linux")]
2607    fn test_g2d_src_crop() {
2608        if !is_g2d_available() {
2609            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
2610            return;
2611        }
2612        if !is_dma_available() {
2613            eprintln!(
2614                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2615            );
2616            return;
2617        }
2618
2619        let dst_width = 640;
2620        let dst_height = 640;
2621        let file = include_bytes!(concat!(
2622            env!("CARGO_MANIFEST_DIR"),
2623            "/../../testdata/zidane.jpg"
2624        ))
2625        .to_vec();
2626        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2627
2628        let cpu_dst =
2629            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2630        let mut cpu_converter = CPUProcessor::new();
2631        let crop = Crop {
2632            src_rect: Some(Rect {
2633                left: 0,
2634                top: 0,
2635                width: 640,
2636                height: 360,
2637            }),
2638            dst_rect: None,
2639            dst_color: None,
2640        };
2641        let (result, src, cpu_dst) = convert_img(
2642            &mut cpu_converter,
2643            src,
2644            cpu_dst,
2645            Rotation::None,
2646            Flip::None,
2647            crop,
2648        );
2649        result.unwrap();
2650
2651        let g2d_dst =
2652            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2653        let mut g2d_converter = G2DProcessor::new().unwrap();
2654        let (result, _src, g2d_dst) = convert_img(
2655            &mut g2d_converter,
2656            src,
2657            g2d_dst,
2658            Rotation::None,
2659            Flip::None,
2660            crop,
2661        );
2662        result.unwrap();
2663
2664        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2665    }
2666
2667    #[test]
2668    #[cfg(target_os = "linux")]
2669    fn test_g2d_dst_crop() {
2670        if !is_g2d_available() {
2671            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
2672            return;
2673        }
2674        if !is_dma_available() {
2675            eprintln!(
2676                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2677            );
2678            return;
2679        }
2680
2681        let dst_width = 640;
2682        let dst_height = 640;
2683        let file = include_bytes!(concat!(
2684            env!("CARGO_MANIFEST_DIR"),
2685            "/../../testdata/zidane.jpg"
2686        ))
2687        .to_vec();
2688        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2689
2690        let cpu_dst =
2691            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2692        let mut cpu_converter = CPUProcessor::new();
2693        let crop = Crop {
2694            src_rect: None,
2695            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2696            dst_color: None,
2697        };
2698        let (result, src, cpu_dst) = convert_img(
2699            &mut cpu_converter,
2700            src,
2701            cpu_dst,
2702            Rotation::None,
2703            Flip::None,
2704            crop,
2705        );
2706        result.unwrap();
2707
2708        let g2d_dst =
2709            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2710        let mut g2d_converter = G2DProcessor::new().unwrap();
2711        let (result, _src, g2d_dst) = convert_img(
2712            &mut g2d_converter,
2713            src,
2714            g2d_dst,
2715            Rotation::None,
2716            Flip::None,
2717            crop,
2718        );
2719        result.unwrap();
2720
2721        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2722    }
2723
2724    #[test]
2725    #[cfg(target_os = "linux")]
2726    fn test_g2d_all_rgba() {
2727        if !is_g2d_available() {
2728            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2729            return;
2730        }
2731        if !is_dma_available() {
2732            eprintln!(
2733                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2734            );
2735            return;
2736        }
2737
2738        let dst_width = 640;
2739        let dst_height = 640;
2740        let file = include_bytes!(concat!(
2741            env!("CARGO_MANIFEST_DIR"),
2742            "/../../testdata/zidane.jpg"
2743        ))
2744        .to_vec();
2745        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2746        let src_dyn = src;
2747
2748        let mut cpu_dst =
2749            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2750        let mut cpu_converter = CPUProcessor::new();
2751        let mut g2d_dst =
2752            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2753        let mut g2d_converter = G2DProcessor::new().unwrap();
2754
2755        let crop = Crop {
2756            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2757            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2758            dst_color: None,
2759        };
2760
2761        for rot in [
2762            Rotation::None,
2763            Rotation::Clockwise90,
2764            Rotation::Rotate180,
2765            Rotation::CounterClockwise90,
2766        ] {
2767            cpu_dst
2768                .as_u8()
2769                .unwrap()
2770                .map()
2771                .unwrap()
2772                .as_mut_slice()
2773                .fill(114);
2774            g2d_dst
2775                .as_u8()
2776                .unwrap()
2777                .map()
2778                .unwrap()
2779                .as_mut_slice()
2780                .fill(114);
2781            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2782                let mut cpu_dst_dyn = cpu_dst;
2783                cpu_converter
2784                    .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
2785                    .unwrap();
2786                cpu_dst = {
2787                    let mut __t = cpu_dst_dyn.into_u8().unwrap();
2788                    __t.set_format(PixelFormat::Rgba).unwrap();
2789                    TensorDyn::from(__t)
2790                };
2791
2792                let mut g2d_dst_dyn = g2d_dst;
2793                g2d_converter
2794                    .convert(&src_dyn, &mut g2d_dst_dyn, Rotation::None, Flip::None, crop)
2795                    .unwrap();
2796                g2d_dst = {
2797                    let mut __t = g2d_dst_dyn.into_u8().unwrap();
2798                    __t.set_format(PixelFormat::Rgba).unwrap();
2799                    TensorDyn::from(__t)
2800                };
2801
2802                compare_images(
2803                    &g2d_dst,
2804                    &cpu_dst,
2805                    0.98,
2806                    &format!("{} {:?} {:?}", function!(), rot, flip),
2807                );
2808            }
2809        }
2810    }
2811
2812    #[test]
2813    #[cfg(target_os = "linux")]
2814    #[cfg(feature = "opengl")]
2815    fn test_opengl_src_crop() {
2816        if !is_opengl_available() {
2817            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2818            return;
2819        }
2820
2821        let dst_width = 640;
2822        let dst_height = 360;
2823        let file = include_bytes!(concat!(
2824            env!("CARGO_MANIFEST_DIR"),
2825            "/../../testdata/zidane.jpg"
2826        ))
2827        .to_vec();
2828        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2829        let crop = Crop {
2830            src_rect: Some(Rect {
2831                left: 320,
2832                top: 180,
2833                width: 1280 - 320,
2834                height: 720 - 180,
2835            }),
2836            dst_rect: None,
2837            dst_color: None,
2838        };
2839
2840        let cpu_dst =
2841            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2842        let mut cpu_converter = CPUProcessor::new();
2843        let (result, src, cpu_dst) = convert_img(
2844            &mut cpu_converter,
2845            src,
2846            cpu_dst,
2847            Rotation::None,
2848            Flip::None,
2849            crop,
2850        );
2851        result.unwrap();
2852
2853        let gl_dst =
2854            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2855        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2856        let (result, _src, gl_dst) = convert_img(
2857            &mut gl_converter,
2858            src,
2859            gl_dst,
2860            Rotation::None,
2861            Flip::None,
2862            crop,
2863        );
2864        result.unwrap();
2865
2866        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2867    }
2868
2869    #[test]
2870    #[cfg(target_os = "linux")]
2871    #[cfg(feature = "opengl")]
2872    fn test_opengl_dst_crop() {
2873        if !is_opengl_available() {
2874            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2875            return;
2876        }
2877
2878        let dst_width = 640;
2879        let dst_height = 640;
2880        let file = include_bytes!(concat!(
2881            env!("CARGO_MANIFEST_DIR"),
2882            "/../../testdata/zidane.jpg"
2883        ))
2884        .to_vec();
2885        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2886
2887        let cpu_dst =
2888            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2889        let mut cpu_converter = CPUProcessor::new();
2890        let crop = Crop {
2891            src_rect: None,
2892            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2893            dst_color: None,
2894        };
2895        let (result, src, cpu_dst) = convert_img(
2896            &mut cpu_converter,
2897            src,
2898            cpu_dst,
2899            Rotation::None,
2900            Flip::None,
2901            crop,
2902        );
2903        result.unwrap();
2904
2905        let gl_dst =
2906            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2907        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2908        let (result, _src, gl_dst) = convert_img(
2909            &mut gl_converter,
2910            src,
2911            gl_dst,
2912            Rotation::None,
2913            Flip::None,
2914            crop,
2915        );
2916        result.unwrap();
2917
2918        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2919    }
2920
2921    #[test]
2922    #[cfg(target_os = "linux")]
2923    #[cfg(feature = "opengl")]
2924    fn test_opengl_all_rgba() {
2925        if !is_opengl_available() {
2926            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2927            return;
2928        }
2929
2930        let dst_width = 640;
2931        let dst_height = 640;
2932        let file = include_bytes!(concat!(
2933            env!("CARGO_MANIFEST_DIR"),
2934            "/../../testdata/zidane.jpg"
2935        ))
2936        .to_vec();
2937
2938        let mut cpu_converter = CPUProcessor::new();
2939
2940        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2941
2942        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
2943        if is_dma_available() {
2944            mem.push(Some(TensorMemory::Dma));
2945        }
2946        let crop = Crop {
2947            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2948            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2949            dst_color: None,
2950        };
2951        for m in mem {
2952            let src = crate::load_image(&file, Some(PixelFormat::Rgba), m).unwrap();
2953            let src_dyn = src;
2954
2955            for rot in [
2956                Rotation::None,
2957                Rotation::Clockwise90,
2958                Rotation::Rotate180,
2959                Rotation::CounterClockwise90,
2960            ] {
2961                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2962                    let cpu_dst =
2963                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
2964                            .unwrap();
2965                    let gl_dst =
2966                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
2967                            .unwrap();
2968                    cpu_dst
2969                        .as_u8()
2970                        .unwrap()
2971                        .map()
2972                        .unwrap()
2973                        .as_mut_slice()
2974                        .fill(114);
2975                    gl_dst
2976                        .as_u8()
2977                        .unwrap()
2978                        .map()
2979                        .unwrap()
2980                        .as_mut_slice()
2981                        .fill(114);
2982
2983                    let mut cpu_dst_dyn = cpu_dst;
2984                    cpu_converter
2985                        .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
2986                        .unwrap();
2987                    let cpu_dst = {
2988                        let mut __t = cpu_dst_dyn.into_u8().unwrap();
2989                        __t.set_format(PixelFormat::Rgba).unwrap();
2990                        TensorDyn::from(__t)
2991                    };
2992
2993                    let mut gl_dst_dyn = gl_dst;
2994                    gl_converter
2995                        .convert(&src_dyn, &mut gl_dst_dyn, Rotation::None, Flip::None, crop)
2996                        .map_err(|e| {
2997                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
2998                            e
2999                        })
3000                        .unwrap();
3001                    let gl_dst = {
3002                        let mut __t = gl_dst_dyn.into_u8().unwrap();
3003                        __t.set_format(PixelFormat::Rgba).unwrap();
3004                        TensorDyn::from(__t)
3005                    };
3006
3007                    compare_images(
3008                        &gl_dst,
3009                        &cpu_dst,
3010                        0.98,
3011                        &format!("{} {:?} {:?}", function!(), rot, flip),
3012                    );
3013                }
3014            }
3015        }
3016    }
3017
3018    #[test]
3019    #[cfg(target_os = "linux")]
3020    fn test_cpu_rotate() {
3021        for rot in [
3022            Rotation::Clockwise90,
3023            Rotation::Rotate180,
3024            Rotation::CounterClockwise90,
3025        ] {
3026            test_cpu_rotate_(rot);
3027        }
3028    }
3029
    #[cfg(target_os = "linux")]
    // Applies `rot` four times on the CPU, ping-ponging between two buffers,
    // and checks the image returns to its original content. Note: this only
    // verifies the round trip; it does not check that each individual rotation
    // went in the right direction.
    fn test_cpu_rotate_(rot: Rotation) {
        // This test rotates the image 4 times and checks that the image was returned to
        // be the same Currently doesn't check if rotations actually rotated in
        // right direction
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();

        // Pristine copy kept aside for the final comparison.
        let unchanged_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // 90-degree rotations swap the destination's width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => (src.width().unwrap(), src.height().unwrap()),
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
                (src.height().unwrap(), src.width().unwrap())
            }
        };

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // After rotating 4 times, the image should be the same as the original

        // Rotation 1: src -> cpu_dst
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Rotation 2: cpu_dst -> src (buffers swap roles each step)
        let (result, cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Rotation 3: src -> cpu_dst
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Rotation 4: cpu_dst -> src; `src` now holds the fully round-tripped image.
        let (result, _cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&src, &unchanged_src, 0.98, function!());
    }
3099
3100    #[test]
3101    #[cfg(target_os = "linux")]
3102    #[cfg(feature = "opengl")]
3103    fn test_opengl_rotate() {
3104        if !is_opengl_available() {
3105            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3106            return;
3107        }
3108
3109        let size = (1280, 720);
3110        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
3111
3112        if is_dma_available() {
3113            mem.push(Some(TensorMemory::Dma));
3114        }
3115        for m in mem {
3116            for rot in [
3117                Rotation::Clockwise90,
3118                Rotation::Rotate180,
3119                Rotation::CounterClockwise90,
3120            ] {
3121                test_opengl_rotate_(size, rot, m);
3122            }
3123        }
3124    }
3125
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Rotates the test image with the OpenGL backend (using `tensor_memory`
    // buffers) and compares against a CPU reference; the GL conversion is
    // repeated five times to catch state leakage between frames.
    fn test_opengl_rotate_(
        size: (usize, usize),
        rot: Rotation,
        tensor_memory: Option<TensorMemory>,
    ) {
        // 90-degree rotations swap the destination's width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => size,
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
        };

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), tensor_memory).unwrap();

        // Build the CPU reference once; convert_img hands the source back.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, mut src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // Repeat the GL conversion, reclaiming the source each iteration.
        for _ in 0..5 {
            let gl_dst = TensorDyn::image(
                dst_width,
                dst_height,
                PixelFormat::Rgba,
                DType::U8,
                tensor_memory,
            )
            .unwrap();
            let (result, src_back, gl_dst) = convert_img(
                &mut gl_converter,
                src,
                gl_dst,
                rot,
                Flip::None,
                Crop::no_crop(),
            );
            result.unwrap();
            src = src_back;
            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }
    }
3183
3184    #[test]
3185    #[cfg(target_os = "linux")]
3186    fn test_g2d_rotate() {
3187        if !is_g2d_available() {
3188            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3189            return;
3190        }
3191        if !is_dma_available() {
3192            eprintln!(
3193                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3194            );
3195            return;
3196        }
3197
3198        let size = (1280, 720);
3199        for rot in [
3200            Rotation::Clockwise90,
3201            Rotation::Rotate180,
3202            Rotation::CounterClockwise90,
3203        ] {
3204            test_g2d_rotate_(size, rot);
3205        }
3206    }
3207
3208    #[cfg(target_os = "linux")]
3209    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
3210        let (dst_width, dst_height) = match rot {
3211            Rotation::None | Rotation::Rotate180 => size,
3212            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3213        };
3214
3215        let file = include_bytes!(concat!(
3216            env!("CARGO_MANIFEST_DIR"),
3217            "/../../testdata/zidane.jpg"
3218        ))
3219        .to_vec();
3220        let src =
3221            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();
3222
3223        let cpu_dst =
3224            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3225        let mut cpu_converter = CPUProcessor::new();
3226
3227        let (result, src, cpu_dst) = convert_img(
3228            &mut cpu_converter,
3229            src,
3230            cpu_dst,
3231            rot,
3232            Flip::None,
3233            Crop::no_crop(),
3234        );
3235        result.unwrap();
3236
3237        let g2d_dst = TensorDyn::image(
3238            dst_width,
3239            dst_height,
3240            PixelFormat::Rgba,
3241            DType::U8,
3242            Some(TensorMemory::Dma),
3243        )
3244        .unwrap();
3245        let mut g2d_converter = G2DProcessor::new().unwrap();
3246
3247        let (result, _src, g2d_dst) = convert_img(
3248            &mut g2d_converter,
3249            src,
3250            g2d_dst,
3251            rot,
3252            Flip::None,
3253            Crop::no_crop(),
3254        );
3255        result.unwrap();
3256
3257        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3258    }
3259
    #[test]
    // Validates the CPU RGBA->YUYV path: resize RGBA into YUYV, convert that
    // back to RGBA, and compare against a direct RGBA->RGBA resize. The two
    // should agree within the 98% similarity tolerance (YUYV subsampling makes
    // an exact match impossible).
    fn test_rgba_to_yuyv_resize_cpu() {
        let src = load_bytes_to_tensor(
            1280,
            720,
            PixelFormat::Rgba,
            None,
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            )),
        )
        .unwrap();

        let (dst_width, dst_height) = (640, 360);

        // Intermediate YUYV destination for the two-step path.
        let dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Yuyv, DType::U8, None).unwrap();

        // RGBA result obtained via the YUYV intermediate.
        let dst_through_yuyv =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        // RGBA result obtained by resizing directly, used as the reference.
        let dst_direct =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();

        let mut cpu_converter = CPUProcessor::new();

        // Step 1: RGBA 1280x720 -> YUYV 640x360.
        let (result, src, dst) = convert_img(
            &mut cpu_converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Step 2: YUYV 640x360 -> RGBA 640x360.
        let (result, _dst, dst_through_yuyv) = convert_img(
            &mut cpu_converter,
            dst,
            dst_through_yuyv,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Reference: RGBA 1280x720 -> RGBA 640x360 in a single step.
        let (result, _src, dst_direct) = convert_img(
            &mut cpu_converter,
            src,
            dst_direct,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
    }
3318
3319    #[test]
3320    #[cfg(target_os = "linux")]
3321    #[cfg(feature = "opengl")]
3322    #[ignore = "opengl doesn't support rendering to PixelFormat::Yuyv texture"]
3323    fn test_rgba_to_yuyv_resize_opengl() {
3324        if !is_opengl_available() {
3325            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3326            return;
3327        }
3328
3329        if !is_dma_available() {
3330            eprintln!(
3331                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3332                function!()
3333            );
3334            return;
3335        }
3336
3337        let src = load_bytes_to_tensor(
3338            1280,
3339            720,
3340            PixelFormat::Rgba,
3341            None,
3342            include_bytes!(concat!(
3343                env!("CARGO_MANIFEST_DIR"),
3344                "/../../testdata/camera720p.rgba"
3345            )),
3346        )
3347        .unwrap();
3348
3349        let (dst_width, dst_height) = (640, 360);
3350
3351        let dst = TensorDyn::image(
3352            dst_width,
3353            dst_height,
3354            PixelFormat::Yuyv,
3355            DType::U8,
3356            Some(TensorMemory::Dma),
3357        )
3358        .unwrap();
3359
3360        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3361
3362        let (result, src, dst) = convert_img(
3363            &mut gl_converter,
3364            src,
3365            dst,
3366            Rotation::None,
3367            Flip::None,
3368            Crop::new()
3369                .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
3370                .with_dst_color(Some([255, 255, 255, 255])),
3371        );
3372        result.unwrap();
3373
3374        std::fs::write(
3375            "rgba_to_yuyv_opengl.yuyv",
3376            dst.as_u8().unwrap().map().unwrap().as_slice(),
3377        )
3378        .unwrap();
3379        let cpu_dst = TensorDyn::image(
3380            dst_width,
3381            dst_height,
3382            PixelFormat::Yuyv,
3383            DType::U8,
3384            Some(TensorMemory::Dma),
3385        )
3386        .unwrap();
3387        let (result, _src, cpu_dst) = convert_img(
3388            &mut CPUProcessor::new(),
3389            src,
3390            cpu_dst,
3391            Rotation::None,
3392            Flip::None,
3393            Crop::no_crop(),
3394        );
3395        result.unwrap();
3396
3397        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
3398    }
3399
3400    #[test]
3401    #[cfg(target_os = "linux")]
3402    fn test_rgba_to_yuyv_resize_g2d() {
3403        if !is_g2d_available() {
3404            eprintln!(
3405                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3406            );
3407            return;
3408        }
3409        if !is_dma_available() {
3410            eprintln!(
3411                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3412            );
3413            return;
3414        }
3415
3416        let src = load_bytes_to_tensor(
3417            1280,
3418            720,
3419            PixelFormat::Rgba,
3420            Some(TensorMemory::Dma),
3421            include_bytes!(concat!(
3422                env!("CARGO_MANIFEST_DIR"),
3423                "/../../testdata/camera720p.rgba"
3424            )),
3425        )
3426        .unwrap();
3427
3428        let (dst_width, dst_height) = (1280, 720);
3429
3430        let cpu_dst = TensorDyn::image(
3431            dst_width,
3432            dst_height,
3433            PixelFormat::Yuyv,
3434            DType::U8,
3435            Some(TensorMemory::Dma),
3436        )
3437        .unwrap();
3438
3439        let g2d_dst = TensorDyn::image(
3440            dst_width,
3441            dst_height,
3442            PixelFormat::Yuyv,
3443            DType::U8,
3444            Some(TensorMemory::Dma),
3445        )
3446        .unwrap();
3447
3448        let mut g2d_converter = G2DProcessor::new().unwrap();
3449        let crop = Crop {
3450            src_rect: None,
3451            dst_rect: Some(Rect::new(100, 100, 2, 2)),
3452            dst_color: None,
3453        };
3454
3455        g2d_dst
3456            .as_u8()
3457            .unwrap()
3458            .map()
3459            .unwrap()
3460            .as_mut_slice()
3461            .fill(128);
3462        let (result, src, g2d_dst) = convert_img(
3463            &mut g2d_converter,
3464            src,
3465            g2d_dst,
3466            Rotation::None,
3467            Flip::None,
3468            crop,
3469        );
3470        result.unwrap();
3471
3472        let cpu_dst_img = cpu_dst;
3473        cpu_dst_img
3474            .as_u8()
3475            .unwrap()
3476            .map()
3477            .unwrap()
3478            .as_mut_slice()
3479            .fill(128);
3480        let (result, _src, cpu_dst) = convert_img(
3481            &mut CPUProcessor::new(),
3482            src,
3483            cpu_dst_img,
3484            Rotation::None,
3485            Flip::None,
3486            crop,
3487        );
3488        result.unwrap();
3489
3490        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3491    }
3492
3493    #[test]
3494    fn test_yuyv_to_rgba_cpu() {
3495        let file = include_bytes!(concat!(
3496            env!("CARGO_MANIFEST_DIR"),
3497            "/../../testdata/camera720p.yuyv"
3498        ))
3499        .to_vec();
3500        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3501        src.as_u8()
3502            .unwrap()
3503            .map()
3504            .unwrap()
3505            .as_mut_slice()
3506            .copy_from_slice(&file);
3507
3508        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3509        let mut cpu_converter = CPUProcessor::new();
3510
3511        let (result, _src, dst) = convert_img(
3512            &mut cpu_converter,
3513            src,
3514            dst,
3515            Rotation::None,
3516            Flip::None,
3517            Crop::no_crop(),
3518        );
3519        result.unwrap();
3520
3521        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3522        target_image
3523            .as_u8()
3524            .unwrap()
3525            .map()
3526            .unwrap()
3527            .as_mut_slice()
3528            .copy_from_slice(include_bytes!(concat!(
3529                env!("CARGO_MANIFEST_DIR"),
3530                "/../../testdata/camera720p.rgba"
3531            )));
3532
3533        compare_images(&dst, &target_image, 0.98, function!());
3534    }
3535
3536    #[test]
3537    fn test_yuyv_to_rgb_cpu() {
3538        let file = include_bytes!(concat!(
3539            env!("CARGO_MANIFEST_DIR"),
3540            "/../../testdata/camera720p.yuyv"
3541        ))
3542        .to_vec();
3543        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3544        src.as_u8()
3545            .unwrap()
3546            .map()
3547            .unwrap()
3548            .as_mut_slice()
3549            .copy_from_slice(&file);
3550
3551        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3552        let mut cpu_converter = CPUProcessor::new();
3553
3554        let (result, _src, dst) = convert_img(
3555            &mut cpu_converter,
3556            src,
3557            dst,
3558            Rotation::None,
3559            Flip::None,
3560            Crop::no_crop(),
3561        );
3562        result.unwrap();
3563
3564        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3565        target_image
3566            .as_u8()
3567            .unwrap()
3568            .map()
3569            .unwrap()
3570            .as_mut_slice()
3571            .as_chunks_mut::<3>()
3572            .0
3573            .iter_mut()
3574            .zip(
3575                include_bytes!(concat!(
3576                    env!("CARGO_MANIFEST_DIR"),
3577                    "/../../testdata/camera720p.rgba"
3578                ))
3579                .as_chunks::<4>()
3580                .0,
3581            )
3582            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3583
3584        compare_images(&dst, &target_image, 0.98, function!());
3585    }
3586
3587    #[test]
3588    #[cfg(target_os = "linux")]
3589    fn test_yuyv_to_rgba_g2d() {
3590        if !is_g2d_available() {
3591            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3592            return;
3593        }
3594        if !is_dma_available() {
3595            eprintln!(
3596                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3597            );
3598            return;
3599        }
3600
3601        let src = load_bytes_to_tensor(
3602            1280,
3603            720,
3604            PixelFormat::Yuyv,
3605            None,
3606            include_bytes!(concat!(
3607                env!("CARGO_MANIFEST_DIR"),
3608                "/../../testdata/camera720p.yuyv"
3609            )),
3610        )
3611        .unwrap();
3612
3613        let dst = TensorDyn::image(
3614            1280,
3615            720,
3616            PixelFormat::Rgba,
3617            DType::U8,
3618            Some(TensorMemory::Dma),
3619        )
3620        .unwrap();
3621        let mut g2d_converter = G2DProcessor::new().unwrap();
3622
3623        let (result, _src, dst) = convert_img(
3624            &mut g2d_converter,
3625            src,
3626            dst,
3627            Rotation::None,
3628            Flip::None,
3629            Crop::no_crop(),
3630        );
3631        result.unwrap();
3632
3633        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3634        target_image
3635            .as_u8()
3636            .unwrap()
3637            .map()
3638            .unwrap()
3639            .as_mut_slice()
3640            .copy_from_slice(include_bytes!(concat!(
3641                env!("CARGO_MANIFEST_DIR"),
3642                "/../../testdata/camera720p.rgba"
3643            )));
3644
3645        compare_images(&dst, &target_image, 0.98, function!());
3646    }
3647
3648    #[test]
3649    #[cfg(target_os = "linux")]
3650    #[cfg(feature = "opengl")]
3651    fn test_yuyv_to_rgba_opengl() {
3652        if !is_opengl_available() {
3653            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3654            return;
3655        }
3656        if !is_dma_available() {
3657            eprintln!(
3658                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3659                function!()
3660            );
3661            return;
3662        }
3663
3664        let src = load_bytes_to_tensor(
3665            1280,
3666            720,
3667            PixelFormat::Yuyv,
3668            Some(TensorMemory::Dma),
3669            include_bytes!(concat!(
3670                env!("CARGO_MANIFEST_DIR"),
3671                "/../../testdata/camera720p.yuyv"
3672            )),
3673        )
3674        .unwrap();
3675
3676        let dst = TensorDyn::image(
3677            1280,
3678            720,
3679            PixelFormat::Rgba,
3680            DType::U8,
3681            Some(TensorMemory::Dma),
3682        )
3683        .unwrap();
3684        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3685
3686        let (result, _src, dst) = convert_img(
3687            &mut gl_converter,
3688            src,
3689            dst,
3690            Rotation::None,
3691            Flip::None,
3692            Crop::no_crop(),
3693        );
3694        result.unwrap();
3695
3696        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3697        target_image
3698            .as_u8()
3699            .unwrap()
3700            .map()
3701            .unwrap()
3702            .as_mut_slice()
3703            .copy_from_slice(include_bytes!(concat!(
3704                env!("CARGO_MANIFEST_DIR"),
3705                "/../../testdata/camera720p.rgba"
3706            )));
3707
3708        compare_images(&dst, &target_image, 0.98, function!());
3709    }
3710
3711    #[test]
3712    #[cfg(target_os = "linux")]
3713    fn test_yuyv_to_rgb_g2d() {
3714        if !is_g2d_available() {
3715            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3716            return;
3717        }
3718        if !is_dma_available() {
3719            eprintln!(
3720                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3721            );
3722            return;
3723        }
3724
3725        let src = load_bytes_to_tensor(
3726            1280,
3727            720,
3728            PixelFormat::Yuyv,
3729            None,
3730            include_bytes!(concat!(
3731                env!("CARGO_MANIFEST_DIR"),
3732                "/../../testdata/camera720p.yuyv"
3733            )),
3734        )
3735        .unwrap();
3736
3737        let g2d_dst = TensorDyn::image(
3738            1280,
3739            720,
3740            PixelFormat::Rgb,
3741            DType::U8,
3742            Some(TensorMemory::Dma),
3743        )
3744        .unwrap();
3745        let mut g2d_converter = G2DProcessor::new().unwrap();
3746
3747        let (result, src, g2d_dst) = convert_img(
3748            &mut g2d_converter,
3749            src,
3750            g2d_dst,
3751            Rotation::None,
3752            Flip::None,
3753            Crop::no_crop(),
3754        );
3755        result.unwrap();
3756
3757        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3758        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3759
3760        let (result, _src, cpu_dst) = convert_img(
3761            &mut cpu_converter,
3762            src,
3763            cpu_dst,
3764            Rotation::None,
3765            Flip::None,
3766            Crop::no_crop(),
3767        );
3768        result.unwrap();
3769
3770        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3771    }
3772
3773    #[test]
3774    #[cfg(target_os = "linux")]
3775    fn test_yuyv_to_yuyv_resize_g2d() {
3776        if !is_g2d_available() {
3777            eprintln!(
3778                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3779            );
3780            return;
3781        }
3782        if !is_dma_available() {
3783            eprintln!(
3784                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3785            );
3786            return;
3787        }
3788
3789        let src = load_bytes_to_tensor(
3790            1280,
3791            720,
3792            PixelFormat::Yuyv,
3793            None,
3794            include_bytes!(concat!(
3795                env!("CARGO_MANIFEST_DIR"),
3796                "/../../testdata/camera720p.yuyv"
3797            )),
3798        )
3799        .unwrap();
3800
3801        let g2d_dst = TensorDyn::image(
3802            600,
3803            400,
3804            PixelFormat::Yuyv,
3805            DType::U8,
3806            Some(TensorMemory::Dma),
3807        )
3808        .unwrap();
3809        let mut g2d_converter = G2DProcessor::new().unwrap();
3810
3811        let (result, src, g2d_dst) = convert_img(
3812            &mut g2d_converter,
3813            src,
3814            g2d_dst,
3815            Rotation::None,
3816            Flip::None,
3817            Crop::no_crop(),
3818        );
3819        result.unwrap();
3820
3821        let cpu_dst = TensorDyn::image(600, 400, PixelFormat::Yuyv, DType::U8, None).unwrap();
3822        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3823
3824        let (result, _src, cpu_dst) = convert_img(
3825            &mut cpu_converter,
3826            src,
3827            cpu_dst,
3828            Rotation::None,
3829            Flip::None,
3830            Crop::no_crop(),
3831        );
3832        result.unwrap();
3833
3834        // TODO: compare PixelFormat::Yuyv and PixelFormat::Yuyv images without having to convert them to PixelFormat::Rgb
3835        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3836    }
3837
3838    #[test]
3839    fn test_yuyv_to_rgba_resize_cpu() {
3840        let src = load_bytes_to_tensor(
3841            1280,
3842            720,
3843            PixelFormat::Yuyv,
3844            None,
3845            include_bytes!(concat!(
3846                env!("CARGO_MANIFEST_DIR"),
3847                "/../../testdata/camera720p.yuyv"
3848            )),
3849        )
3850        .unwrap();
3851
3852        let (dst_width, dst_height) = (960, 540);
3853
3854        let dst =
3855            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3856        let mut cpu_converter = CPUProcessor::new();
3857
3858        let (result, _src, dst) = convert_img(
3859            &mut cpu_converter,
3860            src,
3861            dst,
3862            Rotation::None,
3863            Flip::None,
3864            Crop::no_crop(),
3865        );
3866        result.unwrap();
3867
3868        let dst_target =
3869            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3870        let src_target = load_bytes_to_tensor(
3871            1280,
3872            720,
3873            PixelFormat::Rgba,
3874            None,
3875            include_bytes!(concat!(
3876                env!("CARGO_MANIFEST_DIR"),
3877                "/../../testdata/camera720p.rgba"
3878            )),
3879        )
3880        .unwrap();
3881        let (result, _src_target, dst_target) = convert_img(
3882            &mut cpu_converter,
3883            src_target,
3884            dst_target,
3885            Rotation::None,
3886            Flip::None,
3887            Crop::no_crop(),
3888        );
3889        result.unwrap();
3890
3891        compare_images(&dst, &dst_target, 0.98, function!());
3892    }
3893
3894    #[test]
3895    #[cfg(target_os = "linux")]
3896    fn test_yuyv_to_rgba_crop_flip_g2d() {
3897        if !is_g2d_available() {
3898            eprintln!(
3899                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3900            );
3901            return;
3902        }
3903        if !is_dma_available() {
3904            eprintln!(
3905                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3906            );
3907            return;
3908        }
3909
3910        let src = load_bytes_to_tensor(
3911            1280,
3912            720,
3913            PixelFormat::Yuyv,
3914            Some(TensorMemory::Dma),
3915            include_bytes!(concat!(
3916                env!("CARGO_MANIFEST_DIR"),
3917                "/../../testdata/camera720p.yuyv"
3918            )),
3919        )
3920        .unwrap();
3921
3922        let (dst_width, dst_height) = (640, 640);
3923
3924        let dst_g2d = TensorDyn::image(
3925            dst_width,
3926            dst_height,
3927            PixelFormat::Rgba,
3928            DType::U8,
3929            Some(TensorMemory::Dma),
3930        )
3931        .unwrap();
3932        let mut g2d_converter = G2DProcessor::new().unwrap();
3933        let crop = Crop {
3934            src_rect: Some(Rect {
3935                left: 20,
3936                top: 15,
3937                width: 400,
3938                height: 300,
3939            }),
3940            dst_rect: None,
3941            dst_color: None,
3942        };
3943
3944        let (result, src, dst_g2d) = convert_img(
3945            &mut g2d_converter,
3946            src,
3947            dst_g2d,
3948            Rotation::None,
3949            Flip::Horizontal,
3950            crop,
3951        );
3952        result.unwrap();
3953
3954        let dst_cpu = TensorDyn::image(
3955            dst_width,
3956            dst_height,
3957            PixelFormat::Rgba,
3958            DType::U8,
3959            Some(TensorMemory::Dma),
3960        )
3961        .unwrap();
3962        let mut cpu_converter = CPUProcessor::new();
3963
3964        let (result, _src, dst_cpu) = convert_img(
3965            &mut cpu_converter,
3966            src,
3967            dst_cpu,
3968            Rotation::None,
3969            Flip::Horizontal,
3970            crop,
3971        );
3972        result.unwrap();
3973        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
3974    }
3975
3976    #[test]
3977    #[cfg(target_os = "linux")]
3978    #[cfg(feature = "opengl")]
3979    fn test_yuyv_to_rgba_crop_flip_opengl() {
3980        if !is_opengl_available() {
3981            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3982            return;
3983        }
3984
3985        if !is_dma_available() {
3986            eprintln!(
3987                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3988                function!()
3989            );
3990            return;
3991        }
3992
3993        let src = load_bytes_to_tensor(
3994            1280,
3995            720,
3996            PixelFormat::Yuyv,
3997            Some(TensorMemory::Dma),
3998            include_bytes!(concat!(
3999                env!("CARGO_MANIFEST_DIR"),
4000                "/../../testdata/camera720p.yuyv"
4001            )),
4002        )
4003        .unwrap();
4004
4005        let (dst_width, dst_height) = (640, 640);
4006
4007        let dst_gl = TensorDyn::image(
4008            dst_width,
4009            dst_height,
4010            PixelFormat::Rgba,
4011            DType::U8,
4012            Some(TensorMemory::Dma),
4013        )
4014        .unwrap();
4015        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4016        let crop = Crop {
4017            src_rect: Some(Rect {
4018                left: 20,
4019                top: 15,
4020                width: 400,
4021                height: 300,
4022            }),
4023            dst_rect: None,
4024            dst_color: None,
4025        };
4026
4027        let (result, src, dst_gl) = convert_img(
4028            &mut gl_converter,
4029            src,
4030            dst_gl,
4031            Rotation::None,
4032            Flip::Horizontal,
4033            crop,
4034        );
4035        result.unwrap();
4036
4037        let dst_cpu = TensorDyn::image(
4038            dst_width,
4039            dst_height,
4040            PixelFormat::Rgba,
4041            DType::U8,
4042            Some(TensorMemory::Dma),
4043        )
4044        .unwrap();
4045        let mut cpu_converter = CPUProcessor::new();
4046
4047        let (result, _src, dst_cpu) = convert_img(
4048            &mut cpu_converter,
4049            src,
4050            dst_cpu,
4051            Rotation::None,
4052            Flip::Horizontal,
4053            crop,
4054        );
4055        result.unwrap();
4056        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
4057    }
4058
4059    #[test]
4060    fn test_vyuy_to_rgba_cpu() {
4061        let file = include_bytes!(concat!(
4062            env!("CARGO_MANIFEST_DIR"),
4063            "/../../testdata/camera720p.vyuy"
4064        ))
4065        .to_vec();
4066        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
4067        src.as_u8()
4068            .unwrap()
4069            .map()
4070            .unwrap()
4071            .as_mut_slice()
4072            .copy_from_slice(&file);
4073
4074        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4075        let mut cpu_converter = CPUProcessor::new();
4076
4077        let (result, _src, dst) = convert_img(
4078            &mut cpu_converter,
4079            src,
4080            dst,
4081            Rotation::None,
4082            Flip::None,
4083            Crop::no_crop(),
4084        );
4085        result.unwrap();
4086
4087        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4088        target_image
4089            .as_u8()
4090            .unwrap()
4091            .map()
4092            .unwrap()
4093            .as_mut_slice()
4094            .copy_from_slice(include_bytes!(concat!(
4095                env!("CARGO_MANIFEST_DIR"),
4096                "/../../testdata/camera720p.rgba"
4097            )));
4098
4099        compare_images(&dst, &target_image, 0.98, function!());
4100    }
4101
4102    #[test]
4103    fn test_vyuy_to_rgb_cpu() {
4104        let file = include_bytes!(concat!(
4105            env!("CARGO_MANIFEST_DIR"),
4106            "/../../testdata/camera720p.vyuy"
4107        ))
4108        .to_vec();
4109        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
4110        src.as_u8()
4111            .unwrap()
4112            .map()
4113            .unwrap()
4114            .as_mut_slice()
4115            .copy_from_slice(&file);
4116
4117        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4118        let mut cpu_converter = CPUProcessor::new();
4119
4120        let (result, _src, dst) = convert_img(
4121            &mut cpu_converter,
4122            src,
4123            dst,
4124            Rotation::None,
4125            Flip::None,
4126            Crop::no_crop(),
4127        );
4128        result.unwrap();
4129
4130        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4131        target_image
4132            .as_u8()
4133            .unwrap()
4134            .map()
4135            .unwrap()
4136            .as_mut_slice()
4137            .as_chunks_mut::<3>()
4138            .0
4139            .iter_mut()
4140            .zip(
4141                include_bytes!(concat!(
4142                    env!("CARGO_MANIFEST_DIR"),
4143                    "/../../testdata/camera720p.rgba"
4144                ))
4145                .as_chunks::<4>()
4146                .0,
4147            )
4148            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
4149
4150        compare_images(&dst, &target_image, 0.98, function!());
4151    }
4152
4153    #[test]
4154    #[cfg(target_os = "linux")]
4155    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4156    fn test_vyuy_to_rgba_g2d() {
4157        if !is_g2d_available() {
4158            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
4159            return;
4160        }
4161        if !is_dma_available() {
4162            eprintln!(
4163                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4164            );
4165            return;
4166        }
4167
4168        let src = load_bytes_to_tensor(
4169            1280,
4170            720,
4171            PixelFormat::Vyuy,
4172            None,
4173            include_bytes!(concat!(
4174                env!("CARGO_MANIFEST_DIR"),
4175                "/../../testdata/camera720p.vyuy"
4176            )),
4177        )
4178        .unwrap();
4179
4180        let dst = TensorDyn::image(
4181            1280,
4182            720,
4183            PixelFormat::Rgba,
4184            DType::U8,
4185            Some(TensorMemory::Dma),
4186        )
4187        .unwrap();
4188        let mut g2d_converter = G2DProcessor::new().unwrap();
4189
4190        let (result, _src, dst) = convert_img(
4191            &mut g2d_converter,
4192            src,
4193            dst,
4194            Rotation::None,
4195            Flip::None,
4196            Crop::no_crop(),
4197        );
4198        match result {
4199            Err(Error::G2D(_)) => {
4200                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support PixelFormat::Vyuy format");
4201                return;
4202            }
4203            r => r.unwrap(),
4204        }
4205
4206        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4207        target_image
4208            .as_u8()
4209            .unwrap()
4210            .map()
4211            .unwrap()
4212            .as_mut_slice()
4213            .copy_from_slice(include_bytes!(concat!(
4214                env!("CARGO_MANIFEST_DIR"),
4215                "/../../testdata/camera720p.rgba"
4216            )));
4217
4218        compare_images(&dst, &target_image, 0.98, function!());
4219    }
4220
4221    #[test]
4222    #[cfg(target_os = "linux")]
4223    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4224    fn test_vyuy_to_rgb_g2d() {
4225        if !is_g2d_available() {
4226            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
4227            return;
4228        }
4229        if !is_dma_available() {
4230            eprintln!(
4231                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4232            );
4233            return;
4234        }
4235
4236        let src = load_bytes_to_tensor(
4237            1280,
4238            720,
4239            PixelFormat::Vyuy,
4240            None,
4241            include_bytes!(concat!(
4242                env!("CARGO_MANIFEST_DIR"),
4243                "/../../testdata/camera720p.vyuy"
4244            )),
4245        )
4246        .unwrap();
4247
4248        let g2d_dst = TensorDyn::image(
4249            1280,
4250            720,
4251            PixelFormat::Rgb,
4252            DType::U8,
4253            Some(TensorMemory::Dma),
4254        )
4255        .unwrap();
4256        let mut g2d_converter = G2DProcessor::new().unwrap();
4257
4258        let (result, src, g2d_dst) = convert_img(
4259            &mut g2d_converter,
4260            src,
4261            g2d_dst,
4262            Rotation::None,
4263            Flip::None,
4264            Crop::no_crop(),
4265        );
4266        match result {
4267            Err(Error::G2D(_)) => {
4268                eprintln!(
4269                    "SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support PixelFormat::Vyuy format"
4270                );
4271                return;
4272            }
4273            r => r.unwrap(),
4274        }
4275
4276        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4277        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4278
4279        let (result, _src, cpu_dst) = convert_img(
4280            &mut cpu_converter,
4281            src,
4282            cpu_dst,
4283            Rotation::None,
4284            Flip::None,
4285            Crop::no_crop(),
4286        );
4287        result.unwrap();
4288
4289        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4290    }
4291
4292    #[test]
4293    #[cfg(target_os = "linux")]
4294    #[cfg(feature = "opengl")]
4295    fn test_vyuy_to_rgba_opengl() {
4296        if !is_opengl_available() {
4297            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4298            return;
4299        }
4300        if !is_dma_available() {
4301            eprintln!(
4302                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4303                function!()
4304            );
4305            return;
4306        }
4307
4308        let src = load_bytes_to_tensor(
4309            1280,
4310            720,
4311            PixelFormat::Vyuy,
4312            Some(TensorMemory::Dma),
4313            include_bytes!(concat!(
4314                env!("CARGO_MANIFEST_DIR"),
4315                "/../../testdata/camera720p.vyuy"
4316            )),
4317        )
4318        .unwrap();
4319
4320        let dst = TensorDyn::image(
4321            1280,
4322            720,
4323            PixelFormat::Rgba,
4324            DType::U8,
4325            Some(TensorMemory::Dma),
4326        )
4327        .unwrap();
4328        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4329
4330        let (result, _src, dst) = convert_img(
4331            &mut gl_converter,
4332            src,
4333            dst,
4334            Rotation::None,
4335            Flip::None,
4336            Crop::no_crop(),
4337        );
4338        match result {
4339            Err(Error::NotSupported(_)) => {
4340                eprintln!(
4341                    "SKIPPED: {} - OpenGL does not support PixelFormat::Vyuy DMA format",
4342                    function!()
4343                );
4344                return;
4345            }
4346            r => r.unwrap(),
4347        }
4348
4349        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4350        target_image
4351            .as_u8()
4352            .unwrap()
4353            .map()
4354            .unwrap()
4355            .as_mut_slice()
4356            .copy_from_slice(include_bytes!(concat!(
4357                env!("CARGO_MANIFEST_DIR"),
4358                "/../../testdata/camera720p.rgba"
4359            )));
4360
4361        compare_images(&dst, &target_image, 0.98, function!());
4362    }
4363
4364    #[test]
4365    fn test_nv12_to_rgba_cpu() {
4366        let file = include_bytes!(concat!(
4367            env!("CARGO_MANIFEST_DIR"),
4368            "/../../testdata/zidane.nv12"
4369        ))
4370        .to_vec();
4371        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4372        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4373            .copy_from_slice(&file);
4374
4375        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4376        let mut cpu_converter = CPUProcessor::new();
4377
4378        let (result, _src, dst) = convert_img(
4379            &mut cpu_converter,
4380            src,
4381            dst,
4382            Rotation::None,
4383            Flip::None,
4384            Crop::no_crop(),
4385        );
4386        result.unwrap();
4387
4388        let target_image = crate::load_image(
4389            include_bytes!(concat!(
4390                env!("CARGO_MANIFEST_DIR"),
4391                "/../../testdata/zidane.jpg"
4392            )),
4393            Some(PixelFormat::Rgba),
4394            None,
4395        )
4396        .unwrap();
4397
4398        compare_images(&dst, &target_image, 0.98, function!());
4399    }
4400
4401    #[test]
4402    fn test_nv12_to_rgb_cpu() {
4403        let file = include_bytes!(concat!(
4404            env!("CARGO_MANIFEST_DIR"),
4405            "/../../testdata/zidane.nv12"
4406        ))
4407        .to_vec();
4408        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4409        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4410            .copy_from_slice(&file);
4411
4412        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4413        let mut cpu_converter = CPUProcessor::new();
4414
4415        let (result, _src, dst) = convert_img(
4416            &mut cpu_converter,
4417            src,
4418            dst,
4419            Rotation::None,
4420            Flip::None,
4421            Crop::no_crop(),
4422        );
4423        result.unwrap();
4424
4425        let target_image = crate::load_image(
4426            include_bytes!(concat!(
4427                env!("CARGO_MANIFEST_DIR"),
4428                "/../../testdata/zidane.jpg"
4429            )),
4430            Some(PixelFormat::Rgb),
4431            None,
4432        )
4433        .unwrap();
4434
4435        compare_images(&dst, &target_image, 0.98, function!());
4436    }
4437
4438    #[test]
4439    fn test_nv12_to_grey_cpu() {
4440        let file = include_bytes!(concat!(
4441            env!("CARGO_MANIFEST_DIR"),
4442            "/../../testdata/zidane.nv12"
4443        ))
4444        .to_vec();
4445        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4446        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4447            .copy_from_slice(&file);
4448
4449        let dst = TensorDyn::image(1280, 720, PixelFormat::Grey, DType::U8, None).unwrap();
4450        let mut cpu_converter = CPUProcessor::new();
4451
4452        let (result, _src, dst) = convert_img(
4453            &mut cpu_converter,
4454            src,
4455            dst,
4456            Rotation::None,
4457            Flip::None,
4458            Crop::no_crop(),
4459        );
4460        result.unwrap();
4461
4462        let target_image = crate::load_image(
4463            include_bytes!(concat!(
4464                env!("CARGO_MANIFEST_DIR"),
4465                "/../../testdata/zidane.jpg"
4466            )),
4467            Some(PixelFormat::Grey),
4468            None,
4469        )
4470        .unwrap();
4471
4472        compare_images(&dst, &target_image, 0.98, function!());
4473    }
4474
4475    #[test]
4476    fn test_nv12_to_yuyv_cpu() {
4477        let file = include_bytes!(concat!(
4478            env!("CARGO_MANIFEST_DIR"),
4479            "/../../testdata/zidane.nv12"
4480        ))
4481        .to_vec();
4482        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4483        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4484            .copy_from_slice(&file);
4485
4486        let dst = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
4487        let mut cpu_converter = CPUProcessor::new();
4488
4489        let (result, _src, dst) = convert_img(
4490            &mut cpu_converter,
4491            src,
4492            dst,
4493            Rotation::None,
4494            Flip::None,
4495            Crop::no_crop(),
4496        );
4497        result.unwrap();
4498
4499        let target_image = crate::load_image(
4500            include_bytes!(concat!(
4501                env!("CARGO_MANIFEST_DIR"),
4502                "/../../testdata/zidane.jpg"
4503            )),
4504            Some(PixelFormat::Rgb),
4505            None,
4506        )
4507        .unwrap();
4508
4509        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4510    }
4511
4512    #[test]
4513    fn test_cpu_resize_planar_rgb() {
4514        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
4515        #[rustfmt::skip]
4516        let src_image = [
4517                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
4518                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4519                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
4520                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4521        ];
4522        src.as_u8()
4523            .unwrap()
4524            .map()
4525            .unwrap()
4526            .as_mut_slice()
4527            .copy_from_slice(&src_image);
4528
4529        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
4530        let mut cpu_converter = CPUProcessor::new();
4531
4532        let (result, _src, cpu_dst) = convert_img(
4533            &mut cpu_converter,
4534            src,
4535            cpu_dst,
4536            Rotation::None,
4537            Flip::None,
4538            Crop::new()
4539                .with_dst_rect(Some(Rect {
4540                    left: 1,
4541                    top: 1,
4542                    width: 4,
4543                    height: 4,
4544                }))
4545                .with_dst_color(Some([114, 114, 114, 255])),
4546        );
4547        result.unwrap();
4548
4549        #[rustfmt::skip]
4550        let expected_dst = [
4551            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
4552            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
4553            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
4554        ];
4555
4556        assert_eq!(
4557            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
4558            &expected_dst
4559        );
4560    }
4561
4562    #[test]
4563    fn test_cpu_resize_planar_rgba() {
4564        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
4565        #[rustfmt::skip]
4566        let src_image = [
4567                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
4568                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4569                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
4570                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4571        ];
4572        src.as_u8()
4573            .unwrap()
4574            .map()
4575            .unwrap()
4576            .as_mut_slice()
4577            .copy_from_slice(&src_image);
4578
4579        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgba, DType::U8, None).unwrap();
4580        let mut cpu_converter = CPUProcessor::new();
4581
4582        let (result, _src, cpu_dst) = convert_img(
4583            &mut cpu_converter,
4584            src,
4585            cpu_dst,
4586            Rotation::None,
4587            Flip::None,
4588            Crop::new()
4589                .with_dst_rect(Some(Rect {
4590                    left: 1,
4591                    top: 1,
4592                    width: 4,
4593                    height: 4,
4594                }))
4595                .with_dst_color(Some([114, 114, 114, 255])),
4596        );
4597        result.unwrap();
4598
4599        #[rustfmt::skip]
4600        let expected_dst = [
4601            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
4602            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
4603            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
4604            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
4605        ];
4606
4607        assert_eq!(
4608            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
4609            &expected_dst
4610        );
4611    }
4612
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize_planar_rgb() {
        // Compares the OpenGL and CPU backends performing the same letterboxed
        // resize into a planar-RGB destination. Skips when GL or DMA-BUF is
        // unavailable on the host.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/test_image.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // NOTE(review): this first full-frame CPU conversion is immediately
        // followed by the letterbox pass into the same destination below;
        // if the letterbox pad color fills the whole border it overwrites this
        // result entirely — confirm whether this pass is still needed.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        // Letterbox: place the image in a 440x440 region at (102, 102) and
        // fill the remainder with the pad color 114.
        let crop_letterbox = Crop::new()
            .with_dst_rect(Some(Rect {
                left: 102,
                top: 102,
                width: 440,
                height: 440,
            }))
            .with_dst_color(Some([114, 114, 114, 114]));
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();

        // Same letterboxed conversion through the threaded OpenGL backend.
        let gl_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
4696
4697    #[test]
4698    fn test_cpu_resize_nv16() {
4699        let file = include_bytes!(concat!(
4700            env!("CARGO_MANIFEST_DIR"),
4701            "/../../testdata/zidane.jpg"
4702        ))
4703        .to_vec();
4704        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
4705
4706        let cpu_nv16_dst = TensorDyn::image(640, 640, PixelFormat::Nv16, DType::U8, None).unwrap();
4707        let cpu_rgb_dst = TensorDyn::image(640, 640, PixelFormat::Rgb, DType::U8, None).unwrap();
4708        let mut cpu_converter = CPUProcessor::new();
4709        let crop = Crop::new()
4710            .with_dst_rect(Some(Rect {
4711                left: 20,
4712                top: 140,
4713                width: 600,
4714                height: 360,
4715            }))
4716            .with_dst_color(Some([255, 128, 0, 255]));
4717
4718        let (result, src, cpu_nv16_dst) = convert_img(
4719            &mut cpu_converter,
4720            src,
4721            cpu_nv16_dst,
4722            Rotation::None,
4723            Flip::None,
4724            crop,
4725        );
4726        result.unwrap();
4727
4728        let (result, _src, cpu_rgb_dst) = convert_img(
4729            &mut cpu_converter,
4730            src,
4731            cpu_rgb_dst,
4732            Rotation::None,
4733            Flip::None,
4734            crop,
4735        );
4736        result.unwrap();
4737        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4738    }
4739
4740    fn load_bytes_to_tensor(
4741        width: usize,
4742        height: usize,
4743        format: PixelFormat,
4744        memory: Option<TensorMemory>,
4745        bytes: &[u8],
4746    ) -> Result<TensorDyn, Error> {
4747        let src = TensorDyn::image(width, height, format, DType::U8, memory)?;
4748        src.as_u8()
4749            .unwrap()
4750            .map()?
4751            .as_mut_slice()
4752            .copy_from_slice(bytes);
4753        Ok(src)
4754    }
4755
4756    fn compare_images(img1: &TensorDyn, img2: &TensorDyn, threshold: f64, name: &str) {
4757        assert_eq!(img1.height(), img2.height(), "Heights differ");
4758        assert_eq!(img1.width(), img2.width(), "Widths differ");
4759        assert_eq!(
4760            img1.format().unwrap(),
4761            img2.format().unwrap(),
4762            "PixelFormat differ"
4763        );
4764        assert!(
4765            matches!(
4766                img1.format().unwrap(),
4767                PixelFormat::Rgb | PixelFormat::Rgba | PixelFormat::Grey | PixelFormat::PlanarRgb
4768            ),
4769            "format must be Rgb or Rgba for comparison"
4770        );
4771
4772        let image1 = match img1.format().unwrap() {
4773            PixelFormat::Rgb => image::RgbImage::from_vec(
4774                img1.width().unwrap() as u32,
4775                img1.height().unwrap() as u32,
4776                img1.as_u8().unwrap().map().unwrap().to_vec(),
4777            )
4778            .unwrap(),
4779            PixelFormat::Rgba => image::RgbaImage::from_vec(
4780                img1.width().unwrap() as u32,
4781                img1.height().unwrap() as u32,
4782                img1.as_u8().unwrap().map().unwrap().to_vec(),
4783            )
4784            .unwrap()
4785            .convert(),
4786            PixelFormat::Grey => image::GrayImage::from_vec(
4787                img1.width().unwrap() as u32,
4788                img1.height().unwrap() as u32,
4789                img1.as_u8().unwrap().map().unwrap().to_vec(),
4790            )
4791            .unwrap()
4792            .convert(),
4793            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4794                img1.width().unwrap() as u32,
4795                (img1.height().unwrap() * 3) as u32,
4796                img1.as_u8().unwrap().map().unwrap().to_vec(),
4797            )
4798            .unwrap()
4799            .convert(),
4800            _ => return,
4801        };
4802
4803        let image2 = match img2.format().unwrap() {
4804            PixelFormat::Rgb => image::RgbImage::from_vec(
4805                img2.width().unwrap() as u32,
4806                img2.height().unwrap() as u32,
4807                img2.as_u8().unwrap().map().unwrap().to_vec(),
4808            )
4809            .unwrap(),
4810            PixelFormat::Rgba => image::RgbaImage::from_vec(
4811                img2.width().unwrap() as u32,
4812                img2.height().unwrap() as u32,
4813                img2.as_u8().unwrap().map().unwrap().to_vec(),
4814            )
4815            .unwrap()
4816            .convert(),
4817            PixelFormat::Grey => image::GrayImage::from_vec(
4818                img2.width().unwrap() as u32,
4819                img2.height().unwrap() as u32,
4820                img2.as_u8().unwrap().map().unwrap().to_vec(),
4821            )
4822            .unwrap()
4823            .convert(),
4824            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4825                img2.width().unwrap() as u32,
4826                (img2.height().unwrap() * 3) as u32,
4827                img2.as_u8().unwrap().map().unwrap().to_vec(),
4828            )
4829            .unwrap()
4830            .convert(),
4831            _ => return,
4832        };
4833
4834        let similarity = image_compare::rgb_similarity_structure(
4835            &image_compare::Algorithm::RootMeanSquared,
4836            &image1,
4837            &image2,
4838        )
4839        .expect("Image Comparison failed");
4840        if similarity.score < threshold {
4841            // image1.save(format!("{name}_1.png"));
4842            // image2.save(format!("{name}_2.png"));
4843            similarity
4844                .image
4845                .to_color_map()
4846                .save(format!("{name}.png"))
4847                .unwrap();
4848            panic!(
4849                "{name}: converted image and target image have similarity score too low: {} < {}",
4850                similarity.score, threshold
4851            )
4852        }
4853    }
4854
    /// Compares two image tensors of arbitrary (possibly different) formats by
    /// first converting both to RGB with the CPU backend, then checking their
    /// RMS structural similarity against `threshold`. On failure the
    /// difference color map is saved as `{name}.png` before panicking.
    fn compare_images_convert_to_rgb(
        img1: &TensorDyn,
        img2: &TensorDyn,
        threshold: f64,
        name: &str,
    ) {
        assert_eq!(img1.height(), img2.height(), "Heights differ");
        assert_eq!(img1.width(), img2.width(), "Widths differ");

        // Scratch RGB tensors; img1's dimensions are valid for both because
        // the asserts above guarantee the two inputs match.
        let mut img_rgb1 = TensorDyn::image(
            img1.width().unwrap(),
            img1.height().unwrap(),
            PixelFormat::Rgb,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let mut img_rgb2 = TensorDyn::image(
            img1.width().unwrap(),
            img1.height().unwrap(),
            PixelFormat::Rgb,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let mut __cv = CPUProcessor::default();
        let r1 = __cv.convert(
            img1,
            &mut img_rgb1,
            crate::Rotation::None,
            crate::Flip::None,
            crate::Crop::default(),
        );
        let r2 = __cv.convert(
            img2,
            &mut img_rgb2,
            crate::Rotation::None,
            crate::Flip::None,
            crate::Crop::default(),
        );
        if r1.is_err() || r2.is_err() {
            // Fallback: compare raw bytes as greyscale strip
            // NOTE(review): this assumes each buffer length is an exact
            // multiple of the width — otherwise the integer division drops
            // trailing bytes and GrayImage::from_vec will panic on the
            // length mismatch; confirm all callers satisfy this.
            let w = img1.width().unwrap() as u32;
            let data1 = img1.as_u8().unwrap().map().unwrap().to_vec();
            let data2 = img2.as_u8().unwrap().map().unwrap().to_vec();
            let h1 = (data1.len() as u32) / w;
            let h2 = (data2.len() as u32) / w;
            let g1 = image::GrayImage::from_vec(w, h1, data1).unwrap();
            let g2 = image::GrayImage::from_vec(w, h2, data2).unwrap();
            let similarity = image_compare::gray_similarity_structure(
                &image_compare::Algorithm::RootMeanSquared,
                &g1,
                &g2,
            )
            .expect("Image Comparison failed");
            if similarity.score < threshold {
                panic!(
                    "{name}: converted image and target image have similarity score too low: {} < {}",
                    similarity.score, threshold
                )
            }
            return;
        }

        let image1 = image::RgbImage::from_vec(
            img_rgb1.width().unwrap() as u32,
            img_rgb1.height().unwrap() as u32,
            img_rgb1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap();

        let image2 = image::RgbImage::from_vec(
            img_rgb2.width().unwrap() as u32,
            img_rgb2.height().unwrap() as u32,
            img_rgb2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap();

        let similarity = image_compare::rgb_similarity_structure(
            &image_compare::Algorithm::RootMeanSquared,
            &image1,
            &image2,
        )
        .expect("Image Comparison failed");
        if similarity.score < threshold {
            // image1.save(format!("{name}_1.png"));
            // image2.save(format!("{name}_2.png"));
            similarity
                .image
                .to_color_map()
                .save(format!("{name}.png"))
                .unwrap();
            panic!(
                "{name}: converted image and target image have similarity score too low: {} < {}",
                similarity.score, threshold
            )
        }
    }
4953
4954    // =========================================================================
4955    // PixelFormat::Nv12 Format Tests
4956    // =========================================================================
4957
4958    #[test]
4959    fn test_nv12_image_creation() {
4960        let width = 640;
4961        let height = 480;
4962        let img = TensorDyn::image(width, height, PixelFormat::Nv12, DType::U8, None).unwrap();
4963
4964        assert_eq!(img.width(), Some(width));
4965        assert_eq!(img.height(), Some(height));
4966        assert_eq!(img.format().unwrap(), PixelFormat::Nv12);
4967        // PixelFormat::Nv12 uses shape [H*3/2, W] to store Y plane + UV plane
4968        assert_eq!(img.as_u8().unwrap().shape(), &[height * 3 / 2, width]);
4969    }
4970
4971    #[test]
4972    fn test_nv12_channels() {
4973        let img = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
4974        // PixelFormat::Nv12.channels() returns 1 (luma plane)
4975        assert_eq!(img.format().unwrap().channels(), 1);
4976    }
4977
4978    // =========================================================================
4979    // Tensor Format Metadata Tests
4980    // =========================================================================
4981
4982    #[test]
4983    fn test_tensor_set_format_planar() {
4984        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4985        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
4986        assert_eq!(tensor.format(), Some(PixelFormat::PlanarRgb));
4987        assert_eq!(tensor.width(), Some(640));
4988        assert_eq!(tensor.height(), Some(480));
4989    }
4990
4991    #[test]
4992    fn test_tensor_set_format_interleaved() {
4993        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
4994        tensor.set_format(PixelFormat::Rgba).unwrap();
4995        assert_eq!(tensor.format(), Some(PixelFormat::Rgba));
4996        assert_eq!(tensor.width(), Some(640));
4997        assert_eq!(tensor.height(), Some(480));
4998    }
4999
5000    #[test]
5001    fn test_tensordyn_image_rgb() {
5002        let img = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
5003        assert_eq!(img.width(), Some(640));
5004        assert_eq!(img.height(), Some(480));
5005        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5006    }
5007
5008    #[test]
5009    fn test_tensordyn_image_planar_rgb() {
5010        let img = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
5011        assert_eq!(img.width(), Some(640));
5012        assert_eq!(img.height(), Some(480));
5013        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5014    }
5015
5016    #[test]
5017    fn test_rgb_int8_format() {
5018        // Int8 variant: same PixelFormat::Rgb but with DType::I8
5019        let img = TensorDyn::image(
5020            1280,
5021            720,
5022            PixelFormat::Rgb,
5023            DType::I8,
5024            Some(TensorMemory::Mem),
5025        )
5026        .unwrap();
5027        assert_eq!(img.width(), Some(1280));
5028        assert_eq!(img.height(), Some(720));
5029        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5030        assert_eq!(img.dtype(), DType::I8);
5031    }
5032
5033    #[test]
5034    fn test_planar_rgb_int8_format() {
5035        let img = TensorDyn::image(
5036            1280,
5037            720,
5038            PixelFormat::PlanarRgb,
5039            DType::I8,
5040            Some(TensorMemory::Mem),
5041        )
5042        .unwrap();
5043        assert_eq!(img.width(), Some(1280));
5044        assert_eq!(img.height(), Some(720));
5045        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5046        assert_eq!(img.dtype(), DType::I8);
5047    }
5048
5049    #[test]
5050    fn test_rgb_from_tensor() {
5051        let mut tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
5052        tensor.set_format(PixelFormat::Rgb).unwrap();
5053        let img = TensorDyn::from(tensor);
5054        assert_eq!(img.width(), Some(1280));
5055        assert_eq!(img.height(), Some(720));
5056        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5057    }
5058
5059    #[test]
5060    fn test_planar_rgb_from_tensor() {
5061        let mut tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
5062        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
5063        let img = TensorDyn::from(tensor);
5064        assert_eq!(img.width(), Some(1280));
5065        assert_eq!(img.height(), Some(720));
5066        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5067    }
5068
5069    #[test]
5070    fn test_dtype_determines_int8() {
5071        // DType::I8 indicates int8 data
5072        let u8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::U8, None).unwrap();
5073        let i8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::I8, None).unwrap();
5074        assert_eq!(u8_img.dtype(), DType::U8);
5075        assert_eq!(i8_img.dtype(), DType::I8);
5076    }
5077
5078    #[test]
5079    fn test_pixel_layout_packed_vs_planar() {
5080        // Packed vs planar layout classification
5081        assert_eq!(PixelFormat::Rgb.layout(), PixelLayout::Packed);
5082        assert_eq!(PixelFormat::Rgba.layout(), PixelLayout::Packed);
5083        assert_eq!(PixelFormat::PlanarRgb.layout(), PixelLayout::Planar);
5084        assert_eq!(PixelFormat::Nv12.layout(), PixelLayout::SemiPlanar);
5085    }
5086
5087    /// Integration test that exercises the PBO-to-PBO convert path.
5088    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
5089    /// then converts between them. Skipped when GL is unavailable or the
5090    /// backend is not PBO (e.g. DMA-buf systems).
5091    #[cfg(target_os = "linux")]
5092    #[cfg(feature = "opengl")]
5093    #[test]
5094    fn test_convert_pbo_to_pbo() {
5095        let mut converter = ImageProcessor::new().unwrap();
5096
5097        // Skip if GL is not available or backend is not PBO
5098        let is_pbo = converter
5099            .opengl
5100            .as_ref()
5101            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
5102        if !is_pbo {
5103            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
5104            return;
5105        }
5106
5107        let src_w = 640;
5108        let src_h = 480;
5109        let dst_w = 320;
5110        let dst_h = 240;
5111
5112        // Create PBO-backed source image
5113        let pbo_src = converter
5114            .create_image(src_w, src_h, PixelFormat::Rgba, DType::U8, None)
5115            .unwrap();
5116        assert_eq!(
5117            pbo_src.as_u8().unwrap().memory(),
5118            TensorMemory::Pbo,
5119            "create_image should produce a PBO tensor"
5120        );
5121
5122        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
5123        let file = include_bytes!(concat!(
5124            env!("CARGO_MANIFEST_DIR"),
5125            "/../../testdata/zidane.jpg"
5126        ))
5127        .to_vec();
5128        let jpeg_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
5129
5130        // Resize JPEG into a Mem temp of the right size, then copy into PBO
5131        let mem_src = TensorDyn::image(
5132            src_w,
5133            src_h,
5134            PixelFormat::Rgba,
5135            DType::U8,
5136            Some(TensorMemory::Mem),
5137        )
5138        .unwrap();
5139        let (result, _jpeg_src, mem_src) = convert_img(
5140            &mut CPUProcessor::new(),
5141            jpeg_src,
5142            mem_src,
5143            Rotation::None,
5144            Flip::None,
5145            Crop::no_crop(),
5146        );
5147        result.unwrap();
5148
5149        // Copy pixel data into the PBO source by mapping it
5150        {
5151            let src_data = mem_src.as_u8().unwrap().map().unwrap();
5152            let mut pbo_map = pbo_src.as_u8().unwrap().map().unwrap();
5153            pbo_map.copy_from_slice(&src_data);
5154        }
5155
5156        // Create PBO-backed destination image
5157        let pbo_dst = converter
5158            .create_image(dst_w, dst_h, PixelFormat::Rgba, DType::U8, None)
5159            .unwrap();
5160        assert_eq!(pbo_dst.as_u8().unwrap().memory(), TensorMemory::Pbo);
5161
5162        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
5163        let mut pbo_dst = pbo_dst;
5164        let result = converter.convert(
5165            &pbo_src,
5166            &mut pbo_dst,
5167            Rotation::None,
5168            Flip::None,
5169            Crop::no_crop(),
5170        );
5171        result.unwrap();
5172
5173        // Verify: compare with CPU-only conversion of the same input
5174        let cpu_dst = TensorDyn::image(
5175            dst_w,
5176            dst_h,
5177            PixelFormat::Rgba,
5178            DType::U8,
5179            Some(TensorMemory::Mem),
5180        )
5181        .unwrap();
5182        let (result, _mem_src, cpu_dst) = convert_img(
5183            &mut CPUProcessor::new(),
5184            mem_src,
5185            cpu_dst,
5186            Rotation::None,
5187            Flip::None,
5188            Crop::no_crop(),
5189        );
5190        result.unwrap();
5191
5192        let pbo_dst_img = {
5193            let mut __t = pbo_dst.into_u8().unwrap();
5194            __t.set_format(PixelFormat::Rgba).unwrap();
5195            TensorDyn::from(__t)
5196        };
5197        compare_images(&pbo_dst_img, &cpu_dst, 0.95, function!());
5198        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
5199    }
5200
5201    #[test]
5202    fn test_image_bgra() {
5203        let img = TensorDyn::image(
5204            640,
5205            480,
5206            PixelFormat::Bgra,
5207            DType::U8,
5208            Some(edgefirst_tensor::TensorMemory::Mem),
5209        )
5210        .unwrap();
5211        assert_eq!(img.width(), Some(640));
5212        assert_eq!(img.height(), Some(480));
5213        assert_eq!(img.format().unwrap().channels(), 4);
5214        assert_eq!(img.format().unwrap(), PixelFormat::Bgra);
5215    }
5216
5217    // ========================================================================
5218    // Tests for EDGEFIRST_FORCE_BACKEND env var
5219    // ========================================================================
5220
5221    #[test]
5222    fn test_force_backend_cpu() {
5223        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5224        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5225        let result = ImageProcessor::new();
5226        match original {
5227            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5228            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5229        }
5230        let converter = result.unwrap();
5231        assert!(converter.cpu.is_some());
5232        assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
5233    }
5234
5235    #[test]
5236    fn test_force_backend_invalid() {
5237        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5238        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
5239        let result = ImageProcessor::new();
5240        match original {
5241            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5242            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5243        }
5244        assert!(
5245            matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
5246            "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
5247        );
5248    }
5249
5250    #[test]
5251    fn test_force_backend_unset() {
5252        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5253        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
5254        let result = ImageProcessor::new();
5255        match original {
5256            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5257            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5258        }
5259        let converter = result.unwrap();
5260        assert!(converter.forced_backend.is_none());
5261    }
5262
5263    // ========================================================================
5264    // Tests for hybrid mask path error handling
5265    // ========================================================================
5266
5267    #[test]
5268    fn test_draw_masks_proto_no_cpu_returns_error() {
5269        // Disable CPU backend to trigger the error path
5270        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
5271        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
5272        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
5273        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
5274        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
5275        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
5276
5277        let result = ImageProcessor::new();
5278
5279        match original_cpu {
5280            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
5281            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
5282        }
5283        match original_gl {
5284            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
5285            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
5286        }
5287        match original_g2d {
5288            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
5289            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
5290        }
5291
5292        let mut converter = result.unwrap();
5293        assert!(converter.cpu.is_none(), "CPU should be disabled");
5294
5295        let dst = TensorDyn::image(
5296            640,
5297            480,
5298            PixelFormat::Rgba,
5299            DType::U8,
5300            Some(TensorMemory::Mem),
5301        )
5302        .unwrap();
5303        let mut dst_dyn = dst;
5304        let det = [DetectBox {
5305            bbox: edgefirst_decoder::BoundingBox {
5306                xmin: 0.1,
5307                ymin: 0.1,
5308                xmax: 0.5,
5309                ymax: 0.5,
5310            },
5311            score: 0.9,
5312            label: 0,
5313        }];
5314        let proto_data = ProtoData {
5315            mask_coefficients: vec![vec![0.5; 4]],
5316            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5317        };
5318        let result = converter.draw_masks_proto(&mut dst_dyn, &det, &proto_data);
5319        assert!(
5320            matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
5321            "draw_masks_proto without CPU should return Internal error: {result:?}"
5322        );
5323    }
5324
5325    #[test]
5326    fn test_draw_masks_proto_cpu_fallback_works() {
5327        // Force CPU-only backend to ensure the CPU fallback path executes
5328        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5329        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5330        let result = ImageProcessor::new();
5331        match original {
5332            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5333            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5334        }
5335
5336        let mut converter = result.unwrap();
5337        assert!(converter.cpu.is_some());
5338
5339        let dst = TensorDyn::image(
5340            64,
5341            64,
5342            PixelFormat::Rgba,
5343            DType::U8,
5344            Some(TensorMemory::Mem),
5345        )
5346        .unwrap();
5347        let mut dst_dyn = dst;
5348        let det = [DetectBox {
5349            bbox: edgefirst_decoder::BoundingBox {
5350                xmin: 0.1,
5351                ymin: 0.1,
5352                xmax: 0.5,
5353                ymax: 0.5,
5354            },
5355            score: 0.9,
5356            label: 0,
5357        }];
5358        let proto_data = ProtoData {
5359            mask_coefficients: vec![vec![0.5; 4]],
5360            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5361        };
5362        let result = converter.draw_masks_proto(&mut dst_dyn, &det, &proto_data);
5363        assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
5364    }
5365
5366    #[test]
5367    fn test_set_format_then_cpu_convert() {
5368        // Force CPU backend (save/restore to avoid leaking into other tests)
5369        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5370        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5371        let mut processor = ImageProcessor::new().unwrap();
5372        match original {
5373            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5374            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5375        }
5376
5377        // Load a source image
5378        let image = include_bytes!(concat!(
5379            env!("CARGO_MANIFEST_DIR"),
5380            "/../../testdata/zidane.jpg"
5381        ));
5382        let src = load_image(image, Some(PixelFormat::Rgba), None).unwrap();
5383
5384        // Create a raw tensor, then attach format — simulating the from_fd workflow
5385        let mut dst =
5386            TensorDyn::new(&[640, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
5387        dst.set_format(PixelFormat::Rgb).unwrap();
5388
5389        // Convert should work with the set_format-annotated tensor
5390        processor
5391            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
5392            .unwrap();
5393
5394        // Verify format survived conversion
5395        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
5396        assert_eq!(dst.width(), Some(640));
5397        assert_eq!(dst.height(), Some(640));
5398    }
5399}