Skip to main content

edgefirst_image/
lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate uses [`TensorDyn`] from `edgefirst_tensor` to represent images,
21with [`PixelFormat`] metadata describing the pixel layout. The
22[`ImageProcessor`] struct manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
29# use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
30# fn main() -> Result<(), edgefirst_image::Error> {
31let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
32let src = load_image(image, Some(PixelFormat::Rgba), None)?;
33let mut converter = ImageProcessor::new()?;
34let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
35converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
36# Ok(())
37# }
38```
39
40## Environment Variables
41The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
42following environment variables:
43- `EDGEFIRST_FORCE_BACKEND`: When set to `cpu`, `g2d`, or `opengl` (case-insensitive),
44  only that single backend is initialized and no fallback chain is used. If the
45  forced backend fails to initialize, an error is returned immediately. This is
46  useful for benchmarking individual backends in isolation. When this variable is
47  set, the `EDGEFIRST_DISABLE_*` variables are ignored.
48- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
49  conversion, forcing the use of CPU or other available hardware methods.
50- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
51  conversion, forcing the use of CPU or other available hardware methods.
52- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
53  conversion, forcing the use of hardware acceleration methods. If no hardware
54  acceleration methods are available, an error will be returned when attempting
55  to create an `ImageProcessor`.
56
Additionally, the `TensorMemory` used by default allocations can be controlled with the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor
allocations use system memory. This disables the use of specialized memory regions for
tensors and hardware acceleration, but may improve the performance of the CPU converter.
61*/
62#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
63
64use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
65use edgefirst_tensor::{
66    DType, PixelFormat, PixelLayout, Tensor, TensorDyn, TensorMemory, TensorTrait as _,
67};
68use enum_dispatch::enum_dispatch;
69use std::{fmt::Display, time::Instant};
70use zune_jpeg::{
71    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
72    JpegDecoder,
73};
74use zune_png::PngDecoder;
75
76pub use cpu::CPUProcessor;
77pub use error::{Error, Result};
78#[cfg(target_os = "linux")]
79pub use g2d::G2DProcessor;
80#[cfg(target_os = "linux")]
81#[cfg(feature = "opengl")]
82pub use opengl_headless::GLProcessorThreaded;
83#[cfg(target_os = "linux")]
84#[cfg(feature = "opengl")]
85pub use opengl_headless::Int8InterpolationMode;
86#[cfg(target_os = "linux")]
87#[cfg(feature = "opengl")]
88pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
89
/// Result of rendering a single per-instance grayscale mask.
///
/// Contains the bounding-box region in output image coordinates and the
/// raw uint8 pixel data (RED channel only, 0–255 representing sigmoid output).
///
/// All offsets and sizes below are expressed in output-image pixel
/// coordinates, not in the proto/mask tensor's native resolution.
#[derive(Debug, Clone)]
pub(crate) struct MaskResult {
    /// X offset of the bbox region in the output image.
    pub(crate) x: usize,
    /// Y offset of the bbox region in the output image.
    pub(crate) y: usize,
    /// Width of the bbox region.
    pub(crate) w: usize,
    /// Height of the bbox region.
    pub(crate) h: usize,
    /// Grayscale pixel data (w * h bytes, row-major).
    pub(crate) pixels: Vec<u8>,
}
107
/// Region metadata for a single detection within a compact mask atlas.
///
/// The atlas packs padded bounding-box strips vertically.  This struct
/// records where each detection's strip lives in the atlas and how it
/// maps back to the original output coordinate space.  A strip occupies
/// atlas rows `atlas_y_offset .. atlas_y_offset + padded_h`.
#[must_use]
#[derive(Debug, Clone, Copy)]
pub struct MaskRegion {
    /// Row offset of this detection's strip in the atlas.
    pub atlas_y_offset: usize,
    /// Left edge of the padded bbox in output image coordinates.
    pub padded_x: usize,
    /// Top edge of the padded bbox in output image coordinates.
    pub padded_y: usize,
    /// Width of the padded bbox.
    pub padded_w: usize,
    /// Height of the padded bbox (= number of atlas rows for this strip).
    pub padded_h: usize,
    /// Original (unpadded) bbox left edge in output image coordinates.
    pub bbox_x: usize,
    /// Original (unpadded) bbox top edge in output image coordinates.
    pub bbox_y: usize,
    /// Original (unpadded) bbox width.
    pub bbox_w: usize,
    /// Original (unpadded) bbox height.
    pub bbox_h: usize,
}
135
136mod cpu;
137mod error;
138mod g2d;
139#[path = "gl/mod.rs"]
140mod opengl_headless;
141
142// Use `edgefirst_tensor::PixelFormat` variants (Rgb, Rgba, Grey, etc.) and
143// `TensorDyn` / `Tensor<u8>` with `.format()` metadata instead.
144
145/// Flips the image data, then rotates it. Returns a new `TensorDyn`.
146fn rotate_flip_to_dyn(
147    src: &Tensor<u8>,
148    src_fmt: PixelFormat,
149    rotation: Rotation,
150    flip: Flip,
151    memory: Option<TensorMemory>,
152) -> Result<TensorDyn, Error> {
153    let src_w = src.width().unwrap();
154    let src_h = src.height().unwrap();
155    let channels = src_fmt.channels();
156
157    let (dst_w, dst_h) = match rotation {
158        Rotation::None | Rotation::Rotate180 => (src_w, src_h),
159        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src_h, src_w),
160    };
161
162    let dst = Tensor::<u8>::image(dst_w, dst_h, src_fmt, memory)?;
163    let src_map = src.map()?;
164    let mut dst_map = dst.map()?;
165
166    CPUProcessor::flip_rotate_ndarray_pf(
167        &src_map,
168        &mut dst_map,
169        dst_w,
170        dst_h,
171        channels,
172        rotation,
173        flip,
174    )?;
175    drop(dst_map);
176    drop(src_map);
177
178    Ok(TensorDyn::from(dst))
179}
180
/// Quarter-turn rotation applied during image conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    /// No rotation.
    None = 0,
    /// 90 degrees clockwise.
    Clockwise90 = 1,
    /// 180 degrees.
    Rotate180 = 2,
    /// 90 degrees counter-clockwise (equivalently 270 degrees clockwise).
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Builds a [`Rotation`] from a clockwise angle in degrees.
    ///
    /// Angles are reduced modulo 360, so e.g. `450` maps to
    /// [`Rotation::Clockwise90`].
    ///
    /// # Panics
    /// Panics if the reduced angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        // `%` and `rem_euclid` agree for unsigned integers.
        match angle % 360 {
            0 => Self::None,
            90 => Self::Clockwise90,
            180 => Self::Rotate180,
            270 => Self::CounterClockwise90,
            _ => panic!("rotation angle is not a multiple of 90"),
        }
    }
}
211
/// Mirror operation applied during image conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flip.
    None = 0,
    /// Vertical flip. NOTE(review): assumed top/bottom mirror — confirm
    /// against the backend implementations.
    Vertical = 1,
    /// Horizontal flip. NOTE(review): assumed left/right mirror — confirm
    /// against the backend implementations.
    Horizontal = 2,
}
218
/// Source/destination crop configuration for a conversion.
///
/// A `None` field means "no cropping" on that side; see [`Crop::new`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from (`None` = whole image).
    pub src_rect: Option<Rect>,
    /// Region of the destination image to write into (`None` = whole image).
    pub dst_rect: Option<Rect>,
    /// Fill color for destination areas outside the cropped region.
    /// NOTE(review): behavior when `None` is backend-defined — confirm.
    pub dst_color: Option<[u8; 4]>,
}
225
226impl Default for Crop {
227    fn default() -> Self {
228        Crop::new()
229    }
230}
impl Crop {
    /// Creates a new `Crop` with default values (no cropping).
    pub fn new() -> Self {
        Crop {
            src_rect: None,
            dst_rect: None,
            dst_color: None,
        }
    }

    /// Sets the source rectangle for cropping (`None` clears it).
    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
        self.src_rect = src_rect;
        self
    }

    /// Sets the destination rectangle for cropping (`None` clears it).
    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
        self.dst_rect = dst_rect;
        self
    }

    /// Sets the destination color for areas outside the cropped region.
    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
        self.dst_color = dst_color;
        self
    }

    /// Creates a new Crop with no cropping. Alias for [`Crop::new`].
    pub fn no_crop() -> Self {
        Crop::new()
    }

    /// Validate crop rectangles against explicit dimensions.
    ///
    /// A rectangle is valid when it lies entirely within its image:
    /// `left + width <= image_width` and `top + height <= image_height`.
    /// `None` rectangles are always valid.
    ///
    /// # Errors
    /// Returns [`Error::CropInvalid`] naming whichever rectangle(s)
    /// exceed the corresponding image bounds.
    pub(crate) fn check_crop_dims(
        &self,
        src_w: usize,
        src_h: usize,
        dst_w: usize,
        dst_h: usize,
    ) -> Result<(), Error> {
        let src_ok = self
            .src_rect
            .is_none_or(|r| r.left + r.width <= src_w && r.top + r.height <= src_h);
        let dst_ok = self
            .dst_rect
            .is_none_or(|r| r.left + r.width <= dst_w && r.top + r.height <= dst_h);
        match (src_ok, dst_ok) {
            (true, true) => Ok(()),
            (true, false) => Err(Error::CropInvalid(format!(
                "Dest crop invalid: {:?}",
                self.dst_rect
            ))),
            (false, true) => Err(Error::CropInvalid(format!(
                "Src crop invalid: {:?}",
                self.src_rect
            ))),
            (false, false) => Err(Error::CropInvalid(format!(
                "Dest and Src crop invalid: {:?} {:?}",
                self.dst_rect, self.src_rect
            ))),
        }
    }

    /// Validate crop rectangles against TensorDyn source and destination.
    ///
    /// Tensors with unknown dimensions are treated as zero-sized, so any
    /// non-`None` rectangle fails validation against them.
    ///
    /// # Errors
    /// Returns [`Error::CropInvalid`] as described in `check_crop_dims`.
    pub fn check_crop_dyn(
        &self,
        src: &edgefirst_tensor::TensorDyn,
        dst: &edgefirst_tensor::TensorDyn,
    ) -> Result<(), Error> {
        self.check_crop_dims(
            src.width().unwrap_or(0),
            src.height().unwrap_or(0),
            dst.width().unwrap_or(0),
            dst.height().unwrap_or(0),
        )
    }
}
309
/// Axis-aligned rectangle in pixel coordinates.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// Left edge (x offset) in pixels.
    pub left: usize,
    /// Top edge (y offset) in pixels.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
317
impl Rect {
    /// Creates a new `Rect` with the specified left, top, width, and height.
    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
        Self {
            left,
            top,
            width,
            height,
        }
    }

    /// Checks whether this rectangle lies entirely within the given
    /// [`TensorDyn`] image.
    ///
    /// Images with unknown dimensions are treated as zero-sized, so any
    /// non-degenerate rectangle fails the check against them.
    pub fn check_rect_dyn(&self, image: &TensorDyn) -> bool {
        let w = image.width().unwrap_or(0);
        let h = image.height().unwrap_or(0);
        self.left + self.width <= w && self.top + self.height <= h
    }
}
336
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image.
    /// * `crop` - Crop configuration: optional source/destination rectangles
    ///   and fill color (see [`Crop`]).
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draw pre-decoded detection boxes and segmentation masks onto `dst`.
    ///
    /// Supports two segmentation modes based on the mask channel count:
    /// - **Instance segmentation** (`C=1`): one `Segmentation` per detection,
    ///   `segmentation` and `detect` are zipped.
    /// - **Semantic segmentation** (`C>1`): a single `Segmentation` covering
    ///   all classes; only the first element is used.
    ///
    /// # Format requirements
    ///
    /// - CPU backend: `dst` must be `RGBA` or `RGB`.
    /// - OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
    /// - G2D backend: not implemented (returns `NotImplemented`).
    ///
    /// An empty `segmentation` slice is valid — only bounding boxes are drawn.
    fn draw_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly
    /// at the output resolution using bilinear sampling.
    ///
    /// `detect` and `proto_data.mask_coefficients` must have the same length
    /// (enforced by zip — excess entries are silently ignored). An empty
    /// `detect` slice is valid and returns immediately after drawing nothing.
    ///
    /// # Format requirements
    ///
    /// Same as [`draw_masks`](Self::draw_masks). G2D returns `NotImplemented`.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()>;

    /// Decode masks into a compact atlas buffer.
    ///
    /// Used internally by the Python/C `decode_masks` APIs. The atlas is a
    /// compact vertical strip where each detection occupies a strip sized to
    /// its padded bounding box (not the full output resolution).
    ///
    /// `output_width` and `output_height` define the coordinate space for
    /// interpreting bounding boxes — individual mask regions are bbox-sized.
    /// Mask pixels are binary: `255` = presence, `0` = background.
    ///
    /// Returns `(atlas_pixels, regions)` where `regions` describes each
    /// detection's location and bbox within the atlas.
    ///
    /// G2D backend returns `NotImplemented`.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)>;

    /// Sets the colors used for rendering segmentation masks. Up to 20 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
432
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection and backend selection. The default configuration
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,

    /// Preferred compute backend.
    ///
    /// When set to a specific backend (not [`ComputeBackend::Auto`]), the
    /// processor initializes that backend (plus CPU as a fallback, except
    /// for `Cpu` which is CPU-only). If the requested hardware backend
    /// fails to initialize, a warning is logged and the CPU fallback is
    /// used instead.
    /// This takes precedence over `EDGEFIRST_FORCE_BACKEND` and the
    /// `EDGEFIRST_DISABLE_*` environment variables.
    ///
    /// - [`ComputeBackend::OpenGl`]: init OpenGL + CPU, skip G2D
    /// - [`ComputeBackend::G2d`]: init G2D + CPU, skip OpenGL
    /// - [`ComputeBackend::Cpu`]: init CPU only
    /// - [`ComputeBackend::Auto`]: existing env-var-driven selection
    pub backend: ComputeBackend,
}
464
/// Compute backend selection for [`ImageProcessor`].
///
/// Use with [`ImageProcessorConfig::backend`] to select which backend the
/// processor should prefer. When a specific backend is selected, the
/// processor initializes that backend plus CPU as a fallback. When `Auto`
/// is used, the existing environment-variable-driven selection applies.
/// A hardware backend that fails to initialize logs a warning and leaves
/// only the CPU path active (see [`ImageProcessor::with_config`]).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ComputeBackend {
    /// Auto-detect based on available hardware and environment variables.
    #[default]
    Auto,
    /// CPU-only processing (no hardware acceleration).
    Cpu,
    /// Prefer G2D hardware blitter (+ CPU fallback).
    G2d,
    /// Prefer OpenGL ES (+ CPU fallback).
    OpenGl,
}
483
/// Backend forced via the `EDGEFIRST_FORCE_BACKEND` environment variable
/// or [`ImageProcessorConfig::backend`].
///
/// When set, the [`ImageProcessor`] only initializes and dispatches to the
/// selected backend — no fallback chain is used. If the forced backend
/// fails to initialize, construction fails with
/// [`Error::ForcedBackendUnavailable`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    Cpu,
    G2d,
    OpenGl,
}
495
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,

    /// When set, only the specified backend is used — no fallback chain.
    /// Only the `EDGEFIRST_FORCE_BACKEND` path sets this; config-selected
    /// backends leave it `None` (see `with_config`).
    pub(crate) forced_backend: Option<ForcedBackend>,
}
519
// SAFETY(review): ImageProcessor holds backend handles (G2D device, GL
// worker-thread handle) that are evidently not auto-Send/Sync. These
// blanket impls assert cross-thread use is sound, but nothing in this file
// proves it — TODO: confirm each backend's thread-safety guarantees, or
// confine the non-thread-safe state rather than asserting Send + Sync here.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
522
523impl ImageProcessor {
524    /// Creates a new `ImageProcessor` instance, initializing available
525    /// hardware converters based on the system capabilities and environment
526    /// variables.
527    ///
528    /// # Examples
529    /// ```rust
530    /// # use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
531    /// # use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
532    /// # fn main() -> Result<(), edgefirst_image::Error> {
533    /// let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
534    /// let src = load_image(image, Some(PixelFormat::Rgba), None)?;
535    /// let mut converter = ImageProcessor::new()?;
536    /// let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
537    /// converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
538    /// # Ok(())
539    /// # }
540    /// ```
541    pub fn new() -> Result<Self> {
542        Self::with_config(ImageProcessorConfig::default())
543    }
544
    /// Creates a new `ImageProcessor` with the given configuration.
    ///
    /// When [`ImageProcessorConfig::backend`] is set to a specific backend,
    /// environment variables are ignored and the processor initializes the
    /// requested backend plus CPU as a fallback. A config-requested hardware
    /// backend that fails to initialize logs a warning and leaves only the
    /// CPU path active (it does not error).
    ///
    /// When `Auto`, the existing `EDGEFIRST_FORCE_BACKEND` and
    /// `EDGEFIRST_DISABLE_*` environment variables apply. Unlike the config
    /// path, a backend forced via `EDGEFIRST_FORCE_BACKEND` that cannot be
    /// initialized is a hard error ([`Error::ForcedBackendUnavailable`]).
    #[allow(unused_variables)]
    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
        // ── Config-driven backend selection ──────────────────────────
        // When the caller explicitly requests a backend via the config,
        // skip all environment variable logic. Note: these paths leave
        // `forced_backend` as None, so dispatch still prefers the
        // initialized hardware backend with CPU as fallback.
        match config.backend {
            ComputeBackend::Cpu => {
                log::info!("ComputeBackend::Cpu — CPU only");
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: None,
                });
            }
            ComputeBackend::G2d => {
                log::info!("ComputeBackend::G2d — G2D + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // Init failure degrades to CPU-only rather than erroring.
                    let g2d = match G2DProcessor::new() {
                        Ok(g) => Some(g),
                        Err(e) => {
                            log::warn!("G2D requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d,
                        #[cfg(feature = "opengl")]
                        opengl: None,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("G2D requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::OpenGl => {
                log::info!("ComputeBackend::OpenGl — OpenGL + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // Init failure degrades to CPU-only rather than erroring.
                    #[cfg(feature = "opengl")]
                    let opengl = match GLProcessorThreaded::new(config.egl_display) {
                        Ok(gl) => Some(gl),
                        Err(e) => {
                            log::warn!("OpenGL requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d: None,
                        #[cfg(feature = "opengl")]
                        opengl,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("OpenGL requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::Auto => { /* fall through to env-var logic below */ }
        }

        // ── EDGEFIRST_FORCE_BACKEND ──────────────────────────────────
        // When set, only the requested backend is initialised and no
        // fallback chain is used. Accepted values (case-insensitive):
        //   "cpu", "g2d", "opengl"
        // Unlike the config path above, initialization failure here is a
        // hard error (ForcedBackendUnavailable).
        if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
            let val_lower = val.to_lowercase();
            let forced = match val_lower.as_str() {
                "cpu" => ForcedBackend::Cpu,
                "g2d" => ForcedBackend::G2d,
                "opengl" => ForcedBackend::OpenGl,
                other => {
                    return Err(Error::ForcedBackendUnavailable(format!(
                        "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
                    )));
                }
            };

            log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");

            return match forced {
                ForcedBackend::Cpu => Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: Some(ForcedBackend::Cpu),
                }),
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    {
                        let g2d = G2DProcessor::new().map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "g2d forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: Some(g2d),
                            #[cfg(feature = "opengl")]
                            opengl: None,
                            forced_backend: Some(ForcedBackend::G2d),
                        })
                    }
                    #[cfg(not(target_os = "linux"))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "g2d backend is only available on Linux".into(),
                        ))
                    }
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    {
                        let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "opengl forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: None,
                            opengl: Some(opengl),
                            forced_backend: Some(ForcedBackend::OpenGl),
                        })
                    }
                    #[cfg(not(all(target_os = "linux", feature = "opengl")))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "opengl backend requires Linux with the 'opengl' feature enabled"
                                .into(),
                        ))
                    }
                }
            };
        }

        // ── Existing DISABLE logic (unchanged) ──────────────────────
        // Each EDGEFIRST_DISABLE_* variable counts as "set" for any value
        // other than "0" or "false" (case-insensitive).
        #[cfg(target_os = "linux")]
        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_G2D is set");
            None
        } else {
            match G2DProcessor::new() {
                Ok(g2d_converter) => Some(g2d_converter),
                Err(err) => {
                    log::warn!("Failed to initialize G2D converter: {err:?}");
                    None
                }
            }
        };

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_GL is set");
            None
        } else {
            match GLProcessorThreaded::new(config.egl_display) {
                Ok(gl_converter) => Some(gl_converter),
                Err(err) => {
                    log::warn!("Failed to initialize GL converter: {err:?}");
                    None
                }
            }
        };

        // CPU may be disabled too; the processor is then hardware-only.
        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_CPU is set");
            None
        } else {
            Some(CPUProcessor::new())
        };
        Ok(Self {
            cpu,
            #[cfg(target_os = "linux")]
            g2d,
            #[cfg(target_os = "linux")]
            #[cfg(feature = "opengl")]
            opengl,
            forced_backend: None,
        })
    }
765
766    /// Sets the interpolation mode for int8 proto textures on the OpenGL
767    /// backend. No-op if OpenGL is not available.
768    #[cfg(target_os = "linux")]
769    #[cfg(feature = "opengl")]
770    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
771        if let Some(ref mut gl) = self.opengl {
772            gl.set_int8_interpolation_mode(mode)?;
773        }
774        Ok(())
775    }
776
    /// Create a [`TensorDyn`] image with the best available memory backend.
    ///
    /// Priority: DMA-buf → PBO (byte-sized types: u8, i8) → system memory.
    ///
    /// Use this method instead of [`TensorDyn::image()`] when the tensor will
    /// be used with [`ImageProcessor::convert()`]. It selects the optimal
    /// memory backing (including PBO for GPU zero-copy) which direct
    /// allocation cannot achieve.
    ///
    /// This method is on [`ImageProcessor`] rather than [`ImageProcessorTrait`]
    /// because optimal allocation requires knowledge of the active compute
    /// backends (e.g. the GL context handle for PBO allocation). Individual
    /// backend implementations ([`CPUProcessor`], etc.) do not have this
    /// cross-backend visibility.
    ///
    /// # Arguments
    ///
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format
    /// * `dtype` - Element data type (e.g. `DType::U8`, `DType::I8`)
    /// * `memory` - Optional memory type override; when `None`, the best
    ///   available backend is selected automatically.
    ///
    /// # Returns
    ///
    /// A [`TensorDyn`] backed by the highest-performance memory type
    /// available on this system.
    ///
    /// # Errors
    ///
    /// Returns an error if all allocation strategies fail.
    pub fn create_image(
        &self,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
        memory: Option<TensorMemory>,
    ) -> Result<TensorDyn> {
        // If an explicit memory type is requested, honour it directly.
        // A failure here is returned to the caller — no fallback applies.
        if let Some(mem) = memory {
            return Ok(TensorDyn::image(width, height, format, dtype, Some(mem))?);
        }

        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
        // as the preferred transfer path (PBO is better than DMA in that case).
        #[cfg(target_os = "linux")]
        {
            #[cfg(feature = "opengl")]
            let gl_uses_pbo = self
                .opengl
                .as_ref()
                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
            #[cfg(not(feature = "opengl"))]
            let gl_uses_pbo = false;

            if !gl_uses_pbo {
                // DMA allocation may fail (e.g. no allocator device present);
                // silently fall through to the next strategy in that case.
                if let Ok(img) = TensorDyn::image(
                    width,
                    height,
                    format,
                    dtype,
                    Some(edgefirst_tensor::TensorMemory::Dma),
                ) {
                    return Ok(img);
                }
            }
        }

        // Try PBO (if GL available).
        // PBO buffers are u8-sized; the int8 shader emulates i8 output via
        // XOR 0x80 on the same underlying buffer, so both U8 and I8 work.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if dtype.size() == 1 {
            if let Some(gl) = &self.opengl {
                match gl.create_pbo_image(width, height, format) {
                    Ok(t) => {
                        if dtype == DType::I8 {
                            // SAFETY: Tensor<u8> and Tensor<i8> are layout-
                            // identical (same element size, no T-dependent
                            // drop glue). The int8 shader applies XOR 0x80
                            // on the same PBO buffer. Same rationale as
                            // gl::processor::tensor_i8_as_u8_mut.
                            // Invariant: PBO tensors never have chroma
                            // (create_pbo_image → Tensor::wrap sets it None).
                            debug_assert!(
                                t.chroma().is_none(),
                                "PBO i8 transmute requires chroma == None"
                            );
                            let t_i8: Tensor<i8> = unsafe { std::mem::transmute(t) };
                            return Ok(TensorDyn::from(t_i8));
                        }
                        return Ok(TensorDyn::from(t));
                    }
                    Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
                }
            }
        }

        // Fallback to Mem (plain system memory) — the last resort; its error,
        // if any, is the one the caller sees.
        Ok(TensorDyn::image(
            width,
            height,
            format,
            dtype,
            Some(edgefirst_tensor::TensorMemory::Mem),
        )?)
    }
887
888    /// Create an image tensor backed by an external DMA-BUF file descriptor.
889    ///
890    /// The GPU renders directly into this buffer via EGL DMA-BUF import —
891    /// no CPU copy is needed after `convert()`. The caller retains ownership
892    /// of the underlying buffer; the returned tensor borrows it via `dup(fd)`.
893    ///
894    /// # Arguments
895    ///
896    /// * `fd` - Borrowed reference to the DMA-BUF file descriptor
897    /// * `width` - Image width in pixels
898    /// * `height` - Image height in pixels
899    /// * `format` - Pixel format of the buffer
900    /// * `dtype` - Element data type (e.g. `DType::U8`)
901    ///
902    /// # Returns
903    ///
904    /// A `TensorDyn` configured as an image with the given format, backed by a
905    /// `dup`'d copy of the caller's file descriptor.
906    ///
907    /// # Platform
908    ///
909    /// Linux only. Returns `Error::NotSupported` on other platforms.
910    ///
911    /// # Errors
912    ///
913    /// Returns an error if the fd clone fails or the resulting shape is
914    /// invalid for the given format.
915    #[cfg(target_os = "linux")]
916    pub fn create_image_from_fd(
917        &self,
918        fd: std::os::fd::BorrowedFd<'_>,
919        width: usize,
920        height: usize,
921        format: PixelFormat,
922        dtype: DType,
923    ) -> Result<TensorDyn> {
924        let owned = fd.try_clone_to_owned().map_err(Error::Io)?;
925        let shape = match format.layout() {
926            PixelLayout::Packed => vec![height, width, format.channels()],
927            PixelLayout::Planar => vec![format.channels(), height, width],
928            PixelLayout::SemiPlanar => {
929                let total_h = match format {
930                    PixelFormat::Nv12 => {
931                        if !height.is_multiple_of(2) {
932                            return Err(Error::InvalidShape(format!(
933                                "NV12 requires even height, got {height}"
934                            )));
935                        }
936                        height * 3 / 2
937                    }
938                    PixelFormat::Nv16 => height * 2,
939                    _ => {
940                        return Err(Error::InvalidShape(format!(
941                            "unknown semi-planar height multiplier for {format:?}"
942                        )))
943                    }
944                };
945                vec![total_h, width]
946            }
947            _ => {
948                return Err(Error::NotSupported(format!(
949                    "unsupported pixel layout for create_image_from_fd: {:?}",
950                    format.layout()
951                )));
952            }
953        };
954        let tensor = TensorDyn::from_fd(owned, &shape, dtype, None)?;
955        if tensor.memory() != TensorMemory::Dma {
956            return Err(Error::NotSupported(format!(
957                "create_image_from_fd requires DMA-backed fd, got {:?}",
958                tensor.memory()
959            )));
960        }
961        Ok(tensor.with_format(format)?)
962    }
963}
964
impl ImageProcessorTrait for ImageProcessor {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated
    ///
    /// Prefer hardware accelerators when available, falling back to CPU if
    /// necessary.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let start = Instant::now();
        let src_fmt = src.format();
        let dst_fmt = dst.format();
        log::trace!(
            "convert: {src_fmt:?}({:?}/{:?}) → {dst_fmt:?}({:?}/{:?}), \
             rotation={rotation:?}, flip={flip:?}, backend={:?}",
            src.dtype(),
            src.memory(),
            dst.dtype(),
            dst.memory(),
            self.forced_backend,
        );

        // ── Forced backend: no fallback chain ────────────────────────
        // A forced backend either handles the conversion (returning its own
        // Ok/Err) or, if it was not initialized, the call fails with
        // ForcedBackendUnavailable. No other backend is ever tried.
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        let r = cpu.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=cpu result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => {
                    // On non-Linux targets the cfg removes this path, so the
                    // arm evaluates straight to the error below.
                    #[cfg(target_os = "linux")]
                    if let Some(g2d) = self.g2d.as_mut() {
                        let r = g2d.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=g2d result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("g2d".into()))
                }
                ForcedBackend::OpenGl => {
                    // Only present on Linux with the "opengl" feature.
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        let r = opengl.convert(src, dst, rotation, flip, crop);
                        log::trace!(
                            "convert: forced=opengl result={} ({:?})",
                            if r.is_ok() { "ok" } else { "err" },
                            start.elapsed()
                        );
                        return r;
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // ── Auto fallback chain: OpenGL → G2D → CPU ──────────────────
        // A backend "declining" (returning Err) is logged at trace level and
        // the next backend is tried; only the CPU's error is propagated.
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            match opengl.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=opengl for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto opengl declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        #[cfg(target_os = "linux")]
        if let Some(g2d) = self.g2d.as_mut() {
            match g2d.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=g2d for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto g2d declined {src_fmt:?}→{dst_fmt:?}: {e}");
                }
            }
        }

        // CPU is the last resort: its failure is returned to the caller.
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!(
                        "convert: auto selected=cpu for {src_fmt:?}→{dst_fmt:?} ({:?})",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("convert: auto cpu failed {src_fmt:?}→{dst_fmt:?}: {e}");
                    return Err(e);
                }
            }
        }
        // Reached only when every backend was disabled at construction time.
        Err(Error::NoConverter)
    }

    /// Draws detection boxes and segmentation masks onto `dst`.
    ///
    /// No-op when both `detect` and `segmentation` are empty. In auto mode
    /// OpenGL is tried first, then CPU; G2D cannot render to an image and is
    /// rejected when forced.
    fn draw_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()> {
        let start = Instant::now();

        // Nothing to draw — skip backend dispatch entirely.
        if detect.is_empty() && segmentation.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("draw_masks started with opengl in {:?}", start.elapsed());
            match opengl.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    // GL failure falls through to the CPU path below.
                    log::trace!("draw_masks didn't work with opengl: {e:?}")
                }
            }
        }
        log::trace!("draw_masks started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks didn't work with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Draws detection boxes and masks derived from prototype data onto
    /// `dst`.
    ///
    /// No-op when `detect` is empty. In auto mode a hybrid path is preferred:
    /// the CPU materializes per-detection segmentations from `proto_data`
    /// and OpenGL overlays them (see benchmark note below); a pure-CPU path
    /// is used when OpenGL is absent or the GL overlay fails.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()> {
        let start = Instant::now();

        if detect.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks_proto".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        // Hybrid path: CPU materialize + GL overlay (benchmarked faster than
        // full-GPU draw_masks_proto on all tested platforms: 27× on imx8mp,
        // 4× on imx95, 2.5× on rpi5, 1.6× on x86).
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            // The hybrid path needs both backends; a missing CPU here is a
            // configuration error, not a recoverable decline.
            let Some(cpu) = self.cpu.as_ref() else {
                return Err(Error::Internal(
                    "draw_masks_proto requires CPU backend for hybrid path".into(),
                ));
            };
            log::trace!(
                "draw_masks_proto started with hybrid (cpu+opengl) in {:?}",
                start.elapsed()
            );
            let segmentation = cpu.materialize_segmentations(detect, proto_data)?;
            match opengl.draw_masks(dst, detect, &segmentation) {
                Ok(_) => {
                    log::trace!(
                        "draw_masks_proto with hybrid (cpu+opengl) in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks_proto hybrid path failed, falling back to cpu: {e:?}");
                }
            }
        }

        // CPU-only fallback (no OpenGL, or hybrid GL overlay failed)
        let Some(cpu) = self.cpu.as_mut() else {
            return Err(Error::Internal(
                "draw_masks_proto requires CPU backend for fallback path".into(),
            ));
        };
        log::trace!("draw_masks_proto started with cpu in {:?}", start.elapsed());
        cpu.draw_masks_proto(dst, detect, proto_data)
    }

    /// Sets the class color palette used by the rendering backends.
    ///
    /// NOTE: in auto mode the palette is applied to the first backend that
    /// accepts it (OpenGL, then CPU) — not to both. When a backend is
    /// forced, only that backend is updated; G2D is rejected.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
        let start = Instant::now();

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support set_class_colors".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("image started with opengl in {:?}", start.elapsed());
            match opengl.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with opengl: {e:?}")
                }
            }
        }
        log::trace!("image started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Decodes per-detection masks from `proto_data` into a packed atlas
    /// (mask bytes plus their [`MaskRegion`] placements).
    ///
    /// Returns empty results when `detect` is empty. In auto mode OpenGL is
    /// used when present and its error is returned directly — no CPU retry
    /// is possible because `proto_data` is moved into the GL call. The CPU
    /// path runs only when no GL backend exists.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)> {
        if detect.is_empty() {
            return Ok((Vec::new(), Vec::new()));
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support decode_masks_atlas".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let has_opengl = self.opengl.is_some();
            if has_opengl {
                let opengl = self.opengl.as_mut().unwrap();
                // Both arms return here, so the by-value `proto_data` is only
                // moved on a path that never reaches the CPU fallback below.
                match opengl.decode_masks_atlas(detect, proto_data, output_width, output_height) {
                    Ok(r) => return Ok(r),
                    Err(e) => {
                        log::trace!("decode_masks_atlas didn't work with opengl: {e:?}");
                        return Err(e);
                    }
                }
            }
        }
        // CPU fallback: render per-detection masks and pack into compact atlas
        if let Some(cpu) = self.cpu.as_mut() {
            return cpu.decode_masks_atlas(detect, proto_data, output_width, output_height);
        }
        Err(Error::NoConverter)
    }
}
1357
1358// ---------------------------------------------------------------------------
1359// Image loading / saving helpers
1360// ---------------------------------------------------------------------------
1361
1362/// Read EXIF orientation from raw EXIF bytes and return (Rotation, Flip).
1363fn read_exif_orientation(exif_bytes: &[u8]) -> (Rotation, Flip) {
1364    let exifreader = exif::Reader::new();
1365    let Ok(exif_) = exifreader.read_raw(exif_bytes.to_vec()) else {
1366        return (Rotation::None, Flip::None);
1367    };
1368    let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
1369        return (Rotation::None, Flip::None);
1370    };
1371    match orientation.value.get_uint(0) {
1372        Some(1) => (Rotation::None, Flip::None),
1373        Some(2) => (Rotation::None, Flip::Horizontal),
1374        Some(3) => (Rotation::Rotate180, Flip::None),
1375        Some(4) => (Rotation::Rotate180, Flip::Horizontal),
1376        Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
1377        Some(6) => (Rotation::Clockwise90, Flip::None),
1378        Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
1379        Some(8) => (Rotation::CounterClockwise90, Flip::None),
1380        Some(v) => {
1381            log::warn!("broken orientation EXIF value: {v}");
1382            (Rotation::None, Flip::None)
1383        }
1384        None => (Rotation::None, Flip::None),
1385    }
1386}
1387
1388/// Map a [`PixelFormat`] to the zune-jpeg `ColorSpace` for decoding.
1389/// Returns `None` for formats that the JPEG decoder cannot output directly.
1390fn pixelfmt_to_colorspace(fmt: PixelFormat) -> Option<ColorSpace> {
1391    match fmt {
1392        PixelFormat::Rgb => Some(ColorSpace::RGB),
1393        PixelFormat::Rgba => Some(ColorSpace::RGBA),
1394        PixelFormat::Grey => Some(ColorSpace::Luma),
1395        _ => None,
1396    }
1397}
1398
1399/// Map a zune-jpeg `ColorSpace` to a [`PixelFormat`].
1400fn colorspace_to_pixelfmt(cs: ColorSpace) -> Option<PixelFormat> {
1401    match cs {
1402        ColorSpace::RGB => Some(PixelFormat::Rgb),
1403        ColorSpace::RGBA => Some(PixelFormat::Rgba),
1404        ColorSpace::Luma => Some(PixelFormat::Grey),
1405        _ => None,
1406    }
1407}
1408
/// Load a JPEG image from raw bytes and return a [`TensorDyn`].
///
/// `format` selects the desired output pixel format (defaults to RGB when
/// `None`); `memory` selects the memory backing of the final tensor. The
/// EXIF orientation, when present, is applied after decoding.
fn load_jpeg(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    // Ask the decoder for the closest directly-supported colorspace.
    let colour = match format {
        Some(f) => pixelfmt_to_colorspace(f)
            .ok_or_else(|| Error::NotSupported(format!("Unsupported image format {f:?}")))?,
        None => ColorSpace::RGB,
    };
    let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
    let mut decoder = JpegDecoder::new_with_options(image, options);
    // Headers must be parsed before info()/get_output_colorspace() return data.
    decoder.decode_headers()?;

    let image_info = decoder.info().ok_or(Error::Internal(
        "JPEG did not return decoded image info".to_string(),
    ))?;

    // The decoder may settle on a different output colorspace than requested
    // (e.g. for grayscale sources); a format conversion is inserted below.
    let converted_cs = decoder
        .get_output_colorspace()
        .ok_or(Error::Internal("No output colorspace".to_string()))?;

    let converted_fmt = colorspace_to_pixelfmt(converted_cs).ok_or(Error::NotSupported(
        "Unsupported JPEG decoder output".to_string(),
    ))?;

    let dest_fmt = format.unwrap_or(converted_fmt);

    let (rotation, flip) = decoder
        .exif()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));

    let w = image_info.width as usize;
    let h = image_info.height as usize;

    // Fast path: no orientation change — decode straight into the final
    // tensor (with an intermediate only when a format conversion is needed).
    if (rotation, flip) == (Rotation::None, Flip::None) {
        let mut img = Tensor::<u8>::image(w, h, dest_fmt, memory)?;

        if converted_fmt != dest_fmt {
            // Decode into a temporary in the decoder's native format, then
            // convert into the caller's requested format.
            let tmp = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
            decoder.decode_into(&mut tmp.map()?)?;
            CPUProcessor::convert_format_pf(&tmp, &mut img, converted_fmt, dest_fmt)?;
            return Ok(TensorDyn::from(img));
        }
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }

    // Orientation path: decode (and convert if needed) into a plain-memory
    // temporary, then rotate/flip into the final tensor.
    let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;

    if converted_fmt != dest_fmt {
        let tmp2 = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
        decoder.decode_into(&mut tmp2.map()?)?;
        CPUProcessor::convert_format_pf(&tmp2, &mut tmp, converted_fmt, dest_fmt)?;
    } else {
        decoder.decode_into(&mut tmp.map()?)?;
    }

    rotate_flip_to_dyn(&tmp, dest_fmt, rotation, flip, memory)
}
1471
/// Load a PNG image from raw bytes and return a [`TensorDyn`].
///
/// Only RGB and RGBA output formats are supported (`format == None`
/// defaults to RGB); other formats return `NotImplemented`. The EXIF
/// orientation, when present, is applied after decoding.
fn load_png(
    image: &[u8],
    format: Option<PixelFormat>,
    memory: Option<TensorMemory>,
) -> Result<TensorDyn> {
    let fmt = format.unwrap_or(PixelFormat::Rgb);
    // The decoder is only told whether to append an alpha channel.
    // NOTE(review): this assumes the decoded pixel data then matches `fmt`;
    // TODO confirm zune-png converts non-RGB sources (e.g. grayscale) too.
    let alpha = match fmt {
        PixelFormat::Rgb => false,
        PixelFormat::Rgba => true,
        _ => {
            return Err(Error::NotImplemented(
                "Unsupported image format".to_string(),
            ));
        }
    };

    let options = DecoderOptions::default()
        .png_set_add_alpha_channel(alpha)
        .png_set_decode_animated(false);
    let mut decoder = PngDecoder::new_with_options(image, options);
    // Headers must be parsed before get_info() returns data.
    decoder.decode_headers()?;
    let image_info = decoder.get_info().ok_or(Error::Internal(
        "PNG did not return decoded image info".to_string(),
    ))?;

    let (rotation, flip) = image_info
        .exif
        .as_ref()
        .map(|x| read_exif_orientation(x))
        .unwrap_or((Rotation::None, Flip::None));

    // Fast path: no orientation change — decode straight into the final tensor.
    if (rotation, flip) == (Rotation::None, Flip::None) {
        let img = Tensor::<u8>::image(image_info.width, image_info.height, fmt, memory)?;
        decoder.decode_into(&mut img.map()?)?;
        return Ok(TensorDyn::from(img));
    }

    // Orientation path: decode into a plain-memory temporary, then
    // rotate/flip into the final tensor.
    let tmp = Tensor::<u8>::image(
        image_info.width,
        image_info.height,
        fmt,
        Some(TensorMemory::Mem),
    )?;
    decoder.decode_into(&mut tmp.map()?)?;

    rotate_flip_to_dyn(&tmp, fmt, rotation, flip, memory)
}
1520
1521/// Load an image from raw bytes (JPEG or PNG) and return a [`TensorDyn`].
1522///
1523/// The optional `format` specifies the desired output pixel format (e.g.,
1524/// [`PixelFormat::Rgb`], [`PixelFormat::Rgba`]); if `None`, the native
1525/// format of the file is used (typically RGB for JPEG).
1526///
1527/// # Examples
1528/// ```rust
1529/// use edgefirst_image::load_image;
1530/// use edgefirst_tensor::PixelFormat;
1531/// # fn main() -> Result<(), edgefirst_image::Error> {
1532/// let jpeg = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
1533/// let img = load_image(jpeg, Some(PixelFormat::Rgb), None)?;
1534/// assert_eq!(img.width(), Some(1280));
1535/// assert_eq!(img.height(), Some(720));
1536/// # Ok(())
1537/// # }
1538/// ```
1539pub fn load_image(
1540    image: &[u8],
1541    format: Option<PixelFormat>,
1542    memory: Option<TensorMemory>,
1543) -> Result<TensorDyn> {
1544    if let Ok(i) = load_jpeg(image, format, memory) {
1545        return Ok(i);
1546    }
1547    if let Ok(i) = load_png(image, format, memory) {
1548        return Ok(i);
1549    }
1550    Err(Error::NotSupported(
1551        "Could not decode as jpeg or png".to_string(),
1552    ))
1553}
1554
1555/// Save a [`TensorDyn`] image as a JPEG file.
1556///
1557/// Only packed RGB and RGBA formats are supported.
1558pub fn save_jpeg(tensor: &TensorDyn, path: impl AsRef<std::path::Path>, quality: u8) -> Result<()> {
1559    let t = tensor.as_u8().ok_or(Error::UnsupportedFormat(
1560        "save_jpeg requires u8 tensor".to_string(),
1561    ))?;
1562    let fmt = t.format().ok_or(Error::NotAnImage)?;
1563    if fmt.layout() != PixelLayout::Packed {
1564        return Err(Error::NotImplemented(
1565            "Saving planar images is not supported".to_string(),
1566        ));
1567    }
1568
1569    let colour = match fmt {
1570        PixelFormat::Rgb => jpeg_encoder::ColorType::Rgb,
1571        PixelFormat::Rgba => jpeg_encoder::ColorType::Rgba,
1572        _ => {
1573            return Err(Error::NotImplemented(
1574                "Unsupported image format for saving".to_string(),
1575            ));
1576        }
1577    };
1578
1579    let w = t.width().ok_or(Error::NotAnImage)?;
1580    let h = t.height().ok_or(Error::NotAnImage)?;
1581    let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
1582    let tensor_map = t.map()?;
1583
1584    encoder.encode(&tensor_map, w as u16, h as u16, colour)?;
1585
1586    Ok(())
1587}
1588
1589pub(crate) struct FunctionTimer<T: Display> {
1590    name: T,
1591    start: std::time::Instant,
1592}
1593
1594impl<T: Display> FunctionTimer<T> {
1595    pub fn new(name: T) -> Self {
1596        Self {
1597            name,
1598            start: std::time::Instant::now(),
1599        }
1600    }
1601}
1602
1603impl<T: Display> Drop for FunctionTimer<T> {
1604    fn drop(&mut self) {
1605        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
1606    }
1607}
1608
/// Default overlay palette: 20 RGBA colours with components normalized to
/// the 0.0..=1.0 range; every entry uses an alpha of 0.7. The byte-valued
/// twin `DEFAULT_COLORS_U8` is derived from this table via `denorm`.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1631
1632const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
1633    let mut result = [[0; M]; N];
1634    let mut i = 0;
1635    while i < N {
1636        let mut j = 0;
1637        while j < M {
1638            result[i][j] = (a[i][j] * 255.0).round() as u8;
1639            j += 1;
1640        }
1641        i += 1;
1642    }
1643    result
1644}
1645
1646const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
1647
1648#[cfg(test)]
1649#[cfg_attr(coverage_nightly, coverage(off))]
1650mod image_tests {
1651    use super::*;
1652    use crate::{CPUProcessor, Rotation};
1653    #[cfg(target_os = "linux")]
1654    use edgefirst_tensor::is_dma_available;
1655    use edgefirst_tensor::{TensorMapTrait, TensorMemory, TensorTrait};
1656    use image::buffer::ConvertBuffer;
1657
1658    /// Test helper: call `ImageProcessorTrait::convert()` on two `TensorDyn`s
1659    /// by going through the `TensorDyn` API.
1660    ///
1661    /// Returns the `(src_image, dst_image)` reconstructed from the TensorDyn
1662    /// round-trip so the caller can feed them to `compare_images` etc.
1663    fn convert_img(
1664        proc: &mut dyn ImageProcessorTrait,
1665        src: TensorDyn,
1666        dst: TensorDyn,
1667        rotation: Rotation,
1668        flip: Flip,
1669        crop: Crop,
1670    ) -> (Result<()>, TensorDyn, TensorDyn) {
1671        let src_fourcc = src.format().unwrap();
1672        let dst_fourcc = dst.format().unwrap();
1673        let src_dyn = src;
1674        let mut dst_dyn = dst;
1675        let result = proc.convert(&src_dyn, &mut dst_dyn, rotation, flip, crop);
1676        let src_back = {
1677            let mut __t = src_dyn.into_u8().unwrap();
1678            __t.set_format(src_fourcc).unwrap();
1679            TensorDyn::from(__t)
1680        };
1681        let dst_back = {
1682            let mut __t = dst_dyn.into_u8().unwrap();
1683            __t.set_format(dst_fourcc).unwrap();
1684            TensorDyn::from(__t)
1685        };
1686        (result, src_back, dst_back)
1687    }
1688
    /// Test-process initialisation, run once before any test via `ctor`:
    /// installs env_logger with a default filter of `info` (overridable
    /// through the usual `RUST_LOG` mechanism).
    #[ctor::ctor]
    fn init() {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }
1693
    /// Expands to the name of the function it is invoked in, without the
    /// module path: it reads `type_name` of a local item `f` (which ends in
    /// `::f`), strips the trailing 3 chars, and keeps everything after the
    /// last remaining `:`.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path. Note: this matches on
            // `&Option<usize>`, relying on match ergonomics.
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
1709
    #[test]
    fn test_invalid_crop() {
        // 100x100 source and destination images for exercising crop checks.
        let src = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        let dst = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();

        // Both rectangles exceed the 100x100 bounds (assumes
        // Rect::new(x, y, w, h) — TODO confirm against Rect's definition).
        let crop = Crop::new()
            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));

        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
        ));

        // Fix only the src rect; the 150x150 dst rect remains invalid.
        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
        ));

        // Fix the dst rect but make the src rect out of range again.
        let crop = crop
            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
        ));

        // A 50x50 src rect at (50, 50) exactly reaches the image edge: valid.
        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));

        let result = crop.check_crop_dyn(&src, &dst);
        assert!(result.is_ok());
    }
1746
1747    #[test]
1748    fn test_invalid_tensor_format() -> Result<(), Error> {
1749        // 4D tensor cannot be set to a 3-channel pixel format
1750        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4, 1], None, None)?;
1751        let result = tensor.set_format(PixelFormat::Rgb);
1752        assert!(result.is_err(), "4D tensor should reject set_format");
1753
1754        // Tensor with wrong channel count for the format
1755        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4], None, None)?;
1756        let result = tensor.set_format(PixelFormat::Rgb);
1757        assert!(result.is_err(), "4-channel tensor should reject RGB format");
1758
1759        Ok(())
1760    }
1761
1762    #[test]
1763    fn test_invalid_image_file() -> Result<(), Error> {
1764        let result = crate::load_image(&[123; 5000], None, None);
1765        assert!(matches!(
1766            result,
1767            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1768
1769        Ok(())
1770    }
1771
1772    #[test]
1773    fn test_invalid_jpeg_format() -> Result<(), Error> {
1774        let result = crate::load_image(&[123; 5000], Some(PixelFormat::Yuyv), None);
1775        assert!(matches!(
1776            result,
1777            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1778
1779        Ok(())
1780    }
1781
    #[test]
    fn test_load_resize_save() {
        // Decode the 1280x720 test JPEG as RGBA.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ));
        let img = crate::load_image(file, Some(PixelFormat::Rgba), None).unwrap();
        assert_eq!(img.width(), Some(1280));
        assert_eq!(img.height(), Some(720));

        // Downscale to half size on the CPU backend.
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut converter = CPUProcessor::new();
        let (result, _img, dst) = convert_img(
            &mut converter,
            img,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        assert_eq!(dst.width(), Some(640));
        assert_eq!(dst.height(), Some(360));

        // Round-trip through save_jpeg/load_image.
        // NOTE(review): writes into the current working directory; a temp
        // directory would avoid collisions between parallel test runs.
        crate::save_jpeg(&dst, "zidane_resized.jpg", 80).unwrap();

        let file = std::fs::read("zidane_resized.jpg").unwrap();
        let img = crate::load_image(&file, None, None).unwrap();
        assert_eq!(img.width(), Some(640));
        assert_eq!(img.height(), Some(360));
        // With `format: None`, the JPEG decodes to its native RGB.
        assert_eq!(img.format().unwrap(), PixelFormat::Rgb);
    }
1814
1815    #[test]
1816    fn test_from_tensor_planar() -> Result<(), Error> {
1817        let mut tensor = Tensor::new(&[3, 720, 1280], None, None)?;
1818        tensor.map()?.copy_from_slice(include_bytes!(concat!(
1819            env!("CARGO_MANIFEST_DIR"),
1820            "/../../testdata/camera720p.8bps"
1821        )));
1822        let planar = {
1823            tensor
1824                .set_format(PixelFormat::PlanarRgb)
1825                .map_err(|e| crate::Error::Internal(e.to_string()))?;
1826            TensorDyn::from(tensor)
1827        };
1828
1829        let rbga = load_bytes_to_tensor(
1830            1280,
1831            720,
1832            PixelFormat::Rgba,
1833            None,
1834            include_bytes!(concat!(
1835                env!("CARGO_MANIFEST_DIR"),
1836                "/../../testdata/camera720p.rgba"
1837            )),
1838        )?;
1839        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
1840
1841        Ok(())
1842    }
1843
1844    #[test]
1845    fn test_from_tensor_invalid_format() {
1846        // PixelFormat::from_fourcc_str returns None for unknown FourCC codes.
1847        // Since there's no "TEST" pixel format, this validates graceful handling.
1848        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
1849    }
1850
1851    #[test]
1852    #[should_panic(expected = "Failed to save planar RGB image")]
1853    fn test_save_planar() {
1854        let planar_img = load_bytes_to_tensor(
1855            1280,
1856            720,
1857            PixelFormat::PlanarRgb,
1858            None,
1859            include_bytes!(concat!(
1860                env!("CARGO_MANIFEST_DIR"),
1861                "/../../testdata/camera720p.8bps"
1862            )),
1863        )
1864        .unwrap();
1865
1866        let save_path = "/tmp/planar_rgb.jpg";
1867        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save planar RGB image");
1868    }
1869
1870    #[test]
1871    #[should_panic(expected = "Failed to save YUYV image")]
1872    fn test_save_yuyv() {
1873        let planar_img = load_bytes_to_tensor(
1874            1280,
1875            720,
1876            PixelFormat::Yuyv,
1877            None,
1878            include_bytes!(concat!(
1879                env!("CARGO_MANIFEST_DIR"),
1880                "/../../testdata/camera720p.yuyv"
1881            )),
1882        )
1883        .unwrap();
1884
1885        let save_path = "/tmp/yuyv.jpg";
1886        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save YUYV image");
1887    }
1888
1889    #[test]
1890    fn test_rotation_angle() {
1891        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
1892        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
1893        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
1894        assert_eq!(
1895            Rotation::from_degrees_clockwise(270),
1896            Rotation::CounterClockwise90
1897        );
1898        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
1899        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
1900        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
1901        assert_eq!(
1902            Rotation::from_degrees_clockwise(630),
1903            Rotation::CounterClockwise90
1904        );
1905    }
1906
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        // Angles that are not multiples of 90 degrees are rejected by panic.
        Rotation::from_degrees_clockwise(361);
    }
1912
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        // Verifies the EDGEFIRST_DISABLE_* environment variables suppress
        // individual backends, and that disabling all of them makes
        // convert() fail with NoConverter. Each block saves and restores
        // the variable it touches.
        // NOTE(review): env vars are process-global; if other tests read
        // them concurrently this can race — confirm the harness serializes.
        #[cfg(target_os = "linux")]
        {
            // G2D backend must be absent when EDGEFIRST_DISABLE_G2D=1.
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            // OpenGL backend must be absent when EDGEFIRST_DISABLE_GL=1.
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        // CPU fallback must be absent when EDGEFIRST_DISABLE_CPU=1.
        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // Disable every backend at once: conversion must fail.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None)?;
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None)?;
        let (result, _src, _dst) = convert_img(
            &mut converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        assert!(matches!(result, Err(Error::NoConverter)));

        // Restore the original environment before returning.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        Ok(())
    }
1984
1985    #[test]
1986    fn test_unsupported_conversion() {
1987        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
1988        let dst = TensorDyn::image(640, 360, PixelFormat::Nv12, DType::U8, None).unwrap();
1989        let mut converter = ImageProcessor::new().unwrap();
1990        let (result, _src, _dst) = convert_img(
1991            &mut converter,
1992            src,
1993            dst,
1994            Rotation::None,
1995            Flip::None,
1996            Crop::no_crop(),
1997        );
1998        log::debug!("result: {:?}", result);
1999        assert!(matches!(
2000            result,
2001            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2002        ));
2003    }
2004
2005    #[test]
2006    fn test_load_grey() {
2007        let grey_img = crate::load_image(
2008            include_bytes!(concat!(
2009                env!("CARGO_MANIFEST_DIR"),
2010                "/../../testdata/grey.jpg"
2011            )),
2012            Some(PixelFormat::Rgba),
2013            None,
2014        )
2015        .unwrap();
2016
2017        let grey_but_rgb_img = crate::load_image(
2018            include_bytes!(concat!(
2019                env!("CARGO_MANIFEST_DIR"),
2020                "/../../testdata/grey-rgb.jpg"
2021            )),
2022            Some(PixelFormat::Rgba),
2023            None,
2024        )
2025        .unwrap();
2026
2027        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2028    }
2029
2030    #[test]
2031    fn test_new_nv12() {
2032        let nv12 = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2033        assert_eq!(nv12.height(), Some(720));
2034        assert_eq!(nv12.width(), Some(1280));
2035        assert_eq!(nv12.format().unwrap(), PixelFormat::Nv12);
2036        // PixelFormat::Nv12.channels() returns 1 (luma plane channel count)
2037        assert_eq!(nv12.format().unwrap().channels(), 1);
2038        assert!(nv12.format().is_some_and(
2039            |f| f.layout() == PixelLayout::Planar || f.layout() == PixelLayout::SemiPlanar
2040        ))
2041    }
2042
    #[test]
    #[cfg(target_os = "linux")]
    fn test_new_image_converter() {
        // The auto-selected backend (ImageProcessor) and the explicit CPU
        // backend must produce near-identical downscaled output.
        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Resize with whichever backend ImageProcessor selected.
        let mut converter = ImageProcessor::new().unwrap();
        let converter_dst = converter
            .create_image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        let (result, src, converter_dst) = convert_img(
            &mut converter,
            src,
            converter_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Reference: the same resize on the pure-CPU path.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // 0.98 similarity absorbs backend rounding differences.
        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
    }
2084
    #[test]
    #[cfg(target_os = "linux")]
    fn test_create_image_dtype_i8() {
        // Verifies create_image supports DType::I8 and that conversion
        // into an I8 destination succeeds.
        let mut converter = ImageProcessor::new().unwrap();

        // I8 image should allocate successfully via create_image
        let dst = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
            .unwrap();
        assert_eq!(dst.dtype(), DType::I8);
        assert!(dst.width() == Some(320));
        assert!(dst.height() == Some(240));
        assert_eq!(dst.format(), Some(PixelFormat::Rgb));

        // U8 for comparison
        let dst_u8 = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::U8, None)
            .unwrap();
        assert_eq!(dst_u8.dtype(), DType::U8);

        // Convert into I8 dst should succeed
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let mut dst_i8 = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
            .unwrap();
        converter
            .convert(
                &src,
                &mut dst_i8,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();
    }
2125
    #[test]
    fn test_crop_skip() {
        // Copy a 640x640 region to an identically-sized destination region
        // and verify the auto-selected backend matches the CPU reference
        // almost exactly (0.99999 similarity).
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let mut converter = ImageProcessor::new().unwrap();
        let converter_dst = converter
            .create_image(1280, 720, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        // Identical src/dst rects: a 1:1 copy of the cropped region.
        let crop = Crop::new()
            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
        let (result, src, converter_dst) = convert_img(
            &mut converter,
            src,
            converter_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        // CPU reference with the same crop.
        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
    }
2166
2167    #[test]
2168    fn test_invalid_pixel_format() {
2169        // PixelFormat::from_fourcc returns None for unknown formats,
2170        // so TensorDyn::image cannot be called with an invalid format.
2171        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2172    }
2173
    // Cached result of probing for the G2D library (Linux/i.MX8 only);
    // initialised on the first call to `is_g2d_available`.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    // Helper: true when a G2DProcessor can be constructed (libg2d present).
    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2182
2183    #[cfg(target_os = "linux")]
2184    #[cfg(feature = "opengl")]
2185    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2186
2187    #[cfg(target_os = "linux")]
2188    #[cfg(feature = "opengl")]
2189    // Helper function to check if OpenGL is available
2190    fn is_opengl_available() -> bool {
2191        #[cfg(all(target_os = "linux", feature = "opengl"))]
2192        {
2193            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2194        }
2195
2196        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2197        {
2198            false
2199        }
2200    }
2201
    #[test]
    fn test_load_jpeg_with_exif() {
        // zidane_rotated_exif.jpg carries an EXIF orientation; load_image
        // must apply it, yielding a portrait 720x1280 result.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif.jpg"
        ))
        .to_vec();
        let loaded = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        assert_eq!(loaded.height(), Some(1280));
        assert_eq!(loaded.width(), Some(720));

        // Build the expected image by rotating the unrotated original 90
        // degrees clockwise on the CPU.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Width and height swap because of the 90-degree rotation.
        let (dst_width, dst_height) = (cpu_src.height().unwrap(), cpu_src.width().unwrap());

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Clockwise90,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2239
    #[test]
    fn test_load_png_with_exif() {
        // The PNG carries a 180-degree EXIF orientation; load_png must
        // apply it (dimensions stay 1280x720 for a 180 rotation).
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif_180.png"
        ))
        .to_vec();
        let loaded = crate::load_png(&file, Some(PixelFormat::Rgba), None).unwrap();

        assert_eq!(loaded.height(), Some(720));
        assert_eq!(loaded.width(), Some(1280));

        // Build the expected image by rotating the unrotated original 180
        // degrees on the CPU.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Rotate180,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2274
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_resize() {
        // Hardware-gated: needs both libg2d and DMA-BUF allocation; skip
        // (not fail) when either is unavailable on the host.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        // G2D operates on DMA-backed tensors, hence TensorMemory::Dma.
        let src =
            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();

        // Resize on the G2D hardware path.
        let g2d_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // CPU reference for comparison.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2333
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize() {
        // Hardware-gated: skip (not fail) when no GL context is available.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // CPU reference computed once up front.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut src = src;
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // Run the GL conversion five times with the same converter to
        // exercise context/resource reuse; convert_img consumes `src`, so
        // each iteration threads the returned tensor back into the next.
        for _ in 0..5 {
            let gl_dst =
                TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
                    .unwrap();
            let (result, src_back, gl_dst) = convert_img(
                &mut gl_converter,
                src,
                gl_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            );
            result.unwrap();
            src = src_back;

            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }
    }
2386
2387    #[test]
2388    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2389    #[cfg(target_os = "linux")]
2390    #[cfg(feature = "opengl")]
2391    fn test_opengl_10_threads() {
2392        if !is_opengl_available() {
2393            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2394            return;
2395        }
2396
2397        let handles: Vec<_> = (0..10)
2398            .map(|i| {
2399                std::thread::Builder::new()
2400                    .name(format!("Thread {i}"))
2401                    .spawn(test_opengl_resize)
2402                    .unwrap()
2403            })
2404            .collect();
2405        handles.into_iter().for_each(|h| {
2406            if let Err(e) = h.join() {
2407                std::panic::resume_unwind(e)
2408            }
2409        });
2410    }
2411
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_grey() {
        // Hardware-gated: skip (not fail) when no GL context is available.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        // Single-channel (GREY) source image.
        let img = crate::load_image(
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/grey.jpg"
            )),
            Some(PixelFormat::Grey),
            None,
        )
        .unwrap();

        let gl_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
        let cpu_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();

        // CPU reference resize first; convert_img hands `img` back so the
        // same source can then go through the GL path.
        let mut converter = CPUProcessor::new();

        let (result, img, cpu_dst) = convert_img(
            &mut converter,
            img,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // GL resize of the same greyscale source.
        let mut gl = GLProcessorThreaded::new(None).unwrap();
        let (result, _img, gl_dst) = convert_img(
            &mut gl,
            img,
            gl_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2459
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_src_crop() {
        // Converts with a source-crop rectangle on both the CPU and G2D
        // backends and checks the outputs match within a 0.98 threshold.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Crop the top-left 640x360 region of the source; no destination crop.
        let crop = Crop {
            src_rect: Some(Rect {
                left: 0,
                top: 0,
                width: 640,
                height: 360,
            }),
            dst_rect: None,
            dst_color: None,
        };
        // CPU reference pass; convert_img hands the tensors back so the same
        // source can be reused for the G2D pass below.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let g2d_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2521
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_dst_crop() {
        // Converts with a destination-crop rectangle on both the CPU and G2D
        // backends and checks the outputs match within a 0.98 threshold.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Render the full source into a 512x288 rectangle at (100, 100);
        // no source crop.
        let crop = Crop {
            src_rect: None,
            dst_rect: Some(Rect::new(100, 100, 512, 288)),
            dst_color: None,
        };
        // CPU reference pass; convert_img hands the tensors back so the same
        // source can be reused for the G2D pass below.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let g2d_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2578
2579    #[test]
2580    #[cfg(target_os = "linux")]
2581    fn test_g2d_all_rgba() {
2582        if !is_g2d_available() {
2583            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2584            return;
2585        }
2586        if !is_dma_available() {
2587            eprintln!(
2588                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2589            );
2590            return;
2591        }
2592
2593        let dst_width = 640;
2594        let dst_height = 640;
2595        let file = include_bytes!(concat!(
2596            env!("CARGO_MANIFEST_DIR"),
2597            "/../../testdata/zidane.jpg"
2598        ))
2599        .to_vec();
2600        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2601        let src_dyn = src;
2602
2603        let mut cpu_dst =
2604            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2605        let mut cpu_converter = CPUProcessor::new();
2606        let mut g2d_dst =
2607            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2608        let mut g2d_converter = G2DProcessor::new().unwrap();
2609
2610        let crop = Crop {
2611            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2612            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2613            dst_color: None,
2614        };
2615
2616        for rot in [
2617            Rotation::None,
2618            Rotation::Clockwise90,
2619            Rotation::Rotate180,
2620            Rotation::CounterClockwise90,
2621        ] {
2622            cpu_dst
2623                .as_u8()
2624                .unwrap()
2625                .map()
2626                .unwrap()
2627                .as_mut_slice()
2628                .fill(114);
2629            g2d_dst
2630                .as_u8()
2631                .unwrap()
2632                .map()
2633                .unwrap()
2634                .as_mut_slice()
2635                .fill(114);
2636            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2637                let mut cpu_dst_dyn = cpu_dst;
2638                cpu_converter
2639                    .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
2640                    .unwrap();
2641                cpu_dst = {
2642                    let mut __t = cpu_dst_dyn.into_u8().unwrap();
2643                    __t.set_format(PixelFormat::Rgba).unwrap();
2644                    TensorDyn::from(__t)
2645                };
2646
2647                let mut g2d_dst_dyn = g2d_dst;
2648                g2d_converter
2649                    .convert(&src_dyn, &mut g2d_dst_dyn, Rotation::None, Flip::None, crop)
2650                    .unwrap();
2651                g2d_dst = {
2652                    let mut __t = g2d_dst_dyn.into_u8().unwrap();
2653                    __t.set_format(PixelFormat::Rgba).unwrap();
2654                    TensorDyn::from(__t)
2655                };
2656
2657                compare_images(
2658                    &g2d_dst,
2659                    &cpu_dst,
2660                    0.98,
2661                    &format!("{} {:?} {:?}", function!(), rot, flip),
2662                );
2663            }
2664        }
2665    }
2666
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_src_crop() {
        // Converts with a source-crop rectangle on both the CPU and OpenGL
        // backends and checks the outputs match within a 0.98 threshold.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        // Crop the bottom-right quadrant-plus of a 1280x720 source (from
        // (320, 180) to the far corner); no destination crop.
        let crop = Crop {
            src_rect: Some(Rect {
                left: 320,
                top: 180,
                width: 1280 - 320,
                height: 720 - 180,
            }),
            dst_rect: None,
            dst_color: None,
        };

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // CPU reference pass; convert_img hands the tensors back so the same
        // source can be reused for the OpenGL pass below.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let gl_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2723
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_dst_crop() {
        // Converts with a destination-crop rectangle on both the CPU and
        // OpenGL backends and checks the outputs match within 0.98.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Render the full source into a 512x288 rectangle at (100, 100);
        // no source crop.
        let crop = Crop {
            src_rect: None,
            dst_rect: Some(Rect::new(100, 100, 512, 288)),
            dst_color: None,
        };
        // CPU reference pass; convert_img hands the tensors back so the same
        // source can be reused for the OpenGL pass below.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let gl_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2775
2776    #[test]
2777    #[cfg(target_os = "linux")]
2778    #[cfg(feature = "opengl")]
2779    fn test_opengl_all_rgba() {
2780        if !is_opengl_available() {
2781            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2782            return;
2783        }
2784
2785        let dst_width = 640;
2786        let dst_height = 640;
2787        let file = include_bytes!(concat!(
2788            env!("CARGO_MANIFEST_DIR"),
2789            "/../../testdata/zidane.jpg"
2790        ))
2791        .to_vec();
2792
2793        let mut cpu_converter = CPUProcessor::new();
2794
2795        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2796
2797        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
2798        if is_dma_available() {
2799            mem.push(Some(TensorMemory::Dma));
2800        }
2801        let crop = Crop {
2802            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2803            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2804            dst_color: None,
2805        };
2806        for m in mem {
2807            let src = crate::load_image(&file, Some(PixelFormat::Rgba), m).unwrap();
2808            let src_dyn = src;
2809
2810            for rot in [
2811                Rotation::None,
2812                Rotation::Clockwise90,
2813                Rotation::Rotate180,
2814                Rotation::CounterClockwise90,
2815            ] {
2816                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2817                    let cpu_dst =
2818                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
2819                            .unwrap();
2820                    let gl_dst =
2821                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
2822                            .unwrap();
2823                    cpu_dst
2824                        .as_u8()
2825                        .unwrap()
2826                        .map()
2827                        .unwrap()
2828                        .as_mut_slice()
2829                        .fill(114);
2830                    gl_dst
2831                        .as_u8()
2832                        .unwrap()
2833                        .map()
2834                        .unwrap()
2835                        .as_mut_slice()
2836                        .fill(114);
2837
2838                    let mut cpu_dst_dyn = cpu_dst;
2839                    cpu_converter
2840                        .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
2841                        .unwrap();
2842                    let cpu_dst = {
2843                        let mut __t = cpu_dst_dyn.into_u8().unwrap();
2844                        __t.set_format(PixelFormat::Rgba).unwrap();
2845                        TensorDyn::from(__t)
2846                    };
2847
2848                    let mut gl_dst_dyn = gl_dst;
2849                    gl_converter
2850                        .convert(&src_dyn, &mut gl_dst_dyn, Rotation::None, Flip::None, crop)
2851                        .map_err(|e| {
2852                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
2853                            e
2854                        })
2855                        .unwrap();
2856                    let gl_dst = {
2857                        let mut __t = gl_dst_dyn.into_u8().unwrap();
2858                        __t.set_format(PixelFormat::Rgba).unwrap();
2859                        TensorDyn::from(__t)
2860                    };
2861
2862                    compare_images(
2863                        &gl_dst,
2864                        &cpu_dst,
2865                        0.98,
2866                        &format!("{} {:?} {:?}", function!(), rot, flip),
2867                    );
2868                }
2869            }
2870        }
2871    }
2872
2873    #[test]
2874    #[cfg(target_os = "linux")]
2875    fn test_cpu_rotate() {
2876        for rot in [
2877            Rotation::Clockwise90,
2878            Rotation::Rotate180,
2879            Rotation::CounterClockwise90,
2880        ] {
2881            test_cpu_rotate_(rot);
2882        }
2883    }
2884
    #[cfg(target_os = "linux")]
    fn test_cpu_rotate_(rot: Rotation) {
        // This test rotates the image 4 times and checks that the image was returned to
        // be the same Currently doesn't check if rotations actually rotated in
        // right direction
        //
        // NOTE: applying the same rotation 4 times only returns to the
        // original for 90-degree rotations; for Rotate180 the image is back
        // after 2 applications, so 4 applications also end at the original.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();

        // Keep an untouched copy of the decoded source for the final compare.
        let unchanged_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // 90-degree rotations swap the destination's width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => (src.width().unwrap(), src.height().unwrap()),
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
                (src.height().unwrap(), src.width().unwrap())
            }
        };

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // After rotating 4 times, the image should be the same as the original
        //
        // Each call ping-pongs the two buffers: the previous destination
        // becomes the next source, so only two tensors are ever allocated.

        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let (result, cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let (result, _cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Four applications of the rotation must reproduce the original.
        compare_images(&src, &unchanged_src, 0.98, function!());
    }
2954
2955    #[test]
2956    #[cfg(target_os = "linux")]
2957    #[cfg(feature = "opengl")]
2958    fn test_opengl_rotate() {
2959        if !is_opengl_available() {
2960            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2961            return;
2962        }
2963
2964        let size = (1280, 720);
2965        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
2966
2967        if is_dma_available() {
2968            mem.push(Some(TensorMemory::Dma));
2969        }
2970        for m in mem {
2971            for rot in [
2972                Rotation::Clockwise90,
2973                Rotation::Rotate180,
2974                Rotation::CounterClockwise90,
2975            ] {
2976                test_opengl_rotate_(size, rot, m);
2977            }
2978        }
2979    }
2980
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_rotate_(
        size: (usize, usize),
        rot: Rotation,
        tensor_memory: Option<TensorMemory>,
    ) {
        // Rotates the test image with the CPU backend once, then repeats the
        // same rotation through the OpenGL backend five times (with a fresh
        // destination each time) and compares every result against the CPU
        // reference.
        //
        // 90-degree rotations swap the destination's width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => size,
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
        };

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), tensor_memory).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // CPU reference pass; the source is handed back for reuse below.
        let (result, mut src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // Run the OpenGL conversion several times to catch nondeterministic
        // results; reclaim the source tensor after each call.
        for _ in 0..5 {
            let gl_dst = TensorDyn::image(
                dst_width,
                dst_height,
                PixelFormat::Rgba,
                DType::U8,
                tensor_memory,
            )
            .unwrap();
            let (result, src_back, gl_dst) = convert_img(
                &mut gl_converter,
                src,
                gl_dst,
                rot,
                Flip::None,
                Crop::no_crop(),
            );
            result.unwrap();
            src = src_back;
            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }
    }
3038
3039    #[test]
3040    #[cfg(target_os = "linux")]
3041    fn test_g2d_rotate() {
3042        if !is_g2d_available() {
3043            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3044            return;
3045        }
3046        if !is_dma_available() {
3047            eprintln!(
3048                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3049            );
3050            return;
3051        }
3052
3053        let size = (1280, 720);
3054        for rot in [
3055            Rotation::Clockwise90,
3056            Rotation::Rotate180,
3057            Rotation::CounterClockwise90,
3058        ] {
3059            test_g2d_rotate_(size, rot);
3060        }
3061    }
3062
    #[cfg(target_os = "linux")]
    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
        // Rotates the test image with both the CPU backend and the G2D
        // backend (DMA-backed tensors, as required by G2D) and checks the
        // outputs match within a 0.98 threshold.
        //
        // 90-degree rotations swap the destination's width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => size,
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
        };

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src =
            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // CPU reference pass; convert_img hands the source back for the G2D
        // pass below.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let g2d_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();

        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
3114
    #[test]
    fn test_rgba_to_yuyv_resize_cpu() {
        // Round-trip check on the CPU backend: RGBA -> YUYV (with resize)
        // -> RGBA must closely match a direct RGBA -> RGBA resize.
        let src = load_bytes_to_tensor(
            1280,
            720,
            PixelFormat::Rgba,
            None,
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            )),
        )
        .unwrap();

        let (dst_width, dst_height) = (640, 360);

        // Intermediate YUYV buffer at the reduced size.
        let dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Yuyv, DType::U8, None).unwrap();

        // RGBA outputs: one produced via the YUYV intermediate, one direct.
        let dst_through_yuyv =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let dst_direct =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();

        let mut cpu_converter = CPUProcessor::new();

        // Step 1: RGBA source -> resized YUYV.
        let (result, src, dst) = convert_img(
            &mut cpu_converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Step 2: resized YUYV -> RGBA.
        let (result, _dst, dst_through_yuyv) = convert_img(
            &mut cpu_converter,
            dst,
            dst_through_yuyv,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Step 3: RGBA source -> resized RGBA directly.
        let (result, _src, dst_direct) = convert_img(
            &mut cpu_converter,
            src,
            dst_direct,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
    }
3173
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[ignore = "opengl doesn't support rendering to PixelFormat::Yuyv texture"]
    fn test_rgba_to_yuyv_resize_opengl() {
        // Would compare an OpenGL RGBA -> YUYV resize against the CPU
        // backend; currently ignored because OpenGL cannot render to a
        // YUYV texture (see the #[ignore] reason above).
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let src = load_bytes_to_tensor(
            1280,
            720,
            PixelFormat::Rgba,
            None,
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            )),
        )
        .unwrap();

        let (dst_width, dst_height) = (640, 360);

        let dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Yuyv,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // OpenGL pass with a destination rectangle and a white fill color
        // for the area outside it.
        let (result, src, dst) = convert_img(
            &mut gl_converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::new()
                .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
                .with_dst_color(Some([255, 255, 255, 255])),
        );
        result.unwrap();

        // NOTE(review): writes the raw output into the current working
        // directory — looks like a manual-inspection aid for this ignored
        // test; confirm it is intentional to keep.
        std::fs::write(
            "rgba_to_yuyv_opengl.yuyv",
            dst.as_u8().unwrap().map().unwrap().as_slice(),
        )
        .unwrap();
        // CPU reference pass. NOTE(review): the CPU pass uses Crop::no_crop()
        // while the OpenGL pass above uses a dst_rect — verify the comparison
        // is meant to use differing crops.
        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Yuyv,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();
        let (result, _src, cpu_dst) = convert_img(
            &mut CPUProcessor::new(),
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
    }
3254
    #[test]
    #[cfg(target_os = "linux")]
    fn test_rgba_to_yuyv_resize_g2d() {
        // Compares a G2D RGBA -> YUYV conversion with a tiny destination
        // rectangle against the CPU backend.
        // NOTE(review): dst is the same 1280x720 as the source, so despite
        // the name no resize occurs — confirm whether a smaller dst was
        // intended.
        if !is_g2d_available() {
            eprintln!(
                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
            );
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let src = load_bytes_to_tensor(
            1280,
            720,
            PixelFormat::Rgba,
            Some(TensorMemory::Dma),
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/camera720p.rgba"
            )),
        )
        .unwrap();

        let (dst_width, dst_height) = (1280, 720);

        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Yuyv,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();

        let g2d_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Yuyv,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();

        let mut g2d_converter = G2DProcessor::new().unwrap();
        // Render into a 2x2 rectangle at (100, 100); the rest of the
        // destination keeps its pre-filled background.
        let crop = Crop {
            src_rect: None,
            dst_rect: Some(Rect::new(100, 100, 2, 2)),
            dst_color: None,
        };

        // Pre-fill both destinations with the same byte (128) so everything
        // outside the tiny dst_rect compares equal.
        g2d_dst
            .as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .fill(128);
        let (result, src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let cpu_dst_img = cpu_dst;
        cpu_dst_img
            .as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .fill(128);
        let (result, _src, cpu_dst) = convert_img(
            &mut CPUProcessor::new(),
            src,
            cpu_dst_img,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
    }
3347
3348    #[test]
3349    fn test_yuyv_to_rgba_cpu() {
3350        let file = include_bytes!(concat!(
3351            env!("CARGO_MANIFEST_DIR"),
3352            "/../../testdata/camera720p.yuyv"
3353        ))
3354        .to_vec();
3355        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3356        src.as_u8()
3357            .unwrap()
3358            .map()
3359            .unwrap()
3360            .as_mut_slice()
3361            .copy_from_slice(&file);
3362
3363        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3364        let mut cpu_converter = CPUProcessor::new();
3365
3366        let (result, _src, dst) = convert_img(
3367            &mut cpu_converter,
3368            src,
3369            dst,
3370            Rotation::None,
3371            Flip::None,
3372            Crop::no_crop(),
3373        );
3374        result.unwrap();
3375
3376        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3377        target_image
3378            .as_u8()
3379            .unwrap()
3380            .map()
3381            .unwrap()
3382            .as_mut_slice()
3383            .copy_from_slice(include_bytes!(concat!(
3384                env!("CARGO_MANIFEST_DIR"),
3385                "/../../testdata/camera720p.rgba"
3386            )));
3387
3388        compare_images(&dst, &target_image, 0.98, function!());
3389    }
3390
3391    #[test]
3392    fn test_yuyv_to_rgb_cpu() {
3393        let file = include_bytes!(concat!(
3394            env!("CARGO_MANIFEST_DIR"),
3395            "/../../testdata/camera720p.yuyv"
3396        ))
3397        .to_vec();
3398        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3399        src.as_u8()
3400            .unwrap()
3401            .map()
3402            .unwrap()
3403            .as_mut_slice()
3404            .copy_from_slice(&file);
3405
3406        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3407        let mut cpu_converter = CPUProcessor::new();
3408
3409        let (result, _src, dst) = convert_img(
3410            &mut cpu_converter,
3411            src,
3412            dst,
3413            Rotation::None,
3414            Flip::None,
3415            Crop::no_crop(),
3416        );
3417        result.unwrap();
3418
3419        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3420        target_image
3421            .as_u8()
3422            .unwrap()
3423            .map()
3424            .unwrap()
3425            .as_mut_slice()
3426            .as_chunks_mut::<3>()
3427            .0
3428            .iter_mut()
3429            .zip(
3430                include_bytes!(concat!(
3431                    env!("CARGO_MANIFEST_DIR"),
3432                    "/../../testdata/camera720p.rgba"
3433                ))
3434                .as_chunks::<4>()
3435                .0,
3436            )
3437            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3438
3439        compare_images(&dst, &target_image, 0.98, function!());
3440    }
3441
3442    #[test]
3443    #[cfg(target_os = "linux")]
3444    fn test_yuyv_to_rgba_g2d() {
3445        if !is_g2d_available() {
3446            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3447            return;
3448        }
3449        if !is_dma_available() {
3450            eprintln!(
3451                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3452            );
3453            return;
3454        }
3455
3456        let src = load_bytes_to_tensor(
3457            1280,
3458            720,
3459            PixelFormat::Yuyv,
3460            None,
3461            include_bytes!(concat!(
3462                env!("CARGO_MANIFEST_DIR"),
3463                "/../../testdata/camera720p.yuyv"
3464            )),
3465        )
3466        .unwrap();
3467
3468        let dst = TensorDyn::image(
3469            1280,
3470            720,
3471            PixelFormat::Rgba,
3472            DType::U8,
3473            Some(TensorMemory::Dma),
3474        )
3475        .unwrap();
3476        let mut g2d_converter = G2DProcessor::new().unwrap();
3477
3478        let (result, _src, dst) = convert_img(
3479            &mut g2d_converter,
3480            src,
3481            dst,
3482            Rotation::None,
3483            Flip::None,
3484            Crop::no_crop(),
3485        );
3486        result.unwrap();
3487
3488        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3489        target_image
3490            .as_u8()
3491            .unwrap()
3492            .map()
3493            .unwrap()
3494            .as_mut_slice()
3495            .copy_from_slice(include_bytes!(concat!(
3496                env!("CARGO_MANIFEST_DIR"),
3497                "/../../testdata/camera720p.rgba"
3498            )));
3499
3500        compare_images(&dst, &target_image, 0.98, function!());
3501    }
3502
3503    #[test]
3504    #[cfg(target_os = "linux")]
3505    #[cfg(feature = "opengl")]
3506    fn test_yuyv_to_rgba_opengl() {
3507        if !is_opengl_available() {
3508            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3509            return;
3510        }
3511        if !is_dma_available() {
3512            eprintln!(
3513                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3514                function!()
3515            );
3516            return;
3517        }
3518
3519        let src = load_bytes_to_tensor(
3520            1280,
3521            720,
3522            PixelFormat::Yuyv,
3523            Some(TensorMemory::Dma),
3524            include_bytes!(concat!(
3525                env!("CARGO_MANIFEST_DIR"),
3526                "/../../testdata/camera720p.yuyv"
3527            )),
3528        )
3529        .unwrap();
3530
3531        let dst = TensorDyn::image(
3532            1280,
3533            720,
3534            PixelFormat::Rgba,
3535            DType::U8,
3536            Some(TensorMemory::Dma),
3537        )
3538        .unwrap();
3539        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3540
3541        let (result, _src, dst) = convert_img(
3542            &mut gl_converter,
3543            src,
3544            dst,
3545            Rotation::None,
3546            Flip::None,
3547            Crop::no_crop(),
3548        );
3549        result.unwrap();
3550
3551        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3552        target_image
3553            .as_u8()
3554            .unwrap()
3555            .map()
3556            .unwrap()
3557            .as_mut_slice()
3558            .copy_from_slice(include_bytes!(concat!(
3559                env!("CARGO_MANIFEST_DIR"),
3560                "/../../testdata/camera720p.rgba"
3561            )));
3562
3563        compare_images(&dst, &target_image, 0.98, function!());
3564    }
3565
3566    #[test]
3567    #[cfg(target_os = "linux")]
3568    fn test_yuyv_to_rgb_g2d() {
3569        if !is_g2d_available() {
3570            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3571            return;
3572        }
3573        if !is_dma_available() {
3574            eprintln!(
3575                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3576            );
3577            return;
3578        }
3579
3580        let src = load_bytes_to_tensor(
3581            1280,
3582            720,
3583            PixelFormat::Yuyv,
3584            None,
3585            include_bytes!(concat!(
3586                env!("CARGO_MANIFEST_DIR"),
3587                "/../../testdata/camera720p.yuyv"
3588            )),
3589        )
3590        .unwrap();
3591
3592        let g2d_dst = TensorDyn::image(
3593            1280,
3594            720,
3595            PixelFormat::Rgb,
3596            DType::U8,
3597            Some(TensorMemory::Dma),
3598        )
3599        .unwrap();
3600        let mut g2d_converter = G2DProcessor::new().unwrap();
3601
3602        let (result, src, g2d_dst) = convert_img(
3603            &mut g2d_converter,
3604            src,
3605            g2d_dst,
3606            Rotation::None,
3607            Flip::None,
3608            Crop::no_crop(),
3609        );
3610        result.unwrap();
3611
3612        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3613        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3614
3615        let (result, _src, cpu_dst) = convert_img(
3616            &mut cpu_converter,
3617            src,
3618            cpu_dst,
3619            Rotation::None,
3620            Flip::None,
3621            Crop::no_crop(),
3622        );
3623        result.unwrap();
3624
3625        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3626    }
3627
3628    #[test]
3629    #[cfg(target_os = "linux")]
3630    fn test_yuyv_to_yuyv_resize_g2d() {
3631        if !is_g2d_available() {
3632            eprintln!(
3633                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3634            );
3635            return;
3636        }
3637        if !is_dma_available() {
3638            eprintln!(
3639                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3640            );
3641            return;
3642        }
3643
3644        let src = load_bytes_to_tensor(
3645            1280,
3646            720,
3647            PixelFormat::Yuyv,
3648            None,
3649            include_bytes!(concat!(
3650                env!("CARGO_MANIFEST_DIR"),
3651                "/../../testdata/camera720p.yuyv"
3652            )),
3653        )
3654        .unwrap();
3655
3656        let g2d_dst = TensorDyn::image(
3657            600,
3658            400,
3659            PixelFormat::Yuyv,
3660            DType::U8,
3661            Some(TensorMemory::Dma),
3662        )
3663        .unwrap();
3664        let mut g2d_converter = G2DProcessor::new().unwrap();
3665
3666        let (result, src, g2d_dst) = convert_img(
3667            &mut g2d_converter,
3668            src,
3669            g2d_dst,
3670            Rotation::None,
3671            Flip::None,
3672            Crop::no_crop(),
3673        );
3674        result.unwrap();
3675
3676        let cpu_dst = TensorDyn::image(600, 400, PixelFormat::Yuyv, DType::U8, None).unwrap();
3677        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3678
3679        let (result, _src, cpu_dst) = convert_img(
3680            &mut cpu_converter,
3681            src,
3682            cpu_dst,
3683            Rotation::None,
3684            Flip::None,
3685            Crop::no_crop(),
3686        );
3687        result.unwrap();
3688
3689        // TODO: compare PixelFormat::Yuyv and PixelFormat::Yuyv images without having to convert them to PixelFormat::Rgb
3690        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3691    }
3692
3693    #[test]
3694    fn test_yuyv_to_rgba_resize_cpu() {
3695        let src = load_bytes_to_tensor(
3696            1280,
3697            720,
3698            PixelFormat::Yuyv,
3699            None,
3700            include_bytes!(concat!(
3701                env!("CARGO_MANIFEST_DIR"),
3702                "/../../testdata/camera720p.yuyv"
3703            )),
3704        )
3705        .unwrap();
3706
3707        let (dst_width, dst_height) = (960, 540);
3708
3709        let dst =
3710            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3711        let mut cpu_converter = CPUProcessor::new();
3712
3713        let (result, _src, dst) = convert_img(
3714            &mut cpu_converter,
3715            src,
3716            dst,
3717            Rotation::None,
3718            Flip::None,
3719            Crop::no_crop(),
3720        );
3721        result.unwrap();
3722
3723        let dst_target =
3724            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3725        let src_target = load_bytes_to_tensor(
3726            1280,
3727            720,
3728            PixelFormat::Rgba,
3729            None,
3730            include_bytes!(concat!(
3731                env!("CARGO_MANIFEST_DIR"),
3732                "/../../testdata/camera720p.rgba"
3733            )),
3734        )
3735        .unwrap();
3736        let (result, _src_target, dst_target) = convert_img(
3737            &mut cpu_converter,
3738            src_target,
3739            dst_target,
3740            Rotation::None,
3741            Flip::None,
3742            Crop::no_crop(),
3743        );
3744        result.unwrap();
3745
3746        compare_images(&dst, &dst_target, 0.98, function!());
3747    }
3748
3749    #[test]
3750    #[cfg(target_os = "linux")]
3751    fn test_yuyv_to_rgba_crop_flip_g2d() {
3752        if !is_g2d_available() {
3753            eprintln!(
3754                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3755            );
3756            return;
3757        }
3758        if !is_dma_available() {
3759            eprintln!(
3760                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3761            );
3762            return;
3763        }
3764
3765        let src = load_bytes_to_tensor(
3766            1280,
3767            720,
3768            PixelFormat::Yuyv,
3769            Some(TensorMemory::Dma),
3770            include_bytes!(concat!(
3771                env!("CARGO_MANIFEST_DIR"),
3772                "/../../testdata/camera720p.yuyv"
3773            )),
3774        )
3775        .unwrap();
3776
3777        let (dst_width, dst_height) = (640, 640);
3778
3779        let dst_g2d = TensorDyn::image(
3780            dst_width,
3781            dst_height,
3782            PixelFormat::Rgba,
3783            DType::U8,
3784            Some(TensorMemory::Dma),
3785        )
3786        .unwrap();
3787        let mut g2d_converter = G2DProcessor::new().unwrap();
3788        let crop = Crop {
3789            src_rect: Some(Rect {
3790                left: 20,
3791                top: 15,
3792                width: 400,
3793                height: 300,
3794            }),
3795            dst_rect: None,
3796            dst_color: None,
3797        };
3798
3799        let (result, src, dst_g2d) = convert_img(
3800            &mut g2d_converter,
3801            src,
3802            dst_g2d,
3803            Rotation::None,
3804            Flip::Horizontal,
3805            crop,
3806        );
3807        result.unwrap();
3808
3809        let dst_cpu = TensorDyn::image(
3810            dst_width,
3811            dst_height,
3812            PixelFormat::Rgba,
3813            DType::U8,
3814            Some(TensorMemory::Dma),
3815        )
3816        .unwrap();
3817        let mut cpu_converter = CPUProcessor::new();
3818
3819        let (result, _src, dst_cpu) = convert_img(
3820            &mut cpu_converter,
3821            src,
3822            dst_cpu,
3823            Rotation::None,
3824            Flip::Horizontal,
3825            crop,
3826        );
3827        result.unwrap();
3828        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
3829    }
3830
3831    #[test]
3832    #[cfg(target_os = "linux")]
3833    #[cfg(feature = "opengl")]
3834    fn test_yuyv_to_rgba_crop_flip_opengl() {
3835        if !is_opengl_available() {
3836            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3837            return;
3838        }
3839
3840        if !is_dma_available() {
3841            eprintln!(
3842                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3843                function!()
3844            );
3845            return;
3846        }
3847
3848        let src = load_bytes_to_tensor(
3849            1280,
3850            720,
3851            PixelFormat::Yuyv,
3852            Some(TensorMemory::Dma),
3853            include_bytes!(concat!(
3854                env!("CARGO_MANIFEST_DIR"),
3855                "/../../testdata/camera720p.yuyv"
3856            )),
3857        )
3858        .unwrap();
3859
3860        let (dst_width, dst_height) = (640, 640);
3861
3862        let dst_gl = TensorDyn::image(
3863            dst_width,
3864            dst_height,
3865            PixelFormat::Rgba,
3866            DType::U8,
3867            Some(TensorMemory::Dma),
3868        )
3869        .unwrap();
3870        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3871        let crop = Crop {
3872            src_rect: Some(Rect {
3873                left: 20,
3874                top: 15,
3875                width: 400,
3876                height: 300,
3877            }),
3878            dst_rect: None,
3879            dst_color: None,
3880        };
3881
3882        let (result, src, dst_gl) = convert_img(
3883            &mut gl_converter,
3884            src,
3885            dst_gl,
3886            Rotation::None,
3887            Flip::Horizontal,
3888            crop,
3889        );
3890        result.unwrap();
3891
3892        let dst_cpu = TensorDyn::image(
3893            dst_width,
3894            dst_height,
3895            PixelFormat::Rgba,
3896            DType::U8,
3897            Some(TensorMemory::Dma),
3898        )
3899        .unwrap();
3900        let mut cpu_converter = CPUProcessor::new();
3901
3902        let (result, _src, dst_cpu) = convert_img(
3903            &mut cpu_converter,
3904            src,
3905            dst_cpu,
3906            Rotation::None,
3907            Flip::Horizontal,
3908            crop,
3909        );
3910        result.unwrap();
3911        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
3912    }
3913
3914    #[test]
3915    fn test_vyuy_to_rgba_cpu() {
3916        let file = include_bytes!(concat!(
3917            env!("CARGO_MANIFEST_DIR"),
3918            "/../../testdata/camera720p.vyuy"
3919        ))
3920        .to_vec();
3921        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
3922        src.as_u8()
3923            .unwrap()
3924            .map()
3925            .unwrap()
3926            .as_mut_slice()
3927            .copy_from_slice(&file);
3928
3929        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3930        let mut cpu_converter = CPUProcessor::new();
3931
3932        let (result, _src, dst) = convert_img(
3933            &mut cpu_converter,
3934            src,
3935            dst,
3936            Rotation::None,
3937            Flip::None,
3938            Crop::no_crop(),
3939        );
3940        result.unwrap();
3941
3942        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3943        target_image
3944            .as_u8()
3945            .unwrap()
3946            .map()
3947            .unwrap()
3948            .as_mut_slice()
3949            .copy_from_slice(include_bytes!(concat!(
3950                env!("CARGO_MANIFEST_DIR"),
3951                "/../../testdata/camera720p.rgba"
3952            )));
3953
3954        compare_images(&dst, &target_image, 0.98, function!());
3955    }
3956
3957    #[test]
3958    fn test_vyuy_to_rgb_cpu() {
3959        let file = include_bytes!(concat!(
3960            env!("CARGO_MANIFEST_DIR"),
3961            "/../../testdata/camera720p.vyuy"
3962        ))
3963        .to_vec();
3964        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
3965        src.as_u8()
3966            .unwrap()
3967            .map()
3968            .unwrap()
3969            .as_mut_slice()
3970            .copy_from_slice(&file);
3971
3972        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3973        let mut cpu_converter = CPUProcessor::new();
3974
3975        let (result, _src, dst) = convert_img(
3976            &mut cpu_converter,
3977            src,
3978            dst,
3979            Rotation::None,
3980            Flip::None,
3981            Crop::no_crop(),
3982        );
3983        result.unwrap();
3984
3985        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3986        target_image
3987            .as_u8()
3988            .unwrap()
3989            .map()
3990            .unwrap()
3991            .as_mut_slice()
3992            .as_chunks_mut::<3>()
3993            .0
3994            .iter_mut()
3995            .zip(
3996                include_bytes!(concat!(
3997                    env!("CARGO_MANIFEST_DIR"),
3998                    "/../../testdata/camera720p.rgba"
3999                ))
4000                .as_chunks::<4>()
4001                .0,
4002            )
4003            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
4004
4005        compare_images(&dst, &target_image, 0.98, function!());
4006    }
4007
4008    #[test]
4009    #[cfg(target_os = "linux")]
4010    fn test_vyuy_to_rgba_g2d() {
4011        if !is_g2d_available() {
4012            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
4013            return;
4014        }
4015        if !is_dma_available() {
4016            eprintln!(
4017                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4018            );
4019            return;
4020        }
4021
4022        let src = load_bytes_to_tensor(
4023            1280,
4024            720,
4025            PixelFormat::Vyuy,
4026            None,
4027            include_bytes!(concat!(
4028                env!("CARGO_MANIFEST_DIR"),
4029                "/../../testdata/camera720p.vyuy"
4030            )),
4031        )
4032        .unwrap();
4033
4034        let dst = TensorDyn::image(
4035            1280,
4036            720,
4037            PixelFormat::Rgba,
4038            DType::U8,
4039            Some(TensorMemory::Dma),
4040        )
4041        .unwrap();
4042        let mut g2d_converter = G2DProcessor::new().unwrap();
4043
4044        let (result, _src, dst) = convert_img(
4045            &mut g2d_converter,
4046            src,
4047            dst,
4048            Rotation::None,
4049            Flip::None,
4050            Crop::no_crop(),
4051        );
4052        match result {
4053            Err(Error::G2D(_)) => {
4054                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support PixelFormat::Vyuy format");
4055                return;
4056            }
4057            r => r.unwrap(),
4058        }
4059
4060        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4061        target_image
4062            .as_u8()
4063            .unwrap()
4064            .map()
4065            .unwrap()
4066            .as_mut_slice()
4067            .copy_from_slice(include_bytes!(concat!(
4068                env!("CARGO_MANIFEST_DIR"),
4069                "/../../testdata/camera720p.rgba"
4070            )));
4071
4072        compare_images(&dst, &target_image, 0.98, function!());
4073    }
4074
4075    #[test]
4076    #[cfg(target_os = "linux")]
4077    fn test_vyuy_to_rgb_g2d() {
4078        if !is_g2d_available() {
4079            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
4080            return;
4081        }
4082        if !is_dma_available() {
4083            eprintln!(
4084                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4085            );
4086            return;
4087        }
4088
4089        let src = load_bytes_to_tensor(
4090            1280,
4091            720,
4092            PixelFormat::Vyuy,
4093            None,
4094            include_bytes!(concat!(
4095                env!("CARGO_MANIFEST_DIR"),
4096                "/../../testdata/camera720p.vyuy"
4097            )),
4098        )
4099        .unwrap();
4100
4101        let g2d_dst = TensorDyn::image(
4102            1280,
4103            720,
4104            PixelFormat::Rgb,
4105            DType::U8,
4106            Some(TensorMemory::Dma),
4107        )
4108        .unwrap();
4109        let mut g2d_converter = G2DProcessor::new().unwrap();
4110
4111        let (result, src, g2d_dst) = convert_img(
4112            &mut g2d_converter,
4113            src,
4114            g2d_dst,
4115            Rotation::None,
4116            Flip::None,
4117            Crop::no_crop(),
4118        );
4119        match result {
4120            Err(Error::G2D(_)) => {
4121                eprintln!(
4122                    "SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support PixelFormat::Vyuy format"
4123                );
4124                return;
4125            }
4126            r => r.unwrap(),
4127        }
4128
4129        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4130        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4131
4132        let (result, _src, cpu_dst) = convert_img(
4133            &mut cpu_converter,
4134            src,
4135            cpu_dst,
4136            Rotation::None,
4137            Flip::None,
4138            Crop::no_crop(),
4139        );
4140        result.unwrap();
4141
4142        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4143    }
4144
4145    #[test]
4146    #[cfg(target_os = "linux")]
4147    #[cfg(feature = "opengl")]
4148    fn test_vyuy_to_rgba_opengl() {
4149        if !is_opengl_available() {
4150            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4151            return;
4152        }
4153        if !is_dma_available() {
4154            eprintln!(
4155                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4156                function!()
4157            );
4158            return;
4159        }
4160
4161        let src = load_bytes_to_tensor(
4162            1280,
4163            720,
4164            PixelFormat::Vyuy,
4165            Some(TensorMemory::Dma),
4166            include_bytes!(concat!(
4167                env!("CARGO_MANIFEST_DIR"),
4168                "/../../testdata/camera720p.vyuy"
4169            )),
4170        )
4171        .unwrap();
4172
4173        let dst = TensorDyn::image(
4174            1280,
4175            720,
4176            PixelFormat::Rgba,
4177            DType::U8,
4178            Some(TensorMemory::Dma),
4179        )
4180        .unwrap();
4181        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4182
4183        let (result, _src, dst) = convert_img(
4184            &mut gl_converter,
4185            src,
4186            dst,
4187            Rotation::None,
4188            Flip::None,
4189            Crop::no_crop(),
4190        );
4191        match result {
4192            Err(Error::NotSupported(_)) => {
4193                eprintln!(
4194                    "SKIPPED: {} - OpenGL does not support PixelFormat::Vyuy DMA format",
4195                    function!()
4196                );
4197                return;
4198            }
4199            r => r.unwrap(),
4200        }
4201
4202        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4203        target_image
4204            .as_u8()
4205            .unwrap()
4206            .map()
4207            .unwrap()
4208            .as_mut_slice()
4209            .copy_from_slice(include_bytes!(concat!(
4210                env!("CARGO_MANIFEST_DIR"),
4211                "/../../testdata/camera720p.rgba"
4212            )));
4213
4214        compare_images(&dst, &target_image, 0.98, function!());
4215    }
4216
4217    #[test]
4218    fn test_nv12_to_rgba_cpu() {
4219        let file = include_bytes!(concat!(
4220            env!("CARGO_MANIFEST_DIR"),
4221            "/../../testdata/zidane.nv12"
4222        ))
4223        .to_vec();
4224        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4225        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4226            .copy_from_slice(&file);
4227
4228        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4229        let mut cpu_converter = CPUProcessor::new();
4230
4231        let (result, _src, dst) = convert_img(
4232            &mut cpu_converter,
4233            src,
4234            dst,
4235            Rotation::None,
4236            Flip::None,
4237            Crop::no_crop(),
4238        );
4239        result.unwrap();
4240
4241        let target_image = crate::load_image(
4242            include_bytes!(concat!(
4243                env!("CARGO_MANIFEST_DIR"),
4244                "/../../testdata/zidane.jpg"
4245            )),
4246            Some(PixelFormat::Rgba),
4247            None,
4248        )
4249        .unwrap();
4250
4251        compare_images(&dst, &target_image, 0.98, function!());
4252    }
4253
4254    #[test]
4255    fn test_nv12_to_rgb_cpu() {
4256        let file = include_bytes!(concat!(
4257            env!("CARGO_MANIFEST_DIR"),
4258            "/../../testdata/zidane.nv12"
4259        ))
4260        .to_vec();
4261        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4262        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4263            .copy_from_slice(&file);
4264
4265        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4266        let mut cpu_converter = CPUProcessor::new();
4267
4268        let (result, _src, dst) = convert_img(
4269            &mut cpu_converter,
4270            src,
4271            dst,
4272            Rotation::None,
4273            Flip::None,
4274            Crop::no_crop(),
4275        );
4276        result.unwrap();
4277
4278        let target_image = crate::load_image(
4279            include_bytes!(concat!(
4280                env!("CARGO_MANIFEST_DIR"),
4281                "/../../testdata/zidane.jpg"
4282            )),
4283            Some(PixelFormat::Rgb),
4284            None,
4285        )
4286        .unwrap();
4287
4288        compare_images(&dst, &target_image, 0.98, function!());
4289    }
4290
4291    #[test]
4292    fn test_nv12_to_grey_cpu() {
4293        let file = include_bytes!(concat!(
4294            env!("CARGO_MANIFEST_DIR"),
4295            "/../../testdata/zidane.nv12"
4296        ))
4297        .to_vec();
4298        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4299        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4300            .copy_from_slice(&file);
4301
4302        let dst = TensorDyn::image(1280, 720, PixelFormat::Grey, DType::U8, None).unwrap();
4303        let mut cpu_converter = CPUProcessor::new();
4304
4305        let (result, _src, dst) = convert_img(
4306            &mut cpu_converter,
4307            src,
4308            dst,
4309            Rotation::None,
4310            Flip::None,
4311            Crop::no_crop(),
4312        );
4313        result.unwrap();
4314
4315        let target_image = crate::load_image(
4316            include_bytes!(concat!(
4317                env!("CARGO_MANIFEST_DIR"),
4318                "/../../testdata/zidane.jpg"
4319            )),
4320            Some(PixelFormat::Grey),
4321            None,
4322        )
4323        .unwrap();
4324
4325        compare_images(&dst, &target_image, 0.98, function!());
4326    }
4327
4328    #[test]
4329    fn test_nv12_to_yuyv_cpu() {
4330        let file = include_bytes!(concat!(
4331            env!("CARGO_MANIFEST_DIR"),
4332            "/../../testdata/zidane.nv12"
4333        ))
4334        .to_vec();
4335        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4336        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4337            .copy_from_slice(&file);
4338
4339        let dst = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
4340        let mut cpu_converter = CPUProcessor::new();
4341
4342        let (result, _src, dst) = convert_img(
4343            &mut cpu_converter,
4344            src,
4345            dst,
4346            Rotation::None,
4347            Flip::None,
4348            Crop::no_crop(),
4349        );
4350        result.unwrap();
4351
4352        let target_image = crate::load_image(
4353            include_bytes!(concat!(
4354                env!("CARGO_MANIFEST_DIR"),
4355                "/../../testdata/zidane.jpg"
4356            )),
4357            Some(PixelFormat::Rgb),
4358            None,
4359        )
4360        .unwrap();
4361
4362        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4363    }
4364
    #[test]
    fn test_cpu_resize_planar_rgb() {
        // Copy a 4x4 RGBA source 1:1 into a 5x5 planar-RGB destination at a
        // (1,1) offset; the uncovered border (row 0 and column 0) is filled
        // with the dst_color 114 grey. Alpha cannot be stored in PlanarRgb.
        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::new()
                .with_dst_rect(Some(Rect {
                    left: 1,
                    top: 1,
                    width: 4,
                    height: 4,
                }))
                .with_dst_color(Some([114, 114, 114, 255])),
        );
        result.unwrap();

        // Expected planar output: each source line below is one full 5x5
        // plane (R, then G, then B), 25 bytes per plane, 5 bytes per row.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
        ];

        // Exact byte comparison — the placement and border fill must be
        // reproduced precisely by the CPU backend.
        assert_eq!(
            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
            &expected_dst
        );
    }
4414
    #[test]
    fn test_cpu_resize_planar_rgba() {
        // Copy a 4x4 RGBA source 1:1 into a 5x5 planar-RGBA destination at a
        // (1,1) offset; the uncovered border (row 0 and column 0) is filled
        // with the dst_color [114, 114, 114, 255].
        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.as_u8()
            .unwrap()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::new()
                .with_dst_rect(Some(Rect {
                    left: 1,
                    top: 1,
                    width: 4,
                    height: 4,
                }))
                .with_dst_color(Some([114, 114, 114, 255])),
        );
        result.unwrap();

        // Expected planar output: each source line below is one full 5x5
        // plane (R, G, B, then A), 25 bytes per plane. The alpha plane's
        // border values come from the dst_color's 255 alpha component.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
        ];

        // Exact byte comparison — placement, border fill, and alpha plane
        // must all be reproduced precisely by the CPU backend.
        assert_eq!(
            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
            &expected_dst
        );
    }
4465
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize_planar_rgb() {
        // Verifies the OpenGL backend's letterboxed planar-RGB output matches
        // the CPU backend's output for the same crop, within SSIM tolerance.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/test_image.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // CPU reference destination.
        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // NOTE(review): this initial no-crop conversion appears redundant —
        // the letterbox pass below writes the content rect and fills the
        // border via dst_color. Confirm whether it can be removed.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        // Letterbox: content in a centered 440x440 rect, border filled with
        // 114 grey (alpha component 114 here, unlike the 255 used elsewhere).
        let crop_letterbox = Crop::new()
            .with_dst_rect(Some(Rect {
                left: 102,
                top: 102,
                width: 440,
                height: 440,
            }))
            .with_dst_color(Some([114, 114, 114, 114]));
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();

        // Same conversion on the threaded GL backend.
        let gl_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
4549
4550    #[test]
4551    fn test_cpu_resize_nv16() {
4552        let file = include_bytes!(concat!(
4553            env!("CARGO_MANIFEST_DIR"),
4554            "/../../testdata/zidane.jpg"
4555        ))
4556        .to_vec();
4557        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
4558
4559        let cpu_nv16_dst = TensorDyn::image(640, 640, PixelFormat::Nv16, DType::U8, None).unwrap();
4560        let cpu_rgb_dst = TensorDyn::image(640, 640, PixelFormat::Rgb, DType::U8, None).unwrap();
4561        let mut cpu_converter = CPUProcessor::new();
4562        let crop = Crop::new()
4563            .with_dst_rect(Some(Rect {
4564                left: 20,
4565                top: 140,
4566                width: 600,
4567                height: 360,
4568            }))
4569            .with_dst_color(Some([255, 128, 0, 255]));
4570
4571        let (result, src, cpu_nv16_dst) = convert_img(
4572            &mut cpu_converter,
4573            src,
4574            cpu_nv16_dst,
4575            Rotation::None,
4576            Flip::None,
4577            crop,
4578        );
4579        result.unwrap();
4580
4581        let (result, _src, cpu_rgb_dst) = convert_img(
4582            &mut cpu_converter,
4583            src,
4584            cpu_rgb_dst,
4585            Rotation::None,
4586            Flip::None,
4587            crop,
4588        );
4589        result.unwrap();
4590        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4591    }
4592
4593    fn load_bytes_to_tensor(
4594        width: usize,
4595        height: usize,
4596        format: PixelFormat,
4597        memory: Option<TensorMemory>,
4598        bytes: &[u8],
4599    ) -> Result<TensorDyn, Error> {
4600        let src = TensorDyn::image(width, height, format, DType::U8, memory)?;
4601        src.as_u8()
4602            .unwrap()
4603            .map()?
4604            .as_mut_slice()
4605            .copy_from_slice(bytes);
4606        Ok(src)
4607    }
4608
4609    fn compare_images(img1: &TensorDyn, img2: &TensorDyn, threshold: f64, name: &str) {
4610        assert_eq!(img1.height(), img2.height(), "Heights differ");
4611        assert_eq!(img1.width(), img2.width(), "Widths differ");
4612        assert_eq!(
4613            img1.format().unwrap(),
4614            img2.format().unwrap(),
4615            "PixelFormat differ"
4616        );
4617        assert!(
4618            matches!(
4619                img1.format().unwrap(),
4620                PixelFormat::Rgb | PixelFormat::Rgba | PixelFormat::Grey | PixelFormat::PlanarRgb
4621            ),
4622            "format must be Rgb or Rgba for comparison"
4623        );
4624
4625        let image1 = match img1.format().unwrap() {
4626            PixelFormat::Rgb => image::RgbImage::from_vec(
4627                img1.width().unwrap() as u32,
4628                img1.height().unwrap() as u32,
4629                img1.as_u8().unwrap().map().unwrap().to_vec(),
4630            )
4631            .unwrap(),
4632            PixelFormat::Rgba => image::RgbaImage::from_vec(
4633                img1.width().unwrap() as u32,
4634                img1.height().unwrap() as u32,
4635                img1.as_u8().unwrap().map().unwrap().to_vec(),
4636            )
4637            .unwrap()
4638            .convert(),
4639            PixelFormat::Grey => image::GrayImage::from_vec(
4640                img1.width().unwrap() as u32,
4641                img1.height().unwrap() as u32,
4642                img1.as_u8().unwrap().map().unwrap().to_vec(),
4643            )
4644            .unwrap()
4645            .convert(),
4646            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4647                img1.width().unwrap() as u32,
4648                (img1.height().unwrap() * 3) as u32,
4649                img1.as_u8().unwrap().map().unwrap().to_vec(),
4650            )
4651            .unwrap()
4652            .convert(),
4653            _ => return,
4654        };
4655
4656        let image2 = match img2.format().unwrap() {
4657            PixelFormat::Rgb => image::RgbImage::from_vec(
4658                img2.width().unwrap() as u32,
4659                img2.height().unwrap() as u32,
4660                img2.as_u8().unwrap().map().unwrap().to_vec(),
4661            )
4662            .unwrap(),
4663            PixelFormat::Rgba => image::RgbaImage::from_vec(
4664                img2.width().unwrap() as u32,
4665                img2.height().unwrap() as u32,
4666                img2.as_u8().unwrap().map().unwrap().to_vec(),
4667            )
4668            .unwrap()
4669            .convert(),
4670            PixelFormat::Grey => image::GrayImage::from_vec(
4671                img2.width().unwrap() as u32,
4672                img2.height().unwrap() as u32,
4673                img2.as_u8().unwrap().map().unwrap().to_vec(),
4674            )
4675            .unwrap()
4676            .convert(),
4677            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4678                img2.width().unwrap() as u32,
4679                (img2.height().unwrap() * 3) as u32,
4680                img2.as_u8().unwrap().map().unwrap().to_vec(),
4681            )
4682            .unwrap()
4683            .convert(),
4684            _ => return,
4685        };
4686
4687        let similarity = image_compare::rgb_similarity_structure(
4688            &image_compare::Algorithm::RootMeanSquared,
4689            &image1,
4690            &image2,
4691        )
4692        .expect("Image Comparison failed");
4693        if similarity.score < threshold {
4694            // image1.save(format!("{name}_1.png"));
4695            // image2.save(format!("{name}_2.png"));
4696            similarity
4697                .image
4698                .to_color_map()
4699                .save(format!("{name}.png"))
4700                .unwrap();
4701            panic!(
4702                "{name}: converted image and target image have similarity score too low: {} < {}",
4703                similarity.score, threshold
4704            )
4705        }
4706    }
4707
    /// Compares two images of possibly non-RGB formats by first converting
    /// each to packed RGB with the CPU backend, then asserting their RMS
    /// structural-similarity score is at least `threshold`.
    ///
    /// If either CPU conversion fails, falls back to comparing the raw tensor
    /// bytes as W-wide greyscale strips instead of giving up.
    fn compare_images_convert_to_rgb(
        img1: &TensorDyn,
        img2: &TensorDyn,
        threshold: f64,
        name: &str,
    ) {
        assert_eq!(img1.height(), img2.height(), "Heights differ");
        assert_eq!(img1.width(), img2.width(), "Widths differ");

        // Scratch RGB tensors sized from img1 (both inputs share geometry
        // per the asserts above).
        let mut img_rgb1 = TensorDyn::image(
            img1.width().unwrap(),
            img1.height().unwrap(),
            PixelFormat::Rgb,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let mut img_rgb2 = TensorDyn::image(
            img1.width().unwrap(),
            img1.height().unwrap(),
            PixelFormat::Rgb,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let mut __cv = CPUProcessor::default();
        let r1 = __cv.convert(
            img1,
            &mut img_rgb1,
            crate::Rotation::None,
            crate::Flip::None,
            crate::Crop::default(),
        );
        let r2 = __cv.convert(
            img2,
            &mut img_rgb2,
            crate::Rotation::None,
            crate::Flip::None,
            crate::Crop::default(),
        );
        if r1.is_err() || r2.is_err() {
            // Fallback: compare raw bytes as greyscale strip
            // NOTE(review): the derived heights use integer division, which
            // assumes each tensor's byte length is an exact multiple of its
            // width — confirm for padded/strided formats.
            let w = img1.width().unwrap() as u32;
            let data1 = img1.as_u8().unwrap().map().unwrap().to_vec();
            let data2 = img2.as_u8().unwrap().map().unwrap().to_vec();
            let h1 = (data1.len() as u32) / w;
            let h2 = (data2.len() as u32) / w;
            let g1 = image::GrayImage::from_vec(w, h1, data1).unwrap();
            let g2 = image::GrayImage::from_vec(w, h2, data2).unwrap();
            let similarity = image_compare::gray_similarity_structure(
                &image_compare::Algorithm::RootMeanSquared,
                &g1,
                &g2,
            )
            .expect("Image Comparison failed");
            if similarity.score < threshold {
                panic!(
                    "{name}: converted image and target image have similarity score too low: {} < {}",
                    similarity.score, threshold
                )
            }
            return;
        }

        let image1 = image::RgbImage::from_vec(
            img_rgb1.width().unwrap() as u32,
            img_rgb1.height().unwrap() as u32,
            img_rgb1.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap();

        let image2 = image::RgbImage::from_vec(
            img_rgb2.width().unwrap() as u32,
            img_rgb2.height().unwrap() as u32,
            img_rgb2.as_u8().unwrap().map().unwrap().to_vec(),
        )
        .unwrap();

        let similarity = image_compare::rgb_similarity_structure(
            &image_compare::Algorithm::RootMeanSquared,
            &image1,
            &image2,
        )
        .expect("Image Comparison failed");
        if similarity.score < threshold {
            // image1.save(format!("{name}_1.png"));
            // image2.save(format!("{name}_2.png"));
            // Dump the similarity heat map to help diagnose the mismatch.
            similarity
                .image
                .to_color_map()
                .save(format!("{name}.png"))
                .unwrap();
            panic!(
                "{name}: converted image and target image have similarity score too low: {} < {}",
                similarity.score, threshold
            )
        }
    }
4806
4807    // =========================================================================
4808    // PixelFormat::Nv12 Format Tests
4809    // =========================================================================
4810
4811    #[test]
4812    fn test_nv12_image_creation() {
4813        let width = 640;
4814        let height = 480;
4815        let img = TensorDyn::image(width, height, PixelFormat::Nv12, DType::U8, None).unwrap();
4816
4817        assert_eq!(img.width(), Some(width));
4818        assert_eq!(img.height(), Some(height));
4819        assert_eq!(img.format().unwrap(), PixelFormat::Nv12);
4820        // PixelFormat::Nv12 uses shape [H*3/2, W] to store Y plane + UV plane
4821        assert_eq!(img.as_u8().unwrap().shape(), &[height * 3 / 2, width]);
4822    }
4823
4824    #[test]
4825    fn test_nv12_channels() {
4826        let img = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
4827        // PixelFormat::Nv12.channels() returns 1 (luma plane)
4828        assert_eq!(img.format().unwrap().channels(), 1);
4829    }
4830
4831    // =========================================================================
4832    // Tensor Format Metadata Tests
4833    // =========================================================================
4834
4835    #[test]
4836    fn test_tensor_set_format_planar() {
4837        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4838        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
4839        assert_eq!(tensor.format(), Some(PixelFormat::PlanarRgb));
4840        assert_eq!(tensor.width(), Some(640));
4841        assert_eq!(tensor.height(), Some(480));
4842    }
4843
4844    #[test]
4845    fn test_tensor_set_format_interleaved() {
4846        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
4847        tensor.set_format(PixelFormat::Rgba).unwrap();
4848        assert_eq!(tensor.format(), Some(PixelFormat::Rgba));
4849        assert_eq!(tensor.width(), Some(640));
4850        assert_eq!(tensor.height(), Some(480));
4851    }
4852
4853    #[test]
4854    fn test_tensordyn_image_rgb() {
4855        let img = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
4856        assert_eq!(img.width(), Some(640));
4857        assert_eq!(img.height(), Some(480));
4858        assert_eq!(img.format(), Some(PixelFormat::Rgb));
4859    }
4860
4861    #[test]
4862    fn test_tensordyn_image_planar_rgb() {
4863        let img = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
4864        assert_eq!(img.width(), Some(640));
4865        assert_eq!(img.height(), Some(480));
4866        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
4867    }
4868
4869    #[test]
4870    fn test_rgb_int8_format() {
4871        // Int8 variant: same PixelFormat::Rgb but with DType::I8
4872        let img = TensorDyn::image(
4873            1280,
4874            720,
4875            PixelFormat::Rgb,
4876            DType::I8,
4877            Some(TensorMemory::Mem),
4878        )
4879        .unwrap();
4880        assert_eq!(img.width(), Some(1280));
4881        assert_eq!(img.height(), Some(720));
4882        assert_eq!(img.format(), Some(PixelFormat::Rgb));
4883        assert_eq!(img.dtype(), DType::I8);
4884    }
4885
4886    #[test]
4887    fn test_planar_rgb_int8_format() {
4888        let img = TensorDyn::image(
4889            1280,
4890            720,
4891            PixelFormat::PlanarRgb,
4892            DType::I8,
4893            Some(TensorMemory::Mem),
4894        )
4895        .unwrap();
4896        assert_eq!(img.width(), Some(1280));
4897        assert_eq!(img.height(), Some(720));
4898        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
4899        assert_eq!(img.dtype(), DType::I8);
4900    }
4901
4902    #[test]
4903    fn test_rgb_from_tensor() {
4904        let mut tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
4905        tensor.set_format(PixelFormat::Rgb).unwrap();
4906        let img = TensorDyn::from(tensor);
4907        assert_eq!(img.width(), Some(1280));
4908        assert_eq!(img.height(), Some(720));
4909        assert_eq!(img.format(), Some(PixelFormat::Rgb));
4910    }
4911
4912    #[test]
4913    fn test_planar_rgb_from_tensor() {
4914        let mut tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
4915        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
4916        let img = TensorDyn::from(tensor);
4917        assert_eq!(img.width(), Some(1280));
4918        assert_eq!(img.height(), Some(720));
4919        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
4920    }
4921
4922    #[test]
4923    fn test_dtype_determines_int8() {
4924        // DType::I8 indicates int8 data
4925        let u8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::U8, None).unwrap();
4926        let i8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::I8, None).unwrap();
4927        assert_eq!(u8_img.dtype(), DType::U8);
4928        assert_eq!(i8_img.dtype(), DType::I8);
4929    }
4930
4931    #[test]
4932    fn test_pixel_layout_packed_vs_planar() {
4933        // Packed vs planar layout classification
4934        assert_eq!(PixelFormat::Rgb.layout(), PixelLayout::Packed);
4935        assert_eq!(PixelFormat::Rgba.layout(), PixelLayout::Packed);
4936        assert_eq!(PixelFormat::PlanarRgb.layout(), PixelLayout::Planar);
4937        assert_eq!(PixelFormat::Nv12.layout(), PixelLayout::SemiPlanar);
4938    }
4939
    /// Integration test that exercises the PBO-to-PBO convert path.
    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
    /// then converts between them. Skipped when GL is unavailable or the
    /// backend is not PBO (e.g. DMA-buf systems).
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[test]
    fn test_convert_pbo_to_pbo() {
        let mut converter = ImageProcessor::new().unwrap();

        // Skip if GL is not available or backend is not PBO
        let is_pbo = converter
            .opengl
            .as_ref()
            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
        if !is_pbo {
            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
            return;
        }

        let src_w = 640;
        let src_h = 480;
        let dst_w = 320;
        let dst_h = 240;

        // Create PBO-backed source image
        let pbo_src = converter
            .create_image(src_w, src_h, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        assert_eq!(
            pbo_src.as_u8().unwrap().memory(),
            TensorMemory::Pbo,
            "create_image should produce a PBO tensor"
        );

        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let jpeg_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Resize JPEG into a Mem temp of the right size, then copy into PBO
        let mem_src = TensorDyn::image(
            src_w,
            src_h,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let (result, _jpeg_src, mem_src) = convert_img(
            &mut CPUProcessor::new(),
            jpeg_src,
            mem_src,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Copy pixel data into the PBO source by mapping it
        // (inner scope ensures both mappings are released before converting).
        {
            let src_data = mem_src.as_u8().unwrap().map().unwrap();
            let mut pbo_map = pbo_src.as_u8().unwrap().map().unwrap();
            pbo_map.copy_from_slice(&src_data);
        }

        // Create PBO-backed destination image
        let pbo_dst = converter
            .create_image(dst_w, dst_h, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        assert_eq!(pbo_dst.as_u8().unwrap().memory(), TensorMemory::Pbo);

        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
        let mut pbo_dst = pbo_dst;
        let result = converter.convert(
            &pbo_src,
            &mut pbo_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Verify: compare with CPU-only conversion of the same input
        let cpu_dst = TensorDyn::image(
            dst_w,
            dst_h,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let (result, _mem_src, cpu_dst) = convert_img(
            &mut CPUProcessor::new(),
            mem_src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Re-tag the PBO result as Rgba so compare_images accepts it.
        let pbo_dst_img = {
            let mut __t = pbo_dst.into_u8().unwrap();
            __t.set_format(PixelFormat::Rgba).unwrap();
            TensorDyn::from(__t)
        };
        compare_images(&pbo_dst_img, &cpu_dst, 0.95, function!());
        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
    }
5053
5054    #[test]
5055    fn test_image_bgra() {
5056        let img = TensorDyn::image(
5057            640,
5058            480,
5059            PixelFormat::Bgra,
5060            DType::U8,
5061            Some(edgefirst_tensor::TensorMemory::Mem),
5062        )
5063        .unwrap();
5064        assert_eq!(img.width(), Some(640));
5065        assert_eq!(img.height(), Some(480));
5066        assert_eq!(img.format().unwrap().channels(), 4);
5067        assert_eq!(img.format().unwrap(), PixelFormat::Bgra);
5068    }
5069
5070    // ========================================================================
5071    // Tests for EDGEFIRST_FORCE_BACKEND env var
5072    // ========================================================================
5073
5074    #[test]
5075    fn test_force_backend_cpu() {
5076        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5077        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5078        let result = ImageProcessor::new();
5079        match original {
5080            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5081            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5082        }
5083        let converter = result.unwrap();
5084        assert!(converter.cpu.is_some());
5085        assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
5086    }
5087
5088    #[test]
5089    fn test_force_backend_invalid() {
5090        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5091        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
5092        let result = ImageProcessor::new();
5093        match original {
5094            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5095            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5096        }
5097        assert!(
5098            matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
5099            "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
5100        );
5101    }
5102
5103    #[test]
5104    fn test_force_backend_unset() {
5105        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5106        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
5107        let result = ImageProcessor::new();
5108        match original {
5109            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5110            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5111        }
5112        let converter = result.unwrap();
5113        assert!(converter.forced_backend.is_none());
5114    }
5115
5116    // ========================================================================
5117    // Tests for hybrid mask path error handling
5118    // ========================================================================
5119
    #[test]
    fn test_draw_masks_proto_no_cpu_returns_error() {
        // Disable CPU backend to trigger the error path
        // (GL and G2D are disabled too so no other backend can be selected).
        // Each variable's prior value is saved so it can be restored below.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };

        let result = ImageProcessor::new();

        // Restore the environment before asserting so a panic cannot leak
        // the overrides into other tests.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        let mut converter = result.unwrap();
        assert!(converter.cpu.is_none(), "CPU should be disabled");

        let dst = TensorDyn::image(
            640,
            480,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Mem),
        )
        .unwrap();
        let mut dst_dyn = dst;
        // One detection box in normalized coordinates with a matching set of
        // mask coefficients below.
        let det = [DetectBox {
            bbox: edgefirst_decoder::BoundingBox {
                xmin: 0.1,
                ymin: 0.1,
                xmax: 0.5,
                ymax: 0.5,
            },
            score: 0.9,
            label: 0,
        }];
        // Minimal proto tensor: 8x8 spatial grid with 4 prototype channels.
        let proto_data = ProtoData {
            mask_coefficients: vec![vec![0.5; 4]],
            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
        };
        // The mask path requires the CPU backend, so this must fail.
        let result = converter.draw_masks_proto(&mut dst_dyn, &det, &proto_data);
        assert!(
            matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
            "draw_masks_proto without CPU should return Internal error: {result:?}"
        );
    }
5177
5178    #[test]
5179    fn test_draw_masks_proto_cpu_fallback_works() {
5180        // Force CPU-only backend to ensure the CPU fallback path executes
5181        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5182        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5183        let result = ImageProcessor::new();
5184        match original {
5185            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5186            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5187        }
5188
5189        let mut converter = result.unwrap();
5190        assert!(converter.cpu.is_some());
5191
5192        let dst = TensorDyn::image(
5193            64,
5194            64,
5195            PixelFormat::Rgba,
5196            DType::U8,
5197            Some(TensorMemory::Mem),
5198        )
5199        .unwrap();
5200        let mut dst_dyn = dst;
5201        let det = [DetectBox {
5202            bbox: edgefirst_decoder::BoundingBox {
5203                xmin: 0.1,
5204                ymin: 0.1,
5205                xmax: 0.5,
5206                ymax: 0.5,
5207            },
5208            score: 0.9,
5209            label: 0,
5210        }];
5211        let proto_data = ProtoData {
5212            mask_coefficients: vec![vec![0.5; 4]],
5213            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5214        };
5215        let result = converter.draw_masks_proto(&mut dst_dyn, &det, &proto_data);
5216        assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
5217    }
5218
5219    #[test]
5220    fn test_set_format_then_cpu_convert() {
5221        // Force CPU backend (save/restore to avoid leaking into other tests)
5222        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5223        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5224        let mut processor = ImageProcessor::new().unwrap();
5225        match original {
5226            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5227            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5228        }
5229
5230        // Load a source image
5231        let image = include_bytes!(concat!(
5232            env!("CARGO_MANIFEST_DIR"),
5233            "/../../testdata/zidane.jpg"
5234        ));
5235        let src = load_image(image, Some(PixelFormat::Rgba), None).unwrap();
5236
5237        // Create a raw tensor, then attach format — simulating the from_fd workflow
5238        let mut dst =
5239            TensorDyn::new(&[640, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
5240        dst.set_format(PixelFormat::Rgb).unwrap();
5241
5242        // Convert should work with the set_format-annotated tensor
5243        processor
5244            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
5245            .unwrap();
5246
5247        // Verify format survived conversion
5248        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
5249        assert_eq!(dst.width(), Some(640));
5250        assert_eq!(dst.height(), Some(640));
5251    }
5252}