Skip to main content

edgefirst_image/
lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate uses [`TensorDyn`] from `edgefirst_tensor` to represent images,
21with [`PixelFormat`] metadata describing the pixel layout. The
22[`ImageProcessor`] struct manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
29# use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
30# fn main() -> Result<(), edgefirst_image::Error> {
31let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
32let src = load_image(image, Some(PixelFormat::Rgba), None)?;
33let mut converter = ImageProcessor::new()?;
34let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
35converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
36# Ok(())
37# }
38```
39
40## Environment Variables
41The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
42following environment variables:
43- `EDGEFIRST_FORCE_BACKEND`: When set to `cpu`, `g2d`, or `opengl` (case-insensitive),
44  only that single backend is initialized and no fallback chain is used. If the
45  forced backend fails to initialize, an error is returned immediately. This is
46  useful for benchmarking individual backends in isolation. When this variable is
47  set, the `EDGEFIRST_DISABLE_*` variables are ignored.
48- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
49  conversion, forcing the use of CPU or other available hardware methods.
50- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
51  conversion, forcing the use of CPU or other available hardware methods.
52- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
53  conversion, forcing the use of hardware acceleration methods. If no hardware
54  acceleration methods are available, an error will be returned when attempting
55  to create an `ImageProcessor`.
56
Additionally, the `TensorMemory` used by default allocations can be controlled with the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor memory
uses plain system memory. This disables the use of specialized memory regions for tensors
(and thus hardware acceleration), but may improve the performance of the CPU converter.
61*/
62#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
63
64use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
65use edgefirst_tensor::{
66    DType, PixelFormat, PixelLayout, Tensor, TensorDyn, TensorMemory, TensorTrait as _,
67};
68use enum_dispatch::enum_dispatch;
69use std::{fmt::Display, time::Instant};
70use zune_jpeg::{
71    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
72    JpegDecoder,
73};
74use zune_png::PngDecoder;
75
76pub use cpu::CPUProcessor;
77pub use error::{Error, Result};
78#[cfg(target_os = "linux")]
79pub use g2d::G2DProcessor;
80#[cfg(target_os = "linux")]
81#[cfg(feature = "opengl")]
82pub use opengl_headless::GLProcessorThreaded;
83#[cfg(target_os = "linux")]
84#[cfg(feature = "opengl")]
85pub use opengl_headless::Int8InterpolationMode;
86#[cfg(target_os = "linux")]
87#[cfg(feature = "opengl")]
88pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
89
90mod cpu;
91mod error;
92mod g2d;
93#[path = "gl/mod.rs"]
94mod opengl_headless;
95
96// Use `edgefirst_tensor::PixelFormat` variants (Rgb, Rgba, Grey, etc.) and
97// `TensorDyn` / `Tensor<u8>` with `.format()` metadata instead.
98
99/// Flips the image data, then rotates it. Returns a new `TensorDyn`.
100fn rotate_flip_to_dyn(
101    src: &Tensor<u8>,
102    src_fmt: PixelFormat,
103    rotation: Rotation,
104    flip: Flip,
105    memory: Option<TensorMemory>,
106) -> Result<TensorDyn, Error> {
107    let src_w = src.width().unwrap();
108    let src_h = src.height().unwrap();
109    let channels = src_fmt.channels();
110
111    let (dst_w, dst_h) = match rotation {
112        Rotation::None | Rotation::Rotate180 => (src_w, src_h),
113        Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src_h, src_w),
114    };
115
116    let dst = Tensor::<u8>::image(dst_w, dst_h, src_fmt, memory)?;
117    let src_map = src.map()?;
118    let mut dst_map = dst.map()?;
119
120    CPUProcessor::flip_rotate_ndarray_pf(
121        &src_map,
122        &mut dst_map,
123        dst_w,
124        dst_h,
125        channels,
126        rotation,
127        flip,
128    )?;
129    drop(dst_map);
130    drop(src_map);
131
132    Ok(TensorDyn::from(dst))
133}
134
/// Rotation applied to an image, in 90° clockwise steps.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Creates a Rotation enum from a clockwise angle in degrees. The angle
    /// is normalized modulo 360 (so e.g. 450 maps to `Clockwise90`) and must
    /// be a multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        let normalized = angle.rem_euclid(360);
        if normalized == 0 {
            Rotation::None
        } else if normalized == 90 {
            Rotation::Clockwise90
        } else if normalized == 180 {
            Rotation::Rotate180
        } else if normalized == 270 {
            Rotation::CounterClockwise90
        } else {
            panic!("rotation angle is not a multiple of 90")
        }
    }
}
165
/// Mirror flip applied to an image during conversion (applied before the
/// rotation — see [`ImageProcessorTrait::convert`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flip.
    None = 0,
    /// Vertical flip.
    Vertical = 1,
    /// Horizontal flip.
    Horizontal = 2,
}
172
/// Options for mask overlay rendering.
///
/// Controls how segmentation masks are composited onto the destination image:
/// - `background`: when set, the background image is drawn first and masks
///   are composited over it (result written to `dst`). When `None`, masks
///   are composited directly over `dst`'s existing content.
/// - `opacity`: scales the alpha of rendered mask colors. `1.0` (default)
///   preserves the class color's alpha unchanged; `0.5` makes masks
///   semi-transparent.
#[derive(Debug, Clone, Copy)]
pub struct MaskOverlay<'a> {
    /// Optional background blitted into `dst` before masks are drawn. Must
    /// match `dst`'s shape and pixel format (validated in `apply_background`).
    pub background: Option<&'a TensorDyn>,
    /// Alpha multiplier for mask colors; the builder methods clamp this to
    /// `[0.0, 1.0]`, but direct field writes are not clamped until use.
    pub opacity: f32,
}
187
188impl Default for MaskOverlay<'_> {
189    fn default() -> Self {
190        Self {
191            background: None,
192            opacity: 1.0,
193        }
194    }
195}
196
197impl<'a> MaskOverlay<'a> {
198    pub fn new() -> Self {
199        Self::default()
200    }
201
202    pub fn with_background(mut self, bg: &'a TensorDyn) -> Self {
203        self.background = Some(bg);
204        self
205    }
206
207    pub fn with_opacity(mut self, opacity: f32) -> Self {
208        self.opacity = opacity.clamp(0.0, 1.0);
209        self
210    }
211
212    /// Blit background into dst (if set) and return an overlay with
213    /// background cleared so backends don't need to handle it.
214    fn apply_background(&self, dst: &mut TensorDyn) -> Result<MaskOverlay<'static>> {
215        use edgefirst_tensor::TensorMapTrait;
216        if let Some(bg) = self.background {
217            if bg.shape() != dst.shape() {
218                return Err(Error::InvalidShape(
219                    "background shape does not match dst".into(),
220                ));
221            }
222            if bg.format() != dst.format() {
223                return Err(Error::InvalidShape(
224                    "background pixel format does not match dst".into(),
225                ));
226            }
227            let bg_u8 = bg.as_u8().ok_or(Error::NotAnImage)?;
228            let dst_u8 = dst.as_u8_mut().ok_or(Error::NotAnImage)?;
229            let bg_map = bg_u8.map()?;
230            let mut dst_map = dst_u8.map()?;
231            let bg_slice = bg_map.as_slice();
232            let dst_slice = dst_map.as_mut_slice();
233            if bg_slice.len() != dst_slice.len() {
234                return Err(Error::InvalidShape(
235                    "background buffer size does not match dst".into(),
236                ));
237            }
238            dst_slice.copy_from_slice(bg_slice);
239        }
240        Ok(MaskOverlay {
241            background: None,
242            opacity: self.opacity.clamp(0.0, 1.0),
243        })
244    }
245}
246
/// Source/destination crop rectangles for a conversion, with an optional
/// fill color for destination areas outside the cropped region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from; `None` uses the full source.
    pub src_rect: Option<Rect>,
    /// Region of the destination to write into; `None` uses the full dest.
    pub dst_rect: Option<Rect>,
    /// Fill color for destination areas outside the cropped region
    /// (presumably RGBA byte order — confirm with the backend implementations).
    pub dst_color: Option<[u8; 4]>,
}
253
254impl Default for Crop {
255    fn default() -> Self {
256        Crop::new()
257    }
258}
259impl Crop {
260    // Creates a new Crop with default values (no cropping).
261    pub fn new() -> Self {
262        Crop {
263            src_rect: None,
264            dst_rect: None,
265            dst_color: None,
266        }
267    }
268
269    // Sets the source rectangle for cropping.
270    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
271        self.src_rect = src_rect;
272        self
273    }
274
275    // Sets the destination rectangle for cropping.
276    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
277        self.dst_rect = dst_rect;
278        self
279    }
280
281    // Sets the destination color for areas outside the cropped region.
282    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
283        self.dst_color = dst_color;
284        self
285    }
286
287    // Creates a new Crop with no cropping.
288    pub fn no_crop() -> Self {
289        Crop::new()
290    }
291
292    /// Validate crop rectangles against explicit dimensions.
293    pub(crate) fn check_crop_dims(
294        &self,
295        src_w: usize,
296        src_h: usize,
297        dst_w: usize,
298        dst_h: usize,
299    ) -> Result<(), Error> {
300        let src_ok = self
301            .src_rect
302            .is_none_or(|r| r.left + r.width <= src_w && r.top + r.height <= src_h);
303        let dst_ok = self
304            .dst_rect
305            .is_none_or(|r| r.left + r.width <= dst_w && r.top + r.height <= dst_h);
306        match (src_ok, dst_ok) {
307            (true, true) => Ok(()),
308            (true, false) => Err(Error::CropInvalid(format!(
309                "Dest crop invalid: {:?}",
310                self.dst_rect
311            ))),
312            (false, true) => Err(Error::CropInvalid(format!(
313                "Src crop invalid: {:?}",
314                self.src_rect
315            ))),
316            (false, false) => Err(Error::CropInvalid(format!(
317                "Dest and Src crop invalid: {:?} {:?}",
318                self.dst_rect, self.src_rect
319            ))),
320        }
321    }
322
323    /// Validate crop rectangles against TensorDyn source and destination.
324    pub fn check_crop_dyn(
325        &self,
326        src: &edgefirst_tensor::TensorDyn,
327        dst: &edgefirst_tensor::TensorDyn,
328    ) -> Result<(), Error> {
329        self.check_crop_dims(
330            src.width().unwrap_or(0),
331            src.height().unwrap_or(0),
332            dst.width().unwrap_or(0),
333            dst.height().unwrap_or(0),
334        )
335    }
336}
337
/// Axis-aligned rectangle in pixel coordinates (origin at the top-left,
/// per the `left`/`top` field naming).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// X offset of the left edge, in pixels.
    pub left: usize,
    /// Y offset of the top edge, in pixels.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
345
346impl Rect {
347    // Creates a new Rect with the specified left, top, width, and height.
348    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
349        Self {
350            left,
351            top,
352            width,
353            height,
354        }
355    }
356
357    // Checks if the rectangle is valid for the given TensorDyn image.
358    pub fn check_rect_dyn(&self, image: &TensorDyn) -> bool {
359        let w = image.width().unwrap_or(0);
360        let h = image.height().unwrap_or(0);
361        self.left + self.width <= w && self.top + self.height <= h
362    }
363}
364
/// Common interface implemented by each image-processing backend. The
/// `#[enum_dispatch]` attribute generates the dispatching implementation
/// on [`ImageProcessor`].
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `dst` - The destination image to be converted to.
    /// * `src` - The source image to convert from.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorDyn,
        dst: &mut TensorDyn,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draw pre-decoded detection boxes and segmentation masks onto `dst`.
    ///
    /// Supports two segmentation modes based on the mask channel count:
    /// - **Instance segmentation** (`C=1`): one `Segmentation` per detection,
    ///   `segmentation` and `detect` are zipped.
    /// - **Semantic segmentation** (`C>1`): a single `Segmentation` covering
    ///   all classes; only the first element is used.
    ///
    /// # Format requirements
    ///
    /// - CPU backend: `dst` must be `RGBA` or `RGB`.
    /// - OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
    /// - G2D backend: not implemented (returns `NotImplemented`).
    ///
    /// An empty `segmentation` slice is valid — only bounding boxes are drawn.
    ///
    /// `overlay` controls compositing: `background` replaces dst's base
    /// content; `opacity` scales mask alpha. Use `MaskOverlay::default()`
    /// for backward-compatible behaviour.
    fn draw_decoded_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly
    /// at the output resolution using bilinear sampling.
    ///
    /// `detect` and `proto_data.mask_coefficients` must have the same length
    /// (enforced by zip — excess entries are silently ignored). An empty
    /// `detect` slice is valid and returns immediately after drawing nothing.
    ///
    /// # Format requirements
    ///
    /// Same as [`draw_decoded_masks`](Self::draw_decoded_masks). G2D returns `NotImplemented`.
    ///
    /// `overlay` controls compositing — see [`draw_decoded_masks`](Self::draw_decoded_masks).
    fn draw_proto_masks(
        &mut self,
        dst: &mut TensorDyn,
        detect: &[DetectBox],
        proto_data: &ProtoData,
        overlay: MaskOverlay<'_>,
    ) -> Result<()>;

    /// Sets the colors used for rendering segmentation masks. Up to 20 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
446
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection and backend selection. The default configuration
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,

    /// Preferred compute backend.
    ///
    /// When set to a specific backend (not [`ComputeBackend::Auto`]), the
    /// processor initializes that backend plus CPU; if the requested
    /// hardware backend fails to initialize, a warning is logged and
    /// conversions fall back to the CPU converter.
    /// This takes precedence over `EDGEFIRST_FORCE_BACKEND` and the
    /// `EDGEFIRST_DISABLE_*` environment variables.
    ///
    /// - [`ComputeBackend::OpenGl`]: init OpenGL + CPU, skip G2D
    /// - [`ComputeBackend::G2d`]: init G2D + CPU, skip OpenGL
    /// - [`ComputeBackend::Cpu`]: init CPU only
    /// - [`ComputeBackend::Auto`]: existing env-var-driven selection
    pub backend: ComputeBackend,
}
478
/// Compute backend selection for [`ImageProcessor`].
///
/// Use with [`ImageProcessorConfig::backend`] to select which backend the
/// processor should prefer. When a specific backend is selected, the
/// processor initializes that backend plus CPU as a fallback. When `Auto`
/// is used, the existing environment-variable-driven selection applies.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum ComputeBackend {
    /// Auto-detect based on available hardware and environment variables
    /// (`EDGEFIRST_FORCE_BACKEND`, `EDGEFIRST_DISABLE_*`).
    #[default]
    Auto,
    /// CPU-only processing (no hardware acceleration).
    Cpu,
    /// Prefer G2D hardware blitter (+ CPU fallback). Linux only.
    G2d,
    /// Prefer OpenGL ES (+ CPU fallback). Linux with the `opengl` feature.
    OpenGl,
}
497
/// Backend forced via the `EDGEFIRST_FORCE_BACKEND` environment variable.
///
/// When set, the [`ImageProcessor`] only initializes and dispatches to the
/// selected backend — no fallback chain is used. Note that
/// [`ImageProcessorConfig::backend`] does *not* set this: the config-driven
/// paths in `with_config` always leave `forced_backend` as `None` and keep
/// CPU as a fallback.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    /// CPU converter only.
    Cpu,
    /// G2D hardware blitter only (Linux).
    G2d,
    /// OpenGL ES only (Linux, `opengl` feature).
    OpenGl,
}
509
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
///
/// Backend fields are public so callers can inspect which converters were
/// successfully initialized.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,

    /// When set, only the specified backend is used — no fallback chain.
    /// Only the `EDGEFIRST_FORCE_BACKEND` path in `with_config` sets this.
    pub(crate) forced_backend: Option<ForcedBackend>,
}
533
// SAFETY(review): these blanket impls assert that all contained backend
// handles are safe to move and share across threads. That soundness depends
// on CPUProcessor, G2DProcessor, and GLProcessorThreaded — not verifiable
// from this file; GL contexts in particular are typically thread-affine,
// so confirm each backend's internal threading model before relying on this.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
536
537impl ImageProcessor {
    /// Creates a new `ImageProcessor` instance, initializing available
    /// hardware converters based on the system capabilities and environment
    /// variables.
    ///
    /// Equivalent to `ImageProcessor::with_config(ImageProcessorConfig::default())`.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::{ImageProcessor, Rotation, Flip, Crop, ImageProcessorTrait, load_image};
    /// # use edgefirst_tensor::{PixelFormat, DType, TensorDyn};
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let image = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
    /// let src = load_image(image, Some(PixelFormat::Rgba), None)?;
    /// let mut converter = ImageProcessor::new()?;
    /// let mut dst = converter.create_image(640, 480, PixelFormat::Rgb, DType::U8, None)?;
    /// converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new() -> Result<Self> {
        Self::with_config(ImageProcessorConfig::default())
    }
558
    /// Creates a new `ImageProcessor` with the given configuration.
    ///
    /// When [`ImageProcessorConfig::backend`] is set to a specific backend,
    /// environment variables are ignored and the processor initializes the
    /// requested backend plus CPU as a fallback.
    ///
    /// When `Auto`, the existing `EDGEFIRST_FORCE_BACKEND` and
    /// `EDGEFIRST_DISABLE_*` environment variables apply.
    ///
    /// # Errors
    ///
    /// Returns [`Error::ForcedBackendUnavailable`] when
    /// `EDGEFIRST_FORCE_BACKEND` names an unknown value, or names a backend
    /// that is unavailable on this platform or fails to initialize.
    #[allow(unused_variables)] // `config` is unused on non-Linux / non-opengl builds
    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
        // ── Config-driven backend selection ──────────────────────────
        // When the caller explicitly requests a backend via the config,
        // skip all environment variable logic.
        match config.backend {
            ComputeBackend::Cpu => {
                log::info!("ComputeBackend::Cpu — CPU only");
                return Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: None,
                });
            }
            ComputeBackend::G2d => {
                log::info!("ComputeBackend::G2d — G2D + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // NOTE: unlike the forced-backend env path below, a G2D
                    // init failure here is non-fatal — we log and fall back
                    // to the CPU converter.
                    let g2d = match G2DProcessor::new() {
                        Ok(g) => Some(g),
                        Err(e) => {
                            log::warn!("G2D requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d,
                        #[cfg(feature = "opengl")]
                        opengl: None,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("G2D requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::OpenGl => {
                log::info!("ComputeBackend::OpenGl — OpenGL + CPU fallback");
                #[cfg(target_os = "linux")]
                {
                    // Same non-fatal policy as G2d above: warn and fall back
                    // to CPU when GL initialization fails.
                    #[cfg(feature = "opengl")]
                    let opengl = match GLProcessorThreaded::new(config.egl_display) {
                        Ok(gl) => Some(gl),
                        Err(e) => {
                            log::warn!("OpenGL requested but failed to initialize: {e:?}");
                            None
                        }
                    };
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        g2d: None,
                        #[cfg(feature = "opengl")]
                        opengl,
                        forced_backend: None,
                    });
                }
                #[cfg(not(target_os = "linux"))]
                {
                    log::warn!("OpenGL requested but not available on this platform, using CPU");
                    return Ok(Self {
                        cpu: Some(CPUProcessor::new()),
                        forced_backend: None,
                    });
                }
            }
            ComputeBackend::Auto => { /* fall through to env-var logic below */ }
        }

        // ── EDGEFIRST_FORCE_BACKEND ──────────────────────────────────
        // When set, only the requested backend is initialised and no
        // fallback chain is used. Accepted values (case-insensitive):
        //   "cpu", "g2d", "opengl"
        if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
            let val_lower = val.to_lowercase();
            let forced = match val_lower.as_str() {
                "cpu" => ForcedBackend::Cpu,
                "g2d" => ForcedBackend::G2d,
                "opengl" => ForcedBackend::OpenGl,
                other => {
                    return Err(Error::ForcedBackendUnavailable(format!(
                        "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
                    )));
                }
            };

            log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");

            // In contrast to the config-driven paths above, failure to
            // initialize a forced backend is a hard error — there is no
            // CPU fallback here by design (benchmarking isolation).
            return match forced {
                ForcedBackend::Cpu => Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: Some(ForcedBackend::Cpu),
                }),
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    {
                        let g2d = G2DProcessor::new().map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "g2d forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: Some(g2d),
                            #[cfg(feature = "opengl")]
                            opengl: None,
                            forced_backend: Some(ForcedBackend::G2d),
                        })
                    }
                    #[cfg(not(target_os = "linux"))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "g2d backend is only available on Linux".into(),
                        ))
                    }
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    {
                        let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "opengl forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: None,
                            opengl: Some(opengl),
                            forced_backend: Some(ForcedBackend::OpenGl),
                        })
                    }
                    #[cfg(not(all(target_os = "linux", feature = "opengl")))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "opengl backend requires Linux with the 'opengl' feature enabled"
                                .into(),
                        ))
                    }
                }
            };
        }

        // ── Existing DISABLE logic (unchanged) ──────────────────────
        // Any value other than "0" / "false" (case-insensitive) counts as
        // the disable flag being set.
        #[cfg(target_os = "linux")]
        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_G2D is set");
            None
        } else {
            match G2DProcessor::new() {
                Ok(g2d_converter) => Some(g2d_converter),
                Err(err) => {
                    log::warn!("Failed to initialize G2D converter: {err:?}");
                    None
                }
            }
        };

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_GL is set");
            None
        } else {
            match GLProcessorThreaded::new(config.egl_display) {
                Ok(gl_converter) => Some(gl_converter),
                Err(err) => {
                    log::warn!("Failed to initialize GL converter: {err:?}");
                    None
                }
            }
        };

        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_CPU is set");
            None
        } else {
            Some(CPUProcessor::new())
        };
        Ok(Self {
            cpu,
            #[cfg(target_os = "linux")]
            g2d,
            #[cfg(target_os = "linux")]
            #[cfg(feature = "opengl")]
            opengl,
            forced_backend: None,
        })
    }
779
780    /// Sets the interpolation mode for int8 proto textures on the OpenGL
781    /// backend. No-op if OpenGL is not available.
782    #[cfg(target_os = "linux")]
783    #[cfg(feature = "opengl")]
784    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
785        if let Some(ref mut gl) = self.opengl {
786            gl.set_int8_interpolation_mode(mode)?;
787        }
788        Ok(())
789    }
790
791    /// Create a [`TensorDyn`] image with the best available memory backend.
792    ///
793    /// Priority: DMA-buf → PBO (byte-sized types: u8, i8) → system memory.
794    ///
795    /// Use this method instead of [`TensorDyn::image()`] when the tensor will
796    /// be used with [`ImageProcessor::convert()`]. It selects the optimal
797    /// memory backing (including PBO for GPU zero-copy) which direct
798    /// allocation cannot achieve.
799    ///
800    /// This method is on [`ImageProcessor`] rather than [`ImageProcessorTrait`]
801    /// because optimal allocation requires knowledge of the active compute
802    /// backends (e.g. the GL context handle for PBO allocation). Individual
803    /// backend implementations ([`CPUProcessor`], etc.) do not have this
804    /// cross-backend visibility.
805    ///
806    /// # Arguments
807    ///
808    /// * `width` - Image width in pixels
809    /// * `height` - Image height in pixels
810    /// * `format` - Pixel format
811    /// * `dtype` - Element data type (e.g. `DType::U8`, `DType::I8`)
812    /// * `memory` - Optional memory type override; when `None`, the best
813    ///   available backend is selected automatically.
814    ///
815    /// # Returns
816    ///
817    /// A [`TensorDyn`] backed by the highest-performance memory type
818    /// available on this system.
819    ///
820    /// # Errors
821    ///
822    /// Returns an error if all allocation strategies fail.
823    pub fn create_image(
824        &self,
825        width: usize,
826        height: usize,
827        format: PixelFormat,
828        dtype: DType,
829        memory: Option<TensorMemory>,
830    ) -> Result<TensorDyn> {
831        // If an explicit memory type is requested, honour it directly.
832        if let Some(mem) = memory {
833            return Ok(TensorDyn::image(width, height, format, dtype, Some(mem))?);
834        }
835
836        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
837        // as the preferred transfer path (PBO is better than DMA in that case).
838        #[cfg(target_os = "linux")]
839        {
840            #[cfg(feature = "opengl")]
841            let gl_uses_pbo = self
842                .opengl
843                .as_ref()
844                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
845            #[cfg(not(feature = "opengl"))]
846            let gl_uses_pbo = false;
847
848            if !gl_uses_pbo {
849                if let Ok(img) = TensorDyn::image(
850                    width,
851                    height,
852                    format,
853                    dtype,
854                    Some(edgefirst_tensor::TensorMemory::Dma),
855                ) {
856                    return Ok(img);
857                }
858            }
859        }
860
861        // Try PBO (if GL available).
862        // PBO buffers are u8-sized; the int8 shader emulates i8 output via
863        // XOR 0x80 on the same underlying buffer, so both U8 and I8 work.
864        #[cfg(target_os = "linux")]
865        #[cfg(feature = "opengl")]
866        if dtype.size() == 1 {
867            if let Some(gl) = &self.opengl {
868                match gl.create_pbo_image(width, height, format) {
869                    Ok(t) => {
870                        if dtype == DType::I8 {
871                            // SAFETY: Tensor<u8> and Tensor<i8> are layout-
872                            // identical (same element size, no T-dependent
873                            // drop glue). The int8 shader applies XOR 0x80
874                            // on the same PBO buffer. Same rationale as
875                            // gl::processor::tensor_i8_as_u8_mut.
876                            // Invariant: PBO tensors never have chroma
877                            // (create_pbo_image → Tensor::wrap sets it None).
878                            debug_assert!(
879                                t.chroma().is_none(),
880                                "PBO i8 transmute requires chroma == None"
881                            );
882                            let t_i8: Tensor<i8> = unsafe { std::mem::transmute(t) };
883                            return Ok(TensorDyn::from(t_i8));
884                        }
885                        return Ok(TensorDyn::from(t));
886                    }
887                    Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
888                }
889            }
890        }
891
892        // Fallback to Mem
893        Ok(TensorDyn::image(
894            width,
895            height,
896            format,
897            dtype,
898            Some(edgefirst_tensor::TensorMemory::Mem),
899        )?)
900    }
901
    /// Import an external DMA-BUF image.
    ///
    /// Each [`PlaneDescriptor`] owns an already-duped fd; this method
    /// consumes the descriptors and takes ownership of those fds (whether
    /// the call succeeds or fails).
    ///
    /// The caller must ensure the DMA-BUF allocation is large enough for the
    /// specified width, height, format, and any stride/offset on the plane
    /// descriptors. No buffer-size validation is performed; an undersized
    /// buffer may cause GPU faults or EGL import failure.
    ///
    /// # Arguments
    ///
    /// * `image` - Plane descriptor for the primary (or only) plane
    /// * `chroma` - Optional plane descriptor for the UV chroma plane
    ///   (required for multiplane NV12)
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `format` - Pixel format of the buffer
    /// * `dtype` - Element data type (e.g. `DType::U8`)
    ///
    /// # Returns
    ///
    /// A `TensorDyn` configured as an image.
    ///
    /// # Errors
    ///
    /// * [`Error::NotSupported`] if `chroma` is `Some` for a non-semi-planar
    ///   format, or multiplane NV16 (not yet supported), or the fd is not
    ///   DMA-backed
    /// * [`Error::InvalidShape`] if NV12 height is odd
    ///
    /// # Platform
    ///
    /// Linux only.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use edgefirst_tensor::PlaneDescriptor;
    ///
    /// // Single-plane RGBA
    /// let pd = PlaneDescriptor::new(fd.as_fd())?;
    /// let src = proc.import_image(pd, None, 1920, 1080, PixelFormat::Rgba, DType::U8)?;
    ///
    /// // Multi-plane NV12 with stride
    /// let y_pd = PlaneDescriptor::new(y_fd.as_fd())?.with_stride(2048);
    /// let uv_pd = PlaneDescriptor::new(uv_fd.as_fd())?.with_stride(2048);
    /// let src = proc.import_image(y_pd, Some(uv_pd), 1920, 1080,
    ///                             PixelFormat::Nv12, DType::U8)?;
    /// ```
    #[cfg(target_os = "linux")]
    pub fn import_image(
        &self,
        image: edgefirst_tensor::PlaneDescriptor,
        chroma: Option<edgefirst_tensor::PlaneDescriptor>,
        width: usize,
        height: usize,
        format: PixelFormat,
        dtype: DType,
    ) -> Result<TensorDyn> {
        use edgefirst_tensor::{Tensor, TensorMemory};

        // Capture stride/offset from descriptors before consuming them
        let image_stride = image.stride();
        let image_offset = image.offset();
        let chroma_stride = chroma.as_ref().and_then(|c| c.stride());
        let chroma_offset = chroma.as_ref().and_then(|c| c.offset());

        if let Some(chroma_pd) = chroma {
            // ── Multiplane path ──────────────────────────────────────
            // Multiplane tensors are backed by Tensor<u8> (or transmuted to
            // Tensor<i8>). Reject other dtypes to avoid silently returning a
            // tensor with the wrong element type.
            if dtype != DType::U8 && dtype != DType::I8 {
                return Err(Error::NotSupported(format!(
                    "multiplane import only supports U8/I8, got {dtype:?}"
                )));
            }
            if format.layout() != PixelLayout::SemiPlanar {
                return Err(Error::NotSupported(format!(
                    "import_image with chroma requires a semi-planar format, got {format:?}"
                )));
            }

            // Determine the chroma plane height for the given format before
            // touching either fd.
            let chroma_h = match format {
                PixelFormat::Nv12 => {
                    // NV12 subsamples chroma vertically by 2, so an odd
                    // height cannot be represented.
                    if !height.is_multiple_of(2) {
                        return Err(Error::InvalidShape(format!(
                            "NV12 requires even height, got {height}"
                        )));
                    }
                    height / 2
                }
                // NV16 multiplane will be supported in a future release;
                // the GL backend currently only handles NV12 plane1 attributes.
                PixelFormat::Nv16 => {
                    return Err(Error::NotSupported(
                        "multiplane NV16 is not yet supported; use contiguous NV16 instead".into(),
                    ))
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported semi-planar format: {format:?}"
                    )))
                }
            };

            // into_fd() transfers ownership of the duped fd to the tensor;
            // on any error below the fds are dropped (and closed) with it.
            let luma = Tensor::<u8>::from_fd(image.into_fd(), &[height, width], Some("luma"))?;
            if luma.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "luma fd must be DMA-backed, got {:?}",
                    luma.memory()
                )));
            }

            let chroma_tensor =
                Tensor::<u8>::from_fd(chroma_pd.into_fd(), &[chroma_h, width], Some("chroma"))?;
            if chroma_tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "chroma fd must be DMA-backed, got {:?}",
                    chroma_tensor.memory()
                )));
            }

            // from_planes creates the combined tensor with format set,
            // preserving luma's row_stride (currently None since luma was raw).
            let mut tensor = Tensor::<u8>::from_planes(luma, chroma_tensor, format)?;

            // Apply stride/offset to the combined tensor (luma plane)
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }

            // Apply stride/offset to the chroma sub-tensor.
            // The chroma tensor is a raw 2D [chroma_h, width] tensor without
            // format metadata, so we validate stride manually rather than
            // using set_row_stride (which requires format).
            if let Some(chroma_ref) = tensor.chroma_mut() {
                if let Some(s) = chroma_stride {
                    // A chroma row occupies `width` bytes here (the shape
                    // above is [chroma_h, width]), so any stride below that
                    // would make rows overlap.
                    if s < width {
                        return Err(Error::InvalidShape(format!(
                            "chroma stride {s} < minimum {width} for {format:?}"
                        )));
                    }
                    chroma_ref.set_row_stride_unchecked(s);
                }
                if let Some(o) = chroma_offset {
                    chroma_ref.set_plane_offset(o);
                }
            }

            if dtype == DType::I8 {
                // SAFETY: Tensor<u8> and Tensor<i8> have identical layout because
                // the struct contains only type-erased storage (OwnedFd, shape, name),
                // no inline T values. This assertion catches layout drift at compile time.
                const {
                    assert!(std::mem::size_of::<Tensor<u8>>() == std::mem::size_of::<Tensor<i8>>());
                    assert!(
                        std::mem::align_of::<Tensor<u8>>() == std::mem::align_of::<Tensor<i8>>()
                    );
                }
                let tensor_i8: Tensor<i8> = unsafe { std::mem::transmute(tensor) };
                return Ok(TensorDyn::from(tensor_i8));
            }
            Ok(TensorDyn::from(tensor))
        } else {
            // ── Single-plane path ────────────────────────────────────
            // Derive the raw tensor shape from the pixel layout; semi-planar
            // formats store luma and chroma contiguously in one plane, so the
            // height is scaled to cover both.
            let shape = match format.layout() {
                PixelLayout::Packed => vec![height, width, format.channels()],
                PixelLayout::Planar => vec![format.channels(), height, width],
                PixelLayout::SemiPlanar => {
                    let total_h = match format {
                        PixelFormat::Nv12 => {
                            if !height.is_multiple_of(2) {
                                return Err(Error::InvalidShape(format!(
                                    "NV12 requires even height, got {height}"
                                )));
                            }
                            // Y plane (height rows) + half-height UV plane.
                            height * 3 / 2
                        }
                        // Full-height chroma plane appended below the luma.
                        PixelFormat::Nv16 => height * 2,
                        _ => {
                            return Err(Error::InvalidShape(format!(
                                "unknown semi-planar height multiplier for {format:?}"
                            )))
                        }
                    };
                    vec![total_h, width]
                }
                _ => {
                    return Err(Error::NotSupported(format!(
                        "unsupported pixel layout for import_image: {:?}",
                        format.layout()
                    )));
                }
            };
            let tensor = TensorDyn::from_fd(image.into_fd(), &shape, dtype, None)?;
            if tensor.memory() != TensorMemory::Dma {
                return Err(Error::NotSupported(format!(
                    "import_image requires DMA-backed fd, got {:?}",
                    tensor.memory()
                )));
            }
            let mut tensor = tensor.with_format(format)?;
            if let Some(s) = image_stride {
                tensor.set_row_stride(s)?;
            }
            if let Some(o) = image_offset {
                tensor.set_plane_offset(o);
            }
            Ok(tensor)
        }
    }
1119
1120    /// Decode model outputs and draw segmentation masks onto `dst`.
1121    ///
1122    /// This is the primary mask rendering API. The processor decodes via the
1123    /// provided [`Decoder`], selects the optimal rendering path (hybrid
1124    /// CPU+GL or fused GPU), and composites masks onto `dst`.
1125    ///
1126    /// Returns the detected bounding boxes.
1127    pub fn draw_masks(
1128        &mut self,
1129        decoder: &edgefirst_decoder::Decoder,
1130        outputs: &[&TensorDyn],
1131        dst: &mut TensorDyn,
1132        overlay: MaskOverlay<'_>,
1133    ) -> Result<Vec<DetectBox>> {
1134        let mut output_boxes = Vec::with_capacity(100);
1135
1136        // Try proto path first (fused rendering without materializing masks)
1137        let proto_result = decoder
1138            .decode_proto(outputs, &mut output_boxes)
1139            .map_err(|e| Error::Internal(format!("decode_proto: {e:#?}")))?;
1140
1141        if let Some(proto_data) = proto_result {
1142            self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
1143        } else {
1144            // Detection-only or unsupported model: full decode + render
1145            let mut output_masks = Vec::with_capacity(100);
1146            decoder
1147                .decode(outputs, &mut output_boxes, &mut output_masks)
1148                .map_err(|e| Error::Internal(format!("decode: {e:#?}")))?;
1149            self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
1150        }
1151        Ok(output_boxes)
1152    }
1153
1154    /// Decode tracked model outputs and draw segmentation masks onto `dst`.
1155    ///
1156    /// Like [`draw_masks`](Self::draw_masks) but integrates a tracker for
1157    /// maintaining object identities across frames. The tracker runs after
1158    /// NMS but before mask extraction.
1159    ///
1160    /// Returns detected boxes and track info.
1161    #[cfg(feature = "tracker")]
1162    pub fn draw_masks_tracked<TR: edgefirst_tracker::Tracker<DetectBox>>(
1163        &mut self,
1164        decoder: &edgefirst_decoder::Decoder,
1165        tracker: &mut TR,
1166        timestamp: u64,
1167        outputs: &[&TensorDyn],
1168        dst: &mut TensorDyn,
1169        overlay: MaskOverlay<'_>,
1170    ) -> Result<(Vec<DetectBox>, Vec<edgefirst_tracker::TrackInfo>)> {
1171        let mut output_boxes = Vec::with_capacity(100);
1172        let mut output_tracks = Vec::new();
1173
1174        let proto_result = decoder
1175            .decode_proto_tracked(
1176                tracker,
1177                timestamp,
1178                outputs,
1179                &mut output_boxes,
1180                &mut output_tracks,
1181            )
1182            .map_err(|e| Error::Internal(format!("decode_proto_tracked: {e:#?}")))?;
1183
1184        if let Some(proto_data) = proto_result {
1185            self.draw_proto_masks(dst, &output_boxes, &proto_data, overlay)?;
1186        } else {
1187            // Note: decode_proto_tracked returns None for detection-only/ModelPack
1188            // models WITHOUT calling the tracker. The else branch below is the
1189            // first (and only) tracker call for those model types.
1190            let mut output_masks = Vec::with_capacity(100);
1191            decoder
1192                .decode_tracked(
1193                    tracker,
1194                    timestamp,
1195                    outputs,
1196                    &mut output_boxes,
1197                    &mut output_masks,
1198                    &mut output_tracks,
1199                )
1200                .map_err(|e| Error::Internal(format!("decode_tracked: {e:#?}")))?;
1201            self.draw_decoded_masks(dst, &output_boxes, &output_masks, overlay)?;
1202        }
1203        Ok((output_boxes, output_tracks))
1204    }
1205}
1206
1207impl ImageProcessorTrait for ImageProcessor {
1208    /// Converts the source image to the destination image format and size. The
1209    /// image is cropped first, then flipped, then rotated
1210    ///
1211    /// Prefer hardware accelerators when available, falling back to CPU if
1212    /// necessary.
1213    fn convert(
1214        &mut self,
1215        src: &TensorDyn,
1216        dst: &mut TensorDyn,
1217        rotation: Rotation,
1218        flip: Flip,
1219        crop: Crop,
1220    ) -> Result<()> {
1221        let start = Instant::now();
1222        let src_fmt = src.format();
1223        let dst_fmt = dst.format();
1224        log::trace!(
1225            "convert: {src_fmt:?}({:?}/{:?}) → {dst_fmt:?}({:?}/{:?}), \
1226             rotation={rotation:?}, flip={flip:?}, backend={:?}",
1227            src.dtype(),
1228            src.memory(),
1229            dst.dtype(),
1230            dst.memory(),
1231            self.forced_backend,
1232        );
1233
1234        // ── Forced backend: no fallback chain ────────────────────────
1235        if let Some(forced) = self.forced_backend {
1236            return match forced {
1237                ForcedBackend::Cpu => {
1238                    if let Some(cpu) = self.cpu.as_mut() {
1239                        let r = cpu.convert(src, dst, rotation, flip, crop);
1240                        log::trace!(
1241                            "convert: forced=cpu result={} ({:?})",
1242                            if r.is_ok() { "ok" } else { "err" },
1243                            start.elapsed()
1244                        );
1245                        return r;
1246                    }
1247                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1248                }
1249                ForcedBackend::G2d => {
1250                    #[cfg(target_os = "linux")]
1251                    if let Some(g2d) = self.g2d.as_mut() {
1252                        let r = g2d.convert(src, dst, rotation, flip, crop);
1253                        log::trace!(
1254                            "convert: forced=g2d result={} ({:?})",
1255                            if r.is_ok() { "ok" } else { "err" },
1256                            start.elapsed()
1257                        );
1258                        return r;
1259                    }
1260                    Err(Error::ForcedBackendUnavailable("g2d".into()))
1261                }
1262                ForcedBackend::OpenGl => {
1263                    #[cfg(target_os = "linux")]
1264                    #[cfg(feature = "opengl")]
1265                    if let Some(opengl) = self.opengl.as_mut() {
1266                        let r = opengl.convert(src, dst, rotation, flip, crop);
1267                        log::trace!(
1268                            "convert: forced=opengl result={} ({:?})",
1269                            if r.is_ok() { "ok" } else { "err" },
1270                            start.elapsed()
1271                        );
1272                        return r;
1273                    }
1274                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1275                }
1276            };
1277        }
1278
1279        // ── Auto fallback chain: OpenGL → G2D → CPU ──────────────────
1280        #[cfg(target_os = "linux")]
1281        #[cfg(feature = "opengl")]
1282        if let Some(opengl) = self.opengl.as_mut() {
1283            match opengl.convert(src, dst, rotation, flip, crop) {
1284                Ok(_) => {
1285                    log::trace!(
1286                        "convert: auto selected=opengl for {src_fmt:?}→{dst_fmt:?} ({:?})",
1287                        start.elapsed()
1288                    );
1289                    return Ok(());
1290                }
1291                Err(e) => {
1292                    log::trace!("convert: auto opengl declined {src_fmt:?}→{dst_fmt:?}: {e}");
1293                }
1294            }
1295        }
1296
1297        #[cfg(target_os = "linux")]
1298        if let Some(g2d) = self.g2d.as_mut() {
1299            match g2d.convert(src, dst, rotation, flip, crop) {
1300                Ok(_) => {
1301                    log::trace!(
1302                        "convert: auto selected=g2d for {src_fmt:?}→{dst_fmt:?} ({:?})",
1303                        start.elapsed()
1304                    );
1305                    return Ok(());
1306                }
1307                Err(e) => {
1308                    log::trace!("convert: auto g2d declined {src_fmt:?}→{dst_fmt:?}: {e}");
1309                }
1310            }
1311        }
1312
1313        if let Some(cpu) = self.cpu.as_mut() {
1314            match cpu.convert(src, dst, rotation, flip, crop) {
1315                Ok(_) => {
1316                    log::trace!(
1317                        "convert: auto selected=cpu for {src_fmt:?}→{dst_fmt:?} ({:?})",
1318                        start.elapsed()
1319                    );
1320                    return Ok(());
1321                }
1322                Err(e) => {
1323                    log::trace!("convert: auto cpu failed {src_fmt:?}→{dst_fmt:?}: {e}");
1324                    return Err(e);
1325                }
1326            }
1327        }
1328        Err(Error::NoConverter)
1329    }
1330
1331    fn draw_decoded_masks(
1332        &mut self,
1333        dst: &mut TensorDyn,
1334        detect: &[DetectBox],
1335        segmentation: &[Segmentation],
1336        overlay: MaskOverlay<'_>,
1337    ) -> Result<()> {
1338        let start = Instant::now();
1339
1340        if detect.is_empty() && segmentation.is_empty() {
1341            return Ok(());
1342        }
1343
1344        // ── Forced backend: no fallback chain ────────────────────────
1345        if let Some(forced) = self.forced_backend {
1346            return match forced {
1347                ForcedBackend::Cpu => {
1348                    // CPU needs background pre-blitted
1349                    let overlay = overlay.apply_background(dst)?;
1350                    if let Some(cpu) = self.cpu.as_mut() {
1351                        return cpu.draw_decoded_masks(dst, detect, segmentation, overlay);
1352                    }
1353                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1354                }
1355                ForcedBackend::G2d => Err(Error::NotSupported(
1356                    "g2d does not support draw_decoded_masks".into(),
1357                )),
1358                ForcedBackend::OpenGl => {
1359                    // GL handles background natively via GPU blit
1360                    #[cfg(target_os = "linux")]
1361                    #[cfg(feature = "opengl")]
1362                    if let Some(opengl) = self.opengl.as_mut() {
1363                        return opengl.draw_decoded_masks(dst, detect, segmentation, overlay);
1364                    }
1365                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1366                }
1367            };
1368        }
1369
1370        // skip G2D as it doesn't support rendering to image
1371
1372        // GL path: pass overlay with background — GL will GPU-blit if DMA-BUF
1373        #[cfg(target_os = "linux")]
1374        #[cfg(feature = "opengl")]
1375        if let Some(opengl) = self.opengl.as_mut() {
1376            log::trace!(
1377                "draw_decoded_masks started with opengl in {:?}",
1378                start.elapsed()
1379            );
1380            match opengl.draw_decoded_masks(dst, detect, segmentation, overlay) {
1381                Ok(_) => {
1382                    log::trace!("draw_decoded_masks with opengl in {:?}", start.elapsed());
1383                    return Ok(());
1384                }
1385                Err(e) => {
1386                    log::trace!("draw_decoded_masks didn't work with opengl: {e:?}")
1387                }
1388            }
1389        }
1390
1391        // CPU fallback: blit background via memcpy before rendering
1392        let overlay = overlay.apply_background(dst)?;
1393        log::trace!(
1394            "draw_decoded_masks started with cpu in {:?}",
1395            start.elapsed()
1396        );
1397        if let Some(cpu) = self.cpu.as_mut() {
1398            match cpu.draw_decoded_masks(dst, detect, segmentation, overlay) {
1399                Ok(_) => {
1400                    log::trace!("draw_decoded_masks with cpu in {:?}", start.elapsed());
1401                    return Ok(());
1402                }
1403                Err(e) => {
1404                    log::trace!("draw_decoded_masks didn't work with cpu: {e:?}");
1405                    return Err(e);
1406                }
1407            }
1408        }
1409        Err(Error::NoConverter)
1410    }
1411
1412    fn draw_proto_masks(
1413        &mut self,
1414        dst: &mut TensorDyn,
1415        detect: &[DetectBox],
1416        proto_data: &ProtoData,
1417        overlay: MaskOverlay<'_>,
1418    ) -> Result<()> {
1419        let start = Instant::now();
1420
1421        if detect.is_empty() {
1422            return Ok(());
1423        }
1424
1425        // ── Forced backend: no fallback chain ────────────────────────
1426        if let Some(forced) = self.forced_backend {
1427            return match forced {
1428                ForcedBackend::Cpu => {
1429                    let overlay = overlay.apply_background(dst)?;
1430                    if let Some(cpu) = self.cpu.as_mut() {
1431                        return cpu.draw_proto_masks(dst, detect, proto_data, overlay);
1432                    }
1433                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1434                }
1435                ForcedBackend::G2d => Err(Error::NotSupported(
1436                    "g2d does not support draw_proto_masks".into(),
1437                )),
1438                ForcedBackend::OpenGl => {
1439                    #[cfg(target_os = "linux")]
1440                    #[cfg(feature = "opengl")]
1441                    if let Some(opengl) = self.opengl.as_mut() {
1442                        return opengl.draw_proto_masks(dst, detect, proto_data, overlay);
1443                    }
1444                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1445                }
1446            };
1447        }
1448
1449        // skip G2D as it doesn't support rendering to image
1450
1451        // Hybrid path: CPU materialize + GL overlay (benchmarked faster than
1452        // full-GPU draw_proto_masks on all tested platforms: 27× on imx8mp,
1453        // 4× on imx95, 2.5× on rpi5, 1.6× on x86).
1454        // GL handles background natively via GPU blit.
1455        #[cfg(target_os = "linux")]
1456        #[cfg(feature = "opengl")]
1457        if let Some(opengl) = self.opengl.as_mut() {
1458            let Some(cpu) = self.cpu.as_ref() else {
1459                return Err(Error::Internal(
1460                    "draw_proto_masks requires CPU backend for hybrid path".into(),
1461                ));
1462            };
1463            log::trace!(
1464                "draw_proto_masks started with hybrid (cpu+opengl) in {:?}",
1465                start.elapsed()
1466            );
1467            let segmentation = cpu.materialize_segmentations(detect, proto_data)?;
1468            match opengl.draw_decoded_masks(dst, detect, &segmentation, overlay) {
1469                Ok(_) => {
1470                    log::trace!(
1471                        "draw_proto_masks with hybrid (cpu+opengl) in {:?}",
1472                        start.elapsed()
1473                    );
1474                    return Ok(());
1475                }
1476                Err(e) => {
1477                    log::trace!("draw_proto_masks hybrid path failed, falling back to cpu: {e:?}");
1478                }
1479            }
1480        }
1481
1482        // CPU-only fallback: blit background via memcpy
1483        let overlay = overlay.apply_background(dst)?;
1484        let Some(cpu) = self.cpu.as_mut() else {
1485            return Err(Error::Internal(
1486                "draw_proto_masks requires CPU backend for fallback path".into(),
1487            ));
1488        };
1489        log::trace!("draw_proto_masks started with cpu in {:?}", start.elapsed());
1490        cpu.draw_proto_masks(dst, detect, proto_data, overlay)
1491    }
1492
1493    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
1494        let start = Instant::now();
1495
1496        // ── Forced backend: no fallback chain ────────────────────────
1497        if let Some(forced) = self.forced_backend {
1498            return match forced {
1499                ForcedBackend::Cpu => {
1500                    if let Some(cpu) = self.cpu.as_mut() {
1501                        return cpu.set_class_colors(colors);
1502                    }
1503                    Err(Error::ForcedBackendUnavailable("cpu".into()))
1504                }
1505                ForcedBackend::G2d => Err(Error::NotSupported(
1506                    "g2d does not support set_class_colors".into(),
1507                )),
1508                ForcedBackend::OpenGl => {
1509                    #[cfg(target_os = "linux")]
1510                    #[cfg(feature = "opengl")]
1511                    if let Some(opengl) = self.opengl.as_mut() {
1512                        return opengl.set_class_colors(colors);
1513                    }
1514                    Err(Error::ForcedBackendUnavailable("opengl".into()))
1515                }
1516            };
1517        }
1518
1519        // skip G2D as it doesn't support rendering to image
1520
1521        #[cfg(target_os = "linux")]
1522        #[cfg(feature = "opengl")]
1523        if let Some(opengl) = self.opengl.as_mut() {
1524            log::trace!("image started with opengl in {:?}", start.elapsed());
1525            match opengl.set_class_colors(colors) {
1526                Ok(_) => {
1527                    log::trace!("colors set with opengl in {:?}", start.elapsed());
1528                    return Ok(());
1529                }
1530                Err(e) => {
1531                    log::trace!("colors didn't set with opengl: {e:?}")
1532                }
1533            }
1534        }
1535        log::trace!("image started with cpu in {:?}", start.elapsed());
1536        if let Some(cpu) = self.cpu.as_mut() {
1537            match cpu.set_class_colors(colors) {
1538                Ok(_) => {
1539                    log::trace!("colors set with cpu in {:?}", start.elapsed());
1540                    return Ok(());
1541                }
1542                Err(e) => {
1543                    log::trace!("colors didn't set with cpu: {e:?}");
1544                    return Err(e);
1545                }
1546            }
1547        }
1548        Err(Error::NoConverter)
1549    }
1550}
1551
1552// ---------------------------------------------------------------------------
1553// Image loading / saving helpers
1554// ---------------------------------------------------------------------------
1555
1556/// Read EXIF orientation from raw EXIF bytes and return (Rotation, Flip).
1557fn read_exif_orientation(exif_bytes: &[u8]) -> (Rotation, Flip) {
1558    let exifreader = exif::Reader::new();
1559    let Ok(exif_) = exifreader.read_raw(exif_bytes.to_vec()) else {
1560        return (Rotation::None, Flip::None);
1561    };
1562    let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
1563        return (Rotation::None, Flip::None);
1564    };
1565    match orientation.value.get_uint(0) {
1566        Some(1) => (Rotation::None, Flip::None),
1567        Some(2) => (Rotation::None, Flip::Horizontal),
1568        Some(3) => (Rotation::Rotate180, Flip::None),
1569        Some(4) => (Rotation::Rotate180, Flip::Horizontal),
1570        Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
1571        Some(6) => (Rotation::Clockwise90, Flip::None),
1572        Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
1573        Some(8) => (Rotation::CounterClockwise90, Flip::None),
1574        Some(v) => {
1575            log::warn!("broken orientation EXIF value: {v}");
1576            (Rotation::None, Flip::None)
1577        }
1578        None => (Rotation::None, Flip::None),
1579    }
1580}
1581
1582/// Map a [`PixelFormat`] to the zune-jpeg `ColorSpace` for decoding.
1583/// Returns `None` for formats that the JPEG decoder cannot output directly.
1584fn pixelfmt_to_colorspace(fmt: PixelFormat) -> Option<ColorSpace> {
1585    match fmt {
1586        PixelFormat::Rgb => Some(ColorSpace::RGB),
1587        PixelFormat::Rgba => Some(ColorSpace::RGBA),
1588        PixelFormat::Grey => Some(ColorSpace::Luma),
1589        _ => None,
1590    }
1591}
1592
1593/// Map a zune-jpeg `ColorSpace` to a [`PixelFormat`].
1594fn colorspace_to_pixelfmt(cs: ColorSpace) -> Option<PixelFormat> {
1595    match cs {
1596        ColorSpace::RGB => Some(PixelFormat::Rgb),
1597        ColorSpace::RGBA => Some(PixelFormat::Rgba),
1598        ColorSpace::Luma => Some(PixelFormat::Grey),
1599        _ => None,
1600    }
1601}
1602
1603/// Load a JPEG image from raw bytes and return a [`TensorDyn`].
1604fn load_jpeg(
1605    image: &[u8],
1606    format: Option<PixelFormat>,
1607    memory: Option<TensorMemory>,
1608) -> Result<TensorDyn> {
1609    let colour = match format {
1610        Some(f) => pixelfmt_to_colorspace(f)
1611            .ok_or_else(|| Error::NotSupported(format!("Unsupported image format {f:?}")))?,
1612        None => ColorSpace::RGB,
1613    };
1614    let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
1615    let mut decoder = JpegDecoder::new_with_options(image, options);
1616    decoder.decode_headers()?;
1617
1618    let image_info = decoder.info().ok_or(Error::Internal(
1619        "JPEG did not return decoded image info".to_string(),
1620    ))?;
1621
1622    let converted_cs = decoder
1623        .get_output_colorspace()
1624        .ok_or(Error::Internal("No output colorspace".to_string()))?;
1625
1626    let converted_fmt = colorspace_to_pixelfmt(converted_cs).ok_or(Error::NotSupported(
1627        "Unsupported JPEG decoder output".to_string(),
1628    ))?;
1629
1630    let dest_fmt = format.unwrap_or(converted_fmt);
1631
1632    let (rotation, flip) = decoder
1633        .exif()
1634        .map(|x| read_exif_orientation(x))
1635        .unwrap_or((Rotation::None, Flip::None));
1636
1637    let w = image_info.width as usize;
1638    let h = image_info.height as usize;
1639
1640    if (rotation, flip) == (Rotation::None, Flip::None) {
1641        let mut img = Tensor::<u8>::image(w, h, dest_fmt, memory)?;
1642
1643        if converted_fmt != dest_fmt {
1644            let tmp = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
1645            decoder.decode_into(&mut tmp.map()?)?;
1646            CPUProcessor::convert_format_pf(&tmp, &mut img, converted_fmt, dest_fmt)?;
1647            return Ok(TensorDyn::from(img));
1648        }
1649        decoder.decode_into(&mut img.map()?)?;
1650        return Ok(TensorDyn::from(img));
1651    }
1652
1653    let mut tmp = Tensor::<u8>::image(w, h, dest_fmt, Some(TensorMemory::Mem))?;
1654
1655    if converted_fmt != dest_fmt {
1656        let tmp2 = Tensor::<u8>::image(w, h, converted_fmt, Some(TensorMemory::Mem))?;
1657        decoder.decode_into(&mut tmp2.map()?)?;
1658        CPUProcessor::convert_format_pf(&tmp2, &mut tmp, converted_fmt, dest_fmt)?;
1659    } else {
1660        decoder.decode_into(&mut tmp.map()?)?;
1661    }
1662
1663    rotate_flip_to_dyn(&tmp, dest_fmt, rotation, flip, memory)
1664}
1665
1666/// Load a PNG image from raw bytes and return a [`TensorDyn`].
1667fn load_png(
1668    image: &[u8],
1669    format: Option<PixelFormat>,
1670    memory: Option<TensorMemory>,
1671) -> Result<TensorDyn> {
1672    let fmt = format.unwrap_or(PixelFormat::Rgb);
1673    let alpha = match fmt {
1674        PixelFormat::Rgb => false,
1675        PixelFormat::Rgba => true,
1676        _ => {
1677            return Err(Error::NotImplemented(
1678                "Unsupported image format".to_string(),
1679            ));
1680        }
1681    };
1682
1683    let options = DecoderOptions::default()
1684        .png_set_add_alpha_channel(alpha)
1685        .png_set_decode_animated(false);
1686    let mut decoder = PngDecoder::new_with_options(image, options);
1687    decoder.decode_headers()?;
1688    let image_info = decoder.get_info().ok_or(Error::Internal(
1689        "PNG did not return decoded image info".to_string(),
1690    ))?;
1691
1692    let (rotation, flip) = image_info
1693        .exif
1694        .as_ref()
1695        .map(|x| read_exif_orientation(x))
1696        .unwrap_or((Rotation::None, Flip::None));
1697
1698    if (rotation, flip) == (Rotation::None, Flip::None) {
1699        let img = Tensor::<u8>::image(image_info.width, image_info.height, fmt, memory)?;
1700        decoder.decode_into(&mut img.map()?)?;
1701        return Ok(TensorDyn::from(img));
1702    }
1703
1704    let tmp = Tensor::<u8>::image(
1705        image_info.width,
1706        image_info.height,
1707        fmt,
1708        Some(TensorMemory::Mem),
1709    )?;
1710    decoder.decode_into(&mut tmp.map()?)?;
1711
1712    rotate_flip_to_dyn(&tmp, fmt, rotation, flip, memory)
1713}
1714
1715/// Load an image from raw bytes (JPEG or PNG) and return a [`TensorDyn`].
1716///
1717/// The optional `format` specifies the desired output pixel format (e.g.,
1718/// [`PixelFormat::Rgb`], [`PixelFormat::Rgba`]); if `None`, the native
1719/// format of the file is used (typically RGB for JPEG).
1720///
1721/// # Examples
1722/// ```rust
1723/// use edgefirst_image::load_image;
1724/// use edgefirst_tensor::PixelFormat;
1725/// # fn main() -> Result<(), edgefirst_image::Error> {
1726/// let jpeg = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../../testdata/zidane.jpg"));
1727/// let img = load_image(jpeg, Some(PixelFormat::Rgb), None)?;
1728/// assert_eq!(img.width(), Some(1280));
1729/// assert_eq!(img.height(), Some(720));
1730/// # Ok(())
1731/// # }
1732/// ```
1733pub fn load_image(
1734    image: &[u8],
1735    format: Option<PixelFormat>,
1736    memory: Option<TensorMemory>,
1737) -> Result<TensorDyn> {
1738    if let Ok(i) = load_jpeg(image, format, memory) {
1739        return Ok(i);
1740    }
1741    if let Ok(i) = load_png(image, format, memory) {
1742        return Ok(i);
1743    }
1744    Err(Error::NotSupported(
1745        "Could not decode as jpeg or png".to_string(),
1746    ))
1747}
1748
1749/// Save a [`TensorDyn`] image as a JPEG file.
1750///
1751/// Only packed RGB and RGBA formats are supported.
1752pub fn save_jpeg(tensor: &TensorDyn, path: impl AsRef<std::path::Path>, quality: u8) -> Result<()> {
1753    let t = tensor.as_u8().ok_or(Error::UnsupportedFormat(
1754        "save_jpeg requires u8 tensor".to_string(),
1755    ))?;
1756    let fmt = t.format().ok_or(Error::NotAnImage)?;
1757    if fmt.layout() != PixelLayout::Packed {
1758        return Err(Error::NotImplemented(
1759            "Saving planar images is not supported".to_string(),
1760        ));
1761    }
1762
1763    let colour = match fmt {
1764        PixelFormat::Rgb => jpeg_encoder::ColorType::Rgb,
1765        PixelFormat::Rgba => jpeg_encoder::ColorType::Rgba,
1766        _ => {
1767            return Err(Error::NotImplemented(
1768                "Unsupported image format for saving".to_string(),
1769            ));
1770        }
1771    };
1772
1773    let w = t.width().ok_or(Error::NotAnImage)?;
1774    let h = t.height().ok_or(Error::NotAnImage)?;
1775    let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
1776    let tensor_map = t.map()?;
1777
1778    encoder.encode(&tensor_map, w as u16, h as u16, colour)?;
1779
1780    Ok(())
1781}
1782
/// Scope timer that logs, at `trace` level, how long it was alive.
/// Create one at the top of a function; the elapsed time is logged when the
/// value is dropped at scope exit.
pub(crate) struct FunctionTimer<T: Display> {
    // Label included in the trace message (typically the function name).
    name: T,
    // Creation time; elapsed time is measured against this on drop.
    start: std::time::Instant,
}

impl<T: Display> FunctionTimer<T> {
    /// Start timing; the measurement ends when the returned value is dropped.
    pub fn new(name: T) -> Self {
        Self {
            name,
            start: std::time::Instant::now(),
        }
    }
}

impl<T: Display> Drop for FunctionTimer<T> {
    fn drop(&mut self) {
        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
    }
}
1802
// Default overlay palette: 20 RGBA colors with components normalized to
// 0.0–1.0 and a fixed alpha of 0.7. Presumably indexed by class id when
// rendering — confirm against the set_class_colors callers.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1825
1826const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
1827    let mut result = [[0; M]; N];
1828    let mut i = 0;
1829    while i < N {
1830        let mut j = 0;
1831        while j < M {
1832            result[i][j] = (a[i][j] * 255.0).round() as u8;
1833            j += 1;
1834        }
1835        i += 1;
1836    }
1837    result
1838}
1839
// The default palette converted to 8-bit RGBA components at compile time.
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
1841
1842#[cfg(test)]
1843#[cfg_attr(coverage_nightly, coverage(off))]
1844mod image_tests {
1845    use super::*;
1846    use crate::{CPUProcessor, Rotation};
1847    #[cfg(target_os = "linux")]
1848    use edgefirst_tensor::is_dma_available;
1849    use edgefirst_tensor::{TensorMapTrait, TensorMemory, TensorTrait};
1850    use image::buffer::ConvertBuffer;
1851
1852    /// Test helper: call `ImageProcessorTrait::convert()` on two `TensorDyn`s
1853    /// by going through the `TensorDyn` API.
1854    ///
1855    /// Returns the `(src_image, dst_image)` reconstructed from the TensorDyn
1856    /// round-trip so the caller can feed them to `compare_images` etc.
1857    fn convert_img(
1858        proc: &mut dyn ImageProcessorTrait,
1859        src: TensorDyn,
1860        dst: TensorDyn,
1861        rotation: Rotation,
1862        flip: Flip,
1863        crop: Crop,
1864    ) -> (Result<()>, TensorDyn, TensorDyn) {
1865        let src_fourcc = src.format().unwrap();
1866        let dst_fourcc = dst.format().unwrap();
1867        let src_dyn = src;
1868        let mut dst_dyn = dst;
1869        let result = proc.convert(&src_dyn, &mut dst_dyn, rotation, flip, crop);
1870        let src_back = {
1871            let mut __t = src_dyn.into_u8().unwrap();
1872            __t.set_format(src_fourcc).unwrap();
1873            TensorDyn::from(__t)
1874        };
1875        let dst_back = {
1876            let mut __t = dst_dyn.into_u8().unwrap();
1877            __t.set_format(dst_fourcc).unwrap();
1878            TensorDyn::from(__t)
1879        };
1880        (result, src_back, dst_back)
1881    }
1882
    // Runs once before any test: initialize the logger (default level: info).
    #[ctor::ctor]
    fn init() {
        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
    }
1887
1888    macro_rules! function {
1889        () => {{
1890            fn f() {}
1891            fn type_name_of<T>(_: T) -> &'static str {
1892                std::any::type_name::<T>()
1893            }
1894            let name = type_name_of(f);
1895
1896            // Find and cut the rest of the path
1897            match &name[..name.len() - 3].rfind(':') {
1898                Some(pos) => &name[pos + 1..name.len() - 3],
1899                None => &name[..name.len() - 3],
1900            }
1901        }};
1902    }
1903
    // Exercises Crop::check_crop_dyn error reporting for the three invalid
    // combinations (both rects bad, only dst bad, only src bad) and the
    // edge-exact valid case.
    #[test]
    fn test_invalid_crop() {
        // 100x100 source and destination images to validate crops against.
        let src = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();
        let dst = TensorDyn::image(100, 100, PixelFormat::Rgb, DType::U8, None).unwrap();

        // Both rectangles exceed the 100x100 bounds.
        let crop = Crop::new()
            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));

        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
        ));

        // Fix the source rect: only the destination remains invalid.
        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
        ));

        // Fix the destination rect, break the source again.
        let crop = crop
            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
        let result = crop.check_crop_dyn(&src, &dst);
        assert!(matches!(
            result,
            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
        ));

        // 50+50 touches the image edge exactly — still valid.
        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));

        let result = crop.check_crop_dyn(&src, &dst);
        assert!(result.is_ok());
    }
1940
1941    #[test]
1942    fn test_invalid_tensor_format() -> Result<(), Error> {
1943        // 4D tensor cannot be set to a 3-channel pixel format
1944        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4, 1], None, None)?;
1945        let result = tensor.set_format(PixelFormat::Rgb);
1946        assert!(result.is_err(), "4D tensor should reject set_format");
1947
1948        // Tensor with wrong channel count for the format
1949        let mut tensor = Tensor::<u8>::new(&[720, 1280, 4], None, None)?;
1950        let result = tensor.set_format(PixelFormat::Rgb);
1951        assert!(result.is_err(), "4-channel tensor should reject RGB format");
1952
1953        Ok(())
1954    }
1955
1956    #[test]
1957    fn test_invalid_image_file() -> Result<(), Error> {
1958        let result = crate::load_image(&[123; 5000], None, None);
1959        assert!(matches!(
1960            result,
1961            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1962
1963        Ok(())
1964    }
1965
1966    #[test]
1967    fn test_invalid_jpeg_format() -> Result<(), Error> {
1968        let result = crate::load_image(&[123; 5000], Some(PixelFormat::Yuyv), None);
1969        assert!(matches!(
1970            result,
1971            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1972
1973        Ok(())
1974    }
1975
    // End-to-end: decode JPEG → resize on CPU → re-encode → decode again.
    #[test]
    fn test_load_resize_save() {
        // Decode the 1280x720 test JPEG as RGBA.
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ));
        let img = crate::load_image(file, Some(PixelFormat::Rgba), None).unwrap();
        assert_eq!(img.width(), Some(1280));
        assert_eq!(img.height(), Some(720));

        // Downscale to half size using the CPU backend.
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut converter = CPUProcessor::new();
        let (result, _img, dst) = convert_img(
            &mut converter,
            img,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        assert_eq!(dst.width(), Some(640));
        assert_eq!(dst.height(), Some(360));

        // Round-trip through the JPEG encoder (writes into the CWD), then
        // verify the reloaded size and that the default load format is RGB.
        crate::save_jpeg(&dst, "zidane_resized.jpg", 80).unwrap();

        let file = std::fs::read("zidane_resized.jpg").unwrap();
        let img = crate::load_image(&file, None, None).unwrap();
        assert_eq!(img.width(), Some(640));
        assert_eq!(img.height(), Some(360));
        assert_eq!(img.format().unwrap(), PixelFormat::Rgb);
    }
2008
2009    #[test]
2010    fn test_from_tensor_planar() -> Result<(), Error> {
2011        let mut tensor = Tensor::new(&[3, 720, 1280], None, None)?;
2012        tensor.map()?.copy_from_slice(include_bytes!(concat!(
2013            env!("CARGO_MANIFEST_DIR"),
2014            "/../../testdata/camera720p.8bps"
2015        )));
2016        let planar = {
2017            tensor
2018                .set_format(PixelFormat::PlanarRgb)
2019                .map_err(|e| crate::Error::Internal(e.to_string()))?;
2020            TensorDyn::from(tensor)
2021        };
2022
2023        let rbga = load_bytes_to_tensor(
2024            1280,
2025            720,
2026            PixelFormat::Rgba,
2027            None,
2028            include_bytes!(concat!(
2029                env!("CARGO_MANIFEST_DIR"),
2030                "/../../testdata/camera720p.rgba"
2031            )),
2032        )?;
2033        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
2034
2035        Ok(())
2036    }
2037
2038    #[test]
2039    fn test_from_tensor_invalid_format() {
2040        // PixelFormat::from_fourcc_str returns None for unknown FourCC codes.
2041        // Since there's no "TEST" pixel format, this validates graceful handling.
2042        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2043    }
2044
2045    #[test]
2046    #[should_panic(expected = "Failed to save planar RGB image")]
2047    fn test_save_planar() {
2048        let planar_img = load_bytes_to_tensor(
2049            1280,
2050            720,
2051            PixelFormat::PlanarRgb,
2052            None,
2053            include_bytes!(concat!(
2054                env!("CARGO_MANIFEST_DIR"),
2055                "/../../testdata/camera720p.8bps"
2056            )),
2057        )
2058        .unwrap();
2059
2060        let save_path = "/tmp/planar_rgb.jpg";
2061        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save planar RGB image");
2062    }
2063
2064    #[test]
2065    #[should_panic(expected = "Failed to save YUYV image")]
2066    fn test_save_yuyv() {
2067        let planar_img = load_bytes_to_tensor(
2068            1280,
2069            720,
2070            PixelFormat::Yuyv,
2071            None,
2072            include_bytes!(concat!(
2073                env!("CARGO_MANIFEST_DIR"),
2074                "/../../testdata/camera720p.yuyv"
2075            )),
2076        )
2077        .unwrap();
2078
2079        let save_path = "/tmp/yuyv.jpg";
2080        crate::save_jpeg(&planar_img, save_path, 90).expect("Failed to save YUYV image");
2081    }
2082
2083    #[test]
2084    fn test_rotation_angle() {
2085        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
2086        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
2087        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
2088        assert_eq!(
2089            Rotation::from_degrees_clockwise(270),
2090            Rotation::CounterClockwise90
2091        );
2092        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
2093        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
2094        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
2095        assert_eq!(
2096            Rotation::from_degrees_clockwise(630),
2097            Rotation::CounterClockwise90
2098        );
2099    }
2100
    // Only multiples of 90 degrees are representable; 361 must panic.
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        Rotation::from_degrees_clockwise(361);
    }
2106
    // Each section sets an EDGEFIRST_DISABLE_* variable, constructs an
    // ImageProcessor, restores the variable to its previous value, and then
    // asserts the corresponding backend was not created. set_var/remove_var
    // are `unsafe` because the process environment is global mutable state.
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        #[cfg(target_os = "linux")]
        {
            // Disabling G2D leaves the processor without a G2D backend.
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            // Disabling GL leaves the processor without an OpenGL backend.
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        // Disabling CPU leaves the processor without a CPU backend.
        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // With every backend disabled, convert() must fail with NoConverter.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None)?;
        let dst = TensorDyn::image(640, 360, PixelFormat::Rgba, DType::U8, None)?;
        let (result, _src, _dst) = convert_img(
            &mut converter,
            src,
            dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        assert!(matches!(result, Err(Error::NoConverter)));

        // Restore all three variables to their original state.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        Ok(())
    }
2178
2179    #[test]
2180    fn test_unsupported_conversion() {
2181        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2182        let dst = TensorDyn::image(640, 360, PixelFormat::Nv12, DType::U8, None).unwrap();
2183        let mut converter = ImageProcessor::new().unwrap();
2184        let (result, _src, _dst) = convert_img(
2185            &mut converter,
2186            src,
2187            dst,
2188            Rotation::None,
2189            Flip::None,
2190            Crop::no_crop(),
2191        );
2192        log::debug!("result: {:?}", result);
2193        assert!(matches!(
2194            result,
2195            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2196        ));
2197    }
2198
2199    #[test]
2200    fn test_load_grey() {
2201        let grey_img = crate::load_image(
2202            include_bytes!(concat!(
2203                env!("CARGO_MANIFEST_DIR"),
2204                "/../../testdata/grey.jpg"
2205            )),
2206            Some(PixelFormat::Rgba),
2207            None,
2208        )
2209        .unwrap();
2210
2211        let grey_but_rgb_img = crate::load_image(
2212            include_bytes!(concat!(
2213                env!("CARGO_MANIFEST_DIR"),
2214                "/../../testdata/grey-rgb.jpg"
2215            )),
2216            Some(PixelFormat::Rgba),
2217            None,
2218        )
2219        .unwrap();
2220
2221        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2222    }
2223
2224    #[test]
2225    fn test_new_nv12() {
2226        let nv12 = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
2227        assert_eq!(nv12.height(), Some(720));
2228        assert_eq!(nv12.width(), Some(1280));
2229        assert_eq!(nv12.format().unwrap(), PixelFormat::Nv12);
2230        // PixelFormat::Nv12.channels() returns 1 (luma plane channel count)
2231        assert_eq!(nv12.format().unwrap().channels(), 1);
2232        assert!(nv12.format().is_some_and(
2233            |f| f.layout() == PixelLayout::Planar || f.layout() == PixelLayout::SemiPlanar
2234        ))
2235    }
2236
    // Resize with the auto-selected backend and with the CPU backend, then
    // require the two outputs to agree to a similarity of at least 0.98.
    #[test]
    #[cfg(target_os = "linux")]
    fn test_new_image_converter() {
        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Auto-selected backend (may be hardware-accelerated).
        let mut converter = ImageProcessor::new().unwrap();
        let converter_dst = converter
            .create_image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        let (result, src, converter_dst) = convert_img(
            &mut converter,
            src,
            converter_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Reference conversion on the CPU backend.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
    }
2278
    // create_image must support signed 8-bit destinations alongside U8, and
    // convert() must accept an I8 destination tensor.
    #[test]
    #[cfg(target_os = "linux")]
    fn test_create_image_dtype_i8() {
        let mut converter = ImageProcessor::new().unwrap();

        // I8 image should allocate successfully via create_image
        let dst = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
            .unwrap();
        assert_eq!(dst.dtype(), DType::I8);
        assert!(dst.width() == Some(320));
        assert!(dst.height() == Some(240));
        assert_eq!(dst.format(), Some(PixelFormat::Rgb));

        // U8 for comparison
        let dst_u8 = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::U8, None)
            .unwrap();
        assert_eq!(dst_u8.dtype(), DType::U8);

        // Convert into I8 dst should succeed
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let mut dst_i8 = converter
            .create_image(320, 240, PixelFormat::Rgb, DType::I8, None)
            .unwrap();
        converter
            .convert(
                &src,
                &mut dst_i8,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();
    }
2319
    // Crop a 640x640 region into a same-size destination rect on both the
    // auto-selected backend and the CPU backend, and require near-identical
    // output (similarity >= 0.99999).
    #[test]
    #[ignore] // Hangs on desktop platforms where DMA-buf is unavailable and PBO
              // fallback triggers a GPU driver hang during SHM→texture upload (e.g.,
              // NVIDIA without /dev/dma_heap permissions). Works on embedded targets.
    fn test_crop_skip() {
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Auto-selected backend (may be hardware-accelerated).
        let mut converter = ImageProcessor::new().unwrap();
        let converter_dst = converter
            .create_image(1280, 720, PixelFormat::Rgba, DType::U8, None)
            .unwrap();
        let crop = Crop::new()
            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
        let (result, src, converter_dst) = convert_img(
            &mut converter,
            src,
            converter_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        // Reference conversion on the CPU backend with the same crop.
        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
    }
2363
2364    #[test]
2365    fn test_invalid_pixel_format() {
2366        // PixelFormat::from_fourcc returns None for unknown formats,
2367        // so TensorDyn::image cannot be called with an invalid format.
2368        assert!(PixelFormat::from_fourcc(u32::from_le_bytes(*b"TEST")).is_none());
2369    }
2370
    // Cached result of the one-time G2D availability probe; initialized on first
    // use by is_g2d_available() (Linux/i.MX8 only).
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2374
2375    #[cfg(target_os = "linux")]
2376    fn is_g2d_available() -> bool {
2377        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
2378    }
2379
    // Cached result of the one-time OpenGL availability probe; initialized on
    // first use by is_opengl_available().
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2383
2384    #[cfg(target_os = "linux")]
2385    #[cfg(feature = "opengl")]
2386    // Helper function to check if OpenGL is available
2387    fn is_opengl_available() -> bool {
2388        #[cfg(all(target_os = "linux", feature = "opengl"))]
2389        {
2390            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2391        }
2392
2393        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2394        {
2395            false
2396        }
2397    }
2398
    #[test]
    // Loading a JPEG whose EXIF orientation tag encodes a 90° rotation should
    // yield the same pixels as loading the unrotated JPEG and rotating it 90°
    // clockwise on the CPU backend.
    fn test_load_jpeg_with_exif() {
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif.jpg"
        ))
        .to_vec();
        let loaded = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // EXIF rotation applied at load time: the 1280x720 source became 720x1280.
        assert_eq!(loaded.height(), Some(1280));
        assert_eq!(loaded.width(), Some(720));

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // Width/height are deliberately swapped here: a 90° rotation maps the
        // source's (width, height) to the destination's (height, width).
        let (dst_width, dst_height) = (cpu_src.height().unwrap(), cpu_src.width().unwrap());

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Clockwise90,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2436
    #[test]
    // Loading a PNG whose EXIF orientation encodes a 180° rotation should yield
    // the same pixels as loading the unrotated JPEG and rotating it 180° on the
    // CPU backend.
    fn test_load_png_with_exif() {
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane_rotated_exif_180.png"
        ))
        .to_vec();
        let loaded = crate::load_png(&file, Some(PixelFormat::Rgba), None).unwrap();

        // A 180° rotation keeps the original 1280x720 dimensions.
        assert_eq!(loaded.height(), Some(720));
        assert_eq!(loaded.width(), Some(1280));

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let cpu_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, _cpu_src, cpu_dst) = convert_img(
            &mut cpu_converter,
            cpu_src,
            cpu_dst,
            Rotation::Rotate180,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&loaded, &cpu_dst, 0.98, function!());
    }
2471
    #[test]
    #[cfg(target_os = "linux")]
    // Verifies a G2D downscale (1280x720 -> 640x360, RGBA, DMA-backed tensors)
    // against the CPU backend on the same source image.
    fn test_g2d_resize() {
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        // The source is loaded into DMA memory for the G2D conversion.
        let src =
            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();

        let g2d_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // CPU reference conversion reuses the source handed back by convert_img.
        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, _src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2530
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Verifies an OpenGL downscale (1280x720 -> 640x360 RGBA) against the CPU
    // backend; the GL conversion is repeated five times on the same converter
    // to catch state leakage between successive conversions.
    fn test_opengl_resize() {
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut src = src;
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        for _ in 0..5 {
            let gl_dst =
                TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None)
                    .unwrap();
            // convert_img returns the source tensor so it can feed the next
            // iteration without reloading from disk.
            let (result, src_back, gl_dst) = convert_img(
                &mut gl_converter,
                src,
                gl_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            );
            result.unwrap();
            src = src_back;

            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }
    }
2583
2584    #[test]
2585    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2586    #[cfg(target_os = "linux")]
2587    #[cfg(feature = "opengl")]
2588    fn test_opengl_10_threads() {
2589        if !is_opengl_available() {
2590            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2591            return;
2592        }
2593
2594        let handles: Vec<_> = (0..10)
2595            .map(|i| {
2596                std::thread::Builder::new()
2597                    .name(format!("Thread {i}"))
2598                    .spawn(test_opengl_resize)
2599                    .unwrap()
2600            })
2601            .collect();
2602        handles.into_iter().for_each(|h| {
2603            if let Err(e) = h.join() {
2604                std::panic::resume_unwind(e)
2605            }
2606        });
2607    }
2608
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Verifies single-channel (GREY) conversion with a resize to 640x640 on the
    // OpenGL backend against the CPU backend.
    fn test_opengl_grey() {
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let img = crate::load_image(
            include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../testdata/grey.jpg"
            )),
            Some(PixelFormat::Grey),
            None,
        )
        .unwrap();

        let gl_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();
        let cpu_dst = TensorDyn::image(640, 640, PixelFormat::Grey, DType::U8, None).unwrap();

        let mut converter = CPUProcessor::new();

        // CPU reference first; convert_img returns the image so the GL pass
        // below can reuse it without reloading.
        let (result, img, cpu_dst) = convert_img(
            &mut converter,
            img,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut gl = GLProcessorThreaded::new(None).unwrap();
        let (result, _img, gl_dst) = convert_img(
            &mut gl,
            img,
            gl_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2656
    #[test]
    #[cfg(target_os = "linux")]
    // Verifies that a source-rect crop (top-left 640x360 region) scaled into a
    // 640x640 destination matches between the G2D and CPU backends.
    fn test_g2d_src_crop() {
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Crop only the source: read the top-left 640x360 region and scale it
        // across the entire destination.
        let crop = Crop {
            src_rect: Some(Rect {
                left: 0,
                top: 0,
                width: 640,
                height: 360,
            }),
            dst_rect: None,
            dst_color: None,
        };
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let g2d_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2718
    #[test]
    #[cfg(target_os = "linux")]
    // Verifies that a destination-rect crop (full source scaled into a 512x288
    // region at offset 100,100) matches between the G2D and CPU backends.
    fn test_g2d_dst_crop() {
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Crop only the destination: the whole source is scaled into this rect.
        let crop = Crop {
            src_rect: None,
            dst_rect: Some(Rect::new(100, 100, 512, 288)),
            dst_color: None,
        };
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let g2d_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2775
2776    #[test]
2777    #[cfg(target_os = "linux")]
2778    fn test_g2d_all_rgba() {
2779        if !is_g2d_available() {
2780            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2781            return;
2782        }
2783        if !is_dma_available() {
2784            eprintln!(
2785                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2786            );
2787            return;
2788        }
2789
2790        let dst_width = 640;
2791        let dst_height = 640;
2792        let file = include_bytes!(concat!(
2793            env!("CARGO_MANIFEST_DIR"),
2794            "/../../testdata/zidane.jpg"
2795        ))
2796        .to_vec();
2797        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
2798        let src_dyn = src;
2799
2800        let mut cpu_dst =
2801            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2802        let mut cpu_converter = CPUProcessor::new();
2803        let mut g2d_dst =
2804            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
2805        let mut g2d_converter = G2DProcessor::new().unwrap();
2806
2807        let crop = Crop {
2808            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2809            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2810            dst_color: None,
2811        };
2812
2813        for rot in [
2814            Rotation::None,
2815            Rotation::Clockwise90,
2816            Rotation::Rotate180,
2817            Rotation::CounterClockwise90,
2818        ] {
2819            cpu_dst
2820                .as_u8()
2821                .unwrap()
2822                .map()
2823                .unwrap()
2824                .as_mut_slice()
2825                .fill(114);
2826            g2d_dst
2827                .as_u8()
2828                .unwrap()
2829                .map()
2830                .unwrap()
2831                .as_mut_slice()
2832                .fill(114);
2833            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2834                let mut cpu_dst_dyn = cpu_dst;
2835                cpu_converter
2836                    .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
2837                    .unwrap();
2838                cpu_dst = {
2839                    let mut __t = cpu_dst_dyn.into_u8().unwrap();
2840                    __t.set_format(PixelFormat::Rgba).unwrap();
2841                    TensorDyn::from(__t)
2842                };
2843
2844                let mut g2d_dst_dyn = g2d_dst;
2845                g2d_converter
2846                    .convert(&src_dyn, &mut g2d_dst_dyn, Rotation::None, Flip::None, crop)
2847                    .unwrap();
2848                g2d_dst = {
2849                    let mut __t = g2d_dst_dyn.into_u8().unwrap();
2850                    __t.set_format(PixelFormat::Rgba).unwrap();
2851                    TensorDyn::from(__t)
2852                };
2853
2854                compare_images(
2855                    &g2d_dst,
2856                    &cpu_dst,
2857                    0.98,
2858                    &format!("{} {:?} {:?}", function!(), rot, flip),
2859                );
2860            }
2861        }
2862    }
2863
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Verifies that a source-rect crop (region from (320,180) to the image's
    // bottom-right corner) scaled to 640x360 matches between the OpenGL and CPU
    // backends.
    fn test_opengl_src_crop() {
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        // Crop only the source; the crop extends to the 1280x720 image edge.
        let crop = Crop {
            src_rect: Some(Rect {
                left: 320,
                top: 180,
                width: 1280 - 320,
                height: 720 - 180,
            }),
            dst_rect: None,
            dst_color: None,
        };

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let gl_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2920
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Verifies that a destination-rect crop (full source scaled into a 512x288
    // region at offset 100,100) matches between the OpenGL and CPU backends.
    fn test_opengl_dst_crop() {
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // Crop only the destination: the whole source is scaled into this rect.
        let crop = Crop {
            src_rect: None,
            dst_rect: Some(Rect::new(100, 100, 512, 288)),
            dst_color: None,
        };
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        let gl_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop,
        );
        result.unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2972
2973    #[test]
2974    #[cfg(target_os = "linux")]
2975    #[cfg(feature = "opengl")]
2976    fn test_opengl_all_rgba() {
2977        if !is_opengl_available() {
2978            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2979            return;
2980        }
2981
2982        let dst_width = 640;
2983        let dst_height = 640;
2984        let file = include_bytes!(concat!(
2985            env!("CARGO_MANIFEST_DIR"),
2986            "/../../testdata/zidane.jpg"
2987        ))
2988        .to_vec();
2989
2990        let mut cpu_converter = CPUProcessor::new();
2991
2992        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2993
2994        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
2995        if is_dma_available() {
2996            mem.push(Some(TensorMemory::Dma));
2997        }
2998        let crop = Crop {
2999            src_rect: Some(Rect::new(50, 120, 1024, 576)),
3000            dst_rect: Some(Rect::new(100, 100, 512, 288)),
3001            dst_color: None,
3002        };
3003        for m in mem {
3004            let src = crate::load_image(&file, Some(PixelFormat::Rgba), m).unwrap();
3005            let src_dyn = src;
3006
3007            for rot in [
3008                Rotation::None,
3009                Rotation::Clockwise90,
3010                Rotation::Rotate180,
3011                Rotation::CounterClockwise90,
3012            ] {
3013                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
3014                    let cpu_dst =
3015                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
3016                            .unwrap();
3017                    let gl_dst =
3018                        TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, m)
3019                            .unwrap();
3020                    cpu_dst
3021                        .as_u8()
3022                        .unwrap()
3023                        .map()
3024                        .unwrap()
3025                        .as_mut_slice()
3026                        .fill(114);
3027                    gl_dst
3028                        .as_u8()
3029                        .unwrap()
3030                        .map()
3031                        .unwrap()
3032                        .as_mut_slice()
3033                        .fill(114);
3034
3035                    let mut cpu_dst_dyn = cpu_dst;
3036                    cpu_converter
3037                        .convert(&src_dyn, &mut cpu_dst_dyn, Rotation::None, Flip::None, crop)
3038                        .unwrap();
3039                    let cpu_dst = {
3040                        let mut __t = cpu_dst_dyn.into_u8().unwrap();
3041                        __t.set_format(PixelFormat::Rgba).unwrap();
3042                        TensorDyn::from(__t)
3043                    };
3044
3045                    let mut gl_dst_dyn = gl_dst;
3046                    gl_converter
3047                        .convert(&src_dyn, &mut gl_dst_dyn, Rotation::None, Flip::None, crop)
3048                        .map_err(|e| {
3049                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
3050                            e
3051                        })
3052                        .unwrap();
3053                    let gl_dst = {
3054                        let mut __t = gl_dst_dyn.into_u8().unwrap();
3055                        __t.set_format(PixelFormat::Rgba).unwrap();
3056                        TensorDyn::from(__t)
3057                    };
3058
3059                    compare_images(
3060                        &gl_dst,
3061                        &cpu_dst,
3062                        0.98,
3063                        &format!("{} {:?} {:?}", function!(), rot, flip),
3064                    );
3065                }
3066            }
3067        }
3068    }
3069
3070    #[test]
3071    #[cfg(target_os = "linux")]
3072    fn test_cpu_rotate() {
3073        for rot in [
3074            Rotation::Clockwise90,
3075            Rotation::Rotate180,
3076            Rotation::CounterClockwise90,
3077        ] {
3078            test_cpu_rotate_(rot);
3079        }
3080    }
3081
    #[cfg(target_os = "linux")]
    // Applies the same rotation four times, ping-ponging between two buffers,
    // and checks the pixels return to the original image. This verifies the
    // rotations are mutually consistent, not that each rotates in the correct
    // direction (see the original note below).
    fn test_cpu_rotate_(rot: Rotation) {
        // This test rotates the image 4 times and checks that the image was returned to
        // be the same Currently doesn't check if rotations actually rotated in
        // right direction
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();

        let unchanged_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // 90-degree rotations swap the destination width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => (src.width().unwrap(), src.height().unwrap()),
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
                (src.height().unwrap(), src.width().unwrap())
            }
        };

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        // After rotating 4 times, the image should be the same as the original

        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        // Passes 2-4 swap the buffer roles each time so the dimensions always
        // match the current orientation.
        let (result, cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let (result, _cpu_dst, src) = convert_img(
            &mut cpu_converter,
            cpu_dst,
            src,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&src, &unchanged_src, 0.98, function!());
    }
3151
3152    #[test]
3153    #[cfg(target_os = "linux")]
3154    #[cfg(feature = "opengl")]
3155    fn test_opengl_rotate() {
3156        if !is_opengl_available() {
3157            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3158            return;
3159        }
3160
3161        let size = (1280, 720);
3162        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
3163
3164        if is_dma_available() {
3165            mem.push(Some(TensorMemory::Dma));
3166        }
3167        for m in mem {
3168            for rot in [
3169                Rotation::Clockwise90,
3170                Rotation::Rotate180,
3171                Rotation::CounterClockwise90,
3172            ] {
3173                test_opengl_rotate_(size, rot, m);
3174            }
3175        }
3176    }
3177
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    // Compares an OpenGL rotation against the CPU backend for a given source
    // size and tensor memory kind; the GL conversion repeats five times on the
    // same converter to catch state leakage between conversions.
    fn test_opengl_rotate_(
        size: (usize, usize),
        rot: Rotation,
        tensor_memory: Option<TensorMemory>,
    ) {
        // 90-degree rotations swap the destination width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => size,
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
        };

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), tensor_memory).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, mut src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        for _ in 0..5 {
            let gl_dst = TensorDyn::image(
                dst_width,
                dst_height,
                PixelFormat::Rgba,
                DType::U8,
                tensor_memory,
            )
            .unwrap();
            let (result, src_back, gl_dst) = convert_img(
                &mut gl_converter,
                src,
                gl_dst,
                rot,
                Flip::None,
                Crop::no_crop(),
            );
            result.unwrap();
            // Reuse the source tensor handed back by convert_img.
            src = src_back;
            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }
    }
3235
3236    #[test]
3237    #[cfg(target_os = "linux")]
3238    fn test_g2d_rotate() {
3239        if !is_g2d_available() {
3240            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3241            return;
3242        }
3243        if !is_dma_available() {
3244            eprintln!(
3245                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3246            );
3247            return;
3248        }
3249
3250        let size = (1280, 720);
3251        for rot in [
3252            Rotation::Clockwise90,
3253            Rotation::Rotate180,
3254            Rotation::CounterClockwise90,
3255        ] {
3256            test_g2d_rotate_(size, rot);
3257        }
3258    }
3259
    #[cfg(target_os = "linux")]
    // Compares a single G2D rotation against the CPU backend for a given source
    // size; the G2D source and destination tensors are DMA-backed.
    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
        // 90-degree rotations swap the destination width and height.
        let (dst_width, dst_height) = match rot {
            Rotation::None | Rotation::Rotate180 => size,
            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
        };

        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/zidane.jpg"
        ))
        .to_vec();
        let src =
            crate::load_image(&file, Some(PixelFormat::Rgba), Some(TensorMemory::Dma)).unwrap();

        let cpu_dst =
            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        let g2d_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::Rgba,
            DType::U8,
            Some(TensorMemory::Dma),
        )
        .unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();

        let (result, _src, g2d_dst) = convert_img(
            &mut g2d_converter,
            src,
            g2d_dst,
            rot,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
3311
3312    #[test]
3313    fn test_rgba_to_yuyv_resize_cpu() {
3314        let src = load_bytes_to_tensor(
3315            1280,
3316            720,
3317            PixelFormat::Rgba,
3318            None,
3319            include_bytes!(concat!(
3320                env!("CARGO_MANIFEST_DIR"),
3321                "/../../testdata/camera720p.rgba"
3322            )),
3323        )
3324        .unwrap();
3325
3326        let (dst_width, dst_height) = (640, 360);
3327
3328        let dst =
3329            TensorDyn::image(dst_width, dst_height, PixelFormat::Yuyv, DType::U8, None).unwrap();
3330
3331        let dst_through_yuyv =
3332            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3333        let dst_direct =
3334            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3335
3336        let mut cpu_converter = CPUProcessor::new();
3337
3338        let (result, src, dst) = convert_img(
3339            &mut cpu_converter,
3340            src,
3341            dst,
3342            Rotation::None,
3343            Flip::None,
3344            Crop::no_crop(),
3345        );
3346        result.unwrap();
3347
3348        let (result, _dst, dst_through_yuyv) = convert_img(
3349            &mut cpu_converter,
3350            dst,
3351            dst_through_yuyv,
3352            Rotation::None,
3353            Flip::None,
3354            Crop::no_crop(),
3355        );
3356        result.unwrap();
3357
3358        let (result, _src, dst_direct) = convert_img(
3359            &mut cpu_converter,
3360            src,
3361            dst_direct,
3362            Rotation::None,
3363            Flip::None,
3364            Crop::no_crop(),
3365        );
3366        result.unwrap();
3367
3368        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
3369    }
3370
3371    #[test]
3372    #[cfg(target_os = "linux")]
3373    #[cfg(feature = "opengl")]
3374    #[ignore = "opengl doesn't support rendering to PixelFormat::Yuyv texture"]
3375    fn test_rgba_to_yuyv_resize_opengl() {
3376        if !is_opengl_available() {
3377            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3378            return;
3379        }
3380
3381        if !is_dma_available() {
3382            eprintln!(
3383                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3384                function!()
3385            );
3386            return;
3387        }
3388
3389        let src = load_bytes_to_tensor(
3390            1280,
3391            720,
3392            PixelFormat::Rgba,
3393            None,
3394            include_bytes!(concat!(
3395                env!("CARGO_MANIFEST_DIR"),
3396                "/../../testdata/camera720p.rgba"
3397            )),
3398        )
3399        .unwrap();
3400
3401        let (dst_width, dst_height) = (640, 360);
3402
3403        let dst = TensorDyn::image(
3404            dst_width,
3405            dst_height,
3406            PixelFormat::Yuyv,
3407            DType::U8,
3408            Some(TensorMemory::Dma),
3409        )
3410        .unwrap();
3411
3412        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3413
3414        let (result, src, dst) = convert_img(
3415            &mut gl_converter,
3416            src,
3417            dst,
3418            Rotation::None,
3419            Flip::None,
3420            Crop::new()
3421                .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
3422                .with_dst_color(Some([255, 255, 255, 255])),
3423        );
3424        result.unwrap();
3425
3426        std::fs::write(
3427            "rgba_to_yuyv_opengl.yuyv",
3428            dst.as_u8().unwrap().map().unwrap().as_slice(),
3429        )
3430        .unwrap();
3431        let cpu_dst = TensorDyn::image(
3432            dst_width,
3433            dst_height,
3434            PixelFormat::Yuyv,
3435            DType::U8,
3436            Some(TensorMemory::Dma),
3437        )
3438        .unwrap();
3439        let (result, _src, cpu_dst) = convert_img(
3440            &mut CPUProcessor::new(),
3441            src,
3442            cpu_dst,
3443            Rotation::None,
3444            Flip::None,
3445            Crop::no_crop(),
3446        );
3447        result.unwrap();
3448
3449        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
3450    }
3451
3452    #[test]
3453    #[cfg(target_os = "linux")]
3454    fn test_rgba_to_yuyv_resize_g2d() {
3455        if !is_g2d_available() {
3456            eprintln!(
3457                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3458            );
3459            return;
3460        }
3461        if !is_dma_available() {
3462            eprintln!(
3463                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3464            );
3465            return;
3466        }
3467
3468        let src = load_bytes_to_tensor(
3469            1280,
3470            720,
3471            PixelFormat::Rgba,
3472            Some(TensorMemory::Dma),
3473            include_bytes!(concat!(
3474                env!("CARGO_MANIFEST_DIR"),
3475                "/../../testdata/camera720p.rgba"
3476            )),
3477        )
3478        .unwrap();
3479
3480        let (dst_width, dst_height) = (1280, 720);
3481
3482        let cpu_dst = TensorDyn::image(
3483            dst_width,
3484            dst_height,
3485            PixelFormat::Yuyv,
3486            DType::U8,
3487            Some(TensorMemory::Dma),
3488        )
3489        .unwrap();
3490
3491        let g2d_dst = TensorDyn::image(
3492            dst_width,
3493            dst_height,
3494            PixelFormat::Yuyv,
3495            DType::U8,
3496            Some(TensorMemory::Dma),
3497        )
3498        .unwrap();
3499
3500        let mut g2d_converter = G2DProcessor::new().unwrap();
3501        let crop = Crop {
3502            src_rect: None,
3503            dst_rect: Some(Rect::new(100, 100, 2, 2)),
3504            dst_color: None,
3505        };
3506
3507        g2d_dst
3508            .as_u8()
3509            .unwrap()
3510            .map()
3511            .unwrap()
3512            .as_mut_slice()
3513            .fill(128);
3514        let (result, src, g2d_dst) = convert_img(
3515            &mut g2d_converter,
3516            src,
3517            g2d_dst,
3518            Rotation::None,
3519            Flip::None,
3520            crop,
3521        );
3522        result.unwrap();
3523
3524        let cpu_dst_img = cpu_dst;
3525        cpu_dst_img
3526            .as_u8()
3527            .unwrap()
3528            .map()
3529            .unwrap()
3530            .as_mut_slice()
3531            .fill(128);
3532        let (result, _src, cpu_dst) = convert_img(
3533            &mut CPUProcessor::new(),
3534            src,
3535            cpu_dst_img,
3536            Rotation::None,
3537            Flip::None,
3538            crop,
3539        );
3540        result.unwrap();
3541
3542        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3543    }
3544
3545    #[test]
3546    fn test_yuyv_to_rgba_cpu() {
3547        let file = include_bytes!(concat!(
3548            env!("CARGO_MANIFEST_DIR"),
3549            "/../../testdata/camera720p.yuyv"
3550        ))
3551        .to_vec();
3552        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3553        src.as_u8()
3554            .unwrap()
3555            .map()
3556            .unwrap()
3557            .as_mut_slice()
3558            .copy_from_slice(&file);
3559
3560        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3561        let mut cpu_converter = CPUProcessor::new();
3562
3563        let (result, _src, dst) = convert_img(
3564            &mut cpu_converter,
3565            src,
3566            dst,
3567            Rotation::None,
3568            Flip::None,
3569            Crop::no_crop(),
3570        );
3571        result.unwrap();
3572
3573        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3574        target_image
3575            .as_u8()
3576            .unwrap()
3577            .map()
3578            .unwrap()
3579            .as_mut_slice()
3580            .copy_from_slice(include_bytes!(concat!(
3581                env!("CARGO_MANIFEST_DIR"),
3582                "/../../testdata/camera720p.rgba"
3583            )));
3584
3585        compare_images(&dst, &target_image, 0.98, function!());
3586    }
3587
3588    #[test]
3589    fn test_yuyv_to_rgb_cpu() {
3590        let file = include_bytes!(concat!(
3591            env!("CARGO_MANIFEST_DIR"),
3592            "/../../testdata/camera720p.yuyv"
3593        ))
3594        .to_vec();
3595        let src = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
3596        src.as_u8()
3597            .unwrap()
3598            .map()
3599            .unwrap()
3600            .as_mut_slice()
3601            .copy_from_slice(&file);
3602
3603        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3604        let mut cpu_converter = CPUProcessor::new();
3605
3606        let (result, _src, dst) = convert_img(
3607            &mut cpu_converter,
3608            src,
3609            dst,
3610            Rotation::None,
3611            Flip::None,
3612            Crop::no_crop(),
3613        );
3614        result.unwrap();
3615
3616        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3617        target_image
3618            .as_u8()
3619            .unwrap()
3620            .map()
3621            .unwrap()
3622            .as_mut_slice()
3623            .as_chunks_mut::<3>()
3624            .0
3625            .iter_mut()
3626            .zip(
3627                include_bytes!(concat!(
3628                    env!("CARGO_MANIFEST_DIR"),
3629                    "/../../testdata/camera720p.rgba"
3630                ))
3631                .as_chunks::<4>()
3632                .0,
3633            )
3634            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3635
3636        compare_images(&dst, &target_image, 0.98, function!());
3637    }
3638
3639    #[test]
3640    #[cfg(target_os = "linux")]
3641    fn test_yuyv_to_rgba_g2d() {
3642        if !is_g2d_available() {
3643            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3644            return;
3645        }
3646        if !is_dma_available() {
3647            eprintln!(
3648                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3649            );
3650            return;
3651        }
3652
3653        let src = load_bytes_to_tensor(
3654            1280,
3655            720,
3656            PixelFormat::Yuyv,
3657            None,
3658            include_bytes!(concat!(
3659                env!("CARGO_MANIFEST_DIR"),
3660                "/../../testdata/camera720p.yuyv"
3661            )),
3662        )
3663        .unwrap();
3664
3665        let dst = TensorDyn::image(
3666            1280,
3667            720,
3668            PixelFormat::Rgba,
3669            DType::U8,
3670            Some(TensorMemory::Dma),
3671        )
3672        .unwrap();
3673        let mut g2d_converter = G2DProcessor::new().unwrap();
3674
3675        let (result, _src, dst) = convert_img(
3676            &mut g2d_converter,
3677            src,
3678            dst,
3679            Rotation::None,
3680            Flip::None,
3681            Crop::no_crop(),
3682        );
3683        result.unwrap();
3684
3685        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3686        target_image
3687            .as_u8()
3688            .unwrap()
3689            .map()
3690            .unwrap()
3691            .as_mut_slice()
3692            .copy_from_slice(include_bytes!(concat!(
3693                env!("CARGO_MANIFEST_DIR"),
3694                "/../../testdata/camera720p.rgba"
3695            )));
3696
3697        compare_images(&dst, &target_image, 0.98, function!());
3698    }
3699
3700    #[test]
3701    #[cfg(target_os = "linux")]
3702    #[cfg(feature = "opengl")]
3703    fn test_yuyv_to_rgba_opengl() {
3704        if !is_opengl_available() {
3705            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3706            return;
3707        }
3708        if !is_dma_available() {
3709            eprintln!(
3710                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3711                function!()
3712            );
3713            return;
3714        }
3715
3716        let src = load_bytes_to_tensor(
3717            1280,
3718            720,
3719            PixelFormat::Yuyv,
3720            Some(TensorMemory::Dma),
3721            include_bytes!(concat!(
3722                env!("CARGO_MANIFEST_DIR"),
3723                "/../../testdata/camera720p.yuyv"
3724            )),
3725        )
3726        .unwrap();
3727
3728        let dst = TensorDyn::image(
3729            1280,
3730            720,
3731            PixelFormat::Rgba,
3732            DType::U8,
3733            Some(TensorMemory::Dma),
3734        )
3735        .unwrap();
3736        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3737
3738        let (result, _src, dst) = convert_img(
3739            &mut gl_converter,
3740            src,
3741            dst,
3742            Rotation::None,
3743            Flip::None,
3744            Crop::no_crop(),
3745        );
3746        result.unwrap();
3747
3748        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
3749        target_image
3750            .as_u8()
3751            .unwrap()
3752            .map()
3753            .unwrap()
3754            .as_mut_slice()
3755            .copy_from_slice(include_bytes!(concat!(
3756                env!("CARGO_MANIFEST_DIR"),
3757                "/../../testdata/camera720p.rgba"
3758            )));
3759
3760        compare_images(&dst, &target_image, 0.98, function!());
3761    }
3762
3763    #[test]
3764    #[cfg(target_os = "linux")]
3765    fn test_yuyv_to_rgb_g2d() {
3766        if !is_g2d_available() {
3767            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3768            return;
3769        }
3770        if !is_dma_available() {
3771            eprintln!(
3772                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3773            );
3774            return;
3775        }
3776
3777        let src = load_bytes_to_tensor(
3778            1280,
3779            720,
3780            PixelFormat::Yuyv,
3781            None,
3782            include_bytes!(concat!(
3783                env!("CARGO_MANIFEST_DIR"),
3784                "/../../testdata/camera720p.yuyv"
3785            )),
3786        )
3787        .unwrap();
3788
3789        let g2d_dst = TensorDyn::image(
3790            1280,
3791            720,
3792            PixelFormat::Rgb,
3793            DType::U8,
3794            Some(TensorMemory::Dma),
3795        )
3796        .unwrap();
3797        let mut g2d_converter = G2DProcessor::new().unwrap();
3798
3799        let (result, src, g2d_dst) = convert_img(
3800            &mut g2d_converter,
3801            src,
3802            g2d_dst,
3803            Rotation::None,
3804            Flip::None,
3805            Crop::no_crop(),
3806        );
3807        result.unwrap();
3808
3809        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
3810        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3811
3812        let (result, _src, cpu_dst) = convert_img(
3813            &mut cpu_converter,
3814            src,
3815            cpu_dst,
3816            Rotation::None,
3817            Flip::None,
3818            Crop::no_crop(),
3819        );
3820        result.unwrap();
3821
3822        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3823    }
3824
3825    #[test]
3826    #[cfg(target_os = "linux")]
3827    fn test_yuyv_to_yuyv_resize_g2d() {
3828        if !is_g2d_available() {
3829            eprintln!(
3830                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3831            );
3832            return;
3833        }
3834        if !is_dma_available() {
3835            eprintln!(
3836                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3837            );
3838            return;
3839        }
3840
3841        let src = load_bytes_to_tensor(
3842            1280,
3843            720,
3844            PixelFormat::Yuyv,
3845            None,
3846            include_bytes!(concat!(
3847                env!("CARGO_MANIFEST_DIR"),
3848                "/../../testdata/camera720p.yuyv"
3849            )),
3850        )
3851        .unwrap();
3852
3853        let g2d_dst = TensorDyn::image(
3854            600,
3855            400,
3856            PixelFormat::Yuyv,
3857            DType::U8,
3858            Some(TensorMemory::Dma),
3859        )
3860        .unwrap();
3861        let mut g2d_converter = G2DProcessor::new().unwrap();
3862
3863        let (result, src, g2d_dst) = convert_img(
3864            &mut g2d_converter,
3865            src,
3866            g2d_dst,
3867            Rotation::None,
3868            Flip::None,
3869            Crop::no_crop(),
3870        );
3871        result.unwrap();
3872
3873        let cpu_dst = TensorDyn::image(600, 400, PixelFormat::Yuyv, DType::U8, None).unwrap();
3874        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3875
3876        let (result, _src, cpu_dst) = convert_img(
3877            &mut cpu_converter,
3878            src,
3879            cpu_dst,
3880            Rotation::None,
3881            Flip::None,
3882            Crop::no_crop(),
3883        );
3884        result.unwrap();
3885
3886        // TODO: compare PixelFormat::Yuyv and PixelFormat::Yuyv images without having to convert them to PixelFormat::Rgb
3887        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3888    }
3889
3890    #[test]
3891    fn test_yuyv_to_rgba_resize_cpu() {
3892        let src = load_bytes_to_tensor(
3893            1280,
3894            720,
3895            PixelFormat::Yuyv,
3896            None,
3897            include_bytes!(concat!(
3898                env!("CARGO_MANIFEST_DIR"),
3899                "/../../testdata/camera720p.yuyv"
3900            )),
3901        )
3902        .unwrap();
3903
3904        let (dst_width, dst_height) = (960, 540);
3905
3906        let dst =
3907            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3908        let mut cpu_converter = CPUProcessor::new();
3909
3910        let (result, _src, dst) = convert_img(
3911            &mut cpu_converter,
3912            src,
3913            dst,
3914            Rotation::None,
3915            Flip::None,
3916            Crop::no_crop(),
3917        );
3918        result.unwrap();
3919
3920        let dst_target =
3921            TensorDyn::image(dst_width, dst_height, PixelFormat::Rgba, DType::U8, None).unwrap();
3922        let src_target = load_bytes_to_tensor(
3923            1280,
3924            720,
3925            PixelFormat::Rgba,
3926            None,
3927            include_bytes!(concat!(
3928                env!("CARGO_MANIFEST_DIR"),
3929                "/../../testdata/camera720p.rgba"
3930            )),
3931        )
3932        .unwrap();
3933        let (result, _src_target, dst_target) = convert_img(
3934            &mut cpu_converter,
3935            src_target,
3936            dst_target,
3937            Rotation::None,
3938            Flip::None,
3939            Crop::no_crop(),
3940        );
3941        result.unwrap();
3942
3943        compare_images(&dst, &dst_target, 0.98, function!());
3944    }
3945
3946    #[test]
3947    #[cfg(target_os = "linux")]
3948    fn test_yuyv_to_rgba_crop_flip_g2d() {
3949        if !is_g2d_available() {
3950            eprintln!(
3951                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3952            );
3953            return;
3954        }
3955        if !is_dma_available() {
3956            eprintln!(
3957                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3958            );
3959            return;
3960        }
3961
3962        let src = load_bytes_to_tensor(
3963            1280,
3964            720,
3965            PixelFormat::Yuyv,
3966            Some(TensorMemory::Dma),
3967            include_bytes!(concat!(
3968                env!("CARGO_MANIFEST_DIR"),
3969                "/../../testdata/camera720p.yuyv"
3970            )),
3971        )
3972        .unwrap();
3973
3974        let (dst_width, dst_height) = (640, 640);
3975
3976        let dst_g2d = TensorDyn::image(
3977            dst_width,
3978            dst_height,
3979            PixelFormat::Rgba,
3980            DType::U8,
3981            Some(TensorMemory::Dma),
3982        )
3983        .unwrap();
3984        let mut g2d_converter = G2DProcessor::new().unwrap();
3985        let crop = Crop {
3986            src_rect: Some(Rect {
3987                left: 20,
3988                top: 15,
3989                width: 400,
3990                height: 300,
3991            }),
3992            dst_rect: None,
3993            dst_color: None,
3994        };
3995
3996        let (result, src, dst_g2d) = convert_img(
3997            &mut g2d_converter,
3998            src,
3999            dst_g2d,
4000            Rotation::None,
4001            Flip::Horizontal,
4002            crop,
4003        );
4004        result.unwrap();
4005
4006        let dst_cpu = TensorDyn::image(
4007            dst_width,
4008            dst_height,
4009            PixelFormat::Rgba,
4010            DType::U8,
4011            Some(TensorMemory::Dma),
4012        )
4013        .unwrap();
4014        let mut cpu_converter = CPUProcessor::new();
4015
4016        let (result, _src, dst_cpu) = convert_img(
4017            &mut cpu_converter,
4018            src,
4019            dst_cpu,
4020            Rotation::None,
4021            Flip::Horizontal,
4022            crop,
4023        );
4024        result.unwrap();
4025        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
4026    }
4027
4028    #[test]
4029    #[cfg(target_os = "linux")]
4030    #[cfg(feature = "opengl")]
4031    fn test_yuyv_to_rgba_crop_flip_opengl() {
4032        if !is_opengl_available() {
4033            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4034            return;
4035        }
4036
4037        if !is_dma_available() {
4038            eprintln!(
4039                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4040                function!()
4041            );
4042            return;
4043        }
4044
4045        let src = load_bytes_to_tensor(
4046            1280,
4047            720,
4048            PixelFormat::Yuyv,
4049            Some(TensorMemory::Dma),
4050            include_bytes!(concat!(
4051                env!("CARGO_MANIFEST_DIR"),
4052                "/../../testdata/camera720p.yuyv"
4053            )),
4054        )
4055        .unwrap();
4056
4057        let (dst_width, dst_height) = (640, 640);
4058
4059        let dst_gl = TensorDyn::image(
4060            dst_width,
4061            dst_height,
4062            PixelFormat::Rgba,
4063            DType::U8,
4064            Some(TensorMemory::Dma),
4065        )
4066        .unwrap();
4067        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4068        let crop = Crop {
4069            src_rect: Some(Rect {
4070                left: 20,
4071                top: 15,
4072                width: 400,
4073                height: 300,
4074            }),
4075            dst_rect: None,
4076            dst_color: None,
4077        };
4078
4079        let (result, src, dst_gl) = convert_img(
4080            &mut gl_converter,
4081            src,
4082            dst_gl,
4083            Rotation::None,
4084            Flip::Horizontal,
4085            crop,
4086        );
4087        result.unwrap();
4088
4089        let dst_cpu = TensorDyn::image(
4090            dst_width,
4091            dst_height,
4092            PixelFormat::Rgba,
4093            DType::U8,
4094            Some(TensorMemory::Dma),
4095        )
4096        .unwrap();
4097        let mut cpu_converter = CPUProcessor::new();
4098
4099        let (result, _src, dst_cpu) = convert_img(
4100            &mut cpu_converter,
4101            src,
4102            dst_cpu,
4103            Rotation::None,
4104            Flip::Horizontal,
4105            crop,
4106        );
4107        result.unwrap();
4108        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
4109    }
4110
4111    #[test]
4112    fn test_vyuy_to_rgba_cpu() {
4113        let file = include_bytes!(concat!(
4114            env!("CARGO_MANIFEST_DIR"),
4115            "/../../testdata/camera720p.vyuy"
4116        ))
4117        .to_vec();
4118        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
4119        src.as_u8()
4120            .unwrap()
4121            .map()
4122            .unwrap()
4123            .as_mut_slice()
4124            .copy_from_slice(&file);
4125
4126        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4127        let mut cpu_converter = CPUProcessor::new();
4128
4129        let (result, _src, dst) = convert_img(
4130            &mut cpu_converter,
4131            src,
4132            dst,
4133            Rotation::None,
4134            Flip::None,
4135            Crop::no_crop(),
4136        );
4137        result.unwrap();
4138
4139        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4140        target_image
4141            .as_u8()
4142            .unwrap()
4143            .map()
4144            .unwrap()
4145            .as_mut_slice()
4146            .copy_from_slice(include_bytes!(concat!(
4147                env!("CARGO_MANIFEST_DIR"),
4148                "/../../testdata/camera720p.rgba"
4149            )));
4150
4151        compare_images(&dst, &target_image, 0.98, function!());
4152    }
4153
4154    #[test]
4155    fn test_vyuy_to_rgb_cpu() {
4156        let file = include_bytes!(concat!(
4157            env!("CARGO_MANIFEST_DIR"),
4158            "/../../testdata/camera720p.vyuy"
4159        ))
4160        .to_vec();
4161        let src = TensorDyn::image(1280, 720, PixelFormat::Vyuy, DType::U8, None).unwrap();
4162        src.as_u8()
4163            .unwrap()
4164            .map()
4165            .unwrap()
4166            .as_mut_slice()
4167            .copy_from_slice(&file);
4168
4169        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4170        let mut cpu_converter = CPUProcessor::new();
4171
4172        let (result, _src, dst) = convert_img(
4173            &mut cpu_converter,
4174            src,
4175            dst,
4176            Rotation::None,
4177            Flip::None,
4178            Crop::no_crop(),
4179        );
4180        result.unwrap();
4181
4182        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4183        target_image
4184            .as_u8()
4185            .unwrap()
4186            .map()
4187            .unwrap()
4188            .as_mut_slice()
4189            .as_chunks_mut::<3>()
4190            .0
4191            .iter_mut()
4192            .zip(
4193                include_bytes!(concat!(
4194                    env!("CARGO_MANIFEST_DIR"),
4195                    "/../../testdata/camera720p.rgba"
4196                ))
4197                .as_chunks::<4>()
4198                .0,
4199            )
4200            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
4201
4202        compare_images(&dst, &target_image, 0.98, function!());
4203    }
4204
4205    #[test]
4206    #[cfg(target_os = "linux")]
4207    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4208    fn test_vyuy_to_rgba_g2d() {
4209        if !is_g2d_available() {
4210            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
4211            return;
4212        }
4213        if !is_dma_available() {
4214            eprintln!(
4215                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4216            );
4217            return;
4218        }
4219
4220        let src = load_bytes_to_tensor(
4221            1280,
4222            720,
4223            PixelFormat::Vyuy,
4224            None,
4225            include_bytes!(concat!(
4226                env!("CARGO_MANIFEST_DIR"),
4227                "/../../testdata/camera720p.vyuy"
4228            )),
4229        )
4230        .unwrap();
4231
4232        let dst = TensorDyn::image(
4233            1280,
4234            720,
4235            PixelFormat::Rgba,
4236            DType::U8,
4237            Some(TensorMemory::Dma),
4238        )
4239        .unwrap();
4240        let mut g2d_converter = G2DProcessor::new().unwrap();
4241
4242        let (result, _src, dst) = convert_img(
4243            &mut g2d_converter,
4244            src,
4245            dst,
4246            Rotation::None,
4247            Flip::None,
4248            Crop::no_crop(),
4249        );
4250        match result {
4251            Err(Error::G2D(_)) => {
4252                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support PixelFormat::Vyuy format");
4253                return;
4254            }
4255            r => r.unwrap(),
4256        }
4257
4258        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4259        target_image
4260            .as_u8()
4261            .unwrap()
4262            .map()
4263            .unwrap()
4264            .as_mut_slice()
4265            .copy_from_slice(include_bytes!(concat!(
4266                env!("CARGO_MANIFEST_DIR"),
4267                "/../../testdata/camera720p.rgba"
4268            )));
4269
4270        compare_images(&dst, &target_image, 0.98, function!());
4271    }
4272
4273    #[test]
4274    #[cfg(target_os = "linux")]
4275    #[ignore = "G2D does not support VYUY; re-enable when hardware support is added"]
4276    fn test_vyuy_to_rgb_g2d() {
4277        if !is_g2d_available() {
4278            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
4279            return;
4280        }
4281        if !is_dma_available() {
4282            eprintln!(
4283                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4284            );
4285            return;
4286        }
4287
4288        let src = load_bytes_to_tensor(
4289            1280,
4290            720,
4291            PixelFormat::Vyuy,
4292            None,
4293            include_bytes!(concat!(
4294                env!("CARGO_MANIFEST_DIR"),
4295                "/../../testdata/camera720p.vyuy"
4296            )),
4297        )
4298        .unwrap();
4299
4300        let g2d_dst = TensorDyn::image(
4301            1280,
4302            720,
4303            PixelFormat::Rgb,
4304            DType::U8,
4305            Some(TensorMemory::Dma),
4306        )
4307        .unwrap();
4308        let mut g2d_converter = G2DProcessor::new().unwrap();
4309
4310        let (result, src, g2d_dst) = convert_img(
4311            &mut g2d_converter,
4312            src,
4313            g2d_dst,
4314            Rotation::None,
4315            Flip::None,
4316            Crop::no_crop(),
4317        );
4318        match result {
4319            Err(Error::G2D(_)) => {
4320                eprintln!(
4321                    "SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support PixelFormat::Vyuy format"
4322                );
4323                return;
4324            }
4325            r => r.unwrap(),
4326        }
4327
4328        let cpu_dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4329        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4330
4331        let (result, _src, cpu_dst) = convert_img(
4332            &mut cpu_converter,
4333            src,
4334            cpu_dst,
4335            Rotation::None,
4336            Flip::None,
4337            Crop::no_crop(),
4338        );
4339        result.unwrap();
4340
4341        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4342    }
4343
4344    #[test]
4345    #[cfg(target_os = "linux")]
4346    #[cfg(feature = "opengl")]
4347    fn test_vyuy_to_rgba_opengl() {
4348        if !is_opengl_available() {
4349            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4350            return;
4351        }
4352        if !is_dma_available() {
4353            eprintln!(
4354                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4355                function!()
4356            );
4357            return;
4358        }
4359
4360        let src = load_bytes_to_tensor(
4361            1280,
4362            720,
4363            PixelFormat::Vyuy,
4364            Some(TensorMemory::Dma),
4365            include_bytes!(concat!(
4366                env!("CARGO_MANIFEST_DIR"),
4367                "/../../testdata/camera720p.vyuy"
4368            )),
4369        )
4370        .unwrap();
4371
4372        let dst = TensorDyn::image(
4373            1280,
4374            720,
4375            PixelFormat::Rgba,
4376            DType::U8,
4377            Some(TensorMemory::Dma),
4378        )
4379        .unwrap();
4380        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4381
4382        let (result, _src, dst) = convert_img(
4383            &mut gl_converter,
4384            src,
4385            dst,
4386            Rotation::None,
4387            Flip::None,
4388            Crop::no_crop(),
4389        );
4390        match result {
4391            Err(Error::NotSupported(_)) => {
4392                eprintln!(
4393                    "SKIPPED: {} - OpenGL does not support PixelFormat::Vyuy DMA format",
4394                    function!()
4395                );
4396                return;
4397            }
4398            r => r.unwrap(),
4399        }
4400
4401        let target_image = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4402        target_image
4403            .as_u8()
4404            .unwrap()
4405            .map()
4406            .unwrap()
4407            .as_mut_slice()
4408            .copy_from_slice(include_bytes!(concat!(
4409                env!("CARGO_MANIFEST_DIR"),
4410                "/../../testdata/camera720p.rgba"
4411            )));
4412
4413        compare_images(&dst, &target_image, 0.98, function!());
4414    }
4415
4416    #[test]
4417    fn test_nv12_to_rgba_cpu() {
4418        let file = include_bytes!(concat!(
4419            env!("CARGO_MANIFEST_DIR"),
4420            "/../../testdata/zidane.nv12"
4421        ))
4422        .to_vec();
4423        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4424        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4425            .copy_from_slice(&file);
4426
4427        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgba, DType::U8, None).unwrap();
4428        let mut cpu_converter = CPUProcessor::new();
4429
4430        let (result, _src, dst) = convert_img(
4431            &mut cpu_converter,
4432            src,
4433            dst,
4434            Rotation::None,
4435            Flip::None,
4436            Crop::no_crop(),
4437        );
4438        result.unwrap();
4439
4440        let target_image = crate::load_image(
4441            include_bytes!(concat!(
4442                env!("CARGO_MANIFEST_DIR"),
4443                "/../../testdata/zidane.jpg"
4444            )),
4445            Some(PixelFormat::Rgba),
4446            None,
4447        )
4448        .unwrap();
4449
4450        compare_images(&dst, &target_image, 0.98, function!());
4451    }
4452
4453    #[test]
4454    fn test_nv12_to_rgb_cpu() {
4455        let file = include_bytes!(concat!(
4456            env!("CARGO_MANIFEST_DIR"),
4457            "/../../testdata/zidane.nv12"
4458        ))
4459        .to_vec();
4460        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4461        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4462            .copy_from_slice(&file);
4463
4464        let dst = TensorDyn::image(1280, 720, PixelFormat::Rgb, DType::U8, None).unwrap();
4465        let mut cpu_converter = CPUProcessor::new();
4466
4467        let (result, _src, dst) = convert_img(
4468            &mut cpu_converter,
4469            src,
4470            dst,
4471            Rotation::None,
4472            Flip::None,
4473            Crop::no_crop(),
4474        );
4475        result.unwrap();
4476
4477        let target_image = crate::load_image(
4478            include_bytes!(concat!(
4479                env!("CARGO_MANIFEST_DIR"),
4480                "/../../testdata/zidane.jpg"
4481            )),
4482            Some(PixelFormat::Rgb),
4483            None,
4484        )
4485        .unwrap();
4486
4487        compare_images(&dst, &target_image, 0.98, function!());
4488    }
4489
4490    #[test]
4491    fn test_nv12_to_grey_cpu() {
4492        let file = include_bytes!(concat!(
4493            env!("CARGO_MANIFEST_DIR"),
4494            "/../../testdata/zidane.nv12"
4495        ))
4496        .to_vec();
4497        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4498        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4499            .copy_from_slice(&file);
4500
4501        let dst = TensorDyn::image(1280, 720, PixelFormat::Grey, DType::U8, None).unwrap();
4502        let mut cpu_converter = CPUProcessor::new();
4503
4504        let (result, _src, dst) = convert_img(
4505            &mut cpu_converter,
4506            src,
4507            dst,
4508            Rotation::None,
4509            Flip::None,
4510            Crop::no_crop(),
4511        );
4512        result.unwrap();
4513
4514        let target_image = crate::load_image(
4515            include_bytes!(concat!(
4516                env!("CARGO_MANIFEST_DIR"),
4517                "/../../testdata/zidane.jpg"
4518            )),
4519            Some(PixelFormat::Grey),
4520            None,
4521        )
4522        .unwrap();
4523
4524        compare_images(&dst, &target_image, 0.98, function!());
4525    }
4526
4527    #[test]
4528    fn test_nv12_to_yuyv_cpu() {
4529        let file = include_bytes!(concat!(
4530            env!("CARGO_MANIFEST_DIR"),
4531            "/../../testdata/zidane.nv12"
4532        ))
4533        .to_vec();
4534        let src = TensorDyn::image(1280, 720, PixelFormat::Nv12, DType::U8, None).unwrap();
4535        src.as_u8().unwrap().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)]
4536            .copy_from_slice(&file);
4537
4538        let dst = TensorDyn::image(1280, 720, PixelFormat::Yuyv, DType::U8, None).unwrap();
4539        let mut cpu_converter = CPUProcessor::new();
4540
4541        let (result, _src, dst) = convert_img(
4542            &mut cpu_converter,
4543            src,
4544            dst,
4545            Rotation::None,
4546            Flip::None,
4547            Crop::no_crop(),
4548        );
4549        result.unwrap();
4550
4551        let target_image = crate::load_image(
4552            include_bytes!(concat!(
4553                env!("CARGO_MANIFEST_DIR"),
4554                "/../../testdata/zidane.jpg"
4555            )),
4556            Some(PixelFormat::Rgb),
4557            None,
4558        )
4559        .unwrap();
4560
4561        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4562    }
4563
4564    #[test]
4565    fn test_cpu_resize_planar_rgb() {
4566        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
4567        #[rustfmt::skip]
4568        let src_image = [
4569                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
4570                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4571                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
4572                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4573        ];
4574        src.as_u8()
4575            .unwrap()
4576            .map()
4577            .unwrap()
4578            .as_mut_slice()
4579            .copy_from_slice(&src_image);
4580
4581        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
4582        let mut cpu_converter = CPUProcessor::new();
4583
4584        let (result, _src, cpu_dst) = convert_img(
4585            &mut cpu_converter,
4586            src,
4587            cpu_dst,
4588            Rotation::None,
4589            Flip::None,
4590            Crop::new()
4591                .with_dst_rect(Some(Rect {
4592                    left: 1,
4593                    top: 1,
4594                    width: 4,
4595                    height: 4,
4596                }))
4597                .with_dst_color(Some([114, 114, 114, 255])),
4598        );
4599        result.unwrap();
4600
4601        #[rustfmt::skip]
4602        let expected_dst = [
4603            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
4604            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
4605            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
4606        ];
4607
4608        assert_eq!(
4609            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
4610            &expected_dst
4611        );
4612    }
4613
4614    #[test]
4615    fn test_cpu_resize_planar_rgba() {
4616        let src = TensorDyn::image(4, 4, PixelFormat::Rgba, DType::U8, None).unwrap();
4617        #[rustfmt::skip]
4618        let src_image = [
4619                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
4620                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4621                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
4622                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
4623        ];
4624        src.as_u8()
4625            .unwrap()
4626            .map()
4627            .unwrap()
4628            .as_mut_slice()
4629            .copy_from_slice(&src_image);
4630
4631        let cpu_dst = TensorDyn::image(5, 5, PixelFormat::PlanarRgba, DType::U8, None).unwrap();
4632        let mut cpu_converter = CPUProcessor::new();
4633
4634        let (result, _src, cpu_dst) = convert_img(
4635            &mut cpu_converter,
4636            src,
4637            cpu_dst,
4638            Rotation::None,
4639            Flip::None,
4640            Crop::new()
4641                .with_dst_rect(Some(Rect {
4642                    left: 1,
4643                    top: 1,
4644                    width: 4,
4645                    height: 4,
4646                }))
4647                .with_dst_color(Some([114, 114, 114, 255])),
4648        );
4649        result.unwrap();
4650
4651        #[rustfmt::skip]
4652        let expected_dst = [
4653            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
4654            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
4655            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
4656            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
4657        ];
4658
4659        assert_eq!(
4660            cpu_dst.as_u8().unwrap().map().unwrap().as_slice(),
4661            &expected_dst
4662        );
4663    }
4664
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize_planar_rgb() {
        // Letterboxed resize into a planar-RGB destination: the OpenGL backend
        // must match the CPU backend's output within a 0.98 similarity score.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/../../testdata/test_image.jpg"
        ))
        .to_vec();
        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();

        // CPU reference output (planar RGB, host memory).
        let cpu_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // First pass: full-frame (no-crop) conversion into cpu_dst.
        // NOTE(review): the letterbox pass below overwrites/pads this result;
        // presumably this first pass pre-fills the buffer so the letterbox
        // borders are deterministic — confirm intent before simplifying.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        );
        result.unwrap();
        // 440x440 content region centered at (102, 102) with 114-grey padding,
        // the conventional letterbox used by detection models.
        let crop_letterbox = Crop::new()
            .with_dst_rect(Some(Rect {
                left: 102,
                top: 102,
                width: 440,
                height: 440,
            }))
            .with_dst_color(Some([114, 114, 114, 114]));
        // Second pass: CPU letterboxed conversion — this is the reference.
        let (result, src, cpu_dst) = convert_img(
            &mut cpu_converter,
            src,
            cpu_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();

        // Same letterboxed conversion through the threaded OpenGL backend.
        let gl_dst = TensorDyn::image(
            dst_width,
            dst_height,
            PixelFormat::PlanarRgb,
            DType::U8,
            None,
        )
        .unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        let (result, _src, gl_dst) = convert_img(
            &mut gl_converter,
            src,
            gl_dst,
            Rotation::None,
            Flip::None,
            crop_letterbox,
        );
        result.unwrap();
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
4748
4749    #[test]
4750    fn test_cpu_resize_nv16() {
4751        let file = include_bytes!(concat!(
4752            env!("CARGO_MANIFEST_DIR"),
4753            "/../../testdata/zidane.jpg"
4754        ))
4755        .to_vec();
4756        let src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
4757
4758        let cpu_nv16_dst = TensorDyn::image(640, 640, PixelFormat::Nv16, DType::U8, None).unwrap();
4759        let cpu_rgb_dst = TensorDyn::image(640, 640, PixelFormat::Rgb, DType::U8, None).unwrap();
4760        let mut cpu_converter = CPUProcessor::new();
4761        let crop = Crop::new()
4762            .with_dst_rect(Some(Rect {
4763                left: 20,
4764                top: 140,
4765                width: 600,
4766                height: 360,
4767            }))
4768            .with_dst_color(Some([255, 128, 0, 255]));
4769
4770        let (result, src, cpu_nv16_dst) = convert_img(
4771            &mut cpu_converter,
4772            src,
4773            cpu_nv16_dst,
4774            Rotation::None,
4775            Flip::None,
4776            crop,
4777        );
4778        result.unwrap();
4779
4780        let (result, _src, cpu_rgb_dst) = convert_img(
4781            &mut cpu_converter,
4782            src,
4783            cpu_rgb_dst,
4784            Rotation::None,
4785            Flip::None,
4786            crop,
4787        );
4788        result.unwrap();
4789        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4790    }
4791
4792    fn load_bytes_to_tensor(
4793        width: usize,
4794        height: usize,
4795        format: PixelFormat,
4796        memory: Option<TensorMemory>,
4797        bytes: &[u8],
4798    ) -> Result<TensorDyn, Error> {
4799        let src = TensorDyn::image(width, height, format, DType::U8, memory)?;
4800        src.as_u8()
4801            .unwrap()
4802            .map()?
4803            .as_mut_slice()
4804            .copy_from_slice(bytes);
4805        Ok(src)
4806    }
4807
4808    fn compare_images(img1: &TensorDyn, img2: &TensorDyn, threshold: f64, name: &str) {
4809        assert_eq!(img1.height(), img2.height(), "Heights differ");
4810        assert_eq!(img1.width(), img2.width(), "Widths differ");
4811        assert_eq!(
4812            img1.format().unwrap(),
4813            img2.format().unwrap(),
4814            "PixelFormat differ"
4815        );
4816        assert!(
4817            matches!(
4818                img1.format().unwrap(),
4819                PixelFormat::Rgb | PixelFormat::Rgba | PixelFormat::Grey | PixelFormat::PlanarRgb
4820            ),
4821            "format must be Rgb or Rgba for comparison"
4822        );
4823
4824        let image1 = match img1.format().unwrap() {
4825            PixelFormat::Rgb => image::RgbImage::from_vec(
4826                img1.width().unwrap() as u32,
4827                img1.height().unwrap() as u32,
4828                img1.as_u8().unwrap().map().unwrap().to_vec(),
4829            )
4830            .unwrap(),
4831            PixelFormat::Rgba => image::RgbaImage::from_vec(
4832                img1.width().unwrap() as u32,
4833                img1.height().unwrap() as u32,
4834                img1.as_u8().unwrap().map().unwrap().to_vec(),
4835            )
4836            .unwrap()
4837            .convert(),
4838            PixelFormat::Grey => image::GrayImage::from_vec(
4839                img1.width().unwrap() as u32,
4840                img1.height().unwrap() as u32,
4841                img1.as_u8().unwrap().map().unwrap().to_vec(),
4842            )
4843            .unwrap()
4844            .convert(),
4845            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4846                img1.width().unwrap() as u32,
4847                (img1.height().unwrap() * 3) as u32,
4848                img1.as_u8().unwrap().map().unwrap().to_vec(),
4849            )
4850            .unwrap()
4851            .convert(),
4852            _ => return,
4853        };
4854
4855        let image2 = match img2.format().unwrap() {
4856            PixelFormat::Rgb => image::RgbImage::from_vec(
4857                img2.width().unwrap() as u32,
4858                img2.height().unwrap() as u32,
4859                img2.as_u8().unwrap().map().unwrap().to_vec(),
4860            )
4861            .unwrap(),
4862            PixelFormat::Rgba => image::RgbaImage::from_vec(
4863                img2.width().unwrap() as u32,
4864                img2.height().unwrap() as u32,
4865                img2.as_u8().unwrap().map().unwrap().to_vec(),
4866            )
4867            .unwrap()
4868            .convert(),
4869            PixelFormat::Grey => image::GrayImage::from_vec(
4870                img2.width().unwrap() as u32,
4871                img2.height().unwrap() as u32,
4872                img2.as_u8().unwrap().map().unwrap().to_vec(),
4873            )
4874            .unwrap()
4875            .convert(),
4876            PixelFormat::PlanarRgb => image::GrayImage::from_vec(
4877                img2.width().unwrap() as u32,
4878                (img2.height().unwrap() * 3) as u32,
4879                img2.as_u8().unwrap().map().unwrap().to_vec(),
4880            )
4881            .unwrap()
4882            .convert(),
4883            _ => return,
4884        };
4885
4886        let similarity = image_compare::rgb_similarity_structure(
4887            &image_compare::Algorithm::RootMeanSquared,
4888            &image1,
4889            &image2,
4890        )
4891        .expect("Image Comparison failed");
4892        if similarity.score < threshold {
4893            // image1.save(format!("{name}_1.png"));
4894            // image2.save(format!("{name}_2.png"));
4895            similarity
4896                .image
4897                .to_color_map()
4898                .save(format!("{name}.png"))
4899                .unwrap();
4900            panic!(
4901                "{name}: converted image and target image have similarity score too low: {} < {}",
4902                similarity.score, threshold
4903            )
4904        }
4905    }
4906
4907    fn compare_images_convert_to_rgb(
4908        img1: &TensorDyn,
4909        img2: &TensorDyn,
4910        threshold: f64,
4911        name: &str,
4912    ) {
4913        assert_eq!(img1.height(), img2.height(), "Heights differ");
4914        assert_eq!(img1.width(), img2.width(), "Widths differ");
4915
4916        let mut img_rgb1 = TensorDyn::image(
4917            img1.width().unwrap(),
4918            img1.height().unwrap(),
4919            PixelFormat::Rgb,
4920            DType::U8,
4921            Some(TensorMemory::Mem),
4922        )
4923        .unwrap();
4924        let mut img_rgb2 = TensorDyn::image(
4925            img1.width().unwrap(),
4926            img1.height().unwrap(),
4927            PixelFormat::Rgb,
4928            DType::U8,
4929            Some(TensorMemory::Mem),
4930        )
4931        .unwrap();
4932        let mut __cv = CPUProcessor::default();
4933        let r1 = __cv.convert(
4934            img1,
4935            &mut img_rgb1,
4936            crate::Rotation::None,
4937            crate::Flip::None,
4938            crate::Crop::default(),
4939        );
4940        let r2 = __cv.convert(
4941            img2,
4942            &mut img_rgb2,
4943            crate::Rotation::None,
4944            crate::Flip::None,
4945            crate::Crop::default(),
4946        );
4947        if r1.is_err() || r2.is_err() {
4948            // Fallback: compare raw bytes as greyscale strip
4949            let w = img1.width().unwrap() as u32;
4950            let data1 = img1.as_u8().unwrap().map().unwrap().to_vec();
4951            let data2 = img2.as_u8().unwrap().map().unwrap().to_vec();
4952            let h1 = (data1.len() as u32) / w;
4953            let h2 = (data2.len() as u32) / w;
4954            let g1 = image::GrayImage::from_vec(w, h1, data1).unwrap();
4955            let g2 = image::GrayImage::from_vec(w, h2, data2).unwrap();
4956            let similarity = image_compare::gray_similarity_structure(
4957                &image_compare::Algorithm::RootMeanSquared,
4958                &g1,
4959                &g2,
4960            )
4961            .expect("Image Comparison failed");
4962            if similarity.score < threshold {
4963                panic!(
4964                    "{name}: converted image and target image have similarity score too low: {} < {}",
4965                    similarity.score, threshold
4966                )
4967            }
4968            return;
4969        }
4970
4971        let image1 = image::RgbImage::from_vec(
4972            img_rgb1.width().unwrap() as u32,
4973            img_rgb1.height().unwrap() as u32,
4974            img_rgb1.as_u8().unwrap().map().unwrap().to_vec(),
4975        )
4976        .unwrap();
4977
4978        let image2 = image::RgbImage::from_vec(
4979            img_rgb2.width().unwrap() as u32,
4980            img_rgb2.height().unwrap() as u32,
4981            img_rgb2.as_u8().unwrap().map().unwrap().to_vec(),
4982        )
4983        .unwrap();
4984
4985        let similarity = image_compare::rgb_similarity_structure(
4986            &image_compare::Algorithm::RootMeanSquared,
4987            &image1,
4988            &image2,
4989        )
4990        .expect("Image Comparison failed");
4991        if similarity.score < threshold {
4992            // image1.save(format!("{name}_1.png"));
4993            // image2.save(format!("{name}_2.png"));
4994            similarity
4995                .image
4996                .to_color_map()
4997                .save(format!("{name}.png"))
4998                .unwrap();
4999            panic!(
5000                "{name}: converted image and target image have similarity score too low: {} < {}",
5001                similarity.score, threshold
5002            )
5003        }
5004    }
5005
5006    // =========================================================================
5007    // PixelFormat::Nv12 Format Tests
5008    // =========================================================================
5009
5010    #[test]
5011    fn test_nv12_image_creation() {
5012        let width = 640;
5013        let height = 480;
5014        let img = TensorDyn::image(width, height, PixelFormat::Nv12, DType::U8, None).unwrap();
5015
5016        assert_eq!(img.width(), Some(width));
5017        assert_eq!(img.height(), Some(height));
5018        assert_eq!(img.format().unwrap(), PixelFormat::Nv12);
5019        // PixelFormat::Nv12 uses shape [H*3/2, W] to store Y plane + UV plane
5020        assert_eq!(img.as_u8().unwrap().shape(), &[height * 3 / 2, width]);
5021    }
5022
5023    #[test]
5024    fn test_nv12_channels() {
5025        let img = TensorDyn::image(640, 480, PixelFormat::Nv12, DType::U8, None).unwrap();
5026        // PixelFormat::Nv12.channels() returns 1 (luma plane)
5027        assert_eq!(img.format().unwrap().channels(), 1);
5028    }
5029
5030    // =========================================================================
5031    // Tensor Format Metadata Tests
5032    // =========================================================================
5033
5034    #[test]
5035    fn test_tensor_set_format_planar() {
5036        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
5037        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
5038        assert_eq!(tensor.format(), Some(PixelFormat::PlanarRgb));
5039        assert_eq!(tensor.width(), Some(640));
5040        assert_eq!(tensor.height(), Some(480));
5041    }
5042
5043    #[test]
5044    fn test_tensor_set_format_interleaved() {
5045        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
5046        tensor.set_format(PixelFormat::Rgba).unwrap();
5047        assert_eq!(tensor.format(), Some(PixelFormat::Rgba));
5048        assert_eq!(tensor.width(), Some(640));
5049        assert_eq!(tensor.height(), Some(480));
5050    }
5051
5052    #[test]
5053    fn test_tensordyn_image_rgb() {
5054        let img = TensorDyn::image(640, 480, PixelFormat::Rgb, DType::U8, None).unwrap();
5055        assert_eq!(img.width(), Some(640));
5056        assert_eq!(img.height(), Some(480));
5057        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5058    }
5059
5060    #[test]
5061    fn test_tensordyn_image_planar_rgb() {
5062        let img = TensorDyn::image(640, 480, PixelFormat::PlanarRgb, DType::U8, None).unwrap();
5063        assert_eq!(img.width(), Some(640));
5064        assert_eq!(img.height(), Some(480));
5065        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5066    }
5067
5068    #[test]
5069    fn test_rgb_int8_format() {
5070        // Int8 variant: same PixelFormat::Rgb but with DType::I8
5071        let img = TensorDyn::image(
5072            1280,
5073            720,
5074            PixelFormat::Rgb,
5075            DType::I8,
5076            Some(TensorMemory::Mem),
5077        )
5078        .unwrap();
5079        assert_eq!(img.width(), Some(1280));
5080        assert_eq!(img.height(), Some(720));
5081        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5082        assert_eq!(img.dtype(), DType::I8);
5083    }
5084
5085    #[test]
5086    fn test_planar_rgb_int8_format() {
5087        let img = TensorDyn::image(
5088            1280,
5089            720,
5090            PixelFormat::PlanarRgb,
5091            DType::I8,
5092            Some(TensorMemory::Mem),
5093        )
5094        .unwrap();
5095        assert_eq!(img.width(), Some(1280));
5096        assert_eq!(img.height(), Some(720));
5097        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5098        assert_eq!(img.dtype(), DType::I8);
5099    }
5100
5101    #[test]
5102    fn test_rgb_from_tensor() {
5103        let mut tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
5104        tensor.set_format(PixelFormat::Rgb).unwrap();
5105        let img = TensorDyn::from(tensor);
5106        assert_eq!(img.width(), Some(1280));
5107        assert_eq!(img.height(), Some(720));
5108        assert_eq!(img.format(), Some(PixelFormat::Rgb));
5109    }
5110
5111    #[test]
5112    fn test_planar_rgb_from_tensor() {
5113        let mut tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
5114        tensor.set_format(PixelFormat::PlanarRgb).unwrap();
5115        let img = TensorDyn::from(tensor);
5116        assert_eq!(img.width(), Some(1280));
5117        assert_eq!(img.height(), Some(720));
5118        assert_eq!(img.format(), Some(PixelFormat::PlanarRgb));
5119    }
5120
5121    #[test]
5122    fn test_dtype_determines_int8() {
5123        // DType::I8 indicates int8 data
5124        let u8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::U8, None).unwrap();
5125        let i8_img = TensorDyn::image(64, 64, PixelFormat::Rgb, DType::I8, None).unwrap();
5126        assert_eq!(u8_img.dtype(), DType::U8);
5127        assert_eq!(i8_img.dtype(), DType::I8);
5128    }
5129
5130    #[test]
5131    fn test_pixel_layout_packed_vs_planar() {
5132        // Packed vs planar layout classification
5133        assert_eq!(PixelFormat::Rgb.layout(), PixelLayout::Packed);
5134        assert_eq!(PixelFormat::Rgba.layout(), PixelLayout::Packed);
5135        assert_eq!(PixelFormat::PlanarRgb.layout(), PixelLayout::Planar);
5136        assert_eq!(PixelFormat::Nv12.layout(), PixelLayout::SemiPlanar);
5137    }
5138
5139    /// Integration test that exercises the PBO-to-PBO convert path.
5140    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
5141    /// then converts between them. Skipped when GL is unavailable or the
5142    /// backend is not PBO (e.g. DMA-buf systems).
5143    #[cfg(target_os = "linux")]
5144    #[cfg(feature = "opengl")]
5145    #[test]
5146    fn test_convert_pbo_to_pbo() {
5147        let mut converter = ImageProcessor::new().unwrap();
5148
5149        // Skip if GL is not available or backend is not PBO
5150        let is_pbo = converter
5151            .opengl
5152            .as_ref()
5153            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
5154        if !is_pbo {
5155            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
5156            return;
5157        }
5158
5159        let src_w = 640;
5160        let src_h = 480;
5161        let dst_w = 320;
5162        let dst_h = 240;
5163
5164        // Create PBO-backed source image
5165        let pbo_src = converter
5166            .create_image(src_w, src_h, PixelFormat::Rgba, DType::U8, None)
5167            .unwrap();
5168        assert_eq!(
5169            pbo_src.as_u8().unwrap().memory(),
5170            TensorMemory::Pbo,
5171            "create_image should produce a PBO tensor"
5172        );
5173
5174        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
5175        let file = include_bytes!(concat!(
5176            env!("CARGO_MANIFEST_DIR"),
5177            "/../../testdata/zidane.jpg"
5178        ))
5179        .to_vec();
5180        let jpeg_src = crate::load_image(&file, Some(PixelFormat::Rgba), None).unwrap();
5181
5182        // Resize JPEG into a Mem temp of the right size, then copy into PBO
5183        let mem_src = TensorDyn::image(
5184            src_w,
5185            src_h,
5186            PixelFormat::Rgba,
5187            DType::U8,
5188            Some(TensorMemory::Mem),
5189        )
5190        .unwrap();
5191        let (result, _jpeg_src, mem_src) = convert_img(
5192            &mut CPUProcessor::new(),
5193            jpeg_src,
5194            mem_src,
5195            Rotation::None,
5196            Flip::None,
5197            Crop::no_crop(),
5198        );
5199        result.unwrap();
5200
5201        // Copy pixel data into the PBO source by mapping it
5202        {
5203            let src_data = mem_src.as_u8().unwrap().map().unwrap();
5204            let mut pbo_map = pbo_src.as_u8().unwrap().map().unwrap();
5205            pbo_map.copy_from_slice(&src_data);
5206        }
5207
5208        // Create PBO-backed destination image
5209        let pbo_dst = converter
5210            .create_image(dst_w, dst_h, PixelFormat::Rgba, DType::U8, None)
5211            .unwrap();
5212        assert_eq!(pbo_dst.as_u8().unwrap().memory(), TensorMemory::Pbo);
5213
5214        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
5215        let mut pbo_dst = pbo_dst;
5216        let result = converter.convert(
5217            &pbo_src,
5218            &mut pbo_dst,
5219            Rotation::None,
5220            Flip::None,
5221            Crop::no_crop(),
5222        );
5223        result.unwrap();
5224
5225        // Verify: compare with CPU-only conversion of the same input
5226        let cpu_dst = TensorDyn::image(
5227            dst_w,
5228            dst_h,
5229            PixelFormat::Rgba,
5230            DType::U8,
5231            Some(TensorMemory::Mem),
5232        )
5233        .unwrap();
5234        let (result, _mem_src, cpu_dst) = convert_img(
5235            &mut CPUProcessor::new(),
5236            mem_src,
5237            cpu_dst,
5238            Rotation::None,
5239            Flip::None,
5240            Crop::no_crop(),
5241        );
5242        result.unwrap();
5243
5244        let pbo_dst_img = {
5245            let mut __t = pbo_dst.into_u8().unwrap();
5246            __t.set_format(PixelFormat::Rgba).unwrap();
5247            TensorDyn::from(__t)
5248        };
5249        compare_images(&pbo_dst_img, &cpu_dst, 0.95, function!());
5250        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
5251    }
5252
5253    #[test]
5254    fn test_image_bgra() {
5255        let img = TensorDyn::image(
5256            640,
5257            480,
5258            PixelFormat::Bgra,
5259            DType::U8,
5260            Some(edgefirst_tensor::TensorMemory::Mem),
5261        )
5262        .unwrap();
5263        assert_eq!(img.width(), Some(640));
5264        assert_eq!(img.height(), Some(480));
5265        assert_eq!(img.format().unwrap().channels(), 4);
5266        assert_eq!(img.format().unwrap(), PixelFormat::Bgra);
5267    }
5268
5269    // ========================================================================
5270    // Tests for EDGEFIRST_FORCE_BACKEND env var
5271    // ========================================================================
5272
5273    #[test]
5274    fn test_force_backend_cpu() {
5275        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5276        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5277        let result = ImageProcessor::new();
5278        match original {
5279            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5280            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5281        }
5282        let converter = result.unwrap();
5283        assert!(converter.cpu.is_some());
5284        assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
5285    }
5286
5287    #[test]
5288    fn test_force_backend_invalid() {
5289        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5290        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
5291        let result = ImageProcessor::new();
5292        match original {
5293            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5294            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5295        }
5296        assert!(
5297            matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
5298            "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
5299        );
5300    }
5301
5302    #[test]
5303    fn test_force_backend_unset() {
5304        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5305        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
5306        let result = ImageProcessor::new();
5307        match original {
5308            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5309            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5310        }
5311        let converter = result.unwrap();
5312        assert!(converter.forced_backend.is_none());
5313    }
5314
5315    // ========================================================================
5316    // Tests for hybrid mask path error handling
5317    // ========================================================================
5318
5319    #[test]
5320    fn test_draw_proto_masks_no_cpu_returns_error() {
5321        // Disable CPU backend to trigger the error path
5322        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
5323        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
5324        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
5325        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
5326        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
5327        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
5328
5329        let result = ImageProcessor::new();
5330
5331        match original_cpu {
5332            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
5333            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
5334        }
5335        match original_gl {
5336            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
5337            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
5338        }
5339        match original_g2d {
5340            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
5341            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
5342        }
5343
5344        let mut converter = result.unwrap();
5345        assert!(converter.cpu.is_none(), "CPU should be disabled");
5346
5347        let dst = TensorDyn::image(
5348            640,
5349            480,
5350            PixelFormat::Rgba,
5351            DType::U8,
5352            Some(TensorMemory::Mem),
5353        )
5354        .unwrap();
5355        let mut dst_dyn = dst;
5356        let det = [DetectBox {
5357            bbox: edgefirst_decoder::BoundingBox {
5358                xmin: 0.1,
5359                ymin: 0.1,
5360                xmax: 0.5,
5361                ymax: 0.5,
5362            },
5363            score: 0.9,
5364            label: 0,
5365        }];
5366        let proto_data = ProtoData {
5367            mask_coefficients: vec![vec![0.5; 4]],
5368            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5369        };
5370        let result =
5371            converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
5372        assert!(
5373            matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
5374            "draw_proto_masks without CPU should return Internal error: {result:?}"
5375        );
5376    }
5377
5378    #[test]
5379    fn test_draw_proto_masks_cpu_fallback_works() {
5380        // Force CPU-only backend to ensure the CPU fallback path executes
5381        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5382        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5383        let result = ImageProcessor::new();
5384        match original {
5385            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5386            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5387        }
5388
5389        let mut converter = result.unwrap();
5390        assert!(converter.cpu.is_some());
5391
5392        let dst = TensorDyn::image(
5393            64,
5394            64,
5395            PixelFormat::Rgba,
5396            DType::U8,
5397            Some(TensorMemory::Mem),
5398        )
5399        .unwrap();
5400        let mut dst_dyn = dst;
5401        let det = [DetectBox {
5402            bbox: edgefirst_decoder::BoundingBox {
5403                xmin: 0.1,
5404                ymin: 0.1,
5405                xmax: 0.5,
5406                ymax: 0.5,
5407            },
5408            score: 0.9,
5409            label: 0,
5410        }];
5411        let proto_data = ProtoData {
5412            mask_coefficients: vec![vec![0.5; 4]],
5413            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
5414        };
5415        let result =
5416            converter.draw_proto_masks(&mut dst_dyn, &det, &proto_data, Default::default());
5417        assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
5418    }
5419
5420    #[test]
5421    fn test_set_format_then_cpu_convert() {
5422        // Force CPU backend (save/restore to avoid leaking into other tests)
5423        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
5424        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
5425        let mut processor = ImageProcessor::new().unwrap();
5426        match original {
5427            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
5428            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
5429        }
5430
5431        // Load a source image
5432        let image = include_bytes!(concat!(
5433            env!("CARGO_MANIFEST_DIR"),
5434            "/../../testdata/zidane.jpg"
5435        ));
5436        let src = load_image(image, Some(PixelFormat::Rgba), None).unwrap();
5437
5438        // Create a raw tensor, then attach format — simulating the from_fd workflow
5439        let mut dst =
5440            TensorDyn::new(&[640, 640, 3], DType::U8, Some(TensorMemory::Mem), None).unwrap();
5441        dst.set_format(PixelFormat::Rgb).unwrap();
5442
5443        // Convert should work with the set_format-annotated tensor
5444        processor
5445            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::default())
5446            .unwrap();
5447
5448        // Verify format survived conversion
5449        assert_eq!(dst.format(), Some(PixelFormat::Rgb));
5450        assert_eq!(dst.width(), Some(640));
5451        assert_eq!(dst.height(), Some(640));
5452    }
5453}