Skip to main content

edgefirst_image/
lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate defines a `TensorImage` struct that represents an image as a
21tensor, along with its format information. It also provides an
22`ImageProcessor` struct that manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
29# fn main() -> Result<(), edgefirst_image::Error> {
30let image = include_bytes!("../../../testdata/zidane.jpg");
31let img = TensorImage::load(image, Some(RGBA), None)?;
32let mut converter = ImageProcessor::new()?;
33let mut dst = TensorImage::new(640, 480, RGB, None)?;
34converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
35# Ok(())
36# }
37```
38
39## Environment Variables
40The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
41following environment variables:
42- `EDGEFIRST_FORCE_BACKEND`: When set to `cpu`, `g2d`, or `opengl` (case-insensitive),
43  only that single backend is initialized and no fallback chain is used. If the
44  forced backend fails to initialize, an error is returned immediately. This is
45  useful for benchmarking individual backends in isolation. When this variable is
46  set, the `EDGEFIRST_DISABLE_*` variables are ignored.
47- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
48  conversion, forcing the use of CPU or other available hardware methods.
49- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
50  conversion, forcing the use of CPU or other available hardware methods.
51- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
52  conversion, forcing the use of hardware acceleration methods. If no hardware
53  acceleration methods are available, an error will be returned when attempting
54  to create an `ImageProcessor`.
55
Additionally, the `TensorMemory` used by default allocations can be controlled using the
`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor allocations
use plain system memory. This disables the use of specialized memory regions for tensors, and
therefore hardware acceleration, but typically improves the performance of the CPU converter.
60*/
61#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
62
63use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
64use edgefirst_tensor::{Tensor, TensorMemory, TensorTrait as _};
65use enum_dispatch::enum_dispatch;
66use four_char_code::{four_char_code, FourCharCode};
67use std::{fmt::Display, time::Instant};
68use zune_jpeg::{
69    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
70    JpegDecoder,
71};
72use zune_png::PngDecoder;
73
74pub use cpu::CPUProcessor;
75pub use error::{Error, Result};
76#[cfg(target_os = "linux")]
77pub use g2d::G2DProcessor;
78#[cfg(target_os = "linux")]
79#[cfg(feature = "opengl")]
80pub use opengl_headless::GLProcessorThreaded;
81#[cfg(target_os = "linux")]
82#[cfg(feature = "opengl")]
83pub use opengl_headless::Int8InterpolationMode;
84#[cfg(target_os = "linux")]
85#[cfg(feature = "opengl")]
86pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
87
/// Result of rendering a single per-instance grayscale mask.
///
/// Contains the bounding-box region in output image coordinates and the
/// raw uint8 pixel data (RED channel only, 0–255 representing sigmoid output).
/// All coordinates and sizes are in pixels of the output image.
#[derive(Debug, Clone)]
pub(crate) struct MaskResult {
    /// X offset of the bbox region in the output image.
    pub(crate) x: usize,
    /// Y offset of the bbox region in the output image.
    pub(crate) y: usize,
    /// Width of the bbox region.
    pub(crate) w: usize,
    /// Height of the bbox region.
    pub(crate) h: usize,
    /// Grayscale pixel data (w * h bytes, row-major).
    pub(crate) pixels: Vec<u8>,
}
105
/// Region metadata for a single detection within a compact mask atlas.
///
/// The atlas packs padded bounding-box strips vertically.  This struct
/// records where each detection's strip lives in the atlas and how it
/// maps back to the original output coordinate space.  All fields are
/// expressed in pixels.
#[must_use]
#[derive(Debug, Clone, Copy)]
pub struct MaskRegion {
    /// Row offset of this detection's strip in the atlas.
    pub atlas_y_offset: usize,
    /// Left edge of the padded bbox in output image coordinates.
    pub padded_x: usize,
    /// Top edge of the padded bbox in output image coordinates.
    pub padded_y: usize,
    /// Width of the padded bbox.
    pub padded_w: usize,
    /// Height of the padded bbox (= number of atlas rows for this strip).
    pub padded_h: usize,
    /// Original (unpadded) bbox left edge in output image coordinates.
    pub bbox_x: usize,
    /// Original (unpadded) bbox top edge in output image coordinates.
    pub bbox_y: usize,
    /// Original (unpadded) bbox width.
    pub bbox_w: usize,
    /// Original (unpadded) bbox height.
    pub bbox_h: usize,
}
133
134mod cpu;
135mod error;
136mod g2d;
137mod opengl_headless;
138
/// 8 bit interleaved YUV422, limited range
pub const YUYV: FourCharCode = four_char_code!("YUYV");
/// 8 bit interleaved YUV422 (VYUY byte order), limited range
pub const VYUY: FourCharCode = four_char_code!("VYUY");
/// 8 bit planar YUV420, limited range
pub const NV12: FourCharCode = four_char_code!("NV12");
/// 8 bit planar YUV422, limited range
pub const NV16: FourCharCode = four_char_code!("NV16");
/// 8 bit RGBA
pub const RGBA: FourCharCode = four_char_code!("RGBA");
/// 8 bit BGRA (byte order: B, G, R, A). Used by Cairo/Wayland (ARGB32 on
/// little-endian).
pub const BGRA: FourCharCode = four_char_code!("BGRA");
/// 8 bit RGB (note the trailing space in the FourCC "RGB ")
pub const RGB: FourCharCode = four_char_code!("RGB ");
/// 8 bit grayscale, full range (FourCC "Y800")
pub const GREY: FourCharCode = four_char_code!("Y800");

/// 8 bit planar RGB (channels-first), tensor shape `[3, H, W]`.
// TODO: planar RGB is 8BPS? https://fourcc.org/8bps/
pub const PLANAR_RGB: FourCharCode = four_char_code!("8BPS");

/// 8 bit planar RGBA (channels-first), tensor shape `[4, H, W]`.
// TODO: What fourcc code is planar RGBA?
pub const PLANAR_RGBA: FourCharCode = four_char_code!("8BPA");

/// Packed RGB with uint8→int8 XOR 0x80 reinterpretation.
/// The underlying bytes are uint8 with MSB flipped; when cast to i8, values
/// map correctly: uint8 0 → int8 -128, uint8 128 → int8 0, uint8 255 → int8 127.
pub const RGB_INT8: FourCharCode = four_char_code!("RGBi");

/// Planar RGB (channels-first) with uint8→int8 XOR 0x80 reinterpretation.
/// The underlying bytes are uint8 with MSB flipped; when cast to i8, values
/// map correctly: uint8 0 → int8 -128, uint8 128 → int8 0, uint8 255 → int8 127.
/// Tensor shape is `[3, H, W]` (channels-first).
pub const PLANAR_RGB_INT8: FourCharCode = four_char_code!("8BPi");
173
/// An image represented as a tensor with associated format information.
#[derive(Debug)]
pub struct TensorImage {
    // Backing pixel storage; the tensor shape encodes the layout
    // (see `TensorImage::from_tensor` for the per-format conventions).
    tensor: Tensor<u8>,
    // Pixel format identifier (e.g. RGB, RGBA, NV12).
    fourcc: FourCharCode,
    // Cached flag: true for channels-first formats such as PLANAR_RGB.
    is_planar: bool,
}
181
182impl TensorImage {
183    /// Creates a new `TensorImage` with the specified width, height, format,
184    /// and memory type.
185    ///
186    /// # Examples
187    /// ```rust
188    /// use edgefirst_image::{RGB, TensorImage};
189    /// use edgefirst_tensor::TensorMemory;
190    /// # fn main() -> Result<(), edgefirst_image::Error> {
191    /// let img = TensorImage::new(640, 480, RGB, Some(TensorMemory::Mem))?;
192    /// assert_eq!(img.width(), 640);
193    /// assert_eq!(img.height(), 480);
194    /// assert_eq!(img.fourcc(), RGB);
195    /// assert!(!img.is_planar());
196    /// # Ok(())
197    /// # }
198    /// ```
199    pub fn new(
200        width: usize,
201        height: usize,
202        fourcc: FourCharCode,
203        memory: Option<TensorMemory>,
204    ) -> Result<Self> {
205        let channels = fourcc_channels(fourcc)?;
206        let is_planar = fourcc_planar(fourcc)?;
207
208        // NV12 is semi-planar with Y plane (W×H) + UV plane (W×H/2)
209        // Total bytes = W × H × 1.5. Use shape [H*3/2, W] to encode this.
210        if fourcc == NV12 {
211            let shape = vec![height * 3 / 2, width];
212            let tensor = Tensor::new(&shape, memory, None)?;
213
214            return Ok(Self {
215                tensor,
216                fourcc,
217                is_planar,
218            });
219        }
220
221        // NV16 is semi-planar with Y plane (W×H) + UV plane (W×H)
222        // Total bytes = W × H × 2. Use shape [H*2, W] to encode this.
223        if fourcc == NV16 {
224            let shape = vec![height * 2, width];
225            let tensor = Tensor::new(&shape, memory, None)?;
226
227            return Ok(Self {
228                tensor,
229                fourcc,
230                is_planar,
231            });
232        }
233
234        if is_planar {
235            let shape = vec![channels, height, width];
236            let tensor = Tensor::new(&shape, memory, None)?;
237
238            return Ok(Self {
239                tensor,
240                fourcc,
241                is_planar,
242            });
243        }
244
245        let shape = vec![height, width, channels];
246        let tensor = Tensor::new(&shape, memory, None)?;
247
248        Ok(Self {
249            tensor,
250            fourcc,
251            is_planar,
252        })
253    }
254
255    /// Creates a new `TensorImage` from an existing tensor and specified
256    /// format.
257    ///
258    /// The required tensor shape depends on the pixel format:
259    ///
260    /// | Format | Shape | Description |
261    /// |--------|-------|-------------|
262    /// | `RGB`  | `[H, W, 3]` | 3-channel interleaved |
263    /// | `RGBA` | `[H, W, 4]` | 4-channel interleaved |
264    /// | `GREY` | `[H, W, 1]` | Single-channel grayscale |
265    /// | `YUYV` | `[H, W, 2]` | YUV 4:2:2 interleaved |
266    /// | `PLANAR_RGB`  | `[3, H, W]` | Channels-first (3 planes) |
267    /// | `PLANAR_RGBA` | `[4, H, W]` | Channels-first (4 planes) |
268    /// | `RGB_INT8` | `[H, W, 3]` | Packed RGB, int8 via XOR 0x80 |
269    /// | `PLANAR_RGB_INT8` | `[3, H, W]` | Planar RGB, int8 via XOR 0x80 |
270    /// | `NV12` | `[H*3/2, W]` | Semi-planar YUV 4:2:0 (2D) |
271    /// | `NV16` | `[H*2, W]`   | Semi-planar YUV 4:2:2 (2D) |
272    ///
273    /// Most formats use a 3D tensor where the channel dimension matches
274    /// the format's channel count. The semi-planar formats NV12 and NV16
275    /// are special: the Y and UV planes have different heights, so the
276    /// data cannot be described as `[H, W, C]`. Instead the contiguous
277    /// memory is represented as a 2D tensor whose first dimension encodes
278    /// the total byte height (Y rows + UV rows).
279    ///
280    /// # Examples
281    ///
282    /// RGB (3D interleaved):
283    /// ```rust
284    /// use edgefirst_image::{RGB, TensorImage};
285    /// use edgefirst_tensor::Tensor;
286    ///  # fn main() -> Result<(), edgefirst_image::Error> {
287    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
288    /// let img = TensorImage::from_tensor(tensor, RGB)?;
289    /// assert_eq!(img.width(), 1280);
290    /// assert_eq!(img.height(), 720);
291    /// assert_eq!(img.fourcc(), RGB);
292    /// # Ok(())
293    /// # }
294    /// ```
295    ///
296    /// GREY (3D with 1 channel):
297    /// ```rust
298    /// use edgefirst_image::{GREY, TensorImage};
299    /// use edgefirst_tensor::Tensor;
300    ///  # fn main() -> Result<(), edgefirst_image::Error> {
301    /// let tensor = Tensor::new(&[480, 640, 1], None, None)?;
302    /// let img = TensorImage::from_tensor(tensor, GREY)?;
303    /// assert_eq!(img.width(), 640);
304    /// assert_eq!(img.height(), 480);
305    /// # Ok(())
306    /// # }
307    /// ```
308    ///
309    /// NV12 (2D semi-planar, height*3/2 rows):
310    /// ```rust
311    /// use edgefirst_image::{NV12, TensorImage};
312    /// use edgefirst_tensor::Tensor;
313    ///  # fn main() -> Result<(), edgefirst_image::Error> {
314    /// // 1080p NV12: 1080 Y rows + 540 UV rows = 1620 total rows
315    /// let tensor = Tensor::new(&[1620, 1920], None, None)?;
316    /// let img = TensorImage::from_tensor(tensor, NV12)?;
317    /// assert_eq!(img.width(), 1920);
318    /// assert_eq!(img.height(), 1080);
319    /// # Ok(())
320    /// # }
321    /// ```
322    pub fn from_tensor(tensor: Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
323        let shape = tensor.shape();
324        let is_planar = fourcc_planar(fourcc)?;
325
326        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
327        if fourcc == NV12 || fourcc == NV16 {
328            if shape.len() != 2 {
329                return Err(Error::InvalidShape(format!(
330                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
331                    fourcc.to_string(),
332                    shape.len(),
333                    shape
334                )));
335            }
336            return Ok(Self {
337                tensor,
338                fourcc,
339                is_planar,
340            });
341        }
342
343        // All other formats use 3D shape
344        if shape.len() != 3 {
345            return Err(Error::InvalidShape(format!(
346                "Tensor shape must have 3 dimensions, got {}: {:?}",
347                shape.len(),
348                shape
349            )));
350        }
351        let channels = if is_planar { shape[0] } else { shape[2] };
352
353        if fourcc_channels(fourcc)? != channels {
354            return Err(Error::InvalidShape(format!(
355                "Invalid tensor shape {:?} for format {}",
356                shape,
357                fourcc.to_string()
358            )));
359        }
360
361        Ok(Self {
362            tensor,
363            fourcc,
364            is_planar,
365        })
366    }
367
368    /// Loads an image from the given byte slice, attempting to decode it as
369    /// JPEG or PNG format. Exif orientation is supported. The default format is
370    /// RGB.
371    ///
372    /// # Examples
373    /// ```rust
374    /// use edgefirst_image::{RGBA, TensorImage};
375    /// use edgefirst_tensor::TensorMemory;
376    /// # fn main() -> Result<(), edgefirst_image::Error> {
377    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.png");
378    /// let img = TensorImage::load(jpeg_bytes, Some(RGBA), Some(TensorMemory::Mem))?;
379    /// assert_eq!(img.width(), 1280);
380    /// assert_eq!(img.height(), 720);
381    /// assert_eq!(img.fourcc(), RGBA);
382    /// # Ok(())
383    /// # }
384    /// ```
385    pub fn load(
386        image: &[u8],
387        format: Option<FourCharCode>,
388        memory: Option<TensorMemory>,
389    ) -> Result<Self> {
390        if let Ok(i) = Self::load_jpeg(image, format, memory) {
391            return Ok(i);
392        }
393        if let Ok(i) = Self::load_png(image, format, memory) {
394            return Ok(i);
395        }
396
397        Err(Error::NotSupported(
398            "Could not decode as jpeg or png".to_string(),
399        ))
400    }
401
    /// Loads a JPEG image from the given byte slice. Supports EXIF orientation.
    /// The default format is RGB.
    ///
    /// Decoding is staged: the JPEG is decoded in the decoder's output
    /// colorspace, converted to the requested format if they differ, and
    /// finally rotated/flipped per the EXIF orientation if one is present.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.jpg");
    /// let img = TensorImage::load_jpeg(jpeg_bytes, Some(RGB), Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn load_jpeg(
        image: &[u8],
        format: Option<FourCharCode>,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        // Map the requested FourCC to the decoder's colorspace; only RGB,
        // RGBA, and GREY are decodable directly.
        let colour = match format {
            Some(RGB) => ColorSpace::RGB,
            Some(RGBA) => ColorSpace::RGBA,
            Some(GREY) => ColorSpace::Luma,
            None => ColorSpace::RGB,
            Some(f) => {
                return Err(Error::NotSupported(format!(
                    "Unsupported image format {}",
                    f.display()
                )));
            }
        };
        let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
        let mut decoder = JpegDecoder::new_with_options(image, options);
        decoder.decode_headers()?;

        let image_info = decoder.info().ok_or(Error::Internal(
            "JPEG did not return decoded image info".to_string(),
        ))?;

        // The decoder may settle on a different output colorspace than the
        // one requested; read back what it will actually produce.
        let converted_color_space = decoder
            .get_output_colorspace()
            .ok_or(Error::Internal("No output colorspace".to_string()))?;

        let converted_color_space = match converted_color_space {
            ColorSpace::RGB => RGB,
            ColorSpace::RGBA => RGBA,
            ColorSpace::Luma => GREY,
            _ => {
                return Err(Error::NotSupported(
                    "Unsupported JPEG decoder output".to_string(),
                ));
            }
        };

        let dest_format = format.unwrap_or(converted_color_space);

        let (rotation, flip) = decoder
            .exif()
            .map(|x| Self::read_exif_orientation(x))
            .unwrap_or((Rotation::None, Flip::None));

        if (rotation, flip) == (Rotation::None, Flip::None) {
            // Fast path: no EXIF transform needed. Allocate the final image
            // (possibly in specialized memory) and decode straight into it.
            let mut img = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                dest_format,
                memory,
            )?;

            if converted_color_space != dest_format {
                // Decoder output differs from the requested format: decode
                // into a temporary system-memory image, then convert.
                let tmp = Self::new(
                    image_info.width as usize,
                    image_info.height as usize,
                    converted_color_space,
                    Some(TensorMemory::Mem),
                )?;

                decoder.decode_into(&mut tmp.tensor.map()?)?;

                CPUProcessor::convert_format(&tmp, &mut img)?;
                return Ok(img);
            }
            decoder.decode_into(&mut img.tensor.map()?)?;
            return Ok(img);
        }

        // EXIF orientation present: decode (and convert if required) into a
        // temporary system-memory image, then rotate/flip into the final
        // buffer allocated with the caller's requested memory type.
        let mut tmp = Self::new(
            image_info.width as usize,
            image_info.height as usize,
            dest_format,
            Some(TensorMemory::Mem),
        )?;

        if converted_color_space != dest_format {
            let tmp2 = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                converted_color_space,
                Some(TensorMemory::Mem),
            )?;

            decoder.decode_into(&mut tmp2.tensor.map()?)?;

            CPUProcessor::convert_format(&tmp2, &mut tmp)?;
        } else {
            decoder.decode_into(&mut tmp.tensor.map()?)?;
        }

        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
    }
514
515    /// Loads a PNG image from the given byte slice. Supports EXIF orientation.
516    /// The default format is RGB.
517    ///
518    /// # Examples
519    /// ```rust
520    /// use edgefirst_image::{RGB, TensorImage};
521    /// use edgefirst_tensor::TensorMemory;
522    /// # fn main() -> Result<(), edgefirst_image::Error> {
523    /// let png_bytes = include_bytes!("../../../testdata/zidane.png");
524    /// let img = TensorImage::load_png(png_bytes, Some(RGB), Some(TensorMemory::Mem))?;
525    /// assert_eq!(img.width(), 1280);
526    /// assert_eq!(img.height(), 720);
527    /// assert_eq!(img.fourcc(), RGB);
528    /// # Ok(())
529    /// # }
530    /// ```
531    pub fn load_png(
532        image: &[u8],
533        format: Option<FourCharCode>,
534        memory: Option<TensorMemory>,
535    ) -> Result<Self> {
536        let format = format.unwrap_or(RGB);
537        let alpha = match format {
538            RGB => false,
539            RGBA => true,
540            _ => {
541                return Err(Error::NotImplemented(
542                    "Unsupported image format".to_string(),
543                ));
544            }
545        };
546
547        let options = DecoderOptions::default()
548            .png_set_add_alpha_channel(alpha)
549            .png_set_decode_animated(false);
550        let mut decoder = PngDecoder::new_with_options(image, options);
551        decoder.decode_headers()?;
552        let image_info = decoder.get_info().ok_or(Error::Internal(
553            "PNG did not return decoded image info".to_string(),
554        ))?;
555
556        let (rotation, flip) = image_info
557            .exif
558            .as_ref()
559            .map(|x| Self::read_exif_orientation(x))
560            .unwrap_or((Rotation::None, Flip::None));
561
562        if (rotation, flip) == (Rotation::None, Flip::None) {
563            let img = Self::new(image_info.width, image_info.height, format, memory)?;
564            decoder.decode_into(&mut img.tensor.map()?)?;
565            return Ok(img);
566        }
567
568        let tmp = Self::new(
569            image_info.width,
570            image_info.height,
571            format,
572            Some(TensorMemory::Mem),
573        )?;
574        decoder.decode_into(&mut tmp.tensor.map()?)?;
575
576        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
577    }
578
    /// Parses raw EXIF bytes and maps the orientation tag (values 1–8, per
    /// the Exif specification) to an equivalent (rotation, flip) pair.
    ///
    /// Returns `(Rotation::None, Flip::None)` when the EXIF data cannot be
    /// parsed, the orientation tag is absent, or the value is outside the
    /// valid range.
    fn read_exif_orientation(exif_: &[u8]) -> (Rotation, Flip) {
        let exifreader = exif::Reader::new();
        let Ok(exif_) = exifreader.read_raw(exif_.to_vec()) else {
            return (Rotation::None, Flip::None);
        };
        let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
            return (Rotation::None, Flip::None);
        };
        // Mapping of the eight standard EXIF orientation codes.
        match orientation.value.get_uint(0) {
            Some(1) => (Rotation::None, Flip::None),
            Some(2) => (Rotation::None, Flip::Horizontal),
            Some(3) => (Rotation::Rotate180, Flip::None),
            Some(4) => (Rotation::Rotate180, Flip::Horizontal),
            Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
            Some(6) => (Rotation::Clockwise90, Flip::None),
            Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
            Some(8) => (Rotation::CounterClockwise90, Flip::None),
            Some(v) => {
                log::warn!("broken orientation EXIF value: {v}");
                (Rotation::None, Flip::None)
            }
            None => (Rotation::None, Flip::None),
        }
    }
603
604    /// Saves the image as a JPEG file at the specified path with the given
605    /// quality. Only RGB and RGBA formats are supported.
606    ///
607    /// # Examples
608    /// ```rust
609    /// use edgefirst_image::{RGB, TensorImage};
610    /// use edgefirst_tensor::Tensor;
611    ///  # fn main() -> Result<(), edgefirst_image::Error> {
612    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
613    /// let img = TensorImage::from_tensor(tensor, RGB)?;
614    /// let save_path = "/tmp/output.jpg";
615    /// img.save_jpeg(save_path, 90)?;
616    /// # Ok(())
617    /// # }
618    pub fn save_jpeg(&self, path: &str, quality: u8) -> Result<()> {
619        if self.is_planar {
620            return Err(Error::NotImplemented(
621                "Saving planar images is not supported".to_string(),
622            ));
623        }
624
625        let colour = if self.fourcc == RGB {
626            jpeg_encoder::ColorType::Rgb
627        } else if self.fourcc == RGBA {
628            jpeg_encoder::ColorType::Rgba
629        } else {
630            return Err(Error::NotImplemented(
631                "Unsupported image format for saving".to_string(),
632            ));
633        };
634
635        let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
636        let tensor_map = self.tensor.map()?;
637
638        encoder.encode(
639            &tensor_map,
640            self.width() as u16,
641            self.height() as u16,
642            colour,
643        )?;
644
645        Ok(())
646    }
647
    /// Returns a reference to the underlying tensor.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// let underlying_tensor = img.tensor();
    /// assert_eq!(underlying_tensor.name(), "Tensor");
    /// # Ok(())
    /// # }
    /// ```
    pub fn tensor(&self) -> &Tensor<u8> {
        &self.tensor
    }
664
    /// Returns the FourCC code representing the image format.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }
680
    /// Override the FourCC format tag without touching the underlying tensor.
    /// Used internally for int8 ↔ uint8 format aliasing where the pixel layout
    /// is identical and only the interpretation differs.
    ///
    /// Note: `is_planar` is intentionally left unchanged, so this must only
    /// be used between formats with the same planarity.
    pub(crate) fn set_fourcc(&mut self, fourcc: FourCharCode) {
        self.fourcc = fourcc;
    }
687
    /// Returns `true` when the image uses a planar (channels-first) layout.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert!(!img.is_planar());
    /// # Ok(())
    /// # }
    /// ```
    pub fn is_planar(&self) -> bool {
        self.is_planar
    }
701
702    /// # Examples
703    /// ```rust
704    /// use edgefirst_image::{RGB, TensorImage};
705    /// use edgefirst_tensor::{Tensor, TensorTrait};
706    ///  # fn main() -> Result<(), edgefirst_image::Error> {
707    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
708    /// let img = TensorImage::from_tensor(tensor, RGB)?;
709    /// assert_eq!(img.width(), 1280);
710    /// # Ok(())
711    /// # }
712    pub fn width(&self) -> usize {
713        // NV12/NV16 use 2D shape [H*k, W]
714        if self.fourcc == NV12 || self.fourcc == NV16 {
715            return self.tensor.shape()[1];
716        }
717        match self.is_planar {
718            true => self.tensor.shape()[2],
719            false => self.tensor.shape()[1],
720        }
721    }
722
723    /// # Examples
724    /// ```rust
725    /// use edgefirst_image::{RGB, TensorImage};
726    /// use edgefirst_tensor::{Tensor, TensorTrait};
727    ///  # fn main() -> Result<(), edgefirst_image::Error> {
728    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
729    /// let img = TensorImage::from_tensor(tensor, RGB)?;
730    /// assert_eq!(img.height(), 720);
731    /// # Ok(())
732    /// # }
733    pub fn height(&self) -> usize {
734        // NV12 uses shape [H*3/2, W], so height = shape[0] * 2 / 3
735        if self.fourcc == NV12 {
736            return self.tensor.shape()[0] * 2 / 3;
737        }
738        // NV16 uses shape [H*2, W], so height = shape[0] / 2
739        if self.fourcc == NV16 {
740            return self.tensor.shape()[0] / 2;
741        }
742        match self.is_planar {
743            true => self.tensor.shape()[1],
744            false => self.tensor.shape()[0],
745        }
746    }
747
748    /// # Examples
749    /// ```rust
750    /// use edgefirst_image::{RGB, TensorImage};
751    /// use edgefirst_tensor::{Tensor, TensorTrait};
752    ///  # fn main() -> Result<(), edgefirst_image::Error> {
753    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
754    /// let img = TensorImage::from_tensor(tensor, RGB)?;
755    /// assert_eq!(img.channels(), 3);
756    /// # Ok(())
757    /// # }
758    pub fn channels(&self) -> usize {
759        // NV12/NV16 use 2D shape, conceptually have 2 components (Y + interleaved UV)
760        if self.fourcc == NV12 || self.fourcc == NV16 {
761            return 2;
762        }
763        match self.is_planar {
764            true => self.tensor.shape()[0],
765            false => self.tensor.shape()[2],
766        }
767    }
768
769    /// # Examples
770    /// ```rust
771    /// use edgefirst_image::{RGB, TensorImage};
772    /// use edgefirst_tensor::{Tensor, TensorTrait};
773    ///  # fn main() -> Result<(), edgefirst_image::Error> {
774    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
775    /// let img = TensorImage::from_tensor(tensor, RGB)?;
776    /// assert_eq!(img.row_stride(), 1280*3);
777    /// # Ok(())
778    /// # }
779    pub fn row_stride(&self) -> usize {
780        match self.is_planar {
781            true => self.width(),
782            false => self.width() * self.channels(),
783        }
784    }
785
    /// Returns the buffer identity of the underlying tensor, which can be
    /// used to distinguish the backing buffer without mapping it.
    pub fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
        self.tensor.buffer_identity()
    }
790}
791
/// Trait for types that can be used as destination images for conversion.
///
/// This trait abstracts over the difference between owned (`TensorImage`) and
/// borrowed (`TensorImageRef`) image buffers, enabling the same conversion code
/// to work with both. The geometry accessors mirror the inherent
/// `TensorImage` methods of the same names.
pub trait TensorImageDst {
    /// Returns a reference to the underlying tensor.
    fn tensor(&self) -> &Tensor<u8>;
    /// Returns a mutable reference to the underlying tensor.
    fn tensor_mut(&mut self) -> &mut Tensor<u8>;
    /// Returns the FourCC code representing the image format.
    fn fourcc(&self) -> FourCharCode;
    /// Returns whether the image is in planar format.
    fn is_planar(&self) -> bool;
    /// Returns the width of the image in pixels.
    fn width(&self) -> usize;
    /// Returns the height of the image in pixels.
    fn height(&self) -> usize;
    /// Returns the number of channels in the image.
    fn channels(&self) -> usize;
    /// Returns the row stride in bytes.
    fn row_stride(&self) -> usize;
    /// Returns the buffer identity of the underlying tensor.
    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity;
}
817
818impl TensorImageDst for TensorImage {
819    fn tensor(&self) -> &Tensor<u8> {
820        &self.tensor
821    }
822
823    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
824        &mut self.tensor
825    }
826
827    fn fourcc(&self) -> FourCharCode {
828        self.fourcc
829    }
830
831    fn is_planar(&self) -> bool {
832        self.is_planar
833    }
834
835    fn width(&self) -> usize {
836        TensorImage::width(self)
837    }
838
839    fn height(&self) -> usize {
840        TensorImage::height(self)
841    }
842
843    fn channels(&self) -> usize {
844        TensorImage::channels(self)
845    }
846
847    fn row_stride(&self) -> usize {
848        TensorImage::row_stride(self)
849    }
850
851    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
852        TensorImage::buffer_identity(self)
853    }
854}
855
/// A borrowed view of an image tensor for zero-copy preprocessing.
///
/// `TensorImageRef` wraps a borrowed `&mut Tensor<u8>` instead of owning it,
/// enabling zero-copy operations where the HAL writes directly into an external
/// tensor (e.g., a model's pre-allocated input buffer).
///
/// # Examples
/// ```rust,ignore
/// // Create a borrowed tensor image wrapping the model's input tensor
/// let mut dst = TensorImageRef::from_borrowed_tensor(
///     model.input_tensor(0),
///     PLANAR_RGB,
/// )?;
///
/// // Preprocess directly into the model's input buffer
/// processor.convert(&src_image, &mut dst, Rotation::None, Flip::None, Crop::default())?;
///
/// // Run inference - no copy needed!
/// model.run()?;
/// ```
#[derive(Debug)]
pub struct TensorImageRef<'a> {
    /// Borrowed tensor holding the pixel data; conversion writes into it in place.
    pub(crate) tensor: &'a mut Tensor<u8>,
    /// Pixel format of the image.
    fourcc: FourCharCode,
    /// Whether the layout is planar (`[C, H, W]`) rather than interleaved
    /// (`[H, W, C]`).
    is_planar: bool,
}
882
883impl<'a> TensorImageRef<'a> {
884    /// Creates a `TensorImageRef` from a borrowed tensor reference.
885    ///
886    /// The tensor shape must match the expected format:
887    /// - For planar formats (e.g., PLANAR_RGB): shape is `[channels, height,
888    ///   width]`
889    /// - For interleaved formats (e.g., RGB, RGBA): shape is `[height, width,
890    ///   channels]`
891    ///
892    /// # Arguments
893    /// * `tensor` - A mutable reference to the tensor to wrap
894    /// * `fourcc` - The pixel format of the image
895    ///
896    /// # Returns
897    /// A `Result` containing the `TensorImageRef` or an error if the tensor
898    /// shape doesn't match the expected format.
899    pub fn from_borrowed_tensor(tensor: &'a mut Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
900        let shape = tensor.shape();
901        let is_planar = fourcc_planar(fourcc)?;
902
903        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
904        if fourcc == NV12 || fourcc == NV16 {
905            if shape.len() != 2 {
906                return Err(Error::InvalidShape(format!(
907                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
908                    fourcc.to_string(),
909                    shape.len(),
910                    shape
911                )));
912            }
913            return Ok(Self {
914                tensor,
915                fourcc,
916                is_planar,
917            });
918        }
919
920        // All other formats use 3D shape
921        if shape.len() != 3 {
922            return Err(Error::InvalidShape(format!(
923                "Tensor shape must have 3 dimensions, got {}: {:?}",
924                shape.len(),
925                shape
926            )));
927        }
928        let channels = if is_planar { shape[0] } else { shape[2] };
929
930        if fourcc_channels(fourcc)? != channels {
931            return Err(Error::InvalidShape(format!(
932                "Invalid tensor shape {:?} for format {}",
933                shape,
934                fourcc.to_string()
935            )));
936        }
937
938        Ok(Self {
939            tensor,
940            fourcc,
941            is_planar,
942        })
943    }
944
945    /// Returns a reference to the underlying tensor.
946    pub fn tensor(&self) -> &Tensor<u8> {
947        self.tensor
948    }
949
950    /// Returns the FourCC code representing the image format.
951    pub fn fourcc(&self) -> FourCharCode {
952        self.fourcc
953    }
954
955    /// Returns whether the image is in planar format.
956    pub fn is_planar(&self) -> bool {
957        self.is_planar
958    }
959
960    /// Returns the width of the image in pixels.
961    pub fn width(&self) -> usize {
962        match self.is_planar {
963            true => self.tensor.shape()[2],
964            false => self.tensor.shape()[1],
965        }
966    }
967
968    /// Returns the height of the image in pixels.
969    pub fn height(&self) -> usize {
970        match self.is_planar {
971            true => self.tensor.shape()[1],
972            false => self.tensor.shape()[0],
973        }
974    }
975
976    /// Returns the number of channels in the image.
977    pub fn channels(&self) -> usize {
978        match self.is_planar {
979            true => self.tensor.shape()[0],
980            false => self.tensor.shape()[2],
981        }
982    }
983
984    /// Returns the row stride in bytes.
985    pub fn row_stride(&self) -> usize {
986        match self.is_planar {
987            true => self.width(),
988            false => self.width() * self.channels(),
989        }
990    }
991}
992
993impl TensorImageDst for TensorImageRef<'_> {
994    fn tensor(&self) -> &Tensor<u8> {
995        self.tensor
996    }
997
998    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
999        self.tensor
1000    }
1001
1002    fn fourcc(&self) -> FourCharCode {
1003        self.fourcc
1004    }
1005
1006    fn is_planar(&self) -> bool {
1007        self.is_planar
1008    }
1009
1010    fn width(&self) -> usize {
1011        TensorImageRef::width(self)
1012    }
1013
1014    fn height(&self) -> usize {
1015        TensorImageRef::height(self)
1016    }
1017
1018    fn channels(&self) -> usize {
1019        TensorImageRef::channels(self)
1020    }
1021
1022    fn row_stride(&self) -> usize {
1023        TensorImageRef::row_stride(self)
1024    }
1025
1026    fn buffer_identity(&self) -> &edgefirst_tensor::BufferIdentity {
1027        self.tensor.buffer_identity()
1028    }
1029}
1030
/// Flips the image, and then rotates it, returning a newly allocated image.
///
/// The destination is allocated (in `memory`, when given) with its width and
/// height swapped for 90° rotations, and the CPU backend performs the actual
/// pixel rearrangement.
fn rotate_flip_to_tensor_image(
    src: &TensorImage,
    rotation: Rotation,
    flip: Flip,
    memory: Option<TensorMemory>,
) -> Result<TensorImage, Error> {
    let src_map = src.tensor.map()?;
    // 90° rotations transpose the image, so the destination swaps dimensions.
    let dst = match rotation {
        Rotation::None | Rotation::Rotate180 => {
            TensorImage::new(src.width(), src.height(), src.fourcc(), memory)?
        }
        Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
            TensorImage::new(src.height(), src.width(), src.fourcc(), memory)?
        }
    };

    let mut dst_map = dst.tensor.map()?;

    CPUProcessor::flip_rotate_ndarray(&src_map, &mut dst_map, &dst, rotation, flip)?;

    Ok(dst)
}
1054
/// A quarter-turn rotation applied to an image during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    /// No rotation.
    None = 0,
    /// Rotate 90° clockwise.
    Clockwise90 = 1,
    /// Rotate 180°.
    Rotate180 = 2,
    /// Rotate 90° counter-clockwise (270° clockwise).
    CounterClockwise90 = 3,
}

impl Rotation {
    /// Creates a Rotation enum from an angle in degrees. The angle must be a
    /// multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        if angle % 90 != 0 {
            panic!("rotation angle is not a multiple of 90");
        }
        // Reduce the angle to a quarter-turn count in 0..4.
        match (angle / 90) % 4 {
            0 => Rotation::None,
            1 => Rotation::Clockwise90,
            2 => Rotation::Rotate180,
            _ => Rotation::CounterClockwise90,
        }
    }
}
1085
/// Mirror transform applied to an image during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flipping.
    None = 0,
    /// Flip vertically.
    Vertical = 1,
    /// Flip horizontally.
    Horizontal = 2,
}
1092
/// Cropping options for a conversion: optional source and destination
/// rectangles plus a fill color for the destination area outside the crop.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Area of the source image to read from; `None` uses the full source.
    pub src_rect: Option<Rect>,
    /// Area of the destination image to write into; `None` uses the full
    /// destination.
    pub dst_rect: Option<Rect>,
    /// Color for destination areas outside the cropped region (see
    /// [`Crop::with_dst_color`]).
    pub dst_color: Option<[u8; 4]>,
}
1099
1100impl Default for Crop {
1101    fn default() -> Self {
1102        Crop::new()
1103    }
1104}
1105impl Crop {
1106    // Creates a new Crop with default values (no cropping).
1107    pub fn new() -> Self {
1108        Crop {
1109            src_rect: None,
1110            dst_rect: None,
1111            dst_color: None,
1112        }
1113    }
1114
1115    // Sets the source rectangle for cropping.
1116    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
1117        self.src_rect = src_rect;
1118        self
1119    }
1120
1121    // Sets the destination rectangle for cropping.
1122    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
1123        self.dst_rect = dst_rect;
1124        self
1125    }
1126
1127    // Sets the destination color for areas outside the cropped region.
1128    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
1129        self.dst_color = dst_color;
1130        self
1131    }
1132
1133    // Creates a new Crop with no cropping.
1134    pub fn no_crop() -> Self {
1135        Crop::new()
1136    }
1137
1138    // Checks if the crop rectangles are valid for the given source and
1139    // destination images.
1140    pub fn check_crop(&self, src: &TensorImage, dst: &TensorImage) -> Result<(), Error> {
1141        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1142        let dst = self.dst_rect.is_none_or(|x| x.check_rect(dst));
1143        match (src, dst) {
1144            (true, true) => Ok(()),
1145            (true, false) => Err(Error::CropInvalid(format!(
1146                "Dest crop invalid: {:?}",
1147                self.dst_rect
1148            ))),
1149            (false, true) => Err(Error::CropInvalid(format!(
1150                "Src crop invalid: {:?}",
1151                self.src_rect
1152            ))),
1153            (false, false) => Err(Error::CropInvalid(format!(
1154                "Dest and Src crop invalid: {:?} {:?}",
1155                self.dst_rect, self.src_rect
1156            ))),
1157        }
1158    }
1159
1160    // Checks if the crop rectangles are valid for the given source and
1161    // destination images (using TensorImageRef for destination).
1162    pub fn check_crop_ref(&self, src: &TensorImage, dst: &TensorImageRef<'_>) -> Result<(), Error> {
1163        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1164        let dst = self.dst_rect.is_none_or(|x| x.check_rect_dst(dst));
1165        match (src, dst) {
1166            (true, true) => Ok(()),
1167            (true, false) => Err(Error::CropInvalid(format!(
1168                "Dest crop invalid: {:?}",
1169                self.dst_rect
1170            ))),
1171            (false, true) => Err(Error::CropInvalid(format!(
1172                "Src crop invalid: {:?}",
1173                self.src_rect
1174            ))),
1175            (false, false) => Err(Error::CropInvalid(format!(
1176                "Dest and Src crop invalid: {:?} {:?}",
1177                self.dst_rect, self.src_rect
1178            ))),
1179        }
1180    }
1181}
1182
/// An axis-aligned rectangular region in pixel coordinates.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// Horizontal offset of the left edge, in pixels.
    pub left: usize,
    /// Vertical offset of the top edge, in pixels.
    pub top: usize,
    /// Width of the rectangle, in pixels.
    pub width: usize,
    /// Height of the rectangle, in pixels.
    pub height: usize,
}
1190
1191impl Rect {
1192    // Creates a new Rect with the specified left, top, width, and height.
1193    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
1194        Self {
1195            left,
1196            top,
1197            width,
1198            height,
1199        }
1200    }
1201
1202    // Checks if the rectangle is valid for the given image.
1203    pub fn check_rect(&self, image: &TensorImage) -> bool {
1204        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1205    }
1206
1207    // Checks if the rectangle is valid for the given destination image.
1208    pub fn check_rect_dst<D: TensorImageDst>(&self, image: &D) -> bool {
1209        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1210    }
1211}
1212
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Converts the source image to a borrowed destination tensor for zero-copy
    /// preprocessing.
    ///
    /// This variant accepts a `TensorImageRef` as the destination, enabling
    /// direct writes into external buffers (e.g., model input tensors) without
    /// intermediate copies.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - A borrowed tensor image wrapping the destination buffer.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - Flips the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert_ref(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImageRef<'_>,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Draw pre-decoded detection boxes and segmentation masks onto `dst`.
    ///
    /// Supports two segmentation modes based on the mask channel count:
    /// - **Instance segmentation** (`C=1`): one `Segmentation` per detection,
    ///   `segmentation` and `detect` are zipped.
    /// - **Semantic segmentation** (`C>1`): a single `Segmentation` covering
    ///   all classes; only the first element is used.
    ///
    /// # Format requirements
    ///
    /// - CPU backend: `dst` must be `RGBA` or `RGB`.
    /// - OpenGL backend: `dst` must be `RGBA`, `BGRA`, or `RGB`.
    /// - G2D backend: not implemented (returns `NotImplemented`).
    ///
    /// An empty `segmentation` slice is valid — only bounding boxes are drawn.
    fn draw_masks(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()>;

    /// Draw masks from proto data onto image (fused decode+draw).
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly
    /// at the output resolution using bilinear sampling.
    ///
    /// `detect` and `proto_data.mask_coefficients` must have the same length
    /// (enforced by zip — excess entries are silently ignored). An empty
    /// `detect` slice is valid and returns immediately after drawing nothing.
    ///
    /// # Format requirements
    ///
    /// Same as [`draw_masks`](Self::draw_masks). G2D returns `NotImplemented`.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()>;

    /// Decode masks into a compact atlas buffer.
    ///
    /// Used internally by the Python/C `decode_masks` APIs. The atlas is a
    /// compact vertical strip where each detection occupies a strip sized to
    /// its padded bounding box (not the full output resolution).
    ///
    /// `output_width` and `output_height` define the coordinate space for
    /// interpreting bounding boxes — individual mask regions are bbox-sized.
    /// Mask pixels are binary: `255` = presence, `0` = background.
    ///
    /// Returns `(atlas_pixels, regions)` where `regions` describes each
    /// detection's location and bbox within the atlas.
    ///
    /// G2D backend returns `NotImplemented`.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)>;

    /// Sets the colors used for rendering segmentation masks. Up to 17 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
1336
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection. The default configuration (all fields `None`)
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set.
    // Only meaningful when OpenGL is compiled in, hence the cfg gates.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,
}
1355
/// Backend forced via the `EDGEFIRST_FORCE_BACKEND` environment variable.
///
/// When set, the [`ImageProcessor`] only initializes and dispatches to the
/// selected backend — no fallback chain is used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ForcedBackend {
    /// CPU converter only.
    Cpu,
    /// G2D hardware converter only (Linux).
    G2d,
    /// OpenGL converter only (Linux with the `opengl` feature).
    OpenGl,
}
1366
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only `None` if the
    /// `EDGEFIRST_DISABLE_CPU` environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the `EDGEFIRST_DISABLE_G2D` environment variable is not set and
    /// `libg2d.so` is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the `EDGEFIRST_DISABLE_GL` environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,

    /// When set, only the specified backend is used — no fallback chain.
    /// Populated from `EDGEFIRST_FORCE_BACKEND` during construction.
    pub(crate) forced_backend: Option<ForcedBackend>,
}
1390
// SAFETY: ImageProcessor is shared across threads by the public API. This
// asserts that every backend handle it owns (CPU, G2D, threaded GL) may be
// moved and shared between threads. NOTE(review): this cannot be verified
// from this file — raw GPU/driver handles are frequently not thread-safe,
// and an incorrect unsafe impl here is undefined behavior. Confirm each
// backend's thread-safety guarantees before relying on these impls.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
1393
1394impl ImageProcessor {
    /// Creates a new `ImageProcessor` instance, initializing available
    /// hardware converters based on the system capabilities and environment
    /// variables.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let image = include_bytes!("../../../testdata/zidane.jpg");
    /// let img = TensorImage::load(image, Some(RGBA), None)?;
    /// let mut converter = ImageProcessor::new()?;
    /// let mut dst = TensorImage::new(640, 480, RGB, None)?;
    /// converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn new() -> Result<Self> {
        Self::with_config(ImageProcessorConfig::default())
    }
1413
    /// Creates a new `ImageProcessor` with the given configuration.
    ///
    /// This allows overriding the EGL display type used for OpenGL
    /// acceleration. The `EDGEFIRST_DISABLE_GL=1` environment variable
    /// still takes precedence over any override.
    ///
    /// # Errors
    /// Returns [`Error::ForcedBackendUnavailable`] when `EDGEFIRST_FORCE_BACKEND`
    /// names an unknown backend, or when the forced backend is unsupported on
    /// this platform or fails to initialize.
    #[allow(unused_variables)]
    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
        // ── EDGEFIRST_FORCE_BACKEND ──────────────────────────────────
        // When set, only the requested backend is initialised and no
        // fallback chain is used. Accepted values (case-insensitive):
        //   "cpu", "g2d", "opengl"
        if let Ok(val) = std::env::var("EDGEFIRST_FORCE_BACKEND") {
            let val_lower = val.to_lowercase();
            let forced = match val_lower.as_str() {
                "cpu" => ForcedBackend::Cpu,
                "g2d" => ForcedBackend::G2d,
                "opengl" => ForcedBackend::OpenGl,
                other => {
                    return Err(Error::ForcedBackendUnavailable(format!(
                        "unknown EDGEFIRST_FORCE_BACKEND value: {other:?} (expected cpu, g2d, or opengl)"
                    )));
                }
            };

            log::info!("EDGEFIRST_FORCE_BACKEND={val} — only initializing {val_lower} backend");

            // Unlike the fallback chain below, a forced backend that fails to
            // initialize is a hard error rather than a logged warning.
            return match forced {
                ForcedBackend::Cpu => Ok(Self {
                    cpu: Some(CPUProcessor::new()),
                    #[cfg(target_os = "linux")]
                    g2d: None,
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    opengl: None,
                    forced_backend: Some(ForcedBackend::Cpu),
                }),
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    {
                        let g2d = G2DProcessor::new().map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "g2d forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: Some(g2d),
                            #[cfg(feature = "opengl")]
                            opengl: None,
                            forced_backend: Some(ForcedBackend::G2d),
                        })
                    }
                    #[cfg(not(target_os = "linux"))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "g2d backend is only available on Linux".into(),
                        ))
                    }
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    {
                        let opengl = GLProcessorThreaded::new(config.egl_display).map_err(|e| {
                            Error::ForcedBackendUnavailable(format!(
                                "opengl forced but failed to initialize: {e:?}"
                            ))
                        })?;
                        Ok(Self {
                            cpu: None,
                            g2d: None,
                            opengl: Some(opengl),
                            forced_backend: Some(ForcedBackend::OpenGl),
                        })
                    }
                    #[cfg(not(all(target_os = "linux", feature = "opengl")))]
                    {
                        Err(Error::ForcedBackendUnavailable(
                            "opengl backend requires Linux with the 'opengl' feature enabled"
                                .into(),
                        ))
                    }
                }
            };
        }

        // ── Existing DISABLE logic (unchanged) ──────────────────────
        // Any value other than "0" or "false" (case-insensitive) counts as set.
        #[cfg(target_os = "linux")]
        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_G2D is set");
            None
        } else {
            // Initialization failure is non-fatal; fall back to other backends.
            match G2DProcessor::new() {
                Ok(g2d_converter) => Some(g2d_converter),
                Err(err) => {
                    log::warn!("Failed to initialize G2D converter: {err:?}");
                    None
                }
            }
        };

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_GL is set");
            None
        } else {
            match GLProcessorThreaded::new(config.egl_display) {
                Ok(gl_converter) => Some(gl_converter),
                Err(err) => {
                    log::warn!("Failed to initialize GL converter: {err:?}");
                    None
                }
            }
        };

        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
            .map(|x| x != "0" && x.to_lowercase() != "false")
            .unwrap_or(false)
        {
            log::debug!("EDGEFIRST_DISABLE_CPU is set");
            None
        } else {
            Some(CPUProcessor::new())
        };
        Ok(Self {
            cpu,
            #[cfg(target_os = "linux")]
            g2d,
            #[cfg(target_os = "linux")]
            #[cfg(feature = "opengl")]
            opengl,
            forced_backend: None,
        })
    }
1555
1556    /// Sets the interpolation mode for int8 proto textures on the OpenGL
1557    /// backend. No-op if OpenGL is not available.
1558    #[cfg(target_os = "linux")]
1559    #[cfg(feature = "opengl")]
1560    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
1561        if let Some(ref mut gl) = self.opengl {
1562            gl.set_int8_interpolation_mode(mode)?;
1563        }
1564        Ok(())
1565    }
1566
    /// Create a `TensorImage` with the best available memory backend.
    ///
    /// Priority: DMA-buf → PBO → system memory.
    ///
    /// # Arguments
    ///
    /// * `width` - Image width in pixels
    /// * `height` - Image height in pixels
    /// * `fourcc` - Pixel format as a FourCC code
    ///
    /// # Returns
    ///
    /// A `TensorImage` backed by the highest-performance memory type
    /// available on this system.
    ///
    /// # Errors
    ///
    /// Returns an error if all allocation strategies fail.
    pub fn create_image(
        &self,
        width: usize,
        height: usize,
        fourcc: four_char_code::FourCharCode,
    ) -> Result<TensorImage> {
        // Try DMA first on Linux — skip only when GL has explicitly selected PBO
        // as the preferred transfer path (PBO is better than DMA in that case).
        #[cfg(target_os = "linux")]
        {
            #[cfg(feature = "opengl")]
            let gl_uses_pbo = self
                .opengl
                .as_ref()
                .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
            // Without the OpenGL backend compiled in, PBO can never be preferred.
            #[cfg(not(feature = "opengl"))]
            let gl_uses_pbo = false;

            if !gl_uses_pbo {
                // A DMA allocation failure is non-fatal; fall through to PBO/Mem.
                if let Ok(img) = TensorImage::new(
                    width,
                    height,
                    fourcc,
                    Some(edgefirst_tensor::TensorMemory::Dma),
                ) {
                    return Ok(img);
                }
            }
        }

        // Try PBO (if GL available)
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(gl) = &self.opengl {
            match gl.create_pbo_image(width, height, fourcc) {
                Ok(img) => return Ok(img),
                Err(e) => log::debug!("PBO image creation failed, falling back to Mem: {e:?}"),
            }
        }

        // Fallback to Mem
        TensorImage::new(
            width,
            height,
            fourcc,
            Some(edgefirst_tensor::TensorMemory::Mem),
        )
    }
1633}
1634
impl ImageProcessorTrait for ImageProcessor {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated
    ///
    /// Prefer hardware accelerators when available, falling back to CPU if
    /// necessary.
    fn convert(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let start = Instant::now();

        // ── Forced backend: no fallback chain ────────────────────────
        // When a backend is forced, only that backend is tried; a missing
        // backend is a hard ForcedBackendUnavailable error, never a fallback.
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.convert(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => {
                    // G2D exists only on Linux builds; on other targets the
                    // if-let is compiled out and the arm reports unavailable.
                    #[cfg(target_os = "linux")]
                    if let Some(g2d) = self.g2d.as_mut() {
                        return g2d.convert(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("g2d".into()))
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.convert(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // ── Existing fallback chain ──────────────────────────────────
        // Order: G2D → CPU (copy-only shortcut) → OpenGL → CPU. A failed
        // attempt is traced and the next backend is tried.
        #[cfg(target_os = "linux")]
        if let Some(g2d) = self.g2d.as_mut() {
            log::trace!("image started with g2d in {:?}", start.elapsed());
            match g2d.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with g2d in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with g2d: {e:?}")
                }
            }
        }

        // if the image is just a copy without any resizing, then send it to
        // the CPU and skip OpenGL
        let src_shape = match crop.src_rect {
            Some(s) => (s.width, s.height),
            None => (src.width(), src.height()),
        };
        let dst_shape = match crop.dst_rect {
            Some(d) => (d.width, d.height),
            None => (dst.width(), dst.height()),
        };

        // TODO: Check if still use CPU when rotation or flip is enabled
        if src_shape == dst_shape && flip == Flip::None && rotation == Rotation::None {
            if let Some(cpu) = self.cpu.as_mut() {
                // Note: a CPU failure on this shortcut path is returned
                // directly and does not fall through to OpenGL below.
                match cpu.convert(src, dst, rotation, flip, crop) {
                    Ok(_) => {
                        log::trace!("image converted with cpu in {:?}", start.elapsed());
                        return Ok(());
                    }
                    Err(e) => {
                        log::trace!("image didn't convert with cpu: {e:?}");
                        return Err(e);
                    }
                }
            }
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("image started with opengl in {:?}", start.elapsed());
            match opengl.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with opengl: {e:?}")
                }
            }
        }
        // Last resort: the CPU converter.
        log::trace!("image started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        // Every backend was disabled or unavailable.
        Err(Error::NoConverter)
    }

    /// Converts into a borrowed destination (`TensorImageRef`).
    ///
    /// Honors the forced backend if set; otherwise goes straight to the CPU
    /// backend (see the rationale comment below).
    fn convert_ref(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImageRef<'_>,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()> {
        let start = Instant::now();

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.convert_ref(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => {
                    #[cfg(target_os = "linux")]
                    if let Some(g2d) = self.g2d.as_mut() {
                        return g2d.convert_ref(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("g2d".into()))
                }
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.convert_ref(src, dst, rotation, flip, crop);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // For TensorImageRef, we prefer CPU since hardware accelerators typically
        // don't support PLANAR_RGB output which is the common model input format.
        // The CPU path uses the generic conversion functions that work with any
        // TensorImageDst implementation.
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.convert_ref(src, dst, rotation, flip, crop) {
                Ok(_) => {
                    log::trace!("image converted with cpu (ref) in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("image didn't convert with cpu (ref): {e:?}");
                    return Err(e);
                }
            }
        }

        Err(Error::NoConverter)
    }

    /// Draws detection boxes and segmentation masks onto `dst`.
    ///
    /// Backend order: forced backend if set, otherwise OpenGL then CPU; G2D is
    /// skipped because it cannot render to an image.
    fn draw_masks(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()> {
        let start = Instant::now();

        // Nothing to draw: succeed without touching any backend.
        if detect.is_empty() && segmentation.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks(dst, detect, segmentation);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("draw_masks started with opengl in {:?}", start.elapsed());
            match opengl.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks didn't work with opengl: {e:?}")
                }
            }
        }
        log::trace!("draw_masks started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.draw_masks(dst, detect, segmentation) {
                Ok(_) => {
                    log::trace!("draw_masks with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks didn't work with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Draws masks reconstructed from prototype data (`proto_data`) onto
    /// `dst` for each detection box.
    fn draw_masks_proto(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()> {
        let start = Instant::now();

        // No detections means no masks to reconstruct.
        if detect.is_empty() {
            return Ok(());
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support draw_masks_proto".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.draw_masks_proto(dst, detect, proto_data);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        // Hybrid path: CPU materialize + GL overlay (benchmarked faster than
        // full-GPU draw_masks_proto on all tested platforms: 27× on imx8mp,
        // 4× on imx95, 2.5× on rpi5, 1.6× on x86).
        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            let Some(cpu) = self.cpu.as_ref() else {
                return Err(Error::Internal(
                    "draw_masks_proto requires CPU backend for hybrid path".into(),
                ));
            };
            log::trace!(
                "draw_masks_proto started with hybrid (cpu+opengl) in {:?}",
                start.elapsed()
            );
            let segmentation = cpu.materialize_segmentations(detect, proto_data)?;
            match opengl.draw_masks(dst, detect, &segmentation) {
                Ok(_) => {
                    log::trace!(
                        "draw_masks_proto with hybrid (cpu+opengl) in {:?}",
                        start.elapsed()
                    );
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("draw_masks_proto hybrid path failed, falling back to cpu: {e:?}");
                }
            }
        }

        // CPU-only fallback (no OpenGL, or hybrid GL overlay failed)
        let Some(cpu) = self.cpu.as_mut() else {
            return Err(Error::Internal(
                "draw_masks_proto requires CPU backend for fallback path".into(),
            ));
        };
        log::trace!("draw_masks_proto started with cpu in {:?}", start.elapsed());
        cpu.draw_masks_proto(dst, detect, proto_data)
    }

    /// Sets the per-class overlay colors on the active rendering backend(s).
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
        let start = Instant::now();

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support set_class_colors".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.set_class_colors(colors);
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        // skip G2D as it doesn't support rendering to image

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        if let Some(opengl) = self.opengl.as_mut() {
            log::trace!("image started with opengl in {:?}", start.elapsed());
            match opengl.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with opengl in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with opengl: {e:?}")
                }
            }
        }
        log::trace!("image started with cpu in {:?}", start.elapsed());
        if let Some(cpu) = self.cpu.as_mut() {
            match cpu.set_class_colors(colors) {
                Ok(_) => {
                    log::trace!("colors set with cpu in {:?}", start.elapsed());
                    return Ok(());
                }
                Err(e) => {
                    log::trace!("colors didn't set with cpu: {e:?}");
                    return Err(e);
                }
            }
        }
        Err(Error::NoConverter)
    }

    /// Decodes detection masks into a packed atlas buffer plus one
    /// `MaskRegion` per detection, at the requested output resolution.
    fn decode_masks_atlas(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<(Vec<u8>, Vec<MaskRegion>)> {
        // No detections: return empty atlas and region list.
        if detect.is_empty() {
            return Ok((Vec::new(), Vec::new()));
        }

        // ── Forced backend: no fallback chain ────────────────────────
        if let Some(forced) = self.forced_backend {
            return match forced {
                ForcedBackend::Cpu => {
                    if let Some(cpu) = self.cpu.as_mut() {
                        return cpu.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("cpu".into()))
                }
                ForcedBackend::G2d => Err(Error::NotSupported(
                    "g2d does not support decode_masks_atlas".into(),
                )),
                ForcedBackend::OpenGl => {
                    #[cfg(target_os = "linux")]
                    #[cfg(feature = "opengl")]
                    if let Some(opengl) = self.opengl.as_mut() {
                        return opengl.decode_masks_atlas(
                            detect,
                            proto_data,
                            output_width,
                            output_height,
                        );
                    }
                    Err(Error::ForcedBackendUnavailable("opengl".into()))
                }
            };
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            // When OpenGL is present its result — success or failure — is
            // final; unlike convert(), an OpenGL error here does not fall
            // back to the CPU path below.
            let has_opengl = self.opengl.is_some();
            if has_opengl {
                let opengl = self.opengl.as_mut().unwrap();
                match opengl.decode_masks_atlas(detect, proto_data, output_width, output_height) {
                    Ok(r) => return Ok(r),
                    Err(e) => {
                        log::trace!("decode_masks_atlas didn't work with opengl: {e:?}");
                        return Err(e);
                    }
                }
            }
        }
        // CPU fallback: render per-detection masks and pack into compact atlas
        if let Some(cpu) = self.cpu.as_mut() {
            return cpu.decode_masks_atlas(detect, proto_data, output_width, output_height);
        }
        Err(Error::NoConverter)
    }
}
2075
2076fn fourcc_channels(fourcc: FourCharCode) -> Result<usize> {
2077    match fourcc {
2078        RGBA | BGRA => Ok(4), // RGBA/BGRA: 4 channels
2079        RGB => Ok(3),         // RGB has 3 channels (R, G, B)
2080        YUYV | VYUY => Ok(2), // YUYV/VYUY has 2 channels (Y and UV)
2081        GREY => Ok(1),        // Y800 has 1 channel (Y)
2082        NV12 => Ok(2),        // NV12 has 2 channel. 2nd channel is half empty
2083        NV16 => Ok(2),        // NV16 has 2 channel. 2nd channel is full size
2084        PLANAR_RGB => Ok(3),
2085        PLANAR_RGBA => Ok(4),
2086        RGB_INT8 => Ok(3),
2087        PLANAR_RGB_INT8 => Ok(3),
2088        _ => Err(Error::NotSupported(format!(
2089            "Unsupported fourcc: {}",
2090            fourcc.to_string()
2091        ))),
2092    }
2093}
2094
2095fn fourcc_planar(fourcc: FourCharCode) -> Result<bool> {
2096    match fourcc {
2097        RGBA | BGRA => Ok(false), // RGBA/BGRA: 4 channels, interleaved
2098        RGB => Ok(false),         // RGB has 3 channels (R, G, B)
2099        YUYV | VYUY => Ok(false), // YUYV/VYUY has 2 channels (Y and UV)
2100        GREY => Ok(false),        // Y800 has 1 channel (Y)
2101        NV12 => Ok(true),         // Planar YUV
2102        NV16 => Ok(true),         // Planar YUV
2103        PLANAR_RGB => Ok(true),   // Planar RGB
2104        PLANAR_RGBA => Ok(true),  // Planar RGBA
2105        RGB_INT8 => Ok(false),
2106        PLANAR_RGB_INT8 => Ok(true),
2107        _ => Err(Error::NotSupported(format!(
2108            "Unsupported fourcc: {}",
2109            fourcc.to_string()
2110        ))),
2111    }
2112}
2113
2114/// Returns `true` if the format uses XOR 0x80 int8 reinterpretation.
2115pub(crate) fn fourcc_is_int8(fourcc: FourCharCode) -> bool {
2116    matches!(fourcc, RGB_INT8 | PLANAR_RGB_INT8)
2117}
2118
2119/// Returns the uint8 equivalent of an int8 format, or the format unchanged.
2120#[allow(dead_code)] // Will be used by Task 5 (non-DMA int8 path)
2121pub(crate) fn fourcc_uint8_equivalent(fourcc: FourCharCode) -> FourCharCode {
2122    match fourcc {
2123        RGB_INT8 => RGB,
2124        PLANAR_RGB_INT8 => PLANAR_RGB,
2125        other => other,
2126    }
2127}
2128
2129/// Returns `true` if the format is packed RGB (3 bytes per pixel, interleaved).
2130#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
2131pub(crate) fn fourcc_is_packed_rgb(fourcc: FourCharCode) -> bool {
2132    matches!(fourcc, RGB | RGB_INT8)
2133}
2134
/// Scope timer: captures the creation instant and, via its `Drop` impl,
/// logs the elapsed duration at trace level with the given label.
pub(crate) struct FunctionTimer<T: Display> {
    /// Label included in the trace log line.
    name: T,
    /// Moment the timer was created; elapsed time is measured from here.
    start: std::time::Instant,
}
2139
2140impl<T: Display> FunctionTimer<T> {
2141    pub fn new(name: T) -> Self {
2142        Self {
2143            name,
2144            start: std::time::Instant::now(),
2145        }
2146    }
2147}
2148
2149impl<T: Display> Drop for FunctionTimer<T> {
2150    fn drop(&mut self) {
2151        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
2152    }
2153}
2154
/// Default per-class overlay colors as normalized RGBA components (alpha is
/// fixed at 0.7); converted to 8-bit at compile time in `DEFAULT_COLORS_U8`.
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
2177
2178const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
2179    let mut result = [[0; M]; N];
2180    let mut i = 0;
2181    while i < N {
2182        let mut j = 0;
2183        while j < M {
2184            result[i][j] = (a[i][j] * 255.0).round() as u8;
2185            j += 1;
2186        }
2187        i += 1;
2188    }
2189    result
2190}
2191
/// `DEFAULT_COLORS` scaled to 8-bit RGBA at compile time via `denorm`.
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
2193
2194#[cfg(test)]
2195#[cfg_attr(coverage_nightly, coverage(off))]
2196mod image_tests {
2197    use super::*;
2198    use crate::{CPUProcessor, Rotation};
2199    #[cfg(target_os = "linux")]
2200    use edgefirst_tensor::is_dma_available;
2201    use edgefirst_tensor::{TensorMapTrait, TensorMemory};
2202    use image::buffer::ConvertBuffer;
2203
2204    #[ctor::ctor]
2205    fn init() {
2206        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
2207    }
2208
    /// Expands to the name of the enclosing function as a `&'static str`,
    /// used to label comparison output in these tests.
    macro_rules! function {
        () => {{
            // type_name of the nested fn is "path::to::enclosing::f"; the
            // trailing "::f" is 3 characters, hence the `len() - 3` slicing.
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
2224
2225    #[test]
2226    fn test_invalid_crop() {
2227        let src = TensorImage::new(100, 100, RGB, None).unwrap();
2228        let dst = TensorImage::new(100, 100, RGB, None).unwrap();
2229
2230        let crop = Crop::new()
2231            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
2232            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
2233
2234        let result = crop.check_crop(&src, &dst);
2235        assert!(matches!(
2236            result,
2237            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
2238        ));
2239
2240        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
2241        let result = crop.check_crop(&src, &dst);
2242        assert!(matches!(
2243            result,
2244            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
2245        ));
2246
2247        let crop = crop
2248            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
2249            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
2250        let result = crop.check_crop(&src, &dst);
2251        assert!(matches!(
2252            result,
2253            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
2254        ));
2255
2256        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
2257
2258        let result = crop.check_crop(&src, &dst);
2259        assert!(result.is_ok());
2260    }
2261
2262    #[test]
2263    fn test_invalid_tensor() -> Result<(), Error> {
2264        let tensor = Tensor::new(&[720, 1280, 4, 1], None, None)?;
2265        let result = TensorImage::from_tensor(tensor, RGB);
2266        assert!(matches!(
2267            result,
2268            Err(Error::InvalidShape(e)) if e.starts_with("Tensor shape must have 3 dimensions, got")
2269        ));
2270
2271        let tensor = Tensor::new(&[720, 1280, 4], None, None)?;
2272        let result = TensorImage::from_tensor(tensor, RGB);
2273        assert!(matches!(
2274            result,
2275            Err(Error::InvalidShape(e)) if e.starts_with("Invalid tensor shape")
2276        ));
2277
2278        Ok(())
2279    }
2280
2281    #[test]
2282    fn test_invalid_image_file() -> Result<(), Error> {
2283        let result = TensorImage::load(&[123; 5000], None, None);
2284        assert!(matches!(
2285            result,
2286            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
2287
2288        Ok(())
2289    }
2290
2291    #[test]
2292    fn test_invalid_jpeg_fourcc() -> Result<(), Error> {
2293        let result = TensorImage::load(&[123; 5000], Some(YUYV), None);
2294        assert!(matches!(
2295            result,
2296            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
2297
2298        Ok(())
2299    }
2300
2301    #[test]
2302    fn test_load_resize_save() {
2303        let file = include_bytes!("../../../testdata/zidane.jpg");
2304        let img = TensorImage::load_jpeg(file, Some(RGBA), None).unwrap();
2305        assert_eq!(img.width(), 1280);
2306        assert_eq!(img.height(), 720);
2307
2308        let mut dst = TensorImage::new(640, 360, RGBA, None).unwrap();
2309        let mut converter = CPUProcessor::new();
2310        converter
2311            .convert(&img, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
2312            .unwrap();
2313        assert_eq!(dst.width(), 640);
2314        assert_eq!(dst.height(), 360);
2315
2316        dst.save_jpeg("zidane_resized.jpg", 80).unwrap();
2317
2318        let file = std::fs::read("zidane_resized.jpg").unwrap();
2319        let img = TensorImage::load_jpeg(&file, None, None).unwrap();
2320        assert_eq!(img.width(), 640);
2321        assert_eq!(img.height(), 360);
2322        assert_eq!(img.fourcc(), RGB);
2323    }
2324
2325    #[test]
2326    fn test_from_tensor_planar() -> Result<(), Error> {
2327        let tensor = Tensor::new(&[3, 720, 1280], None, None)?;
2328        tensor
2329            .map()?
2330            .copy_from_slice(include_bytes!("../../../testdata/camera720p.8bps"));
2331        let planar = TensorImage::from_tensor(tensor, PLANAR_RGB)?;
2332
2333        let rbga = load_bytes_to_tensor(
2334            1280,
2335            720,
2336            RGBA,
2337            None,
2338            include_bytes!("../../../testdata/camera720p.rgba"),
2339        )?;
2340        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
2341
2342        Ok(())
2343    }
2344
2345    #[test]
2346    fn test_from_tensor_invalid_fourcc() {
2347        let tensor = Tensor::new(&[3, 720, 1280], None, None).unwrap();
2348        let result = TensorImage::from_tensor(tensor, four_char_code!("TEST"));
2349        matches!(result, Err(Error::NotSupported(e)) if e.starts_with("Unsupported fourcc : TEST"));
2350    }
2351
2352    #[test]
2353    #[should_panic(expected = "Failed to save planar RGB image")]
2354    fn test_save_planar() {
2355        let planar_img = load_bytes_to_tensor(
2356            1280,
2357            720,
2358            PLANAR_RGB,
2359            None,
2360            include_bytes!("../../../testdata/camera720p.8bps"),
2361        )
2362        .unwrap();
2363
2364        let save_path = "/tmp/planar_rgb.jpg";
2365        planar_img
2366            .save_jpeg(save_path, 90)
2367            .expect("Failed to save planar RGB image");
2368    }
2369
2370    #[test]
2371    #[should_panic(expected = "Failed to save YUYV image")]
2372    fn test_save_yuyv() {
2373        let planar_img = load_bytes_to_tensor(
2374            1280,
2375            720,
2376            YUYV,
2377            None,
2378            include_bytes!("../../../testdata/camera720p.yuyv"),
2379        )
2380        .unwrap();
2381
2382        let save_path = "/tmp/yuyv.jpg";
2383        planar_img
2384            .save_jpeg(save_path, 90)
2385            .expect("Failed to save YUYV image");
2386    }
2387
2388    #[test]
2389    fn test_rotation_angle() {
2390        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
2391        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
2392        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
2393        assert_eq!(
2394            Rotation::from_degrees_clockwise(270),
2395            Rotation::CounterClockwise90
2396        );
2397        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
2398        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
2399        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
2400        assert_eq!(
2401            Rotation::from_degrees_clockwise(630),
2402            Rotation::CounterClockwise90
2403        );
2404    }
2405
2406    #[test]
2407    #[should_panic(expected = "rotation angle is not a multiple of 90")]
2408    fn test_rotation_angle_panic() {
2409        Rotation::from_degrees_clockwise(361);
2410    }
2411
    #[test]
    fn test_disable_env_var() -> Result<(), Error> {
        // Each section: set the disable variable, build a processor, restore
        // the variable, then assert the backend was not created.
        #[cfg(target_os = "linux")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
            let converter = ImageProcessor::new()?;
            // Restore happens before the assert so an assertion failure does
            // not leave the override set for later tests.
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
            }
            assert!(converter.g2d.is_none());
        }

        #[cfg(target_os = "linux")]
        #[cfg(feature = "opengl")]
        {
            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
            let converter = ImageProcessor::new()?;
            match original {
                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
            }
            assert!(converter.opengl.is_none());
        }

        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let converter = ImageProcessor::new()?;
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        assert!(converter.cpu.is_none());

        // All backends disabled at once: convert() must report NoConverter.
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
        let mut converter = ImageProcessor::new()?;

        let src = TensorImage::new(1280, 720, RGBA, None)?;
        let mut dst = TensorImage::new(640, 360, RGBA, None)?;
        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
        // NOTE(review): this assert runs before the restores below, so a
        // failure here leaks the three overrides into other tests.
        assert!(matches!(result, Err(Error::NoConverter)));

        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        Ok(())
    }
2476
2477    #[test]
2478    fn test_unsupported_conversion() {
2479        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
2480        let mut dst = TensorImage::new(640, 360, NV12, None).unwrap();
2481        let mut converter = ImageProcessor::new().unwrap();
2482        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
2483        log::debug!("result: {:?}", result);
2484        assert!(matches!(
2485            result,
2486            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2487        ));
2488    }
2489
2490    #[test]
2491    fn test_load_grey() {
2492        let grey_img = TensorImage::load_jpeg(
2493            include_bytes!("../../../testdata/grey.jpg"),
2494            Some(RGBA),
2495            None,
2496        )
2497        .unwrap();
2498
2499        let grey_but_rgb_img = TensorImage::load_jpeg(
2500            include_bytes!("../../../testdata/grey-rgb.jpg"),
2501            Some(RGBA),
2502            None,
2503        )
2504        .unwrap();
2505
2506        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2507    }
2508
2509    #[test]
2510    fn test_new_nv12() {
2511        let nv12 = TensorImage::new(1280, 720, NV12, None).unwrap();
2512        assert_eq!(nv12.height(), 720);
2513        assert_eq!(nv12.width(), 1280);
2514        assert_eq!(nv12.fourcc(), NV12);
2515        assert_eq!(nv12.channels(), 2);
2516        assert!(nv12.is_planar())
2517    }
2518
2519    #[test]
2520    #[cfg(target_os = "linux")]
2521    fn test_new_image_converter() {
2522        let dst_width = 640;
2523        let dst_height = 360;
2524        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2525        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2526
2527        let mut converter_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2528        let mut converter = ImageProcessor::new().unwrap();
2529        converter
2530            .convert(
2531                &src,
2532                &mut converter_dst,
2533                Rotation::None,
2534                Flip::None,
2535                Crop::no_crop(),
2536            )
2537            .unwrap();
2538
2539        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2540        let mut cpu_converter = CPUProcessor::new();
2541        cpu_converter
2542            .convert(
2543                &src,
2544                &mut cpu_dst,
2545                Rotation::None,
2546                Flip::None,
2547                Crop::no_crop(),
2548            )
2549            .unwrap();
2550
2551        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
2552    }
2553
2554    #[test]
2555    fn test_crop_skip() {
2556        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2557        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2558
2559        let mut converter_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2560        let mut converter = ImageProcessor::new().unwrap();
2561        let crop = Crop::new()
2562            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
2563            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
2564        converter
2565            .convert(&src, &mut converter_dst, Rotation::None, Flip::None, crop)
2566            .unwrap();
2567
2568        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2569        let mut cpu_converter = CPUProcessor::new();
2570        cpu_converter
2571            .convert(&src, &mut cpu_dst, Rotation::None, Flip::None, crop)
2572            .unwrap();
2573
2574        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
2575    }
2576
2577    #[test]
2578    fn test_invalid_fourcc() {
2579        let result = TensorImage::new(1280, 720, four_char_code!("TEST"), None);
2580        assert!(matches!(
2581            result,
2582            Err(Error::NotSupported(e)) if e == "Unsupported fourcc: TEST"
2583        ));
2584    }
2585
    // Cached result of probing for the G2D library (Linux/i.MX8 only); the
    // probe (constructing a G2DProcessor) runs at most once per test process.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    // Helper function to check if G2D hardware acceleration is available,
    // so G2D tests can skip gracefully on hosts without libg2d.
    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2594
2595    #[cfg(target_os = "linux")]
2596    #[cfg(feature = "opengl")]
2597    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2598
2599    #[cfg(target_os = "linux")]
2600    #[cfg(feature = "opengl")]
2601    // Helper function to check if OpenGL is available
2602    fn is_opengl_available() -> bool {
2603        #[cfg(all(target_os = "linux", feature = "opengl"))]
2604        {
2605            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2606        }
2607
2608        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2609        {
2610            false
2611        }
2612    }
2613
2614    #[test]
2615    fn test_load_jpeg_with_exif() {
2616        let file = include_bytes!("../../../testdata/zidane_rotated_exif.jpg").to_vec();
2617        let loaded = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2618
2619        assert_eq!(loaded.height(), 1280);
2620        assert_eq!(loaded.width(), 720);
2621
2622        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2623        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2624
2625        let (dst_width, dst_height) = (cpu_src.height(), cpu_src.width());
2626
2627        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2628        let mut cpu_converter = CPUProcessor::new();
2629
2630        cpu_converter
2631            .convert(
2632                &cpu_src,
2633                &mut cpu_dst,
2634                Rotation::Clockwise90,
2635                Flip::None,
2636                Crop::no_crop(),
2637            )
2638            .unwrap();
2639
2640        compare_images(&loaded, &cpu_dst, 0.98, function!());
2641    }
2642
2643    #[test]
2644    fn test_load_png_with_exif() {
2645        let file = include_bytes!("../../../testdata/zidane_rotated_exif_180.png").to_vec();
2646        let loaded = TensorImage::load_png(&file, Some(RGBA), None).unwrap();
2647
2648        assert_eq!(loaded.height(), 720);
2649        assert_eq!(loaded.width(), 1280);
2650
2651        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2652        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2653
2654        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2655        let mut cpu_converter = CPUProcessor::new();
2656
2657        cpu_converter
2658            .convert(
2659                &cpu_src,
2660                &mut cpu_dst,
2661                Rotation::Rotate180,
2662                Flip::None,
2663                Crop::no_crop(),
2664            )
2665            .unwrap();
2666
2667        compare_images(&loaded, &cpu_dst, 0.98, function!());
2668    }
2669
2670    #[test]
2671    #[cfg(target_os = "linux")]
2672    fn test_g2d_resize() {
2673        if !is_g2d_available() {
2674            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
2675            return;
2676        }
2677        if !is_dma_available() {
2678            eprintln!(
2679                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2680            );
2681            return;
2682        }
2683
2684        let dst_width = 640;
2685        let dst_height = 360;
2686        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2687        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();
2688
2689        let mut g2d_dst =
2690            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
2691        let mut g2d_converter = G2DProcessor::new().unwrap();
2692        g2d_converter
2693            .convert(
2694                &src,
2695                &mut g2d_dst,
2696                Rotation::None,
2697                Flip::None,
2698                Crop::no_crop(),
2699            )
2700            .unwrap();
2701
2702        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2703        let mut cpu_converter = CPUProcessor::new();
2704        cpu_converter
2705            .convert(
2706                &src,
2707                &mut cpu_dst,
2708                Rotation::None,
2709                Flip::None,
2710                Crop::no_crop(),
2711            )
2712            .unwrap();
2713
2714        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2715    }
2716
2717    #[test]
2718    #[cfg(target_os = "linux")]
2719    #[cfg(feature = "opengl")]
2720    fn test_opengl_resize() {
2721        if !is_opengl_available() {
2722            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2723            return;
2724        }
2725
2726        let dst_width = 640;
2727        let dst_height = 360;
2728        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2729        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2730
2731        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2732        let mut cpu_converter = CPUProcessor::new();
2733        cpu_converter
2734            .convert(
2735                &src,
2736                &mut cpu_dst,
2737                Rotation::None,
2738                Flip::None,
2739                Crop::no_crop(),
2740            )
2741            .unwrap();
2742        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2743        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2744
2745        for _ in 0..5 {
2746            gl_converter
2747                .convert(
2748                    &src,
2749                    &mut gl_dst,
2750                    Rotation::None,
2751                    Flip::None,
2752                    Crop::no_crop(),
2753                )
2754                .unwrap();
2755
2756            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2757        }
2758
2759        drop(gl_dst);
2760    }
2761
2762    #[test]
2763    #[ignore] // Vivante GPU hangs with concurrent EGL contexts on i.MX8MP
2764    #[cfg(target_os = "linux")]
2765    #[cfg(feature = "opengl")]
2766    fn test_opengl_10_threads() {
2767        if !is_opengl_available() {
2768            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2769            return;
2770        }
2771
2772        let handles: Vec<_> = (0..10)
2773            .map(|i| {
2774                std::thread::Builder::new()
2775                    .name(format!("Thread {i}"))
2776                    .spawn(test_opengl_resize)
2777                    .unwrap()
2778            })
2779            .collect();
2780        handles.into_iter().for_each(|h| {
2781            if let Err(e) = h.join() {
2782                std::panic::resume_unwind(e)
2783            }
2784        });
2785    }
2786
2787    #[test]
2788    #[cfg(target_os = "linux")]
2789    #[cfg(feature = "opengl")]
2790    fn test_opengl_grey() {
2791        if !is_opengl_available() {
2792            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2793            return;
2794        }
2795
2796        let img = TensorImage::load_jpeg(
2797            include_bytes!("../../../testdata/grey.jpg"),
2798            Some(GREY),
2799            None,
2800        )
2801        .unwrap();
2802
2803        let mut gl_dst = TensorImage::new(640, 640, GREY, None).unwrap();
2804        let mut cpu_dst = TensorImage::new(640, 640, GREY, None).unwrap();
2805
2806        let mut converter = CPUProcessor::new();
2807
2808        converter
2809            .convert(
2810                &img,
2811                &mut cpu_dst,
2812                Rotation::None,
2813                Flip::None,
2814                Crop::no_crop(),
2815            )
2816            .unwrap();
2817
2818        let mut gl = GLProcessorThreaded::new(None).unwrap();
2819        gl.convert(
2820            &img,
2821            &mut gl_dst,
2822            Rotation::None,
2823            Flip::None,
2824            Crop::no_crop(),
2825        )
2826        .unwrap();
2827
2828        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2829    }
2830
2831    #[test]
2832    #[cfg(target_os = "linux")]
2833    fn test_g2d_src_crop() {
2834        if !is_g2d_available() {
2835            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
2836            return;
2837        }
2838        if !is_dma_available() {
2839            eprintln!(
2840                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2841            );
2842            return;
2843        }
2844
2845        let dst_width = 640;
2846        let dst_height = 640;
2847        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2848        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2849
2850        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2851        let mut cpu_converter = CPUProcessor::new();
2852        cpu_converter
2853            .convert(
2854                &src,
2855                &mut cpu_dst,
2856                Rotation::None,
2857                Flip::None,
2858                Crop {
2859                    src_rect: Some(Rect {
2860                        left: 0,
2861                        top: 0,
2862                        width: 640,
2863                        height: 360,
2864                    }),
2865                    dst_rect: None,
2866                    dst_color: None,
2867                },
2868            )
2869            .unwrap();
2870
2871        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2872        let mut g2d_converter = G2DProcessor::new().unwrap();
2873        g2d_converter
2874            .convert(
2875                &src,
2876                &mut g2d_dst,
2877                Rotation::None,
2878                Flip::None,
2879                Crop {
2880                    src_rect: Some(Rect {
2881                        left: 0,
2882                        top: 0,
2883                        width: 640,
2884                        height: 360,
2885                    }),
2886                    dst_rect: None,
2887                    dst_color: None,
2888                },
2889            )
2890            .unwrap();
2891
2892        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2893    }
2894
2895    #[test]
2896    #[cfg(target_os = "linux")]
2897    fn test_g2d_dst_crop() {
2898        if !is_g2d_available() {
2899            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
2900            return;
2901        }
2902        if !is_dma_available() {
2903            eprintln!(
2904                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2905            );
2906            return;
2907        }
2908
2909        let dst_width = 640;
2910        let dst_height = 640;
2911        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2912        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2913
2914        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2915        let mut cpu_converter = CPUProcessor::new();
2916        cpu_converter
2917            .convert(
2918                &src,
2919                &mut cpu_dst,
2920                Rotation::None,
2921                Flip::None,
2922                Crop {
2923                    src_rect: None,
2924                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2925                    dst_color: None,
2926                },
2927            )
2928            .unwrap();
2929
2930        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2931        let mut g2d_converter = G2DProcessor::new().unwrap();
2932        g2d_converter
2933            .convert(
2934                &src,
2935                &mut g2d_dst,
2936                Rotation::None,
2937                Flip::None,
2938                Crop {
2939                    src_rect: None,
2940                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2941                    dst_color: None,
2942                },
2943            )
2944            .unwrap();
2945
2946        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2947    }
2948
2949    #[test]
2950    #[cfg(target_os = "linux")]
2951    fn test_g2d_all_rgba() {
2952        if !is_g2d_available() {
2953            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2954            return;
2955        }
2956        if !is_dma_available() {
2957            eprintln!(
2958                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2959            );
2960            return;
2961        }
2962
2963        let dst_width = 640;
2964        let dst_height = 640;
2965        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2966        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2967
2968        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2969        let mut cpu_converter = CPUProcessor::new();
2970        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2971        let mut g2d_converter = G2DProcessor::new().unwrap();
2972
2973        for rot in [
2974            Rotation::None,
2975            Rotation::Clockwise90,
2976            Rotation::Rotate180,
2977            Rotation::CounterClockwise90,
2978        ] {
2979            cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2980            g2d_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2981            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2982                cpu_converter
2983                    .convert(
2984                        &src,
2985                        &mut cpu_dst,
2986                        Rotation::None,
2987                        Flip::None,
2988                        Crop {
2989                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2990                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2991                            dst_color: None,
2992                        },
2993                    )
2994                    .unwrap();
2995
2996                g2d_converter
2997                    .convert(
2998                        &src,
2999                        &mut g2d_dst,
3000                        Rotation::None,
3001                        Flip::None,
3002                        Crop {
3003                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
3004                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
3005                            dst_color: None,
3006                        },
3007                    )
3008                    .unwrap();
3009
3010                compare_images(
3011                    &g2d_dst,
3012                    &cpu_dst,
3013                    0.98,
3014                    &format!("{} {:?} {:?}", function!(), rot, flip),
3015                );
3016            }
3017        }
3018    }
3019
3020    #[test]
3021    #[cfg(target_os = "linux")]
3022    #[cfg(feature = "opengl")]
3023    fn test_opengl_src_crop() {
3024        if !is_opengl_available() {
3025            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3026            return;
3027        }
3028
3029        let dst_width = 640;
3030        let dst_height = 360;
3031        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3032        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
3033
3034        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3035        let mut cpu_converter = CPUProcessor::new();
3036        cpu_converter
3037            .convert(
3038                &src,
3039                &mut cpu_dst,
3040                Rotation::None,
3041                Flip::None,
3042                Crop {
3043                    src_rect: Some(Rect {
3044                        left: 320,
3045                        top: 180,
3046                        width: 1280 - 320,
3047                        height: 720 - 180,
3048                    }),
3049                    dst_rect: None,
3050                    dst_color: None,
3051                },
3052            )
3053            .unwrap();
3054
3055        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3056        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3057
3058        gl_converter
3059            .convert(
3060                &src,
3061                &mut gl_dst,
3062                Rotation::None,
3063                Flip::None,
3064                Crop {
3065                    src_rect: Some(Rect {
3066                        left: 320,
3067                        top: 180,
3068                        width: 1280 - 320,
3069                        height: 720 - 180,
3070                    }),
3071                    dst_rect: None,
3072                    dst_color: None,
3073                },
3074            )
3075            .unwrap();
3076
3077        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3078    }
3079
3080    #[test]
3081    #[cfg(target_os = "linux")]
3082    #[cfg(feature = "opengl")]
3083    fn test_opengl_dst_crop() {
3084        if !is_opengl_available() {
3085            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3086            return;
3087        }
3088
3089        let dst_width = 640;
3090        let dst_height = 640;
3091        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3092        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
3093
3094        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3095        let mut cpu_converter = CPUProcessor::new();
3096        cpu_converter
3097            .convert(
3098                &src,
3099                &mut cpu_dst,
3100                Rotation::None,
3101                Flip::None,
3102                Crop {
3103                    src_rect: None,
3104                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
3105                    dst_color: None,
3106                },
3107            )
3108            .unwrap();
3109
3110        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3111        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3112        gl_converter
3113            .convert(
3114                &src,
3115                &mut gl_dst,
3116                Rotation::None,
3117                Flip::None,
3118                Crop {
3119                    src_rect: None,
3120                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
3121                    dst_color: None,
3122                },
3123            )
3124            .unwrap();
3125
3126        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3127    }
3128
3129    #[test]
3130    #[cfg(target_os = "linux")]
3131    #[cfg(feature = "opengl")]
3132    fn test_opengl_all_rgba() {
3133        if !is_opengl_available() {
3134            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3135            return;
3136        }
3137
3138        let dst_width = 640;
3139        let dst_height = 640;
3140        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3141
3142        let mut cpu_converter = CPUProcessor::new();
3143
3144        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3145
3146        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
3147        if is_dma_available() {
3148            mem.push(Some(TensorMemory::Dma));
3149        }
3150        for m in mem {
3151            let src = TensorImage::load_jpeg(&file, Some(RGBA), m).unwrap();
3152
3153            for rot in [
3154                Rotation::None,
3155                Rotation::Clockwise90,
3156                Rotation::Rotate180,
3157                Rotation::CounterClockwise90,
3158            ] {
3159                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
3160                    let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
3161                    let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
3162                    cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
3163                    gl_dst.tensor.map().unwrap().as_mut_slice().fill(114);
3164                    cpu_converter
3165                        .convert(
3166                            &src,
3167                            &mut cpu_dst,
3168                            Rotation::None,
3169                            Flip::None,
3170                            Crop {
3171                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
3172                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
3173                                dst_color: None,
3174                            },
3175                        )
3176                        .unwrap();
3177
3178                    gl_converter
3179                        .convert(
3180                            &src,
3181                            &mut gl_dst,
3182                            Rotation::None,
3183                            Flip::None,
3184                            Crop {
3185                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
3186                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
3187                                dst_color: None,
3188                            },
3189                        )
3190                        .map_err(|e| {
3191                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
3192                            e
3193                        })
3194                        .unwrap();
3195
3196                    compare_images(
3197                        &gl_dst,
3198                        &cpu_dst,
3199                        0.98,
3200                        &format!("{} {:?} {:?}", function!(), rot, flip),
3201                    );
3202                }
3203            }
3204        }
3205    }
3206
3207    #[test]
3208    #[cfg(target_os = "linux")]
3209    fn test_cpu_rotate() {
3210        for rot in [
3211            Rotation::Clockwise90,
3212            Rotation::Rotate180,
3213            Rotation::CounterClockwise90,
3214        ] {
3215            test_cpu_rotate_(rot);
3216        }
3217    }
3218
3219    #[cfg(target_os = "linux")]
3220    fn test_cpu_rotate_(rot: Rotation) {
3221        // This test rotates the image 4 times and checks that the image was returned to
3222        // be the same Currently doesn't check if rotations actually rotated in
3223        // right direction
3224        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3225
3226        let unchanged_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
3227        let mut src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
3228
3229        let (dst_width, dst_height) = match rot {
3230            Rotation::None | Rotation::Rotate180 => (src.width(), src.height()),
3231            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src.height(), src.width()),
3232        };
3233
3234        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3235        let mut cpu_converter = CPUProcessor::new();
3236
3237        // After rotating 4 times, the image should be the same as the original
3238
3239        cpu_converter
3240            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3241            .unwrap();
3242
3243        cpu_converter
3244            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
3245            .unwrap();
3246
3247        cpu_converter
3248            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3249            .unwrap();
3250
3251        cpu_converter
3252            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
3253            .unwrap();
3254
3255        compare_images(&src, &unchanged_src, 0.98, function!());
3256    }
3257
3258    #[test]
3259    #[cfg(target_os = "linux")]
3260    #[cfg(feature = "opengl")]
3261    fn test_opengl_rotate() {
3262        if !is_opengl_available() {
3263            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3264            return;
3265        }
3266
3267        let size = (1280, 720);
3268        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
3269
3270        if is_dma_available() {
3271            mem.push(Some(TensorMemory::Dma));
3272        }
3273        for m in mem {
3274            for rot in [
3275                Rotation::Clockwise90,
3276                Rotation::Rotate180,
3277                Rotation::CounterClockwise90,
3278            ] {
3279                test_opengl_rotate_(size, rot, m);
3280            }
3281        }
3282    }
3283
3284    #[cfg(target_os = "linux")]
3285    #[cfg(feature = "opengl")]
3286    fn test_opengl_rotate_(
3287        size: (usize, usize),
3288        rot: Rotation,
3289        tensor_memory: Option<TensorMemory>,
3290    ) {
3291        let (dst_width, dst_height) = match rot {
3292            Rotation::None | Rotation::Rotate180 => size,
3293            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3294        };
3295
3296        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3297        let src = TensorImage::load_jpeg(&file, Some(RGBA), tensor_memory).unwrap();
3298
3299        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3300        let mut cpu_converter = CPUProcessor::new();
3301
3302        cpu_converter
3303            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3304            .unwrap();
3305
3306        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, tensor_memory).unwrap();
3307        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3308
3309        for _ in 0..5 {
3310            gl_converter
3311                .convert(&src, &mut gl_dst, rot, Flip::None, Crop::no_crop())
3312                .unwrap();
3313            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
3314        }
3315    }
3316
3317    #[test]
3318    #[cfg(target_os = "linux")]
3319    fn test_g2d_rotate() {
3320        if !is_g2d_available() {
3321            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
3322            return;
3323        }
3324        if !is_dma_available() {
3325            eprintln!(
3326                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3327            );
3328            return;
3329        }
3330
3331        let size = (1280, 720);
3332        for rot in [
3333            Rotation::Clockwise90,
3334            Rotation::Rotate180,
3335            Rotation::CounterClockwise90,
3336        ] {
3337            test_g2d_rotate_(size, rot);
3338        }
3339    }
3340
3341    #[cfg(target_os = "linux")]
3342    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
3343        let (dst_width, dst_height) = match rot {
3344            Rotation::None | Rotation::Rotate180 => size,
3345            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
3346        };
3347
3348        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3349        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();
3350
3351        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3352        let mut cpu_converter = CPUProcessor::new();
3353
3354        cpu_converter
3355            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
3356            .unwrap();
3357
3358        let mut g2d_dst =
3359            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3360        let mut g2d_converter = G2DProcessor::new().unwrap();
3361
3362        g2d_converter
3363            .convert(&src, &mut g2d_dst, rot, Flip::None, Crop::no_crop())
3364            .unwrap();
3365
3366        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3367    }
3368
3369    #[test]
3370    fn test_rgba_to_yuyv_resize_cpu() {
3371        let src = load_bytes_to_tensor(
3372            1280,
3373            720,
3374            RGBA,
3375            None,
3376            include_bytes!("../../../testdata/camera720p.rgba"),
3377        )
3378        .unwrap();
3379
3380        let (dst_width, dst_height) = (640, 360);
3381
3382        let mut dst = TensorImage::new(dst_width, dst_height, YUYV, None).unwrap();
3383
3384        let mut dst_through_yuyv = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3385        let mut dst_direct = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3386
3387        let mut cpu_converter = CPUProcessor::new();
3388
3389        cpu_converter
3390            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3391            .unwrap();
3392
3393        cpu_converter
3394            .convert(
3395                &dst,
3396                &mut dst_through_yuyv,
3397                Rotation::None,
3398                Flip::None,
3399                Crop::no_crop(),
3400            )
3401            .unwrap();
3402
3403        cpu_converter
3404            .convert(
3405                &src,
3406                &mut dst_direct,
3407                Rotation::None,
3408                Flip::None,
3409                Crop::no_crop(),
3410            )
3411            .unwrap();
3412
3413        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
3414    }
3415
3416    #[test]
3417    #[cfg(target_os = "linux")]
3418    #[cfg(feature = "opengl")]
3419    #[ignore = "opengl doesn't support rendering to YUYV texture"]
3420    fn test_rgba_to_yuyv_resize_opengl() {
3421        if !is_opengl_available() {
3422            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3423            return;
3424        }
3425
3426        if !is_dma_available() {
3427            eprintln!(
3428                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3429                function!()
3430            );
3431            return;
3432        }
3433
3434        let src = load_bytes_to_tensor(
3435            1280,
3436            720,
3437            RGBA,
3438            None,
3439            include_bytes!("../../../testdata/camera720p.rgba"),
3440        )
3441        .unwrap();
3442
3443        let (dst_width, dst_height) = (640, 360);
3444
3445        let mut dst =
3446            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3447
3448        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3449
3450        gl_converter
3451            .convert(
3452                &src,
3453                &mut dst,
3454                Rotation::None,
3455                Flip::None,
3456                Crop::new()
3457                    .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
3458                    .with_dst_color(Some([255, 255, 255, 255])),
3459            )
3460            .unwrap();
3461
3462        std::fs::write(
3463            "rgba_to_yuyv_opengl.yuyv",
3464            dst.tensor().map().unwrap().as_slice(),
3465        )
3466        .unwrap();
3467        let mut cpu_dst =
3468            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3469        CPUProcessor::new()
3470            .convert(
3471                &src,
3472                &mut cpu_dst,
3473                Rotation::None,
3474                Flip::None,
3475                Crop::no_crop(),
3476            )
3477            .unwrap();
3478
3479        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
3480    }
3481
3482    #[test]
3483    #[cfg(target_os = "linux")]
3484    fn test_rgba_to_yuyv_resize_g2d() {
3485        if !is_g2d_available() {
3486            eprintln!(
3487                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3488            );
3489            return;
3490        }
3491        if !is_dma_available() {
3492            eprintln!(
3493                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3494            );
3495            return;
3496        }
3497
3498        let src = load_bytes_to_tensor(
3499            1280,
3500            720,
3501            RGBA,
3502            Some(TensorMemory::Dma),
3503            include_bytes!("../../../testdata/camera720p.rgba"),
3504        )
3505        .unwrap();
3506
3507        let (dst_width, dst_height) = (1280, 720);
3508
3509        let mut cpu_dst =
3510            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3511
3512        let mut g2d_dst =
3513            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3514
3515        let mut g2d_converter = G2DProcessor::new().unwrap();
3516
3517        g2d_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3518        g2d_converter
3519            .convert(
3520                &src,
3521                &mut g2d_dst,
3522                Rotation::None,
3523                Flip::None,
3524                Crop {
3525                    src_rect: None,
3526                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3527                    dst_color: None,
3528                },
3529            )
3530            .unwrap();
3531
3532        cpu_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3533        CPUProcessor::new()
3534            .convert(
3535                &src,
3536                &mut cpu_dst,
3537                Rotation::None,
3538                Flip::None,
3539                Crop {
3540                    src_rect: None,
3541                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3542                    dst_color: None,
3543                },
3544            )
3545            .unwrap();
3546
3547        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3548    }
3549
3550    #[test]
3551    fn test_yuyv_to_rgba_cpu() {
3552        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3553        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3554        src.tensor()
3555            .map()
3556            .unwrap()
3557            .as_mut_slice()
3558            .copy_from_slice(&file);
3559
3560        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3561        let mut cpu_converter = CPUProcessor::new();
3562
3563        cpu_converter
3564            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3565            .unwrap();
3566
3567        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3568        target_image
3569            .tensor()
3570            .map()
3571            .unwrap()
3572            .as_mut_slice()
3573            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3574
3575        compare_images(&dst, &target_image, 0.98, function!());
3576    }
3577
3578    #[test]
3579    fn test_yuyv_to_rgb_cpu() {
3580        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3581        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3582        src.tensor()
3583            .map()
3584            .unwrap()
3585            .as_mut_slice()
3586            .copy_from_slice(&file);
3587
3588        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3589        let mut cpu_converter = CPUProcessor::new();
3590
3591        cpu_converter
3592            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3593            .unwrap();
3594
3595        let target_image = TensorImage::new(1280, 720, RGB, None).unwrap();
3596        target_image
3597            .tensor()
3598            .map()
3599            .unwrap()
3600            .as_mut_slice()
3601            .as_chunks_mut::<3>()
3602            .0
3603            .iter_mut()
3604            .zip(
3605                include_bytes!("../../../testdata/camera720p.rgba")
3606                    .as_chunks::<4>()
3607                    .0,
3608            )
3609            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3610
3611        compare_images(&dst, &target_image, 0.98, function!());
3612    }
3613
3614    #[test]
3615    #[cfg(target_os = "linux")]
3616    fn test_yuyv_to_rgba_g2d() {
3617        if !is_g2d_available() {
3618            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3619            return;
3620        }
3621        if !is_dma_available() {
3622            eprintln!(
3623                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3624            );
3625            return;
3626        }
3627
3628        let src = load_bytes_to_tensor(
3629            1280,
3630            720,
3631            YUYV,
3632            None,
3633            include_bytes!("../../../testdata/camera720p.yuyv"),
3634        )
3635        .unwrap();
3636
3637        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3638        let mut g2d_converter = G2DProcessor::new().unwrap();
3639
3640        g2d_converter
3641            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3642            .unwrap();
3643
3644        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3645        target_image
3646            .tensor()
3647            .map()
3648            .unwrap()
3649            .as_mut_slice()
3650            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3651
3652        compare_images(&dst, &target_image, 0.98, function!());
3653    }
3654
3655    #[test]
3656    #[cfg(target_os = "linux")]
3657    #[cfg(feature = "opengl")]
3658    fn test_yuyv_to_rgba_opengl() {
3659        if !is_opengl_available() {
3660            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3661            return;
3662        }
3663        if !is_dma_available() {
3664            eprintln!(
3665                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3666                function!()
3667            );
3668            return;
3669        }
3670
3671        let src = load_bytes_to_tensor(
3672            1280,
3673            720,
3674            YUYV,
3675            Some(TensorMemory::Dma),
3676            include_bytes!("../../../testdata/camera720p.yuyv"),
3677        )
3678        .unwrap();
3679
3680        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3681        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3682
3683        gl_converter
3684            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3685            .unwrap();
3686
3687        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3688        target_image
3689            .tensor()
3690            .map()
3691            .unwrap()
3692            .as_mut_slice()
3693            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3694
3695        compare_images(&dst, &target_image, 0.98, function!());
3696    }
3697
3698    #[test]
3699    #[cfg(target_os = "linux")]
3700    fn test_yuyv_to_rgb_g2d() {
3701        if !is_g2d_available() {
3702            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3703            return;
3704        }
3705        if !is_dma_available() {
3706            eprintln!(
3707                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3708            );
3709            return;
3710        }
3711
3712        let src = load_bytes_to_tensor(
3713            1280,
3714            720,
3715            YUYV,
3716            None,
3717            include_bytes!("../../../testdata/camera720p.yuyv"),
3718        )
3719        .unwrap();
3720
3721        let mut g2d_dst = TensorImage::new(1280, 720, RGB, Some(TensorMemory::Dma)).unwrap();
3722        let mut g2d_converter = G2DProcessor::new().unwrap();
3723
3724        g2d_converter
3725            .convert(
3726                &src,
3727                &mut g2d_dst,
3728                Rotation::None,
3729                Flip::None,
3730                Crop::no_crop(),
3731            )
3732            .unwrap();
3733
3734        let mut cpu_dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3735        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3736
3737        cpu_converter
3738            .convert(
3739                &src,
3740                &mut cpu_dst,
3741                Rotation::None,
3742                Flip::None,
3743                Crop::no_crop(),
3744            )
3745            .unwrap();
3746
3747        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3748    }
3749
3750    #[test]
3751    #[cfg(target_os = "linux")]
3752    fn test_yuyv_to_yuyv_resize_g2d() {
3753        if !is_g2d_available() {
3754            eprintln!(
3755                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3756            );
3757            return;
3758        }
3759        if !is_dma_available() {
3760            eprintln!(
3761                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3762            );
3763            return;
3764        }
3765
3766        let src = load_bytes_to_tensor(
3767            1280,
3768            720,
3769            YUYV,
3770            None,
3771            include_bytes!("../../../testdata/camera720p.yuyv"),
3772        )
3773        .unwrap();
3774
3775        let mut g2d_dst = TensorImage::new(600, 400, YUYV, Some(TensorMemory::Dma)).unwrap();
3776        let mut g2d_converter = G2DProcessor::new().unwrap();
3777
3778        g2d_converter
3779            .convert(
3780                &src,
3781                &mut g2d_dst,
3782                Rotation::None,
3783                Flip::None,
3784                Crop::no_crop(),
3785            )
3786            .unwrap();
3787
3788        let mut cpu_dst = TensorImage::new(600, 400, YUYV, None).unwrap();
3789        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3790
3791        cpu_converter
3792            .convert(
3793                &src,
3794                &mut cpu_dst,
3795                Rotation::None,
3796                Flip::None,
3797                Crop::no_crop(),
3798            )
3799            .unwrap();
3800
3801        // TODO: compare YUYV and YUYV images without having to convert them to RGB
3802        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3803    }
3804
3805    #[test]
3806    fn test_yuyv_to_rgba_resize_cpu() {
3807        let src = load_bytes_to_tensor(
3808            1280,
3809            720,
3810            YUYV,
3811            None,
3812            include_bytes!("../../../testdata/camera720p.yuyv"),
3813        )
3814        .unwrap();
3815
3816        let (dst_width, dst_height) = (960, 540);
3817
3818        let mut dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3819        let mut cpu_converter = CPUProcessor::new();
3820
3821        cpu_converter
3822            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3823            .unwrap();
3824
3825        let mut dst_target = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3826        let src_target = load_bytes_to_tensor(
3827            1280,
3828            720,
3829            RGBA,
3830            None,
3831            include_bytes!("../../../testdata/camera720p.rgba"),
3832        )
3833        .unwrap();
3834        cpu_converter
3835            .convert(
3836                &src_target,
3837                &mut dst_target,
3838                Rotation::None,
3839                Flip::None,
3840                Crop::no_crop(),
3841            )
3842            .unwrap();
3843
3844        compare_images(&dst, &dst_target, 0.98, function!());
3845    }
3846
3847    #[test]
3848    #[cfg(target_os = "linux")]
3849    fn test_yuyv_to_rgba_crop_flip_g2d() {
3850        if !is_g2d_available() {
3851            eprintln!(
3852                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3853            );
3854            return;
3855        }
3856        if !is_dma_available() {
3857            eprintln!(
3858                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3859            );
3860            return;
3861        }
3862
3863        let src = load_bytes_to_tensor(
3864            1280,
3865            720,
3866            YUYV,
3867            Some(TensorMemory::Dma),
3868            include_bytes!("../../../testdata/camera720p.yuyv"),
3869        )
3870        .unwrap();
3871
3872        let (dst_width, dst_height) = (640, 640);
3873
3874        let mut dst_g2d =
3875            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3876        let mut g2d_converter = G2DProcessor::new().unwrap();
3877
3878        g2d_converter
3879            .convert(
3880                &src,
3881                &mut dst_g2d,
3882                Rotation::None,
3883                Flip::Horizontal,
3884                Crop {
3885                    src_rect: Some(Rect {
3886                        left: 20,
3887                        top: 15,
3888                        width: 400,
3889                        height: 300,
3890                    }),
3891                    dst_rect: None,
3892                    dst_color: None,
3893                },
3894            )
3895            .unwrap();
3896
3897        let mut dst_cpu =
3898            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3899        let mut cpu_converter = CPUProcessor::new();
3900
3901        cpu_converter
3902            .convert(
3903                &src,
3904                &mut dst_cpu,
3905                Rotation::None,
3906                Flip::Horizontal,
3907                Crop {
3908                    src_rect: Some(Rect {
3909                        left: 20,
3910                        top: 15,
3911                        width: 400,
3912                        height: 300,
3913                    }),
3914                    dst_rect: None,
3915                    dst_color: None,
3916                },
3917            )
3918            .unwrap();
3919        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
3920    }
3921
3922    #[test]
3923    #[cfg(target_os = "linux")]
3924    #[cfg(feature = "opengl")]
3925    fn test_yuyv_to_rgba_crop_flip_opengl() {
3926        if !is_opengl_available() {
3927            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3928            return;
3929        }
3930
3931        if !is_dma_available() {
3932            eprintln!(
3933                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3934                function!()
3935            );
3936            return;
3937        }
3938
3939        let src = load_bytes_to_tensor(
3940            1280,
3941            720,
3942            YUYV,
3943            Some(TensorMemory::Dma),
3944            include_bytes!("../../../testdata/camera720p.yuyv"),
3945        )
3946        .unwrap();
3947
3948        let (dst_width, dst_height) = (640, 640);
3949
3950        let mut dst_gl =
3951            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3952        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3953
3954        gl_converter
3955            .convert(
3956                &src,
3957                &mut dst_gl,
3958                Rotation::None,
3959                Flip::Horizontal,
3960                Crop {
3961                    src_rect: Some(Rect {
3962                        left: 20,
3963                        top: 15,
3964                        width: 400,
3965                        height: 300,
3966                    }),
3967                    dst_rect: None,
3968                    dst_color: None,
3969                },
3970            )
3971            .unwrap();
3972
3973        let mut dst_cpu =
3974            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3975        let mut cpu_converter = CPUProcessor::new();
3976
3977        cpu_converter
3978            .convert(
3979                &src,
3980                &mut dst_cpu,
3981                Rotation::None,
3982                Flip::Horizontal,
3983                Crop {
3984                    src_rect: Some(Rect {
3985                        left: 20,
3986                        top: 15,
3987                        width: 400,
3988                        height: 300,
3989                    }),
3990                    dst_rect: None,
3991                    dst_color: None,
3992                },
3993            )
3994            .unwrap();
3995        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
3996    }
3997
3998    #[test]
3999    fn test_vyuy_to_rgba_cpu() {
4000        let file = include_bytes!("../../../testdata/camera720p.vyuy").to_vec();
4001        let src = TensorImage::new(1280, 720, VYUY, None).unwrap();
4002        src.tensor()
4003            .map()
4004            .unwrap()
4005            .as_mut_slice()
4006            .copy_from_slice(&file);
4007
4008        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
4009        let mut cpu_converter = CPUProcessor::new();
4010
4011        cpu_converter
4012            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4013            .unwrap();
4014
4015        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
4016        target_image
4017            .tensor()
4018            .map()
4019            .unwrap()
4020            .as_mut_slice()
4021            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
4022
4023        compare_images(&dst, &target_image, 0.98, function!());
4024    }
4025
4026    #[test]
4027    fn test_vyuy_to_rgb_cpu() {
4028        let file = include_bytes!("../../../testdata/camera720p.vyuy").to_vec();
4029        let src = TensorImage::new(1280, 720, VYUY, None).unwrap();
4030        src.tensor()
4031            .map()
4032            .unwrap()
4033            .as_mut_slice()
4034            .copy_from_slice(&file);
4035
4036        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
4037        let mut cpu_converter = CPUProcessor::new();
4038
4039        cpu_converter
4040            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4041            .unwrap();
4042
4043        let target_image = TensorImage::new(1280, 720, RGB, None).unwrap();
4044        target_image
4045            .tensor()
4046            .map()
4047            .unwrap()
4048            .as_mut_slice()
4049            .as_chunks_mut::<3>()
4050            .0
4051            .iter_mut()
4052            .zip(
4053                include_bytes!("../../../testdata/camera720p.rgba")
4054                    .as_chunks::<4>()
4055                    .0,
4056            )
4057            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
4058
4059        compare_images(&dst, &target_image, 0.98, function!());
4060    }
4061
4062    #[test]
4063    #[cfg(target_os = "linux")]
4064    fn test_vyuy_to_rgba_g2d() {
4065        if !is_g2d_available() {
4066            eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D library (libg2d.so.2) not available");
4067            return;
4068        }
4069        if !is_dma_available() {
4070            eprintln!(
4071                "SKIPPED: test_vyuy_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4072            );
4073            return;
4074        }
4075
4076        let src = load_bytes_to_tensor(
4077            1280,
4078            720,
4079            VYUY,
4080            None,
4081            include_bytes!("../../../testdata/camera720p.vyuy"),
4082        )
4083        .unwrap();
4084
4085        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
4086        let mut g2d_converter = G2DProcessor::new().unwrap();
4087
4088        match g2d_converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop()) {
4089            Err(Error::G2D(_)) => {
4090                eprintln!("SKIPPED: test_vyuy_to_rgba_g2d - G2D does not support VYUY format");
4091                return;
4092            }
4093            r => r.unwrap(),
4094        }
4095
4096        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
4097        target_image
4098            .tensor()
4099            .map()
4100            .unwrap()
4101            .as_mut_slice()
4102            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
4103
4104        compare_images(&dst, &target_image, 0.98, function!());
4105    }
4106
4107    #[test]
4108    #[cfg(target_os = "linux")]
4109    fn test_vyuy_to_rgb_g2d() {
4110        if !is_g2d_available() {
4111            eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D library (libg2d.so.2) not available");
4112            return;
4113        }
4114        if !is_dma_available() {
4115            eprintln!(
4116                "SKIPPED: test_vyuy_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
4117            );
4118            return;
4119        }
4120
4121        let src = load_bytes_to_tensor(
4122            1280,
4123            720,
4124            VYUY,
4125            None,
4126            include_bytes!("../../../testdata/camera720p.vyuy"),
4127        )
4128        .unwrap();
4129
4130        let mut g2d_dst = TensorImage::new(1280, 720, RGB, Some(TensorMemory::Dma)).unwrap();
4131        let mut g2d_converter = G2DProcessor::new().unwrap();
4132
4133        match g2d_converter.convert(
4134            &src,
4135            &mut g2d_dst,
4136            Rotation::None,
4137            Flip::None,
4138            Crop::no_crop(),
4139        ) {
4140            Err(Error::G2D(_)) => {
4141                eprintln!("SKIPPED: test_vyuy_to_rgb_g2d - G2D does not support VYUY format");
4142                return;
4143            }
4144            r => r.unwrap(),
4145        }
4146
4147        let mut cpu_dst = TensorImage::new(1280, 720, RGB, None).unwrap();
4148        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
4149
4150        cpu_converter
4151            .convert(
4152                &src,
4153                &mut cpu_dst,
4154                Rotation::None,
4155                Flip::None,
4156                Crop::no_crop(),
4157            )
4158            .unwrap();
4159
4160        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
4161    }
4162
4163    #[test]
4164    #[cfg(target_os = "linux")]
4165    #[cfg(feature = "opengl")]
4166    fn test_vyuy_to_rgba_opengl() {
4167        if !is_opengl_available() {
4168            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4169            return;
4170        }
4171        if !is_dma_available() {
4172            eprintln!(
4173                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4174                function!()
4175            );
4176            return;
4177        }
4178
4179        let src = load_bytes_to_tensor(
4180            1280,
4181            720,
4182            VYUY,
4183            Some(TensorMemory::Dma),
4184            include_bytes!("../../../testdata/camera720p.vyuy"),
4185        )
4186        .unwrap();
4187
4188        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
4189        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4190
4191        match gl_converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop()) {
4192            Err(Error::NotSupported(_)) => {
4193                eprintln!(
4194                    "SKIPPED: {} - OpenGL does not support VYUY DMA format",
4195                    function!()
4196                );
4197                return;
4198            }
4199            r => r.unwrap(),
4200        }
4201
4202        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
4203        target_image
4204            .tensor()
4205            .map()
4206            .unwrap()
4207            .as_mut_slice()
4208            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
4209
4210        compare_images(&dst, &target_image, 0.98, function!());
4211    }
4212
4213    #[test]
4214    fn test_nv12_to_rgba_cpu() {
4215        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
4216        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
4217        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
4218
4219        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
4220        let mut cpu_converter = CPUProcessor::new();
4221
4222        cpu_converter
4223            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4224            .unwrap();
4225
4226        let target_image = TensorImage::load_jpeg(
4227            include_bytes!("../../../testdata/zidane.jpg"),
4228            Some(RGBA),
4229            None,
4230        )
4231        .unwrap();
4232
4233        compare_images(&dst, &target_image, 0.98, function!());
4234    }
4235
4236    #[test]
4237    fn test_nv12_to_rgb_cpu() {
4238        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
4239        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
4240        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
4241
4242        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
4243        let mut cpu_converter = CPUProcessor::new();
4244
4245        cpu_converter
4246            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4247            .unwrap();
4248
4249        let target_image = TensorImage::load_jpeg(
4250            include_bytes!("../../../testdata/zidane.jpg"),
4251            Some(RGB),
4252            None,
4253        )
4254        .unwrap();
4255
4256        compare_images(&dst, &target_image, 0.98, function!());
4257    }
4258
4259    #[test]
4260    fn test_nv12_to_grey_cpu() {
4261        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
4262        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
4263        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
4264
4265        let mut dst = TensorImage::new(1280, 720, GREY, None).unwrap();
4266        let mut cpu_converter = CPUProcessor::new();
4267
4268        cpu_converter
4269            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4270            .unwrap();
4271
4272        let target_image = TensorImage::load_jpeg(
4273            include_bytes!("../../../testdata/zidane.jpg"),
4274            Some(GREY),
4275            None,
4276        )
4277        .unwrap();
4278
4279        compare_images(&dst, &target_image, 0.98, function!());
4280    }
4281
4282    #[test]
4283    fn test_nv12_to_yuyv_cpu() {
4284        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
4285        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
4286        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
4287
4288        let mut dst = TensorImage::new(1280, 720, YUYV, None).unwrap();
4289        let mut cpu_converter = CPUProcessor::new();
4290
4291        cpu_converter
4292            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
4293            .unwrap();
4294
4295        let target_image = TensorImage::load_jpeg(
4296            include_bytes!("../../../testdata/zidane.jpg"),
4297            Some(RGB),
4298            None,
4299        )
4300        .unwrap();
4301
4302        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
4303    }
4304
    #[test]
    fn test_cpu_resize_planar_rgb() {
        // Letterbox a 4x4 interleaved RGBA image into a 5x5 PLANAR_RGB
        // destination: the 4x4 content lands at (1,1) and row/column 0 are
        // filled with the dst_color (114), exercising the planar fill path.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // Interleaved RGBA source: each group of 4 values is one pixel.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGB, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the content at (1,1) with no scaling (4x4 -> 4x4) and
                // fill the remaining border with 114.
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Planar expected output: each literal line below is one full 5x5
        // color plane (R, G, B in order); within a line each group of five
        // values is one destination row. The first value of every group and
        // the whole first group are the 114 border fill.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
4350
    #[test]
    fn test_cpu_resize_planar_rgba() {
        // Same as test_cpu_resize_planar_rgb but targeting PLANAR_RGBA, which
        // adds a fourth (alpha) plane to the expected output.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // Interleaved RGBA source: each group of 4 values is one pixel.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the content at (1,1) with no scaling and fill the
                // border with RGB=114, alpha=255.
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Planar expected output: each literal line is one full 5x5 plane
        // (R, G, B, A in order); each group of five values is one destination
        // row. The border fill is 114 for the color planes and 255 for the
        // alpha plane.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
4397
4398    #[test]
4399    #[cfg(target_os = "linux")]
4400    #[cfg(feature = "opengl")]
4401    fn test_opengl_resize_planar_rgb() {
4402        if !is_opengl_available() {
4403            eprintln!("SKIPPED: {} - OpenGL not available", function!());
4404            return;
4405        }
4406
4407        if !is_dma_available() {
4408            eprintln!(
4409                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
4410                function!()
4411            );
4412            return;
4413        }
4414
4415        let dst_width = 640;
4416        let dst_height = 640;
4417        let file = include_bytes!("../../../testdata/test_image.jpg").to_vec();
4418        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
4419
4420        let mut cpu_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
4421        let mut cpu_converter = CPUProcessor::new();
4422        cpu_converter
4423            .convert(
4424                &src,
4425                &mut cpu_dst,
4426                Rotation::None,
4427                Flip::None,
4428                Crop::no_crop(),
4429            )
4430            .unwrap();
4431        cpu_converter
4432            .convert(
4433                &src,
4434                &mut cpu_dst,
4435                Rotation::None,
4436                Flip::None,
4437                Crop::new()
4438                    .with_dst_rect(Some(Rect {
4439                        left: 102,
4440                        top: 102,
4441                        width: 440,
4442                        height: 440,
4443                    }))
4444                    .with_dst_color(Some([114, 114, 114, 114])),
4445            )
4446            .unwrap();
4447
4448        let mut gl_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
4449        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
4450
4451        gl_converter
4452            .convert(
4453                &src,
4454                &mut gl_dst,
4455                Rotation::None,
4456                Flip::None,
4457                Crop::new()
4458                    .with_dst_rect(Some(Rect {
4459                        left: 102,
4460                        top: 102,
4461                        width: 440,
4462                        height: 440,
4463                    }))
4464                    .with_dst_color(Some([114, 114, 114, 114])),
4465            )
4466            .unwrap();
4467        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
4468    }
4469
4470    #[test]
4471    fn test_cpu_resize_nv16() {
4472        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
4473        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
4474
4475        let mut cpu_nv16_dst = TensorImage::new(640, 640, NV16, None).unwrap();
4476        let mut cpu_rgb_dst = TensorImage::new(640, 640, RGB, None).unwrap();
4477        let mut cpu_converter = CPUProcessor::new();
4478
4479        cpu_converter
4480            .convert(
4481                &src,
4482                &mut cpu_nv16_dst,
4483                Rotation::None,
4484                Flip::None,
4485                // Crop::no_crop(),
4486                Crop::new()
4487                    .with_dst_rect(Some(Rect {
4488                        left: 20,
4489                        top: 140,
4490                        width: 600,
4491                        height: 360,
4492                    }))
4493                    .with_dst_color(Some([255, 128, 0, 255])),
4494            )
4495            .unwrap();
4496
4497        cpu_converter
4498            .convert(
4499                &src,
4500                &mut cpu_rgb_dst,
4501                Rotation::None,
4502                Flip::None,
4503                Crop::new()
4504                    .with_dst_rect(Some(Rect {
4505                        left: 20,
4506                        top: 140,
4507                        width: 600,
4508                        height: 360,
4509                    }))
4510                    .with_dst_color(Some([255, 128, 0, 255])),
4511            )
4512            .unwrap();
4513        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
4514    }
4515
4516    fn load_bytes_to_tensor(
4517        width: usize,
4518        height: usize,
4519        fourcc: FourCharCode,
4520        memory: Option<TensorMemory>,
4521        bytes: &[u8],
4522    ) -> Result<TensorImage, Error> {
4523        let src = TensorImage::new(width, height, fourcc, memory)?;
4524        src.tensor().map()?.as_mut_slice().copy_from_slice(bytes);
4525        Ok(src)
4526    }
4527
4528    fn compare_images(img1: &TensorImage, img2: &TensorImage, threshold: f64, name: &str) {
4529        assert_eq!(img1.height(), img2.height(), "Heights differ");
4530        assert_eq!(img1.width(), img2.width(), "Widths differ");
4531        assert_eq!(img1.fourcc(), img2.fourcc(), "FourCC differ");
4532        assert!(
4533            matches!(img1.fourcc(), RGB | RGBA | GREY | PLANAR_RGB),
4534            "FourCC must be RGB or RGBA for comparison"
4535        );
4536
4537        let image1 = match img1.fourcc() {
4538            RGB => image::RgbImage::from_vec(
4539                img1.width() as u32,
4540                img1.height() as u32,
4541                img1.tensor().map().unwrap().to_vec(),
4542            )
4543            .unwrap(),
4544            RGBA => image::RgbaImage::from_vec(
4545                img1.width() as u32,
4546                img1.height() as u32,
4547                img1.tensor().map().unwrap().to_vec(),
4548            )
4549            .unwrap()
4550            .convert(),
4551            GREY => image::GrayImage::from_vec(
4552                img1.width() as u32,
4553                img1.height() as u32,
4554                img1.tensor().map().unwrap().to_vec(),
4555            )
4556            .unwrap()
4557            .convert(),
4558            PLANAR_RGB => image::GrayImage::from_vec(
4559                img1.width() as u32,
4560                (img1.height() * 3) as u32,
4561                img1.tensor().map().unwrap().to_vec(),
4562            )
4563            .unwrap()
4564            .convert(),
4565            _ => return,
4566        };
4567
4568        let image2 = match img2.fourcc() {
4569            RGB => image::RgbImage::from_vec(
4570                img2.width() as u32,
4571                img2.height() as u32,
4572                img2.tensor().map().unwrap().to_vec(),
4573            )
4574            .unwrap(),
4575            RGBA => image::RgbaImage::from_vec(
4576                img2.width() as u32,
4577                img2.height() as u32,
4578                img2.tensor().map().unwrap().to_vec(),
4579            )
4580            .unwrap()
4581            .convert(),
4582            GREY => image::GrayImage::from_vec(
4583                img2.width() as u32,
4584                img2.height() as u32,
4585                img2.tensor().map().unwrap().to_vec(),
4586            )
4587            .unwrap()
4588            .convert(),
4589            PLANAR_RGB => image::GrayImage::from_vec(
4590                img2.width() as u32,
4591                (img2.height() * 3) as u32,
4592                img2.tensor().map().unwrap().to_vec(),
4593            )
4594            .unwrap()
4595            .convert(),
4596            _ => return,
4597        };
4598
4599        let similarity = image_compare::rgb_similarity_structure(
4600            &image_compare::Algorithm::RootMeanSquared,
4601            &image1,
4602            &image2,
4603        )
4604        .expect("Image Comparison failed");
4605        if similarity.score < threshold {
4606            // image1.save(format!("{name}_1.png"));
4607            // image2.save(format!("{name}_2.png"));
4608            similarity
4609                .image
4610                .to_color_map()
4611                .save(format!("{name}.png"))
4612                .unwrap();
4613            panic!(
4614                "{name}: converted image and target image have similarity score too low: {} < {}",
4615                similarity.score, threshold
4616            )
4617        }
4618    }
4619
4620    fn compare_images_convert_to_rgb(
4621        img1: &TensorImage,
4622        img2: &TensorImage,
4623        threshold: f64,
4624        name: &str,
4625    ) {
4626        assert_eq!(img1.height(), img2.height(), "Heights differ");
4627        assert_eq!(img1.width(), img2.width(), "Widths differ");
4628
4629        let mut img_rgb1 =
4630            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
4631        let mut img_rgb2 =
4632            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
4633        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
4634        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();
4635
4636        let image1 = image::RgbImage::from_vec(
4637            img_rgb1.width() as u32,
4638            img_rgb1.height() as u32,
4639            img_rgb1.tensor().map().unwrap().to_vec(),
4640        )
4641        .unwrap();
4642
4643        let image2 = image::RgbImage::from_vec(
4644            img_rgb2.width() as u32,
4645            img_rgb2.height() as u32,
4646            img_rgb2.tensor().map().unwrap().to_vec(),
4647        )
4648        .unwrap();
4649
4650        let similarity = image_compare::rgb_similarity_structure(
4651            &image_compare::Algorithm::RootMeanSquared,
4652            &image1,
4653            &image2,
4654        )
4655        .expect("Image Comparison failed");
4656        if similarity.score < threshold {
4657            // image1.save(format!("{name}_1.png"));
4658            // image2.save(format!("{name}_2.png"));
4659            similarity
4660                .image
4661                .to_color_map()
4662                .save(format!("{name}.png"))
4663                .unwrap();
4664            panic!(
4665                "{name}: converted image and target image have similarity score too low: {} < {}",
4666                similarity.score, threshold
4667            )
4668        }
4669    }
4670
4671    // =========================================================================
4672    // NV12 Format Tests
4673    // =========================================================================
4674
4675    #[test]
4676    fn test_nv12_tensor_image_creation() {
4677        let width = 640;
4678        let height = 480;
4679        let img = TensorImage::new(width, height, NV12, None).unwrap();
4680
4681        assert_eq!(img.width(), width);
4682        assert_eq!(img.height(), height);
4683        assert_eq!(img.fourcc(), NV12);
4684        // NV12 uses shape [H*3/2, W] to store Y plane + UV plane
4685        assert_eq!(img.tensor().shape(), &[height * 3 / 2, width]);
4686    }
4687
4688    #[test]
4689    fn test_nv12_channels() {
4690        let img = TensorImage::new(640, 480, NV12, None).unwrap();
4691        // NV12 reports 2 channels (Y + interleaved UV)
4692        assert_eq!(img.channels(), 2);
4693    }
4694
4695    // =========================================================================
4696    // TensorImageRef Tests
4697    // =========================================================================
4698
4699    #[test]
4700    fn test_tensor_image_ref_from_planar_tensor() {
4701        // Create a planar RGB tensor [3, 480, 640]
4702        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4703
4704        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4705
4706        assert_eq!(img_ref.width(), 640);
4707        assert_eq!(img_ref.height(), 480);
4708        assert_eq!(img_ref.channels(), 3);
4709        assert_eq!(img_ref.fourcc(), PLANAR_RGB);
4710        assert!(img_ref.is_planar());
4711    }
4712
4713    #[test]
4714    fn test_tensor_image_ref_from_interleaved_tensor() {
4715        // Create an interleaved RGBA tensor [480, 640, 4]
4716        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
4717
4718        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA).unwrap();
4719
4720        assert_eq!(img_ref.width(), 640);
4721        assert_eq!(img_ref.height(), 480);
4722        assert_eq!(img_ref.channels(), 4);
4723        assert_eq!(img_ref.fourcc(), RGBA);
4724        assert!(!img_ref.is_planar());
4725    }
4726
4727    #[test]
4728    fn test_tensor_image_ref_invalid_shape() {
4729        // 2D tensor should fail
4730        let mut tensor = Tensor::<u8>::new(&[480, 640], None, None).unwrap();
4731        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB);
4732        assert!(matches!(result, Err(Error::InvalidShape(_))));
4733    }
4734
4735    #[test]
4736    fn test_tensor_image_ref_wrong_channels() {
4737        // RGBA expects 4 channels but tensor has 3
4738        let mut tensor = Tensor::<u8>::new(&[480, 640, 3], None, None).unwrap();
4739        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA);
4740        assert!(matches!(result, Err(Error::InvalidShape(_))));
4741    }
4742
4743    #[test]
4744    fn test_tensor_image_dst_trait_tensor_image() {
4745        let img = TensorImage::new(640, 480, RGB, None).unwrap();
4746
4747        // Test TensorImageDst trait implementation
4748        fn check_dst<T: TensorImageDst>(dst: &T) {
4749            assert_eq!(dst.width(), 640);
4750            assert_eq!(dst.height(), 480);
4751            assert_eq!(dst.channels(), 3);
4752            assert!(!dst.is_planar());
4753        }
4754
4755        check_dst(&img);
4756    }
4757
4758    #[test]
4759    fn test_tensor_image_dst_trait_tensor_image_ref() {
4760        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4761        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4762
4763        fn check_dst<T: TensorImageDst>(dst: &T) {
4764            assert_eq!(dst.width(), 640);
4765            assert_eq!(dst.height(), 480);
4766            assert_eq!(dst.channels(), 3);
4767            assert!(dst.is_planar());
4768        }
4769
4770        check_dst(&img_ref);
4771    }
4772
4773    #[test]
4774    fn test_rgb_int8_format() {
4775        let img = TensorImage::new(1280, 720, RGB_INT8, Some(TensorMemory::Mem)).unwrap();
4776        assert_eq!(img.width(), 1280);
4777        assert_eq!(img.height(), 720);
4778        assert_eq!(img.channels(), 3);
4779        assert!(!img.is_planar());
4780        assert_eq!(img.fourcc(), RGB_INT8);
4781    }
4782
4783    #[test]
4784    fn test_planar_rgb_int8_format() {
4785        let img = TensorImage::new(1280, 720, PLANAR_RGB_INT8, Some(TensorMemory::Mem)).unwrap();
4786        assert_eq!(img.width(), 1280);
4787        assert_eq!(img.height(), 720);
4788        assert_eq!(img.channels(), 3);
4789        assert!(img.is_planar());
4790        assert_eq!(img.fourcc(), PLANAR_RGB_INT8);
4791    }
4792
4793    #[test]
4794    fn test_rgb_int8_from_tensor() {
4795        let tensor = Tensor::<u8>::new(&[720, 1280, 3], None, None).unwrap();
4796        let img = TensorImage::from_tensor(tensor, RGB_INT8).unwrap();
4797        assert_eq!(img.width(), 1280);
4798        assert_eq!(img.height(), 720);
4799        assert_eq!(img.channels(), 3);
4800        assert!(!img.is_planar());
4801        assert_eq!(img.fourcc(), RGB_INT8);
4802    }
4803
4804    #[test]
4805    fn test_planar_rgb_int8_from_tensor() {
4806        let tensor = Tensor::<u8>::new(&[3, 720, 1280], None, None).unwrap();
4807        let img = TensorImage::from_tensor(tensor, PLANAR_RGB_INT8).unwrap();
4808        assert_eq!(img.width(), 1280);
4809        assert_eq!(img.height(), 720);
4810        assert_eq!(img.channels(), 3);
4811        assert!(img.is_planar());
4812        assert_eq!(img.fourcc(), PLANAR_RGB_INT8);
4813    }
4814
4815    #[test]
4816    fn test_fourcc_is_int8() {
4817        assert!(fourcc_is_int8(RGB_INT8));
4818        assert!(fourcc_is_int8(PLANAR_RGB_INT8));
4819        assert!(!fourcc_is_int8(RGB));
4820        assert!(!fourcc_is_int8(PLANAR_RGB));
4821        assert!(!fourcc_is_int8(RGBA));
4822    }
4823
4824    #[test]
4825    fn test_fourcc_uint8_equivalent() {
4826        assert_eq!(fourcc_uint8_equivalent(RGB_INT8), RGB);
4827        assert_eq!(fourcc_uint8_equivalent(PLANAR_RGB_INT8), PLANAR_RGB);
4828        assert_eq!(fourcc_uint8_equivalent(RGBA), RGBA);
4829    }
4830
4831    #[test]
4832    fn test_fourcc_is_packed_rgb() {
4833        assert!(fourcc_is_packed_rgb(RGB));
4834        assert!(fourcc_is_packed_rgb(RGB_INT8));
4835        assert!(!fourcc_is_packed_rgb(PLANAR_RGB));
4836        assert!(!fourcc_is_packed_rgb(RGBA));
4837    }
4838
    /// Integration test that exercises the PBO-to-PBO convert path.
    /// Uses ImageProcessor::create_image() to allocate PBO-backed tensors,
    /// then converts between them. Skipped when GL is unavailable or the
    /// backend is not PBO (e.g. DMA-buf systems).
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[test]
    fn test_convert_pbo_to_pbo() {
        let mut converter = ImageProcessor::new().unwrap();

        // Skip if GL is not available or backend is not PBO
        let is_pbo = converter
            .opengl
            .as_ref()
            .is_some_and(|gl| gl.transfer_backend() == opengl_headless::TransferBackend::Pbo);
        if !is_pbo {
            eprintln!("Skipping test_convert_pbo_to_pbo: backend is not PBO");
            return;
        }

        let src_w = 640;
        let src_h = 480;
        let dst_w = 320;
        let dst_h = 240;

        // Create PBO-backed source image
        let pbo_src = converter.create_image(src_w, src_h, RGBA).unwrap();
        assert_eq!(
            pbo_src.tensor().memory(),
            TensorMemory::Pbo,
            "create_image should produce a PBO tensor"
        );

        // Fill source PBO with test pattern: load JPEG then convert Mem→PBO
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        let jpeg_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        // Resize JPEG into a Mem temp of the right size, then copy into PBO
        let mut mem_src = TensorImage::new(src_w, src_h, RGBA, Some(TensorMemory::Mem)).unwrap();
        CPUProcessor::new()
            .convert(
                &jpeg_src,
                &mut mem_src,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Copy pixel data into the PBO source by mapping it
        // (scoped so both maps are released before the GL convert below).
        {
            let src_data = mem_src.tensor().map().unwrap();
            let mut pbo_map = pbo_src.tensor().map().unwrap();
            pbo_map.copy_from_slice(&src_data);
        }

        // Create PBO-backed destination image
        let mut pbo_dst = converter.create_image(dst_w, dst_h, RGBA).unwrap();
        assert_eq!(pbo_dst.tensor().memory(), TensorMemory::Pbo);

        // Convert PBO→PBO (this exercises convert_pbo_to_pbo)
        converter
            .convert(
                &pbo_src,
                &mut pbo_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Verify: compare with CPU-only conversion of the same input.
        // Threshold is 0.95 (slightly looser than other tests) to allow for
        // GL vs CPU scaling differences.
        let mut cpu_dst = TensorImage::new(dst_w, dst_h, RGBA, Some(TensorMemory::Mem)).unwrap();
        CPUProcessor::new()
            .convert(
                &mem_src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        compare_images(&pbo_dst, &cpu_dst, 0.95, function!());
        log::info!("test_convert_pbo_to_pbo: PASS — PBO-to-PBO convert matches CPU reference");
    }
4925
4926    #[test]
4927    fn test_tensor_image_bgra() {
4928        let img =
4929            TensorImage::new(640, 480, BGRA, Some(edgefirst_tensor::TensorMemory::Mem)).unwrap();
4930        assert_eq!(img.width(), 640);
4931        assert_eq!(img.height(), 480);
4932        assert_eq!(img.channels(), 4);
4933        assert_eq!(img.fourcc(), BGRA);
4934    }
4935
4936    // ========================================================================
4937    // Tests for EDGEFIRST_FORCE_BACKEND env var
4938    // ========================================================================
4939
    #[test]
    fn test_force_backend_cpu() {
        // Forcing "cpu" should initialize the CPU backend and record the
        // forced-backend choice on the processor.
        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
        // SAFETY: set_var/remove_var are unsafe because they race with
        // concurrent environment reads from other threads. NOTE(review):
        // env-mutating tests can interfere with each other when the test
        // harness runs them in parallel — consider serializing them.
        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
        let result = ImageProcessor::new();
        // Restore the previous value before asserting so a failure does not
        // leak the override into other tests.
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
        }
        let converter = result.unwrap();
        assert!(converter.cpu.is_some());
        assert_eq!(converter.forced_backend, Some(ForcedBackend::Cpu));
    }
4953
    #[test]
    fn test_force_backend_invalid() {
        // An unrecognized EDGEFIRST_FORCE_BACKEND value must surface as a
        // ForcedBackendUnavailable error rather than silently falling back.
        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
        // SAFETY: set_var/remove_var race with concurrent environment reads
        // from other threads. NOTE(review): consider serializing the
        // env-mutating tests.
        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "invalid") };
        let result = ImageProcessor::new();
        // Restore the previous value before asserting so a failure does not
        // leak the override into other tests.
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
        }
        assert!(
            matches!(&result, Err(Error::ForcedBackendUnavailable(s)) if s.contains("unknown")),
            "invalid backend value should return ForcedBackendUnavailable error: {result:?}"
        );
    }
4968
    #[test]
    fn test_force_backend_unset() {
        // With the variable unset, no backend is forced and the normal
        // fallback chain applies (forced_backend stays None).
        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
        // SAFETY: set_var/remove_var race with concurrent environment reads
        // from other threads. NOTE(review): consider serializing the
        // env-mutating tests.
        unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") };
        let result = ImageProcessor::new();
        // Restore the previous value before asserting so a failure does not
        // leak the override into other tests.
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
        }
        let converter = result.unwrap();
        assert!(converter.forced_backend.is_none());
    }
4981
4982    // ========================================================================
4983    // Tests for hybrid mask path error handling
4984    // ========================================================================
4985
    #[test]
    fn test_draw_masks_proto_no_cpu_returns_error() {
        // With all backends disabled, draw_masks_proto must fail with an
        // Error::Internal mentioning the missing CPU backend, not panic.
        // Disable CPU backend to trigger the error path
        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
        // SAFETY: set_var/remove_var race with concurrent environment reads
        // from other threads. NOTE(review): env-mutating tests can interfere
        // with each other under the parallel test harness.
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
        // SAFETY: see above.
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
        // SAFETY: see above.
        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };

        let result = ImageProcessor::new();

        // Restore all three variables before asserting so a failure does not
        // leak the overrides into other tests.
        match original_cpu {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
        }
        match original_gl {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
        }
        match original_g2d {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
        }

        let mut converter = result.unwrap();
        assert!(converter.cpu.is_none(), "CPU should be disabled");

        // Minimal detection + proto-mask inputs: one box with four mask
        // coefficients against an 8x8x4 all-zero proto tensor.
        let mut dst = TensorImage::new(640, 480, RGBA, Some(TensorMemory::Mem)).unwrap();
        let det = [DetectBox {
            bbox: edgefirst_decoder::BoundingBox {
                xmin: 0.1,
                ymin: 0.1,
                xmax: 0.5,
                ymax: 0.5,
            },
            score: 0.9,
            label: 0,
        }];
        let proto_data = ProtoData {
            mask_coefficients: vec![vec![0.5; 4]],
            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
        };
        let result = converter.draw_masks_proto(&mut dst, &det, &proto_data);
        assert!(
            matches!(&result, Err(Error::Internal(s)) if s.contains("CPU backend")),
            "draw_masks_proto without CPU should return Internal error: {result:?}"
        );
    }
5035
    #[test]
    fn test_draw_masks_proto_cpu_fallback_works() {
        // Force CPU-only backend to ensure the CPU fallback path executes
        let original = std::env::var("EDGEFIRST_FORCE_BACKEND").ok();
        // SAFETY: set_var/remove_var race with concurrent environment reads
        // from other threads. NOTE(review): env-mutating tests can interfere
        // with each other under the parallel test harness.
        unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", "cpu") };
        let result = ImageProcessor::new();
        // Restore the previous value before asserting so a failure does not
        // leak the override into other tests.
        match original {
            Some(s) => unsafe { std::env::set_var("EDGEFIRST_FORCE_BACKEND", s) },
            None => unsafe { std::env::remove_var("EDGEFIRST_FORCE_BACKEND") },
        }

        let mut converter = result.unwrap();
        assert!(converter.cpu.is_some());

        // Minimal detection + proto-mask inputs: one box with four mask
        // coefficients against an 8x8x4 all-zero proto tensor.
        let mut dst = TensorImage::new(64, 64, RGBA, Some(TensorMemory::Mem)).unwrap();
        let det = [DetectBox {
            bbox: edgefirst_decoder::BoundingBox {
                xmin: 0.1,
                ymin: 0.1,
                xmax: 0.5,
                ymax: 0.5,
            },
            score: 0.9,
            label: 0,
        }];
        let proto_data = ProtoData {
            mask_coefficients: vec![vec![0.5; 4]],
            protos: edgefirst_decoder::ProtoTensor::Float(ndarray::Array3::<f32>::zeros((8, 8, 4))),
        };
        let result = converter.draw_masks_proto(&mut dst, &det, &proto_data);
        assert!(result.is_ok(), "CPU fallback path should work: {result:?}");
    }
5068}