// edgefirst_image/lib.rs

1// SPDX-FileCopyrightText: Copyright 2025 Au-Zone Technologies
2// SPDX-License-Identifier: Apache-2.0
3
4/*!
5
6## EdgeFirst HAL - Image Converter
7
8The `edgefirst_image` crate is part of the EdgeFirst Hardware Abstraction
9Layer (HAL) and provides functionality for converting images between
10different formats and sizes.  The crate is designed to work with hardware
11acceleration when available, but also provides a CPU-based fallback for
12environments where hardware acceleration is not present or not suitable.
13
14The main features of the `edgefirst_image` crate include:
15- Support for various image formats, including YUYV, RGB, RGBA, and GREY.
16- Support for source crop, destination crop, rotation, and flipping.
17- Image conversion using hardware acceleration (G2D, OpenGL) when available.
18- CPU-based image conversion as a fallback option.
19
20The crate defines a `TensorImage` struct that represents an image as a
21tensor, along with its format information. It also provides an
22`ImageProcessor` struct that manages the conversion process, selecting
23the appropriate conversion method based on the available hardware.
24
25## Examples
26
27```rust
28# use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
29# fn main() -> Result<(), edgefirst_image::Error> {
30let image = include_bytes!("../../../testdata/zidane.jpg");
31let img = TensorImage::load(image, Some(RGBA), None)?;
32let mut converter = ImageProcessor::new()?;
33let mut dst = TensorImage::new(640, 480, RGB, None)?;
34converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
35# Ok(())
36# }
37```
38
39## Environment Variables
40The behavior of the `edgefirst_image::ImageProcessor` struct can be influenced by the
41following environment variables:
42- `EDGEFIRST_DISABLE_GL`: If set to `1`, disables the use of OpenGL for image
43  conversion, forcing the use of CPU or other available hardware methods.
44- `EDGEFIRST_DISABLE_G2D`: If set to `1`, disables the use of G2D for image
45  conversion, forcing the use of CPU or other available hardware methods.
46- `EDGEFIRST_DISABLE_CPU`: If set to `1`, disables the use of CPU for image
47  conversion, forcing the use of hardware acceleration methods. If no hardware
48  acceleration methods are available, an error will be returned when attempting
49  to create an `ImageProcessor`.
50
51Additionally the TensorMemory used by default allocations can be controlled using the
52`EDGEFIRST_TENSOR_FORCE_MEM` environment variable. If set to `1`, default tensor memory
uses system memory. This disables the use of specialized memory regions for tensors
and therefore hardware acceleration, but it can improve the performance of the CPU
converter, since plain system memory is typically faster for the CPU to access.
55*/
56#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
57
58#[cfg(feature = "decoder")]
59use edgefirst_decoder::{DetectBox, ProtoData, Segmentation};
60use edgefirst_tensor::{Tensor, TensorMemory, TensorTrait as _};
61use enum_dispatch::enum_dispatch;
62use four_char_code::{four_char_code, FourCharCode};
63use std::{fmt::Display, time::Instant};
64use zune_jpeg::{
65    zune_core::{colorspace::ColorSpace, options::DecoderOptions},
66    JpegDecoder,
67};
68use zune_png::PngDecoder;
69
70pub use cpu::CPUProcessor;
71pub use error::{Error, Result};
72#[cfg(target_os = "linux")]
73pub use g2d::G2DProcessor;
74#[cfg(target_os = "linux")]
75#[cfg(feature = "opengl")]
76pub use opengl_headless::GLProcessorThreaded;
77#[cfg(target_os = "linux")]
78#[cfg(feature = "opengl")]
79#[cfg(feature = "decoder")]
80pub use opengl_headless::Int8InterpolationMode;
81#[cfg(target_os = "linux")]
82#[cfg(feature = "opengl")]
83pub use opengl_headless::{probe_egl_displays, EglDisplayInfo, EglDisplayKind};
84
/// Result of rendering a single per-instance grayscale mask.
///
/// Contains the bounding-box region in output image coordinates and the
/// raw `u8` pixel data (RED channel only, 0–255 representing sigmoid output).
///
/// Only available when the `decoder` feature is enabled.
#[cfg(feature = "decoder")]
#[derive(Debug, Clone)]
pub struct MaskResult {
    /// X offset of the bbox region in the output image.
    pub x: usize,
    /// Y offset of the bbox region in the output image.
    pub y: usize,
    /// Width of the bbox region.
    pub w: usize,
    /// Height of the bbox region.
    pub h: usize,
    /// Grayscale pixel data (w * h bytes, row-major).
    pub pixels: Vec<u8>,
}
103
// Converter backend modules. Note that `g2d` and `opengl_headless` are
// declared unconditionally while their public re-exports above are gated on
// Linux / the `opengl` feature — presumably the modules cfg-gate their
// contents internally; TODO(review): confirm they build on non-Linux targets.
mod cpu;
mod error;
mod g2d;
mod opengl_headless;
108
/// 8 bit interleaved YUV422, limited range
pub const YUYV: FourCharCode = four_char_code!("YUYV");
/// 8 bit planar YUV420, limited range
pub const NV12: FourCharCode = four_char_code!("NV12");
/// 8 bit planar YUV422, limited range
pub const NV16: FourCharCode = four_char_code!("NV16");
/// 8 bit RGBA
pub const RGBA: FourCharCode = four_char_code!("RGBA");
/// 8 bit RGB
pub const RGB: FourCharCode = four_char_code!("RGB ");
/// 8 bit grayscale, full range
pub const GREY: FourCharCode = four_char_code!("Y800");

/// 8 bit planar RGB (channels-first, three planes).
// TODO: planar RGB is 8BPS? https://fourcc.org/8bps/
pub const PLANAR_RGB: FourCharCode = four_char_code!("8BPS");

/// 8 bit planar RGBA (channels-first, four planes).
// TODO: What fourcc code is planar RGBA?
pub const PLANAR_RGBA: FourCharCode = four_char_code!("8BPA");
127
/// An image represented as a tensor with associated format information.
#[derive(Debug)]
pub struct TensorImage {
    // Backing pixel storage; its shape encodes width/height/channels
    // according to the format (see `TensorImage::from_tensor`).
    tensor: Tensor<u8>,
    // Pixel format (e.g. RGB, RGBA, NV12) as a FourCC code.
    fourcc: FourCharCode,
    // Cached result of `fourcc_planar(fourcc)`; true for channels-first layouts.
    is_planar: bool,
}
135
impl TensorImage {
    /// Creates a new `TensorImage` with the specified width, height, format,
    /// and memory type.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let img = TensorImage::new(640, 480, RGB, Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 640);
    /// assert_eq!(img.height(), 480);
    /// assert_eq!(img.fourcc(), RGB);
    /// assert!(!img.is_planar());
    /// # Ok(())
    /// # }
    /// ```
    pub fn new(
        width: usize,
        height: usize,
        fourcc: FourCharCode,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        let channels = fourcc_channels(fourcc)?;
        let is_planar = fourcc_planar(fourcc)?;

        // NV12 is semi-planar with Y plane (W×H) + UV plane (W×H/2)
        // Total bytes = W × H × 1.5. Use shape [H*3/2, W] to encode this.
        // NOTE(review): an odd `height` truncates in the `* 3 / 2` division;
        // NV12 assumes even dimensions — confirm callers validate this.
        if fourcc == NV12 {
            let shape = vec![height * 3 / 2, width];
            let tensor = Tensor::new(&shape, memory, None)?;

            return Ok(Self {
                tensor,
                fourcc,
                is_planar,
            });
        }

        // NV16 is semi-planar with Y plane (W×H) + UV plane (W×H)
        // Total bytes = W × H × 2. Use shape [H*2, W] to encode this.
        if fourcc == NV16 {
            let shape = vec![height * 2, width];
            let tensor = Tensor::new(&shape, memory, None)?;

            return Ok(Self {
                tensor,
                fourcc,
                is_planar,
            });
        }

        // Planar (channels-first) formats use shape [C, H, W].
        if is_planar {
            let shape = vec![channels, height, width];
            let tensor = Tensor::new(&shape, memory, None)?;

            return Ok(Self {
                tensor,
                fourcc,
                is_planar,
            });
        }

        // Interleaved formats use shape [H, W, C].
        let shape = vec![height, width, channels];
        let tensor = Tensor::new(&shape, memory, None)?;

        Ok(Self {
            tensor,
            fourcc,
            is_planar,
        })
    }

    /// Creates a new `TensorImage` from an existing tensor and specified
    /// format.
    ///
    /// The required tensor shape depends on the pixel format:
    ///
    /// | Format | Shape | Description |
    /// |--------|-------|-------------|
    /// | `RGB`  | `[H, W, 3]` | 3-channel interleaved |
    /// | `RGBA` | `[H, W, 4]` | 4-channel interleaved |
    /// | `GREY` | `[H, W, 1]` | Single-channel grayscale |
    /// | `YUYV` | `[H, W, 2]` | YUV 4:2:2 interleaved |
    /// | `PLANAR_RGB`  | `[3, H, W]` | Channels-first (3 planes) |
    /// | `PLANAR_RGBA` | `[4, H, W]` | Channels-first (4 planes) |
    /// | `NV12` | `[H*3/2, W]` | Semi-planar YUV 4:2:0 (2D) |
    /// | `NV16` | `[H*2, W]`   | Semi-planar YUV 4:2:2 (2D) |
    ///
    /// Most formats use a 3D tensor where the channel dimension matches
    /// the format's channel count. The semi-planar formats NV12 and NV16
    /// are special: the Y and UV planes have different heights, so the
    /// data cannot be described as `[H, W, C]`. Instead the contiguous
    /// memory is represented as a 2D tensor whose first dimension encodes
    /// the total byte height (Y rows + UV rows).
    ///
    /// # Examples
    ///
    /// RGB (3D interleaved):
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::Tensor;
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// GREY (3D with 1 channel):
    /// ```rust
    /// use edgefirst_image::{GREY, TensorImage};
    /// use edgefirst_tensor::Tensor;
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[480, 640, 1], None, None)?;
    /// let img = TensorImage::from_tensor(tensor, GREY)?;
    /// assert_eq!(img.width(), 640);
    /// assert_eq!(img.height(), 480);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// NV12 (2D semi-planar, height*3/2 rows):
    /// ```rust
    /// use edgefirst_image::{NV12, TensorImage};
    /// use edgefirst_tensor::Tensor;
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// // 1080p NV12: 1080 Y rows + 540 UV rows = 1620 total rows
    /// let tensor = Tensor::new(&[1620, 1920], None, None)?;
    /// let img = TensorImage::from_tensor(tensor, NV12)?;
    /// assert_eq!(img.width(), 1920);
    /// assert_eq!(img.height(), 1080);
    /// # Ok(())
    /// # }
    /// ```
    pub fn from_tensor(tensor: Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
        let shape = tensor.shape();
        let is_planar = fourcc_planar(fourcc)?;

        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
        if fourcc == NV12 || fourcc == NV16 {
            if shape.len() != 2 {
                return Err(Error::InvalidShape(format!(
                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
                    fourcc.to_string(),
                    shape.len(),
                    shape
                )));
            }
            return Ok(Self {
                tensor,
                fourcc,
                is_planar,
            });
        }

        // All other formats use 3D shape
        if shape.len() != 3 {
            return Err(Error::InvalidShape(format!(
                "Tensor shape must have 3 dimensions, got {}: {:?}",
                shape.len(),
                shape
            )));
        }
        // Channel axis is first for planar layouts, last for interleaved.
        let channels = if is_planar { shape[0] } else { shape[2] };

        if fourcc_channels(fourcc)? != channels {
            return Err(Error::InvalidShape(format!(
                "Invalid tensor shape {:?} for format {}",
                shape,
                fourcc.to_string()
            )));
        }

        Ok(Self {
            tensor,
            fourcc,
            is_planar,
        })
    }

    /// Loads an image from the given byte slice, attempting to decode it as
    /// JPEG or PNG format. Exif orientation is supported. The default format is
    /// RGB.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGBA, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.png");
    /// let img = TensorImage::load(jpeg_bytes, Some(RGBA), Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGBA);
    /// # Ok(())
    /// # }
    /// ```
    pub fn load(
        image: &[u8],
        format: Option<FourCharCode>,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        // Try JPEG first, then PNG. The individual decoder errors are
        // discarded here, so only the generic error below surfaces when
        // both attempts fail.
        if let Ok(i) = Self::load_jpeg(image, format, memory) {
            return Ok(i);
        }
        if let Ok(i) = Self::load_png(image, format, memory) {
            return Ok(i);
        }

        Err(Error::NotSupported(
            "Could not decode as jpeg or png".to_string(),
        ))
    }

    /// Loads a JPEG image from the given byte slice. Supports EXIF orientation.
    /// The default format is RGB.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let jpeg_bytes = include_bytes!("../../../testdata/zidane.jpg");
    /// let img = TensorImage::load_jpeg(jpeg_bytes, Some(RGB), Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn load_jpeg(
        image: &[u8],
        format: Option<FourCharCode>,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        // Map the requested FourCC onto a zune-jpeg output colorspace.
        let colour = match format {
            Some(RGB) => ColorSpace::RGB,
            Some(RGBA) => ColorSpace::RGBA,
            Some(GREY) => ColorSpace::Luma,
            None => ColorSpace::RGB,
            Some(f) => {
                return Err(Error::NotSupported(format!(
                    "Unsupported image format {}",
                    f.display()
                )));
            }
        };
        let options = DecoderOptions::default().jpeg_set_out_colorspace(colour);
        let mut decoder = JpegDecoder::new_with_options(image, options);
        decoder.decode_headers()?;

        let image_info = decoder.info().ok_or(Error::Internal(
            "JPEG did not return decoded image info".to_string(),
        ))?;

        let converted_color_space = decoder
            .get_output_colorspace()
            .ok_or(Error::Internal("No output colorspace".to_string()))?;

        // Translate the decoder's actual output colorspace back to a FourCC;
        // it may differ from the requested format.
        let converted_color_space = match converted_color_space {
            ColorSpace::RGB => RGB,
            ColorSpace::RGBA => RGBA,
            ColorSpace::Luma => GREY,
            _ => {
                return Err(Error::NotSupported(
                    "Unsupported JPEG decoder output".to_string(),
                ));
            }
        };

        let dest_format = format.unwrap_or(converted_color_space);

        let (rotation, flip) = decoder
            .exif()
            .map(|x| Self::read_exif_orientation(x))
            .unwrap_or((Rotation::None, Flip::None));

        // Fast path: no EXIF transform needed. Decode straight into the
        // destination, via a system-memory temporary only when a pixel-format
        // conversion is required.
        if (rotation, flip) == (Rotation::None, Flip::None) {
            let mut img = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                dest_format,
                memory,
            )?;

            if converted_color_space != dest_format {
                let tmp = Self::new(
                    image_info.width as usize,
                    image_info.height as usize,
                    converted_color_space,
                    Some(TensorMemory::Mem),
                )?;

                decoder.decode_into(&mut tmp.tensor.map()?)?;

                CPUProcessor::convert_format(&tmp, &mut img)?;
                return Ok(img);
            }
            decoder.decode_into(&mut img.tensor.map()?)?;
            return Ok(img);
        }

        // EXIF transform needed: decode (and convert) into system memory
        // first, then rotate/flip into the final tensor image.
        let mut tmp = Self::new(
            image_info.width as usize,
            image_info.height as usize,
            dest_format,
            Some(TensorMemory::Mem),
        )?;

        if converted_color_space != dest_format {
            let tmp2 = Self::new(
                image_info.width as usize,
                image_info.height as usize,
                converted_color_space,
                Some(TensorMemory::Mem),
            )?;

            decoder.decode_into(&mut tmp2.tensor.map()?)?;

            CPUProcessor::convert_format(&tmp2, &mut tmp)?;
        } else {
            decoder.decode_into(&mut tmp.tensor.map()?)?;
        }

        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
    }

    /// Loads a PNG image from the given byte slice. Supports EXIF orientation.
    /// The default format is RGB.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::TensorMemory;
    /// # fn main() -> Result<(), edgefirst_image::Error> {
    /// let png_bytes = include_bytes!("../../../testdata/zidane.png");
    /// let img = TensorImage::load_png(png_bytes, Some(RGB), Some(TensorMemory::Mem))?;
    /// assert_eq!(img.width(), 1280);
    /// assert_eq!(img.height(), 720);
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn load_png(
        image: &[u8],
        format: Option<FourCharCode>,
        memory: Option<TensorMemory>,
    ) -> Result<Self> {
        let format = format.unwrap_or(RGB);
        // The PNG path only toggles an added alpha channel, so only RGB and
        // RGBA are supported outputs here (unlike the JPEG path, no GREY).
        let alpha = match format {
            RGB => false,
            RGBA => true,
            _ => {
                return Err(Error::NotImplemented(
                    "Unsupported image format".to_string(),
                ));
            }
        };

        let options = DecoderOptions::default()
            .png_set_add_alpha_channel(alpha)
            .png_set_decode_animated(false);
        let mut decoder = PngDecoder::new_with_options(image, options);
        decoder.decode_headers()?;
        let image_info = decoder.get_info().ok_or(Error::Internal(
            "PNG did not return decoded image info".to_string(),
        ))?;

        let (rotation, flip) = image_info
            .exif
            .as_ref()
            .map(|x| Self::read_exif_orientation(x))
            .unwrap_or((Rotation::None, Flip::None));

        // Fast path: no EXIF transform, decode directly into the destination.
        if (rotation, flip) == (Rotation::None, Flip::None) {
            let img = Self::new(image_info.width, image_info.height, format, memory)?;
            decoder.decode_into(&mut img.tensor.map()?)?;
            return Ok(img);
        }

        // EXIF transform needed: decode into system memory first, then
        // rotate/flip into the final tensor image.
        let tmp = Self::new(
            image_info.width,
            image_info.height,
            format,
            Some(TensorMemory::Mem),
        )?;
        decoder.decode_into(&mut tmp.tensor.map()?)?;

        rotate_flip_to_tensor_image(&tmp, rotation, flip, memory)
    }

    /// Maps the raw EXIF block's `Orientation` tag (values 1-8) onto the
    /// (rotation, flip) pair needed to display the image upright. Missing,
    /// unreadable, or out-of-range orientation data yields no transform.
    ///
    /// NOTE(review): values 5 and 7 are transpositions; the mapping below
    /// assumes the rotation is applied before the horizontal flip — confirm
    /// against the operation order in `rotate_flip_to_tensor_image`.
    fn read_exif_orientation(exif_: &[u8]) -> (Rotation, Flip) {
        let exifreader = exif::Reader::new();
        let Ok(exif_) = exifreader.read_raw(exif_.to_vec()) else {
            return (Rotation::None, Flip::None);
        };
        let Some(orientation) = exif_.get_field(exif::Tag::Orientation, exif::In::PRIMARY) else {
            return (Rotation::None, Flip::None);
        };
        match orientation.value.get_uint(0) {
            Some(1) => (Rotation::None, Flip::None),
            Some(2) => (Rotation::None, Flip::Horizontal),
            Some(3) => (Rotation::Rotate180, Flip::None),
            Some(4) => (Rotation::Rotate180, Flip::Horizontal),
            Some(5) => (Rotation::Clockwise90, Flip::Horizontal),
            Some(6) => (Rotation::Clockwise90, Flip::None),
            Some(7) => (Rotation::CounterClockwise90, Flip::Horizontal),
            Some(8) => (Rotation::CounterClockwise90, Flip::None),
            Some(v) => {
                log::warn!("broken orientation EXIF value: {v}");
                (Rotation::None, Flip::None)
            }
            None => (Rotation::None, Flip::None),
        }
    }

    /// Saves the image as a JPEG file at the specified path with the given
    /// quality. Only RGB and RGBA formats are supported.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::Tensor;
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, None)?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// let save_path = "/tmp/output.jpg";
    /// img.save_jpeg(save_path, 90)?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn save_jpeg(&self, path: &str, quality: u8) -> Result<()> {
        if self.is_planar {
            return Err(Error::NotImplemented(
                "Saving planar images is not supported".to_string(),
            ));
        }

        let colour = if self.fourcc == RGB {
            jpeg_encoder::ColorType::Rgb
        } else if self.fourcc == RGBA {
            jpeg_encoder::ColorType::Rgba
        } else {
            return Err(Error::NotImplemented(
                "Unsupported image format for saving".to_string(),
            ));
        };

        let encoder = jpeg_encoder::Encoder::new_file(path, quality)?;
        let tensor_map = self.tensor.map()?;

        // NOTE(review): width/height are narrowed to u16 here; dimensions
        // above 65535 would silently truncate — confirm inputs are bounded.
        encoder.encode(
            &tensor_map,
            self.width() as u16,
            self.height() as u16,
            colour,
        )?;

        Ok(())
    }

    /// Returns a reference to the underlying tensor.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// let underlying_tensor = img.tensor();
    /// assert_eq!(underlying_tensor.name(), "Tensor");
    /// # Ok(())
    /// # }
    /// ```
    pub fn tensor(&self) -> &Tensor<u8> {
        &self.tensor
    }

    /// Returns the FourCC code representing the image format.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.fourcc(), RGB);
    /// # Ok(())
    /// # }
    /// ```
    pub fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }

    /// Returns whether the image is stored in planar (channels-first) layout.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert!(!img.is_planar());
    /// # Ok(())
    /// # }
    /// ```
    pub fn is_planar(&self) -> bool {
        self.is_planar
    }

    /// Returns the width of the image in pixels.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.width(), 1280);
    /// # Ok(())
    /// # }
    /// ```
    pub fn width(&self) -> usize {
        // NV12/NV16 use 2D shape [H*k, W]
        if self.fourcc == NV12 || self.fourcc == NV16 {
            return self.tensor.shape()[1];
        }
        match self.is_planar {
            true => self.tensor.shape()[2],
            false => self.tensor.shape()[1],
        }
    }

    /// Returns the height of the image in pixels.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.height(), 720);
    /// # Ok(())
    /// # }
    /// ```
    pub fn height(&self) -> usize {
        // NV12 uses shape [H*3/2, W], so height = shape[0] * 2 / 3
        if self.fourcc == NV12 {
            return self.tensor.shape()[0] * 2 / 3;
        }
        // NV16 uses shape [H*2, W], so height = shape[0] / 2
        if self.fourcc == NV16 {
            return self.tensor.shape()[0] / 2;
        }
        match self.is_planar {
            true => self.tensor.shape()[1],
            false => self.tensor.shape()[0],
        }
    }

    /// Returns the number of channels in the image.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.channels(), 3);
    /// # Ok(())
    /// # }
    /// ```
    pub fn channels(&self) -> usize {
        // NV12/NV16 use 2D shape, conceptually have 2 components (Y + interleaved UV)
        if self.fourcc == NV12 || self.fourcc == NV16 {
            return 2;
        }
        match self.is_planar {
            true => self.tensor.shape()[0],
            false => self.tensor.shape()[2],
        }
    }

    /// Returns the row stride in bytes.
    ///
    /// # Examples
    /// ```rust
    /// use edgefirst_image::{RGB, TensorImage};
    /// use edgefirst_tensor::{Tensor, TensorTrait};
    ///  # fn main() -> Result<(), edgefirst_image::Error> {
    /// let tensor = Tensor::new(&[720, 1280, 3], None, Some("Tensor"))?;
    /// let img = TensorImage::from_tensor(tensor, RGB)?;
    /// assert_eq!(img.row_stride(), 1280*3);
    /// # Ok(())
    /// # }
    /// ```
    pub fn row_stride(&self) -> usize {
        // NOTE(review): for semi-planar NV12/NV16 the per-row stride of the
        // Y plane is `width()`; confirm `fourcc_planar` classifies those
        // formats such that the planar arm (plain `width()`) is taken here,
        // since the interleaved arm would yield `width() * 2`.
        match self.is_planar {
            true => self.width(),
            false => self.width() * self.channels(),
        }
    }
}
731
/// Trait for types that can be used as destination images for conversion.
///
/// This trait abstracts over the difference between owned (`TensorImage`) and
/// borrowed (`TensorImageRef`) image buffers, enabling the same conversion code
/// to work with both.
///
/// Implementations are expected to mirror the semantics of the corresponding
/// `TensorImage` accessors (including any format-specific special cases).
pub trait TensorImageDst {
    /// Returns a reference to the underlying tensor.
    fn tensor(&self) -> &Tensor<u8>;
    /// Returns a mutable reference to the underlying tensor.
    fn tensor_mut(&mut self) -> &mut Tensor<u8>;
    /// Returns the FourCC code representing the image format.
    fn fourcc(&self) -> FourCharCode;
    /// Returns whether the image is in planar format.
    fn is_planar(&self) -> bool;
    /// Returns the width of the image in pixels.
    fn width(&self) -> usize;
    /// Returns the height of the image in pixels.
    fn height(&self) -> usize;
    /// Returns the number of channels in the image.
    fn channels(&self) -> usize;
    /// Returns the row stride in bytes.
    fn row_stride(&self) -> usize;
}
755
// Delegation impl: every accessor forwards to the inherent `TensorImage`
// method. The fully-qualified `TensorImage::…` form is used for the methods
// that share a name with the trait, making explicit that the inherent
// accessors (with their NV12/NV16 special cases) are called rather than
// recursing into these trait methods.
impl TensorImageDst for TensorImage {
    fn tensor(&self) -> &Tensor<u8> {
        &self.tensor
    }

    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
        &mut self.tensor
    }

    fn fourcc(&self) -> FourCharCode {
        self.fourcc
    }

    fn is_planar(&self) -> bool {
        self.is_planar
    }

    fn width(&self) -> usize {
        TensorImage::width(self)
    }

    fn height(&self) -> usize {
        TensorImage::height(self)
    }

    fn channels(&self) -> usize {
        TensorImage::channels(self)
    }

    fn row_stride(&self) -> usize {
        TensorImage::row_stride(self)
    }
}
789
/// A borrowed view of an image tensor for zero-copy preprocessing.
///
/// `TensorImageRef` wraps a borrowed `&mut Tensor<u8>` instead of owning it,
/// enabling zero-copy operations where the HAL writes directly into an external
/// tensor (e.g., a model's pre-allocated input buffer).
///
/// # Examples
/// ```rust,ignore
/// // Create a borrowed tensor image wrapping the model's input tensor
/// let mut dst = TensorImageRef::from_borrowed_tensor(
///     model.input_tensor(0),
///     PLANAR_RGB,
/// )?;
///
/// // Preprocess directly into the model's input buffer
/// processor.convert(&src_image, &mut dst, Rotation::None, Flip::None, Crop::default())?;
///
/// // Run inference - no copy needed!
/// model.run()?;
/// ```
#[derive(Debug)]
pub struct TensorImageRef<'a> {
    // Borrowed external pixel storage; converters write directly into it.
    pub(crate) tensor: &'a mut Tensor<u8>,
    // Pixel format of the wrapped tensor, as a FourCC code.
    fourcc: FourCharCode,
    // Cached result of `fourcc_planar(fourcc)` for the wrapped format.
    is_planar: bool,
}
816
817impl<'a> TensorImageRef<'a> {
818    /// Creates a `TensorImageRef` from a borrowed tensor reference.
819    ///
820    /// The tensor shape must match the expected format:
821    /// - For planar formats (e.g., PLANAR_RGB): shape is `[channels, height,
822    ///   width]`
823    /// - For interleaved formats (e.g., RGB, RGBA): shape is `[height, width,
824    ///   channels]`
825    ///
826    /// # Arguments
827    /// * `tensor` - A mutable reference to the tensor to wrap
828    /// * `fourcc` - The pixel format of the image
829    ///
830    /// # Returns
831    /// A `Result` containing the `TensorImageRef` or an error if the tensor
832    /// shape doesn't match the expected format.
833    pub fn from_borrowed_tensor(tensor: &'a mut Tensor<u8>, fourcc: FourCharCode) -> Result<Self> {
834        let shape = tensor.shape();
835        let is_planar = fourcc_planar(fourcc)?;
836
837        // NV12/NV16 use 2D shape [H*3/2, W] or [H*2, W] respectively
838        if fourcc == NV12 || fourcc == NV16 {
839            if shape.len() != 2 {
840                return Err(Error::InvalidShape(format!(
841                    "Semi-planar format {} requires 2D tensor, got {}: {:?}",
842                    fourcc.to_string(),
843                    shape.len(),
844                    shape
845                )));
846            }
847            return Ok(Self {
848                tensor,
849                fourcc,
850                is_planar,
851            });
852        }
853
854        // All other formats use 3D shape
855        if shape.len() != 3 {
856            return Err(Error::InvalidShape(format!(
857                "Tensor shape must have 3 dimensions, got {}: {:?}",
858                shape.len(),
859                shape
860            )));
861        }
862        let channels = if is_planar { shape[0] } else { shape[2] };
863
864        if fourcc_channels(fourcc)? != channels {
865            return Err(Error::InvalidShape(format!(
866                "Invalid tensor shape {:?} for format {}",
867                shape,
868                fourcc.to_string()
869            )));
870        }
871
872        Ok(Self {
873            tensor,
874            fourcc,
875            is_planar,
876        })
877    }
878
879    /// Returns a reference to the underlying tensor.
880    pub fn tensor(&self) -> &Tensor<u8> {
881        self.tensor
882    }
883
884    /// Returns the FourCC code representing the image format.
885    pub fn fourcc(&self) -> FourCharCode {
886        self.fourcc
887    }
888
889    /// Returns whether the image is in planar format.
890    pub fn is_planar(&self) -> bool {
891        self.is_planar
892    }
893
894    /// Returns the width of the image in pixels.
895    pub fn width(&self) -> usize {
896        match self.is_planar {
897            true => self.tensor.shape()[2],
898            false => self.tensor.shape()[1],
899        }
900    }
901
902    /// Returns the height of the image in pixels.
903    pub fn height(&self) -> usize {
904        match self.is_planar {
905            true => self.tensor.shape()[1],
906            false => self.tensor.shape()[0],
907        }
908    }
909
910    /// Returns the number of channels in the image.
911    pub fn channels(&self) -> usize {
912        match self.is_planar {
913            true => self.tensor.shape()[0],
914            false => self.tensor.shape()[2],
915        }
916    }
917
918    /// Returns the row stride in bytes.
919    pub fn row_stride(&self) -> usize {
920        match self.is_planar {
921            true => self.width(),
922            false => self.width() * self.channels(),
923        }
924    }
925}
926
927impl TensorImageDst for TensorImageRef<'_> {
928    fn tensor(&self) -> &Tensor<u8> {
929        self.tensor
930    }
931
932    fn tensor_mut(&mut self) -> &mut Tensor<u8> {
933        self.tensor
934    }
935
936    fn fourcc(&self) -> FourCharCode {
937        self.fourcc
938    }
939
940    fn is_planar(&self) -> bool {
941        self.is_planar
942    }
943
944    fn width(&self) -> usize {
945        TensorImageRef::width(self)
946    }
947
948    fn height(&self) -> usize {
949        TensorImageRef::height(self)
950    }
951
952    fn channels(&self) -> usize {
953        TensorImageRef::channels(self)
954    }
955
956    fn row_stride(&self) -> usize {
957        TensorImageRef::row_stride(self)
958    }
959}
960
961/// Flips the image, and the rotates it.
962fn rotate_flip_to_tensor_image(
963    src: &TensorImage,
964    rotation: Rotation,
965    flip: Flip,
966    memory: Option<TensorMemory>,
967) -> Result<TensorImage, Error> {
968    let src_map = src.tensor.map()?;
969    let dst = match rotation {
970        Rotation::None | Rotation::Rotate180 => {
971            TensorImage::new(src.width(), src.height(), src.fourcc(), memory)?
972        }
973        Rotation::Clockwise90 | Rotation::CounterClockwise90 => {
974            TensorImage::new(src.height(), src.width(), src.fourcc(), memory)?
975        }
976    };
977
978    let mut dst_map = dst.tensor.map()?;
979
980    CPUProcessor::flip_rotate_ndarray(&src_map, &mut dst_map, &dst, rotation, flip)?;
981
982    Ok(dst)
983}
984
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Rotation {
    None = 0,
    Clockwise90 = 1,
    Rotate180 = 2,
    CounterClockwise90 = 3,
}
impl Rotation {
    /// Converts a clockwise angle in degrees to a `Rotation`. The angle is
    /// reduced modulo 360 first and must be a multiple of 90.
    ///
    /// # Panics
    /// Panics if the angle is not a multiple of 90.
    ///
    /// # Examples
    /// ```rust
    /// # use edgefirst_image::Rotation;
    /// let rotation = Rotation::from_degrees_clockwise(270);
    /// assert_eq!(rotation, Rotation::CounterClockwise90);
    /// ```
    pub fn from_degrees_clockwise(angle: usize) -> Rotation {
        // Reduce to a canonical angle in [0, 360) before dispatching; for an
        // unsigned type this is the same as rem_euclid(360).
        let canonical = angle % 360;
        if canonical == 0 {
            Rotation::None
        } else if canonical == 90 {
            Rotation::Clockwise90
        } else if canonical == 180 {
            Rotation::Rotate180
        } else if canonical == 270 {
            Rotation::CounterClockwise90
        } else {
            panic!("rotation angle is not a multiple of 90")
        }
    }
}
1015
/// Mirror operation applied to an image during conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Flip {
    /// No flipping.
    None = 0,
    /// Vertical flip.
    Vertical = 1,
    /// Horizontal flip.
    Horizontal = 2,
}
1022
/// Optional crop regions and fill color used during image conversion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Crop {
    /// Region of the source image to read from; `None` uses the full source.
    pub src_rect: Option<Rect>,
    /// Region of the destination image to write to; `None` uses the full
    /// destination.
    pub dst_rect: Option<Rect>,
    /// Fill color for destination pixels outside the cropped region; `None`
    /// leaves them untouched. Presumably RGBA byte order — TODO confirm.
    pub dst_color: Option<[u8; 4]>,
}
1029
1030impl Default for Crop {
1031    fn default() -> Self {
1032        Crop::new()
1033    }
1034}
1035impl Crop {
1036    // Creates a new Crop with default values (no cropping).
1037    pub fn new() -> Self {
1038        Crop {
1039            src_rect: None,
1040            dst_rect: None,
1041            dst_color: None,
1042        }
1043    }
1044
1045    // Sets the source rectangle for cropping.
1046    pub fn with_src_rect(mut self, src_rect: Option<Rect>) -> Self {
1047        self.src_rect = src_rect;
1048        self
1049    }
1050
1051    // Sets the destination rectangle for cropping.
1052    pub fn with_dst_rect(mut self, dst_rect: Option<Rect>) -> Self {
1053        self.dst_rect = dst_rect;
1054        self
1055    }
1056
1057    // Sets the destination color for areas outside the cropped region.
1058    pub fn with_dst_color(mut self, dst_color: Option<[u8; 4]>) -> Self {
1059        self.dst_color = dst_color;
1060        self
1061    }
1062
1063    // Creates a new Crop with no cropping.
1064    pub fn no_crop() -> Self {
1065        Crop::new()
1066    }
1067
1068    // Checks if the crop rectangles are valid for the given source and
1069    // destination images.
1070    pub fn check_crop(&self, src: &TensorImage, dst: &TensorImage) -> Result<(), Error> {
1071        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1072        let dst = self.dst_rect.is_none_or(|x| x.check_rect(dst));
1073        match (src, dst) {
1074            (true, true) => Ok(()),
1075            (true, false) => Err(Error::CropInvalid(format!(
1076                "Dest crop invalid: {:?}",
1077                self.dst_rect
1078            ))),
1079            (false, true) => Err(Error::CropInvalid(format!(
1080                "Src crop invalid: {:?}",
1081                self.src_rect
1082            ))),
1083            (false, false) => Err(Error::CropInvalid(format!(
1084                "Dest and Src crop invalid: {:?} {:?}",
1085                self.dst_rect, self.src_rect
1086            ))),
1087        }
1088    }
1089
1090    // Checks if the crop rectangles are valid for the given source and
1091    // destination images (using TensorImageRef for destination).
1092    pub fn check_crop_ref(&self, src: &TensorImage, dst: &TensorImageRef<'_>) -> Result<(), Error> {
1093        let src = self.src_rect.is_none_or(|x| x.check_rect(src));
1094        let dst = self.dst_rect.is_none_or(|x| x.check_rect_dst(dst));
1095        match (src, dst) {
1096            (true, true) => Ok(()),
1097            (true, false) => Err(Error::CropInvalid(format!(
1098                "Dest crop invalid: {:?}",
1099                self.dst_rect
1100            ))),
1101            (false, true) => Err(Error::CropInvalid(format!(
1102                "Src crop invalid: {:?}",
1103                self.src_rect
1104            ))),
1105            (false, false) => Err(Error::CropInvalid(format!(
1106                "Dest and Src crop invalid: {:?} {:?}",
1107                self.dst_rect, self.src_rect
1108            ))),
1109        }
1110    }
1111}
1112
/// Axis-aligned rectangle in pixel coordinates with its origin at the
/// top-left corner of the image.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Rect {
    /// X coordinate of the left edge, in pixels.
    pub left: usize,
    /// Y coordinate of the top edge, in pixels.
    pub top: usize,
    /// Width in pixels.
    pub width: usize,
    /// Height in pixels.
    pub height: usize,
}
1120
1121impl Rect {
1122    // Creates a new Rect with the specified left, top, width, and height.
1123    pub fn new(left: usize, top: usize, width: usize, height: usize) -> Self {
1124        Self {
1125            left,
1126            top,
1127            width,
1128            height,
1129        }
1130    }
1131
1132    // Checks if the rectangle is valid for the given image.
1133    pub fn check_rect(&self, image: &TensorImage) -> bool {
1134        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1135    }
1136
1137    // Checks if the rectangle is valid for the given destination image.
1138    pub fn check_rect_dst<D: TensorImageDst>(&self, image: &D) -> bool {
1139        self.left + self.width <= image.width() && self.top + self.height <= image.height()
1140    }
1141}
1142
#[enum_dispatch(ImageProcessor)]
pub trait ImageProcessorTrait {
    /// Converts the source image to the destination image format and size. The
    /// image is cropped first, then flipped, then rotated.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - The destination image to be converted to.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - The flip to apply to the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImage,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Converts the source image to a borrowed destination tensor for zero-copy
    /// preprocessing.
    ///
    /// This variant accepts a `TensorImageRef` as the destination, enabling
    /// direct writes into external buffers (e.g., model input tensors) without
    /// intermediate copies.
    ///
    /// # Arguments
    ///
    /// * `src` - The source image to convert from.
    /// * `dst` - A borrowed tensor image wrapping the destination buffer.
    /// * `rotation` - The rotation to apply to the destination image.
    /// * `flip` - The flip to apply to the image.
    /// * `crop` - An optional rectangle specifying the area to crop from the
    ///   source image.
    ///
    /// # Returns
    ///
    /// A `Result` indicating success or failure of the conversion.
    fn convert_ref(
        &mut self,
        src: &TensorImage,
        dst: &mut TensorImageRef<'_>,
        rotation: Rotation,
        flip: Flip,
        crop: Crop,
    ) -> Result<()>;

    /// Renders detection boxes and segmentation masks onto `dst`.
    #[cfg(feature = "decoder")]
    fn render_to_image(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        segmentation: &[Segmentation],
    ) -> Result<()>;

    #[cfg(feature = "decoder")]
    /// Renders detection boxes and segmentation masks from raw prototype data.
    ///
    /// For YOLO segmentation models, this avoids materializing intermediate
    /// `Array3<u8>` masks. The `ProtoData` contains mask coefficients and the
    /// prototype tensor; the renderer computes `mask_coeff @ protos` directly.
    ///
    /// Phase 1 implementation materializes masks internally and delegates to
    /// existing render paths. Phase 2 will compute masks in the GPU shader.
    fn render_from_protos(
        &mut self,
        dst: &mut TensorImage,
        detect: &[DetectBox],
        proto_data: &ProtoData,
    ) -> Result<()>;

    #[cfg(feature = "decoder")]
    /// Renders per-instance grayscale masks from raw prototype data at full
    /// output resolution.
    ///
    /// Each mask is rendered at the detection's bounding-box region using
    /// `sigmoid(mask_coeff @ protos)` without thresholding, producing
    /// continuous [0,255] values suitable for soft IoU computation.
    ///
    /// Returns one [`MaskResult`] per detection with the bbox-cropped pixels.
    fn render_masks_from_protos(
        &mut self,
        detect: &[DetectBox],
        proto_data: ProtoData,
        output_width: usize,
        output_height: usize,
    ) -> Result<Vec<MaskResult>>;

    #[cfg(feature = "decoder")]
    /// Sets the colors used for rendering segmentation masks. Up to 17 colors
    /// can be set.
    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()>;
}
1243
/// Configuration for [`ImageProcessor`] construction.
///
/// Use with [`ImageProcessor::with_config`] to override the default EGL
/// display auto-detection. The default configuration (all fields `None`)
/// preserves the existing auto-detection behaviour.
#[derive(Debug, Clone, Default)]
pub struct ImageProcessorConfig {
    /// Force OpenGL to use this EGL display type instead of auto-detecting.
    ///
    /// When `None`, the processor probes displays in priority order: GBM,
    /// PlatformDevice, Default. Use [`probe_egl_displays`] to discover
    /// which displays are available on the current system.
    ///
    /// Ignored when `EDGEFIRST_DISABLE_GL=1` is set, since OpenGL is then
    /// disabled entirely.
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    pub egl_display: Option<EglDisplayKind>,
}
1262
/// Image converter that uses available hardware acceleration or CPU as a
/// fallback.
///
/// Backend selection happens per call: see the [`ImageProcessorTrait`]
/// implementation for the fallback order.
#[derive(Debug)]
pub struct ImageProcessor {
    /// CPU-based image converter as a fallback. This is only None if the
    /// EDGEFIRST_DISABLE_CPU environment variable is set.
    pub cpu: Option<CPUProcessor>,

    #[cfg(target_os = "linux")]
    /// G2D-based image converter for Linux systems. This is only available if
    /// the EDGEFIRST_DISABLE_G2D environment variable is not set and libg2d.so
    /// is available.
    pub g2d: Option<G2DProcessor>,
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    /// OpenGL-based image converter for Linux systems. This is only available
    /// if the EDGEFIRST_DISABLE_GL environment variable is not set and OpenGL
    /// ES is available.
    pub opengl: Option<GLProcessorThreaded>,
}
1283
// SAFETY: NOTE(review) — these impls assert that every contained backend
// (CPU, G2D, threaded OpenGL) may be moved to and shared across threads.
// That is not provable from this file since the backend types are defined
// elsewhere; confirm each backend is actually thread-safe before relying on
// these Send/Sync guarantees.
unsafe impl Send for ImageProcessor {}
unsafe impl Sync for ImageProcessor {}
1286
1287impl ImageProcessor {
1288    /// Creates a new `ImageProcessor` instance, initializing available
1289    /// hardware converters based on the system capabilities and environment
1290    /// variables.
1291    ///
1292    /// # Examples
1293    /// ```rust
1294    /// # use edgefirst_image::{ImageProcessor, TensorImage, RGBA, RGB, Rotation, Flip, Crop, ImageProcessorTrait};
1295    /// # fn main() -> Result<(), edgefirst_image::Error> {
1296    /// let image = include_bytes!("../../../testdata/zidane.jpg");
1297    /// let img = TensorImage::load(image, Some(RGBA), None)?;
1298    /// let mut converter = ImageProcessor::new()?;
1299    /// let mut dst = TensorImage::new(640, 480, RGB, None)?;
1300    /// converter.convert(&img, &mut dst, Rotation::None, Flip::None, Crop::default())?;
1301    /// # Ok(())
1302    /// # }
1303    pub fn new() -> Result<Self> {
1304        Self::with_config(ImageProcessorConfig::default())
1305    }
1306
1307    /// Creates a new `ImageProcessor` with the given configuration.
1308    ///
1309    /// This allows overriding the EGL display type used for OpenGL
1310    /// acceleration. The `EDGEFIRST_DISABLE_GL=1` environment variable
1311    /// still takes precedence over any override.
1312    #[allow(unused_variables)]
1313    pub fn with_config(config: ImageProcessorConfig) -> Result<Self> {
1314        #[cfg(target_os = "linux")]
1315        let g2d = if std::env::var("EDGEFIRST_DISABLE_G2D")
1316            .map(|x| x != "0" && x.to_lowercase() != "false")
1317            .unwrap_or(false)
1318        {
1319            log::debug!("EDGEFIRST_DISABLE_G2D is set");
1320            None
1321        } else {
1322            match G2DProcessor::new() {
1323                Ok(g2d_converter) => Some(g2d_converter),
1324                Err(err) => {
1325                    log::warn!("Failed to initialize G2D converter: {err:?}");
1326                    None
1327                }
1328            }
1329        };
1330
1331        #[cfg(target_os = "linux")]
1332        #[cfg(feature = "opengl")]
1333        let opengl = if std::env::var("EDGEFIRST_DISABLE_GL")
1334            .map(|x| x != "0" && x.to_lowercase() != "false")
1335            .unwrap_or(false)
1336        {
1337            log::debug!("EDGEFIRST_DISABLE_GL is set");
1338            None
1339        } else {
1340            match GLProcessorThreaded::new(config.egl_display) {
1341                Ok(gl_converter) => Some(gl_converter),
1342                Err(err) => {
1343                    log::warn!("Failed to initialize GL converter: {err:?}");
1344                    None
1345                }
1346            }
1347        };
1348
1349        let cpu = if std::env::var("EDGEFIRST_DISABLE_CPU")
1350            .map(|x| x != "0" && x.to_lowercase() != "false")
1351            .unwrap_or(false)
1352        {
1353            log::debug!("EDGEFIRST_DISABLE_CPU is set");
1354            None
1355        } else {
1356            Some(CPUProcessor::new())
1357        };
1358        Ok(Self {
1359            cpu,
1360            #[cfg(target_os = "linux")]
1361            g2d,
1362            #[cfg(target_os = "linux")]
1363            #[cfg(feature = "opengl")]
1364            opengl,
1365        })
1366    }
1367
1368    /// Sets the interpolation mode for int8 proto textures on the OpenGL
1369    /// backend. No-op if OpenGL is not available.
1370    #[cfg(target_os = "linux")]
1371    #[cfg(feature = "opengl")]
1372    #[cfg(feature = "decoder")]
1373    pub fn set_int8_interpolation_mode(&mut self, mode: Int8InterpolationMode) -> Result<()> {
1374        if let Some(ref mut gl) = self.opengl {
1375            gl.set_int8_interpolation_mode(mode)?;
1376        }
1377        Ok(())
1378    }
1379}
1380
1381impl ImageProcessorTrait for ImageProcessor {
1382    /// Converts the source image to the destination image format and size. The
1383    /// image is cropped first, then flipped, then rotated
1384    ///
1385    /// Prefer hardware accelerators when available, falling back to CPU if
1386    /// necessary.
1387    fn convert(
1388        &mut self,
1389        src: &TensorImage,
1390        dst: &mut TensorImage,
1391        rotation: Rotation,
1392        flip: Flip,
1393        crop: Crop,
1394    ) -> Result<()> {
1395        let start = Instant::now();
1396
1397        #[cfg(target_os = "linux")]
1398        if let Some(g2d) = self.g2d.as_mut() {
1399            log::trace!("image started with g2d in {:?}", start.elapsed());
1400            match g2d.convert(src, dst, rotation, flip, crop) {
1401                Ok(_) => {
1402                    log::trace!("image converted with g2d in {:?}", start.elapsed());
1403                    return Ok(());
1404                }
1405                Err(e) => {
1406                    log::trace!("image didn't convert with g2d: {e:?}")
1407                }
1408            }
1409        }
1410
1411        // if the image is just a copy without an resizing, the send it to the CPU and
1412        // skip OpenGL
1413        let src_shape = match crop.src_rect {
1414            Some(s) => (s.width, s.height),
1415            None => (src.width(), src.height()),
1416        };
1417        let dst_shape = match crop.dst_rect {
1418            Some(d) => (d.width, d.height),
1419            None => (dst.width(), dst.height()),
1420        };
1421
1422        // TODO: Check if still use CPU when rotation or flip is enabled
1423        if src_shape == dst_shape && flip == Flip::None && rotation == Rotation::None {
1424            if let Some(cpu) = self.cpu.as_mut() {
1425                match cpu.convert(src, dst, rotation, flip, crop) {
1426                    Ok(_) => {
1427                        log::trace!("image converted with cpu in {:?}", start.elapsed());
1428                        return Ok(());
1429                    }
1430                    Err(e) => {
1431                        log::trace!("image didn't convert with cpu: {e:?}");
1432                        return Err(e);
1433                    }
1434                }
1435            }
1436        }
1437
1438        #[cfg(target_os = "linux")]
1439        #[cfg(feature = "opengl")]
1440        if let Some(opengl) = self.opengl.as_mut() {
1441            log::trace!("image started with opengl in {:?}", start.elapsed());
1442            match opengl.convert(src, dst, rotation, flip, crop) {
1443                Ok(_) => {
1444                    log::trace!("image converted with opengl in {:?}", start.elapsed());
1445                    return Ok(());
1446                }
1447                Err(e) => {
1448                    log::trace!("image didn't convert with opengl: {e:?}")
1449                }
1450            }
1451        }
1452        log::trace!("image started with cpu in {:?}", start.elapsed());
1453        if let Some(cpu) = self.cpu.as_mut() {
1454            match cpu.convert(src, dst, rotation, flip, crop) {
1455                Ok(_) => {
1456                    log::trace!("image converted with cpu in {:?}", start.elapsed());
1457                    return Ok(());
1458                }
1459                Err(e) => {
1460                    log::trace!("image didn't convert with cpu: {e:?}");
1461                    return Err(e);
1462                }
1463            }
1464        }
1465        Err(Error::NoConverter)
1466    }
1467
1468    fn convert_ref(
1469        &mut self,
1470        src: &TensorImage,
1471        dst: &mut TensorImageRef<'_>,
1472        rotation: Rotation,
1473        flip: Flip,
1474        crop: Crop,
1475    ) -> Result<()> {
1476        let start = Instant::now();
1477
1478        // For TensorImageRef, we prefer CPU since hardware accelerators typically
1479        // don't support PLANAR_RGB output which is the common model input format.
1480        // The CPU path uses the generic conversion functions that work with any
1481        // TensorImageDst implementation.
1482        if let Some(cpu) = self.cpu.as_mut() {
1483            match cpu.convert_ref(src, dst, rotation, flip, crop) {
1484                Ok(_) => {
1485                    log::trace!("image converted with cpu (ref) in {:?}", start.elapsed());
1486                    return Ok(());
1487                }
1488                Err(e) => {
1489                    log::trace!("image didn't convert with cpu (ref): {e:?}");
1490                    return Err(e);
1491                }
1492            }
1493        }
1494
1495        Err(Error::NoConverter)
1496    }
1497
1498    #[cfg(feature = "decoder")]
1499    fn render_to_image(
1500        &mut self,
1501        dst: &mut TensorImage,
1502        detect: &[DetectBox],
1503        segmentation: &[Segmentation],
1504    ) -> Result<()> {
1505        let start = Instant::now();
1506
1507        if detect.is_empty() && segmentation.is_empty() {
1508            return Ok(());
1509        }
1510
1511        // skip G2D as it doesn't support rendering to image
1512
1513        #[cfg(target_os = "linux")]
1514        #[cfg(feature = "opengl")]
1515        if let Some(opengl) = self.opengl.as_mut() {
1516            log::trace!("image started with opengl in {:?}", start.elapsed());
1517            match opengl.render_to_image(dst, detect, segmentation) {
1518                Ok(_) => {
1519                    log::trace!("image rendered with opengl in {:?}", start.elapsed());
1520                    return Ok(());
1521                }
1522                Err(e) => {
1523                    log::trace!("image didn't render with opengl: {e:?}")
1524                }
1525            }
1526        }
1527        log::trace!("image started with cpu in {:?}", start.elapsed());
1528        if let Some(cpu) = self.cpu.as_mut() {
1529            match cpu.render_to_image(dst, detect, segmentation) {
1530                Ok(_) => {
1531                    log::trace!("image render with cpu in {:?}", start.elapsed());
1532                    return Ok(());
1533                }
1534                Err(e) => {
1535                    log::trace!("image didn't render with cpu: {e:?}");
1536                    return Err(e);
1537                }
1538            }
1539        }
1540        Err(Error::NoConverter)
1541    }
1542
1543    #[cfg(feature = "decoder")]
1544    fn render_from_protos(
1545        &mut self,
1546        dst: &mut TensorImage,
1547        detect: &[DetectBox],
1548        proto_data: &ProtoData,
1549    ) -> Result<()> {
1550        let start = Instant::now();
1551
1552        if detect.is_empty() {
1553            return Ok(());
1554        }
1555
1556        // skip G2D as it doesn't support rendering to image
1557
1558        #[cfg(target_os = "linux")]
1559        #[cfg(feature = "opengl")]
1560        if let Some(opengl) = self.opengl.as_mut() {
1561            log::trace!(
1562                "render_from_protos started with opengl in {:?}",
1563                start.elapsed()
1564            );
1565            match opengl.render_from_protos(dst, detect, proto_data) {
1566                Ok(_) => {
1567                    log::trace!("render_from_protos with opengl in {:?}", start.elapsed());
1568                    return Ok(());
1569                }
1570                Err(e) => {
1571                    log::trace!("render_from_protos didn't work with opengl: {e:?}")
1572                }
1573            }
1574        }
1575        log::trace!(
1576            "render_from_protos started with cpu in {:?}",
1577            start.elapsed()
1578        );
1579        if let Some(cpu) = self.cpu.as_mut() {
1580            match cpu.render_from_protos(dst, detect, proto_data) {
1581                Ok(_) => {
1582                    log::trace!("render_from_protos with cpu in {:?}", start.elapsed());
1583                    return Ok(());
1584                }
1585                Err(e) => {
1586                    log::trace!("render_from_protos didn't work with cpu: {e:?}");
1587                    return Err(e);
1588                }
1589            }
1590        }
1591        Err(Error::NoConverter)
1592    }
1593
1594    #[cfg(feature = "decoder")]
1595    fn set_class_colors(&mut self, colors: &[[u8; 4]]) -> Result<()> {
1596        let start = Instant::now();
1597
1598        // skip G2D as it doesn't support rendering to image
1599
1600        #[cfg(target_os = "linux")]
1601        #[cfg(feature = "opengl")]
1602        if let Some(opengl) = self.opengl.as_mut() {
1603            log::trace!("image started with opengl in {:?}", start.elapsed());
1604            match opengl.set_class_colors(colors) {
1605                Ok(_) => {
1606                    log::trace!("colors set with opengl in {:?}", start.elapsed());
1607                    return Ok(());
1608                }
1609                Err(e) => {
1610                    log::trace!("colors didn't set with opengl: {e:?}")
1611                }
1612            }
1613        }
1614        log::trace!("image started with cpu in {:?}", start.elapsed());
1615        if let Some(cpu) = self.cpu.as_mut() {
1616            match cpu.set_class_colors(colors) {
1617                Ok(_) => {
1618                    log::trace!("colors set with cpu in {:?}", start.elapsed());
1619                    return Ok(());
1620                }
1621                Err(e) => {
1622                    log::trace!("colors didn't set with cpu: {e:?}");
1623                    return Err(e);
1624                }
1625            }
1626        }
1627        Err(Error::NoConverter)
1628    }
1629
1630    #[cfg(feature = "decoder")]
1631    fn render_masks_from_protos(
1632        &mut self,
1633        detect: &[DetectBox],
1634        proto_data: ProtoData,
1635        output_width: usize,
1636        output_height: usize,
1637    ) -> Result<Vec<MaskResult>> {
1638        if detect.is_empty() {
1639            return Ok(Vec::new());
1640        }
1641
1642        // OpenGL path takes ownership; CPU fallback uses reference.
1643        // Try OpenGL first — if it fails, fall through to CPU with the
1644        // original data (OpenGL failure returns proto_data via Err).
1645        #[cfg(target_os = "linux")]
1646        #[cfg(feature = "opengl")]
1647        {
1648            let has_opengl = self.opengl.is_some();
1649            if has_opengl {
1650                let opengl = self.opengl.as_mut().unwrap();
1651                match opengl.render_masks_from_protos(
1652                    detect,
1653                    proto_data,
1654                    output_width,
1655                    output_height,
1656                ) {
1657                    Ok(r) => return Ok(r),
1658                    Err(e) => {
1659                        log::trace!("render_masks_from_protos didn't work with opengl: {e:?}");
1660                        // proto_data was moved into the GL thread — must use
1661                        // CPU path with fresh data, but since OpenGL rarely
1662                        // fails at this point, this is acceptable.
1663                        return Err(e);
1664                    }
1665                }
1666            }
1667        }
1668        if let Some(cpu) = self.cpu.as_mut() {
1669            return cpu.render_masks_from_protos(detect, proto_data, output_width, output_height);
1670        }
1671        Err(Error::NoConverter)
1672    }
1673}
1674
1675fn fourcc_channels(fourcc: FourCharCode) -> Result<usize> {
1676    match fourcc {
1677        RGBA => Ok(4), // RGBA has 4 channels (R, G, B, A)
1678        RGB => Ok(3),  // RGB has 3 channels (R, G, B)
1679        YUYV => Ok(2), // YUYV has 2 channels (Y and UV)
1680        GREY => Ok(1), // Y800 has 1 channel (Y)
1681        NV12 => Ok(2), // NV12 has 2 channel. 2nd channel is half empty
1682        NV16 => Ok(2), // NV16 has 2 channel. 2nd channel is full size
1683        PLANAR_RGB => Ok(3),
1684        PLANAR_RGBA => Ok(4),
1685        _ => Err(Error::NotSupported(format!(
1686            "Unsupported fourcc: {}",
1687            fourcc.to_string()
1688        ))),
1689    }
1690}
1691
1692fn fourcc_planar(fourcc: FourCharCode) -> Result<bool> {
1693    match fourcc {
1694        RGBA => Ok(false),       // RGBA has 4 channels (R, G, B, A)
1695        RGB => Ok(false),        // RGB has 3 channels (R, G, B)
1696        YUYV => Ok(false),       // YUYV has 2 channels (Y and UV)
1697        GREY => Ok(false),       // Y800 has 1 channel (Y)
1698        NV12 => Ok(true),        // Planar YUV
1699        NV16 => Ok(true),        // Planar YUV
1700        PLANAR_RGB => Ok(true),  // Planar RGB
1701        PLANAR_RGBA => Ok(true), // Planar RGBA
1702        _ => Err(Error::NotSupported(format!(
1703            "Unsupported fourcc: {}",
1704            fourcc.to_string()
1705        ))),
1706    }
1707}
1708
/// RAII timer that logs the elapsed time for a named scope when dropped.
pub(crate) struct FunctionTimer<T: Display> {
    // Label included in the trace message.
    name: T,
    // Creation time; elapsed time is measured from here at drop.
    start: std::time::Instant,
}

impl<T: Display> FunctionTimer<T> {
    /// Starts timing immediately, labelling the measurement with `name`.
    pub fn new(name: T) -> Self {
        Self {
            name,
            start: std::time::Instant::now(),
        }
    }
}

impl<T: Display> Drop for FunctionTimer<T> {
    // Logs the elapsed time at trace level when the timer leaves scope.
    fn drop(&mut self) {
        log::trace!("{} elapsed: {:?}", self.name, self.start.elapsed())
    }
}
1728
// Twenty RGBA colors with components normalized to [0.0, 1.0] and a fixed
// 0.7 alpha; converted to bytes at compile time by `denorm` to produce
// `DEFAULT_COLORS_U8`.
#[cfg(feature = "decoder")]
const DEFAULT_COLORS: [[f32; 4]; 20] = [
    [0., 1., 0., 0.7],
    [1., 0.5568628, 0., 0.7],
    [0.25882353, 0.15294118, 0.13333333, 0.7],
    [0.8, 0.7647059, 0.78039216, 0.7],
    [0.3137255, 0.3137255, 0.3137255, 0.7],
    [0.1411765, 0.3098039, 0.1215686, 0.7],
    [1., 0.95686275, 0.5137255, 0.7],
    [0.3529412, 0.32156863, 0., 0.7],
    [0.4235294, 0.6235294, 0.6509804, 0.7],
    [0.5098039, 0.5098039, 0.7294118, 0.7],
    [0.00784314, 0.18823529, 0.29411765, 0.7],
    [0.0, 0.2706, 1.0, 0.7],
    [0.0, 0.0, 0.0, 0.7],
    [0.0, 0.5, 0.0, 0.7],
    [1.0, 0.0, 0.0, 0.7],
    [0.0, 0.0, 1.0, 0.7],
    [1.0, 0.5, 0.5, 0.7],
    [0.1333, 0.5451, 0.1333, 0.7],
    [0.1176, 0.4118, 0.8235, 0.7],
    [1., 1., 1., 0.7],
];
1752
/// Converts a matrix of normalized `[0.0, 1.0]` components into `[0, 255]`
/// bytes at compile time. Uses `while` loops because `for` is not allowed
/// in a `const fn`.
#[cfg(feature = "decoder")]
const fn denorm<const M: usize, const N: usize>(a: [[f32; M]; N]) -> [[u8; M]; N] {
    let mut out = [[0u8; M]; N];
    let mut row = 0;
    while row < N {
        let mut col = 0;
        while col < M {
            out[row][col] = (a[row][col] * 255.0).round() as u8;
            col += 1;
        }
        row += 1;
    }
    out
}
1767
// Byte-valued palette derived from `DEFAULT_COLORS` at compile time.
#[cfg(feature = "decoder")]
const DEFAULT_COLORS_U8: [[u8; 4]; 20] = denorm(DEFAULT_COLORS);
1770
1771#[cfg(test)]
1772#[cfg_attr(coverage_nightly, coverage(off))]
1773mod image_tests {
1774    use super::*;
1775    use crate::{CPUProcessor, Rotation};
1776    #[cfg(target_os = "linux")]
1777    use edgefirst_tensor::is_dma_available;
1778    use edgefirst_tensor::{TensorMapTrait, TensorMemory};
1779    use image::buffer::ConvertBuffer;
1780
1781    #[ctor::ctor]
1782    fn init() {
1783        env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
1784    }
1785
    /// Expands to the name of the enclosing function as a `&'static str`.
    ///
    /// It takes `std::any::type_name` of a local item `f` — which yields a
    /// path ending in `"::f"` — strips the trailing 3 characters (`"::f"`),
    /// and keeps only the segment after the last `':'`.
    macro_rules! function {
        () => {{
            fn f() {}
            fn type_name_of<T>(_: T) -> &'static str {
                std::any::type_name::<T>()
            }
            let name = type_name_of(f);

            // Find and cut the rest of the path
            match &name[..name.len() - 3].rfind(':') {
                Some(pos) => &name[pos + 1..name.len() - 3],
                None => &name[..name.len() - 3],
            }
        }};
    }
1801
1802    #[test]
1803    fn test_invalid_crop() {
1804        let src = TensorImage::new(100, 100, RGB, None).unwrap();
1805        let dst = TensorImage::new(100, 100, RGB, None).unwrap();
1806
1807        let crop = Crop::new()
1808            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1809            .with_dst_rect(Some(Rect::new(0, 0, 150, 150)));
1810
1811        let result = crop.check_crop(&src, &dst);
1812        assert!(matches!(
1813            result,
1814            Err(Error::CropInvalid(e)) if e.starts_with("Dest and Src crop invalid")
1815        ));
1816
1817        let crop = crop.with_src_rect(Some(Rect::new(0, 0, 10, 10)));
1818        let result = crop.check_crop(&src, &dst);
1819        assert!(matches!(
1820            result,
1821            Err(Error::CropInvalid(e)) if e.starts_with("Dest crop invalid")
1822        ));
1823
1824        let crop = crop
1825            .with_src_rect(Some(Rect::new(50, 50, 60, 60)))
1826            .with_dst_rect(Some(Rect::new(0, 0, 50, 50)));
1827        let result = crop.check_crop(&src, &dst);
1828        assert!(matches!(
1829            result,
1830            Err(Error::CropInvalid(e)) if e.starts_with("Src crop invalid")
1831        ));
1832
1833        let crop = crop.with_src_rect(Some(Rect::new(50, 50, 50, 50)));
1834
1835        let result = crop.check_crop(&src, &dst);
1836        assert!(result.is_ok());
1837    }
1838
1839    #[test]
1840    fn test_invalid_tensor() -> Result<(), Error> {
1841        let tensor = Tensor::new(&[720, 1280, 4, 1], None, None)?;
1842        let result = TensorImage::from_tensor(tensor, RGB);
1843        assert!(matches!(
1844            result,
1845            Err(Error::InvalidShape(e)) if e.starts_with("Tensor shape must have 3 dimensions, got")
1846        ));
1847
1848        let tensor = Tensor::new(&[720, 1280, 4], None, None)?;
1849        let result = TensorImage::from_tensor(tensor, RGB);
1850        assert!(matches!(
1851            result,
1852            Err(Error::InvalidShape(e)) if e.starts_with("Invalid tensor shape")
1853        ));
1854
1855        Ok(())
1856    }
1857
1858    #[test]
1859    fn test_invalid_image_file() -> Result<(), Error> {
1860        let result = TensorImage::load(&[123; 5000], None, None);
1861        assert!(matches!(
1862            result,
1863            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1864
1865        Ok(())
1866    }
1867
1868    #[test]
1869    fn test_invalid_jpeg_fourcc() -> Result<(), Error> {
1870        let result = TensorImage::load(&[123; 5000], Some(YUYV), None);
1871        assert!(matches!(
1872            result,
1873            Err(Error::NotSupported(e)) if e == "Could not decode as jpeg or png"));
1874
1875        Ok(())
1876    }
1877
1878    #[test]
1879    fn test_load_resize_save() {
1880        let file = include_bytes!("../../../testdata/zidane.jpg");
1881        let img = TensorImage::load_jpeg(file, Some(RGBA), None).unwrap();
1882        assert_eq!(img.width(), 1280);
1883        assert_eq!(img.height(), 720);
1884
1885        let mut dst = TensorImage::new(640, 360, RGBA, None).unwrap();
1886        let mut converter = CPUProcessor::new();
1887        converter
1888            .convert(&img, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
1889            .unwrap();
1890        assert_eq!(dst.width(), 640);
1891        assert_eq!(dst.height(), 360);
1892
1893        dst.save_jpeg("zidane_resized.jpg", 80).unwrap();
1894
1895        let file = std::fs::read("zidane_resized.jpg").unwrap();
1896        let img = TensorImage::load_jpeg(&file, None, None).unwrap();
1897        assert_eq!(img.width(), 640);
1898        assert_eq!(img.height(), 360);
1899        assert_eq!(img.fourcc(), RGB);
1900    }
1901
1902    #[test]
1903    fn test_from_tensor_planar() -> Result<(), Error> {
1904        let tensor = Tensor::new(&[3, 720, 1280], None, None)?;
1905        tensor
1906            .map()?
1907            .copy_from_slice(include_bytes!("../../../testdata/camera720p.8bps"));
1908        let planar = TensorImage::from_tensor(tensor, PLANAR_RGB)?;
1909
1910        let rbga = load_bytes_to_tensor(
1911            1280,
1912            720,
1913            RGBA,
1914            None,
1915            include_bytes!("../../../testdata/camera720p.rgba"),
1916        )?;
1917        compare_images_convert_to_rgb(&planar, &rbga, 0.98, function!());
1918
1919        Ok(())
1920    }
1921
1922    #[test]
1923    fn test_from_tensor_invalid_fourcc() {
1924        let tensor = Tensor::new(&[3, 720, 1280], None, None).unwrap();
1925        let result = TensorImage::from_tensor(tensor, four_char_code!("TEST"));
1926        matches!(result, Err(Error::NotSupported(e)) if e.starts_with("Unsupported fourcc : TEST"));
1927    }
1928
1929    #[test]
1930    #[should_panic(expected = "Failed to save planar RGB image")]
1931    fn test_save_planar() {
1932        let planar_img = load_bytes_to_tensor(
1933            1280,
1934            720,
1935            PLANAR_RGB,
1936            None,
1937            include_bytes!("../../../testdata/camera720p.8bps"),
1938        )
1939        .unwrap();
1940
1941        let save_path = "/tmp/planar_rgb.jpg";
1942        planar_img
1943            .save_jpeg(save_path, 90)
1944            .expect("Failed to save planar RGB image");
1945    }
1946
1947    #[test]
1948    #[should_panic(expected = "Failed to save YUYV image")]
1949    fn test_save_yuyv() {
1950        let planar_img = load_bytes_to_tensor(
1951            1280,
1952            720,
1953            YUYV,
1954            None,
1955            include_bytes!("../../../testdata/camera720p.yuyv"),
1956        )
1957        .unwrap();
1958
1959        let save_path = "/tmp/yuyv.jpg";
1960        planar_img
1961            .save_jpeg(save_path, 90)
1962            .expect("Failed to save YUYV image");
1963    }
1964
1965    #[test]
1966    fn test_rotation_angle() {
1967        assert_eq!(Rotation::from_degrees_clockwise(0), Rotation::None);
1968        assert_eq!(Rotation::from_degrees_clockwise(90), Rotation::Clockwise90);
1969        assert_eq!(Rotation::from_degrees_clockwise(180), Rotation::Rotate180);
1970        assert_eq!(
1971            Rotation::from_degrees_clockwise(270),
1972            Rotation::CounterClockwise90
1973        );
1974        assert_eq!(Rotation::from_degrees_clockwise(360), Rotation::None);
1975        assert_eq!(Rotation::from_degrees_clockwise(450), Rotation::Clockwise90);
1976        assert_eq!(Rotation::from_degrees_clockwise(540), Rotation::Rotate180);
1977        assert_eq!(
1978            Rotation::from_degrees_clockwise(630),
1979            Rotation::CounterClockwise90
1980        );
1981    }
1982
    #[test]
    #[should_panic(expected = "rotation angle is not a multiple of 90")]
    fn test_rotation_angle_panic() {
        // 361 is not a multiple of 90, so this must panic with the message
        // above.
        Rotation::from_degrees_clockwise(361);
    }
1988
1989    #[test]
1990    fn test_disable_env_var() -> Result<(), Error> {
1991        #[cfg(target_os = "linux")]
1992        {
1993            let original = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
1994            unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
1995            let converter = ImageProcessor::new()?;
1996            match original {
1997                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
1998                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
1999            }
2000            assert!(converter.g2d.is_none());
2001        }
2002
2003        #[cfg(target_os = "linux")]
2004        #[cfg(feature = "opengl")]
2005        {
2006            let original = std::env::var("EDGEFIRST_DISABLE_GL").ok();
2007            unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
2008            let converter = ImageProcessor::new()?;
2009            match original {
2010                Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
2011                None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
2012            }
2013            assert!(converter.opengl.is_none());
2014        }
2015
2016        let original = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
2017        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
2018        let converter = ImageProcessor::new()?;
2019        match original {
2020            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
2021            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
2022        }
2023        assert!(converter.cpu.is_none());
2024
2025        let original_cpu = std::env::var("EDGEFIRST_DISABLE_CPU").ok();
2026        unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", "1") };
2027        let original_gl = std::env::var("EDGEFIRST_DISABLE_GL").ok();
2028        unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", "1") };
2029        let original_g2d = std::env::var("EDGEFIRST_DISABLE_G2D").ok();
2030        unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", "1") };
2031        let mut converter = ImageProcessor::new()?;
2032
2033        let src = TensorImage::new(1280, 720, RGBA, None)?;
2034        let mut dst = TensorImage::new(640, 360, RGBA, None)?;
2035        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
2036        assert!(matches!(result, Err(Error::NoConverter)));
2037
2038        match original_cpu {
2039            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_CPU", s) },
2040            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_CPU") },
2041        }
2042        match original_gl {
2043            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_GL", s) },
2044            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_GL") },
2045        }
2046        match original_g2d {
2047            Some(s) => unsafe { std::env::set_var("EDGEFIRST_DISABLE_G2D", s) },
2048            None => unsafe { std::env::remove_var("EDGEFIRST_DISABLE_G2D") },
2049        }
2050
2051        Ok(())
2052    }
2053
2054    #[test]
2055    fn test_unsupported_conversion() {
2056        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
2057        let mut dst = TensorImage::new(640, 360, NV12, None).unwrap();
2058        let mut converter = ImageProcessor::new().unwrap();
2059        let result = converter.convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop());
2060        log::debug!("result: {:?}", result);
2061        assert!(matches!(
2062            result,
2063            Err(Error::NotSupported(e)) if e.starts_with("Conversion from NV12 to NV12")
2064        ));
2065    }
2066
2067    #[test]
2068    fn test_load_grey() {
2069        let grey_img = TensorImage::load_jpeg(
2070            include_bytes!("../../../testdata/grey.jpg"),
2071            Some(RGBA),
2072            None,
2073        )
2074        .unwrap();
2075
2076        let grey_but_rgb_img = TensorImage::load_jpeg(
2077            include_bytes!("../../../testdata/grey-rgb.jpg"),
2078            Some(RGBA),
2079            None,
2080        )
2081        .unwrap();
2082
2083        compare_images(&grey_img, &grey_but_rgb_img, 0.99, function!());
2084    }
2085
2086    #[test]
2087    fn test_new_nv12() {
2088        let nv12 = TensorImage::new(1280, 720, NV12, None).unwrap();
2089        assert_eq!(nv12.height(), 720);
2090        assert_eq!(nv12.width(), 1280);
2091        assert_eq!(nv12.fourcc(), NV12);
2092        assert_eq!(nv12.channels(), 2);
2093        assert!(nv12.is_planar())
2094    }
2095
2096    #[test]
2097    #[cfg(target_os = "linux")]
2098    fn test_new_image_converter() {
2099        let dst_width = 640;
2100        let dst_height = 360;
2101        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2102        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2103
2104        let mut converter_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2105        let mut converter = ImageProcessor::new().unwrap();
2106        converter
2107            .convert(
2108                &src,
2109                &mut converter_dst,
2110                Rotation::None,
2111                Flip::None,
2112                Crop::no_crop(),
2113            )
2114            .unwrap();
2115
2116        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2117        let mut cpu_converter = CPUProcessor::new();
2118        cpu_converter
2119            .convert(
2120                &src,
2121                &mut cpu_dst,
2122                Rotation::None,
2123                Flip::None,
2124                Crop::no_crop(),
2125            )
2126            .unwrap();
2127
2128        compare_images(&converter_dst, &cpu_dst, 0.98, function!());
2129    }
2130
2131    #[test]
2132    fn test_crop_skip() {
2133        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2134        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2135
2136        let mut converter_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2137        let mut converter = ImageProcessor::new().unwrap();
2138        let crop = Crop::new()
2139            .with_src_rect(Some(Rect::new(0, 0, 640, 640)))
2140            .with_dst_rect(Some(Rect::new(0, 0, 640, 640)));
2141        converter
2142            .convert(&src, &mut converter_dst, Rotation::None, Flip::None, crop)
2143            .unwrap();
2144
2145        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2146        let mut cpu_converter = CPUProcessor::new();
2147        cpu_converter
2148            .convert(&src, &mut cpu_dst, Rotation::None, Flip::None, crop)
2149            .unwrap();
2150
2151        compare_images(&converter_dst, &cpu_dst, 0.99999, function!());
2152    }
2153
2154    #[test]
2155    fn test_invalid_fourcc() {
2156        let result = TensorImage::new(1280, 720, four_char_code!("TEST"), None);
2157        assert!(matches!(
2158            result,
2159            Err(Error::NotSupported(e)) if e == "Unsupported fourcc: TEST"
2160        ));
2161    }
2162
    // Cached result of probing for the G2D library (Linux/i.MX8 only);
    // the probe runs at most once per test process.
    #[cfg(target_os = "linux")]
    static G2D_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();

    // Helper function to check if the G2D library is available: true when a
    // G2DProcessor can be constructed.
    #[cfg(target_os = "linux")]
    fn is_g2d_available() -> bool {
        *G2D_AVAILABLE.get_or_init(|| G2DProcessor::new().is_ok())
    }
2171
2172    #[cfg(target_os = "linux")]
2173    #[cfg(feature = "opengl")]
2174    static GL_AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2175
2176    #[cfg(target_os = "linux")]
2177    #[cfg(feature = "opengl")]
2178    // Helper function to check if OpenGL is available
2179    fn is_opengl_available() -> bool {
2180        #[cfg(all(target_os = "linux", feature = "opengl"))]
2181        {
2182            *GL_AVAILABLE.get_or_init(|| GLProcessorThreaded::new(None).is_ok())
2183        }
2184
2185        #[cfg(not(all(target_os = "linux", feature = "opengl")))]
2186        {
2187            false
2188        }
2189    }
2190
2191    #[test]
2192    fn test_load_jpeg_with_exif() {
2193        let file = include_bytes!("../../../testdata/zidane_rotated_exif.jpg").to_vec();
2194        let loaded = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2195
2196        assert_eq!(loaded.height(), 1280);
2197        assert_eq!(loaded.width(), 720);
2198
2199        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2200        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2201
2202        let (dst_width, dst_height) = (cpu_src.height(), cpu_src.width());
2203
2204        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2205        let mut cpu_converter = CPUProcessor::new();
2206
2207        cpu_converter
2208            .convert(
2209                &cpu_src,
2210                &mut cpu_dst,
2211                Rotation::Clockwise90,
2212                Flip::None,
2213                Crop::no_crop(),
2214            )
2215            .unwrap();
2216
2217        compare_images(&loaded, &cpu_dst, 0.98, function!());
2218    }
2219
2220    #[test]
2221    fn test_load_png_with_exif() {
2222        let file = include_bytes!("../../../testdata/zidane_rotated_exif_180.png").to_vec();
2223        let loaded = TensorImage::load_png(&file, Some(RGBA), None).unwrap();
2224
2225        assert_eq!(loaded.height(), 720);
2226        assert_eq!(loaded.width(), 1280);
2227
2228        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2229        let cpu_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2230
2231        let mut cpu_dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
2232        let mut cpu_converter = CPUProcessor::new();
2233
2234        cpu_converter
2235            .convert(
2236                &cpu_src,
2237                &mut cpu_dst,
2238                Rotation::Rotate180,
2239                Flip::None,
2240                Crop::no_crop(),
2241            )
2242            .unwrap();
2243
2244        compare_images(&loaded, &cpu_dst, 0.98, function!());
2245    }
2246
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_resize() {
        // Hardware-dependent test: skip (not fail) when the G2D blitter or
        // DMA-BUF allocation is unavailable on this machine.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_resize - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_resize - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        // Source and G2D destination are allocated in DMA memory for the
        // hardware path.
        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();

        let mut g2d_dst =
            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        g2d_converter
            .convert(
                &src,
                &mut g2d_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // CPU reference of the same 1280x720 -> 640x360 resize.
        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // Hardware and CPU output must be at least 98% similar.
        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2293
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize() {
        // Skip (not fail) on machines without a usable OpenGL context.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        let dst_width = 640;
        let dst_height = 360;
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        // CPU reference result, computed once up front.
        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();
        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // Convert several times so repeated use of the same GL converter and
        // destination is exercised, comparing against the CPU result each
        // pass.
        for _ in 0..5 {
            gl_converter
                .convert(
                    &src,
                    &mut gl_dst,
                    Rotation::None,
                    Flip::None,
                    Crop::no_crop(),
                )
                .unwrap();

            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
        }

        // NOTE(review): the explicit drop releases gl_dst *before*
        // gl_converter (plain scope exit would drop gl_converter first) —
        // presumably deliberate ordering; confirm before removing.
        drop(gl_dst);
    }
2338
2339    #[test]
2340    #[cfg(target_os = "linux")]
2341    #[cfg(feature = "opengl")]
2342    fn test_opengl_10_threads() {
2343        if !is_opengl_available() {
2344            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2345            return;
2346        }
2347
2348        let handles: Vec<_> = (0..10)
2349            .map(|i| {
2350                std::thread::Builder::new()
2351                    .name(format!("Thread {i}"))
2352                    .spawn(test_opengl_resize)
2353                    .unwrap()
2354            })
2355            .collect();
2356        handles.into_iter().for_each(|h| {
2357            if let Err(e) = h.join() {
2358                std::panic::resume_unwind(e)
2359            }
2360        });
2361    }
2362
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_grey() {
        // Skip (not fail) on machines without a usable OpenGL context.
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        // Single-channel (GREY) path: compare the GL resize of a greyscale
        // JPEG against the CPU reference.
        let img = TensorImage::load_jpeg(
            include_bytes!("../../../testdata/grey.jpg"),
            Some(GREY),
            None,
        )
        .unwrap();

        let mut gl_dst = TensorImage::new(640, 640, GREY, None).unwrap();
        let mut cpu_dst = TensorImage::new(640, 640, GREY, None).unwrap();

        let mut converter = CPUProcessor::new();

        converter
            .convert(
                &img,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        let mut gl = GLProcessorThreaded::new(None).unwrap();
        gl.convert(
            &img,
            &mut gl_dst,
            Rotation::None,
            Flip::None,
            Crop::no_crop(),
        )
        .unwrap();

        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
2406
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_src_crop() {
        // Hardware-dependent test: skip (not fail) when the G2D blitter or
        // DMA-BUF allocation is unavailable on this machine.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_src_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_src_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        // CPU reference: convert only the top-left 640x360 region of the
        // source (no destination rectangle).
        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop {
                    src_rect: Some(Rect {
                        left: 0,
                        top: 0,
                        width: 640,
                        height: 360,
                    }),
                    dst_rect: None,
                    dst_color: None,
                },
            )
            .unwrap();

        // Same source crop through the G2D hardware path.
        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        g2d_converter
            .convert(
                &src,
                &mut g2d_dst,
                Rotation::None,
                Flip::None,
                Crop {
                    src_rect: Some(Rect {
                        left: 0,
                        top: 0,
                        width: 640,
                        height: 360,
                    }),
                    dst_rect: None,
                    dst_color: None,
                },
            )
            .unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2470
    #[test]
    #[cfg(target_os = "linux")]
    fn test_g2d_dst_crop() {
        // Hardware-dependent test: skip (not fail) when the G2D blitter or
        // DMA-BUF allocation is unavailable on this machine.
        if !is_g2d_available() {
            eprintln!("SKIPPED: test_g2d_dst_crop - G2D library (libg2d.so.2) not available");
            return;
        }
        if !is_dma_available() {
            eprintln!(
                "SKIPPED: test_g2d_dst_crop - DMA memory allocation not available (permission denied or no DMA-BUF support)"
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        // CPU reference: render the full source into a 512x288 rectangle at
        // (100, 100) within the destination (no source rectangle).
        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop {
                    src_rect: None,
                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
                    dst_color: None,
                },
            )
            .unwrap();

        // Same destination crop through the G2D hardware path.
        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
        let mut g2d_converter = G2DProcessor::new().unwrap();
        g2d_converter
            .convert(
                &src,
                &mut g2d_dst,
                Rotation::None,
                Flip::None,
                Crop {
                    src_rect: None,
                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
                    dst_color: None,
                },
            )
            .unwrap();

        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
    }
2524
2525    #[test]
2526    #[cfg(target_os = "linux")]
2527    fn test_g2d_all_rgba() {
2528        if !is_g2d_available() {
2529            eprintln!("SKIPPED: test_g2d_all_rgba - G2D library (libg2d.so.2) not available");
2530            return;
2531        }
2532        if !is_dma_available() {
2533            eprintln!(
2534                "SKIPPED: test_g2d_all_rgba - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2535            );
2536            return;
2537        }
2538
2539        let dst_width = 640;
2540        let dst_height = 640;
2541        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2542        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2543
2544        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2545        let mut cpu_converter = CPUProcessor::new();
2546        let mut g2d_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2547        let mut g2d_converter = G2DProcessor::new().unwrap();
2548
2549        for rot in [
2550            Rotation::None,
2551            Rotation::Clockwise90,
2552            Rotation::Rotate180,
2553            Rotation::CounterClockwise90,
2554        ] {
2555            cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2556            g2d_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2557            for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2558                cpu_converter
2559                    .convert(
2560                        &src,
2561                        &mut cpu_dst,
2562                        Rotation::None,
2563                        Flip::None,
2564                        Crop {
2565                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2566                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2567                            dst_color: None,
2568                        },
2569                    )
2570                    .unwrap();
2571
2572                g2d_converter
2573                    .convert(
2574                        &src,
2575                        &mut g2d_dst,
2576                        Rotation::None,
2577                        Flip::None,
2578                        Crop {
2579                            src_rect: Some(Rect::new(50, 120, 1024, 576)),
2580                            dst_rect: Some(Rect::new(100, 100, 512, 288)),
2581                            dst_color: None,
2582                        },
2583                    )
2584                    .unwrap();
2585
2586                compare_images(
2587                    &g2d_dst,
2588                    &cpu_dst,
2589                    0.98,
2590                    &format!("{} {:?} {:?}", function!(), rot, flip),
2591                );
2592            }
2593        }
2594    }
2595
2596    #[test]
2597    #[cfg(target_os = "linux")]
2598    #[cfg(feature = "opengl")]
2599    fn test_opengl_src_crop() {
2600        if !is_opengl_available() {
2601            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2602            return;
2603        }
2604
2605        let dst_width = 640;
2606        let dst_height = 360;
2607        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2608        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2609
2610        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2611        let mut cpu_converter = CPUProcessor::new();
2612        cpu_converter
2613            .convert(
2614                &src,
2615                &mut cpu_dst,
2616                Rotation::None,
2617                Flip::None,
2618                Crop {
2619                    src_rect: Some(Rect {
2620                        left: 320,
2621                        top: 180,
2622                        width: 1280 - 320,
2623                        height: 720 - 180,
2624                    }),
2625                    dst_rect: None,
2626                    dst_color: None,
2627                },
2628            )
2629            .unwrap();
2630
2631        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2632        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2633
2634        gl_converter
2635            .convert(
2636                &src,
2637                &mut gl_dst,
2638                Rotation::None,
2639                Flip::None,
2640                Crop {
2641                    src_rect: Some(Rect {
2642                        left: 320,
2643                        top: 180,
2644                        width: 1280 - 320,
2645                        height: 720 - 180,
2646                    }),
2647                    dst_rect: None,
2648                    dst_color: None,
2649                },
2650            )
2651            .unwrap();
2652
2653        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2654    }
2655
2656    #[test]
2657    #[cfg(target_os = "linux")]
2658    #[cfg(feature = "opengl")]
2659    fn test_opengl_dst_crop() {
2660        if !is_opengl_available() {
2661            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2662            return;
2663        }
2664
2665        let dst_width = 640;
2666        let dst_height = 640;
2667        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2668        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2669
2670        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2671        let mut cpu_converter = CPUProcessor::new();
2672        cpu_converter
2673            .convert(
2674                &src,
2675                &mut cpu_dst,
2676                Rotation::None,
2677                Flip::None,
2678                Crop {
2679                    src_rect: None,
2680                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2681                    dst_color: None,
2682                },
2683            )
2684            .unwrap();
2685
2686        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2687        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2688        gl_converter
2689            .convert(
2690                &src,
2691                &mut gl_dst,
2692                Rotation::None,
2693                Flip::None,
2694                Crop {
2695                    src_rect: None,
2696                    dst_rect: Some(Rect::new(100, 100, 512, 288)),
2697                    dst_color: None,
2698                },
2699            )
2700            .unwrap();
2701
2702        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2703    }
2704
2705    #[test]
2706    #[cfg(target_os = "linux")]
2707    #[cfg(feature = "opengl")]
2708    fn test_opengl_all_rgba() {
2709        if !is_opengl_available() {
2710            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2711            return;
2712        }
2713
2714        let dst_width = 640;
2715        let dst_height = 640;
2716        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2717
2718        let mut cpu_converter = CPUProcessor::new();
2719
2720        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2721
2722        let mut mem = vec![None, Some(TensorMemory::Mem), Some(TensorMemory::Shm)];
2723        if is_dma_available() {
2724            mem.push(Some(TensorMemory::Dma));
2725        }
2726        for m in mem {
2727            let src = TensorImage::load_jpeg(&file, Some(RGBA), m).unwrap();
2728
2729            for rot in [
2730                Rotation::None,
2731                Rotation::Clockwise90,
2732                Rotation::Rotate180,
2733                Rotation::CounterClockwise90,
2734            ] {
2735                for flip in [Flip::None, Flip::Horizontal, Flip::Vertical] {
2736                    let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
2737                    let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, m).unwrap();
2738                    cpu_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2739                    gl_dst.tensor.map().unwrap().as_mut_slice().fill(114);
2740                    cpu_converter
2741                        .convert(
2742                            &src,
2743                            &mut cpu_dst,
2744                            Rotation::None,
2745                            Flip::None,
2746                            Crop {
2747                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
2748                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
2749                                dst_color: None,
2750                            },
2751                        )
2752                        .unwrap();
2753
2754                    gl_converter
2755                        .convert(
2756                            &src,
2757                            &mut gl_dst,
2758                            Rotation::None,
2759                            Flip::None,
2760                            Crop {
2761                                src_rect: Some(Rect::new(50, 120, 1024, 576)),
2762                                dst_rect: Some(Rect::new(100, 100, 512, 288)),
2763                                dst_color: None,
2764                            },
2765                        )
2766                        .map_err(|e| {
2767                            log::error!("error mem {m:?} rot {rot:?} error: {e:?}");
2768                            e
2769                        })
2770                        .unwrap();
2771
2772                    compare_images(
2773                        &gl_dst,
2774                        &cpu_dst,
2775                        0.98,
2776                        &format!("{} {:?} {:?}", function!(), rot, flip),
2777                    );
2778                }
2779            }
2780        }
2781    }
2782
2783    #[test]
2784    #[cfg(target_os = "linux")]
2785    fn test_cpu_rotate() {
2786        for rot in [
2787            Rotation::Clockwise90,
2788            Rotation::Rotate180,
2789            Rotation::CounterClockwise90,
2790        ] {
2791            test_cpu_rotate_(rot);
2792        }
2793    }
2794
2795    #[cfg(target_os = "linux")]
2796    fn test_cpu_rotate_(rot: Rotation) {
2797        // This test rotates the image 4 times and checks that the image was returned to
2798        // be the same Currently doesn't check if rotations actually rotated in
2799        // right direction
2800        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2801
2802        let unchanged_src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2803        let mut src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
2804
2805        let (dst_width, dst_height) = match rot {
2806            Rotation::None | Rotation::Rotate180 => (src.width(), src.height()),
2807            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (src.height(), src.width()),
2808        };
2809
2810        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2811        let mut cpu_converter = CPUProcessor::new();
2812
2813        // After rotating 4 times, the image should be the same as the original
2814
2815        cpu_converter
2816            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2817            .unwrap();
2818
2819        cpu_converter
2820            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
2821            .unwrap();
2822
2823        cpu_converter
2824            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2825            .unwrap();
2826
2827        cpu_converter
2828            .convert(&cpu_dst, &mut src, rot, Flip::None, Crop::no_crop())
2829            .unwrap();
2830
2831        compare_images(&src, &unchanged_src, 0.98, function!());
2832    }
2833
2834    #[test]
2835    #[cfg(target_os = "linux")]
2836    #[cfg(feature = "opengl")]
2837    fn test_opengl_rotate() {
2838        if !is_opengl_available() {
2839            eprintln!("SKIPPED: {} - OpenGL not available", function!());
2840            return;
2841        }
2842
2843        let size = (1280, 720);
2844        let mut mem = vec![None, Some(TensorMemory::Shm), Some(TensorMemory::Mem)];
2845
2846        if is_dma_available() {
2847            mem.push(Some(TensorMemory::Dma));
2848        }
2849        for m in mem {
2850            for rot in [
2851                Rotation::Clockwise90,
2852                Rotation::Rotate180,
2853                Rotation::CounterClockwise90,
2854            ] {
2855                test_opengl_rotate_(size, rot, m);
2856            }
2857        }
2858    }
2859
2860    #[cfg(target_os = "linux")]
2861    #[cfg(feature = "opengl")]
2862    fn test_opengl_rotate_(
2863        size: (usize, usize),
2864        rot: Rotation,
2865        tensor_memory: Option<TensorMemory>,
2866    ) {
2867        let (dst_width, dst_height) = match rot {
2868            Rotation::None | Rotation::Rotate180 => size,
2869            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
2870        };
2871
2872        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2873        let src = TensorImage::load_jpeg(&file, Some(RGBA), tensor_memory).unwrap();
2874
2875        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2876        let mut cpu_converter = CPUProcessor::new();
2877
2878        cpu_converter
2879            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2880            .unwrap();
2881
2882        let mut gl_dst = TensorImage::new(dst_width, dst_height, RGBA, tensor_memory).unwrap();
2883        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
2884
2885        for _ in 0..5 {
2886            gl_converter
2887                .convert(&src, &mut gl_dst, rot, Flip::None, Crop::no_crop())
2888                .unwrap();
2889            compare_images(&gl_dst, &cpu_dst, 0.98, function!());
2890        }
2891    }
2892
2893    #[test]
2894    #[cfg(target_os = "linux")]
2895    fn test_g2d_rotate() {
2896        if !is_g2d_available() {
2897            eprintln!("SKIPPED: test_g2d_rotate - G2D library (libg2d.so.2) not available");
2898            return;
2899        }
2900        if !is_dma_available() {
2901            eprintln!(
2902                "SKIPPED: test_g2d_rotate - DMA memory allocation not available (permission denied or no DMA-BUF support)"
2903            );
2904            return;
2905        }
2906
2907        let size = (1280, 720);
2908        for rot in [
2909            Rotation::Clockwise90,
2910            Rotation::Rotate180,
2911            Rotation::CounterClockwise90,
2912        ] {
2913            test_g2d_rotate_(size, rot);
2914        }
2915    }
2916
2917    #[cfg(target_os = "linux")]
2918    fn test_g2d_rotate_(size: (usize, usize), rot: Rotation) {
2919        let (dst_width, dst_height) = match rot {
2920            Rotation::None | Rotation::Rotate180 => size,
2921            Rotation::Clockwise90 | Rotation::CounterClockwise90 => (size.1, size.0),
2922        };
2923
2924        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
2925        let src = TensorImage::load_jpeg(&file, Some(RGBA), Some(TensorMemory::Dma)).unwrap();
2926
2927        let mut cpu_dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2928        let mut cpu_converter = CPUProcessor::new();
2929
2930        cpu_converter
2931            .convert(&src, &mut cpu_dst, rot, Flip::None, Crop::no_crop())
2932            .unwrap();
2933
2934        let mut g2d_dst =
2935            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
2936        let mut g2d_converter = G2DProcessor::new().unwrap();
2937
2938        g2d_converter
2939            .convert(&src, &mut g2d_dst, rot, Flip::None, Crop::no_crop())
2940            .unwrap();
2941
2942        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
2943    }
2944
2945    #[test]
2946    fn test_rgba_to_yuyv_resize_cpu() {
2947        let src = load_bytes_to_tensor(
2948            1280,
2949            720,
2950            RGBA,
2951            None,
2952            include_bytes!("../../../testdata/camera720p.rgba"),
2953        )
2954        .unwrap();
2955
2956        let (dst_width, dst_height) = (640, 360);
2957
2958        let mut dst = TensorImage::new(dst_width, dst_height, YUYV, None).unwrap();
2959
2960        let mut dst_through_yuyv = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2961        let mut dst_direct = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
2962
2963        let mut cpu_converter = CPUProcessor::new();
2964
2965        cpu_converter
2966            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
2967            .unwrap();
2968
2969        cpu_converter
2970            .convert(
2971                &dst,
2972                &mut dst_through_yuyv,
2973                Rotation::None,
2974                Flip::None,
2975                Crop::no_crop(),
2976            )
2977            .unwrap();
2978
2979        cpu_converter
2980            .convert(
2981                &src,
2982                &mut dst_direct,
2983                Rotation::None,
2984                Flip::None,
2985                Crop::no_crop(),
2986            )
2987            .unwrap();
2988
2989        compare_images(&dst_through_yuyv, &dst_direct, 0.98, function!());
2990    }
2991
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    #[ignore = "opengl doesn't support rendering to YUYV texture"]
    fn test_rgba_to_yuyv_resize_opengl() {
        // Intended to exercise an RGBA -> YUYV downscale through the OpenGL
        // converter. Permanently ignored: the GL path cannot render into a
        // YUYV texture (see the #[ignore] reason).
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let src = load_bytes_to_tensor(
            1280,
            720,
            RGBA,
            None,
            include_bytes!("../../../testdata/camera720p.rgba"),
        )
        .unwrap();

        let (dst_width, dst_height) = (640, 360);

        let mut dst =
            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();

        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // NOTE(review): the GL call uses a 100x100 destination rectangle with
        // a white fill while the CPU reference below uses Crop::no_crop(), so
        // the two outputs differ by construction — if this test is ever
        // re-enabled, the crops must be aligned first.
        gl_converter
            .convert(
                &src,
                &mut dst,
                Rotation::None,
                Flip::None,
                Crop::new()
                    .with_dst_rect(Some(Rect::new(100, 100, 100, 100)))
                    .with_dst_color(Some([255, 255, 255, 255])),
            )
            .unwrap();

        // NOTE(review): debugging leftover — dumps the GL output into the
        // current working directory; remove before re-enabling the test.
        std::fs::write(
            "rgba_to_yuyv_opengl.yuyv",
            dst.tensor().map().unwrap().as_slice(),
        )
        .unwrap();
        let mut cpu_dst =
            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
        CPUProcessor::new()
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();

        // YUYV buffers are compared after converting both to RGB.
        compare_images_convert_to_rgb(&dst, &cpu_dst, 0.98, function!());
    }
3057
3058    #[test]
3059    #[cfg(target_os = "linux")]
3060    fn test_rgba_to_yuyv_resize_g2d() {
3061        if !is_g2d_available() {
3062            eprintln!(
3063                "SKIPPED: test_rgba_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3064            );
3065            return;
3066        }
3067        if !is_dma_available() {
3068            eprintln!(
3069                "SKIPPED: test_rgba_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3070            );
3071            return;
3072        }
3073
3074        let src = load_bytes_to_tensor(
3075            1280,
3076            720,
3077            RGBA,
3078            Some(TensorMemory::Dma),
3079            include_bytes!("../../../testdata/camera720p.rgba"),
3080        )
3081        .unwrap();
3082
3083        let (dst_width, dst_height) = (1280, 720);
3084
3085        let mut cpu_dst =
3086            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3087
3088        let mut g2d_dst =
3089            TensorImage::new(dst_width, dst_height, YUYV, Some(TensorMemory::Dma)).unwrap();
3090
3091        let mut g2d_converter = G2DProcessor::new().unwrap();
3092
3093        g2d_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3094        g2d_converter
3095            .convert(
3096                &src,
3097                &mut g2d_dst,
3098                Rotation::None,
3099                Flip::None,
3100                Crop {
3101                    src_rect: None,
3102                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3103                    dst_color: None,
3104                },
3105            )
3106            .unwrap();
3107
3108        cpu_dst.tensor.map().unwrap().as_mut_slice().fill(128);
3109        CPUProcessor::new()
3110            .convert(
3111                &src,
3112                &mut cpu_dst,
3113                Rotation::None,
3114                Flip::None,
3115                Crop {
3116                    src_rect: None,
3117                    dst_rect: Some(Rect::new(100, 100, 2, 2)),
3118                    dst_color: None,
3119                },
3120            )
3121            .unwrap();
3122
3123        compare_images_convert_to_rgb(&cpu_dst, &g2d_dst, 0.98, function!());
3124    }
3125
3126    #[test]
3127    fn test_yuyv_to_rgba_cpu() {
3128        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3129        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3130        src.tensor()
3131            .map()
3132            .unwrap()
3133            .as_mut_slice()
3134            .copy_from_slice(&file);
3135
3136        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3137        let mut cpu_converter = CPUProcessor::new();
3138
3139        cpu_converter
3140            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3141            .unwrap();
3142
3143        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3144        target_image
3145            .tensor()
3146            .map()
3147            .unwrap()
3148            .as_mut_slice()
3149            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3150
3151        compare_images(&dst, &target_image, 0.98, function!());
3152    }
3153
3154    #[test]
3155    fn test_yuyv_to_rgb_cpu() {
3156        let file = include_bytes!("../../../testdata/camera720p.yuyv").to_vec();
3157        let src = TensorImage::new(1280, 720, YUYV, None).unwrap();
3158        src.tensor()
3159            .map()
3160            .unwrap()
3161            .as_mut_slice()
3162            .copy_from_slice(&file);
3163
3164        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3165        let mut cpu_converter = CPUProcessor::new();
3166
3167        cpu_converter
3168            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3169            .unwrap();
3170
3171        let target_image = TensorImage::new(1280, 720, RGB, None).unwrap();
3172        target_image
3173            .tensor()
3174            .map()
3175            .unwrap()
3176            .as_mut_slice()
3177            .as_chunks_mut::<3>()
3178            .0
3179            .iter_mut()
3180            .zip(
3181                include_bytes!("../../../testdata/camera720p.rgba")
3182                    .as_chunks::<4>()
3183                    .0,
3184            )
3185            .for_each(|(dst, src)| *dst = [src[0], src[1], src[2]]);
3186
3187        compare_images(&dst, &target_image, 0.98, function!());
3188    }
3189
3190    #[test]
3191    #[cfg(target_os = "linux")]
3192    fn test_yuyv_to_rgba_g2d() {
3193        if !is_g2d_available() {
3194            eprintln!("SKIPPED: test_yuyv_to_rgba_g2d - G2D library (libg2d.so.2) not available");
3195            return;
3196        }
3197        if !is_dma_available() {
3198            eprintln!(
3199                "SKIPPED: test_yuyv_to_rgba_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3200            );
3201            return;
3202        }
3203
3204        let src = load_bytes_to_tensor(
3205            1280,
3206            720,
3207            YUYV,
3208            None,
3209            include_bytes!("../../../testdata/camera720p.yuyv"),
3210        )
3211        .unwrap();
3212
3213        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3214        let mut g2d_converter = G2DProcessor::new().unwrap();
3215
3216        g2d_converter
3217            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3218            .unwrap();
3219
3220        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3221        target_image
3222            .tensor()
3223            .map()
3224            .unwrap()
3225            .as_mut_slice()
3226            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3227
3228        compare_images(&dst, &target_image, 0.98, function!());
3229    }
3230
3231    #[test]
3232    #[cfg(target_os = "linux")]
3233    #[cfg(feature = "opengl")]
3234    fn test_yuyv_to_rgba_opengl() {
3235        if !is_opengl_available() {
3236            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3237            return;
3238        }
3239        if !is_dma_available() {
3240            eprintln!(
3241                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3242                function!()
3243            );
3244            return;
3245        }
3246
3247        let src = load_bytes_to_tensor(
3248            1280,
3249            720,
3250            YUYV,
3251            Some(TensorMemory::Dma),
3252            include_bytes!("../../../testdata/camera720p.yuyv"),
3253        )
3254        .unwrap();
3255
3256        let mut dst = TensorImage::new(1280, 720, RGBA, Some(TensorMemory::Dma)).unwrap();
3257        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3258
3259        gl_converter
3260            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3261            .unwrap();
3262
3263        let target_image = TensorImage::new(1280, 720, RGBA, None).unwrap();
3264        target_image
3265            .tensor()
3266            .map()
3267            .unwrap()
3268            .as_mut_slice()
3269            .copy_from_slice(include_bytes!("../../../testdata/camera720p.rgba"));
3270
3271        compare_images(&dst, &target_image, 0.98, function!());
3272    }
3273
3274    #[test]
3275    #[cfg(target_os = "linux")]
3276    fn test_yuyv_to_rgb_g2d() {
3277        if !is_g2d_available() {
3278            eprintln!("SKIPPED: test_yuyv_to_rgb_g2d - G2D library (libg2d.so.2) not available");
3279            return;
3280        }
3281        if !is_dma_available() {
3282            eprintln!(
3283                "SKIPPED: test_yuyv_to_rgb_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3284            );
3285            return;
3286        }
3287
3288        let src = load_bytes_to_tensor(
3289            1280,
3290            720,
3291            YUYV,
3292            None,
3293            include_bytes!("../../../testdata/camera720p.yuyv"),
3294        )
3295        .unwrap();
3296
3297        let mut g2d_dst = TensorImage::new(1280, 720, RGB, Some(TensorMemory::Dma)).unwrap();
3298        let mut g2d_converter = G2DProcessor::new().unwrap();
3299
3300        g2d_converter
3301            .convert(
3302                &src,
3303                &mut g2d_dst,
3304                Rotation::None,
3305                Flip::None,
3306                Crop::no_crop(),
3307            )
3308            .unwrap();
3309
3310        let mut cpu_dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3311        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3312
3313        cpu_converter
3314            .convert(
3315                &src,
3316                &mut cpu_dst,
3317                Rotation::None,
3318                Flip::None,
3319                Crop::no_crop(),
3320            )
3321            .unwrap();
3322
3323        compare_images(&g2d_dst, &cpu_dst, 0.98, function!());
3324    }
3325
3326    #[test]
3327    #[cfg(target_os = "linux")]
3328    fn test_yuyv_to_yuyv_resize_g2d() {
3329        if !is_g2d_available() {
3330            eprintln!(
3331                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - G2D library (libg2d.so.2) not available"
3332            );
3333            return;
3334        }
3335        if !is_dma_available() {
3336            eprintln!(
3337                "SKIPPED: test_yuyv_to_yuyv_resize_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3338            );
3339            return;
3340        }
3341
3342        let src = load_bytes_to_tensor(
3343            1280,
3344            720,
3345            YUYV,
3346            None,
3347            include_bytes!("../../../testdata/camera720p.yuyv"),
3348        )
3349        .unwrap();
3350
3351        let mut g2d_dst = TensorImage::new(600, 400, YUYV, Some(TensorMemory::Dma)).unwrap();
3352        let mut g2d_converter = G2DProcessor::new().unwrap();
3353
3354        g2d_converter
3355            .convert(
3356                &src,
3357                &mut g2d_dst,
3358                Rotation::None,
3359                Flip::None,
3360                Crop::no_crop(),
3361            )
3362            .unwrap();
3363
3364        let mut cpu_dst = TensorImage::new(600, 400, YUYV, None).unwrap();
3365        let mut cpu_converter: CPUProcessor = CPUProcessor::new();
3366
3367        cpu_converter
3368            .convert(
3369                &src,
3370                &mut cpu_dst,
3371                Rotation::None,
3372                Flip::None,
3373                Crop::no_crop(),
3374            )
3375            .unwrap();
3376
3377        // TODO: compare YUYV and YUYV images without having to convert them to RGB
3378        compare_images_convert_to_rgb(&g2d_dst, &cpu_dst, 0.98, function!());
3379    }
3380
3381    #[test]
3382    fn test_yuyv_to_rgba_resize_cpu() {
3383        let src = load_bytes_to_tensor(
3384            1280,
3385            720,
3386            YUYV,
3387            None,
3388            include_bytes!("../../../testdata/camera720p.yuyv"),
3389        )
3390        .unwrap();
3391
3392        let (dst_width, dst_height) = (960, 540);
3393
3394        let mut dst = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3395        let mut cpu_converter = CPUProcessor::new();
3396
3397        cpu_converter
3398            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3399            .unwrap();
3400
3401        let mut dst_target = TensorImage::new(dst_width, dst_height, RGBA, None).unwrap();
3402        let src_target = load_bytes_to_tensor(
3403            1280,
3404            720,
3405            RGBA,
3406            None,
3407            include_bytes!("../../../testdata/camera720p.rgba"),
3408        )
3409        .unwrap();
3410        cpu_converter
3411            .convert(
3412                &src_target,
3413                &mut dst_target,
3414                Rotation::None,
3415                Flip::None,
3416                Crop::no_crop(),
3417            )
3418            .unwrap();
3419
3420        compare_images(&dst, &dst_target, 0.98, function!());
3421    }
3422
3423    #[test]
3424    #[cfg(target_os = "linux")]
3425    fn test_yuyv_to_rgba_crop_flip_g2d() {
3426        if !is_g2d_available() {
3427            eprintln!(
3428                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - G2D library (libg2d.so.2) not available"
3429            );
3430            return;
3431        }
3432        if !is_dma_available() {
3433            eprintln!(
3434                "SKIPPED: test_yuyv_to_rgba_crop_flip_g2d - DMA memory allocation not available (permission denied or no DMA-BUF support)"
3435            );
3436            return;
3437        }
3438
3439        let src = load_bytes_to_tensor(
3440            1280,
3441            720,
3442            YUYV,
3443            Some(TensorMemory::Dma),
3444            include_bytes!("../../../testdata/camera720p.yuyv"),
3445        )
3446        .unwrap();
3447
3448        let (dst_width, dst_height) = (640, 640);
3449
3450        let mut dst_g2d =
3451            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3452        let mut g2d_converter = G2DProcessor::new().unwrap();
3453
3454        g2d_converter
3455            .convert(
3456                &src,
3457                &mut dst_g2d,
3458                Rotation::None,
3459                Flip::Horizontal,
3460                Crop {
3461                    src_rect: Some(Rect {
3462                        left: 20,
3463                        top: 15,
3464                        width: 400,
3465                        height: 300,
3466                    }),
3467                    dst_rect: None,
3468                    dst_color: None,
3469                },
3470            )
3471            .unwrap();
3472
3473        let mut dst_cpu =
3474            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3475        let mut cpu_converter = CPUProcessor::new();
3476
3477        cpu_converter
3478            .convert(
3479                &src,
3480                &mut dst_cpu,
3481                Rotation::None,
3482                Flip::Horizontal,
3483                Crop {
3484                    src_rect: Some(Rect {
3485                        left: 20,
3486                        top: 15,
3487                        width: 400,
3488                        height: 300,
3489                    }),
3490                    dst_rect: None,
3491                    dst_color: None,
3492                },
3493            )
3494            .unwrap();
3495        compare_images(&dst_g2d, &dst_cpu, 0.98, function!());
3496    }
3497
3498    #[test]
3499    #[cfg(target_os = "linux")]
3500    #[cfg(feature = "opengl")]
3501    fn test_yuyv_to_rgba_crop_flip_opengl() {
3502        if !is_opengl_available() {
3503            eprintln!("SKIPPED: {} - OpenGL not available", function!());
3504            return;
3505        }
3506
3507        if !is_dma_available() {
3508            eprintln!(
3509                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
3510                function!()
3511            );
3512            return;
3513        }
3514
3515        let src = load_bytes_to_tensor(
3516            1280,
3517            720,
3518            YUYV,
3519            Some(TensorMemory::Dma),
3520            include_bytes!("../../../testdata/camera720p.yuyv"),
3521        )
3522        .unwrap();
3523
3524        let (dst_width, dst_height) = (640, 640);
3525
3526        let mut dst_gl =
3527            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3528        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();
3529
3530        gl_converter
3531            .convert(
3532                &src,
3533                &mut dst_gl,
3534                Rotation::None,
3535                Flip::Horizontal,
3536                Crop {
3537                    src_rect: Some(Rect {
3538                        left: 20,
3539                        top: 15,
3540                        width: 400,
3541                        height: 300,
3542                    }),
3543                    dst_rect: None,
3544                    dst_color: None,
3545                },
3546            )
3547            .unwrap();
3548
3549        let mut dst_cpu =
3550            TensorImage::new(dst_width, dst_height, RGBA, Some(TensorMemory::Dma)).unwrap();
3551        let mut cpu_converter = CPUProcessor::new();
3552
3553        cpu_converter
3554            .convert(
3555                &src,
3556                &mut dst_cpu,
3557                Rotation::None,
3558                Flip::Horizontal,
3559                Crop {
3560                    src_rect: Some(Rect {
3561                        left: 20,
3562                        top: 15,
3563                        width: 400,
3564                        height: 300,
3565                    }),
3566                    dst_rect: None,
3567                    dst_color: None,
3568                },
3569            )
3570            .unwrap();
3571        compare_images(&dst_gl, &dst_cpu, 0.98, function!());
3572    }
3573
3574    #[test]
3575    fn test_nv12_to_rgba_cpu() {
3576        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3577        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3578        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3579
3580        let mut dst = TensorImage::new(1280, 720, RGBA, None).unwrap();
3581        let mut cpu_converter = CPUProcessor::new();
3582
3583        cpu_converter
3584            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3585            .unwrap();
3586
3587        let target_image = TensorImage::load_jpeg(
3588            include_bytes!("../../../testdata/zidane.jpg"),
3589            Some(RGBA),
3590            None,
3591        )
3592        .unwrap();
3593
3594        compare_images(&dst, &target_image, 0.98, function!());
3595    }
3596
3597    #[test]
3598    fn test_nv12_to_rgb_cpu() {
3599        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3600        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3601        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3602
3603        let mut dst = TensorImage::new(1280, 720, RGB, None).unwrap();
3604        let mut cpu_converter = CPUProcessor::new();
3605
3606        cpu_converter
3607            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3608            .unwrap();
3609
3610        let target_image = TensorImage::load_jpeg(
3611            include_bytes!("../../../testdata/zidane.jpg"),
3612            Some(RGB),
3613            None,
3614        )
3615        .unwrap();
3616
3617        compare_images(&dst, &target_image, 0.98, function!());
3618    }
3619
3620    #[test]
3621    fn test_nv12_to_grey_cpu() {
3622        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3623        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3624        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3625
3626        let mut dst = TensorImage::new(1280, 720, GREY, None).unwrap();
3627        let mut cpu_converter = CPUProcessor::new();
3628
3629        cpu_converter
3630            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3631            .unwrap();
3632
3633        let target_image = TensorImage::load_jpeg(
3634            include_bytes!("../../../testdata/zidane.jpg"),
3635            Some(GREY),
3636            None,
3637        )
3638        .unwrap();
3639
3640        compare_images(&dst, &target_image, 0.98, function!());
3641    }
3642
3643    #[test]
3644    fn test_nv12_to_yuyv_cpu() {
3645        let file = include_bytes!("../../../testdata/zidane.nv12").to_vec();
3646        let src = TensorImage::new(1280, 720, NV12, None).unwrap();
3647        src.tensor().map().unwrap().as_mut_slice()[0..(1280 * 720 * 3 / 2)].copy_from_slice(&file);
3648
3649        let mut dst = TensorImage::new(1280, 720, YUYV, None).unwrap();
3650        let mut cpu_converter = CPUProcessor::new();
3651
3652        cpu_converter
3653            .convert(&src, &mut dst, Rotation::None, Flip::None, Crop::no_crop())
3654            .unwrap();
3655
3656        let target_image = TensorImage::load_jpeg(
3657            include_bytes!("../../../testdata/zidane.jpg"),
3658            Some(RGB),
3659            None,
3660        )
3661        .unwrap();
3662
3663        compare_images_convert_to_rgb(&dst, &target_image, 0.98, function!());
3664    }
3665
    #[test]
    fn test_cpu_resize_planar_rgb() {
        // CPU resize of a 4x4 interleaved RGBA source into a 5x5 PLANAR_RGB
        // destination with a dst_rect offset of (1, 1), padding uncovered
        // pixels with the dst_color fill. The expected output is checked
        // byte-for-byte against the full planar buffer.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // 4x4 source image, interleaved RGBA: 4 bytes per pixel, 4 pixels
        // per literal row below.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGB, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the 4x4 source at offset (1, 1) inside the 5x5
                // destination; everything outside the rect is filled with
                // the dst_color (114 per channel; alpha 255 unused for RGB).
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Full expected planar buffer: 3 planes x 5 x 5 = 75 bytes.
        // NOTE(review): the grouping of values per literal line is layout
        // convenience only - verify against the planar stride if editing.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,    114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,    114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,      114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
3711
    #[test]
    fn test_cpu_resize_planar_rgba() {
        // Same as test_cpu_resize_planar_rgb but with a PLANAR_RGBA
        // destination: the alpha channel becomes a fourth plane and the
        // dst_color's fourth component (255) fills its padded region.
        let src = TensorImage::new(4, 4, RGBA, None).unwrap();
        // 4x4 source image, interleaved RGBA: 4 bytes per pixel, 4 pixels
        // per literal row below.
        #[rustfmt::skip]
        let src_image = [
                    255, 0, 0, 255,     0, 255, 0, 255,     0, 0, 255, 255,     255, 255, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
                    0, 0, 255, 0,       0, 255, 255, 255,   255, 255, 0, 0,     0, 0, 0, 255,
                    255, 0, 0, 0,       0, 0, 0, 255,       255,  0, 255, 0,    255, 0, 255, 255,
        ];
        src.tensor()
            .map()
            .unwrap()
            .as_mut_slice()
            .copy_from_slice(&src_image);

        let mut cpu_dst = TensorImage::new(5, 5, PLANAR_RGBA, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();

        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                // Place the 4x4 source at offset (1, 1) inside the 5x5
                // destination; everything outside the rect is filled with
                // the dst_color ([114, 114, 114, 255]).
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 1,
                        top: 1,
                        width: 4,
                        height: 4,
                    }))
                    .with_dst_color(Some([114, 114, 114, 255])),
            )
            .unwrap();

        // Full expected planar buffer: 4 planes x 5 x 5 = 100 bytes.
        // NOTE(review): the grouping of values per literal line is layout
        // convenience only - verify against the planar stride if editing.
        #[rustfmt::skip]
        let expected_dst = [
            114, 114, 114, 114, 114,    114, 255, 0, 0, 255,        114, 255, 0, 255, 255,      114, 0, 0, 255, 0,        114, 255, 0, 255, 255,
            114, 114, 114, 114, 114,    114, 0, 255, 0, 255,        114, 0, 0, 0, 0,            114, 0, 255, 255, 0,      114, 0, 0, 0, 0,
            114, 114, 114, 114, 114,    114, 0, 0, 255, 0,          114, 0, 0, 255, 255,        114, 255, 255, 0, 0,      114, 0, 0, 255, 255,
            255, 255, 255, 255, 255,    255, 255, 255, 255, 255,    255, 0, 255, 0, 255,        255, 0, 255, 0, 255,      255, 0, 255, 0, 255,
        ];

        assert_eq!(cpu_dst.tensor().map().unwrap().as_slice(), &expected_dst);
    }
3758
    #[test]
    #[cfg(target_os = "linux")]
    #[cfg(feature = "opengl")]
    fn test_opengl_resize_planar_rgb() {
        // OpenGL and CPU paths must agree on a letterboxed resize of a JPEG
        // source into a 640x640 PLANAR_RGB destination (440x440 content at
        // offset (102, 102), background filled with dst_color).
        if !is_opengl_available() {
            eprintln!("SKIPPED: {} - OpenGL not available", function!());
            return;
        }

        if !is_dma_available() {
            eprintln!(
                "SKIPPED: {} - DMA memory allocation not available (permission denied or no DMA-BUF support)",
                function!()
            );
            return;
        }

        let dst_width = 640;
        let dst_height = 640;
        let file = include_bytes!("../../../testdata/test_image.jpg").to_vec();
        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();

        let mut cpu_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
        let mut cpu_converter = CPUProcessor::new();
        // NOTE(review): this full-frame convert appears to be fully
        // overwritten by the letterboxed convert below (which supplies a
        // dst_color background fill) - confirm whether it is still needed.
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::no_crop(),
            )
            .unwrap();
        cpu_converter
            .convert(
                &src,
                &mut cpu_dst,
                Rotation::None,
                Flip::None,
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 102,
                        top: 102,
                        width: 440,
                        height: 440,
                    }))
                    .with_dst_color(Some([114, 114, 114, 114])),
            )
            .unwrap();

        let mut gl_dst = TensorImage::new(dst_width, dst_height, PLANAR_RGB, None).unwrap();
        let mut gl_converter = GLProcessorThreaded::new(None).unwrap();

        // Same letterboxed conversion through the OpenGL path.
        gl_converter
            .convert(
                &src,
                &mut gl_dst,
                Rotation::None,
                Flip::None,
                Crop::new()
                    .with_dst_rect(Some(Rect {
                        left: 102,
                        top: 102,
                        width: 440,
                        height: 440,
                    }))
                    .with_dst_color(Some([114, 114, 114, 114])),
            )
            .unwrap();
        compare_images(&gl_dst, &cpu_dst, 0.98, function!());
    }
3830
3831    #[test]
3832    fn test_cpu_resize_nv16() {
3833        let file = include_bytes!("../../../testdata/zidane.jpg").to_vec();
3834        let src = TensorImage::load_jpeg(&file, Some(RGBA), None).unwrap();
3835
3836        let mut cpu_nv16_dst = TensorImage::new(640, 640, NV16, None).unwrap();
3837        let mut cpu_rgb_dst = TensorImage::new(640, 640, RGB, None).unwrap();
3838        let mut cpu_converter = CPUProcessor::new();
3839
3840        cpu_converter
3841            .convert(
3842                &src,
3843                &mut cpu_nv16_dst,
3844                Rotation::None,
3845                Flip::None,
3846                // Crop::no_crop(),
3847                Crop::new()
3848                    .with_dst_rect(Some(Rect {
3849                        left: 20,
3850                        top: 140,
3851                        width: 600,
3852                        height: 360,
3853                    }))
3854                    .with_dst_color(Some([255, 128, 0, 255])),
3855            )
3856            .unwrap();
3857
3858        cpu_converter
3859            .convert(
3860                &src,
3861                &mut cpu_rgb_dst,
3862                Rotation::None,
3863                Flip::None,
3864                Crop::new()
3865                    .with_dst_rect(Some(Rect {
3866                        left: 20,
3867                        top: 140,
3868                        width: 600,
3869                        height: 360,
3870                    }))
3871                    .with_dst_color(Some([255, 128, 0, 255])),
3872            )
3873            .unwrap();
3874        compare_images_convert_to_rgb(&cpu_nv16_dst, &cpu_rgb_dst, 0.99, function!());
3875    }
3876
    /// Create a `TensorImage` with the given geometry, format, and memory
    /// backing, and fill its entire tensor with `bytes`.
    ///
    /// Returns an error if the image or its memory mapping cannot be
    /// created. Panics (via `copy_from_slice`) if `bytes.len()` does not
    /// exactly match the tensor's element count for this geometry/format.
    fn load_bytes_to_tensor(
        width: usize,
        height: usize,
        fourcc: FourCharCode,
        memory: Option<TensorMemory>,
        bytes: &[u8],
    ) -> Result<TensorImage, Error> {
        let src = TensorImage::new(width, height, fourcc, memory)?;
        src.tensor().map()?.as_mut_slice().copy_from_slice(bytes);
        Ok(src)
    }
3888
3889    fn compare_images(img1: &TensorImage, img2: &TensorImage, threshold: f64, name: &str) {
3890        assert_eq!(img1.height(), img2.height(), "Heights differ");
3891        assert_eq!(img1.width(), img2.width(), "Widths differ");
3892        assert_eq!(img1.fourcc(), img2.fourcc(), "FourCC differ");
3893        assert!(
3894            matches!(img1.fourcc(), RGB | RGBA | GREY | PLANAR_RGB),
3895            "FourCC must be RGB or RGBA for comparison"
3896        );
3897
3898        let image1 = match img1.fourcc() {
3899            RGB => image::RgbImage::from_vec(
3900                img1.width() as u32,
3901                img1.height() as u32,
3902                img1.tensor().map().unwrap().to_vec(),
3903            )
3904            .unwrap(),
3905            RGBA => image::RgbaImage::from_vec(
3906                img1.width() as u32,
3907                img1.height() as u32,
3908                img1.tensor().map().unwrap().to_vec(),
3909            )
3910            .unwrap()
3911            .convert(),
3912            GREY => image::GrayImage::from_vec(
3913                img1.width() as u32,
3914                img1.height() as u32,
3915                img1.tensor().map().unwrap().to_vec(),
3916            )
3917            .unwrap()
3918            .convert(),
3919            PLANAR_RGB => image::GrayImage::from_vec(
3920                img1.width() as u32,
3921                (img1.height() * 3) as u32,
3922                img1.tensor().map().unwrap().to_vec(),
3923            )
3924            .unwrap()
3925            .convert(),
3926            _ => return,
3927        };
3928
3929        let image2 = match img2.fourcc() {
3930            RGB => image::RgbImage::from_vec(
3931                img2.width() as u32,
3932                img2.height() as u32,
3933                img2.tensor().map().unwrap().to_vec(),
3934            )
3935            .unwrap(),
3936            RGBA => image::RgbaImage::from_vec(
3937                img2.width() as u32,
3938                img2.height() as u32,
3939                img2.tensor().map().unwrap().to_vec(),
3940            )
3941            .unwrap()
3942            .convert(),
3943            GREY => image::GrayImage::from_vec(
3944                img2.width() as u32,
3945                img2.height() as u32,
3946                img2.tensor().map().unwrap().to_vec(),
3947            )
3948            .unwrap()
3949            .convert(),
3950            PLANAR_RGB => image::GrayImage::from_vec(
3951                img2.width() as u32,
3952                (img2.height() * 3) as u32,
3953                img2.tensor().map().unwrap().to_vec(),
3954            )
3955            .unwrap()
3956            .convert(),
3957            _ => return,
3958        };
3959
3960        let similarity = image_compare::rgb_similarity_structure(
3961            &image_compare::Algorithm::RootMeanSquared,
3962            &image1,
3963            &image2,
3964        )
3965        .expect("Image Comparison failed");
3966        if similarity.score < threshold {
3967            // image1.save(format!("{name}_1.png"));
3968            // image2.save(format!("{name}_2.png"));
3969            similarity
3970                .image
3971                .to_color_map()
3972                .save(format!("{name}.png"))
3973                .unwrap();
3974            panic!(
3975                "{name}: converted image and target image have similarity score too low: {} < {}",
3976                similarity.score, threshold
3977            )
3978        }
3979    }
3980
3981    fn compare_images_convert_to_rgb(
3982        img1: &TensorImage,
3983        img2: &TensorImage,
3984        threshold: f64,
3985        name: &str,
3986    ) {
3987        assert_eq!(img1.height(), img2.height(), "Heights differ");
3988        assert_eq!(img1.width(), img2.width(), "Widths differ");
3989
3990        let mut img_rgb1 =
3991            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
3992        let mut img_rgb2 =
3993            TensorImage::new(img1.width(), img1.height(), RGB, Some(TensorMemory::Mem)).unwrap();
3994        CPUProcessor::convert_format(img1, &mut img_rgb1).unwrap();
3995        CPUProcessor::convert_format(img2, &mut img_rgb2).unwrap();
3996
3997        let image1 = image::RgbImage::from_vec(
3998            img_rgb1.width() as u32,
3999            img_rgb1.height() as u32,
4000            img_rgb1.tensor().map().unwrap().to_vec(),
4001        )
4002        .unwrap();
4003
4004        let image2 = image::RgbImage::from_vec(
4005            img_rgb2.width() as u32,
4006            img_rgb2.height() as u32,
4007            img_rgb2.tensor().map().unwrap().to_vec(),
4008        )
4009        .unwrap();
4010
4011        let similarity = image_compare::rgb_similarity_structure(
4012            &image_compare::Algorithm::RootMeanSquared,
4013            &image1,
4014            &image2,
4015        )
4016        .expect("Image Comparison failed");
4017        if similarity.score < threshold {
4018            // image1.save(format!("{name}_1.png"));
4019            // image2.save(format!("{name}_2.png"));
4020            similarity
4021                .image
4022                .to_color_map()
4023                .save(format!("{name}.png"))
4024                .unwrap();
4025            panic!(
4026                "{name}: converted image and target image have similarity score too low: {} < {}",
4027                similarity.score, threshold
4028            )
4029        }
4030    }
4031
4032    // =========================================================================
4033    // NV12 Format Tests
4034    // =========================================================================
4035
4036    #[test]
4037    fn test_nv12_tensor_image_creation() {
4038        let width = 640;
4039        let height = 480;
4040        let img = TensorImage::new(width, height, NV12, None).unwrap();
4041
4042        assert_eq!(img.width(), width);
4043        assert_eq!(img.height(), height);
4044        assert_eq!(img.fourcc(), NV12);
4045        // NV12 uses shape [H*3/2, W] to store Y plane + UV plane
4046        assert_eq!(img.tensor().shape(), &[height * 3 / 2, width]);
4047    }
4048
4049    #[test]
4050    fn test_nv12_channels() {
4051        let img = TensorImage::new(640, 480, NV12, None).unwrap();
4052        // NV12 reports 2 channels (Y + interleaved UV)
4053        assert_eq!(img.channels(), 2);
4054    }
4055
4056    // =========================================================================
4057    // TensorImageRef Tests
4058    // =========================================================================
4059
4060    #[test]
4061    fn test_tensor_image_ref_from_planar_tensor() {
4062        // Create a planar RGB tensor [3, 480, 640]
4063        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4064
4065        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4066
4067        assert_eq!(img_ref.width(), 640);
4068        assert_eq!(img_ref.height(), 480);
4069        assert_eq!(img_ref.channels(), 3);
4070        assert_eq!(img_ref.fourcc(), PLANAR_RGB);
4071        assert!(img_ref.is_planar());
4072    }
4073
4074    #[test]
4075    fn test_tensor_image_ref_from_interleaved_tensor() {
4076        // Create an interleaved RGBA tensor [480, 640, 4]
4077        let mut tensor = Tensor::<u8>::new(&[480, 640, 4], None, None).unwrap();
4078
4079        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA).unwrap();
4080
4081        assert_eq!(img_ref.width(), 640);
4082        assert_eq!(img_ref.height(), 480);
4083        assert_eq!(img_ref.channels(), 4);
4084        assert_eq!(img_ref.fourcc(), RGBA);
4085        assert!(!img_ref.is_planar());
4086    }
4087
4088    #[test]
4089    fn test_tensor_image_ref_invalid_shape() {
4090        // 2D tensor should fail
4091        let mut tensor = Tensor::<u8>::new(&[480, 640], None, None).unwrap();
4092        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGB);
4093        assert!(matches!(result, Err(Error::InvalidShape(_))));
4094    }
4095
4096    #[test]
4097    fn test_tensor_image_ref_wrong_channels() {
4098        // RGBA expects 4 channels but tensor has 3
4099        let mut tensor = Tensor::<u8>::new(&[480, 640, 3], None, None).unwrap();
4100        let result = TensorImageRef::from_borrowed_tensor(&mut tensor, RGBA);
4101        assert!(matches!(result, Err(Error::InvalidShape(_))));
4102    }
4103
4104    #[test]
4105    fn test_tensor_image_dst_trait_tensor_image() {
4106        let img = TensorImage::new(640, 480, RGB, None).unwrap();
4107
4108        // Test TensorImageDst trait implementation
4109        fn check_dst<T: TensorImageDst>(dst: &T) {
4110            assert_eq!(dst.width(), 640);
4111            assert_eq!(dst.height(), 480);
4112            assert_eq!(dst.channels(), 3);
4113            assert!(!dst.is_planar());
4114        }
4115
4116        check_dst(&img);
4117    }
4118
4119    #[test]
4120    fn test_tensor_image_dst_trait_tensor_image_ref() {
4121        let mut tensor = Tensor::<u8>::new(&[3, 480, 640], None, None).unwrap();
4122        let img_ref = TensorImageRef::from_borrowed_tensor(&mut tensor, PLANAR_RGB).unwrap();
4123
4124        fn check_dst<T: TensorImageDst>(dst: &T) {
4125            assert_eq!(dst.width(), 640);
4126            assert_eq!(dst.height(), 480);
4127            assert_eq!(dst.channels(), 3);
4128            assert!(dst.is_planar());
4129        }
4130
4131        check_dst(&img_ref);
4132    }
4133}